From 747e137a04b6316fdcdec7c494ec4b8f2a8f0bc5 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 16 May 2024 20:39:08 +0000 Subject: [PATCH] removes kind tab filters when window expands 993fef69b8d9542d906468c6dcd45ffd5918073b --- asset-manifest.json | 6 +++--- index.html | 2 +- static/js/{main.64a04592.js => main.606584c4.js} | 6 +++--- ...64a04592.js.LICENSE.txt => main.606584c4.js.LICENSE.txt} | 0 static/js/{main.64a04592.js.map => main.606584c4.js.map} | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) rename static/js/{main.64a04592.js => main.606584c4.js} (68%) rename static/js/{main.64a04592.js.LICENSE.txt => main.606584c4.js.LICENSE.txt} (100%) rename static/js/{main.64a04592.js.map => main.606584c4.js.map} (70%) diff --git a/asset-manifest.json b/asset-manifest.json index 89d1c7dc..5b0c02aa 100644 --- a/asset-manifest.json +++ b/asset-manifest.json @@ -1,6 +1,6 @@ { "files": { - "main.js": "/static/js/main.64a04592.js", + "main.js": "/static/js/main.606584c4.js", "static/js/12.a52bd582.chunk.js": "/static/js/12.a52bd582.chunk.js", "static/js/361.5ef997bf.chunk.js": "/static/js/361.5ef997bf.chunk.js", "static/js/692.c2396cc3.chunk.js": "/static/js/692.c2396cc3.chunk.js", @@ -49,10 +49,10 @@ "static/media/VolumeOff.svg": "/static/media/VolumeOff.1149786552b52a25580655f2ce3391c1.svg", "static/media/VolumeUp.svg": "/static/media/VolumeUp.30de055e122b96e2aa207a710b1bd74a.svg", "static/media/Moon.svg": "/static/media/Moon.593035e92f1f02f04668bb75454d1fb3.svg", - "main.64a04592.js.map": "/static/js/main.64a04592.js.map", + "main.606584c4.js.map": "/static/js/main.606584c4.js.map", "845.d9f7556a.chunk.js.map": "/static/js/845.d9f7556a.chunk.js.map" }, "entrypoints": [ - "static/js/main.64a04592.js" + "static/js/main.606584c4.js" ] } \ No newline at end of file diff --git a/index.html b/index.html index e2e3b455..4ac690a0 100644 --- a/index.html +++ b/index.html @@ -1 +1 @@ -Piximi
\ No newline at end of file +Piximi
\ No newline at end of file diff --git a/static/js/main.64a04592.js b/static/js/main.606584c4.js similarity index 68% rename from static/js/main.64a04592.js rename to static/js/main.606584c4.js index eba00570..c1d0daf3 100644 --- a/static/js/main.64a04592.js +++ b/static/js/main.606584c4.js @@ -1,3 +1,3 @@ -/*! For license information please see main.64a04592.js.LICENSE.txt */ -(function(){var __webpack_modules__={83361:function(e,t,n){"use strict";n.d(t,{Z:function(){return ae}});var r=function(){function e(e){var t=this;this._insertTag=function(e){var n;n=0===t.tags.length?t.insertionPoint?t.insertionPoint.nextSibling:t.prepend?t.container.firstChild:t.before:t.tags[t.tags.length-1].nextSibling,t.container.insertBefore(e,n),t.tags.push(e)},this.isSpeedy=void 0===e.speedy||e.speedy,this.tags=[],this.ctr=0,this.nonce=e.nonce,this.key=e.key,this.container=e.container,this.prepend=e.prepend,this.insertionPoint=e.insertionPoint,this.before=null}var t=e.prototype;return t.hydrate=function(e){e.forEach(this._insertTag)},t.insert=function(e){this.ctr%(this.isSpeedy?65e3:1)===0&&this._insertTag(function(e){var t=document.createElement("style");return t.setAttribute("data-emotion",e.key),void 0!==e.nonce&&t.setAttribute("nonce",e.nonce),t.appendChild(document.createTextNode("")),t.setAttribute("data-s",""),t}(this));var t=this.tags[this.tags.length-1];if(this.isSpeedy){var n=function(e){if(e.sheet)return e.sheet;for(var t=0;t0?l(y,--I):0,p--,10===v&&(p=1,g--),v}function B(){return v=I2||x(v)>3?"":" "}function R(e,t){for(;--t&&B()&&!(v<48||v>102||v>57&&v<65||v>70&&v<97););return k(e,Q()+(t<6&&32==w()&&32==B()))}function F(e){for(;B();)switch(v){case e:return I;case 34:case 39:34!==e&&39!==e&&F(v);break;case 40:41===e&&F(e);break;case 92:B()}return I}function T(e,t){for(;B()&&e+v!==57&&(e+v!==84||47!==w()););return"/*"+k(t,I-1)+"*"+i(47===e?e:B())}function M(e){for(;!x(w());)B();return k(e,I)}var L="-ms-",O="-moz-",U="-webkit-",G="comm",P="rule",j="decl",q="@keyframes";function Z(e,t){for(var n="",r=h(e),a=0;a0&&d(O)-m&&f(v>32?Y(O+";",r,n,m-1):Y(c(O," ","")+";",r,n,m-2),h);break;case 59:O+=";";default:if(f(L=J(O,t,n,g,p,a,A,_,S=[],F=[],m),o),123===x)if(0===p)W(O,t,L,L,S,o,m,A,F);else switch(99===I&&110===l(O,3)?100:I){case 100:case 108:case 109:case 115:W(e,L,L,r&&f(J(e,L,L,0,0,a,A,_,a,S=[],m),F),a,F,m,A,r?S:F);break;default:W(O,L,L,L,[""],F,0,A,F)}}g=p=v=0,C=k=1,_=O="",m=s;break;case 58:m=1+d(O),v=y;default:if(C<1)if(123==x)--C;else if(125==x&&0==C++&&125==b())continue;switch(O+=i(x),x*C){case 38:k=p>0?1:(O+="\f",-1);break;case 44:A[g++]=(d(O)-1)*k,k=1;break;case 64:45===w()&&(O+=D(B())),I=w(),p=m=d(_=O+=M(Q())),x++;break;case 45:45===y&&2==d(O)&&(C=0)}}return o}function J(e,t,n,r,i,o,u,l,d,f,g){for(var p=i-1,m=0===i?o:[""],I=h(m),v=0,y=0,E=0;v0?m[b]+" "+B:c(B,/&\f/g,m[b])))&&(d[E++]=w);return C(e,t,n,0===i?P:l,d,f,g)}function K(e,t,n){return C(e,t,n,G,i(v),A(e,2,-2),0)}function Y(e,t,n,r){return C(e,t,n,j,A(e,0,r),A(e,r+1,-1),r)}var V=function(e,t,n){for(var r=0,a=0;r=a,a=w(),38===r&&12===a&&(t[n]=1),!x(a);)B();return k(e,I)},X=function(e,t){return S(function(e,t){var n=-1,r=44;do{switch(x(r)){case 0:38===r&&12===w()&&(t[n]=1),e[n]+=V(I-1,t,n);break;case 2:e[n]+=D(r);break;case 4:if(44===r){e[++n]=58===w()?"&\f":"",t[n]=e[n].length;break}default:e[n]+=i(r)}}while(r=B());return e}(_(e),t))},$=new WeakMap,ee=function(e){if("rule"===e.type&&e.parent&&!(e.length<1)){for(var 
t=e.value,n=e.parent,r=e.column===n.column&&e.line===n.line;"rule"!==n.type;)if(!(n=n.parent))return;if((1!==e.props.length||58===t.charCodeAt(0)||$.get(n))&&!r){$.set(e,!0);for(var a=[],i=X(t,a),o=n.props,s=0,c=0;s6)switch(l(e,t+1)){case 109:if(45!==l(e,t+4))break;case 102:return c(e,/(.+:)(.+)-([^]+)/,"$1"+U+"$2-$3$1"+O+(108==l(e,t+3)?"$3":"$2-$3"))+e;case 115:return~u(e,"stretch")?ne(c(e,"stretch","fill-available"),t)+e:e}break;case 4949:if(115!==l(e,t+1))break;case 6444:switch(l(e,d(e)-3-(~u(e,"!important")&&10))){case 107:return c(e,":",":"+U)+e;case 101:return c(e,/(.+:)([^;!]+)(;|!.+)?/,"$1"+U+(45===l(e,14)?"inline-":"")+"box$3$1"+U+"$2$3$1"+L+"$2box$3")+e}break;case 5936:switch(l(e,t+11)){case 114:return U+e+L+c(e,/[svh]\w+-[tblr]{2}/,"tb")+e;case 108:return U+e+L+c(e,/[svh]\w+-[tblr]{2}/,"tb-rl")+e;case 45:return U+e+L+c(e,/[svh]\w+-[tblr]{2}/,"lr")+e}return U+e+L+e+e}return e}var re=[function(e,t,n,r){if(e.length>-1&&!e.return)switch(e.type){case j:e.return=ne(e.value,e.length);break;case q:return Z([E(e,{value:c(e.value,"@","@"+U)})],r);case P:if(e.length)return function(e,t){return e.map(t).join("")}(e.props,(function(t){switch(function(e,t){return(e=t.exec(e))?e[0]:e}(t,/(::plac\w+|:read-\w+)/)){case":read-only":case":read-write":return Z([E(e,{props:[c(t,/:(read-\w+)/,":-moz-$1")]})],r);case"::placeholder":return Z([E(e,{props:[c(t,/:(plac\w+)/,":"+U+"input-$1")]}),E(e,{props:[c(t,/:(plac\w+)/,":-moz-$1")]}),E(e,{props:[c(t,/:(plac\w+)/,L+"input-$1")]})],r)}return""}))}}],ae=function(e){var t=e.key;if("css"===t){var n=document.querySelectorAll("style[data-emotion]:not([data-s])");Array.prototype.forEach.call(n,(function(e){-1!==e.getAttribute("data-emotion").indexOf(" ")&&(document.head.appendChild(e),e.setAttribute("data-s",""))}))}var a=e.stylisPlugins||re;var i,o,s={},c=[];i=e.container||document.head,Array.prototype.forEach.call(document.querySelectorAll('style[data-emotion^="'+t+' "]'),(function(e){for(var t=e.getAttribute("data-emotion").split(" "),n=1;n=4;++r,a-=4)t=1540483477*(65535&(t=255&e.charCodeAt(r)|(255&e.charCodeAt(++r))<<8|(255&e.charCodeAt(++r))<<16|(255&e.charCodeAt(++r))<<24))+(59797*(t>>>16)<<16),n=1540483477*(65535&(t^=t>>>24))+(59797*(t>>>16)<<16)^1540483477*(65535&n)+(59797*(n>>>16)<<16);switch(a){case 3:n^=(255&e.charCodeAt(r+2))<<16;case 2:n^=(255&e.charCodeAt(r+1))<<8;case 1:n=1540483477*(65535&(n^=255&e.charCodeAt(r)))+(59797*(n>>>16)<<16)}return(((n=1540483477*(65535&(n^=n>>>13))+(59797*(n>>>16)<<16))^n>>>15)>>>0).toString(36)}(a)+c;return{name:u,styles:a,next:d}}},82561:function(e,t,n){"use strict";var r;n.d(t,{L:function(){return o},j:function(){return s}});var a=n(72791),i=!!(r||(r=n.t(a,2))).useInsertionEffect&&(r||(r=n.t(a,2))).useInsertionEffect,o=i||function(e){return e()},s=i||a.useLayoutEffect},95438:function(e,t,n){"use strict";n.d(t,{My:function(){return i},fp:function(){return r},hC:function(){return a}});function r(e,t,n){var r="";return n.split(" ").forEach((function(n){void 0!==e[n]?t.push(e[n]+";"):r+=n+" "})),r}var a=function(e,t,n){var r=e.key+"-"+t.name;!1===n&&void 0===e.registered[r]&&(e.registered[r]=t.styles)},i=function(e,t,n){a(e,t,n);var r=e.key+"-"+t.name;if(void 0===e.inserted[t.name]){var i=t;do{e.insert(t===i?"."+r:"",i,e.sheet,!0),i=i.next}while(void 0!==i)}}},52504:function(e,t,n){"use strict";var r,a=Object.assign||function(e){for(var t=1;t=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(e,["fill","width","height","style"]);return o.default.createElement("svg",a({viewBox:"0 0 24 
24",style:a({fill:n,width:i,height:c},l)},A),o.default.createElement("path",{d:"M21,7L9,19L3.5,13.5L4.91,12.09L9,16.17L19.59,5.59L21,7Z"}))}},52403:function(e,t,n){"use strict";var r,a=Object.assign||function(e){for(var t=1;t=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}(e,["fill","width","height","style"]);return o.default.createElement("svg",a({viewBox:"0 0 24 24",style:a({fill:n,width:i,height:c},l)},A),o.default.createElement("path",{d:"M12,18.17L8.83,15L7.42,16.41L12,21L16.59,16.41L15.17,15M12,5.83L15.17,9L16.58,7.59L12,3L7.41,7.59L8.83,9L12,5.83Z"}))}},42419:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M19 13h-6v6h-2v-6H5v-2h6V5h2v6h6v2z"}),"Add");t.Z=o},26759:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"m7 10 5 5 5-5z"}),"ArrowDropDown");t.Z=o},70366:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"m7 14 5-5 5 5z"}),"ArrowDropUp");t.Z=o},1724:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M19 3H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zM9 17H7v-7h2v7zm4 0h-2V7h2v10zm4 0h-2v-4h2v4z"}),"Assessment");t.Z=o},29823:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M19 6.41 17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12z"}),"Close");t.Z=o},27247:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M6 19c0 1.1.9 2 2 2h8c1.1 0 2-.9 2-2V7H6v12zM19 4h-3.5l-1-1h-5l-1 1H5v2h14V4z"}),"Delete");t.Z=o},73518:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M5 20h14v-2H5v2zM19 9h-4V3H9v6H5l7 7 7-7z"}),"Download");t.Z=o},22885:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"m12 8-6 6 1.41 1.41L12 10.83l4.59 4.58L18 14z"}),"ExpandLess");t.Z=o},81131:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M16.59 8.59 12 13.17 7.41 8.59 6 10l6 6 6-6z"}),"ExpandMore");t.Z=o},27606:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M20 2H4c-1.1 0-1.99.9-1.99 2L2 22l4-4h14c1.1 0 2-.9 2-2V4c0-1.1-.9-2-2-2zm-7 12h-2v-2h2v2zm0-4h-2V6h2v4z"}),"Feedback");t.Z=o},68657:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M14 2H6c-1.1 0-2 .9-2 2v16c0 1.1.89 2 1.99 2H15v-8h5V8l-6-6zm-1 7V3.5L18.5 9H13zm4 12.66V16h5.66v2h-2.24l2.95 2.95-1.41 1.41L19 19.41v2.24h-2z"}),"FileOpen");t.Z=o},32672:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M7 6h10l-5.01 6.3L7 6zm-2.75-.39C6.27 8.2 10 13 10 13v6c0 .55.45 1 1 1h2c.55 0 1-.45 1-1v-6s3.72-4.8 5.74-7.39c.51-.66.04-1.61-.79-1.61H5.04c-.83 0-1.3.95-.79 1.61z"}),"FilterAltOutlined");t.Z=o},8014:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M20 6h-8l-2-2H4c-1.1 0-1.99.9-1.99 2L2 18c0 1.1.9 2 2 2h16c1.1 0 2-.9 2-2V8c0-1.1-.9-2-2-2zm0 
12H4V8h16v10z"}),"FolderOpen");t.Z=o},25878:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm1 15h-2v-6h2v6zm0-8h-2V7h2v2z"}),"Info");t.Z=o},55860:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M11 7h2v2h-2zm0 4h2v6h-2zm1-9C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm0 18c-4.41 0-8-3.59-8-8s3.59-8 8-8 8 3.59 8 8-3.59 8-8 8z"}),"InfoOutlined");t.Z=o},65567:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M17.63 5.84C17.27 5.33 16.67 5 16 5L5 5.01C3.9 5.01 3 5.9 3 7v10c0 1.1.9 1.99 2 1.99L16 19c.67 0 1.27-.33 1.63-.84L22 12l-4.37-6.16z"}),"Label");t.Z=o},90983:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M11.99 2C6.47 2 2 6.48 2 12s4.47 10 9.99 10C17.52 22 22 17.52 22 12S17.52 2 11.99 2zm6.93 6h-2.95c-.32-1.25-.78-2.45-1.38-3.56 1.84.63 3.37 1.91 4.33 3.56zM12 4.04c.83 1.2 1.48 2.53 1.91 3.96h-3.82c.43-1.43 1.08-2.76 1.91-3.96zM4.26 14C4.1 13.36 4 12.69 4 12s.1-1.36.26-2h3.38c-.08.66-.14 1.32-.14 2 0 .68.06 1.34.14 2H4.26zm.82 2h2.95c.32 1.25.78 2.45 1.38 3.56-1.84-.63-3.37-1.9-4.33-3.56zm2.95-8H5.08c.96-1.66 2.49-2.93 4.33-3.56C8.81 5.55 8.35 6.75 8.03 8zM12 19.96c-.83-1.2-1.48-2.53-1.91-3.96h3.82c-.43 1.43-1.08 2.76-1.91 3.96zM14.34 14H9.66c-.09-.66-.16-1.32-.16-2 0-.68.07-1.35.16-2h4.68c.09.65.16 1.32.16 2 0 .68-.07 1.34-.16 2zm.25 5.56c.6-1.11 1.06-2.31 1.38-3.56h2.95c-.96 1.65-2.49 2.93-4.33 3.56zM16.36 14c.08-.66.14-1.32.14-2 0-.68-.06-1.34-.14-2h3.38c.16.64.26 1.31.26 2s-.1 1.36-.26 2h-3.38z"}),"Language");t.Z=o},40786:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M6 19h12v2H6z"}),"Minimize");t.Z=o},2094:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M6 10c-1.1 0-2 .9-2 2s.9 2 2 2 2-.9 2-2-.9-2-2-2zm12 0c-1.1 0-2 .9-2 2s.9 2 2 2 2-.9 2-2-.9-2-2-2zm-6 0c-1.1 0-2 .9-2 2s.9 2 2 2 2-.9 2-2-.9-2-2-2z"}),"MoreHoriz");t.Z=o},16426:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)([(0,i.jsx)("circle",{cx:"7",cy:"14",r:"3"},"0"),(0,i.jsx)("circle",{cx:"11",cy:"6",r:"3"},"1"),(0,i.jsx)("circle",{cx:"16.6",cy:"17.6",r:"3"},"2")],"ScatterPlot");t.Z=o},87122:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M19.14 12.94c.04-.3.06-.61.06-.94 0-.32-.02-.64-.07-.94l2.03-1.58c.18-.14.23-.41.12-.61l-1.92-3.32c-.12-.22-.37-.29-.59-.22l-2.39.96c-.5-.38-1.03-.7-1.62-.94l-.36-2.54c-.04-.24-.24-.41-.48-.41h-3.84c-.24 0-.43.17-.47.41l-.36 2.54c-.59.24-1.13.57-1.62.94l-2.39-.96c-.22-.08-.47 0-.59.22L2.74 8.87c-.12.21-.08.47.12.61l2.03 1.58c-.05.3-.09.63-.09.94s.02.64.07.94l-2.03 1.58c-.18.14-.23.41-.12.61l1.92 3.32c.12.22.37.29.59.22l2.39-.96c.5.38 1.03.7 1.62.94l.36 2.54c.05.24.24.41.48.41h3.84c.24 0 .44-.17.47-.41l.36-2.54c.59-.24 1.13-.56 1.62-.94l2.39.96c.22.08.47 0 .59-.22l1.92-3.32c.12-.22.07-.47-.12-.61l-2.01-1.58zM12 15.6c-1.98 0-3.6-1.62-3.6-3.6s1.62-3.6 3.6-3.6 3.6 1.62 3.6 3.6-1.62 3.6-3.6 3.6z"}),"Settings");t.Z=o},44897:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M3 18h6v-2H3v2zM3 
6v2h18V6H3zm0 7h12v-2H3v2z"}),"Sort");t.Z=o},86616:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M12 6c3.79 0 7.17 2.13 8.82 5.5-.59 1.22-1.42 2.27-2.41 3.12l1.41 1.41c1.39-1.23 2.49-2.77 3.18-4.53C21.27 7.11 17 4 12 4c-1.27 0-2.49.2-3.64.57l1.65 1.65C10.66 6.09 11.32 6 12 6zm-1.07 1.14L13 9.21c.57.25 1.03.71 1.28 1.28l2.07 2.07c.08-.34.14-.7.14-1.07C16.5 9.01 14.48 7 12 7c-.37 0-.72.05-1.07.14zM2.01 3.87l2.68 2.68C3.06 7.83 1.77 9.53 1 11.5 2.73 15.89 7 19 12 19c1.52 0 2.98-.29 4.32-.82l3.42 3.42 1.41-1.41L3.42 2.45 2.01 3.87zm7.5 7.5 2.61 2.61c-.04.01-.08.02-.12.02-1.38 0-2.5-1.12-2.5-2.5 0-.05.01-.08.01-.13zm-3.4-3.4 1.75 1.75c-.23.55-.36 1.15-.36 1.78 0 2.48 2.02 4.5 4.5 4.5.63 0 1.23-.13 1.77-.36l.98.98c-.88.24-1.8.38-2.75.38-3.79 0-7.17-2.13-8.82-5.5.7-1.43 1.72-2.61 2.93-3.53z"}),"VisibilityOffOutlined");t.Z=o},77194:function(e,t,n){"use strict";var r=n(64836);t.Z=void 0;var a=r(n(45649)),i=n(80184),o=(0,a.default)((0,i.jsx)("path",{d:"M12 6c3.79 0 7.17 2.13 8.82 5.5C19.17 14.87 15.79 17 12 17s-7.17-2.13-8.82-5.5C4.83 8.13 8.21 6 12 6m0-2C7 4 2.73 7.11 1 11.5 2.73 15.89 7 19 12 19s9.27-3.11 11-7.5C21.27 7.11 17 4 12 4zm0 5c1.38 0 2.5 1.12 2.5 2.5S13.38 14 12 14s-2.5-1.12-2.5-2.5S10.62 9 12 9m0-2c-2.48 0-4.5 2.02-4.5 4.5S9.52 16 12 16s4.5-2.02 4.5-4.5S14.48 7 12 7z"}),"VisibilityOutlined");t.Z=o},45649:function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return r.createSvgIcon}});var r=n(28610)},99259:function(e,t,n){"use strict";n.d(t,{Z:function(){return I}});var r=n(87462),a=n(63366),i=n(72791),o=n(28182),s=n(94419),c=n(14036),u=n(31402),l=n(66934),A=n(75878),d=n(21217);function h(e){return(0,d.Z)("MuiSvgIcon",e)}(0,A.Z)("MuiSvgIcon",["root","colorPrimary","colorSecondary","colorAction","colorError","colorDisabled","fontSizeInherit","fontSizeSmall","fontSizeMedium","fontSizeLarge"]);var f=n(80184),g=["children","className","color","component","fontSize","htmlColor","inheritViewBox","titleAccess","viewBox"],p=(0,l.ZP)("svg",{name:"MuiSvgIcon",slot:"Root",overridesResolver:function(e,t){var n=e.ownerState;return[t.root,"inherit"!==n.color&&t["color".concat((0,c.Z)(n.color))],t["fontSize".concat((0,c.Z)(n.fontSize))]]}})((function(e){var t,n,r,a,i,o,s,c,u,l,A,d,h,f,g,p,m,I=e.theme,v=e.ownerState;return{userSelect:"none",width:"1em",height:"1em",display:"inline-block",fill:"currentColor",flexShrink:0,transition:null==(t=I.transitions)||null==(n=t.create)?void 0:n.call(t,"fill",{duration:null==(r=I.transitions)||null==(a=r.duration)?void 0:a.shorter}),fontSize:{inherit:"inherit",small:(null==(i=I.typography)||null==(o=i.pxToRem)?void 0:o.call(i,20))||"1.25rem",medium:(null==(s=I.typography)||null==(c=s.pxToRem)?void 0:c.call(s,24))||"1.5rem",large:(null==(u=I.typography)||null==(l=u.pxToRem)?void 0:l.call(u,35))||"2.1875rem"}[v.fontSize],color:null!=(A=null==(d=(I.vars||I).palette)||null==(h=d[v.color])?void 0:h.main)?A:{action:null==(f=(I.vars||I).palette)||null==(g=f.action)?void 0:g.active,disabled:null==(p=(I.vars||I).palette)||null==(m=p.action)?void 0:m.disabled,inherit:void 0}[v.color]}})),m=i.forwardRef((function(e,t){var n=(0,u.Z)({props:e,name:"MuiSvgIcon"}),i=n.children,l=n.className,A=n.color,d=void 0===A?"inherit":A,m=n.component,I=void 0===m?"svg":m,v=n.fontSize,y=void 0===v?"medium":v,C=n.htmlColor,E=n.inheritViewBox,b=void 0!==E&&E,B=n.titleAccess,w=n.viewBox,Q=void 0===w?"0 0 24 
24":w,k=(0,a.Z)(n,g),x=(0,r.Z)({},n,{color:d,component:I,fontSize:y,instanceFontSize:e.fontSize,inheritViewBox:b,viewBox:Q}),_={};b||(_.viewBox=Q);var S=function(e){var t=e.color,n=e.fontSize,r=e.classes,a={root:["root","inherit"!==t&&"color".concat((0,c.Z)(t)),"fontSize".concat((0,c.Z)(n))]};return(0,s.Z)(a,h,r)}(x);return(0,f.jsxs)(p,(0,r.Z)({as:I,className:(0,o.Z)(S.root,l),focusable:"false",color:C,"aria-hidden":!B||void 0,role:B?"img":void 0,ref:t},_,k,{ownerState:x,children:[i,B?(0,f.jsx)("title",{children:B}):null]}))}));m.muiName="SvgIcon";var I=m},61979:function(e,t,n){"use strict";n.d(t,{Z:function(){return M}});var r=n(87462),a=n(63366),i=n(46189),o=n(82466),s=n(85080),c=n(87416),u=n(60104),l=n(4942);function A(e,t){var n;return(0,r.Z)({toolbar:(n={minHeight:56},(0,l.Z)(n,e.up("xs"),{"@media (orientation: landscape)":{minHeight:48}}),(0,l.Z)(n,e.up("sm"),{minHeight:64}),n)},t)}var d=n(12065),h={black:"#000",white:"#fff"},f={50:"#fafafa",100:"#f5f5f5",200:"#eeeeee",300:"#e0e0e0",400:"#bdbdbd",500:"#9e9e9e",600:"#757575",700:"#616161",800:"#424242",900:"#212121",A100:"#f5f5f5",A200:"#eeeeee",A400:"#bdbdbd",A700:"#616161"},g={50:"#f3e5f5",100:"#e1bee7",200:"#ce93d8",300:"#ba68c8",400:"#ab47bc",500:"#9c27b0",600:"#8e24aa",700:"#7b1fa2",800:"#6a1b9a",900:"#4a148c",A100:"#ea80fc",A200:"#e040fb",A400:"#d500f9",A700:"#aa00ff"},p={50:"#ffebee",100:"#ffcdd2",200:"#ef9a9a",300:"#e57373",400:"#ef5350",500:"#f44336",600:"#e53935",700:"#d32f2f",800:"#c62828",900:"#b71c1c",A100:"#ff8a80",A200:"#ff5252",A400:"#ff1744",A700:"#d50000"},m={50:"#fff3e0",100:"#ffe0b2",200:"#ffcc80",300:"#ffb74d",400:"#ffa726",500:"#ff9800",600:"#fb8c00",700:"#f57c00",800:"#ef6c00",900:"#e65100",A100:"#ffd180",A200:"#ffab40",A400:"#ff9100",A700:"#ff6d00"},I={50:"#e3f2fd",100:"#bbdefb",200:"#90caf9",300:"#64b5f6",400:"#42a5f5",500:"#2196f3",600:"#1e88e5",700:"#1976d2",800:"#1565c0",900:"#0d47a1",A100:"#82b1ff",A200:"#448aff",A400:"#2979ff",A700:"#2962ff"},v={50:"#e1f5fe",100:"#b3e5fc",200:"#81d4fa",300:"#4fc3f7",400:"#29b6f6",500:"#03a9f4",600:"#039be5",700:"#0288d1",800:"#0277bd",900:"#01579b",A100:"#80d8ff",A200:"#40c4ff",A400:"#00b0ff",A700:"#0091ea"},y={50:"#e8f5e9",100:"#c8e6c9",200:"#a5d6a7",300:"#81c784",400:"#66bb6a",500:"#4caf50",600:"#43a047",700:"#388e3c",800:"#2e7d32",900:"#1b5e20",A100:"#b9f6ca",A200:"#69f0ae",A400:"#00e676",A700:"#00c853"},C=["mode","contrastThreshold","tonalOffset"],E={text:{primary:"rgba(0, 0, 0, 0.87)",secondary:"rgba(0, 0, 0, 0.6)",disabled:"rgba(0, 0, 0, 0.38)"},divider:"rgba(0, 0, 0, 0.12)",background:{paper:h.white,default:h.white},action:{active:"rgba(0, 0, 0, 0.54)",hover:"rgba(0, 0, 0, 0.04)",hoverOpacity:.04,selected:"rgba(0, 0, 0, 0.08)",selectedOpacity:.08,disabled:"rgba(0, 0, 0, 0.26)",disabledBackground:"rgba(0, 0, 0, 0.12)",disabledOpacity:.38,focus:"rgba(0, 0, 0, 0.12)",focusOpacity:.12,activatedOpacity:.12}},b={text:{primary:h.white,secondary:"rgba(255, 255, 255, 0.7)",disabled:"rgba(255, 255, 255, 0.5)",icon:"rgba(255, 255, 255, 0.5)"},divider:"rgba(255, 255, 255, 0.12)",background:{paper:"#121212",default:"#121212"},action:{active:h.white,hover:"rgba(255, 255, 255, 0.08)",hoverOpacity:.08,selected:"rgba(255, 255, 255, 0.16)",selectedOpacity:.16,disabled:"rgba(255, 255, 255, 0.3)",disabledBackground:"rgba(255, 255, 255, 0.12)",disabledOpacity:.38,focus:"rgba(255, 255, 255, 0.12)",focusOpacity:.12,activatedOpacity:.24}};function B(e,t,n,r){var 
a=r.light||r,i=r.dark||1.5*r;e[t]||(e.hasOwnProperty(n)?e[t]=e[n]:"light"===t?e.light=(0,d.$n)(e.main,a):"dark"===t&&(e.dark=(0,d._j)(e.main,i)))}function w(e){var t=e.mode,n=void 0===t?"light":t,s=e.contrastThreshold,c=void 0===s?3:s,u=e.tonalOffset,l=void 0===u?.2:u,A=(0,a.Z)(e,C),w=e.primary||function(){return"dark"===(arguments.length>0&&void 0!==arguments[0]?arguments[0]:"light")?{main:I[200],light:I[50],dark:I[400]}:{main:I[700],light:I[400],dark:I[800]}}(n),Q=e.secondary||function(){return"dark"===(arguments.length>0&&void 0!==arguments[0]?arguments[0]:"light")?{main:g[200],light:g[50],dark:g[400]}:{main:g[500],light:g[300],dark:g[700]}}(n),k=e.error||function(){return"dark"===(arguments.length>0&&void 0!==arguments[0]?arguments[0]:"light")?{main:p[500],light:p[300],dark:p[700]}:{main:p[700],light:p[400],dark:p[800]}}(n),x=e.info||function(){return"dark"===(arguments.length>0&&void 0!==arguments[0]?arguments[0]:"light")?{main:v[400],light:v[300],dark:v[700]}:{main:v[700],light:v[500],dark:v[900]}}(n),_=e.success||function(){return"dark"===(arguments.length>0&&void 0!==arguments[0]?arguments[0]:"light")?{main:y[400],light:y[300],dark:y[700]}:{main:y[800],light:y[500],dark:y[900]}}(n),S=e.warning||function(){return"dark"===(arguments.length>0&&void 0!==arguments[0]?arguments[0]:"light")?{main:m[400],light:m[300],dark:m[700]}:{main:"#ed6c02",light:m[500],dark:m[900]}}(n);function D(e){return(0,d.mi)(e,b.text.primary)>=c?b.text.primary:E.text.primary}var N=function(e){var t=e.color,n=e.name,a=e.mainShade,o=void 0===a?500:a,s=e.lightShade,c=void 0===s?300:s,u=e.darkShade,A=void 0===u?700:u;if(!(t=(0,r.Z)({},t)).main&&t[o]&&(t.main=t[o]),!t.hasOwnProperty("main"))throw new Error((0,i.Z)(11,n?" (".concat(n,")"):"",o));if("string"!==typeof t.main)throw new Error((0,i.Z)(12,n?" 
(".concat(n,")"):"",JSON.stringify(t.main)));return B(t,"light",c,l),B(t,"dark",A,l),t.contrastText||(t.contrastText=D(t.main)),t},R={dark:b,light:E};return(0,o.Z)((0,r.Z)({common:(0,r.Z)({},h),mode:n,primary:N({color:w,name:"primary"}),secondary:N({color:Q,name:"secondary",mainShade:"A400",lightShade:"A200",darkShade:"A700"}),error:N({color:k,name:"error"}),warning:N({color:S,name:"warning"}),info:N({color:x,name:"info"}),success:N({color:_,name:"success"}),grey:f,contrastThreshold:c,getContrastText:D,augmentColor:N,tonalOffset:l},R[n]),A)}var Q=["fontFamily","fontSize","fontWeightLight","fontWeightRegular","fontWeightMedium","fontWeightBold","htmlFontSize","allVariants","pxToRem"];var k={textTransform:"uppercase"},x='"Roboto", "Helvetica", "Arial", sans-serif';function _(e,t){var n="function"===typeof t?t(e):t,i=n.fontFamily,s=void 0===i?x:i,c=n.fontSize,u=void 0===c?14:c,l=n.fontWeightLight,A=void 0===l?300:l,d=n.fontWeightRegular,h=void 0===d?400:d,f=n.fontWeightMedium,g=void 0===f?500:f,p=n.fontWeightBold,m=void 0===p?700:p,I=n.htmlFontSize,v=void 0===I?16:I,y=n.allVariants,C=n.pxToRem,E=(0,a.Z)(n,Q);var b=u/14,B=C||function(e){return"".concat(e/v*b,"rem")},w=function(e,t,n,a,i){return(0,r.Z)({fontFamily:s,fontWeight:e,fontSize:B(t),lineHeight:n},s===x?{letterSpacing:"".concat((o=a/t,Math.round(1e5*o)/1e5),"em")}:{},i,y);var o},_={h1:w(A,96,1.167,-1.5),h2:w(A,60,1.2,-.5),h3:w(h,48,1.167,0),h4:w(h,34,1.235,.25),h5:w(h,24,1.334,0),h6:w(g,20,1.6,.15),subtitle1:w(h,16,1.75,.15),subtitle2:w(g,14,1.57,.1),body1:w(h,16,1.5,.15),body2:w(h,14,1.43,.15),button:w(g,14,1.75,.4,k),caption:w(h,12,1.66,.4),overline:w(h,12,2.66,1,k),inherit:{fontFamily:"inherit",fontWeight:"inherit",fontSize:"inherit",lineHeight:"inherit",letterSpacing:"inherit"}};return(0,o.Z)((0,r.Z)({htmlFontSize:v,pxToRem:B,fontFamily:s,fontSize:u,fontWeightLight:A,fontWeightRegular:h,fontWeightMedium:g,fontWeightBold:m},_),E,{clone:!1})}function S(){return["".concat(arguments.length<=0?void 0:arguments[0],"px ").concat(arguments.length<=1?void 0:arguments[1],"px ").concat(arguments.length<=2?void 0:arguments[2],"px ").concat(arguments.length<=3?void 0:arguments[3],"px rgba(0,0,0,").concat(.2,")"),"".concat(arguments.length<=4?void 0:arguments[4],"px ").concat(arguments.length<=5?void 0:arguments[5],"px ").concat(arguments.length<=6?void 0:arguments[6],"px ").concat(arguments.length<=7?void 0:arguments[7],"px rgba(0,0,0,").concat(.14,")"),"".concat(arguments.length<=8?void 0:arguments[8],"px ").concat(arguments.length<=9?void 0:arguments[9],"px ").concat(arguments.length<=10?void 0:arguments[10],"px ").concat(arguments.length<=11?void 0:arguments[11],"px rgba(0,0,0,").concat(.12,")")].join(",")}var 
D=["none",S(0,2,1,-1,0,1,1,0,0,1,3,0),S(0,3,1,-2,0,2,2,0,0,1,5,0),S(0,3,3,-2,0,3,4,0,0,1,8,0),S(0,2,4,-1,0,4,5,0,0,1,10,0),S(0,3,5,-1,0,5,8,0,0,1,14,0),S(0,3,5,-1,0,6,10,0,0,1,18,0),S(0,4,5,-2,0,7,10,1,0,2,16,1),S(0,5,5,-3,0,8,10,1,0,3,14,2),S(0,5,6,-3,0,9,12,1,0,3,16,2),S(0,6,6,-3,0,10,14,1,0,4,18,3),S(0,6,7,-4,0,11,15,1,0,4,20,3),S(0,7,8,-4,0,12,17,2,0,5,22,4),S(0,7,8,-4,0,13,19,2,0,5,24,4),S(0,7,9,-4,0,14,21,2,0,5,26,4),S(0,8,9,-5,0,15,22,2,0,6,28,5),S(0,8,10,-5,0,16,24,2,0,6,30,5),S(0,8,11,-5,0,17,26,2,0,6,32,5),S(0,9,11,-5,0,18,28,2,0,7,34,6),S(0,9,12,-6,0,19,29,2,0,7,36,6),S(0,10,13,-6,0,20,31,3,0,8,38,7),S(0,10,13,-6,0,21,33,3,0,8,40,7),S(0,10,14,-6,0,22,35,3,0,8,42,7),S(0,11,14,-7,0,23,36,3,0,9,44,8),S(0,11,15,-7,0,24,38,3,0,9,46,8)],N=n(81314),R={mobileStepper:1e3,fab:1050,speedDial:1050,appBar:1100,drawer:1200,modal:1300,snackbar:1400,tooltip:1500},F=["breakpoints","mixins","spacing","palette","transitions","typography","shape"];function T(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.mixins,n=void 0===t?{}:t,l=e.palette,d=void 0===l?{}:l,h=e.transitions,f=void 0===h?{}:h,g=e.typography,p=void 0===g?{}:g,m=(0,a.Z)(e,F);if(e.vars)throw new Error((0,i.Z)(18));var I=w(d),v=(0,s.Z)(e),y=(0,o.Z)(v,{mixins:A(v.breakpoints,n),palette:I,shadows:D.slice(),typography:_(I,p),transitions:(0,N.ZP)(f),zIndex:(0,r.Z)({},R)});y=(0,o.Z)(y,m);for(var C=arguments.length,E=new Array(C>1?C-1:0),b=1;b0&&void 0!==arguments[0]?arguments[0]:["all"],a=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},o=a.duration,s=void 0===o?n.standard:o,u=a.easing,l=void 0===u?t.easeInOut:u,A=a.delay,d=void 0===A?0:A;(0,r.Z)(a,i);return(Array.isArray(e)?e:[e]).map((function(e){return"".concat(e," ").concat("string"===typeof s?s:c(s)," ").concat(l," ").concat("string"===typeof d?d:c(d))})).join(",")}},e,{easing:t,duration:n})}},36482:function(e,t,n){"use strict";var r=(0,n(61979).Z)();t.Z=r},988:function(e,t){"use strict";t.Z="$$material"},66934:function(e,t,n){"use strict";n.d(t,{Dz:function(){return s},FO:function(){return o}});var r=n(44046),a=n(36482),i=n(988),o=function(e){return(0,r.x9)(e)&&"classes"!==e},s=r.x9,c=(0,r.ZP)({themeId:i.Z,defaultTheme:a.Z,rootShouldForwardProp:o});t.ZP=c},31402:function(e,t,n){"use strict";n.d(t,{Z:function(){return o}});var r=n(86083),a=n(36482),i=n(988);function o(e){var t=e.props,n=e.name;return(0,r.Z)({props:t,name:n,defaultTheme:a.Z,themeId:i.Z})}},14036:function(e,t,n){"use strict";var r=n(27312);t.Z=r.Z},31260:function(e,t,n){"use strict";var r=n(78949);t.Z=r.Z},74223:function(e,t,n){"use strict";n.d(t,{Z:function(){return s}});var r=n(87462),a=n(72791),i=n(99259),o=n(80184);function s(e,t){function n(n,a){return(0,o.jsx)(i.Z,(0,r.Z)({"data-testid":"".concat(t,"Icon"),ref:a},n,{children:e}))}return n.muiName=i.Z.muiName,a.memo(a.forwardRef(n))}},83199:function(e,t,n){"use strict";var r=n(93981);t.Z=r.Z},28610:function(e,t,n){"use strict";n.r(t),n.d(t,{capitalize:function(){return a.Z},createChainedFunction:function(){return i.Z},createSvgIcon:function(){return o.Z},debounce:function(){return s.Z},deprecatedPropType:function(){return c},isMuiElement:function(){return u.Z},ownerDocument:function(){return l.Z},ownerWindow:function(){return A.Z},requirePropFactory:function(){return d},setRef:function(){return h},unstable_ClassNameGenerator:function(){return C},unstable_useEnhancedEffect:function(){return f.Z},unstable_useId:function(){return g.Z},unsupportedProp:function(){return p},useControlled:function(){return 
m.Z},useEventCallback:function(){return I.Z},useForkRef:function(){return v.Z},useIsFocusVisible:function(){return y.Z}});var r=n(55902),a=n(14036),i=n(31260),o=n(74223),s=n(83199);var c=function(e,t){return function(){return null}},u=n(19103),l=n(98301),A=n(17602);n(1413);var d=function(e,t){return function(){return null}},h=n(62971).Z,f=n(40162),g=n(67384);var p=function(e,t,n,r,a){return null},m=n(98278),I=n(89683),v=n(42071),y=n(68221),C={configure:function(e){r.Z.configure(e)}}},19103:function(e,t,n){"use strict";n.d(t,{Z:function(){return a}});var r=n(72791);var a=function(e,t){return r.isValidElement(e)&&-1!==t.indexOf(e.type.muiName)}},98301:function(e,t,n){"use strict";var r=n(99723);t.Z=r.Z},17602:function(e,t,n){"use strict";var r=n(27979);t.Z=r.Z},98278:function(e,t,n){"use strict";var r=n(58959);t.Z=r.Z},40162:function(e,t,n){"use strict";var r=n(75721);t.Z=r.Z},89683:function(e,t,n){"use strict";var r=n(58956);t.Z=r.Z},42071:function(e,t,n){"use strict";var r=n(47563);t.Z=r.Z},67384:function(e,t,n){"use strict";var r=n(96248);t.Z=r.Z},68221:function(e,t,n){"use strict";var r=n(45372);t.Z=r.Z},22421:function(e,t,n){"use strict";n.d(t,{ZP:function(){return I},Co:function(){return v}});var r=n(87462),a=n(72791),i=n(49797),o=/^((children|dangerouslySetInnerHTML|key|ref|autoFocus|defaultValue|defaultChecked|innerHTML|suppressContentEditableWarning|suppressHydrationWarning|valueLink|abbr|accept|acceptCharset|accessKey|action|allow|allowUserMedia|allowPaymentRequest|allowFullScreen|allowTransparency|alt|async|autoComplete|autoPlay|capture|cellPadding|cellSpacing|challenge|charSet|checked|cite|classID|className|cols|colSpan|content|contentEditable|contextMenu|controls|controlsList|coords|crossOrigin|data|dateTime|decoding|default|defer|dir|disabled|disablePictureInPicture|download|draggable|encType|enterKeyHint|form|formAction|formEncType|formMethod|formNoValidate|formTarget|frameBorder|headers|height|hidden|high|href|hrefLang|htmlFor|httpEquiv|id|inputMode|integrity|is|keyParams|keyType|kind|label|lang|list|loading|loop|low|marginHeight|marginWidth|max|maxLength|media|mediaGroup|method|min|minLength|multiple|muted|name|nonce|noValidate|open|optimum|pattern|placeholder|playsInline|poster|preload|profile|radioGroup|readOnly|referrerPolicy|rel|required|reversed|role|rows|rowSpan|sandbox|scope|scoped|scrolling|seamless|selected|shape|size|sizes|slot|span|spellCheck|src|srcDoc|srcLang|srcSet|start|step|style|summary|tabIndex|target|title|translate|type|useMap|value|width|wmode|wrap|about|datatype|inlist|prefix|property|resource|typeof|vocab|autoCapitalize|autoCorrect|autoSave|color|incremental|fallback|inert|itemProp|itemScope|itemType|itemID|itemRef|on|option|results|security|unselectable|accentHeight|accumulate|additive|alignmentBaseline|allowReorder|alphabetic|amplitude|arabicForm|ascent|attributeName|attributeType|autoReverse|azimuth|baseFrequency|baselineShift|baseProfile|bbox|begin|bias|by|calcMode|capHeight|clip|clipPathUnits|clipPath|clipRule|colorInterpolation|colorInterpolationFilters|colorProfile|colorRendering|contentScriptType|contentStyleType|cursor|cx|cy|d|decelerate|descent|diffuseConstant|direction|display|divisor|dominantBaseline|dur|dx|dy|edgeMode|elevation|enableBackground|end|exponent|externalResourcesRequired|fill|fillOpacity|fillRule|filter|filterRes|filterUnits|floodColor|floodOpacity|focusable|fontFamily|fontSize|fontSizeAdjust|fontStretch|fontStyle|fontVariant|fontWeight|format|from|fr|fx|fy|g1|g2|glyphName|glyphOrientationHorizontal|glyphOrientationVertical|glyp
hRef|gradientTransform|gradientUnits|hanging|horizAdvX|horizOriginX|ideographic|imageRendering|in|in2|intercept|k|k1|k2|k3|k4|kernelMatrix|kernelUnitLength|kerning|keyPoints|keySplines|keyTimes|lengthAdjust|letterSpacing|lightingColor|limitingConeAngle|local|markerEnd|markerMid|markerStart|markerHeight|markerUnits|markerWidth|mask|maskContentUnits|maskUnits|mathematical|mode|numOctaves|offset|opacity|operator|order|orient|orientation|origin|overflow|overlinePosition|overlineThickness|panose1|paintOrder|pathLength|patternContentUnits|patternTransform|patternUnits|pointerEvents|points|pointsAtX|pointsAtY|pointsAtZ|preserveAlpha|preserveAspectRatio|primitiveUnits|r|radius|refX|refY|renderingIntent|repeatCount|repeatDur|requiredExtensions|requiredFeatures|restart|result|rotate|rx|ry|scale|seed|shapeRendering|slope|spacing|specularConstant|specularExponent|speed|spreadMethod|startOffset|stdDeviation|stemh|stemv|stitchTiles|stopColor|stopOpacity|strikethroughPosition|strikethroughThickness|string|stroke|strokeDasharray|strokeDashoffset|strokeLinecap|strokeLinejoin|strokeMiterlimit|strokeOpacity|strokeWidth|surfaceScale|systemLanguage|tableValues|targetX|targetY|textAnchor|textDecoration|textRendering|textLength|to|transform|u1|u2|underlinePosition|underlineThickness|unicode|unicodeBidi|unicodeRange|unitsPerEm|vAlphabetic|vHanging|vIdeographic|vMathematical|values|vectorEffect|version|vertAdvY|vertOriginX|vertOriginY|viewBox|viewTarget|visibility|widths|wordSpacing|writingMode|x|xHeight|x1|x2|xChannelSelector|xlinkActuate|xlinkArcrole|xlinkHref|xlinkRole|xlinkShow|xlinkTitle|xlinkType|xmlBase|xmlns|xmlnsXlink|xmlLang|xmlSpace|y|y1|y2|yChannelSelector|z|zoomAndPan|for|class|autofocus)|(([Dd][Aa][Tt][Aa]|[Aa][Rr][Ii][Aa]|x)-.*))$/,s=(0,i.Z)((function(e){return o.test(e)||111===e.charCodeAt(0)&&110===e.charCodeAt(1)&&e.charCodeAt(2)<91})),c=n(62564),u=n(95438),l=n(9140),A=n(82561),d=s,h=function(e){return"theme"!==e},f=function(e){return"string"===typeof e&&e.charCodeAt(0)>96?d:h},g=function(e,t,n){var r;if(t){var a=t.shouldForwardProp;r=e.__emotion_forwardProp&&a?function(t){return e.__emotion_forwardProp(t)&&a(t)}:a}return"function"!==typeof r&&n&&(r=e.__emotion_forwardProp),r},p=function(e){var t=e.cache,n=e.serialized,r=e.isStringTag;return(0,u.hC)(t,n,r),(0,A.L)((function(){return(0,u.My)(t,n,r)})),null},m=function e(t,n){var i,o,s=t.__emotion_real===t,A=s&&t.__emotion_base||t;void 0!==n&&(i=n.label,o=n.target);var d=g(t,n,s),h=d||f(A),m=!h("as");return function(){var I=arguments,v=s&&void 0!==t.__emotion_styles?t.__emotion_styles.slice(0):[];if(void 0!==i&&v.push("label:"+i+";"),null==I[0]||void 0===I[0].raw)v.push.apply(v,I);else{0,v.push(I[0][0]);for(var y=I.length,C=1;C0&&void 0!==arguments[0]?arguments[0]:{};return(null==(e=t.keys)?void 0:e.reduce((function(e,n){return e[t.up(n)]={},e}),{}))||{}}function c(e,t){return e.reduce((function(e,t){var n=e[t];return(!n||0===Object.keys(n).length)&&delete e[t],e}),t)}function u(e){for(var t=s(e),n=arguments.length,a=new Array(n>1?n-1:0),i=1;i1&&void 0!==arguments[1]?arguments[1]:0,n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:1;return Math.min(Math.max(t,e),n)}function i(e){if(e.type)return e;if("#"===e.charAt(0))return i(function(e){e=e.slice(1);var t=new RegExp(".{1,".concat(e.length>=6?2:1,"}"),"g"),n=e.match(t);return n&&1===n[0].length&&(n=n.map((function(e){return e+e}))),n?"rgb".concat(4===n.length?"a":"","(").concat(n.map((function(e,t){return t<3?parseInt(e,16):Math.round(parseInt(e,16)/255*1e3)/1e3})).join(", 
"),")"):""}(e));var t=e.indexOf("("),n=e.substring(0,t);if(-1===["rgb","rgba","hsl","hsla","color"].indexOf(n))throw new Error((0,r.Z)(9,e));var a,o=e.substring(t+1,e.length-1);if("color"===n){if(a=(o=o.split(" ")).shift(),4===o.length&&"/"===o[3].charAt(0)&&(o[3]=o[3].slice(1)),-1===["srgb","display-p3","a98-rgb","prophoto-rgb","rec-2020"].indexOf(a))throw new Error((0,r.Z)(10,a))}else o=o.split(",");return{type:n,values:o=o.map((function(e){return parseFloat(e)})),colorSpace:a}}function o(e){var t=e.type,n=e.colorSpace,r=e.values;return-1!==t.indexOf("rgb")?r=r.map((function(e,t){return t<3?parseInt(e,10):e})):-1!==t.indexOf("hsl")&&(r[1]="".concat(r[1],"%"),r[2]="".concat(r[2],"%")),r=-1!==t.indexOf("color")?"".concat(n," ").concat(r.join(" ")):"".concat(r.join(", ")),"".concat(t,"(").concat(r,")")}function s(e){var t="hsl"===(e=i(e)).type||"hsla"===e.type?i(function(e){var t=(e=i(e)).values,n=t[0],r=t[1]/100,a=t[2]/100,s=r*Math.min(a,1-a),c=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:(e+n/30)%12;return a-s*Math.max(Math.min(t-3,9-t,1),-1)},u="rgb",l=[Math.round(255*c(0)),Math.round(255*c(8)),Math.round(255*c(4))];return"hsla"===e.type&&(u+="a",l.push(t[3])),o({type:u,values:l})}(e)).values:e.values;return t=t.map((function(t){return"color"!==e.type&&(t/=255),t<=.03928?t/12.92:Math.pow((t+.055)/1.055,2.4)})),Number((.2126*t[0]+.7152*t[1]+.0722*t[2]).toFixed(3))}function c(e,t){var n=s(e),r=s(t);return(Math.max(n,r)+.05)/(Math.min(n,r)+.05)}function u(e,t){return e=i(e),t=a(t),"rgb"!==e.type&&"hsl"!==e.type||(e.type+="a"),"color"===e.type?e.values[3]="/".concat(t):e.values[3]=t,o(e)}function l(e,t){if(e=i(e),t=a(t),-1!==e.type.indexOf("hsl"))e.values[2]*=1-t;else if(-1!==e.type.indexOf("rgb")||-1!==e.type.indexOf("color"))for(var n=0;n<3;n+=1)e.values[n]*=1-t;return o(e)}function A(e,t){if(e=i(e),t=a(t),-1!==e.type.indexOf("hsl"))e.values[2]+=(100-e.values[2])*t;else if(-1!==e.type.indexOf("rgb"))for(var n=0;n<3;n+=1)e.values[n]+=(255-e.values[n])*t;else if(-1!==e.type.indexOf("color"))for(var r=0;r<3;r+=1)e.values[r]+=(1-e.values[r])*t;return o(e)}},44046:function(e,t,n){"use strict";n.d(t,{ZP:function(){return C},x9:function(){return I}});var r=n(93433),a=n(29439),i=n(63366),o=n(87462),s=n(22421),c=n(85080),u=n(27312),l=["variant"];function A(e){return 0===e.length}function d(e){var t=e.variant,n=(0,i.Z)(e,l),r=t||"";return Object.keys(n).sort().forEach((function(t){r+="color"===t?A(r)?e[t]:(0,u.Z)(e[t]):"".concat(A(r)?t:(0,u.Z)(t)).concat((0,u.Z)(e[t].toString()))})),r}var h=n(60104),f=["name","slot","skipVariantsResolver","skipSx","overridesResolver"];var g=function(e,t){return t.components&&t.components[e]&&t.components[e].styleOverrides?t.components[e].styleOverrides:null},p=function(e,t){var n=[];t&&t.components&&t.components[e]&&t.components[e].variants&&(n=t.components[e].variants);var r={};return n.forEach((function(e){var t=d(e.props);r[t]=e.style})),r},m=function(e,t,n,r){var a,i,o=e.ownerState,s=void 0===o?{}:o,c=[],u=null==n||null==(a=n.components)||null==(i=a[r])?void 0:i.variants;return u&&u.forEach((function(n){var r=!0;Object.keys(n.props).forEach((function(t){s[t]!==n.props[t]&&e[t]!==n.props[t]&&(r=!1)})),r&&c.push(t[d(n.props)])})),c};function I(e){return"ownerState"!==e&&"theme"!==e&&"sx"!==e&&"as"!==e}var v=(0,c.Z)();function y(e){var t,n=e.defaultTheme,r=e.theme,a=e.themeId;return t=r,0===Object.keys(t).length?n:r[a]||r}function C(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.themeId,n=e.defaultTheme,c=void 
0===n?v:n,u=e.rootShouldForwardProp,l=void 0===u?I:u,A=e.slotShouldForwardProp,d=void 0===A?I:A,C=function(e){return(0,h.Z)((0,o.Z)({},e,{theme:y((0,o.Z)({},e,{defaultTheme:c,themeId:t}))}))};return C.__mui_systemSx=!0,function(e){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};(0,s.Co)(e,(function(e){return e.filter((function(e){return!(null!=e&&e.__mui_systemSx)}))}));var u=n.name,A=n.slot,h=n.skipVariantsResolver,v=n.skipSx,E=n.overridesResolver,b=(0,i.Z)(n,f),B=void 0!==h?h:A&&"Root"!==A||!1,w=v||!1;var Q=I;"Root"===A?Q=l:A?Q=d:function(e){return"string"===typeof e&&e.charCodeAt(0)>96}(e)&&(Q=void 0);var k=(0,s.ZP)(e,(0,o.Z)({shouldForwardProp:Q,label:undefined},b)),x=function(n){for(var i=arguments.length,s=new Array(i>1?i-1:0),l=1;l0){var f=new Array(h).fill("");(d=[].concat((0,r.Z)(n),(0,r.Z)(f))).raw=[].concat((0,r.Z)(n.raw),(0,r.Z)(f))}else"function"===typeof n&&n.__emotion_real!==n&&(d=function(e){return n((0,o.Z)({},e,{theme:y((0,o.Z)({},e,{defaultTheme:c,themeId:t}))}))});var I=k.apply(void 0,[d].concat((0,r.Z)(A)));return e.muiName&&(I.muiName=e.muiName),I};return k.withConfig&&(x.withConfig=k.withConfig),x}}},85080:function(e,t,n){"use strict";n.d(t,{Z:function(){return f}});var r=n(87462),a=n(63366),i=n(82466),o=n(4942),s=["values","unit","step"],c=function(e){var t=Object.keys(e).map((function(t){return{key:t,val:e[t]}}))||[];return t.sort((function(e,t){return e.val-t.val})),t.reduce((function(e,t){return(0,r.Z)({},e,(0,o.Z)({},t.key,t.val))}),{})};var u={borderRadius:4},l=n(45682);var A=n(60104),d=n(87416),h=["breakpoints","palette","spacing","shape"];var f=function(){for(var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.breakpoints,n=void 0===t?{}:t,o=e.palette,f=void 0===o?{}:o,g=e.spacing,p=e.shape,m=void 0===p?{}:p,I=(0,a.Z)(e,h),v=function(e){var t=e.values,n=void 0===t?{xs:0,sm:600,md:900,lg:1200,xl:1536}:t,i=e.unit,o=void 0===i?"px":i,u=e.step,l=void 0===u?5:u,A=(0,a.Z)(e,s),d=c(n),h=Object.keys(d);function f(e){var t="number"===typeof n[e]?n[e]:e;return"@media (min-width:".concat(t).concat(o,")")}function g(e){var t="number"===typeof n[e]?n[e]:e;return"@media (max-width:".concat(t-l/100).concat(o,")")}function p(e,t){var r=h.indexOf(t);return"@media (min-width:".concat("number"===typeof n[e]?n[e]:e).concat(o,") and ")+"(max-width:".concat((-1!==r&&"number"===typeof n[h[r]]?n[h[r]]:t)-l/100).concat(o,")")}return(0,r.Z)({keys:h,values:d,up:f,down:g,between:p,only:function(e){return h.indexOf(e)+10&&void 0!==arguments[0]?arguments[0]:8;if(e.mui)return e;var t=(0,l.hB)({spacing:e}),n=function(){for(var e=arguments.length,n=new Array(e),r=0;r1?E-1:0),B=1;B2){if(!u[e])return[e];e=u[e]}var t=e.split(""),n=(0,r.Z)(t,2),a=n[0],i=n[1],o=s[a],l=c[i]||"";return Array.isArray(l)?l.map((function(e){return o+e})):[o+l]})),A=["m","mt","mr","mb","ml","mx","my","margin","marginTop","marginRight","marginBottom","marginLeft","marginX","marginY","marginInline","marginInlineStart","marginInlineEnd","marginBlock","marginBlockStart","marginBlockEnd"],d=["p","pt","pr","pb","pl","px","py","padding","paddingTop","paddingRight","paddingBottom","paddingLeft","paddingX","paddingY","paddingInline","paddingInlineStart","paddingInlineEnd","paddingBlock","paddingBlockStart","paddingBlockEnd"],h=[].concat(A,d);function f(e,t,n,r){var a,o=null!=(a=(0,i.DW)(e,t,!1))?a:n;return"number"===typeof o?function(e){return"string"===typeof e?e:o*e}:Array.isArray(o)?function(e){return"string"===typeof e?e:o[e]}:"function"===typeof o?o:function(){}}function g(e){return 
f(e,"spacing",8)}function p(e,t){if("string"===typeof t||null==t)return t;var n=e(Math.abs(t));return t>=0?n:"number"===typeof n?-n:"-".concat(n)}function m(e,t,n,r){if(-1===t.indexOf(n))return null;var i=function(e,t){return function(n){return e.reduce((function(e,r){return e[r]=p(t,n),e}),{})}}(l(n),r),o=e[n];return(0,a.k9)(e,o,i)}function I(e,t){var n=g(e.theme);return Object.keys(e).map((function(r){return m(e,t,r,n)})).reduce(o.Z,{})}function v(e){return I(e,A)}function y(e){return I(e,d)}function C(e){return I(e,h)}v.propTypes={},v.filterProps=A,y.propTypes={},y.filterProps=d,C.propTypes={},C.filterProps=h},18529:function(e,t,n){"use strict";n.d(t,{DW:function(){return o},Jq:function(){return s}});var r=n(4942),a=n(27312),i=n(51184);function o(e,t){var n=!(arguments.length>2&&void 0!==arguments[2])||arguments[2];if(!t||"string"!==typeof t)return null;if(e&&e.vars&&n){var r="vars.".concat(t).split(".").reduce((function(e,t){return e&&e[t]?e[t]:null}),e);if(null!=r)return r}return t.split(".").reduce((function(e,t){return e&&null!=e[t]?e[t]:null}),e)}function s(e,t,n){var r,a=arguments.length>3&&void 0!==arguments[3]?arguments[3]:n;return r="function"===typeof e?e(n):Array.isArray(e)?e[n]||a:o(e,n)||a,t&&(r=t(r,a,e)),r}t.ZP=function(e){var t=e.prop,n=e.cssProperty,c=void 0===n?e.prop:n,u=e.themeKey,l=e.transform,A=function(e){if(null==e[t])return null;var n=e[t],A=o(e.theme,u)||{};return(0,i.k9)(e,n,(function(e){var n=s(A,l,e);return e===n&&"string"===typeof e&&(n=s(A,l,"".concat(t).concat("default"===e?"":(0,a.Z)(e)),e)),!1===c?n:(0,r.Z)({},c,n)}))};return A.propTypes={},A.filterProps=[t],A}},87416:function(e,t,n){"use strict";n.d(t,{Z:function(){return D}});var r=n(45682),a=n(18529),i=n(98247);var o=function(){for(var e=arguments.length,t=new Array(e),n=0;n0&&void 0!==arguments[0]?arguments[0]:i;return(0,a.Z)(e)}},33073:function(e,t,n){"use strict";n.d(t,{Z:function(){return a}});var r=n(35735);function a(e){var t=e.theme,n=e.name,a=e.props;return t&&t.components&&t.components[n]&&t.components[n].defaultProps?(0,r.Z)(t.components[n].defaultProps,a):a}},86083:function(e,t,n){"use strict";n.d(t,{Z:function(){return i}});var r=n(33073),a=n(30418);function i(e){var t=e.props,n=e.name,i=e.defaultTheme,o=e.themeId,s=(0,a.Z)(i);return o&&(s=s[o]||s),(0,r.Z)({theme:s,name:n,props:t})}},69120:function(e,t,n){"use strict";var r=n(72791),a=n(62564);t.Z=function(){var e,t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:null,n=r.useContext(a.T);return n&&(e=n,0!==Object.keys(e).length)?n:t}},55902:function(e,t){"use strict";var n=function(e){return e},r=function(){var e=n;return{configure:function(t){e=t},generate:function(t){return e(t)},reset:function(){e=n}}}();t.Z=r},27312:function(e,t,n){"use strict";n.d(t,{Z:function(){return a}});var r=n(46189);function a(e){if("string"!==typeof e)throw new Error((0,r.Z)(7));return e.charAt(0).toUpperCase()+e.slice(1)}},94419:function(e,t,n){"use strict";function r(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:void 0,r={};return Object.keys(e).forEach((function(a){r[a]=e[a].reduce((function(e,r){if(r){var a=t(r);""!==a&&e.push(a),n&&n[r]&&e.push(n[r])}return e}),[]).join(" ")})),r}n.d(t,{Z:function(){return r}})},78949:function(e,t,n){"use strict";function r(){for(var e=arguments.length,t=new Array(e),n=0;n1&&void 0!==arguments[1]?arguments[1]:166;function r(){for(var r=this,a=arguments.length,i=new Array(a),o=0;o2&&void 0!==arguments[2]?arguments[2]:{clone:!0},s=n.clone?(0,r.Z)({},e):e;return 
a(e)&&a(t)&&Object.keys(t).forEach((function(r){"__proto__"!==r&&(a(t[r])&&r in e&&a(e[r])?s[r]=o(e[r],t[r],n):n.clone?s[r]=a(t[r])?i(t[r]):t[r]:s[r]=t[r])})),s}},46189:function(e,t,n){"use strict";function r(e){for(var t="https://mui.com/production-error/?code="+e,n=1;n2&&void 0!==arguments[2]?arguments[2]:"Mui",i=a[t];return i?"".concat(n,"-").concat(i):"".concat(r.Z.generate(e),"-").concat(t)}},75878:function(e,t,n){"use strict";n.d(t,{Z:function(){return a}});var r=n(21217);function a(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:"Mui",a={};return t.forEach((function(t){a[t]=(0,r.Z)(e,t,n)})),a}},99723:function(e,t,n){"use strict";function r(e){return e&&e.ownerDocument||document}n.d(t,{Z:function(){return r}})},27979:function(e,t,n){"use strict";n.d(t,{Z:function(){return a}});var r=n(99723);function a(e){return(0,r.Z)(e).defaultView||window}},35735:function(e,t,n){"use strict";n.d(t,{Z:function(){return a}});var r=n(1413);function a(e,t){var n=(0,r.Z)({},t);return Object.keys(e).forEach((function(i){if(i.toString().match(/^(components|slots)$/))n[i]=(0,r.Z)((0,r.Z)({},e[i]),n[i]);else if(i.toString().match(/^(componentsProps|slotProps)$/)){var o=e[i]||{},s=t[i];n[i]={},s&&Object.keys(s)?o&&Object.keys(o)?(n[i]=(0,r.Z)({},s),Object.keys(o).forEach((function(e){n[i][e]=a(o[e],s[e])}))):n[i]=s:n[i]=o}else void 0===n[i]&&(n[i]=e[i])})),n}},62971:function(e,t,n){"use strict";function r(e,t){"function"===typeof e?e(t):e&&(e.current=t)}n.d(t,{Z:function(){return r}})},58959:function(e,t,n){"use strict";n.d(t,{Z:function(){return i}});var r=n(29439),a=n(72791);function i(e){var t=e.controlled,n=e.default,i=(e.name,e.state,a.useRef(void 0!==t).current),o=a.useState(n),s=(0,r.Z)(o,2),c=s[0],u=s[1];return[i?t:c,a.useCallback((function(e){i||u(e)}),[])]}},75721:function(e,t,n){"use strict";var r=n(72791),a="undefined"!==typeof window?r.useLayoutEffect:r.useEffect;t.Z=a},58956:function(e,t,n){"use strict";n.d(t,{Z:function(){return i}});var r=n(72791),a=n(75721);function i(e){var t=r.useRef(e);return(0,a.Z)((function(){t.current=e})),r.useCallback((function(){return t.current.apply(void 0,arguments)}),[])}},47563:function(e,t,n){"use strict";n.d(t,{Z:function(){return i}});var r=n(72791),a=n(62971);function i(){for(var e=arguments.length,t=new Array(e),n=0;ne.byteArray.length-e.position&&(c=e.byteArray.length-e.position),t.fragments.push({offset:e.position-o-8,position:e.position,length:c}),e.seek(c),void(t.length=e.position-t.dataOffset);t.fragments.push({offset:e.position-o-8,position:e.position,length:c}),e.seek(c)}n&&n.push("pixel data element ".concat(t.tag," missing sequence delimiter tag xfffee0dd"))}function m(e,t){if(void 0===e)throw"dicomParser.findAndSetUNElementLength: missing required parameter 'byteStream'";for(var n=e.byteArray.length-8;e.position<=n;)if(65534===e.readUint16()&&57565===e.readUint16())return 0!==e.readUint32()&&e.warnings("encountered non zero length following item delimiter at position ".concat(e.position-4," while reading element of undefined length with tag ").concat(t.tag)),void(t.length=e.position-t.dataOffset);t.length=e.byteArray.length-t.dataOffset,e.seek(e.byteArray.length-e.position)}function I(e,t,n){if(n<0)throw"dicomParser.readFixedString - length cannot be less than 0";if(t+n>e.length)throw"dicomParser.readFixedString: attempt to read past end of buffer";for(var r,a="",i=0;it.byteArray.length)throw"dicomParser.parseDicomDataSetExplicit: invalid value for parameter 'maxP osition'";for(var 
a=e.elements;t.positionn)throw"dicomParser:parseDicomDataSetExplicit: buffer overrun"}function R(e,t,n){var r=3t.byteArray.length)throw"dicomParser.parseDicomDataSetImplicit: invalid value for parameter 'maxPosition'";for(var a=e.elements;t.positione.length)throw"bigEndianByteArrayParser.readUint16: attempt to read past end of buffer";return(e[t]<<8)+e[t+1]},readInt16:function(e,t){if(t<0)throw"bigEndianByteArrayParser.readInt16: position cannot be less than 0";if(t+2>e.length)throw"bigEndianByteArrayParser.readInt16: attempt to read past end of buffer";return 32768&(t=(e[t]<<8)+e[t+1])?t-65535-1:t},readUint32:function(e,t){if(t<0)throw"bigEndianByteArrayParser.readUint32: position cannot be less than 0";if(t+4>e.length)throw"bigEndianByteArrayParser.readUint32: attempt to read past end of buffer";return 256*(256*(256*e[t]+e[t+1])+e[t+2])+e[t+3]},readInt32:function(e,t){if(t<0)throw"bigEndianByteArrayParser.readInt32: position cannot be less than 0";if(t+4>e.length)throw"bigEndianByteArrayParser.readInt32: attempt to read past end of buffer";return(e[t]<<24)+(e[t+1]<<16)+(e[t+2]<<8)+e[t+3]},readFloat:function(e,t){if(t<0)throw"bigEndianByteArrayParser.readFloat: position cannot be less than 0";if(t+4>e.length)throw"bigEndianByteArrayParser.readFloat: attempt to read past end of buffer";var n=new Uint8Array(4);return n[3]=e[t],n[2]=e[t+1],n[1]=e[t+2],n[0]=e[t+3],new Float32Array(n.buffer)[0]},readDouble:function(e,t){if(t<0)throw"bigEndianByteArrayParser.readDouble: position cannot be less than 0";if(t+8>e.length)throw"bigEndianByteArrayParser.readDouble: attempt to read past end of buffer";var n=new Uint8Array(8);return n[7]=e[t],n[6]=e[t+1],n[5]=e[t+2],n[4]=e[t+3],n[3]=e[t+4],n[2]=e[t+5],n[1]=e[t+6],n[0]=e[t+7],new Float64Array(n.buffer)[0]}};function L(e,t,n){if("undefined"!=typeof Buffer&&e instanceof Buffer)return e.slice(t,t+n);if(e instanceof Uint8Array)return new Uint8Array(e.buffer,e.byteOffset+t,n);throw"dicomParser.from: unknown type for byteArray"}function O(e,t){for(var n=0;n=n.length)throw"dicomParser.ByteStream: parameter 'position' cannot be greater than or equal to 'byteArray' length";this.byteArrayParser=t,this.byteArray=n,this.position=r||0,this.warnings=[]}var t,n,r;return t=e,(n=[{key:"seek",value:function(e){if(this.position+e<0)throw"dicomParser.ByteStream.prototype.seek: cannot seek to position < 0";this.position+=e}},{key:"readByteStream",value:function(t){if(this.position+t>this.byteArray.length)throw"dicomParser.ByteStream.prototype.readByteStream: readByteStream - buffer overread";var n=L(this.byteArray,this.position,t);return this.position+=t,new e(this.byteArrayParser,n)}},{key:"getSize",value:function(){return this.byteArray.length}},{key:"readUint16",value:function(){var e=this.byteArrayParser.readUint16(this.byteArray,this.position);return this.position+=2,e}},{key:"readUint32",value:function(){var e=this.byteArrayParser.readUint32(this.byteArray,this.position);return this.position+=4,e}},{key:"readFixedString",value:function(e){var t=I(this.byteArray,this.position,e);return this.position+=e,t}}])&&O(t.prototype,n),r&&O(t,r),Object.defineProperty(t,"prototype",{writable:!1}),e}(),G={readUint16:function(e,t){if(t<0)throw"littleEndianByteArrayParser.readUint16: position cannot be less than 0";if(t+2>e.length)throw"littleEndianByteArrayParser.readUint16: attempt to read past end of buffer";return e[t]+256*e[t+1]},readInt16:function(e,t){if(t<0)throw"littleEndianByteArrayParser.readInt16: position cannot be less than 
0";if(t+2>e.length)throw"littleEndianByteArrayParser.readInt16: attempt to read past end of buffer";return 32768&(t=e[t]+(e[t+1]<<8))?t-65535-1:t},readUint32:function(e,t){if(t<0)throw"littleEndianByteArrayParser.readUint32: position cannot be less than 0";if(t+4>e.length)throw"littleEndianByteArrayParser.readUint32: attempt to read past end of buffer";return e[t]+256*e[t+1]+256*e[t+2]*256+256*e[t+3]*256*256},readInt32:function(e,t){if(t<0)throw"littleEndianByteArrayParser.readInt32: position cannot be less than 0";if(t+4>e.length)throw"littleEndianByteArrayParser.readInt32: attempt to read past end of buffer";return e[t]+(e[t+1]<<8)+(e[t+2]<<16)+(e[t+3]<<24)},readFloat:function(e,t){if(t<0)throw"littleEndianByteArrayParser.readFloat: position cannot be less than 0";if(t+4>e.length)throw"littleEndianByteArrayParser.readFloat: attempt to read past end of buffer";var n=new Uint8Array(4);return n[0]=e[t],n[1]=e[t+1],n[2]=e[t+2],n[3]=e[t+3],new Float32Array(n.buffer)[0]},readDouble:function(e,t){if(t<0)throw"littleEndianByteArrayParser.readDouble: position cannot be less than 0";if(t+8>e.length)throw"littleEndianByteArrayParser.readDouble: attempt to read past end of buffer";var n=new Uint8Array(8);return n[0]=e[t],n[1]=e[t+1],n[2]=e[t+2],n[3]=e[t+3],n[4]=e[t+4],n[5]=e[t+5],n[6]=e[t+6],n[7]=e[t+7],new Float64Array(n.buffer)[0]}};function P(e){var t=1= 0";if(n>=t.fragments.length)throw"dicomParser.readEncapsulatedPixelDataFromFragments: parameter 'startFragmentIndex' must be < number of fragments";if(r<1)throw"dicomParser.readEncapsulatedPixelDataFromFragments: parameter 'numFragments' must be > 0";if(n+r>t.fragments.length)throw"dicomParser.readEncapsulatedPixelDataFromFragments: parameter 'startFragment' + 'numFragments' < number of fragments";var i=new U(e.byteArrayParser,e.byteArray,t.dataOffset);if("xfffee000"!==(t=w(i)).tag)throw"dicomParser.readEncapsulatedPixelData: missing basic offset table xfffee000";i.seek(t.length);var o=i.position;if(1===r)return L(i.byteArray,o+a[n].offset+8,a[n].length);t=Z(a,n,r);for(var s=F(i.byteArray,t),c=0,u=n;u= 0";if(n>=r.length)throw"dicomParser.readEncapsulatedImageFrame: parameter 'frameIndex' must be < basicOffsetTable.length";var i=r[n];if(void 0===(i=z(a,i)))throw"dicomParser.readEncapsulatedImageFrame: unable to find fragment that matches basic offset table entry";return H(e,t,i,W(n,r,a,i),a)}var K=!1;function Y(e,t,n){if(K||(K=!0,console&&console.log&&console.log("WARNING: dicomParser.readEncapsulatedPixelData() has been deprecated")),void 0===e)throw"dicomParser.readEncapsulatedPixelData: missing required parameter 'dataSet'";if(void 0===t)throw"dicomParser.readEncapsulatedPixelData: missing required parameter 'element'";if(void 0===n)throw"dicomParser.readEncapsulatedPixelData: missing required parameter 'frame'";if("x7fe00010"!==t.tag)throw"dicomParser.readEncapsulatedPixelData: parameter 'element' refers to non pixel data tag (expected tag = x7fe00010)";if(!0!==t.encapsulatedPixelData)throw"dicomParser.readEncapsulatedPixelData: parameter 'element' refers to pixel data element that does not have encapsulated pixel data";if(!0!==t.hadUndefinedLength)throw"dicomParser.readEncapsulatedPixelData: parameter 'element' refers to pixel data element that does not have encapsulated pixel data";if(void 0===t.basicOffsetTable)throw"dicomParser.readEncapsulatedPixelData: parameter 'element' refers to pixel data element that does not have encapsulated pixel data";if(void 0===t.fragments)throw"dicomParser.readEncapsulatedPixelData: parameter 'element' refers 
to pixel data element that does not have encapsulated pixel data";if(n<0)throw"dicomParser.readEncapsulatedPixelData: parameter 'frame' must be >= 0";return 0!==t.basicOffsetTable.length?J(e,t,n):H(e,t,0,t.fragments.length)}t.default={isStringVr:a,isPrivateTag:i,parsePN:o,parseTM:s,parseDA:u,explicitElementToString:l,explicitDataSetToJS:A,createJPEGBasicOffsetTable:f,parseDicomDataSetExplicit:N,parseDicomDataSetImplicit:R,readFixedString:I,alloc:F,version:T,bigEndianByteArrayParser:M,ByteStream:U,sharedCopy:L,DataSet:C,findAndSetUNElementLength:m,findEndOfEncapsulatedElement:p,findItemDelimitationItemAndSetElementLength:E,littleEndianByteArrayParser:G,parseDicom:q,readDicomElementExplicit:D,readDicomElementImplicit:B,readEncapsulatedImageFrame:J,readEncapsulatedPixelData:Y,readEncapsulatedPixelDataFromFragments:H,readPart10Header:P,readSequenceItemsExplicit:_,readSequenceItemsImplicit:k,readSequenceItem:w,readTag:g,LEI:"1.2.840.10008.1.2",LEE:"1.2.840.10008.1.2.1"}}],r={},t.m=n,t.c=r,t.d=function(e,n,r){t.o(e,n)||Object.defineProperty(e,n,{enumerable:!0,get:r})},t.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},t.t=function(e,n){if(1&n&&(e=t(e)),8&n)return e;if(4&n&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(t.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&n&&"string"!=typeof e)for(var a in e)t.d(r,a,function(t){return e[t]}.bind(null,a));return r},t.n=function(e){var n=e&&e.__esModule?function(){return e.default}:function(){return e};return t.d(n,"a",n),n},t.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},t.p="",t(t.s=1);function t(e){if(r[e])return r[e].exports;var a=r[e]={i:e,l:!1,exports:{}};return n[e].call(a.exports,a,a.exports,t),a.l=!0,a.exports}var n,r}(n(23848))},20858:function(e,t,n){var r,a,i;!function(o,s){"use strict";a=[n(44711)],void 0===(i="function"===typeof(r=function(e){var t=/(^|@)\S+:\d+/,n=/^\s*at .*(\S+:\d+|\(native\))/m,r=/^(eval@)?(\[native code])?$/;return{parse:function(e){if("undefined"!==typeof e.stacktrace||"undefined"!==typeof e["opera#sourceloc"])return this.parseOpera(e);if(e.stack&&e.stack.match(n))return this.parseV8OrIE(e);if(e.stack)return this.parseFFOrSafari(e);throw new Error("Cannot parse given Error object")},extractLocation:function(e){if(-1===e.indexOf(":"))return[e];var t=/(.+?)(?::(\d+))?(?::(\d+))?$/.exec(e.replace(/[()]/g,""));return[t[1],t[2]||void 0,t[3]||void 0]},parseV8OrIE:function(t){return t.stack.split("\n").filter((function(e){return!!e.match(n)}),this).map((function(t){t.indexOf("(eval ")>-1&&(t=t.replace(/eval code/g,"eval").replace(/(\(eval at [^()]*)|(,.*$)/g,""));var n=t.replace(/^\s+/,"").replace(/\(eval code/g,"(").replace(/^.*?\s+/,""),r=n.match(/ (\(.+\)$)/);n=r?n.replace(r[0],""):n;var a=this.extractLocation(r?r[1]:n),i=r&&n||void 0,o=["eval",""].indexOf(a[0])>-1?void 0:a[0];return new e({functionName:i,fileName:o,lineNumber:a[1],columnNumber:a[2],source:t})}),this)},parseFFOrSafari:function(t){return t.stack.split("\n").filter((function(e){return!e.match(r)}),this).map((function(t){if(t.indexOf(" > eval")>-1&&(t=t.replace(/ line (\d+)(?: > eval line \d+)* > eval:\d+:\d+/g,":$1")),-1===t.indexOf("@")&&-1===t.indexOf(":"))return new e({functionName:t});var n=/((.*".+"[^@]*)?[^@]*)(?:@)/,r=t.match(n),a=r&&r[1]?r[1]:void 0,i=this.extractLocation(t.replace(n,""));return new 
e({functionName:a,fileName:i[0],lineNumber:i[1],columnNumber:i[2],source:t})}),this)},parseOpera:function(e){return!e.stacktrace||e.message.indexOf("\n")>-1&&e.message.split("\n").length>e.stacktrace.split("\n").length?this.parseOpera9(e):e.stack?this.parseOpera11(e):this.parseOpera10(e)},parseOpera9:function(t){for(var n=/Line (\d+).*script (?:in )?(\S+)/i,r=t.message.split("\n"),a=[],i=2,o=r.length;i/,"$2").replace(/\([^)]*\)/g,"")||void 0;i.match(/\(([^)]*)\)/)&&(n=i.replace(/^[^(]+\(([^)]*)\)$/,"$1"));var s=void 0===n||"[arguments not available]"===n?void 0:n.split(",");return new e({functionName:o,args:s,fileName:a[0],lineNumber:a[1],columnNumber:a[2],source:t})}),this)}}})?r.apply(t,a):r)||(e.exports=i)}()},76297:function(e,t,n){"use strict";for(var r=n(56690).default,a=n(89728).default,i=n(61655).default,o=n(26389).default,s=n(71352).IOBuffer,c=n(17886),u=[],l=0;l<=8;l++)u.push(255<=0;h--){var f=0===h;n.reset(),n.skip(h*r);for(var g=0;g>d)),p&&(l+=o||0,n.skip(i),d=8-(A=l%8))}}r>a&&(n.reset(),n.skip(c-1),n.writeUint8(0))}},{key:"writeColorTable",value:function(){this.encoded.writeUint32(0).writeUint32(16777215)}},{key:"writeBitmapFileHeader",value:function(e){this.encoded.writeChars("BM").writeInt32(this.encoded.lastWrittenByte).writeUint16(0).writeUint16(0).writeUint32(e)}},{key:"writeBitmapV5Header",value:function(){var e=4*Math.floor((this.bitDepth*this.width+31)/32)*this.height;this.encoded.writeUint32(124).writeInt32(this.width).writeInt32(this.height).writeUint16(1).writeUint16(this.bitDepth).writeUint32(c.BITMAPV5HEADER.Compression.BI_RGB).writeUint32(e).writeInt32(0).writeInt32(0).writeUint32(Math.pow(2,this.bitDepth)).writeUint32(Math.pow(2,this.bitDepth)).writeUint32(4278190080).writeUint32(16711680).writeUint32(65280).writeUint32(255).writeUint32(c.BITMAPV5HEADER.LogicalColorSpace.LCS_sRGB).skip(36).skip(12).writeUint32(c.BITMAPV5HEADER.GamutMappingIntent.LCS_GM_IMAGES).skip(12)}}]),n}(s);e.exports=A},17886:function(e){"use strict";e.exports={BITMAPV5HEADER:{LogicalColorSpace:{LCS_CALIBRATED_RGB:0,LCS_sRGB:1934772034,LCS_WINDOWS_COLOR_SPACE:1466527264},Compression:{BI_RGB:0,BI_RLE8:1,BI_RLE4:2,BI_BITFIELDS:3,BI_JPEG:4,BI_PNG:5,BI_CMYK:11,BI_CMYKRLE8:12,BI_CMYKRLE4:13},GamutMappingIntent:{LCS_GM_ABS_COLORIMETRIC:8,LCS_GM_BUSINESS:1,LCS_GM_GRAPHICS:2,LCS_GM_IMAGES:4}}}},88545:function(e,t,n){"use strict";var r=n(76297);t.c=function(e){return new r(e).encode()}},78262:function(e){"use strict";e.exports=function e(t,n){if(t===n)return!0;if(t&&n&&"object"==typeof t&&"object"==typeof n){if(t.constructor!==n.constructor)return!1;var r,a,i;if(Array.isArray(t)){if((r=t.length)!=n.length)return!1;for(a=r;0!==a--;)if(!e(t[a],n[a]))return!1;return!0}if(t.constructor===RegExp)return t.source===n.source&&t.flags===n.flags;if(t.valueOf!==Object.prototype.valueOf)return t.valueOf()===n.valueOf();if(t.toString!==Object.prototype.toString)return t.toString()===n.toString();if((r=(i=Object.keys(t)).length)!==Object.keys(n).length)return!1;for(a=r;0!==a--;)if(!Object.prototype.hasOwnProperty.call(n,i[a]))return!1;for(a=r;0!==a--;){var o=i[a];if(!e(t[o],n[o]))return!1}return!0}return t!==t&&n!==n}},39823:function(e,t,n){"use strict";var r=n(56690).default,a=n(89728).default,i=[],o=function(){function e(t,n){r(this,e),n=n||{},void 0===t&&(t=8192),"number"===typeof t&&(t=new ArrayBuffer(t));var 
a=t.byteLength,i=n.offset?n.offset>>>0:0;t.buffer&&(a=t.byteLength-i,t=t.byteLength!==t.buffer.byteLength?t.buffer.slice(t.byteOffset+i,t.byteOffset+t.byteLength):i?t.buffer.slice(i):t.buffer),this.buffer=t,this.length=a,this.byteLength=a,this.byteOffset=0,this.offset=0,this.littleEndian=!0,this._data=new DataView(this.buffer),this._increment=a||8192,this._mark=0}return a(e,[{key:"available",value:function(e){return void 0===e&&(e=1),this.offset+e<=this.length}},{key:"isLittleEndian",value:function(){return this.littleEndian}},{key:"setLittleEndian",value:function(){this.littleEndian=!0}},{key:"isBigEndian",value:function(){return!this.littleEndian}},{key:"setBigEndian",value:function(){this.littleEndian=!1}},{key:"skip",value:function(e){void 0===e&&(e=1),this.offset+=e}},{key:"seek",value:function(e){this.offset=e}},{key:"mark",value:function(){this._mark=this.offset}},{key:"reset",value:function(){this.offset=this._mark}},{key:"rewind",value:function(){this.offset=0}},{key:"ensureAvailable",value:function(e){if(void 0===e&&(e=1),!this.available(e)){var t=this._increment+this._increment;this._increment=t;var n=this.length+t,r=new Uint8Array(n);r.set(new Uint8Array(this.buffer)),this.buffer=r.buffer,this.length=n,this._data=new DataView(this.buffer)}}},{key:"readBoolean",value:function(){return 0!==this.readUint8()}},{key:"readInt8",value:function(){return this._data.getInt8(this.offset++)}},{key:"readUint8",value:function(){return this._data.getUint8(this.offset++)}},{key:"readByte",value:function(){return this.readUint8()}},{key:"readBytes",value:function(e){void 0===e&&(e=1);for(var t=new Uint8Array(e),n=0;n12)this.skip(4);else{l.getByteLength(r,a)>4&&this.seek(this.readUint32());var i=l.readData(this,r,a);if(e.fields.set(n,i),34665===n||34853===n){var o,s=this.offset;34665===n?o="exif":34853===n&&(o="gps"),this._nextIFD=i,e[o]=this.decodeIFD({kind:o,ignoreImageData:!0}),this.offset=s}this.seek(t),this.skip(12)}}},{key:"decodeImageData",value:function(e){var t=e.orientation;switch(t&&1!==t&&p("orientation",t),e.type){case 1:case 2:this.readStripData(e);break;default:p("image type",e.type)}}},{key:"readStripData",value:function(e){for(var t=e.width,n=e.height,r=function(e){if(e.length){var t=e;e=t[0];for(var n=0;nc?c:i;i-=I,8===r?A=h(o,m,A,I):16===r?A=f(o,m,A,I,this.isLittleEndian()):32===r&&3===a?A=g(o,m,A,I,this.isLittleEndian()):p("bitDepth",r)}e.data=o}},{key:"getStripData",value:function(e,t,n){switch(e){case 1:return new DataView(this.buffer,t,n);case 2:case 32773:return p("Compression",e);default:throw new Error("invalid compression: "+e)}}}]),n}(s);function h(e,t,n,r){for(var a=0;a0&&t;)t=t.next;return t?t.data:void 0},slice:function(e,t){if(e||(e=0),t||(t=this.length),t<0&&(t=this.length+t),e<0&&(e=this.length+e),t===e)return[];if(t0&&i;)i=i.next;for(;ai;i<<=1)a++;this._width=a%2===0?a-1:a,this._bitrev=new Array(1<>>s&3)<>>1),r=0;r>>1]=e[r];return n},t.prototype.createComplexArray=function(){for(var e=new Array(this._csize),t=0;t>>1],n[r+1]=0;return n},t.prototype.completeSpectrum=function(e){for(var t=this._csize,n=t>>>1,r=2;r>=2;a>=2;a>>=2){var A=(i=r/a<<1)>>>2;for(e=0;e>>1,a>>>1)}else for(e=0,t=0;e>>1,a>>>1)}var u=this._inv?-1:1,l=this.table;for(a>>=2;a>=2;a>>=2){var A=(i=r/a<<1)>>>1,d=A>>>1,h=d>>>1;for(e=0;e=t.status}function i(e){try{e.dispatchEvent(new MouseEvent("click"))}catch(r){var t=document.createEvent("MouseEvents");t.initMouseEvent("click",!0,!0,window,0,0,0,80,20,!1,!1,!1,!1,0,null),e.dispatchEvent(t)}}var o="object"==typeof 
window&&window.window===window?window:"object"==typeof self&&self.self===self?self:"object"==typeof n.g&&n.g.global===n.g?n.g:void 0,s=o.navigator&&/Macintosh/.test(navigator.userAgent)&&/AppleWebKit/.test(navigator.userAgent)&&!/Safari/.test(navigator.userAgent),c=o.saveAs||("object"!=typeof window||window!==o?function(){}:"download"in HTMLAnchorElement.prototype&&!s?function(e,t,n){var s=o.URL||o.webkitURL,c=document.createElement("a");t=t||e.name||"download",c.download=t,c.rel="noopener","string"==typeof e?(c.href=e,c.origin===location.origin?i(c):a(c.href)?r(e,t,n):i(c,c.target="_blank")):(c.href=s.createObjectURL(e),setTimeout((function(){s.revokeObjectURL(c.href)}),4e4),setTimeout((function(){i(c)}),0))}:"msSaveOrOpenBlob"in navigator?function(e,n,o){if(n=n||e.name||"download","string"!=typeof e)navigator.msSaveOrOpenBlob(t(e,o),n);else if(a(e))r(e,n,o);else{var s=document.createElement("a");s.href=e,s.target="_blank",setTimeout((function(){i(s)}))}}:function(e,t,n,a){if((a=a||open("","_blank"))&&(a.document.title=a.document.body.innerText="downloading..."),"string"==typeof e)return r(e,t,n);var i="application/octet-stream"===e.type,c=/constructor/i.test(o.HTMLElement)||o.safari,u=/CriOS\/[\d]+/.test(navigator.userAgent);if((u||i&&c||s)&&"undefined"!=typeof FileReader){var l=new FileReader;l.onloadend=function(){var e=l.result;e=u?e:e.replace(/^data:[^;]*;/,"data:attachment/file;"),a?a.location.href=e:location=e,a=null},l.readAsDataURL(e)}else{var A=o.URL||o.webkitURL,d=A.createObjectURL(e);a?a.location=d:location.href=d,a=null,setTimeout((function(){A.revokeObjectURL(d)}),4e4)}});o.saveAs=c.saveAs=c,e.exports=c})?r.apply(t,a):r)||(e.exports=i)},95323:function(module,__unused_webpack_exports,__webpack_require__){"use strict";var _toConsumableArray=__webpack_require__(861).default,toBytes=function(e){return _toConsumableArray(e).map((function(e){return e.charCodeAt(0)}))},xpiZipFilename=toBytes("META-INF/mozilla.rsa"),oxmlContentTypes=toBytes("[Content_Types].xml"),oxmlRels=toBytes("_rels/.rels");function readUInt64LE(e){for(var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:0,n=e[t],r=1,a=0;++a<8;)r*=256,n+=e[t+a]*r;return n}var fileType=function(e){if(!(e instanceof Uint8Array||e instanceof ArrayBuffer||Buffer.isBuffer(e)))throw new TypeError("Expected the `input` argument to be of type `Uint8Array` or `Buffer` or `ArrayBuffer`, got `".concat(typeof e,"`"));var t=e instanceof Uint8Array?e:new Uint8Array(e);if(!(t&&t.length>1))return null;var n=function(e,n){n=Object.assign({offset:0},n);for(var r=0;r1&&void 0!==arguments[1]?arguments[1]:0;return e.findIndex((function(e,n,r){return n>=t&&80===r[n]&&75===r[n+1]&&3===r[n+2]&&4===r[n+3]}))},i=0,o=!1,s=null;do{var c=i+30;if(o||(o=n(oxmlContentTypes,{offset:c})||n(oxmlRels,{offset:c})),s||(r("word/",{offset:c})?s={ext:"docx",mime:"application/vnd.openxmlformats-officedocument.wordprocessingml.document"}:r("ppt/",{offset:c})?s={ext:"pptx",mime:"application/vnd.openxmlformats-officedocument.presentationml.presentation"}:r("xl/",{offset:c})&&(s={ext:"xlsx",mime:"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"})),o&&s)return s;i=a(t,c)}while(i>=0);if(s)return 
s}if(n([80,75])&&(3===t[2]||5===t[2]||7===t[2])&&(4===t[3]||6===t[3]||8===t[3]))return{ext:"zip",mime:"application/zip"};if(n([117,115,116,97,114],{offset:257}))return{ext:"tar",mime:"application/x-tar"};if(n([82,97,114,33,26,7])&&(0===t[6]||1===t[6]))return{ext:"rar",mime:"application/x-rar-compressed"};if(n([31,139,8]))return{ext:"gz",mime:"application/gzip"};if(n([66,90,104]))return{ext:"bz2",mime:"application/x-bzip2"};if(n([55,122,188,175,39,28]))return{ext:"7z",mime:"application/x-7z-compressed"};if(n([120,1]))return{ext:"dmg",mime:"application/x-apple-diskimage"};if(n([51,103,112,53])||n([0,0,0])&&n([102,116,121,112],{offset:4})&&(n([109,112,52,49],{offset:8})||n([109,112,52,50],{offset:8})||n([105,115,111,109],{offset:8})||n([105,115,111,50],{offset:8})||n([109,109,112,52],{offset:8})||n([77,52,86],{offset:8})||n([100,97,115,104],{offset:8})))return{ext:"mp4",mime:"video/mp4"};if(n([77,84,104,100]))return{ext:"mid",mime:"audio/midi"};if(n([26,69,223,163])){var u=t.subarray(4,4100),l=u.findIndex((function(e,t,n){return 66===n[t]&&130===n[t+1]}));if(-1!==l){var A=l+3,d=function(e){return _toConsumableArray(e).every((function(e,t){return u[A+t]===e.charCodeAt(0)}))};if(d("matroska"))return{ext:"mkv",mime:"video/x-matroska"};if(d("webm"))return{ext:"webm",mime:"video/webm"}}}if(n([0,0,0,20,102,116,121,112,113,116,32,32])||n([102,114,101,101],{offset:4})||n([102,116,121,112,113,116,32,32],{offset:4})||n([109,100,97,116],{offset:4})||n([109,111,111,118],{offset:4})||n([119,105,100,101],{offset:4}))return{ext:"mov",mime:"video/quicktime"};if(n([82,73,70,70])){if(n([65,86,73],{offset:8}))return{ext:"avi",mime:"video/vnd.avi"};if(n([87,65,86,69],{offset:8}))return{ext:"wav",mime:"audio/vnd.wave"};if(n([81,76,67,77],{offset:8}))return{ext:"qcp",mime:"audio/qcelp"}}if(n([48,38,178,117,142,102,207,17,166,217])){var h=30;do{var f=readUInt64LE(t,h+16);if(n([145,7,220,183,183,169,207,17,142,230,0,192,12,32,83,101],{offset:h})){if(n([64,158,105,248,77,91,207,17,168,253,0,128,95,92,68,43],{offset:h+24}))return{ext:"wma",mime:"audio/x-ms-wma"};if(n([192,239,25,188,77,91,207,17,168,253,0,128,95,92,68,43],{offset:h+24}))return{ext:"wmv",mime:"video/x-ms-asf"};break}h+=f}while(h+24<=t.length);return{ext:"asf",mime:"application/vnd.ms-asf"}}if(n([0,0,1,186])||n([0,0,1,179]))return{ext:"mpg",mime:"video/mpeg"};if(n([102,116,121,112,51,103],{offset:4}))return{ext:"3gp",mime:"video/3gpp"};for(var g=0;g<2&&g0&&e<=this.maxKeyLength},e.prototype.find=function(e,t,n){e:for(var r=0,a=this.caches[n-1];r=this.maxLengthPerKey?n[Math.random()*n.length|0]=r:n.push(r)},e.prototype.decode=function(e,t,n){var a=this.find(e,t,n);if(null!=a)return this.hit++,a;this.miss++;var i=Object(r.utf8DecodeJs)(e,t,n),o=Uint8Array.prototype.slice.call(e,t,t+n);return this.store(o,i),i},e}()},"./node_modules/@msgpack/msgpack/dist.es5+esm/DecodeError.mjs":function(e,t,n){"use strict";n.r(t),n.d(t,"DecodeError",(function(){return a}));var r=function(){var e=function(t,n){return e=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(e,t){e.__proto__=t}||function(e,t){for(var n in t)Object.prototype.hasOwnProperty.call(t,n)&&(e[n]=t[n])},e(t,n)};return function(t,n){if("function"!==typeof n&&null!==n)throw new TypeError("Class extends value "+String(n)+" is not a constructor or null");function r(){this.constructor=t}e(t,n),t.prototype=null===n?Object.create(n):(r.prototype=n.prototype,new r)}}(),a=function(e){function t(n){var r=e.call(this,n)||this,a=Object.create(t.prototype);return 
Object.setPrototypeOf(r,a),Object.defineProperty(r,"name",{configurable:!0,enumerable:!1,value:t.name}),r}return r(t,e),t}(Error)},"./node_modules/@msgpack/msgpack/dist.es5+esm/Decoder.mjs":function(e,t,n){"use strict";n.r(t),n.d(t,"DataViewIndexOutOfBoundsError",(function(){return I})),n.d(t,"Decoder",(function(){return C}));var r=n("./node_modules/@msgpack/msgpack/dist.es5+esm/utils/prettyByte.mjs"),a=n("./node_modules/@msgpack/msgpack/dist.es5+esm/ExtensionCodec.mjs"),i=n("./node_modules/@msgpack/msgpack/dist.es5+esm/utils/int.mjs"),o=n("./node_modules/@msgpack/msgpack/dist.es5+esm/utils/utf8.mjs"),s=n("./node_modules/@msgpack/msgpack/dist.es5+esm/utils/typedArrays.mjs"),c=n("./node_modules/@msgpack/msgpack/dist.es5+esm/CachedKeyDecoder.mjs"),u=n("./node_modules/@msgpack/msgpack/dist.es5+esm/DecodeError.mjs"),l=function(e,t,n,r){return new(n||(n=Promise))((function(a,i){function o(e){try{c(r.next(e))}catch(t){i(t)}}function s(e){try{c(r.throw(e))}catch(t){i(t)}}function c(e){var t;e.done?a(e.value):(t=e.value,t instanceof n?t:new n((function(e){e(t)}))).then(o,s)}c((r=r.apply(e,t||[])).next())}))},A=function(e,t){var n,r,a,i,o={label:0,sent:function(){if(1&a[0])throw a[1];return a[1]},trys:[],ops:[]};return i={next:s(0),throw:s(1),return:s(2)},"function"===typeof Symbol&&(i[Symbol.iterator]=function(){return this}),i;function s(i){return function(s){return function(i){if(n)throw new TypeError("Generator is already executing.");for(;o;)try{if(n=1,r&&(a=2&i[0]?r.return:i[0]?r.throw||((a=r.return)&&a.call(r),0):r.next)&&!(a=a.call(r,i[1])).done)return a;switch(r=0,a&&(i=[2&i[0],a.value]),i[0]){case 0:case 1:a=i;break;case 4:return o.label++,{value:i[1],done:!1};case 5:o.label++,r=i[1],i=[0];continue;case 7:i=o.ops.pop(),o.trys.pop();continue;default:if(!(a=(a=o.trys).length>0&&a[a.length-1])&&(6===i[0]||2===i[0])){o=0;continue}if(3===i[0]&&(!a||i[1]>a[0]&&i[1]1||s(e,t)}))})}function s(e,t){try{!function(e){e.value instanceof h?Promise.resolve(e.value.v).then(c,u):l(i[0][2],e)}(a[e](t))}catch(n){l(i[0][3],n)}}function c(e){s("next",e)}function u(e){s("throw",e)}function l(e,t){e(t),i.shift(),i.length&&s(i[0][0],i[0][1])}},g=function(e){var t=typeof e;return"string"===t||"number"===t},p=new DataView(new ArrayBuffer(0)),m=new Uint8Array(p.buffer),I=function(){try{p.getInt8(0)}catch(e){return e.constructor}throw new Error("never reached")}(),v=new I("Insufficient data"),y=new c.CachedKeyDecoder,C=function(){function e(e,t,n,r,o,s,c,u){void 0===e&&(e=a.ExtensionCodec.defaultCodec),void 0===t&&(t=void 0),void 0===n&&(n=i.UINT32_MAX),void 0===r&&(r=i.UINT32_MAX),void 0===o&&(o=i.UINT32_MAX),void 0===s&&(s=i.UINT32_MAX),void 0===c&&(c=i.UINT32_MAX),void 0===u&&(u=y),this.extensionCodec=e,this.context=t,this.maxStrLength=n,this.maxBinLength=r,this.maxArrayLength=o,this.maxMapLength=s,this.maxExtLength=c,this.keyDecoder=u,this.totalPos=0,this.pos=0,this.view=p,this.bytes=m,this.headByte=-1,this.stack=[]}return e.prototype.reinitializeState=function(){this.totalPos=0,this.headByte=-1,this.stack.length=0},e.prototype.setBuffer=function(e){this.bytes=Object(s.ensureUint8Array)(e),this.view=Object(s.createDataView)(this.bytes),this.pos=0},e.prototype.appendBuffer=function(e){if(-1!==this.headByte||this.hasRemaining(1)){var t=this.bytes.subarray(this.pos),n=Object(s.ensureUint8Array)(e),r=new Uint8Array(t.length+n.length);r.set(t),r.set(n,t.length),this.setBuffer(r)}else this.setBuffer(e)},e.prototype.hasRemaining=function(e){return 
this.view.byteLength-this.pos>=e},e.prototype.createExtraByteError=function(e){var t=this.view,n=this.pos;return new RangeError("Extra "+(t.byteLength-n)+" of "+t.byteLength+" byte(s) found at buffer["+e+"]")},e.prototype.decode=function(e){this.reinitializeState(),this.setBuffer(e);var t=this.doDecodeSync();if(this.hasRemaining(1))throw this.createExtraByteError(this.pos);return t},e.prototype.decodeMulti=function(e){return A(this,(function(t){switch(t.label){case 0:this.reinitializeState(),this.setBuffer(e),t.label=1;case 1:return this.hasRemaining(1)?[4,this.doDecodeSync()]:[3,3];case 2:return t.sent(),[3,1];case 3:return[2]}}))},e.prototype.decodeAsync=function(e){var t,n,a,i;return l(this,void 0,void 0,(function(){var o,s,c,u,l,h,f,g;return A(this,(function(A){switch(A.label){case 0:o=!1,A.label=1;case 1:A.trys.push([1,6,7,12]),t=d(e),A.label=2;case 2:return[4,t.next()];case 3:if((n=A.sent()).done)return[3,5];if(c=n.value,o)throw this.createExtraByteError(this.totalPos);this.appendBuffer(c);try{s=this.doDecodeSync(),o=!0}catch(p){if(!(p instanceof I))throw p}this.totalPos+=this.pos,A.label=4;case 4:return[3,2];case 5:return[3,12];case 6:return u=A.sent(),a={error:u},[3,12];case 7:return A.trys.push([7,,10,11]),n&&!n.done&&(i=t.return)?[4,i.call(t)]:[3,9];case 8:A.sent(),A.label=9;case 9:return[3,11];case 10:if(a)throw a.error;return[7];case 11:return[7];case 12:if(o){if(this.hasRemaining(1))throw this.createExtraByteError(this.totalPos);return[2,s]}throw h=(l=this).headByte,f=l.pos,g=l.totalPos,new RangeError("Insufficient data in parsing "+Object(r.prettyByte)(h)+" at "+g+" ("+f+" in the current buffer)")}}))}))},e.prototype.decodeArrayStream=function(e){return this.decodeMultiAsync(e,!0)},e.prototype.decodeStream=function(e){return this.decodeMultiAsync(e,!1)},e.prototype.decodeMultiAsync=function(e,t){return f(this,arguments,(function(){var n,r,a,i,o,s,c,u,l;return A(this,(function(A){switch(A.label){case 0:n=t,r=-1,A.label=1;case 1:A.trys.push([1,13,14,19]),a=d(e),A.label=2;case 2:return[4,h(a.next())];case 3:if((i=A.sent()).done)return[3,12];if(o=i.value,t&&0===r)throw this.createExtraByteError(this.totalPos);this.appendBuffer(o),n&&(r=this.readArraySize(),n=!1,this.complete()),A.label=4;case 4:A.trys.push([4,9,,10]),A.label=5;case 5:return[4,h(this.doDecodeSync())];case 6:return[4,A.sent()];case 7:return A.sent(),0===--r?[3,8]:[3,5];case 8:return[3,10];case 9:if(!((s=A.sent())instanceof I))throw s;return[3,10];case 10:this.totalPos+=this.pos,A.label=11;case 11:return[3,2];case 12:return[3,19];case 13:return c=A.sent(),u={error:c},[3,19];case 14:return A.trys.push([14,,17,18]),i&&!i.done&&(l=a.return)?[4,h(l.call(a))]:[3,16];case 15:A.sent(),A.label=16;case 16:return[3,18];case 17:if(u)throw u.error;return[7];case 18:return[7];case 19:return[2]}}))}))},e.prototype.doDecodeSync=function(){e:for(;;){var e=this.readHeadByte(),t=void 0;if(e>=224)t=e-256;else if(e<192)if(e<128)t=e;else if(e<144){if(0!==(a=e-128)){this.pushMapState(a),this.complete();continue e}t={}}else if(e<160){if(0!==(a=e-144)){this.pushArrayState(a),this.complete();continue e}t=[]}else{var n=e-160;t=this.decodeUtf8String(n,0)}else if(192===e)t=null;else if(194===e)t=!1;else if(195===e)t=!0;else if(202===e)t=this.readF32();else if(203===e)t=this.readF64();else if(204===e)t=this.readU8();else if(205===e)t=this.readU16();else if(206===e)t=this.readU32();else if(207===e)t=this.readU64();else if(208===e)t=this.readI8();else if(209===e)t=this.readI16();else if(210===e)t=this.readI32();else 
if(211===e)t=this.readI64();else if(217===e)n=this.lookU8(),t=this.decodeUtf8String(n,1);else if(218===e)n=this.lookU16(),t=this.decodeUtf8String(n,2);else if(219===e)n=this.lookU32(),t=this.decodeUtf8String(n,4);else if(220===e){if(0!==(a=this.readU16())){this.pushArrayState(a),this.complete();continue e}t=[]}else if(221===e){if(0!==(a=this.readU32())){this.pushArrayState(a),this.complete();continue e}t=[]}else if(222===e){if(0!==(a=this.readU16())){this.pushMapState(a),this.complete();continue e}t={}}else if(223===e){if(0!==(a=this.readU32())){this.pushMapState(a),this.complete();continue e}t={}}else if(196===e){var a=this.lookU8();t=this.decodeBinary(a,1)}else if(197===e)a=this.lookU16(),t=this.decodeBinary(a,2);else if(198===e)a=this.lookU32(),t=this.decodeBinary(a,4);else if(212===e)t=this.decodeExtension(1,0);else if(213===e)t=this.decodeExtension(2,0);else if(214===e)t=this.decodeExtension(4,0);else if(215===e)t=this.decodeExtension(8,0);else if(216===e)t=this.decodeExtension(16,0);else if(199===e)a=this.lookU8(),t=this.decodeExtension(a,1);else if(200===e)a=this.lookU16(),t=this.decodeExtension(a,2);else{if(201!==e)throw new u.DecodeError("Unrecognized type byte: "+Object(r.prettyByte)(e));a=this.lookU32(),t=this.decodeExtension(a,4)}this.complete();for(var i=this.stack;i.length>0;){var o=i[i.length-1];if(0===o.type){if(o.array[o.position]=t,o.position++,o.position!==o.size)continue e;i.pop(),t=o.array}else{if(1===o.type){if(!g(t))throw new u.DecodeError("The type of key must be string or number but "+typeof t);if("__proto__"===t)throw new u.DecodeError("The key __proto__ is not allowed");o.key=t,o.type=2;continue e}if(o.map[o.key]=t,o.readCount++,o.readCount!==o.size){o.key=null,o.type=1;continue e}i.pop(),t=o.map}}return t}},e.prototype.readHeadByte=function(){return-1===this.headByte&&(this.headByte=this.readU8()),this.headByte},e.prototype.complete=function(){this.headByte=-1},e.prototype.readArraySize=function(){var e=this.readHeadByte();switch(e){case 220:return this.readU16();case 221:return this.readU32();default:if(e<160)return e-144;throw new u.DecodeError("Unrecognized array type byte: "+Object(r.prettyByte)(e))}},e.prototype.pushMapState=function(e){if(e>this.maxMapLength)throw new u.DecodeError("Max length exceeded: map length ("+e+") > maxMapLengthLength ("+this.maxMapLength+")");this.stack.push({type:1,size:e,key:null,readCount:0,map:{}})},e.prototype.pushArrayState=function(e){if(e>this.maxArrayLength)throw new u.DecodeError("Max length exceeded: array length ("+e+") > maxArrayLength ("+this.maxArrayLength+")");this.stack.push({type:0,size:e,array:new Array(e),position:0})},e.prototype.decodeUtf8String=function(e,t){var n;if(e>this.maxStrLength)throw new u.DecodeError("Max length exceeded: UTF-8 byte length ("+e+") > maxStrLength ("+this.maxStrLength+")");if(this.bytes.byteLengtho.TEXT_DECODER_THRESHOLD?Object(o.utf8DecodeTD)(this.bytes,a,e):Object(o.utf8DecodeJs)(this.bytes,a,e),this.pos+=t+e,r},e.prototype.stateIsMapKey=function(){return this.stack.length>0&&1===this.stack[this.stack.length-1].type},e.prototype.decodeBinary=function(e,t){if(e>this.maxBinLength)throw new u.DecodeError("Max length exceeded: bin length ("+e+") > maxBinLength ("+this.maxBinLength+")");if(!this.hasRemaining(e+t))throw v;var n=this.pos+t,r=this.bytes.subarray(n,n+e);return this.pos+=t+e,r},e.prototype.decodeExtension=function(e,t){if(e>this.maxExtLength)throw new u.DecodeError("Max length exceeded: ext length ("+e+") > maxExtLength ("+this.maxExtLength+")");var 
n=this.view.getInt8(this.pos+t),r=this.decodeBinary(e,t+1);return this.extensionCodec.decode(r,n,this.context)},e.prototype.lookU8=function(){return this.view.getUint8(this.pos)},e.prototype.lookU16=function(){return this.view.getUint16(this.pos)},e.prototype.lookU32=function(){return this.view.getUint32(this.pos)},e.prototype.readU8=function(){var e=this.view.getUint8(this.pos);return this.pos++,e},e.prototype.readI8=function(){var e=this.view.getInt8(this.pos);return this.pos++,e},e.prototype.readU16=function(){var e=this.view.getUint16(this.pos);return this.pos+=2,e},e.prototype.readI16=function(){var e=this.view.getInt16(this.pos);return this.pos+=2,e},e.prototype.readU32=function(){var e=this.view.getUint32(this.pos);return this.pos+=4,e},e.prototype.readI32=function(){var e=this.view.getInt32(this.pos);return this.pos+=4,e},e.prototype.readU64=function(){var e=Object(i.getUint64)(this.view,this.pos);return this.pos+=8,e},e.prototype.readI64=function(){var e=Object(i.getInt64)(this.view,this.pos);return this.pos+=8,e},e.prototype.readF32=function(){var e=this.view.getFloat32(this.pos);return this.pos+=4,e},e.prototype.readF64=function(){var e=this.view.getFloat64(this.pos);return this.pos+=8,e},e}()},"./node_modules/@msgpack/msgpack/dist.es5+esm/Encoder.mjs":function(e,t,n){"use strict";n.r(t),n.d(t,"DEFAULT_MAX_DEPTH",(function(){return s})),n.d(t,"DEFAULT_INITIAL_BUFFER_SIZE",(function(){return c})),n.d(t,"Encoder",(function(){return u}));var r=n("./node_modules/@msgpack/msgpack/dist.es5+esm/utils/utf8.mjs"),a=n("./node_modules/@msgpack/msgpack/dist.es5+esm/ExtensionCodec.mjs"),i=n("./node_modules/@msgpack/msgpack/dist.es5+esm/utils/int.mjs"),o=n("./node_modules/@msgpack/msgpack/dist.es5+esm/utils/typedArrays.mjs"),s=100,c=2048,u=function(){function e(e,t,n,r,i,o,u,l){void 0===e&&(e=a.ExtensionCodec.defaultCodec),void 0===t&&(t=void 0),void 0===n&&(n=s),void 0===r&&(r=c),void 0===i&&(i=!1),void 0===o&&(o=!1),void 0===u&&(u=!1),void 0===l&&(l=!1),this.extensionCodec=e,this.context=t,this.maxDepth=n,this.initialBufferSize=r,this.sortKeys=i,this.forceFloat32=o,this.ignoreUndefined=u,this.forceIntegerToFloat=l,this.pos=0,this.view=new DataView(new ArrayBuffer(this.initialBufferSize)),this.bytes=new Uint8Array(this.view.buffer)}return e.prototype.getUint8Array=function(){return this.bytes.subarray(0,this.pos)},e.prototype.reinitializeState=function(){this.pos=0},e.prototype.encode=function(e){return this.reinitializeState(),this.doEncode(e,1),this.getUint8Array()},e.prototype.doEncode=function(e,t){if(t>this.maxDepth)throw new Error("Too deep objects in depth "+t);null==e?this.encodeNil():"boolean"===typeof e?this.encodeBoolean(e):"number"===typeof e?this.encodeNumber(e):"string"===typeof e?this.encodeString(e):this.encodeObject(e,t)},e.prototype.ensureBufferSizeToWrite=function(e){var t=this.pos+e;this.view.byteLength=0?e<128?this.writeU8(e):e<256?(this.writeU8(204),this.writeU8(e)):e<65536?(this.writeU8(205),this.writeU16(e)):e<4294967296?(this.writeU8(206),this.writeU32(e)):(this.writeU8(207),this.writeU64(e)):e>=-32?this.writeU8(224|e+32):e>=-128?(this.writeU8(208),this.writeI8(e)):e>=-32768?(this.writeU8(209),this.writeI16(e)):e>=-2147483648?(this.writeU8(210),this.writeI32(e)):(this.writeU8(211),this.writeI64(e)):this.forceFloat32?(this.writeU8(202),this.writeF32(e)):(this.writeU8(203),this.writeF64(e))},e.prototype.writeStringHeader=function(e){if(e<32)this.writeU8(160+e);else if(e<256)this.writeU8(217),this.writeU8(e);else 
if(e<65536)this.writeU8(218),this.writeU16(e);else{if(!(e<4294967296))throw new Error("Too long string: "+e+" bytes in UTF-8");this.writeU8(219),this.writeU32(e)}},e.prototype.encodeString=function(e){if(e.length>r.TEXT_ENCODER_THRESHOLD){var t=Object(r.utf8Count)(e);this.ensureBufferSizeToWrite(5+t),this.writeStringHeader(t),Object(r.utf8EncodeTE)(e,this.bytes,this.pos),this.pos+=t}else t=Object(r.utf8Count)(e),this.ensureBufferSizeToWrite(5+t),this.writeStringHeader(t),Object(r.utf8EncodeJs)(e,this.bytes,this.pos),this.pos+=t},e.prototype.encodeObject=function(e,t){var n=this.extensionCodec.tryToEncode(e,this.context);if(null!=n)this.encodeExtension(n);else if(Array.isArray(e))this.encodeArray(e,t);else if(ArrayBuffer.isView(e))this.encodeBinary(e);else{if("object"!==typeof e)throw new Error("Unrecognized object: "+Object.prototype.toString.apply(e));this.encodeMap(e,t)}},e.prototype.encodeBinary=function(e){var t=e.byteLength;if(t<256)this.writeU8(196),this.writeU8(t);else if(t<65536)this.writeU8(197),this.writeU16(t);else{if(!(t<4294967296))throw new Error("Too large binary: "+t);this.writeU8(198),this.writeU32(t)}var n=Object(o.ensureUint8Array)(e);this.writeU8a(n)},e.prototype.encodeArray=function(e,t){var n=e.length;if(n<16)this.writeU8(144+n);else if(n<65536)this.writeU8(220),this.writeU16(n);else{if(!(n<4294967296))throw new Error("Too large array: "+n);this.writeU8(221),this.writeU32(n)}for(var r=0,a=e;r=0)this.encoders[t]=n,this.decoders[t]=r;else{var a=1+t;this.builtInEncoders[a]=n,this.builtInDecoders[a]=r}},e.prototype.tryToEncode=function(e,t){for(var n=0;n0&&a[a.length-1])&&(6===i[0]||2===i[0])){o=0;continue}if(3===i[0]&&(!a||i[1]>a[0]&&i[1]=0&&r>=0&&n<=s){if(0===r&&n<=o){var i=new Uint8Array(4);return(t=new DataView(i.buffer)).setUint32(0,n),i}var c=n/4294967296,u=4294967295&n;return i=new Uint8Array(8),(t=new DataView(i.buffer)).setUint32(0,r<<2|3&c),t.setUint32(4,u),i}return i=new Uint8Array(12),(t=new DataView(i.buffer)).setUint32(0,r),Object(a.setInt64)(t,4,n),i}function u(e){var t=e.getTime(),n=Math.floor(t/1e3),r=1e6*(t-1e3*n),a=Math.floor(r/1e9);return{sec:n+a,nsec:r-1e9*a}}function l(e){return e instanceof Date?c(u(e)):null}function A(e){var t=new DataView(e.buffer,e.byteOffset,e.byteLength);switch(e.byteLength){case 4:return{sec:t.getUint32(0),nsec:0};case 8:var n=t.getUint32(0);return{sec:4294967296*(3&n)+t.getUint32(4),nsec:n>>>2};case 12:return{sec:Object(a.getInt64)(t,4),nsec:t.getUint32(0)};default:throw new r.DecodeError("Unrecognized data size for timestamp (expected 4, 8, or 12): "+e.length)}}function d(e){var t=A(e);return new Date(1e3*t.sec+t.nsec/1e6)}var h={type:i,encode:l,decode:d}},"./node_modules/@msgpack/msgpack/dist.es5+esm/utils/int.mjs":function(e,t,n){"use strict";n.r(t),n.d(t,"UINT32_MAX",(function(){return r})),n.d(t,"setUint64",(function(){return a})),n.d(t,"setInt64",(function(){return i})),n.d(t,"getInt64",(function(){return o})),n.d(t,"getUint64",(function(){return s}));var r=4294967295;function a(e,t,n){var r=n/4294967296,a=n;e.setUint32(t,r),e.setUint32(t+4,a)}function i(e,t,n){var r=Math.floor(n/4294967296),a=n;e.setUint32(t,r),e.setUint32(t+4,a)}function o(e,t){return 4294967296*e.getInt32(t)+e.getUint32(t+4)}function s(e,t){return 4294967296*e.getUint32(t)+e.getUint32(t+4)}},"./node_modules/@msgpack/msgpack/dist.es5+esm/utils/prettyByte.mjs":function(e,t,n){"use strict";function r(e){return(e<0?"-":"")+"0x"+Math.abs(e).toString(16).padStart(2,"0")}n.r(t),n.d(t,"prettyByte",(function(){return 
r}))},"./node_modules/@msgpack/msgpack/dist.es5+esm/utils/stream.mjs":function(e,t,n){"use strict";n.r(t),n.d(t,"isAsyncIterable",(function(){return o})),n.d(t,"asyncIterableFromStream",(function(){return s})),n.d(t,"ensureAsyncIterable",(function(){return c}));var r=function(e,t){var n,r,a,i,o={label:0,sent:function(){if(1&a[0])throw a[1];return a[1]},trys:[],ops:[]};return i={next:s(0),throw:s(1),return:s(2)},"function"===typeof Symbol&&(i[Symbol.iterator]=function(){return this}),i;function s(i){return function(s){return function(i){if(n)throw new TypeError("Generator is already executing.");for(;o;)try{if(n=1,r&&(a=2&i[0]?r.return:i[0]?r.throw||((a=r.return)&&a.call(r),0):r.next)&&!(a=a.call(r,i[1])).done)return a;switch(r=0,a&&(i=[2&i[0],a.value]),i[0]){case 0:case 1:a=i;break;case 4:return o.label++,{value:i[1],done:!1};case 5:o.label++,r=i[1],i=[0];continue;case 7:i=o.ops.pop(),o.trys.pop();continue;default:if(!(a=(a=o.trys).length>0&&a[a.length-1])&&(6===i[0]||2===i[0])){o=0;continue}if(3===i[0]&&(!a||i[1]>a[0]&&i[1]1||c(e,t)}))})}function c(e,t){try{!function(e){e.value instanceof a?Promise.resolve(e.value.v).then(u,l):A(o[0][2],e)}(i[e](t))}catch(n){A(o[0][3],n)}}function u(e){c("next",e)}function l(e){c("throw",e)}function A(e,t){e(t),o.shift(),o.length&&c(o[0][0],o[0][1])}};function o(e){return null!=e[Symbol.asyncIterator]}function s(e){return i(this,arguments,(function(){var t,n,i,o;return r(this,(function(r){switch(r.label){case 0:t=e.getReader(),r.label=1;case 1:r.trys.push([1,,9,10]),r.label=2;case 2:return[4,a(t.read())];case 3:return n=r.sent(),i=n.done,o=n.value,i?[4,a(void 0)]:[3,5];case 4:return[2,r.sent()];case 5:return function(e){if(null==e)throw new Error("Assertion Failure: value must not be null nor undefined")}(o),[4,a(o)];case 6:return[4,r.sent()];case 7:return r.sent(),[3,2];case 8:return[3,10];case 9:return t.releaseLock(),[7];case 10:return[2]}}))}))}function c(e){return o(e)?e:s(e)}},"./node_modules/@msgpack/msgpack/dist.es5+esm/utils/typedArrays.mjs":function(e,t,n){"use strict";function r(e){return e instanceof Uint8Array?e:ArrayBuffer.isView(e)?new Uint8Array(e.buffer,e.byteOffset,e.byteLength):e instanceof ArrayBuffer?new Uint8Array(e):Uint8Array.from(e)}function a(e){if(e instanceof ArrayBuffer)return new DataView(e);var t=r(e);return new DataView(t.buffer,t.byteOffset,t.byteLength)}n.r(t),n.d(t,"ensureUint8Array",(function(){return r})),n.d(t,"createDataView",(function(){return a}))},"./node_modules/@msgpack/msgpack/dist.es5+esm/utils/utf8.mjs":function(e,t,n){"use strict";n.r(t),n.d(t,"utf8Count",(function(){return i})),n.d(t,"utf8EncodeJs",(function(){return o})),n.d(t,"TEXT_ENCODER_THRESHOLD",(function(){return c})),n.d(t,"utf8EncodeTE",(function(){return u})),n.d(t,"utf8DecodeJs",(function(){return A})),n.d(t,"TEXT_DECODER_THRESHOLD",(function(){return h})),n.d(t,"utf8DecodeTD",(function(){return f}));var r=n("./node_modules/@msgpack/msgpack/dist.es5+esm/utils/int.mjs"),a=("undefined"===typeof process||"never"!=={NODE_ENV:"production",PUBLIC_URL:"",WDS_SOCKET_HOST:void 0,WDS_SOCKET_PATH:void 0,WDS_SOCKET_PORT:void 0,FAST_REFRESH:!0,REACT_APP_VERSION:"0.2.0"}.TEXT_ENCODING)&&"undefined"!==typeof TextEncoder&&"undefined"!==typeof TextDecoder;function i(e){for(var t=e.length,n=0,r=0;r=55296&&a<=56319&&r>6&31|192;else{if(o>=55296&&o<=56319&&i>12&15|224,t[a++]=o>>6&63|128):(t[a++]=o>>18&7|240,t[a++]=o>>12&63|128,t[a++]=o>>6&63|128)}t[a++]=63&o|128}else t[a++]=o}}var s=a?new TextEncoder:void 0,c=a?"undefined"!==typeof 
process&&"force"!=={NODE_ENV:"production",PUBLIC_URL:"",WDS_SOCKET_HOST:void 0,WDS_SOCKET_PATH:void 0,WDS_SOCKET_PORT:void 0,FAST_REFRESH:!0,REACT_APP_VERSION:"0.2.0"}.TEXT_ENCODING?200:0:r.UINT32_MAX,u=(null===s||void 0===s?void 0:s.encodeInto)?function(e,t,n){s.encodeInto(e,t.subarray(n))}:function(e,t,n){t.set(s.encode(e),n)},l=4096;function A(e,t,n){for(var r=t,a=r+n,i=[],o="";r65535&&(A-=65536,i.push(A>>>10&1023|55296),A=56320|1023&A),i.push(A)}else i.push(s);i.length>=l&&(o+=String.fromCharCode.apply(String,i),i.length=0)}return i.length>0&&(o+=String.fromCharCode.apply(String,i)),o}var d=a?new TextDecoder:null,h=a?"undefined"!==typeof process&&"force"!=={NODE_ENV:"production",PUBLIC_URL:"",WDS_SOCKET_HOST:void 0,WDS_SOCKET_PATH:void 0,WDS_SOCKET_PORT:void 0,FAST_REFRESH:!0,REACT_APP_VERSION:"0.2.0"}.TEXT_DECODER?200:0:r.UINT32_MAX;function f(e,t,n){var r=e.subarray(t,t+n);return d.decode(r)}},"./package.json":function(e){e.exports=JSON.parse('{"name":"imjoy-rpc","version":"0.5.21","description":"Remote procedure calls for ImJoy.","module":"index.js","types":"index.d.ts","scripts":{"build":"rm -rf dist && npm run build-umd","build-umd":"webpack --config webpack.config.js --mode development && NODE_ENV=production webpack --config webpack.config.js --mode production --devtool source-map ","watch":"NODE_ENV=production webpack --watch --progress --config webpack.config.js --mode production --devtool source-map","publish-npm":"npm install && npm run build && npm publish","serve":"webpack-dev-server","stats":"webpack --profile --json > stats.json","stats-prod":"webpack --profile --json --mode production > stats-prod.json","analyze":"webpack-bundle-analyzer -p 9999 stats.json","analyze-prod":"webpack-bundle-analyzer -p 9999 stats-prod.json","clean":"rimraf dist/*","deploy":"npm run build && node deploy-site.js","format":"prettier --write \\"{src,tests}/**/**\\"","check-format":"prettier --check \\"{src,tests}/**/**\\"","test":"karma start --single-run --browsers ChromeHeadless,FirefoxHeadless karma.conf.js","test-watch":"karma start --auto-watch --browsers ChromeDebugging karma.conf.js --debug"},"repository":{"type":"git","url":"git+https://github.com/imjoy-team/imjoy-rpc.git"},"keywords":["imjoy","rpc"],"author":"imjoy-team 
","license":"MIT","bugs":{"url":"https://github.com/imjoy-team/imjoy-rpc/issues"},"homepage":"https://github.com/imjoy-team/imjoy-rpc","dependencies":{"@msgpack/msgpack":"^2.7.1","socket.io-client":"^4.6.2"},"devDependencies":{"@babel/core":"^7.16.12","@babel/plugin-syntax-dynamic-import":"^7.8.3","@babel/polyfill":"^7.12.1","@babel/preset-env":"^7.16.11","@types/requirejs":"^2.1.34","babel-core":"^6.26.0","babel-eslint":"^10.1.0","babel-loader":"^8.2.3","babel-runtime":"^6.26.0","chai":"^4.3.6","clean-webpack-plugin":"^0.1.19","copy-webpack-plugin":"^5.1.2","eslint":"^6.8.0","eslint-config-prettier":"^4.2.0","eslint-loader":"^4.0.2","file-loader":"^0.11.2","fs-extra":"^0.30.0","gh-pages":"^2.0.1","html-loader":"^0.5.5","html-webpack-plugin":"^3.2.0","json-loader":"^0.5.4","karma":"^6.3.12","karma-chrome-launcher":"^3.1.0","karma-firefox-launcher":"^1.3.0","karma-mocha":"^2.0.1","karma-sourcemap-loader":"^0.3.8","karma-spec-reporter":"0.0.32","karma-webpack":"^4.0.2","lerna":"^6.0.3","lodash.debounce":"^4.0.8","mocha":"^10.1.0","postcss":"^7.0.36","prettier":"^1.6.1","rimraf":"^2.6.2","schema-utils":"^0.4.3","style-loader":"^0.18.1","ts-loader":"^9.4.3","url-loader":"^0.5.9","webpack":"^4.46.0","webpack-bundle-analyzer":"^4.7.0","webpack-cli":"^3.3.12","webpack-dev-server":"^3.11.3","webpack-merge":"^4.1.1","workbox-webpack-plugin":"^4.3.1","worker-loader":"^2.0.0","write-file-webpack-plugin":"^4.5.1"},"eslintConfig":{"globals":{"document":true,"window":true}}}')},"./src/hypha/rpc.js":function(e,t,n){"use strict";n.r(t),n.d(t,"API_VERSION",(function(){return i})),n.d(t,"RPC",(function(){return A}));var r=n("./src/hypha/utils.js"),a=n("./node_modules/@msgpack/msgpack/dist.es5+esm/index.mjs"),i="0.3.0",o=512e3,s=Object.getPrototypeOf(Object.getPrototypeOf(new Uint8Array)).constructor;function c(e,t){var n=new Uint8Array(e.byteLength+t.byteLength);return n.set(new Uint8Array(e),0),n.set(new Uint8Array(t),e.byteLength),n.buffer}function u(e,t){if(!t)throw new Error("undefined index");return"string"===typeof t?u(e,t.split(".")):0===t.length?e:u(e[t[0]],t.slice(1))}var l=function(){function e(t,n,r,a){_classCallCheck(this,e),this._timeout=t,this._callback=n,this._args=r,this._label=a||"timer",this._task=null,this.started=!1}return _createClass(e,[{key:"start",value:function(){var e=this;this.started?this.reset():(this._task=setTimeout((function(){e._callback.apply(e,e._args)}),1e3*this._timeout),this.started=!0)}},{key:"clear",value:function(){this._task?(clearTimeout(this._task),this._task=null,this.started=!1):console.warn("Clearing a timer (".concat(this._label,") which is not started"))}},{key:"reset",value:function(){var e=this;this._task&&clearTimeout(this._task),this._task=setTimeout((function(){e._callback.apply(e,e._args)}),1e3*this._timeout),this.started=!0}}]),e}(),A=function(e){_inherits(n,e);var t=_createSuper(n);function n(e,a){var i,o=a.client_id,s=void 0===o?null:o,c=a.manager_id,u=void 0===c?null:c,l=a.default_context,A=void 0===l?null:l,d=a.name,h=void 0===d?null:d,f=a.codecs,g=void 0===f?null:f,p=a.method_timeout,m=void 0===p?null:p,I=a.max_message_buffer_size,v=void 0===I?0:I,y=a.debug,C=void 0!==y&&y;return _classCallCheck(this,n),(i=t.call(this,C))._codecs=g||{},Object(r.assert)(s&&"string"===typeof s),Object(r.assert)(s,"client_id is required"),i._client_id=s,i._name=h,i._connection_info=null,i._workspace=null,i.manager_id=u,i.default_context=A||{},i._method_annotations=new 
WeakMap,i._manager_service=null,i._max_message_buffer_size=v,i._chunk_store={},i._method_timeout=m||20,i._services={},i._object_store={services:i._services},e?(i.add_service({id:"built-in",type:"built-in",name:"RPC built-in services",config:{require_context:!0,visibility:"public"},ping:i._ping.bind(_assertThisInitialized(i)),get_service:i.get_local_service.bind(_assertThisInitialized(i)),register_service:i.register_service.bind(_assertThisInitialized(i)),message_cache:{create:i._create_message.bind(_assertThisInitialized(i)),append:i._append_message.bind(_assertThisInitialized(i)),process:i._process_message.bind(_assertThisInitialized(i)),remove:i._remove_message.bind(_assertThisInitialized(i))}}),i.on("method",i._handle_method.bind(_assertThisInitialized(i))),Object(r.assert)(e.emit_message&&e.on_message),i._emit_message=e.emit_message.bind(e),e.on_message(i._on_message.bind(_assertThisInitialized(i))),i._connection=e,i._get_connection_info()):i._emit_message=function(){console.log("No connection to emit message")},i}return _createClass(n,[{key:"_get_connection_info",value:function(){var e=_asyncToGenerator(_regeneratorRuntime().mark((function e(){var t;return _regeneratorRuntime().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:if(!this.manager_id){e.next=14;break}return e.prev=1,e.next=4,this.get_manager_service(5);case 4:return Object(r.assert)(this._manager_service),e.next=7,this._manager_service.get_connection_info();case 7:this._connection_info=e.sent,this._connection_info.reconnection_token&&this._connection.set_reconnection_token&&(this._connection.set_reconnection_token(this._connection_info.reconnection_token),t=.8*this._connection_info.reconnection_expires_in,console.info("Reconnection token obtained: ".concat(this._connection_info.reconnection_token,", will be refreshed in ").concat(t," seconds")),this._get_connection_info_task=setTimeout(this._get_connection_info.bind(this),1e3*t)),e.next=14;break;case 11:e.prev=11,e.t0=e.catch(1),console.warn("Failed to fetch user info from ",this.manager_id,e.t0);case 14:case"end":return e.stop()}}),e,this,[[1,11]])})));return function(){return e.apply(this,arguments)}}()},{key:"register_codec",value:function(e){if(!e.name||!e.encoder&&!e.decoder)throw new Error("Invalid codec format, please make sure you provide a name, type, encoder and decoder.");if(e.type)for(var t=0,n=Object.keys(this._codecs);t0&&"object"===typeof p[m-1]&&null!==p[m-1]&&p[m-1]._rkwargs)&&delete p[m-1]._rkwargs,v={type:"method",from:A._client_id,to:s,method:c},y={},p&&(y.args=p),I&&(y.with_kwargs=I),console.log("Calling remote method ".concat(s,":").concat(c,", session: ").concat(f)),t&&(v.parent=t),C=null,!u){i.next=27;break}return v.session=f,E="".concat(s,":").concat(c),C=new l(A._method_timeout,h,["Method call time out: ".concat(E)],E),i.next=26,A._encode_promise(d,h,f,!0,C,o);case 26:y.promise=i.sent;case 27:b=Object(a.encode)(v),y&&(B=Object(a.encode)(y),b=new Uint8Array([].concat(_toConsumableArray(b),_toConsumableArray(B)))),b.length<=513024?A._emit_message(b).then((function(){C&&(console.log("Start watchdog timer."),C.start())})):A._send_chunks(b,s,t).then((function(){C&&(console.log("Start watchdog timer."),C.start())}));case 31:case"end":return i.stop()}}),i)})));return function(e,t){return i.apply(this,arguments)}}())}return d.__rpc_object__=e,d}},{key:"_notify_service_update",value:function(){var e=_asyncToGenerator(_regeneratorRuntime().mark((function e(){return _regeneratorRuntime().wrap((function(e){for(;;)switch(e.prev=e.next){case 
0:if(!this.manager_id){e.next=12;break}return e.prev=1,e.next=4,this.get_manager_service(5);case 4:return Object(r.assert)(this._manager_service),e.next=7,this._manager_service.update_client_info(this.get_client_info());case 7:e.next=12;break;case 9:e.prev=9,e.t0=e.catch(1),console.warn("Failed to notify service update to",this.manager_id,e.t0);case 12:case"end":return e.stop()}}),e,this,[[1,9]])})));return function(){return e.apply(this,arguments)}}()},{key:"get_client_info",value:function(){for(var e=[],t=0,n=Object.values(this._services);t=0&&this._event_handlers[e].splice(n,1)}}else this._event_handlers={}}},{key:"_fire",value:function(e,t){if(this._event_handlers[e])for(var n=this._event_handlers[e].length;n--;){var r=this._event_handlers[e][n];try{r(t)}catch(a){console.error(a)}finally{r.___event_run_once&&this._event_handlers[e].splice(n,1)}}else this._debug&&console.warn("unhandled event",e,t)}}]),e}()},"./src/hypha/websocket-client.js":function(e,t,n){"use strict";n.r(t),n.d(t,"connectToServer",(function(){return s}));var r=n("./src/hypha/rpc.js");n.d(t,"RPC",(function(){return r.RPC})),n.d(t,"API_VERSION",(function(){return r.API_VERSION}));var a=n("./src/hypha/utils.js");n.d(t,"loadRequirements",(function(){return a.loadRequirements}));var i=n("./package.json");n.d(t,"VERSION",(function(){return i.version}));var o=function(){function e(t,n,r,i,o){_classCallCheck(this,e),Object(a.assert)(t&&n,"server_url and client_id are required"),t=t+"?client_id="+n,r&&(t+="&workspace="+r),i&&(t+="&token="+i),this._websocket=null,this._handle_message=null,this._reconnection_token=null,this._server_url=t,this._timeout=o||5}return _createClass(e,[{key:"set_reconnection_token",value:function(e){this._reconnection_token=e}},{key:"on_message",value:function(e){Object(a.assert)(e,"handler is required"),this._handle_message=e}},{key:"open",value:function(){var e=_asyncToGenerator(_regeneratorRuntime().mark((function e(){var t,n,r,i=this;return _regeneratorRuntime().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return t=this._reconnection_token?"".concat(this._server_url,"&reconnection_token=").concat(this._reconnection_token):this._server_url,console.info("Receating a new connection to ",t.split("?")[0]),this._websocket=new WebSocket(t),this._websocket.binaryType="arraybuffer",this._websocket.onmessage=function(e){var t=e.data;i._handle_message(t)},n=this,this._websocket.onclose=function(){console.log("websocket closed"),n._websocket=null},e.next=9,new Promise((function(e){i._websocket.addEventListener("open",e)}));case 9:return r=e.sent,e.next=12,Object(a.waitFor)(r,this._timeout,"Timeout Error: Failed connect to the server "+t.split("?")[0]);case 12:return e.abrupt("return",e.sent);case 13:case"end":return e.stop()}}),e,this)})));return function(){return e.apply(this,arguments)}}()},{key:"emit_message",value:function(){var e=_asyncToGenerator(_regeneratorRuntime().mark((function e(t){return _regeneratorRuntime().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:if(Object(a.assert)(this._handle_message,"No handler for message"),this._websocket){e.next=4;break}return e.next=4,this.open();case 4:e.prev=4,t.buffer&&(t=t.buffer),this._websocket.send(t),e.next=13;break;case 9:throw e.prev=9,e.t0=e.catch(4),console.error("Failed to send data, error: ".concat(e.t0)),e.t0;case 13:case"end":return e.stop()}}),e,this,[[4,9]])})));return function(t){return e.apply(this,arguments)}}()},{key:"disconnect",value:function(){var e=_asyncToGenerator(_regeneratorRuntime().mark((function e(t){var n;return 
_regeneratorRuntime().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:n=this._websocket,this._websocket=null,n&&n.close(1e3,t),console.info("Websocket connection disconnected (".concat(t,")"));case 4:case"end":return e.stop()}}),e,this)})));return function(t){return e.apply(this,arguments)}}()}]),e}();function s(e){return c.apply(this,arguments)}function c(){return c=_asyncToGenerator(_regeneratorRuntime().mark((function e(t){var n,i,s,c,u,l,A,d,h,f,g;return _regeneratorRuntime().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return g=function(){return(g=_asyncToGenerator(_regeneratorRuntime().mark((function e(){return _regeneratorRuntime().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return e.next=2,c.disconnect();case 2:return e.next=4,s.disconnect();case 4:case"end":return e.stop()}}),e)})))).apply(this,arguments)},f=function(){return g.apply(this,arguments)},h=function(){return(h=_asyncToGenerator(_regeneratorRuntime().mark((function e(t){return _regeneratorRuntime().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return e.next=2,u.get_service(t+":default");case 2:return e.abrupt("return",e.sent);case 3:case"end":return e.stop()}}),e)})))).apply(this,arguments)},d=function(e){return h.apply(this,arguments)},A=function(){return(A=_asyncToGenerator(_regeneratorRuntime().mark((function e(n){return _regeneratorRuntime().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return n.id="default",n.name=t.name||n.id,e.next=4,c.register_service(n,!0);case 4:case"end":return e.stop()}}),e)})))).apply(this,arguments)},l=function(e){return A.apply(this,arguments)},(n=t.client_id)||(n=Object(a.randId)()),(i=t.server_url).startsWith("http://")?i=i.replace("http://","ws://").replace(/\/$/,"")+"/ws":i.startsWith("https://")&&(i=i.replace("https://","wss://").replace(/\/$/,"")+"/ws"),s=new o(i,n,t.workspace,t.token,t.method_timeout||5),e.next=13,s.open();case 13:return c=new r.RPC(s,{client_id:n,manager_id:"workspace-manager",default_context:{connection_type:"websocket"},name:t.name,method_timeout:t.method_timeout}),e.next=16,c.get_remote_service("workspace-manager:default");case 16:return(u=e.sent).rpc=c,u.export=l,u.getPlugin=d,u.listPlugins=u.listServices,u.disconnect=f,u.registerCodec=c.register_codec.bind(c),e.abrupt("return",u);case 24:case"end":return e.stop()}}),e)}))),c.apply(this,arguments)}}})},module.exports=factory()},69349:function(module,__unused_webpack_exports,__webpack_require__){var _toConsumableArray=__webpack_require__(861).default,_createForOfIteratorHelper=__webpack_require__(74704).default,_slicedToArray=__webpack_require__(27424).default,_regeneratorRuntime=__webpack_require__(17061).default,_asyncToGenerator=__webpack_require__(17156).default,_assertThisInitialized=__webpack_require__(66115).default,_inherits=__webpack_require__(61655).default,_createSuper=__webpack_require__(26389).default,_classCallCheck=__webpack_require__(56690).default,_createClass=__webpack_require__(89728).default,factory;window,factory=function(){return function(e){var t={};function n(r){if(t[r])return t[r].exports;var a=t[r]={i:r,l:!1,exports:{}};return e[r].call(a.exports,a,a.exports,n),a.l=!0,a.exports}return n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!==typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t)return e;if(4&t&&"object"===typeof e&&e&&e.__esModule)return 
e;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var a in e)n.d(r,a,function(t){return e[t]}.bind(null,a));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="",n(n.s="./src/hypha/rpc.js")}({"./node_modules/@msgpack/msgpack/dist.es5+esm/CachedKeyDecoder.mjs":function(e,t,n){"use strict";n.r(t),n.d(t,"CachedKeyDecoder",(function(){return a}));var r=n("./node_modules/@msgpack/msgpack/dist.es5+esm/utils/utf8.mjs"),a=function(){function e(e,t){void 0===e&&(e=16),void 0===t&&(t=16),this.maxKeyLength=e,this.maxLengthPerKey=t,this.hit=0,this.miss=0,this.caches=[];for(var n=0;n0&&e<=this.maxKeyLength},e.prototype.find=function(e,t,n){e:for(var r=0,a=this.caches[n-1];r=this.maxLengthPerKey?n[Math.random()*n.length|0]=r:n.push(r)},e.prototype.decode=function(e,t,n){var a=this.find(e,t,n);if(null!=a)return this.hit++,a;this.miss++;var i=Object(r.utf8DecodeJs)(e,t,n),o=Uint8Array.prototype.slice.call(e,t,t+n);return this.store(o,i),i},e}()},"./node_modules/@msgpack/msgpack/dist.es5+esm/DecodeError.mjs":function(e,t,n){"use strict";n.r(t),n.d(t,"DecodeError",(function(){return a}));var r=function(){var e=function(t,n){return e=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(e,t){e.__proto__=t}||function(e,t){for(var n in t)Object.prototype.hasOwnProperty.call(t,n)&&(e[n]=t[n])},e(t,n)};return function(t,n){if("function"!==typeof n&&null!==n)throw new TypeError("Class extends value "+String(n)+" is not a constructor or null");function r(){this.constructor=t}e(t,n),t.prototype=null===n?Object.create(n):(r.prototype=n.prototype,new r)}}(),a=function(e){function t(n){var r=e.call(this,n)||this,a=Object.create(t.prototype);return Object.setPrototypeOf(r,a),Object.defineProperty(r,"name",{configurable:!0,enumerable:!1,value:t.name}),r}return r(t,e),t}(Error)},"./node_modules/@msgpack/msgpack/dist.es5+esm/Decoder.mjs":function(e,t,n){"use strict";n.r(t),n.d(t,"DataViewIndexOutOfBoundsError",(function(){return I})),n.d(t,"Decoder",(function(){return C}));var r=n("./node_modules/@msgpack/msgpack/dist.es5+esm/utils/prettyByte.mjs"),a=n("./node_modules/@msgpack/msgpack/dist.es5+esm/ExtensionCodec.mjs"),i=n("./node_modules/@msgpack/msgpack/dist.es5+esm/utils/int.mjs"),o=n("./node_modules/@msgpack/msgpack/dist.es5+esm/utils/utf8.mjs"),s=n("./node_modules/@msgpack/msgpack/dist.es5+esm/utils/typedArrays.mjs"),c=n("./node_modules/@msgpack/msgpack/dist.es5+esm/CachedKeyDecoder.mjs"),u=n("./node_modules/@msgpack/msgpack/dist.es5+esm/DecodeError.mjs"),l=function(e,t,n,r){return new(n||(n=Promise))((function(a,i){function o(e){try{c(r.next(e))}catch(t){i(t)}}function s(e){try{c(r.throw(e))}catch(t){i(t)}}function c(e){var t;e.done?a(e.value):(t=e.value,t instanceof n?t:new n((function(e){e(t)}))).then(o,s)}c((r=r.apply(e,t||[])).next())}))},A=function(e,t){var n,r,a,i,o={label:0,sent:function(){if(1&a[0])throw a[1];return a[1]},trys:[],ops:[]};return i={next:s(0),throw:s(1),return:s(2)},"function"===typeof Symbol&&(i[Symbol.iterator]=function(){return this}),i;function s(i){return function(s){return function(i){if(n)throw new TypeError("Generator is already executing.");for(;o;)try{if(n=1,r&&(a=2&i[0]?r.return:i[0]?r.throw||((a=r.return)&&a.call(r),0):r.next)&&!(a=a.call(r,i[1])).done)return a;switch(r=0,a&&(i=[2&i[0],a.value]),i[0]){case 0:case 1:a=i;break;case 4:return 
o.label++,{value:i[1],done:!1};case 5:o.label++,r=i[1],i=[0];continue;case 7:i=o.ops.pop(),o.trys.pop();continue;default:if(!(a=(a=o.trys).length>0&&a[a.length-1])&&(6===i[0]||2===i[0])){o=0;continue}if(3===i[0]&&(!a||i[1]>a[0]&&i[1]1||s(e,t)}))})}function s(e,t){try{!function(e){e.value instanceof h?Promise.resolve(e.value.v).then(c,u):l(i[0][2],e)}(a[e](t))}catch(n){l(i[0][3],n)}}function c(e){s("next",e)}function u(e){s("throw",e)}function l(e,t){e(t),i.shift(),i.length&&s(i[0][0],i[0][1])}},g=function(e){var t=typeof e;return"string"===t||"number"===t},p=new DataView(new ArrayBuffer(0)),m=new Uint8Array(p.buffer),I=function(){try{p.getInt8(0)}catch(e){return e.constructor}throw new Error("never reached")}(),v=new I("Insufficient data"),y=new c.CachedKeyDecoder,C=function(){function e(e,t,n,r,o,s,c,u){void 0===e&&(e=a.ExtensionCodec.defaultCodec),void 0===t&&(t=void 0),void 0===n&&(n=i.UINT32_MAX),void 0===r&&(r=i.UINT32_MAX),void 0===o&&(o=i.UINT32_MAX),void 0===s&&(s=i.UINT32_MAX),void 0===c&&(c=i.UINT32_MAX),void 0===u&&(u=y),this.extensionCodec=e,this.context=t,this.maxStrLength=n,this.maxBinLength=r,this.maxArrayLength=o,this.maxMapLength=s,this.maxExtLength=c,this.keyDecoder=u,this.totalPos=0,this.pos=0,this.view=p,this.bytes=m,this.headByte=-1,this.stack=[]}return e.prototype.reinitializeState=function(){this.totalPos=0,this.headByte=-1,this.stack.length=0},e.prototype.setBuffer=function(e){this.bytes=Object(s.ensureUint8Array)(e),this.view=Object(s.createDataView)(this.bytes),this.pos=0},e.prototype.appendBuffer=function(e){if(-1!==this.headByte||this.hasRemaining(1)){var t=this.bytes.subarray(this.pos),n=Object(s.ensureUint8Array)(e),r=new Uint8Array(t.length+n.length);r.set(t),r.set(n,t.length),this.setBuffer(r)}else this.setBuffer(e)},e.prototype.hasRemaining=function(e){return this.view.byteLength-this.pos>=e},e.prototype.createExtraByteError=function(e){var t=this.view,n=this.pos;return new RangeError("Extra "+(t.byteLength-n)+" of "+t.byteLength+" byte(s) found at buffer["+e+"]")},e.prototype.decode=function(e){this.reinitializeState(),this.setBuffer(e);var t=this.doDecodeSync();if(this.hasRemaining(1))throw this.createExtraByteError(this.pos);return t},e.prototype.decodeMulti=function(e){return A(this,(function(t){switch(t.label){case 0:this.reinitializeState(),this.setBuffer(e),t.label=1;case 1:return this.hasRemaining(1)?[4,this.doDecodeSync()]:[3,3];case 2:return t.sent(),[3,1];case 3:return[2]}}))},e.prototype.decodeAsync=function(e){var t,n,a,i;return l(this,void 0,void 0,(function(){var o,s,c,u,l,h,f,g;return A(this,(function(A){switch(A.label){case 0:o=!1,A.label=1;case 1:A.trys.push([1,6,7,12]),t=d(e),A.label=2;case 2:return[4,t.next()];case 3:if((n=A.sent()).done)return[3,5];if(c=n.value,o)throw this.createExtraByteError(this.totalPos);this.appendBuffer(c);try{s=this.doDecodeSync(),o=!0}catch(p){if(!(p instanceof I))throw p}this.totalPos+=this.pos,A.label=4;case 4:return[3,2];case 5:return[3,12];case 6:return u=A.sent(),a={error:u},[3,12];case 7:return A.trys.push([7,,10,11]),n&&!n.done&&(i=t.return)?[4,i.call(t)]:[3,9];case 8:A.sent(),A.label=9;case 9:return[3,11];case 10:if(a)throw a.error;return[7];case 11:return[7];case 12:if(o){if(this.hasRemaining(1))throw this.createExtraByteError(this.totalPos);return[2,s]}throw h=(l=this).headByte,f=l.pos,g=l.totalPos,new RangeError("Insufficient data in parsing "+Object(r.prettyByte)(h)+" at "+g+" ("+f+" in the current buffer)")}}))}))},e.prototype.decodeArrayStream=function(e){return 
this.decodeMultiAsync(e,!0)},e.prototype.decodeStream=function(e){return this.decodeMultiAsync(e,!1)},e.prototype.decodeMultiAsync=function(e,t){return f(this,arguments,(function(){var n,r,a,i,o,s,c,u,l;return A(this,(function(A){switch(A.label){case 0:n=t,r=-1,A.label=1;case 1:A.trys.push([1,13,14,19]),a=d(e),A.label=2;case 2:return[4,h(a.next())];case 3:if((i=A.sent()).done)return[3,12];if(o=i.value,t&&0===r)throw this.createExtraByteError(this.totalPos);this.appendBuffer(o),n&&(r=this.readArraySize(),n=!1,this.complete()),A.label=4;case 4:A.trys.push([4,9,,10]),A.label=5;case 5:return[4,h(this.doDecodeSync())];case 6:return[4,A.sent()];case 7:return A.sent(),0===--r?[3,8]:[3,5];case 8:return[3,10];case 9:if(!((s=A.sent())instanceof I))throw s;return[3,10];case 10:this.totalPos+=this.pos,A.label=11;case 11:return[3,2];case 12:return[3,19];case 13:return c=A.sent(),u={error:c},[3,19];case 14:return A.trys.push([14,,17,18]),i&&!i.done&&(l=a.return)?[4,h(l.call(a))]:[3,16];case 15:A.sent(),A.label=16;case 16:return[3,18];case 17:if(u)throw u.error;return[7];case 18:return[7];case 19:return[2]}}))}))},e.prototype.doDecodeSync=function(){e:for(;;){var e=this.readHeadByte(),t=void 0;if(e>=224)t=e-256;else if(e<192)if(e<128)t=e;else if(e<144){if(0!==(a=e-128)){this.pushMapState(a),this.complete();continue e}t={}}else if(e<160){if(0!==(a=e-144)){this.pushArrayState(a),this.complete();continue e}t=[]}else{var n=e-160;t=this.decodeUtf8String(n,0)}else if(192===e)t=null;else if(194===e)t=!1;else if(195===e)t=!0;else if(202===e)t=this.readF32();else if(203===e)t=this.readF64();else if(204===e)t=this.readU8();else if(205===e)t=this.readU16();else if(206===e)t=this.readU32();else if(207===e)t=this.readU64();else if(208===e)t=this.readI8();else if(209===e)t=this.readI16();else if(210===e)t=this.readI32();else if(211===e)t=this.readI64();else if(217===e)n=this.lookU8(),t=this.decodeUtf8String(n,1);else if(218===e)n=this.lookU16(),t=this.decodeUtf8String(n,2);else if(219===e)n=this.lookU32(),t=this.decodeUtf8String(n,4);else if(220===e){if(0!==(a=this.readU16())){this.pushArrayState(a),this.complete();continue e}t=[]}else if(221===e){if(0!==(a=this.readU32())){this.pushArrayState(a),this.complete();continue e}t=[]}else if(222===e){if(0!==(a=this.readU16())){this.pushMapState(a),this.complete();continue e}t={}}else if(223===e){if(0!==(a=this.readU32())){this.pushMapState(a),this.complete();continue e}t={}}else if(196===e){var a=this.lookU8();t=this.decodeBinary(a,1)}else if(197===e)a=this.lookU16(),t=this.decodeBinary(a,2);else if(198===e)a=this.lookU32(),t=this.decodeBinary(a,4);else if(212===e)t=this.decodeExtension(1,0);else if(213===e)t=this.decodeExtension(2,0);else if(214===e)t=this.decodeExtension(4,0);else if(215===e)t=this.decodeExtension(8,0);else if(216===e)t=this.decodeExtension(16,0);else if(199===e)a=this.lookU8(),t=this.decodeExtension(a,1);else if(200===e)a=this.lookU16(),t=this.decodeExtension(a,2);else{if(201!==e)throw new u.DecodeError("Unrecognized type byte: "+Object(r.prettyByte)(e));a=this.lookU32(),t=this.decodeExtension(a,4)}this.complete();for(var i=this.stack;i.length>0;){var o=i[i.length-1];if(0===o.type){if(o.array[o.position]=t,o.position++,o.position!==o.size)continue e;i.pop(),t=o.array}else{if(1===o.type){if(!g(t))throw new u.DecodeError("The type of key must be string or number but "+typeof t);if("__proto__"===t)throw new u.DecodeError("The key __proto__ is not allowed");o.key=t,o.type=2;continue 
e}if(o.map[o.key]=t,o.readCount++,o.readCount!==o.size){o.key=null,o.type=1;continue e}i.pop(),t=o.map}}return t}},e.prototype.readHeadByte=function(){return-1===this.headByte&&(this.headByte=this.readU8()),this.headByte},e.prototype.complete=function(){this.headByte=-1},e.prototype.readArraySize=function(){var e=this.readHeadByte();switch(e){case 220:return this.readU16();case 221:return this.readU32();default:if(e<160)return e-144;throw new u.DecodeError("Unrecognized array type byte: "+Object(r.prettyByte)(e))}},e.prototype.pushMapState=function(e){if(e>this.maxMapLength)throw new u.DecodeError("Max length exceeded: map length ("+e+") > maxMapLengthLength ("+this.maxMapLength+")");this.stack.push({type:1,size:e,key:null,readCount:0,map:{}})},e.prototype.pushArrayState=function(e){if(e>this.maxArrayLength)throw new u.DecodeError("Max length exceeded: array length ("+e+") > maxArrayLength ("+this.maxArrayLength+")");this.stack.push({type:0,size:e,array:new Array(e),position:0})},e.prototype.decodeUtf8String=function(e,t){var n;if(e>this.maxStrLength)throw new u.DecodeError("Max length exceeded: UTF-8 byte length ("+e+") > maxStrLength ("+this.maxStrLength+")");if(this.bytes.byteLengtho.TEXT_DECODER_THRESHOLD?Object(o.utf8DecodeTD)(this.bytes,a,e):Object(o.utf8DecodeJs)(this.bytes,a,e),this.pos+=t+e,r},e.prototype.stateIsMapKey=function(){return this.stack.length>0&&1===this.stack[this.stack.length-1].type},e.prototype.decodeBinary=function(e,t){if(e>this.maxBinLength)throw new u.DecodeError("Max length exceeded: bin length ("+e+") > maxBinLength ("+this.maxBinLength+")");if(!this.hasRemaining(e+t))throw v;var n=this.pos+t,r=this.bytes.subarray(n,n+e);return this.pos+=t+e,r},e.prototype.decodeExtension=function(e,t){if(e>this.maxExtLength)throw new u.DecodeError("Max length exceeded: ext length ("+e+") > maxExtLength ("+this.maxExtLength+")");var n=this.view.getInt8(this.pos+t),r=this.decodeBinary(e,t+1);return this.extensionCodec.decode(r,n,this.context)},e.prototype.lookU8=function(){return this.view.getUint8(this.pos)},e.prototype.lookU16=function(){return this.view.getUint16(this.pos)},e.prototype.lookU32=function(){return this.view.getUint32(this.pos)},e.prototype.readU8=function(){var e=this.view.getUint8(this.pos);return this.pos++,e},e.prototype.readI8=function(){var e=this.view.getInt8(this.pos);return this.pos++,e},e.prototype.readU16=function(){var e=this.view.getUint16(this.pos);return this.pos+=2,e},e.prototype.readI16=function(){var e=this.view.getInt16(this.pos);return this.pos+=2,e},e.prototype.readU32=function(){var e=this.view.getUint32(this.pos);return this.pos+=4,e},e.prototype.readI32=function(){var e=this.view.getInt32(this.pos);return this.pos+=4,e},e.prototype.readU64=function(){var e=Object(i.getUint64)(this.view,this.pos);return this.pos+=8,e},e.prototype.readI64=function(){var e=Object(i.getInt64)(this.view,this.pos);return this.pos+=8,e},e.prototype.readF32=function(){var e=this.view.getFloat32(this.pos);return this.pos+=4,e},e.prototype.readF64=function(){var e=this.view.getFloat64(this.pos);return this.pos+=8,e},e}()},"./node_modules/@msgpack/msgpack/dist.es5+esm/Encoder.mjs":function(e,t,n){"use strict";n.r(t),n.d(t,"DEFAULT_MAX_DEPTH",(function(){return s})),n.d(t,"DEFAULT_INITIAL_BUFFER_SIZE",(function(){return c})),n.d(t,"Encoder",(function(){return u}));var 
r=n("./node_modules/@msgpack/msgpack/dist.es5+esm/utils/utf8.mjs"),a=n("./node_modules/@msgpack/msgpack/dist.es5+esm/ExtensionCodec.mjs"),i=n("./node_modules/@msgpack/msgpack/dist.es5+esm/utils/int.mjs"),o=n("./node_modules/@msgpack/msgpack/dist.es5+esm/utils/typedArrays.mjs"),s=100,c=2048,u=function(){function e(e,t,n,r,i,o,u,l){void 0===e&&(e=a.ExtensionCodec.defaultCodec),void 0===t&&(t=void 0),void 0===n&&(n=s),void 0===r&&(r=c),void 0===i&&(i=!1),void 0===o&&(o=!1),void 0===u&&(u=!1),void 0===l&&(l=!1),this.extensionCodec=e,this.context=t,this.maxDepth=n,this.initialBufferSize=r,this.sortKeys=i,this.forceFloat32=o,this.ignoreUndefined=u,this.forceIntegerToFloat=l,this.pos=0,this.view=new DataView(new ArrayBuffer(this.initialBufferSize)),this.bytes=new Uint8Array(this.view.buffer)}return e.prototype.getUint8Array=function(){return this.bytes.subarray(0,this.pos)},e.prototype.reinitializeState=function(){this.pos=0},e.prototype.encode=function(e){return this.reinitializeState(),this.doEncode(e,1),this.getUint8Array()},e.prototype.doEncode=function(e,t){if(t>this.maxDepth)throw new Error("Too deep objects in depth "+t);null==e?this.encodeNil():"boolean"===typeof e?this.encodeBoolean(e):"number"===typeof e?this.encodeNumber(e):"string"===typeof e?this.encodeString(e):this.encodeObject(e,t)},e.prototype.ensureBufferSizeToWrite=function(e){var t=this.pos+e;this.view.byteLength=0?e<128?this.writeU8(e):e<256?(this.writeU8(204),this.writeU8(e)):e<65536?(this.writeU8(205),this.writeU16(e)):e<4294967296?(this.writeU8(206),this.writeU32(e)):(this.writeU8(207),this.writeU64(e)):e>=-32?this.writeU8(224|e+32):e>=-128?(this.writeU8(208),this.writeI8(e)):e>=-32768?(this.writeU8(209),this.writeI16(e)):e>=-2147483648?(this.writeU8(210),this.writeI32(e)):(this.writeU8(211),this.writeI64(e)):this.forceFloat32?(this.writeU8(202),this.writeF32(e)):(this.writeU8(203),this.writeF64(e))},e.prototype.writeStringHeader=function(e){if(e<32)this.writeU8(160+e);else if(e<256)this.writeU8(217),this.writeU8(e);else if(e<65536)this.writeU8(218),this.writeU16(e);else{if(!(e<4294967296))throw new Error("Too long string: "+e+" bytes in UTF-8");this.writeU8(219),this.writeU32(e)}},e.prototype.encodeString=function(e){if(e.length>r.TEXT_ENCODER_THRESHOLD){var t=Object(r.utf8Count)(e);this.ensureBufferSizeToWrite(5+t),this.writeStringHeader(t),Object(r.utf8EncodeTE)(e,this.bytes,this.pos),this.pos+=t}else t=Object(r.utf8Count)(e),this.ensureBufferSizeToWrite(5+t),this.writeStringHeader(t),Object(r.utf8EncodeJs)(e,this.bytes,this.pos),this.pos+=t},e.prototype.encodeObject=function(e,t){var n=this.extensionCodec.tryToEncode(e,this.context);if(null!=n)this.encodeExtension(n);else if(Array.isArray(e))this.encodeArray(e,t);else if(ArrayBuffer.isView(e))this.encodeBinary(e);else{if("object"!==typeof e)throw new Error("Unrecognized object: "+Object.prototype.toString.apply(e));this.encodeMap(e,t)}},e.prototype.encodeBinary=function(e){var t=e.byteLength;if(t<256)this.writeU8(196),this.writeU8(t);else if(t<65536)this.writeU8(197),this.writeU16(t);else{if(!(t<4294967296))throw new Error("Too large binary: "+t);this.writeU8(198),this.writeU32(t)}var n=Object(o.ensureUint8Array)(e);this.writeU8a(n)},e.prototype.encodeArray=function(e,t){var n=e.length;if(n<16)this.writeU8(144+n);else if(n<65536)this.writeU8(220),this.writeU16(n);else{if(!(n<4294967296))throw new Error("Too large array: "+n);this.writeU8(221),this.writeU32(n)}for(var r=0,a=e;r=0)this.encoders[t]=n,this.decoders[t]=r;else{var 
a=1+t;this.builtInEncoders[a]=n,this.builtInDecoders[a]=r}},e.prototype.tryToEncode=function(e,t){for(var n=0;n0&&a[a.length-1])&&(6===i[0]||2===i[0])){o=0;continue}if(3===i[0]&&(!a||i[1]>a[0]&&i[1]=0&&r>=0&&n<=s){if(0===r&&n<=o){var i=new Uint8Array(4);return(t=new DataView(i.buffer)).setUint32(0,n),i}var c=n/4294967296,u=4294967295&n;return i=new Uint8Array(8),(t=new DataView(i.buffer)).setUint32(0,r<<2|3&c),t.setUint32(4,u),i}return i=new Uint8Array(12),(t=new DataView(i.buffer)).setUint32(0,r),Object(a.setInt64)(t,4,n),i}function u(e){var t=e.getTime(),n=Math.floor(t/1e3),r=1e6*(t-1e3*n),a=Math.floor(r/1e9);return{sec:n+a,nsec:r-1e9*a}}function l(e){return e instanceof Date?c(u(e)):null}function A(e){var t=new DataView(e.buffer,e.byteOffset,e.byteLength);switch(e.byteLength){case 4:return{sec:t.getUint32(0),nsec:0};case 8:var n=t.getUint32(0);return{sec:4294967296*(3&n)+t.getUint32(4),nsec:n>>>2};case 12:return{sec:Object(a.getInt64)(t,4),nsec:t.getUint32(0)};default:throw new r.DecodeError("Unrecognized data size for timestamp (expected 4, 8, or 12): "+e.length)}}function d(e){var t=A(e);return new Date(1e3*t.sec+t.nsec/1e6)}var h={type:i,encode:l,decode:d}},"./node_modules/@msgpack/msgpack/dist.es5+esm/utils/int.mjs":function(e,t,n){"use strict";n.r(t),n.d(t,"UINT32_MAX",(function(){return r})),n.d(t,"setUint64",(function(){return a})),n.d(t,"setInt64",(function(){return i})),n.d(t,"getInt64",(function(){return o})),n.d(t,"getUint64",(function(){return s}));var r=4294967295;function a(e,t,n){var r=n/4294967296,a=n;e.setUint32(t,r),e.setUint32(t+4,a)}function i(e,t,n){var r=Math.floor(n/4294967296),a=n;e.setUint32(t,r),e.setUint32(t+4,a)}function o(e,t){return 4294967296*e.getInt32(t)+e.getUint32(t+4)}function s(e,t){return 4294967296*e.getUint32(t)+e.getUint32(t+4)}},"./node_modules/@msgpack/msgpack/dist.es5+esm/utils/prettyByte.mjs":function(e,t,n){"use strict";function r(e){return(e<0?"-":"")+"0x"+Math.abs(e).toString(16).padStart(2,"0")}n.r(t),n.d(t,"prettyByte",(function(){return r}))},"./node_modules/@msgpack/msgpack/dist.es5+esm/utils/stream.mjs":function(e,t,n){"use strict";n.r(t),n.d(t,"isAsyncIterable",(function(){return o})),n.d(t,"asyncIterableFromStream",(function(){return s})),n.d(t,"ensureAsyncIterable",(function(){return c}));var r=function(e,t){var n,r,a,i,o={label:0,sent:function(){if(1&a[0])throw a[1];return a[1]},trys:[],ops:[]};return i={next:s(0),throw:s(1),return:s(2)},"function"===typeof Symbol&&(i[Symbol.iterator]=function(){return this}),i;function s(i){return function(s){return function(i){if(n)throw new TypeError("Generator is already executing.");for(;o;)try{if(n=1,r&&(a=2&i[0]?r.return:i[0]?r.throw||((a=r.return)&&a.call(r),0):r.next)&&!(a=a.call(r,i[1])).done)return a;switch(r=0,a&&(i=[2&i[0],a.value]),i[0]){case 0:case 1:a=i;break;case 4:return o.label++,{value:i[1],done:!1};case 5:o.label++,r=i[1],i=[0];continue;case 7:i=o.ops.pop(),o.trys.pop();continue;default:if(!(a=(a=o.trys).length>0&&a[a.length-1])&&(6===i[0]||2===i[0])){o=0;continue}if(3===i[0]&&(!a||i[1]>a[0]&&i[1]1||c(e,t)}))})}function c(e,t){try{!function(e){e.value instanceof a?Promise.resolve(e.value.v).then(u,l):A(o[0][2],e)}(i[e](t))}catch(n){A(o[0][3],n)}}function u(e){c("next",e)}function l(e){c("throw",e)}function A(e,t){e(t),o.shift(),o.length&&c(o[0][0],o[0][1])}};function o(e){return null!=e[Symbol.asyncIterator]}function s(e){return i(this,arguments,(function(){var t,n,i,o;return r(this,(function(r){switch(r.label){case 0:t=e.getReader(),r.label=1;case 
1:r.trys.push([1,,9,10]),r.label=2;case 2:return[4,a(t.read())];case 3:return n=r.sent(),i=n.done,o=n.value,i?[4,a(void 0)]:[3,5];case 4:return[2,r.sent()];case 5:return function(e){if(null==e)throw new Error("Assertion Failure: value must not be null nor undefined")}(o),[4,a(o)];case 6:return[4,r.sent()];case 7:return r.sent(),[3,2];case 8:return[3,10];case 9:return t.releaseLock(),[7];case 10:return[2]}}))}))}function c(e){return o(e)?e:s(e)}},"./node_modules/@msgpack/msgpack/dist.es5+esm/utils/typedArrays.mjs":function(e,t,n){"use strict";function r(e){return e instanceof Uint8Array?e:ArrayBuffer.isView(e)?new Uint8Array(e.buffer,e.byteOffset,e.byteLength):e instanceof ArrayBuffer?new Uint8Array(e):Uint8Array.from(e)}function a(e){if(e instanceof ArrayBuffer)return new DataView(e);var t=r(e);return new DataView(t.buffer,t.byteOffset,t.byteLength)}n.r(t),n.d(t,"ensureUint8Array",(function(){return r})),n.d(t,"createDataView",(function(){return a}))},"./node_modules/@msgpack/msgpack/dist.es5+esm/utils/utf8.mjs":function(e,t,n){"use strict";n.r(t),n.d(t,"utf8Count",(function(){return i})),n.d(t,"utf8EncodeJs",(function(){return o})),n.d(t,"TEXT_ENCODER_THRESHOLD",(function(){return c})),n.d(t,"utf8EncodeTE",(function(){return u})),n.d(t,"utf8DecodeJs",(function(){return A})),n.d(t,"TEXT_DECODER_THRESHOLD",(function(){return h})),n.d(t,"utf8DecodeTD",(function(){return f}));var r=n("./node_modules/@msgpack/msgpack/dist.es5+esm/utils/int.mjs"),a=("undefined"===typeof process||"never"!=={NODE_ENV:"production",PUBLIC_URL:"",WDS_SOCKET_HOST:void 0,WDS_SOCKET_PATH:void 0,WDS_SOCKET_PORT:void 0,FAST_REFRESH:!0,REACT_APP_VERSION:"0.2.0"}.TEXT_ENCODING)&&"undefined"!==typeof TextEncoder&&"undefined"!==typeof TextDecoder;function i(e){for(var t=e.length,n=0,r=0;r=55296&&a<=56319&&r>6&31|192;else{if(o>=55296&&o<=56319&&i>12&15|224,t[a++]=o>>6&63|128):(t[a++]=o>>18&7|240,t[a++]=o>>12&63|128,t[a++]=o>>6&63|128)}t[a++]=63&o|128}else t[a++]=o}}var s=a?new TextEncoder:void 0,c=a?"undefined"!==typeof process&&"force"!=={NODE_ENV:"production",PUBLIC_URL:"",WDS_SOCKET_HOST:void 0,WDS_SOCKET_PATH:void 0,WDS_SOCKET_PORT:void 0,FAST_REFRESH:!0,REACT_APP_VERSION:"0.2.0"}.TEXT_ENCODING?200:0:r.UINT32_MAX,u=(null===s||void 0===s?void 0:s.encodeInto)?function(e,t,n){s.encodeInto(e,t.subarray(n))}:function(e,t,n){t.set(s.encode(e),n)},l=4096;function A(e,t,n){for(var r=t,a=r+n,i=[],o="";r65535&&(A-=65536,i.push(A>>>10&1023|55296),A=56320|1023&A),i.push(A)}else i.push(s);i.length>=l&&(o+=String.fromCharCode.apply(String,i),i.length=0)}return i.length>0&&(o+=String.fromCharCode.apply(String,i)),o}var d=a?new TextDecoder:null,h=a?"undefined"!==typeof process&&"force"!=={NODE_ENV:"production",PUBLIC_URL:"",WDS_SOCKET_HOST:void 0,WDS_SOCKET_PATH:void 0,WDS_SOCKET_PORT:void 0,FAST_REFRESH:!0,REACT_APP_VERSION:"0.2.0"}.TEXT_DECODER?200:0:r.UINT32_MAX;function f(e,t,n){var r=e.subarray(t,t+n);return d.decode(r)}},"./src/hypha/rpc.js":function(e,t,n){"use strict";n.r(t),n.d(t,"API_VERSION",(function(){return i})),n.d(t,"RPC",(function(){return A}));var r=n("./src/hypha/utils.js"),a=n("./node_modules/@msgpack/msgpack/dist.es5+esm/index.mjs"),i="0.3.0",o=512e3,s=Object.getPrototypeOf(Object.getPrototypeOf(new Uint8Array)).constructor;function c(e,t){var n=new Uint8Array(e.byteLength+t.byteLength);return n.set(new Uint8Array(e),0),n.set(new Uint8Array(t),e.byteLength),n.buffer}function u(e,t){if(!t)throw new Error("undefined index");return"string"===typeof t?u(e,t.split(".")):0===t.length?e:u(e[t[0]],t.slice(1))}var 
l=function(){function e(t,n,r,a){_classCallCheck(this,e),this._timeout=t,this._callback=n,this._args=r,this._label=a||"timer",this._task=null,this.started=!1}return _createClass(e,[{key:"start",value:function(){var e=this;this.started?this.reset():(this._task=setTimeout((function(){e._callback.apply(e,e._args)}),1e3*this._timeout),this.started=!0)}},{key:"clear",value:function(){this._task?(clearTimeout(this._task),this._task=null,this.started=!1):console.warn("Clearing a timer (".concat(this._label,") which is not started"))}},{key:"reset",value:function(){var e=this;this._task&&clearTimeout(this._task),this._task=setTimeout((function(){e._callback.apply(e,e._args)}),1e3*this._timeout),this.started=!0}}]),e}(),A=function(e){_inherits(n,e);var t=_createSuper(n);function n(e,a){var i,o=a.client_id,s=void 0===o?null:o,c=a.manager_id,u=void 0===c?null:c,l=a.default_context,A=void 0===l?null:l,d=a.name,h=void 0===d?null:d,f=a.codecs,g=void 0===f?null:f,p=a.method_timeout,m=void 0===p?null:p,I=a.max_message_buffer_size,v=void 0===I?0:I,y=a.debug,C=void 0!==y&&y;return _classCallCheck(this,n),(i=t.call(this,C))._codecs=g||{},Object(r.assert)(s&&"string"===typeof s),Object(r.assert)(s,"client_id is required"),i._client_id=s,i._name=h,i._connection_info=null,i._workspace=null,i.manager_id=u,i.default_context=A||{},i._method_annotations=new WeakMap,i._manager_service=null,i._max_message_buffer_size=v,i._chunk_store={},i._method_timeout=m||20,i._services={},i._object_store={services:i._services},e?(i.add_service({id:"built-in",type:"built-in",name:"RPC built-in services",config:{require_context:!0,visibility:"public"},ping:i._ping.bind(_assertThisInitialized(i)),get_service:i.get_local_service.bind(_assertThisInitialized(i)),register_service:i.register_service.bind(_assertThisInitialized(i)),message_cache:{create:i._create_message.bind(_assertThisInitialized(i)),append:i._append_message.bind(_assertThisInitialized(i)),process:i._process_message.bind(_assertThisInitialized(i)),remove:i._remove_message.bind(_assertThisInitialized(i))}}),i.on("method",i._handle_method.bind(_assertThisInitialized(i))),Object(r.assert)(e.emit_message&&e.on_message),i._emit_message=e.emit_message.bind(e),e.on_message(i._on_message.bind(_assertThisInitialized(i))),i._connection=e,i._get_connection_info()):i._emit_message=function(){console.log("No connection to emit message")},i}return _createClass(n,[{key:"_get_connection_info",value:function(){var e=_asyncToGenerator(_regeneratorRuntime().mark((function e(){var t;return _regeneratorRuntime().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:if(!this.manager_id){e.next=14;break}return e.prev=1,e.next=4,this.get_manager_service(5);case 4:return Object(r.assert)(this._manager_service),e.next=7,this._manager_service.get_connection_info();case 7:this._connection_info=e.sent,this._connection_info.reconnection_token&&this._connection.set_reconnection_token&&(this._connection.set_reconnection_token(this._connection_info.reconnection_token),t=.8*this._connection_info.reconnection_expires_in,console.info("Reconnection token obtained: ".concat(this._connection_info.reconnection_token,", will be refreshed in ").concat(t," seconds")),this._get_connection_info_task=setTimeout(this._get_connection_info.bind(this),1e3*t)),e.next=14;break;case 11:e.prev=11,e.t0=e.catch(1),console.warn("Failed to fetch user info from ",this.manager_id,e.t0);case 14:case"end":return e.stop()}}),e,this,[[1,11]])})));return function(){return 
e.apply(this,arguments)}}()},{key:"register_codec",value:function(e){if(!e.name||!e.encoder&&!e.decoder)throw new Error("Invalid codec format, please make sure you provide a name, type, encoder and decoder.");if(e.type)for(var t=0,n=Object.keys(this._codecs);t0&&"object"===typeof p[m-1]&&null!==p[m-1]&&p[m-1]._rkwargs)&&delete p[m-1]._rkwargs,v={type:"method",from:A._client_id,to:s,method:c},y={},p&&(y.args=p),I&&(y.with_kwargs=I),console.log("Calling remote method ".concat(s,":").concat(c,", session: ").concat(f)),t&&(v.parent=t),C=null,!u){i.next=27;break}return v.session=f,E="".concat(s,":").concat(c),C=new l(A._method_timeout,h,["Method call time out: ".concat(E)],E),i.next=26,A._encode_promise(d,h,f,!0,C,o);case 26:y.promise=i.sent;case 27:b=Object(a.encode)(v),y&&(B=Object(a.encode)(y),b=new Uint8Array([].concat(_toConsumableArray(b),_toConsumableArray(B)))),b.length<=513024?A._emit_message(b).then((function(){C&&(console.log("Start watchdog timer."),C.start())})):A._send_chunks(b,s,t).then((function(){C&&(console.log("Start watchdog timer."),C.start())}));case 31:case"end":return i.stop()}}),i)})));return function(e,t){return i.apply(this,arguments)}}())}return d.__rpc_object__=e,d}},{key:"_notify_service_update",value:function(){var e=_asyncToGenerator(_regeneratorRuntime().mark((function e(){return _regeneratorRuntime().wrap((function(e){for(;;)switch(e.prev=e.next){case 0:if(!this.manager_id){e.next=12;break}return e.prev=1,e.next=4,this.get_manager_service(5);case 4:return Object(r.assert)(this._manager_service),e.next=7,this._manager_service.update_client_info(this.get_client_info());case 7:e.next=12;break;case 9:e.prev=9,e.t0=e.catch(1),console.warn("Failed to notify service update to",this.manager_id,e.t0);case 12:case"end":return e.stop()}}),e,this,[[1,9]])})));return function(){return e.apply(this,arguments)}}()},{key:"get_client_info",value:function(){for(var e=[],t=0,n=Object.values(this._services);t=0&&this._event_handlers[e].splice(n,1)}}else this._event_handlers={}}},{key:"_fire",value:function(e,t){if(this._event_handlers[e])for(var n=this._event_handlers[e].length;n--;){var r=this._event_handlers[e][n];try{r(t)}catch(a){console.error(a)}finally{r.___event_run_once&&this._event_handlers[e].splice(n,1)}}else this._debug&&console.warn("unhandled event",e,t)}}]),e}()}})},module.exports=factory()},33322:function(module,__unused_webpack_exports,__webpack_require__){var _slicedToArray=__webpack_require__(27424).default,_regeneratorRuntime=__webpack_require__(17061).default,_asyncToGenerator=__webpack_require__(17156).default,_createForOfIteratorHelper=__webpack_require__(74704).default,_get=__webpack_require__(41588).default,_getPrototypeOf=__webpack_require__(73808).default,_wrapNativeSuper=__webpack_require__(33496).default,_classCallCheck=__webpack_require__(56690).default,_createClass=__webpack_require__(89728).default,_assertThisInitialized=__webpack_require__(66115).default,_inherits=__webpack_require__(61655).default,_createSuper=__webpack_require__(26389).default,factory;window,factory=function(){return function(e){var t={};function n(r){if(t[r])return t[r].exports;var a=t[r]={i:r,l:!1,exports:{}};return e[r].call(a.exports,a,a.exports,n),a.l=!0,a.exports}return n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!==typeof 
Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t)return e;if(4&t&&"object"===typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var a in e)n.d(r,a,function(t){return e[t]}.bind(null,a));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="",n(n.s="./src/socketIOMain.js")}({"./node_modules/@socket.io/base64-arraybuffer/dist/base64-arraybuffer.es5.js":function(e,t,n){"use strict";n.r(t),n.d(t,"decode",(function(){return s})),n.d(t,"encode",(function(){return o}));for(var r="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",a="undefined"===typeof Uint8Array?[]:new Uint8Array(256),i=0;i<64;i++)a[r.charCodeAt(i)]=i;var o=function(e){var t,n=new Uint8Array(e),a=n.length,i="";for(t=0;t>2],i+=r[(3&n[t])<<4|n[t+1]>>4],i+=r[(15&n[t+1])<<2|n[t+2]>>6],i+=r[63&n[t+2]];return a%3===2?i=i.substring(0,i.length-1)+"=":a%3===1&&(i=i.substring(0,i.length-2)+"=="),i},s=function(e){var t,n,r,i,o,s=.75*e.length,c=e.length,u=0;"="===e[e.length-1]&&(s--,"="===e[e.length-2]&&s--);var l=new ArrayBuffer(s),A=new Uint8Array(l);for(t=0;t>4,A[u++]=(15&r)<<4|i>>2,A[u++]=(3&i)<<6|63&o;return l}},"./node_modules/@socket.io/component-emitter/index.mjs":function(e,t,n){"use strict";function r(e){if(e)return function(e){for(var t in r.prototype)e[t]=r.prototype[t];return e}(e)}n.r(t),n.d(t,"Emitter",(function(){return r})),r.prototype.on=r.prototype.addEventListener=function(e,t){return this._callbacks=this._callbacks||{},(this._callbacks["$"+e]=this._callbacks["$"+e]||[]).push(t),this},r.prototype.once=function(e,t){function n(){this.off(e,n),t.apply(this,arguments)}return n.fn=t,this.on(e,n),this},r.prototype.off=r.prototype.removeListener=r.prototype.removeAllListeners=r.prototype.removeEventListener=function(e,t){if(this._callbacks=this._callbacks||{},0==arguments.length)return this._callbacks={},this;var n,r=this._callbacks["$"+e];if(!r)return this;if(1==arguments.length)return delete this._callbacks["$"+e],this;for(var a=0;a0?o-4:o;for(n=0;n>16&255,u[l++]=t>>8&255,u[l++]=255&t;return 2===s&&(t=a[e.charCodeAt(n)]<<2|a[e.charCodeAt(n+1)]>>4,u[l++]=255&t),1===s&&(t=a[e.charCodeAt(n)]<<10|a[e.charCodeAt(n+1)]<<4|a[e.charCodeAt(n+2)]>>2,u[l++]=t>>8&255,u[l++]=255&t),u},t.fromByteArray=function(e){for(var t,n=e.length,a=n%3,i=[],o=16383,s=0,c=n-a;sc?c:s+o));return 1===a?(t=e[n-1],i.push(r[t>>2]+r[t<<4&63]+"==")):2===a&&(t=(e[n-2]<<8)+e[n-1],i.push(r[t>>10]+r[t>>4&63]+r[t<<2&63]+"=")),i.join("")};for(var r=[],a=[],i="undefined"!==typeof Uint8Array?Uint8Array:Array,o="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",s=0;s<64;++s)r[s]=o[s],a[o.charCodeAt(s)]=s;function c(e){var t=e.length;if(t%4>0)throw new Error("Invalid string. 
Length must be a multiple of 4");var n=e.indexOf("=");return-1===n&&(n=t),[n,n===t?0:4-n%4]}function u(e,t,n){for(var a,i,o=[],s=t;s>18&63]+r[i>>12&63]+r[i>>6&63]+r[63&i]);return o.join("")}a["-".charCodeAt(0)]=62,a["_".charCodeAt(0)]=63},"./node_modules/buffer/index.js":function(e,t,n){"use strict";(function(e){var r=n("./node_modules/base64-js/index.js"),a=n("./node_modules/ieee754/index.js"),i=n("./node_modules/isarray/index.js");function o(){return c.TYPED_ARRAY_SUPPORT?2147483647:1073741823}function s(e,t){if(o()=o())throw new RangeError("Attempt to allocate Buffer larger than maximum size: 0x"+o().toString(16)+" bytes");return 0|e}function f(e,t){if(c.isBuffer(e))return e.length;if("undefined"!==typeof ArrayBuffer&&"function"===typeof ArrayBuffer.isView&&(ArrayBuffer.isView(e)||e instanceof ArrayBuffer))return e.byteLength;"string"!==typeof e&&(e=""+e);var n=e.length;if(0===n)return 0;for(var r=!1;;)switch(t){case"ascii":case"latin1":case"binary":return n;case"utf8":case"utf-8":case void 0:return P(e).length;case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return 2*n;case"hex":return n>>>1;case"base64":return j(e).length;default:if(r)return P(e).length;t=(""+t).toLowerCase(),r=!0}}function g(e,t,n){var r=!1;if((void 0===t||t<0)&&(t=0),t>this.length)return"";if((void 0===n||n>this.length)&&(n=this.length),n<=0)return"";if((n>>>=0)<=(t>>>=0))return"";for(e||(e="utf8");;)switch(e){case"hex":return S(this,t,n);case"utf8":case"utf-8":return Q(this,t,n);case"ascii":return x(this,t,n);case"latin1":case"binary":return _(this,t,n);case"base64":return w(this,t,n);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return D(this,t,n);default:if(r)throw new TypeError("Unknown encoding: "+e);e=(e+"").toLowerCase(),r=!0}}function p(e,t,n){var r=e[t];e[t]=e[n],e[n]=r}function m(e,t,n,r,a){if(0===e.length)return-1;if("string"===typeof n?(r=n,n=0):n>2147483647?n=2147483647:n<-2147483648&&(n=-2147483648),n=+n,isNaN(n)&&(n=a?0:e.length-1),n<0&&(n=e.length+n),n>=e.length){if(a)return-1;n=e.length-1}else if(n<0){if(!a)return-1;n=0}if("string"===typeof t&&(t=c.from(t,r)),c.isBuffer(t))return 0===t.length?-1:I(e,t,n,r,a);if("number"===typeof t)return t&=255,c.TYPED_ARRAY_SUPPORT&&"function"===typeof Uint8Array.prototype.indexOf?a?Uint8Array.prototype.indexOf.call(e,t,n):Uint8Array.prototype.lastIndexOf.call(e,t,n):I(e,[t],n,r,a);throw new TypeError("val must be string, number or Buffer")}function I(e,t,n,r,a){var i,o=1,s=e.length,c=t.length;if(void 0!==r&&("ucs2"===(r=String(r).toLowerCase())||"ucs-2"===r||"utf16le"===r||"utf-16le"===r)){if(e.length<2||t.length<2)return-1;o=2,s/=2,c/=2,n/=2}function u(e,t){return 1===o?e[t]:e.readUInt16BE(t*o)}if(a){var l=-1;for(i=n;is&&(n=s-c),i=n;i>=0;i--){for(var A=!0,d=0;da&&(r=a):r=a;var i=t.length;if(i%2!==0)throw new TypeError("Invalid hex string");r>i/2&&(r=i/2);for(var o=0;o>8,a=n%256,i.push(a),i.push(r);return i}(t,e.length-n),e,n,r)}function w(e,t,n){return 0===t&&n===e.length?r.fromByteArray(e):r.fromByteArray(e.slice(t,n))}function Q(e,t,n){n=Math.min(e.length,n);for(var r=[],a=t;a239?4:u>223?3:u>191?2:1;if(a+A<=n)switch(A){case 1:u<128&&(l=u);break;case 2:128===(192&(i=e[a+1]))&&(c=(31&u)<<6|63&i)>127&&(l=c);break;case 3:i=e[a+1],o=e[a+2],128===(192&i)&&128===(192&o)&&(c=(15&u)<<12|(63&i)<<6|63&o)>2047&&(c<55296||c>57343)&&(l=c);break;case 
4:i=e[a+1],o=e[a+2],s=e[a+3],128===(192&i)&&128===(192&o)&&128===(192&s)&&(c=(15&u)<<18|(63&i)<<12|(63&o)<<6|63&s)>65535&&c<1114112&&(l=c)}null===l?(l=65533,A=1):l>65535&&(l-=65536,r.push(l>>>10&1023|55296),l=56320|1023&l),r.push(l),a+=A}return function(e){var t=e.length;if(t<=k)return String.fromCharCode.apply(String,e);for(var n="",r=0;r0&&(e=this.toString("hex",0,n).match(/.{2}/g).join(" "),this.length>n&&(e+=" ... ")),""},c.prototype.compare=function(e,t,n,r,a){if(!c.isBuffer(e))throw new TypeError("Argument must be a Buffer");if(void 0===t&&(t=0),void 0===n&&(n=e?e.length:0),void 0===r&&(r=0),void 0===a&&(a=this.length),t<0||n>e.length||r<0||a>this.length)throw new RangeError("out of range index");if(r>=a&&t>=n)return 0;if(r>=a)return-1;if(t>=n)return 1;if(this===e)return 0;for(var i=(a>>>=0)-(r>>>=0),o=(n>>>=0)-(t>>>=0),s=Math.min(i,o),u=this.slice(r,a),l=e.slice(t,n),A=0;Aa)&&(n=a),e.length>0&&(n<0||t<0)||t>this.length)throw new RangeError("Attempt to write outside buffer bounds");r||(r="utf8");for(var i=!1;;)switch(r){case"hex":return v(this,e,t,n);case"utf8":case"utf-8":return y(this,e,t,n);case"ascii":return C(this,e,t,n);case"latin1":case"binary":return E(this,e,t,n);case"base64":return b(this,e,t,n);case"ucs2":case"ucs-2":case"utf16le":case"utf-16le":return B(this,e,t,n);default:if(i)throw new TypeError("Unknown encoding: "+r);r=(""+r).toLowerCase(),i=!0}},c.prototype.toJSON=function(){return{type:"Buffer",data:Array.prototype.slice.call(this._arr||this,0)}};var k=4096;function x(e,t,n){var r="";n=Math.min(e.length,n);for(var a=t;ar)&&(n=r);for(var a="",i=t;in)throw new RangeError("Trying to access beyond buffer length")}function R(e,t,n,r,a,i){if(!c.isBuffer(e))throw new TypeError('"buffer" argument must be a Buffer instance');if(t>a||te.length)throw new RangeError("Index out of range")}function F(e,t,n,r){t<0&&(t=65535+t+1);for(var a=0,i=Math.min(e.length-n,2);a>>8*(r?a:1-a)}function T(e,t,n,r){t<0&&(t=4294967295+t+1);for(var a=0,i=Math.min(e.length-n,4);a>>8*(r?a:3-a)&255}function M(e,t,n,r,a,i){if(n+r>e.length)throw new RangeError("Index out of range");if(n<0)throw new RangeError("Index out of range")}function L(e,t,n,r,i){return i||M(e,0,n,4),a.write(e,t,n,r,23,4),n+4}function O(e,t,n,r,i){return i||M(e,0,n,8),a.write(e,t,n,r,52,8),n+8}c.prototype.slice=function(e,t){var n,r=this.length;if((e=~~e)<0?(e+=r)<0&&(e=0):e>r&&(e=r),(t=void 0===t?r:~~t)<0?(t+=r)<0&&(t=0):t>r&&(t=r),t0&&(a*=256);)r+=this[e+--t]*a;return r},c.prototype.readUInt8=function(e,t){return t||N(e,1,this.length),this[e]},c.prototype.readUInt16LE=function(e,t){return t||N(e,2,this.length),this[e]|this[e+1]<<8},c.prototype.readUInt16BE=function(e,t){return t||N(e,2,this.length),this[e]<<8|this[e+1]},c.prototype.readUInt32LE=function(e,t){return t||N(e,4,this.length),(this[e]|this[e+1]<<8|this[e+2]<<16)+16777216*this[e+3]},c.prototype.readUInt32BE=function(e,t){return t||N(e,4,this.length),16777216*this[e]+(this[e+1]<<16|this[e+2]<<8|this[e+3])},c.prototype.readIntLE=function(e,t,n){e|=0,t|=0,n||N(e,t,this.length);for(var r=this[e],a=1,i=0;++i=(a*=128)&&(r-=Math.pow(2,8*t)),r},c.prototype.readIntBE=function(e,t,n){e|=0,t|=0,n||N(e,t,this.length);for(var r=t,a=1,i=this[e+--r];r>0&&(a*=256);)i+=this[e+--r]*a;return i>=(a*=128)&&(i-=Math.pow(2,8*t)),i},c.prototype.readInt8=function(e,t){return t||N(e,1,this.length),128&this[e]?-1*(255-this[e]+1):this[e]},c.prototype.readInt16LE=function(e,t){t||N(e,2,this.length);var n=this[e]|this[e+1]<<8;return 
32768&n?4294901760|n:n},c.prototype.readInt16BE=function(e,t){t||N(e,2,this.length);var n=this[e+1]|this[e]<<8;return 32768&n?4294901760|n:n},c.prototype.readInt32LE=function(e,t){return t||N(e,4,this.length),this[e]|this[e+1]<<8|this[e+2]<<16|this[e+3]<<24},c.prototype.readInt32BE=function(e,t){return t||N(e,4,this.length),this[e]<<24|this[e+1]<<16|this[e+2]<<8|this[e+3]},c.prototype.readFloatLE=function(e,t){return t||N(e,4,this.length),a.read(this,e,!0,23,4)},c.prototype.readFloatBE=function(e,t){return t||N(e,4,this.length),a.read(this,e,!1,23,4)},c.prototype.readDoubleLE=function(e,t){return t||N(e,8,this.length),a.read(this,e,!0,52,8)},c.prototype.readDoubleBE=function(e,t){return t||N(e,8,this.length),a.read(this,e,!1,52,8)},c.prototype.writeUIntLE=function(e,t,n,r){e=+e,t|=0,n|=0,r||R(this,e,t,n,Math.pow(2,8*n)-1,0);var a=1,i=0;for(this[t]=255&e;++i=0&&(i*=256);)this[t+a]=e/i&255;return t+n},c.prototype.writeUInt8=function(e,t,n){return e=+e,t|=0,n||R(this,e,t,1,255,0),c.TYPED_ARRAY_SUPPORT||(e=Math.floor(e)),this[t]=255&e,t+1},c.prototype.writeUInt16LE=function(e,t,n){return e=+e,t|=0,n||R(this,e,t,2,65535,0),c.TYPED_ARRAY_SUPPORT?(this[t]=255&e,this[t+1]=e>>>8):F(this,e,t,!0),t+2},c.prototype.writeUInt16BE=function(e,t,n){return e=+e,t|=0,n||R(this,e,t,2,65535,0),c.TYPED_ARRAY_SUPPORT?(this[t]=e>>>8,this[t+1]=255&e):F(this,e,t,!1),t+2},c.prototype.writeUInt32LE=function(e,t,n){return e=+e,t|=0,n||R(this,e,t,4,4294967295,0),c.TYPED_ARRAY_SUPPORT?(this[t+3]=e>>>24,this[t+2]=e>>>16,this[t+1]=e>>>8,this[t]=255&e):T(this,e,t,!0),t+4},c.prototype.writeUInt32BE=function(e,t,n){return e=+e,t|=0,n||R(this,e,t,4,4294967295,0),c.TYPED_ARRAY_SUPPORT?(this[t]=e>>>24,this[t+1]=e>>>16,this[t+2]=e>>>8,this[t+3]=255&e):T(this,e,t,!1),t+4},c.prototype.writeIntLE=function(e,t,n,r){if(e=+e,t|=0,!r){var a=Math.pow(2,8*n-1);R(this,e,t,n,a-1,-a)}var i=0,o=1,s=0;for(this[t]=255&e;++i>0)-s&255;return t+n},c.prototype.writeIntBE=function(e,t,n,r){if(e=+e,t|=0,!r){var a=Math.pow(2,8*n-1);R(this,e,t,n,a-1,-a)}var i=n-1,o=1,s=0;for(this[t+i]=255&e;--i>=0&&(o*=256);)e<0&&0===s&&0!==this[t+i+1]&&(s=1),this[t+i]=(e/o>>0)-s&255;return t+n},c.prototype.writeInt8=function(e,t,n){return e=+e,t|=0,n||R(this,e,t,1,127,-128),c.TYPED_ARRAY_SUPPORT||(e=Math.floor(e)),e<0&&(e=255+e+1),this[t]=255&e,t+1},c.prototype.writeInt16LE=function(e,t,n){return e=+e,t|=0,n||R(this,e,t,2,32767,-32768),c.TYPED_ARRAY_SUPPORT?(this[t]=255&e,this[t+1]=e>>>8):F(this,e,t,!0),t+2},c.prototype.writeInt16BE=function(e,t,n){return e=+e,t|=0,n||R(this,e,t,2,32767,-32768),c.TYPED_ARRAY_SUPPORT?(this[t]=e>>>8,this[t+1]=255&e):F(this,e,t,!1),t+2},c.prototype.writeInt32LE=function(e,t,n){return e=+e,t|=0,n||R(this,e,t,4,2147483647,-2147483648),c.TYPED_ARRAY_SUPPORT?(this[t]=255&e,this[t+1]=e>>>8,this[t+2]=e>>>16,this[t+3]=e>>>24):T(this,e,t,!0),t+4},c.prototype.writeInt32BE=function(e,t,n){return e=+e,t|=0,n||R(this,e,t,4,2147483647,-2147483648),e<0&&(e=4294967295+e+1),c.TYPED_ARRAY_SUPPORT?(this[t]=e>>>24,this[t+1]=e>>>16,this[t+2]=e>>>8,this[t+3]=255&e):T(this,e,t,!1),t+4},c.prototype.writeFloatLE=function(e,t,n){return L(this,e,t,!0,n)},c.prototype.writeFloatBE=function(e,t,n){return L(this,e,t,!1,n)},c.prototype.writeDoubleLE=function(e,t,n){return O(this,e,t,!0,n)},c.prototype.writeDoubleBE=function(e,t,n){return O(this,e,t,!1,n)},c.prototype.copy=function(e,t,n,r){if(n||(n=0),r||0===r||(r=this.length),t>=e.length&&(t=e.length),t||(t=0),r>0&&r=this.length)throw new RangeError("sourceStart out of bounds");if(r<0)throw new RangeError("sourceEnd 
out of bounds");r>this.length&&(r=this.length),e.length-t=0;--a)e[a+t]=this[a+n];else if(i<1e3||!c.TYPED_ARRAY_SUPPORT)for(a=0;a>>=0,n=void 0===n?this.length:n>>>0,e||(e=0),"number"===typeof e)for(i=t;i55295&&n<57344){if(!a){if(n>56319){(t-=3)>-1&&i.push(239,191,189);continue}if(o+1===r){(t-=3)>-1&&i.push(239,191,189);continue}a=n;continue}if(n<56320){(t-=3)>-1&&i.push(239,191,189),a=n;continue}n=65536+(a-55296<<10|n-56320)}else a&&(t-=3)>-1&&i.push(239,191,189);if(a=null,n<128){if((t-=1)<0)break;i.push(n)}else if(n<2048){if((t-=2)<0)break;i.push(n>>6|192,63&n|128)}else if(n<65536){if((t-=3)<0)break;i.push(n>>12|224,n>>6&63|128,63&n|128)}else{if(!(n<1114112))throw new Error("Invalid code point");if((t-=4)<0)break;i.push(n>>18|240,n>>12&63|128,n>>6&63|128,63&n|128)}}return i}function j(e){return r.toByteArray(function(e){if((e=function(e){return e.trim?e.trim():e.replace(/^\s+|\s+$/g,"")}(e).replace(U,"")).length<2)return"";for(;e.length%4!==0;)e+="=";return e}(e))}function q(e,t,n,r){for(var a=0;a=t.length||a>=e.length);++a)t[a+n]=e[a];return a}}).call(this,n("./node_modules/webpack/buildin/global.js"))},"./node_modules/engine.io-client/build/esm/contrib/has-cors.js":function(e,t,n){"use strict";n.r(t),n.d(t,"hasCORS",(function(){return a}));var r=!1;try{r="undefined"!==typeof XMLHttpRequest&&"withCredentials"in new XMLHttpRequest}catch(i){}var a=r},"./node_modules/engine.io-client/build/esm/contrib/parseqs.js":function(e,t,n){"use strict";function r(e){var t="";for(var n in e)e.hasOwnProperty(n)&&(t.length&&(t+="&"),t+=encodeURIComponent(n)+"="+encodeURIComponent(e[n]));return t}function a(e){for(var t={},n=e.split("&"),r=0,a=n.length;r0);return t}function l(e){var t=0;for(c=0;c1&&void 0!==arguments[1]?arguments[1]:{};return _classCallCheck(this,n),(r=t.call(this)).writeBuffer=[],e&&"object"===typeof e&&(s=e,e=null),e?(e=Object(o.parse)(e),s.hostname=e.host,s.secure="https"===e.protocol||"wss"===e.protocol,s.port=e.port,e.query&&(s.query=e.query)):s.host&&(s.hostname=Object(o.parse)(s.host).host),Object(a.installTimerFunctions)(_assertThisInitialized(r),s),r.secure=null!=s.secure?s.secure:"undefined"!==typeof location&&"https:"===location.protocol,s.hostname&&!s.port&&(s.port=r.secure?"443":"80"),r.hostname=s.hostname||("undefined"!==typeof location?location.hostname:"localhost"),r.port=s.port||("undefined"!==typeof location&&location.port?location.port:r.secure?"443":"80"),r.transports=s.transports||["polling","websocket"],r.writeBuffer=[],r.prevBufferLen=0,r.opts=Object.assign({path:"/engine.io",agent:!1,withCredentials:!1,upgrade:!0,timestampParam:"t",rememberUpgrade:!1,addTrailingSlash:!0,rejectUnauthorized:!0,perMessageDeflate:{threshold:1024},transportOptions:{},closeOnBeforeunload:!0},s),r.opts.path=r.opts.path.replace(/\/$/,"")+(r.opts.addTrailingSlash?"/":""),"string"===typeof r.opts.query&&(r.opts.query=Object(i.decode)(r.opts.query)),r.id=null,r.upgrades=null,r.pingInterval=null,r.pingTimeout=null,r.pingTimeoutTimer=null,"function"===typeof addEventListener&&(r.opts.closeOnBeforeunload&&(r.beforeunloadEventListener=function(){r.transport&&(r.transport.removeAllListeners(),r.transport.close())},addEventListener("beforeunload",r.beforeunloadEventListener,!1)),"localhost"!==r.hostname&&(r.offlineEventListener=function(){r.onClose("transport close",{description:"network connection lost"})},addEventListener("offline",r.offlineEventListener,!1))),r.open(),r}return _createClass(n,[{key:"createTransport",value:function(e){var 
t=Object.assign({},this.opts.query);t.EIO=c.protocol,t.transport=e,this.id&&(t.sid=this.id);var n=Object.assign({},this.opts.transportOptions[e],this.opts,{query:t,socket:this,hostname:this.hostname,secure:this.secure,port:this.port});return new r.transports[e](n)}},{key:"open",value:function(){var e,t=this;if(this.opts.rememberUpgrade&&n.priorWebsocketSuccess&&-1!==this.transports.indexOf("websocket"))e="websocket";else{if(0===this.transports.length)return void this.setTimeoutFn((function(){t.emitReserved("error","No transports available")}),0);e=this.transports[0]}this.readyState="opening";try{e=this.createTransport(e)}catch(r){return this.transports.shift(),void this.open()}e.open(),this.setTransport(e)}},{key:"setTransport",value:function(e){var t=this;this.transport&&this.transport.removeAllListeners(),this.transport=e,e.on("drain",this.onDrain.bind(this)).on("packet",this.onPacket.bind(this)).on("error",this.onError.bind(this)).on("close",(function(e){return t.onClose("transport close",e)}))}},{key:"probe",value:function(e){var t=this,r=this.createTransport(e),a=!1;n.priorWebsocketSuccess=!1;var i=function(){a||(r.send([{type:"ping",data:"probe"}]),r.once("packet",(function(e){if(!a)if("pong"===e.type&&"probe"===e.data){if(t.upgrading=!0,t.emitReserved("upgrading",r),!r)return;n.priorWebsocketSuccess="websocket"===r.name,t.transport.pause((function(){a||"closed"!==t.readyState&&(A(),t.setTransport(r),r.send([{type:"upgrade"}]),t.emitReserved("upgrade",r),r=null,t.upgrading=!1,t.flush())}))}else{var i=new Error("probe error");i.transport=r.name,t.emitReserved("upgradeError",i)}})))};function o(){a||(a=!0,A(),r.close(),r=null)}var s=function(e){var n=new Error("probe error: "+e);n.transport=r.name,o(),t.emitReserved("upgradeError",n)};function c(){s("transport closed")}function u(){s("socket closed")}function l(e){r&&e.name!==r.name&&o()}var A=function(){r.removeListener("open",i),r.removeListener("error",s),r.removeListener("close",c),t.off("close",u),t.off("upgrading",l)};r.once("open",i),r.once("error",s),r.once("close",c),this.once("close",u),this.once("upgrading",l),r.open()}},{key:"onOpen",value:function(){if(this.readyState="open",n.priorWebsocketSuccess="websocket"===this.transport.name,this.emitReserved("open"),this.flush(),"open"===this.readyState&&this.opts.upgrade)for(var e=0,t=this.upgrades.length;e1))return this.writeBuffer;for(var e=1,t=0;t0&&e>this.maxPayload)return this.writeBuffer.slice(0,t);e+=2}return this.writeBuffer}},{key:"write",value:function(e,t,n){return this.sendPacket("message",e,t,n),this}},{key:"send",value:function(e,t,n){return this.sendPacket("message",e,t,n),this}},{key:"sendPacket",value:function(e,t,n,r){if("function"===typeof t&&(r=t,t=void 0),"function"===typeof n&&(r=n,n=null),"closing"!==this.readyState&&"closed"!==this.readyState){(n=n||{}).compress=!1!==n.compress;var a={type:e,data:t,options:n};this.emitReserved("packetCreate",a),this.writeBuffer.push(a),r&&this.once("flush",r),this.flush()}}},{key:"close",value:function(){var e=this,t=function(){e.onClose("forced close"),e.transport.close()},n=function n(){e.off("upgrade",n),e.off("upgradeError",n),t()},r=function(){e.once("upgrade",n),e.once("upgradeError",n)};return"opening"!==this.readyState&&"open"!==this.readyState||(this.readyState="closing",this.writeBuffer.length?this.once("drain",(function(){e.upgrading?r():t()})):this.upgrading?r():t()),this}},{key:"onError",value:function(e){n.priorWebsocketSuccess=!1,this.emitReserved("error",e),this.onClose("transport 
error",e)}},{key:"onClose",value:function(e,t){"opening"!==this.readyState&&"open"!==this.readyState&&"closing"!==this.readyState||(this.clearTimeoutFn(this.pingTimeoutTimer),this.transport.removeAllListeners("close"),this.transport.close(),this.transport.removeAllListeners(),"function"===typeof removeEventListener&&(removeEventListener("beforeunload",this.beforeunloadEventListener,!1),removeEventListener("offline",this.offlineEventListener,!1)),this.readyState="closed",this.id=null,this.emitReserved("close",e,t),this.writeBuffer=[],this.prevBufferLen=0)}},{key:"filterUpgrades",value:function(e){for(var t=[],n=0,r=e.length;n0&&void 0!==arguments[0]?arguments[0]:{};return Object.assign(e,{xd:this.xd,xs:this.xs},this.opts),new f(this.uri(),e)}},{key:"doWrite",value:function(e,t){var n=this,r=this.request({method:"POST",data:e});r.on("success",t),r.on("error",(function(e,t){n.onError("xhr post error",e,t)}))}},{key:"doPoll",value:function(){var e=this,t=this.request();t.on("data",this.onData.bind(this)),t.on("error",(function(t,n){e.onError("xhr poll error",t,n)})),this.pollXhr=t}}]),n}(r.Transport),f=function(e){_inherits(n,e);var t=_createSuper(n);function n(e,r){var a;return _classCallCheck(this,n),a=t.call(this),Object(u.installTimerFunctions)(_assertThisInitialized(a),r),a.opts=r,a.method=r.method||"GET",a.uri=e,a.async=!1!==r.async,a.data=void 0!==r.data?r.data:null,a.create(),a}return _createClass(n,[{key:"create",value:function(){var e=this,t=Object(u.pick)(this.opts,"agent","pfx","key","passphrase","cert","ca","ciphers","rejectUnauthorized","autoUnref");t.xdomain=!!this.opts.xd,t.xscheme=!!this.opts.xs;var r=this.xhr=new s.XHR(t);try{r.open(this.method,this.uri,this.async);try{if(this.opts.extraHeaders)for(var a in r.setDisableHeaderCheck&&r.setDisableHeaderCheck(!0),this.opts.extraHeaders)this.opts.extraHeaders.hasOwnProperty(a)&&r.setRequestHeader(a,this.opts.extraHeaders[a])}catch(i){}if("POST"===this.method)try{r.setRequestHeader("Content-type","text/plain;charset=UTF-8")}catch(i){}try{r.setRequestHeader("Accept","*/*")}catch(i){}"withCredentials"in r&&(r.withCredentials=this.opts.withCredentials),this.opts.requestTimeout&&(r.timeout=this.opts.requestTimeout),r.onreadystatechange=function(){4===r.readyState&&(200===r.status||1223===r.status?e.onLoad():e.setTimeoutFn((function(){e.onError("number"===typeof r.status?r.status:0)}),0))},r.send(this.data)}catch(i){return void this.setTimeoutFn((function(){e.onError(i)}),0)}"undefined"!==typeof document&&(this.index=n.requestsCount++,n.requests[this.index]=this)}},{key:"onError",value:function(e){this.emitReserved("error",e,this.xhr),this.cleanup(!0)}},{key:"cleanup",value:function(e){if("undefined"!==typeof this.xhr&&null!==this.xhr){if(this.xhr.onreadystatechange=A,e)try{this.xhr.abort()}catch(t){}"undefined"!==typeof document&&delete n.requests[this.index],this.xhr=null}}},{key:"onLoad",value:function(){var e=this.xhr.responseText;null!==e&&(this.emitReserved("data",e),this.emitReserved("success"),this.cleanup())}},{key:"abort",value:function(){this.cleanup()}}]),n}(c.Emitter);if(f.requestsCount=0,f.requests={},"undefined"!==typeof document)if("function"===typeof attachEvent)attachEvent("onunload",p);else if("function"===typeof addEventListener){var g="onpagehide"in l.globalThisShim?"pagehide":"unload";addEventListener(g,p,!1)}function p(){for(var e in f.requests)f.requests.hasOwnProperty(e)&&f.requests[e].abort()}},"./node_modules/engine.io-client/build/esm/transports/websocket-constructor.browser.js":function(e,t,n){"use 
strict";n.r(t),n.d(t,"nextTick",(function(){return a})),n.d(t,"WebSocket",(function(){return i})),n.d(t,"usingBrowserWebSocket",(function(){return o})),n.d(t,"defaultBinaryType",(function(){return s}));var r=n("./node_modules/engine.io-client/build/esm/globalThis.browser.js"),a="function"===typeof Promise&&"function"===typeof Promise.resolve?function(e){return Promise.resolve().then(e)}:function(e,t){return t(e,0)},i=r.globalThisShim.WebSocket||r.globalThisShim.MozWebSocket,o=!0,s="arraybuffer"},"./node_modules/engine.io-client/build/esm/transports/websocket.js":function(e,t,n){"use strict";n.r(t),function(e){n.d(t,"WS",(function(){return l}));var r=n("./node_modules/engine.io-client/build/esm/transport.js"),a=n("./node_modules/engine.io-client/build/esm/contrib/parseqs.js"),i=n("./node_modules/engine.io-client/build/esm/contrib/yeast.js"),o=n("./node_modules/engine.io-client/build/esm/util.js"),s=n("./node_modules/engine.io-client/build/esm/transports/websocket-constructor.browser.js"),c=n("./node_modules/engine.io-parser/build/esm/index.js"),u="undefined"!==typeof navigator&&"string"===typeof navigator.product&&"reactnative"===navigator.product.toLowerCase(),l=function(t){_inherits(r,t);var n=_createSuper(r);function r(e){var t;return _classCallCheck(this,r),(t=n.call(this,e)).supportsBinary=!e.forceBase64,t}return _createClass(r,[{key:"name",get:function(){return"websocket"}},{key:"doOpen",value:function(){if(this.check()){var e=this.uri(),t=this.opts.protocols,n=u?{}:Object(o.pick)(this.opts,"agent","perMessageDeflate","pfx","key","passphrase","cert","ca","ciphers","rejectUnauthorized","localAddress","protocolVersion","origin","maxPayload","family","checkServerIdentity");this.opts.extraHeaders&&(n.headers=this.opts.extraHeaders);try{this.ws=s.usingBrowserWebSocket&&!u?t?new s.WebSocket(e,t):new s.WebSocket(e):new s.WebSocket(e,t,n)}catch(r){return this.emitReserved("error",r)}this.ws.binaryType=this.socket.binaryType||s.defaultBinaryType,this.addEventListeners()}}},{key:"addEventListeners",value:function(){var e=this;this.ws.onopen=function(){e.opts.autoUnref&&e.ws._socket.unref(),e.onOpen()},this.ws.onclose=function(t){return e.onClose({description:"websocket connection closed",context:t})},this.ws.onmessage=function(t){return e.onData(t.data)},this.ws.onerror=function(t){return e.onError("websocket error",t)}}},{key:"write",value:function(t){var n=this;this.writable=!1;for(var r=function(){var r=t[a],i=a===t.length-1;Object(c.encodePacket)(r,n.supportsBinary,(function(t){var a={};s.usingBrowserWebSocket||(r.options&&(a.compress=r.options.compress),n.opts.perMessageDeflate&&("string"===typeof t?e.byteLength(t):t.length)1?t-1:0),r=1;r=57344?n+=3:(r++,n+=4);return n}(e):Math.ceil((e.byteLength||e.size)*c)}},"./node_modules/engine.io-parser/build/esm/commons.js":function(e,t,n){"use strict";n.r(t),n.d(t,"PACKET_TYPES",(function(){return r})),n.d(t,"PACKET_TYPES_REVERSE",(function(){return a})),n.d(t,"ERROR_PACKET",(function(){return i}));var r=Object.create(null);r.open="0",r.close="1",r.ping="2",r.pong="3",r.message="4",r.upgrade="5",r.noop="6";var a=Object.create(null);Object.keys(r).forEach((function(e){a[r[e]]=e}));var i={type:"error",data:"parser error"}},"./node_modules/engine.io-parser/build/esm/decodePacket.browser.js":function(e,t,n){"use strict";n.r(t);var r=n("./node_modules/engine.io-parser/build/esm/commons.js"),a=n("./node_modules/@socket.io/base64-arraybuffer/dist/base64-arraybuffer.es5.js"),i="function"===typeof ArrayBuffer,o=function(e,t){if(i){var 
n=Object(a.decode)(e);return s(n,t)}return{base64:!0,data:e}},s=function(e,t){return"blob"===t&&e instanceof ArrayBuffer?new Blob([e]):e};t.default=function(e,t){if("string"!==typeof e)return{type:"message",data:s(e,t)};var n=e.charAt(0);return"b"===n?{type:"message",data:o(e.substring(1),t)}:r.PACKET_TYPES_REVERSE[n]?e.length>1?{type:r.PACKET_TYPES_REVERSE[n],data:e.substring(1)}:{type:r.PACKET_TYPES_REVERSE[n]}:r.ERROR_PACKET}},"./node_modules/engine.io-parser/build/esm/encodePacket.browser.js":function(e,t,n){"use strict";n.r(t);var r=n("./node_modules/engine.io-parser/build/esm/commons.js"),a="function"===typeof Blob||"undefined"!==typeof Blob&&"[object BlobConstructor]"===Object.prototype.toString.call(Blob),i="function"===typeof ArrayBuffer,o=function(e,t){var n=new FileReader;return n.onload=function(){var e=n.result.split(",")[1];t("b"+e)},n.readAsDataURL(e)};t.default=function(e,t,n){var s,c=e.type,u=e.data;return a&&u instanceof Blob?t?n(u):o(u,n):i&&(u instanceof ArrayBuffer||(s=u,"function"===typeof ArrayBuffer.isView?ArrayBuffer.isView(s):s&&s.buffer instanceof ArrayBuffer))?t?n(u):o(new Blob([u]),n):n(r.PACKET_TYPES[c]+(u||""))}},"./node_modules/engine.io-parser/build/esm/index.js":function(e,t,n){"use strict";n.r(t),n.d(t,"protocol",(function(){return c})),n.d(t,"encodePayload",(function(){return o})),n.d(t,"decodePayload",(function(){return s}));var r=n("./node_modules/engine.io-parser/build/esm/encodePacket.browser.js");n.d(t,"encodePacket",(function(){return r.default}));var a=n("./node_modules/engine.io-parser/build/esm/decodePacket.browser.js");n.d(t,"decodePacket",(function(){return a.default}));var i=String.fromCharCode(30),o=function(e,t){var n=e.length,a=new Array(n),o=0;e.forEach((function(e,s){Object(r.default)(e,!1,(function(e){a[s]=e,++o===n&&t(a.join(i))}))}))},s=function(e,t){for(var n=e.split(i),r=[],o=0;o>1,l=-7,A=n?a-1:0,d=n?-1:1,h=e[t+A];for(A+=d,i=h&(1<<-l)-1,h>>=-l,l+=s;l>0;i=256*i+e[t+A],A+=d,l-=8);for(o=i&(1<<-l)-1,i>>=-l,l+=r;l>0;o=256*o+e[t+A],A+=d,l-=8);if(0===i)i=1-u;else{if(i===c)return o?NaN:1/0*(h?-1:1);o+=Math.pow(2,r),i-=u}return(h?-1:1)*o*Math.pow(2,i-r)},t.write=function(e,t,n,r,a,i){var o,s,c,u=8*i-a-1,l=(1<>1,d=23===a?Math.pow(2,-24)-Math.pow(2,-77):0,h=r?0:i-1,f=r?1:-1,g=t<0||0===t&&1/t<0?1:0;for(t=Math.abs(t),isNaN(t)||t===1/0?(s=isNaN(t)?1:0,o=l):(o=Math.floor(Math.log(t)/Math.LN2),t*(c=Math.pow(2,-o))<1&&(o--,c*=2),(t+=o+A>=1?d/c:d*Math.pow(2,1-A))*c>=2&&(o++,c/=2),o+A>=l?(s=0,o=l):o+A>=1?(s=(t*c-1)*Math.pow(2,a),o+=A):(s=t*Math.pow(2,A-1)*Math.pow(2,a),o=0));a>=8;e[n+h]=255&s,h+=f,s/=256,a-=8);for(o=o<0;e[n+h]=255&o,h+=f,o/=256,u-=8);e[n+h-f]|=128*g}},"./node_modules/isarray/index.js":function(e,t){var n={}.toString;e.exports=Array.isArray||function(e){return"[object Array]"==n.call(e)}},"./node_modules/socket.io-client/build/esm/contrib/backo2.js":function(e,t,n){"use strict";function r(e){e=e||{},this.ms=e.min||100,this.max=e.max||1e4,this.factor=e.factor||2,this.jitter=e.jitter>0&&e.jitter<=1?e.jitter:0,this.attempts=0}n.r(t),n.d(t,"Backoff",(function(){return r})),r.prototype.duration=function(){var e=this.ms*Math.pow(this.factor,this.attempts++);if(this.jitter){var t=Math.random(),n=Math.floor(t*this.jitter*e);e=0==(1&Math.floor(10*t))?e-n:e+n}return 0|Math.min(e,this.max)},r.prototype.reset=function(){this.attempts=0},r.prototype.setMin=function(e){this.ms=e},r.prototype.setMax=function(e){this.max=e},r.prototype.setJitter=function(e){this.jitter=e}},"./node_modules/socket.io-client/build/esm/index.js":function(e,t,n){"use 
strict";n.r(t),n.d(t,"io",(function(){return c})),n.d(t,"connect",(function(){return c})),n.d(t,"default",(function(){return c}));var r=n("./node_modules/socket.io-client/build/esm/url.js"),a=n("./node_modules/socket.io-client/build/esm/manager.js");n.d(t,"Manager",(function(){return a.Manager}));var i=n("./node_modules/socket.io-client/build/esm/socket.js");n.d(t,"Socket",(function(){return i.Socket}));var o=n("./node_modules/socket.io-parser/build/esm/index.js");n.d(t,"protocol",(function(){return o.protocol}));var s={};function c(e,t){"object"===typeof e&&(t=e,e=void 0),t=t||{};var n,i=Object(r.url)(e,t.path||"/socket.io"),o=i.source,c=i.id,u=i.path,l=s[c]&&u in s[c].nsps;return t.forceNew||t["force new connection"]||!1===t.multiplex||l?n=new a.Manager(o,t):(s[c]||(s[c]=new a.Manager(o,t)),n=s[c]),i.query&&!t.query&&(t.query=i.queryKey),n.socket(i.path,t)}Object.assign(c,{Manager:a.Manager,Socket:i.Socket,io:c,connect:c})},"./node_modules/socket.io-client/build/esm/manager.js":function(e,t,n){"use strict";n.r(t),n.d(t,"Manager",(function(){return c}));var r=n("./node_modules/engine.io-client/build/esm/index.js"),a=n("./node_modules/socket.io-client/build/esm/socket.js"),i=n("./node_modules/socket.io-parser/build/esm/index.js"),o=n("./node_modules/socket.io-client/build/esm/on.js"),s=n("./node_modules/socket.io-client/build/esm/contrib/backo2.js"),c=function(e){_inherits(n,e);var t=_createSuper(n);function n(e,a){var o,c;_classCallCheck(this,n),(o=t.call(this)).nsps={},o.subs=[],e&&"object"===typeof e&&(a=e,e=void 0),(a=a||{}).path=a.path||"/socket.io",o.opts=a,Object(r.installTimerFunctions)(_assertThisInitialized(o),a),o.reconnection(!1!==a.reconnection),o.reconnectionAttempts(a.reconnectionAttempts||1/0),o.reconnectionDelay(a.reconnectionDelay||1e3),o.reconnectionDelayMax(a.reconnectionDelayMax||5e3),o.randomizationFactor(null!==(c=a.randomizationFactor)&&void 0!==c?c:.5),o.backoff=new s.Backoff({min:o.reconnectionDelay(),max:o.reconnectionDelayMax(),jitter:o.randomizationFactor()}),o.timeout(null==a.timeout?2e4:a.timeout),o._readyState="closed",o.uri=e;var u=a.parser||i;return o.encoder=new u.Encoder,o.decoder=new u.Decoder,o._autoConnect=!1!==a.autoConnect,o._autoConnect&&o.open(),o}return _createClass(n,[{key:"reconnection",value:function(e){return arguments.length?(this._reconnection=!!e,this):this._reconnection}},{key:"reconnectionAttempts",value:function(e){return void 0===e?this._reconnectionAttempts:(this._reconnectionAttempts=e,this)}},{key:"reconnectionDelay",value:function(e){var t;return void 0===e?this._reconnectionDelay:(this._reconnectionDelay=e,null===(t=this.backoff)||void 0===t||t.setMin(e),this)}},{key:"randomizationFactor",value:function(e){var t;return void 0===e?this._randomizationFactor:(this._randomizationFactor=e,null===(t=this.backoff)||void 0===t||t.setJitter(e),this)}},{key:"reconnectionDelayMax",value:function(e){var t;return void 0===e?this._reconnectionDelayMax:(this._reconnectionDelayMax=e,null===(t=this.backoff)||void 0===t||t.setMax(e),this)}},{key:"timeout",value:function(e){return arguments.length?(this._timeout=e,this):this._timeout}},{key:"maybeReconnectOnOpen",value:function(){!this._reconnecting&&this._reconnection&&0===this.backoff.attempts&&this.reconnect()}},{key:"open",value:function(e){var t=this;if(~this._readyState.indexOf("open"))return this;this.engine=new r.Socket(this.uri,this.opts);var n=this.engine,a=this;this._readyState="opening",this.skipReconnect=!1;var 
i=Object(o.on)(n,"open",(function(){a.onopen(),e&&e()})),s=Object(o.on)(n,"error",(function(n){a.cleanup(),a._readyState="closed",t.emitReserved("error",n),e?e(n):a.maybeReconnectOnOpen()}));if(!1!==this._timeout){var c=this._timeout;0===c&&i();var u=this.setTimeoutFn((function(){i(),n.close(),n.emit("error",new Error("timeout"))}),c);this.opts.autoUnref&&u.unref(),this.subs.push((function(){clearTimeout(u)}))}return this.subs.push(i),this.subs.push(s),this}},{key:"connect",value:function(e){return this.open(e)}},{key:"onopen",value:function(){this.cleanup(),this._readyState="open",this.emitReserved("open");var e=this.engine;this.subs.push(Object(o.on)(e,"ping",this.onping.bind(this)),Object(o.on)(e,"data",this.ondata.bind(this)),Object(o.on)(e,"error",this.onerror.bind(this)),Object(o.on)(e,"close",this.onclose.bind(this)),Object(o.on)(this.decoder,"decoded",this.ondecoded.bind(this)))}},{key:"onping",value:function(){this.emitReserved("ping")}},{key:"ondata",value:function(e){try{this.decoder.add(e)}catch(t){this.onclose("parse error",t)}}},{key:"ondecoded",value:function(e){var t=this;Object(r.nextTick)((function(){t.emitReserved("packet",e)}),this.setTimeoutFn)}},{key:"onerror",value:function(e){this.emitReserved("error",e)}},{key:"socket",value:function(e,t){var n=this.nsps[e];return n?this._autoConnect&&!n.active&&n.connect():(n=new a.Socket(this,e,t),this.nsps[e]=n),n}},{key:"_destroy",value:function(e){for(var t=0,n=Object.keys(this.nsps);t=this._reconnectionAttempts)this.backoff.reset(),this.emitReserved("reconnect_failed"),this._reconnecting=!1;else{var n=this.backoff.duration();this._reconnecting=!0;var r=this.setTimeoutFn((function(){t.skipReconnect||(e.emitReserved("reconnect_attempt",t.backoff.attempts),t.skipReconnect||t.open((function(n){n?(t._reconnecting=!1,t.reconnect(),e.emitReserved("reconnect_error",n)):t.onreconnect()})))}),n);this.opts.autoUnref&&r.unref(),this.subs.push((function(){clearTimeout(r)}))}}},{key:"onreconnect",value:function(){var e=this.backoff.attempts;this._reconnecting=!1,this.backoff.reset(),this.emitReserved("reconnect",e)}}]),n}(n("./node_modules/@socket.io/component-emitter/index.mjs").Emitter)},"./node_modules/socket.io-client/build/esm/on.js":function(e,t,n){"use strict";function r(e,t,n){return e.on(t,n),function(){e.off(t,n)}}n.r(t),n.d(t,"on",(function(){return r}))},"./node_modules/socket.io-client/build/esm/socket.js":function(e,t,n){"use strict";n.r(t),n.d(t,"Socket",(function(){return s}));var r=n("./node_modules/socket.io-parser/build/esm/index.js"),a=n("./node_modules/socket.io-client/build/esm/on.js"),i=n("./node_modules/@socket.io/component-emitter/index.mjs"),o=Object.freeze({connect:1,connect_error:1,disconnect:1,disconnecting:1,newListener:1,removeListener:1}),s=function(e){_inherits(n,e);var t=_createSuper(n);function n(e,r,a){var i;return _classCallCheck(this,n),(i=t.call(this)).connected=!1,i.recovered=!1,i.receiveBuffer=[],i.sendBuffer=[],i._queue=[],i._queueSeq=0,i.ids=0,i.acks={},i.flags={},i.io=e,i.nsp=r,a&&a.auth&&(i.auth=a.auth),i._opts=Object.assign({},a),i.io._autoConnect&&i.open(),i}return _createClass(n,[{key:"disconnected",get:function(){return!this.connected}},{key:"subEvents",value:function(){if(!this.subs){var e=this.io;this.subs=[Object(a.on)(e,"open",this.onopen.bind(this)),Object(a.on)(e,"packet",this.onpacket.bind(this)),Object(a.on)(e,"error",this.onerror.bind(this)),Object(a.on)(e,"close",this.onclose.bind(this))]}}},{key:"active",get:function(){return!!this.subs}},{key:"connect",value:function(){return 
this.connected||(this.subEvents(),this.io._reconnecting||this.io.open(),"open"===this.io._readyState&&this.onopen()),this}},{key:"open",value:function(){return this.connect()}},{key:"send",value:function(){for(var e=arguments.length,t=new Array(e),n=0;n1?t-1:0),a=1;a1?n-1:0),a=1;an._opts.retries&&(n._queue.shift(),t&&t(e));else if(n._queue.shift(),t){for(var a=arguments.length,i=new Array(a>1?a-1:0),o=1;o0&&void 0!==arguments[0]&&arguments[0];if(this.connected&&0!==this._queue.length){var t=this._queue[0];t.pending&&!e||(t.pending=!0,t.tryCount++,this.flags=t.flags,this.emit.apply(this,t.args))}}},{key:"packet",value:function(e){e.nsp=this.nsp,this.io._packet(e)}},{key:"onopen",value:function(){var e=this;"function"==typeof this.auth?this.auth((function(t){e._sendConnectPacket(t)})):this._sendConnectPacket(this.auth)}},{key:"_sendConnectPacket",value:function(e){this.packet({type:r.PacketType.CONNECT,data:this._pid?Object.assign({pid:this._pid,offset:this._lastOffset},e):e})}},{key:"onerror",value:function(e){this.connected||this.emitReserved("connect_error",e)}},{key:"onclose",value:function(e,t){this.connected=!1,delete this.id,this.emitReserved("disconnect",e,t)}},{key:"onpacket",value:function(e){if(e.nsp===this.nsp)switch(e.type){case r.PacketType.CONNECT:e.data&&e.data.sid?this.onconnect(e.data.sid,e.data.pid):this.emitReserved("connect_error",new Error("It seems you are trying to reach a Socket.IO server in v2.x with a v3.x client, but they are not compatible (more information here: https://socket.io/docs/v3/migrating-from-2-x-to-3-0/)"));break;case r.PacketType.EVENT:case r.PacketType.BINARY_EVENT:this.onevent(e);break;case r.PacketType.ACK:case r.PacketType.BINARY_ACK:this.onack(e);break;case r.PacketType.DISCONNECT:this.ondisconnect();break;case r.PacketType.CONNECT_ERROR:this.destroy();var t=new Error(e.data.message);t.data=e.data.data,this.emitReserved("connect_error",t)}}},{key:"onevent",value:function(e){var t=e.data||[];null!=e.id&&t.push(this.ack(e.id)),this.connected?this.emitEvent(t):this.receiveBuffer.push(Object.freeze(t))}},{key:"emitEvent",value:function(e){if(this._anyListeners&&this._anyListeners.length){var t,r=this._anyListeners.slice(),a=_createForOfIteratorHelper(r);try{for(a.s();!(t=a.n()).done;)t.value.apply(this,e)}catch(i){a.e(i)}finally{a.f()}}_get(_getPrototypeOf(n.prototype),"emit",this).apply(this,e),this._pid&&e.length&&"string"===typeof e[e.length-1]&&(this._lastOffset=e[e.length-1])}},{key:"ack",value:function(e){var t=this,n=!1;return function(){if(!n){n=!0;for(var a=arguments.length,i=new Array(a),o=0;o1&&void 0!==arguments[1]?arguments[1]:"",n=arguments.length>2?arguments[2]:void 0,a=e;n=n||"undefined"!==typeof location&&location,null==e&&(e=n.protocol+"//"+n.host),"string"===typeof e&&("/"===e.charAt(0)&&(e="/"===e.charAt(1)?n.protocol+e:n.host+e),/^(https?|wss?):\/\//.test(e)||(e="undefined"!==typeof n?n.protocol+"//"+e:"https://"+e),a=Object(r.parse)(e)),a.port||(/^(http|ws)$/.test(a.protocol)?a.port="80":/^(http|ws)s$/.test(a.protocol)&&(a.port="443")),a.path=a.path||"/";var i=-1!==a.host.indexOf(":")?"["+a.host+"]":a.host;return a.id=a.protocol+"://"+i+":"+a.port+t,a.href=a.protocol+"://"+i+(n&&n.port===a.port?"":":"+a.port),a}},"./node_modules/socket.io-parser/build/esm/binary.js":function(e,t,n){"use strict";n.r(t),n.d(t,"deconstructPacket",(function(){return a})),n.d(t,"reconstructPacket",(function(){return o}));var r=n("./node_modules/socket.io-parser/build/esm/is-binary.js");function a(e){var t=[],n=e.data,r=e;return 
r.data=i(n,t),r.attachments=t.length,{packet:r,buffers:t}}function i(e,t){if(!e)return e;if(Object(r.isBinary)(e)){var n={_placeholder:!0,num:t.length};return t.push(e),n}if(Array.isArray(e)){for(var a=new Array(e.length),o=0;o=0&&e.num stats.json","stats-prod":"webpack --profile --json --mode production > stats-prod.json","analyze":"webpack-bundle-analyzer -p 9999 stats.json","analyze-prod":"webpack-bundle-analyzer -p 9999 stats-prod.json","clean":"rimraf dist/*","deploy":"npm run build && node deploy-site.js","format":"prettier --write \\"{src,tests}/**/**\\"","check-format":"prettier --check \\"{src,tests}/**/**\\"","test":"karma start --single-run --browsers ChromeHeadless,FirefoxHeadless karma.conf.js","test-watch":"karma start --auto-watch --browsers ChromeDebugging karma.conf.js --debug"},"repository":{"type":"git","url":"git+https://github.com/imjoy-team/imjoy-rpc.git"},"keywords":["imjoy","rpc"],"author":"imjoy-team ","license":"MIT","bugs":{"url":"https://github.com/imjoy-team/imjoy-rpc/issues"},"homepage":"https://github.com/imjoy-team/imjoy-rpc","dependencies":{"@msgpack/msgpack":"^2.7.1","socket.io-client":"^4.6.2"},"devDependencies":{"@babel/core":"^7.16.12","@babel/plugin-syntax-dynamic-import":"^7.8.3","@babel/polyfill":"^7.12.1","@babel/preset-env":"^7.16.11","@types/requirejs":"^2.1.34","babel-core":"^6.26.0","babel-eslint":"^10.1.0","babel-loader":"^8.2.3","babel-runtime":"^6.26.0","chai":"^4.3.6","clean-webpack-plugin":"^0.1.19","copy-webpack-plugin":"^5.1.2","eslint":"^6.8.0","eslint-config-prettier":"^4.2.0","eslint-loader":"^4.0.2","file-loader":"^0.11.2","fs-extra":"^0.30.0","gh-pages":"^2.0.1","html-loader":"^0.5.5","html-webpack-plugin":"^3.2.0","json-loader":"^0.5.4","karma":"^6.3.12","karma-chrome-launcher":"^3.1.0","karma-firefox-launcher":"^1.3.0","karma-mocha":"^2.0.1","karma-sourcemap-loader":"^0.3.8","karma-spec-reporter":"0.0.32","karma-webpack":"^4.0.2","lerna":"^6.0.3","lodash.debounce":"^4.0.8","mocha":"^10.1.0","postcss":"^7.0.36","prettier":"^1.6.1","rimraf":"^2.6.2","schema-utils":"^0.4.3","style-loader":"^0.18.1","ts-loader":"^9.4.3","url-loader":"^0.5.9","webpack":"^4.46.0","webpack-bundle-analyzer":"^4.7.0","webpack-cli":"^3.3.12","webpack-dev-server":"^3.11.3","webpack-merge":"^4.1.1","workbox-webpack-plugin":"^4.3.1","worker-loader":"^2.0.0","write-file-webpack-plugin":"^4.5.1"},"eslintConfig":{"globals":{"document":true,"window":true}}}')},"./src/main.js":function(e,t,n){"use strict";n.r(t),n.d(t,"waitForInitialization",(function(){return l})),n.d(t,"setupRPC",(function(){return A}));var r=n("./src/plugin.webworker.js"),a=n.n(r),i=n("./src/pluginIframe.js"),o=n("./src/utils.js");n.d(t,"loadRequirements",(function(){return o.loadRequirements}));var s=n("./src/rpc.js");n.d(t,"RPC",(function(){return s.RPC})),n.d(t,"API_VERSION",(function(){return s.API_VERSION}));var c=n("./package.json");function u(){return"undefined"!==typeof WorkerGlobalScope&&self instanceof WorkerGlobalScope}function l(e){u()&&(globalThis.parent=self),(e=e||{}).enable_service_worker&&(Object(o.setupServiceWorker)(e.base_url,e.target_origin,e.cache_requirements),e.enable_service_worker=!1),e.cache_requirements&&delete e.cache_requirements;var t=e.target_origin||"*";if(e.credential_required&&"function"!==typeof e.verify_credential)throw new Error("Please also provide the `verify_credential` function with `credential_required`.");if(e.credential_required&&"*"===t)throw new Error("`target_origin` was set to `*` with `credential_required=true`, there is a security risk that you 
may leak the credential to website from other origin. Please specify the `target_origin` explicitly.");var n=function(){globalThis.removeEventListener("message",a)},r=Object(o.randId)(),a=function(a){if("message"===a.type&&(!a.origin||"*"===t||a.origin===t)){if("initialize"!==a.data.type)throw new Error("unrecognized message: ".concat(a.data));n(),a.data.peer_id!==r&&console.warn("".concat(a.data.config&&a.data.config.name,": connection peer id mismatch ").concat(a.data.peer_id," !== ").concat(r));var i=a.data.config;"*"!==t&&(i.target_origin=t),e.credential_required?e.verify_credential(i.credential).then((function(e){if(!e||!e.auth||e.error)throw new Error("Failed to verify the credentail:"+(e&&e.error));i.auth=e.auth,A(i).then((function(){console.log("ImJoy RPC loaded successfully!")}))})):A(i).then((function(){console.log("ImJoy RPC loaded successfully!")}))}};globalThis.addEventListener("message",a),u()?parent.postMessage({type:"imjoyRPCReady",config:e,peer_id:r}):parent.postMessage({type:"imjoyRPCReady",config:e,peer_id:r},"*")}function A(e){return(e=e||{}).name=e.name||Object(o.randId)(),(e=Object(o.normalizeConfig)(e)).enable_service_worker&&Object(o.setupServiceWorker)(e.base_url,e.target_origin,e.cache_requirements),e.cache_requirements&&delete e.cache_requirements,new Promise((function(t,n){if(function(){try{return window.self!==window.top}catch(e){return!0}}()){if("web-worker"===e.type)try{!function(e){if(!e.allow_execution)throw new Error("web-worker plugin can only work with allow_execution=true");var t=null;e.broadcastChannel&&(t=new BroadcastChannel(e.broadcastChannel));var n=new a.a,r=setTimeout((function(){n.terminate(),console.warn("Plugin failed to start as a web-worker, running in an iframe instead."),Object(i.default)(e)}),2e3),s=Object(o.randId)();n.addEventListener("message",(function(a){var i=void 0,o=a.data;if("worker-ready"===o.type)return n.postMessage({type:"connectRPC",config:e}),void clearTimeout(r);"initialized"===o.type?(o.config=Object.assign({},e,o.config),o.origin=globalThis.location.origin,o.peer_id=s):"imjoy_remote_api_ready"===o.type?globalThis.dispatchEvent(new CustomEvent("imjoy_remote_api_ready",{detail:null})):"cacheRequirements"===o.type&&"function"===typeof cache_requirements?cache_requirements(o.requirements):"disconnect"===o.type?n.terminate():o.__transferables__&&(i=o.__transferables__,delete o.__transferables__),t?t.postMessage(o):parent.postMessage(o,e.target_origin||"*",i)})),(t||window).addEventListener("message",(function(r){if("message"===r.type&&(t||"*"===e.target_origin||r.origin===e.target_origin)){var a=void 0,i=r.data;i.__transferables__&&(a=i.__transferables__,delete i.__transferables__),i.peer_id===s?n.postMessage(i,a):e.debug&&console.log("connection peer id mismatch ".concat(i.peer_id," !== ").concat(s))}}))}(e)}catch(r){Object(i.default)(e)}else{if(!["rpc-window","rpc-worker","iframe","window"].includes(e.type))return console.error("Unsupported plugin type: "+e.type),void n("Unsupported plugin type: "+e.type);Object(i.default)(e)}globalThis.addEventListener("imjoy_remote_api_ready",(function n(r){var a=r.detail;e.expose_api_globally&&(globalThis.api=a),t(a),globalThis.removeEventListener("imjoy_remote_api_ready",n)}))}else u()?Object(i.default)(e):n(new Error("imjoy-rpc should only run inside an iframe or a webworker."))}))}n.d(t,"VERSION",(function(){return c.version}))},"./src/plugin.webworker.js":function(e,t,n){e.exports=function(){return n("./node_modules/worker-loader/dist/workers/InlineWorker.js")('/******/ 
(function(modules) { // webpackBootstrap\n/******/ \t// The module cache\n/******/ \tvar installedModules = {};\n/******/\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(installedModules[moduleId]) {\n/******/ \t\t\treturn installedModules[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = installedModules[moduleId] = {\n/******/ \t\t\ti: moduleId,\n/******/ \t\t\tl: false,\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/\n/******/ \t\t// Execute the module function\n/******/ \t\tmodules[moduleId].call(module.exports, module, module.exports, __webpack_require__);\n/******/\n/******/ \t\t// Flag the module as loaded\n/******/ \t\tmodule.l = true;\n/******/\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/\n/******/\n/******/ \t// expose the modules object (__webpack_modules__)\n/******/ \t__webpack_require__.m = modules;\n/******/\n/******/ \t// expose the module cache\n/******/ \t__webpack_require__.c = installedModules;\n/******/\n/******/ \t// define getter function for harmony exports\n/******/ \t__webpack_require__.d = function(exports, name, getter) {\n/******/ \t\tif(!__webpack_require__.o(exports, name)) {\n/******/ \t\t\tObject.defineProperty(exports, name, { enumerable: true, get: getter });\n/******/ \t\t}\n/******/ \t};\n/******/\n/******/ \t// define __esModule on exports\n/******/ \t__webpack_require__.r = function(exports) {\n/******/ \t\tif(typeof Symbol !== \'undefined\' && Symbol.toStringTag) {\n/******/ \t\t\tObject.defineProperty(exports, Symbol.toStringTag, { value: \'Module\' });\n/******/ \t\t}\n/******/ \t\tObject.defineProperty(exports, \'__esModule\', { value: true });\n/******/ \t};\n/******/\n/******/ \t// create a fake namespace object\n/******/ \t// mode & 1: value is a module id, require it\n/******/ \t// mode & 2: merge all properties of value into the ns\n/******/ \t// mode & 4: return value when already ns object\n/******/ \t// mode & 8|1: behave like require\n/******/ \t__webpack_require__.t = function(value, mode) {\n/******/ \t\tif(mode & 1) value = __webpack_require__(value);\n/******/ \t\tif(mode & 8) return value;\n/******/ \t\tif((mode & 4) && typeof value === \'object\' && value && value.__esModule) return value;\n/******/ \t\tvar ns = Object.create(null);\n/******/ \t\t__webpack_require__.r(ns);\n/******/ \t\tObject.defineProperty(ns, \'default\', { enumerable: true, value: value });\n/******/ \t\tif(mode & 2 && typeof value != \'string\') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key));\n/******/ \t\treturn ns;\n/******/ \t};\n/******/\n/******/ \t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t__webpack_require__.n = function(module) {\n/******/ \t\tvar getter = module && module.__esModule ?\n/******/ \t\t\tfunction getDefault() { return module[\'default\']; } :\n/******/ \t\t\tfunction getModuleExports() { return module; };\n/******/ \t\t__webpack_require__.d(getter, \'a\', getter);\n/******/ \t\treturn getter;\n/******/ \t};\n/******/\n/******/ \t// Object.prototype.hasOwnProperty.call\n/******/ \t__webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };\n/******/\n/******/ \t// __webpack_public_path__\n/******/ \t__webpack_require__.p = 
"";\n/******/\n/******/\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(__webpack_require__.s = "./src/plugin.webworker.js");\n/******/ })\n/************************************************************************/\n/******/ ({\n\n/***/ "./src/plugin.webworker.js":\n/*!*********************************!*\\\n !*** ./src/plugin.webworker.js ***!\n \\*********************************/\n/*! no exports provided */\n/***/ (function(module, __webpack_exports__, __webpack_require__) {\n\n"use strict";\n__webpack_require__.r(__webpack_exports__);\n/* harmony import */ var _pluginCore_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ./pluginCore.js */ "./src/pluginCore.js");\n/* harmony import */ var _rpc_js__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ./rpc.js */ "./src/rpc.js");\n/* harmony import */ var _utils_js__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! ./utils.js */ "./src/utils.js");\n/**\n * Contains the routines loaded by the plugin Worker under web-browser.\n *\n * Initializes the web environment version of the platform-dependent\n * connection object for the plugin site\n */\n\n\n\n\n// make sure this runs inside a webworker\nif (\n typeof WorkerGlobalScope === "undefined" ||\n !self ||\n !(self instanceof WorkerGlobalScope)\n) {\n throw new Error("This script can only loaded in a webworker");\n}\n\nasync function executeEsModule(content) {\n const dataUri =\n "data:text/javascript;charset=utf-8," + encodeURIComponent(content);\n await import(/* webpackIgnore: true */ dataUri);\n}\n\n/**\n * Connection object provided to the RPC constructor,\n * plugin site implementation for the web-based environment.\n * Global will be then cleared to prevent exposure into the\n * Worker, so we put this local connection object into a closure\n */\nclass Connection extends _utils_js__WEBPACK_IMPORTED_MODULE_2__["MessageEmitter"] {\n constructor(config) {\n super(config && config.debug);\n this.config = config || {};\n }\n connect() {\n self.addEventListener("message", e => {\n this._fire(e.data.type, e.data);\n });\n this.emit({\n type: "initialized",\n config: this.config\n });\n }\n disconnect() {\n this._fire("beforeDisconnect");\n self.close();\n this._fire("disconnected");\n }\n emit(data) {\n let transferables = undefined;\n if (data.__transferables__) {\n transferables = data.__transferables__;\n delete data.__transferables__;\n }\n self.postMessage(data, transferables);\n }\n async execute(code) {\n if (code.type === "requirements") {\n await Object(_utils_js__WEBPACK_IMPORTED_MODULE_2__["loadRequirementsInWebworker"])(code.requirements);\n } else if (code.type === "script") {\n try {\n if (code.attrs.type === "module") {\n await executeEsModule(code.content);\n } else {\n eval(code.content);\n }\n } catch (e) {\n console.error(e.message, e.stack);\n throw e;\n }\n } else {\n throw "unsupported code type.";\n }\n if (code.type === "requirements") {\n self.postMessage({\n type: "cacheRequirements",\n requirements: code.requirements\n });\n }\n }\n}\nconst config = {\n type: "web-worker",\n dedicated_thread: true,\n allow_execution: true,\n lang: "javascript",\n api_version: _rpc_js__WEBPACK_IMPORTED_MODULE_1__["API_VERSION"]\n};\nconst conn = new Connection(config);\nconn.on("connectRPC", data => {\n Object(_pluginCore_js__WEBPACK_IMPORTED_MODULE_0__["connectRPC"])(conn, Object.assign(data.config, config));\n});\nconn.connect();\nself.postMessage({\n type: "worker-ready"\n});\n\n\n/***/ }),\n\n/***/ 
"./src/pluginCore.js":\n/*!***************************!*\\\n !*** ./src/pluginCore.js ***!\n \\***************************/\n/*! exports provided: connectRPC */\n/***/ (function(module, __webpack_exports__, __webpack_require__) {\n\n"use strict";\n__webpack_require__.r(__webpack_exports__);\n/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "connectRPC", function() { return connectRPC; });\n/* harmony import */ var _rpc_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ./rpc.js */ "./src/rpc.js");\n/**\n * Core plugin script loaded into the plugin process/thread.\n *\n * Initializes the plugin-site API global methods.\n */\n\nfunction connectRPC(connection, config) {\n config = config || {};\n const codecs = {};\n const rpc = new _rpc_js__WEBPACK_IMPORTED_MODULE_0__["RPC"](connection, config, codecs);\n rpc.on("getInterface", function () {\n launchConnected();\n });\n rpc.on("remoteReady", function () {\n const api = rpc.getRemote() || {};\n\n api.registerCodec = function (config) {\n if (!config["name"] || !config["encoder"] && !config["decoder"]) {\n throw new Error("Invalid codec format, please make sure you provide a name, type, encoder and decoder.");\n } else {\n if (config.type) {\n for (let k of Object.keys(codecs)) {\n if (codecs[k].type === config.type || k === config.name) {\n delete codecs[k];\n console.warn("Remove duplicated codec: " + k);\n }\n }\n }\n\n codecs[config["name"]] = config;\n }\n };\n\n api.init = function (config) {\n // register a minimal plugin api\n rpc.setInterface({\n setup() {}\n\n }, config);\n };\n\n api.disposeObject = function (obj) {\n rpc.disposeObject(obj);\n };\n\n api.export = function (_interface, config) {\n rpc.setInterface(_interface, config);\n };\n\n api.onLoad = function (handler) {\n handler = checkHandler(handler);\n\n if (connected) {\n handler();\n } else {\n connectedHandlers.push(handler);\n }\n };\n\n api.dispose = function (_interface) {\n rpc.disconnect();\n };\n\n api._rpc = rpc;\n\n if (typeof WorkerGlobalScope !== "undefined" && self instanceof WorkerGlobalScope) {\n self.api = api;\n self.postMessage({\n type: "imjoy_remote_api_ready"\n });\n self.dispatchEvent(new CustomEvent("imjoy_remote_api_ready", {\n detail: api\n }));\n } else if (typeof window) {\n window.dispatchEvent(new CustomEvent("imjoy_remote_api_ready", {\n detail: api\n }));\n }\n });\n let connected = false;\n const connectedHandlers = [];\n\n const launchConnected = function () {\n if (!connected) {\n connected = true;\n let handler;\n\n while (handler = connectedHandlers.pop()) {\n handler();\n }\n }\n };\n\n const checkHandler = function (handler) {\n const type = typeof handler;\n\n if (type !== "function") {\n const msg = "A function may only be subsribed to the event, " + type + " was provided instead";\n throw new Error(msg);\n }\n\n return handler;\n };\n\n return rpc;\n}\n\n/***/ }),\n\n/***/ "./src/rpc.js":\n/*!********************!*\\\n !*** ./src/rpc.js ***!\n \\********************/\n/*! exports provided: API_VERSION, RPC */\n/***/ (function(module, __webpack_exports__, __webpack_require__) {\n\n"use strict";\n__webpack_require__.r(__webpack_exports__);\n/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "API_VERSION", function() { return API_VERSION; });\n/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "RPC", function() { return RPC; });\n/* harmony import */ var _utils_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! 
./utils.js */ "./src/utils.js");\n/**\n * Contains the RPC object used both by the application\n * site, and by each plugin\n */\n\nconst API_VERSION = "0.2.3";\nconst ArrayBufferView = Object.getPrototypeOf(Object.getPrototypeOf(new Uint8Array())).constructor;\n\nfunction _appendBuffer(buffer1, buffer2) {\n const tmp = new Uint8Array(buffer1.byteLength + buffer2.byteLength);\n tmp.set(new Uint8Array(buffer1), 0);\n tmp.set(new Uint8Array(buffer2), buffer1.byteLength);\n return tmp.buffer;\n}\n\nfunction indexObject(obj, is) {\n if (!is) throw new Error("undefined index");\n if (typeof is === "string") return indexObject(obj, is.split("."));else if (is.length === 0) return obj;else return indexObject(obj[is[0]], is.slice(1));\n}\n/**\n * RPC object represents a single site in the\n * communication protocol between the application and the plugin\n *\n * @param {Object} connection a special object allowing to send\n * and receive messages from the opposite site (basically it\n * should only provide send() and onMessage() methods)\n */\n\n\nclass RPC extends _utils_js__WEBPACK_IMPORTED_MODULE_0__["MessageEmitter"] {\n constructor(connection, config, codecs) {\n super(config && config.debug);\n this._connection = connection;\n this.config = config || {};\n this._codecs = codecs || {};\n this._object_store = {};\n this._method_weakmap = new WeakMap();\n this._object_weakmap = new WeakMap();\n this._local_api = null;\n this._remote_set = false; // make sure there is an execute function\n\n const name = this.config.name;\n\n this._connection.execute = this._connection.execute || function () {\n throw new Error(`connection.execute not implemented (in "${name}")`);\n };\n\n this._store = new ReferenceStore();\n this._method_refs = new ReferenceStore();\n\n this._method_refs.onReady(() => {\n this._fire("remoteIdle");\n });\n\n this._method_refs.onBusy(() => {\n this._fire("remoteBusy");\n });\n\n this._setupMessageHanlders();\n }\n\n init() {\n this._connection.emit({\n type: "initialized",\n config: this.config,\n peer_id: this._connection.peer_id\n });\n }\n\n setConfig(config) {\n if (config) for (const k of Object.keys(config)) {\n this.config[k] = config[k];\n }\n }\n /**\n * Set a handler to be called when received a responce from the\n * remote site reporting that the previously provided interface\n * has been successfully set as remote for that site\n *\n * @param {Function} handler\n */\n\n\n getRemoteCallStack() {\n return this._method_refs.getStack();\n }\n /**\n * @returns {Object} set of remote interface methods\n */\n\n\n getRemote() {\n return this._remote_interface;\n }\n /**\n * Sets the interface of this site making it available to the\n * remote site by sending a message with a set of methods names\n *\n * @param {Object} _interface to set\n */\n\n\n setInterface(_interface, config) {\n config = config || {};\n this.config.name = config.name || this.config.name;\n this.config.description = config.description || this.config.description;\n\n if (this.config.forwarding_functions) {\n for (let func_name of this.config.forwarding_functions) {\n const _remote = this._remote_interface;\n\n if (_remote[func_name]) {\n if (_interface.constructor === Object) {\n if (!_interface[func_name]) {\n _interface[func_name] = (...args) => {\n _remote[func_name](...args);\n };\n }\n } else if (_interface.constructor.constructor === Function) {\n if (!_interface.constructor.prototype[func_name]) {\n _interface.constructor.prototype[func_name] = (...args) => {\n _remote[func_name](...args);\n };\n }\n 
}\n }\n }\n }\n\n this._local_api = _interface;\n if (!this._remote_set) this._fire("interfaceAvailable");else this.sendInterface();\n return new Promise(resolve => {\n this.once("interfaceSetAsRemote", resolve);\n });\n }\n /**\n * Sends the actual interface to the remote site upon it was\n * updated or by a special request of the remote site\n */\n\n\n sendInterface() {\n if (!this._local_api) {\n throw new Error("interface is not set.");\n }\n\n this._encode(this._local_api, true).then(api => {\n this._connection.emit({\n type: "setInterface",\n api: api\n });\n });\n }\n\n _disposeObject(objectId) {\n if (this._object_store[objectId]) {\n delete this._object_store[objectId];\n } else {\n throw new Error(`Object (id=${objectId}) not found.`);\n }\n }\n\n disposeObject(obj) {\n return new Promise((resolve, reject) => {\n if (this._object_weakmap.has(obj)) {\n const objectId = this._object_weakmap.get(obj);\n\n this._connection.once("disposed", data => {\n if (data.error) reject(new Error(data.error));else resolve();\n });\n\n this._connection.emit({\n type: "disposeObject",\n object_id: objectId\n });\n } else {\n throw new Error("Invalid object");\n }\n });\n }\n /**\n * Handles a message from the remote site\n */\n\n\n _setupMessageHanlders() {\n this._connection.on("init", this.init);\n\n this._connection.on("execute", data => {\n Promise.resolve(this._connection.execute(data.code)).then(() => {\n this._connection.emit({\n type: "executed"\n });\n }).catch(e => {\n console.error(e);\n\n this._connection.emit({\n type: "executed",\n error: String(e)\n });\n });\n });\n\n this._connection.on("method", async data => {\n let resolve, reject, method, method_this, args, result;\n\n try {\n if (data.promise) {\n [resolve, reject] = await this._unwrap(data.promise, false);\n }\n\n const _interface = this._object_store[data.object_id];\n method = indexObject(_interface, data.name);\n\n if (data.name.includes(".")) {\n const tmp = data.name.split(".");\n const intf_index = tmp.slice(0, tmp.length - 1).join(".");\n method_this = indexObject(_interface, intf_index);\n } else {\n method_this = _interface;\n }\n\n args = await this._unwrap(data.args, true);\n\n if (data.promise) {\n result = method.apply(method_this, args);\n\n if (result instanceof Promise || method.constructor && method.constructor.name === "AsyncFunction") {\n result.then(resolve).catch(reject);\n } else {\n resolve(result);\n }\n } else {\n method.apply(method_this, args);\n }\n } catch (err) {\n console.error(this.config.name, err);\n\n if (reject) {\n reject(err);\n }\n }\n });\n\n this._connection.on("callback", async data => {\n let resolve, reject, method, args, result;\n\n try {\n if (data.promise) {\n [resolve, reject] = await this._unwrap(data.promise, false);\n }\n\n if (data.promise) {\n method = this._store.fetch(data.id);\n args = await this._unwrap(data.args, true);\n\n if (!method) {\n throw new Error("Callback function can only called once, if you want to call a function for multiple times, please make it as a plugin api function. 
See https://imjoy.io/docs for more details.");\n }\n\n result = method.apply(null, args);\n\n if (result instanceof Promise || method.constructor && method.constructor.name === "AsyncFunction") {\n result.then(resolve).catch(reject);\n } else {\n resolve(result);\n }\n } else {\n method = this._store.fetch(data.id);\n args = await this._unwrap(data.args, true);\n\n if (!method) {\n throw new Error("Please notice that callback function can only called once, if you want to call a function for multiple times, please make it as a plugin api function. See https://imjoy.io/docs for more details.");\n }\n\n method.apply(null, args);\n }\n } catch (err) {\n console.error(this.config.name, err);\n\n if (reject) {\n reject(err);\n }\n }\n });\n\n this._connection.on("disposeObject", data => {\n try {\n this._disposeObject(data.object_id);\n\n this._connection.emit({\n type: "disposed"\n });\n } catch (e) {\n console.error(e);\n\n this._connection.emit({\n type: "disposed",\n error: String(e)\n });\n }\n });\n\n this._connection.on("setInterface", data => {\n this._setRemoteInterface(data.api);\n });\n\n this._connection.on("getInterface", () => {\n this._fire("getInterface");\n\n if (this._local_api) {\n this.sendInterface();\n } else {\n this.once("interfaceAvailable", () => {\n this.sendInterface();\n });\n }\n });\n\n this._connection.on("interfaceSetAsRemote", () => {\n this._remote_set = true;\n\n this._fire("interfaceSetAsRemote");\n });\n\n this._connection.on("disconnect", () => {\n this._fire("beforeDisconnect");\n\n this._connection.disconnect();\n\n this._fire("disconnected");\n });\n }\n /**\n * Sends a requests to the remote site asking it to provide its\n * current interface\n */\n\n\n requestRemote() {\n this._connection.emit({\n type: "getInterface"\n });\n }\n\n _ndarray(typedArray, shape, dtype) {\n const _dtype = Object(_utils_js__WEBPACK_IMPORTED_MODULE_0__["typedArrayToDtype"])(typedArray);\n\n if (dtype && dtype !== _dtype) {\n throw "dtype doesn\'t match the type of the array: " + _dtype + " != " + dtype;\n }\n\n shape = shape || [typedArray.length];\n return {\n _rtype: "ndarray",\n _rvalue: typedArray.buffer,\n _rshape: shape,\n _rdtype: _dtype\n };\n }\n /**\n * Sets the new remote interface provided by the other site\n *\n * @param {Array} names list of function names\n */\n\n\n _setRemoteInterface(api) {\n this._decode(api).then(intf => {\n // update existing interface instead of recreating it\n // this will preserve the object reference\n if (this._remote_interface) {\n // clear the interface\n for (let k in this._remote_interface) delete this._remote_interface[k]; // then assign the new interfaces\n\n\n Object.assign(this._remote_interface, intf);\n } else this._remote_interface = intf;\n\n this._fire("remoteReady");\n\n this._reportRemoteSet();\n });\n }\n /**\n * Generates the wrapped function corresponding to a single remote\n * method. When the generated function is called, it will send the\n * corresponding message to the remote site asking it to execute\n * the particular method of its interface\n *\n * @param {String} name of the remote method\n *\n * @returns {Function} wrapped remote method\n */\n\n\n _genRemoteMethod(targetId, name, objectId) {\n const me = this;\n\n const remoteMethod = function () {\n return new Promise(async (resolve, reject) => {\n let id = null;\n\n try {\n id = me._method_refs.put(objectId ? 
objectId + "/" + name : name);\n\n const wrapped_resolve = function () {\n if (id !== null) me._method_refs.fetch(id);\n return resolve.apply(this, arguments);\n };\n\n const wrapped_reject = function () {\n if (id !== null) me._method_refs.fetch(id);\n return reject.apply(this, arguments);\n };\n\n const encodedPromise = await me._wrap([wrapped_resolve, wrapped_reject]); // store the key id for removing them from the reference store together\n\n wrapped_resolve.__promise_pair = encodedPromise[1]._rvalue;\n wrapped_reject.__promise_pair = encodedPromise[0]._rvalue;\n let args = Array.prototype.slice.call(arguments);\n const argLength = args.length; // if the last argument is an object, mark it as kwargs\n\n const withKwargs = argLength > 0 && typeof args[argLength - 1] === "object" && args[argLength - 1] !== null && args[argLength - 1]._rkwargs;\n if (withKwargs) delete args[argLength - 1]._rkwargs;\n\n if (name === "register" || name === "registerService" || name === "register_service" || name === "export" || name === "on") {\n args = await me._wrap(args, true);\n } else {\n args = await me._wrap(args);\n }\n\n const transferables = args.__transferables__;\n if (transferables) delete args.__transferables__;\n\n me._connection.emit({\n type: "method",\n target_id: targetId,\n name: name,\n object_id: objectId,\n args: args,\n promise: encodedPromise,\n with_kwargs: withKwargs\n }, transferables);\n } catch (e) {\n if (id) me._method_refs.fetch(id);\n reject(`Failed to exectue remote method (interface: ${objectId || me.id}, method: ${name}), error: ${e}`);\n }\n });\n };\n\n remoteMethod.__remote_method = true;\n return remoteMethod;\n }\n /**\n * Sends a responce reporting that interface just provided by the\n * remote site was successfully set by this site as remote\n */\n\n\n _reportRemoteSet() {\n this._connection.emit({\n type: "interfaceSetAsRemote"\n });\n }\n /**\n * Prepares the provided set of remote method arguments for\n * sending to the remote site, replaces all the callbacks with\n * identifiers\n *\n * @param {Array} args to wrap\n *\n * @returns {Array} wrapped arguments\n */\n\n\n async _encode(aObject, asInterface, objectId) {\n const aType = typeof aObject;\n\n if (aType === "number" || aType === "string" || aType === "boolean" || aObject === null || aObject === undefined || aObject instanceof ArrayBuffer) {\n return aObject;\n }\n\n let bObject;\n\n if (typeof aObject === "function") {\n if (asInterface) {\n if (!objectId) throw new Error("objectId is not specified.");\n bObject = {\n _rtype: "interface",\n _rtarget_id: this._connection.peer_id,\n _rintf: objectId,\n _rvalue: asInterface\n };\n\n this._method_weakmap.set(aObject, bObject);\n } else if (this._method_weakmap.has(aObject)) {\n bObject = this._method_weakmap.get(aObject);\n } else {\n const cid = this._store.put(aObject);\n\n bObject = {\n _rtype: "callback",\n _rtarget_id: this._connection.peer_id,\n _rname: aObject.constructor && aObject.constructor.name || cid,\n _rvalue: cid\n };\n }\n\n return bObject;\n } // skip if already encoded\n\n\n if (aObject.constructor instanceof Object && aObject._rtype) {\n // make sure the interface functions are encoded\n if (aObject._rintf) {\n const temp = aObject._rtype;\n delete aObject._rtype;\n bObject = await this._encode(aObject, asInterface, objectId);\n bObject._rtype = temp;\n } else {\n bObject = aObject;\n }\n\n return bObject;\n }\n\n const transferables = [];\n const _transfer = aObject._transfer;\n const isarray = Array.isArray(aObject);\n\n for (let tp of 
Object.keys(this._codecs)) {\n const codec = this._codecs[tp];\n\n if (codec.encoder && aObject instanceof codec.type) {\n // TODO: what if multiple encoders found\n let encodedObj = await Promise.resolve(codec.encoder(aObject));\n if (encodedObj && !encodedObj._rtype) encodedObj._rtype = codec.name; // encode the functions in the interface object\n\n if (encodedObj && encodedObj._rintf) {\n const temp = encodedObj._rtype;\n delete encodedObj._rtype;\n encodedObj = await this._encode(encodedObj, asInterface, objectId);\n encodedObj._rtype = temp;\n }\n\n bObject = encodedObj;\n return bObject;\n }\n }\n\n if (\n /*global tf*/\n typeof tf !== "undefined" && tf.Tensor && aObject instanceof tf.Tensor) {\n const v_buffer = aObject.dataSync();\n\n if (aObject._transfer || _transfer) {\n transferables.push(v_buffer.buffer);\n delete aObject._transfer;\n }\n\n bObject = {\n _rtype: "ndarray",\n _rvalue: v_buffer.buffer,\n _rshape: aObject.shape,\n _rdtype: aObject.dtype\n };\n } else if (\n /*global nj*/\n typeof nj !== "undefined" && nj.NdArray && aObject instanceof nj.NdArray) {\n const dtype = Object(_utils_js__WEBPACK_IMPORTED_MODULE_0__["typedArrayToDtype"])(aObject.selection.data);\n\n if (aObject._transfer || _transfer) {\n transferables.push(aObject.selection.data.buffer);\n delete aObject._transfer;\n }\n\n bObject = {\n _rtype: "ndarray",\n _rvalue: aObject.selection.data.buffer,\n _rshape: aObject.shape,\n _rdtype: dtype\n };\n } else if (aObject instanceof Error) {\n console.error(aObject);\n bObject = {\n _rtype: "error",\n _rvalue: aObject.toString()\n };\n } else if (typeof File !== "undefined" && aObject instanceof File) {\n bObject = {\n _rtype: "file",\n _rvalue: aObject,\n _rpath: aObject._path || aObject.webkitRelativePath\n };\n } // send objects supported by structure clone algorithm\n // https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Structured_clone_algorithm\n else if (aObject !== Object(aObject) || aObject instanceof Boolean || aObject instanceof String || aObject instanceof Date || aObject instanceof RegExp || aObject instanceof ImageData || typeof FileList !== "undefined" && aObject instanceof FileList || typeof FileSystemDirectoryHandle !== "undefined" && aObject instanceof FileSystemDirectoryHandle || typeof FileSystemFileHandle !== "undefined" && aObject instanceof FileSystemFileHandle || typeof FileSystemHandle !== "undefined" && aObject instanceof FileSystemHandle || typeof FileSystemWritableFileStream !== "undefined" && aObject instanceof FileSystemWritableFileStream) {\n bObject = aObject; // TODO: avoid object such as DynamicPlugin instance.\n } else if (typeof File !== "undefined" && aObject instanceof File) {\n bObject = {\n _rtype: "file",\n _rname: aObject.name,\n _rmime: aObject.type,\n _rvalue: aObject,\n _rpath: aObject._path || aObject.webkitRelativePath\n };\n } else if (aObject instanceof Blob) {\n bObject = {\n _rtype: "blob",\n _rvalue: aObject\n };\n } else if (aObject instanceof ArrayBufferView) {\n if (aObject._transfer || _transfer) {\n transferables.push(aObject.buffer);\n delete aObject._transfer;\n }\n\n const dtype = Object(_utils_js__WEBPACK_IMPORTED_MODULE_0__["typedArrayToDtype"])(aObject);\n bObject = {\n _rtype: "typedarray",\n _rvalue: aObject.buffer,\n _rdtype: dtype\n };\n } else if (aObject instanceof DataView) {\n if (aObject._transfer || _transfer) {\n transferables.push(aObject.buffer);\n delete aObject._transfer;\n }\n\n bObject = {\n _rtype: "memoryview",\n _rvalue: aObject.buffer\n };\n } else if (aObject 
instanceof Set) {\n bObject = {\n _rtype: "set",\n _rvalue: await this._encode(Array.from(aObject), asInterface)\n };\n } else if (aObject instanceof Map) {\n bObject = {\n _rtype: "orderedmap",\n _rvalue: await this._encode(Array.from(aObject), asInterface)\n };\n } else if (aObject.constructor instanceof Object || Array.isArray(aObject)) {\n bObject = isarray ? [] : {};\n let keys; // an object/array\n\n if (aObject.constructor === Object || Array.isArray(aObject)) {\n keys = Object.keys(aObject);\n } // a class\n else if (aObject.constructor === Function) {\n throw new Error("Please instantiate the class before exportting it.");\n } // instance of a class\n else if (aObject.constructor.constructor === Function) {\n keys = Object.getOwnPropertyNames(Object.getPrototypeOf(aObject)).concat(Object.keys(aObject)); // TODO: use a proxy object to represent the actual object\n // always encode class instance as interface\n\n asInterface = true;\n } else {\n throw Error("Unsupported interface type");\n }\n\n let hasFunction = false; // encode interfaces\n\n if (aObject._rintf || asInterface) {\n if (!objectId) {\n if (typeof aObject._rintf === "string" && aObject._rintf.length > 0) {\n objectId = aObject._rintf; // enable custom object id\n } else {\n objectId = Object(_utils_js__WEBPACK_IMPORTED_MODULE_0__["randId"])();\n } // Note: object with the same id will be overwritten\n\n\n if (this._object_store[objectId]) console.warn(`Overwritting interface object with the same id: ${objectId}`);\n this._object_store[objectId] = aObject;\n }\n\n for (let k of keys) {\n if (k === "constructor") continue;\n\n if (k.startsWith("_")) {\n continue;\n }\n\n bObject[k] = await this._encode(aObject[k], typeof asInterface === "string" ? asInterface + "." + k : k, objectId);\n\n if (typeof aObject[k] === "function") {\n hasFunction = true;\n }\n } // object id for dispose the object remotely\n\n\n if (hasFunction) bObject._rintf = objectId; // remove interface when closed\n\n if (aObject.on && typeof aObject.on === "function") {\n aObject.on("close", () => {\n delete this._object_store[objectId];\n });\n }\n } else {\n for (let k of keys) {\n if (["hasOwnProperty", "constructor"].includes(k)) continue;\n bObject[k] = await this._encode(aObject[k]);\n }\n } // for example, browserFS object\n\n } else if (typeof aObject === "object") {\n const keys = Object.getOwnPropertyNames(Object.getPrototypeOf(aObject)).concat(Object.keys(aObject));\n const objectId = Object(_utils_js__WEBPACK_IMPORTED_MODULE_0__["randId"])();\n\n for (let k of keys) {\n if (["hasOwnProperty", "constructor"].includes(k)) continue; // encode as interface\n\n bObject[k] = await this._encode(aObject[k], k, bObject);\n } // object id, used for dispose the object\n\n\n bObject._rintf = objectId;\n } else {\n throw "imjoy-rpc: Unsupported data type:" + aObject;\n }\n\n if (transferables.length > 0) {\n bObject.__transferables__ = transferables;\n }\n\n if (!bObject) {\n throw new Error("Failed to encode object");\n }\n\n return bObject;\n }\n\n async _decode(aObject, withPromise) {\n if (!aObject) {\n return aObject;\n }\n\n let bObject;\n\n if (aObject["_rtype"]) {\n if (this._codecs[aObject._rtype] && this._codecs[aObject._rtype].decoder) {\n if (aObject._rintf) {\n const temp = aObject._rtype;\n delete aObject._rtype;\n aObject = await this._decode(aObject, withPromise);\n aObject._rtype = temp;\n }\n\n bObject = await Promise.resolve(this._codecs[aObject._rtype].decoder(aObject));\n } else if (aObject._rtype === "callback") {\n bObject = 
this._genRemoteCallback(aObject._rtarget_id, aObject._rvalue, withPromise);\n } else if (aObject._rtype === "interface") {\n bObject = this._genRemoteMethod(aObject._rtarget_id, aObject._rvalue, aObject._rintf);\n } else if (aObject._rtype === "ndarray") {\n /*global nj tf*/\n //create build array/tensor if used in the plugin\n if (typeof nj !== "undefined" && nj.array) {\n if (Array.isArray(aObject._rvalue)) {\n aObject._rvalue = aObject._rvalue.reduce(_appendBuffer);\n }\n\n bObject = nj.array(new Uint8(aObject._rvalue), aObject._rdtype).reshape(aObject._rshape);\n } else if (typeof tf !== "undefined" && tf.Tensor) {\n if (Array.isArray(aObject._rvalue)) {\n aObject._rvalue = aObject._rvalue.reduce(_appendBuffer);\n }\n\n const arraytype = _utils_js__WEBPACK_IMPORTED_MODULE_0__["dtypeToTypedArray"][aObject._rdtype];\n bObject = tf.tensor(new arraytype(aObject._rvalue), aObject._rshape, aObject._rdtype);\n } else {\n //keep it as regular if transfered to the main app\n bObject = aObject;\n }\n } else if (aObject._rtype === "error") {\n bObject = new Error(aObject._rvalue);\n } else if (aObject._rtype === "file") {\n if (aObject._rvalue instanceof File) {\n bObject = aObject._rvalue; //patch _path\n\n bObject._path = aObject._rpath;\n } else {\n bObject = new File([aObject._rvalue], aObject._rname, {\n type: aObject._rmime\n });\n bObject._path = aObject._rpath;\n }\n } else if (aObject._rtype === "typedarray") {\n const arraytype = _utils_js__WEBPACK_IMPORTED_MODULE_0__["dtypeToTypedArray"][aObject._rdtype];\n if (!arraytype) throw new Error("unsupported dtype: " + aObject._rdtype);\n bObject = new arraytype(aObject._rvalue);\n } else if (aObject._rtype === "memoryview") {\n bObject = new DataView(aObject._rvalue);\n } else if (aObject._rtype === "blob") {\n if (aObject._rvalue instanceof Blob) {\n bObject = aObject._rvalue;\n } else {\n bObject = new Blob([aObject._rvalue], {\n type: aObject._rmime\n });\n }\n } else if (aObject._rtype === "orderedmap") {\n bObject = new Map(await this._decode(aObject._rvalue, withPromise));\n } else if (aObject._rtype === "set") {\n bObject = new Set(await this._decode(aObject._rvalue, withPromise));\n } else {\n // make sure all the interface functions are decoded\n if (aObject._rintf) {\n const temp = aObject._rtype;\n delete aObject._rtype;\n bObject = await this._decode(aObject, withPromise);\n bObject._rtype = temp;\n } else bObject = aObject;\n }\n } else if (aObject.constructor === Object || Array.isArray(aObject)) {\n const isarray = Array.isArray(aObject);\n bObject = isarray ? 
[] : {};\n\n for (let k of Object.keys(aObject)) {\n if (isarray || aObject.hasOwnProperty(k)) {\n const v = aObject[k];\n bObject[k] = await this._decode(v, withPromise);\n }\n }\n } else {\n bObject = aObject;\n }\n\n if (bObject === undefined) {\n throw new Error("Failed to decode object");\n } // store the object id for dispose\n\n\n if (aObject._rintf) {\n this._object_weakmap.set(bObject, aObject._rintf);\n }\n\n return bObject;\n }\n\n async _wrap(args, asInterface) {\n return await this._encode(args, asInterface);\n }\n /**\n * Unwraps the set of arguments delivered from the remote site,\n * replaces all callback identifiers with a function which will\n * initiate sending that callback identifier back to other site\n *\n * @param {Object} args to unwrap\n *\n * @param {Boolean} withPromise is true means this the callback should contain a promise\n *\n * @returns {Array} unwrapped args\n */\n\n\n async _unwrap(args, withPromise) {\n return await this._decode(args, withPromise);\n }\n /**\n * Generates the wrapped function corresponding to a single remote\n * callback. When the generated function is called, it will send\n * the corresponding message to the remote site asking it to\n * execute the particular callback previously saved during a call\n * by the remote site a method from the interface of this site\n *\n * @param {Number} id of the remote callback to execute\n * @param {Number} argNum argument index of the callback\n * @param {Boolean} withPromise is true means this the callback should contain a promise\n *\n * @returns {Function} wrapped remote callback\n */\n\n\n _genRemoteCallback(targetId, cid, withPromise) {\n const me = this;\n let remoteCallback;\n\n if (withPromise) {\n remoteCallback = function () {\n return new Promise(async (resolve, reject) => {\n const args = await me._wrap(Array.prototype.slice.call(arguments));\n const argLength = args.length; // if the last argument is an object, mark it as kwargs\n\n const withKwargs = argLength > 0 && typeof args[argLength - 1] === "object" && args[argLength - 1] !== null && args[argLength - 1]._rkwargs;\n if (withKwargs) delete args[argLength - 1]._rkwargs;\n const transferables = args.__transferables__;\n if (transferables) delete args.__transferables__;\n const encodedPromise = await me._wrap([resolve, reject]); // store the key id for removing them from the reference store together\n\n resolve.__promise_pair = encodedPromise[1]._rvalue;\n reject.__promise_pair = encodedPromise[0]._rvalue;\n\n try {\n me._connection.emit({\n type: "callback",\n target_id: targetId,\n id: cid,\n args: args,\n promise: encodedPromise,\n with_kwargs: withKwargs\n }, transferables);\n } catch (e) {\n reject(`Failed to exectue remote callback ( id: ${cid}).`);\n }\n });\n };\n\n return remoteCallback;\n } else {\n remoteCallback = async function () {\n const args = await me._wrap(Array.prototype.slice.call(arguments));\n const argLength = args.length; // if the last argument is an object, mark it as kwargs\n\n const withKwargs = argLength > 0 && typeof args[argLength - 1] === "object" && args[argLength - 1] !== null && args[argLength - 1]._rkwargs;\n if (withKwargs) delete args[argLength - 1]._rkwargs;\n const transferables = args.__transferables__;\n if (transferables) delete args.__transferables__;\n return me._connection.emit({\n type: "callback",\n target_id: targetId,\n id: cid,\n args: args,\n with_kwargs: withKwargs\n }, transferables);\n };\n\n return remoteCallback;\n }\n }\n\n reset() {\n this._event_handlers = {};\n 
this._once_handlers = {};\n this._remote_interface = null;\n this._object_store = {};\n this._method_weakmap = new WeakMap();\n this._object_weakmap = new WeakMap();\n this._local_api = null;\n this._store = new ReferenceStore();\n this._method_refs = new ReferenceStore();\n }\n /**\n * Sends the notification message and breaks the connection\n */\n\n\n disconnect() {\n this._connection.emit({\n type: "disconnect"\n });\n\n this.reset();\n setTimeout(() => {\n this._connection.disconnect();\n }, 2000);\n }\n\n}\n/**\n * ReferenceStore is a special object which stores other objects\n * and provides the references (number) instead. This reference\n * may then be sent over a json-based communication channel (IPC\n * to another Node.js process or a message to the Worker). Other\n * site may then provide the reference in the responce message\n * implying the given object should be activated.\n *\n * Primary usage for the ReferenceStore is a storage for the\n * callbacks, which therefore makes it possible to initiate a\n * callback execution by the opposite site (which normally cannot\n * directly execute functions over the communication channel).\n *\n * Each stored object can only be fetched once and is not\n * available for the second time. Each stored object must be\n * fetched, since otherwise it will remain stored forever and\n * consume memory.\n *\n * Stored object indeces are simply the numbers, which are however\n * released along with the objects, and are later reused again (in\n * order to postpone the overflow, which should not likely happen,\n * but anyway).\n */\n\nclass ReferenceStore {\n constructor() {\n this._store = {}; // stored object\n\n this._indices = [0]; // smallest available indices\n\n this._readyHandler = function () {};\n\n this._busyHandler = function () {};\n\n this._readyHandler();\n }\n /**\n * call handler when the store is empty\n *\n * @param {FUNCTION} id of a handler\n */\n\n\n onReady(readyHandler) {\n this._readyHandler = readyHandler || function () {};\n }\n /**\n * call handler when the store is not empty\n *\n * @param {FUNCTION} id of a handler\n */\n\n\n onBusy(busyHandler) {\n this._busyHandler = busyHandler || function () {};\n }\n /**\n * get the length of the store\n *\n */\n\n\n getStack() {\n return Object.keys(this._store).length;\n }\n /**\n * @function _genId() generates the new reference id\n *\n * @returns {Number} smallest available id and reserves it\n */\n\n\n _genId() {\n let id;\n\n if (this._indices.length === 1) {\n id = this._indices[0]++;\n } else {\n id = this._indices.shift();\n }\n\n return id;\n }\n /**\n * Releases the given reference id so that it will be available by\n * another object stored\n *\n * @param {Number} id to release\n */\n\n\n _releaseId(id) {\n for (let i = 0; i < this._indices.length; i++) {\n if (id < this._indices[i]) {\n this._indices.splice(i, 0, id);\n\n break;\n }\n } // cleaning-up the sequence tail\n\n\n for (let i = this._indices.length - 1; i >= 0; i--) {\n if (this._indices[i] - 1 === this._indices[i - 1]) {\n this._indices.pop();\n } else {\n break;\n }\n }\n }\n /**\n * Stores the given object and returns the refernce id instead\n *\n * @param {Object} obj to store\n *\n * @returns {Number} reference id of the stored object\n */\n\n\n put(obj) {\n if (this._busyHandler && Object.keys(this._store).length === 0) {\n this._busyHandler();\n }\n\n const id = this._genId();\n\n this._store[id] = obj;\n return id;\n }\n /**\n * Retrieves previously stored object and releases its reference\n *\n * 
@param {Number} id of an object to retrieve\n */\n\n\n fetch(id) {\n const obj = this._store[id];\n\n if (obj && !obj.__remote_method) {\n delete this._store[id];\n\n this._releaseId(id);\n\n if (this._readyHandler && Object.keys(this._store).length === 0) {\n this._readyHandler();\n }\n }\n\n if (obj && obj.__promise_pair) {\n this.fetch(obj.__promise_pair);\n }\n\n return obj;\n }\n\n}\n\n/***/ }),\n\n/***/ "./src/utils.js":\n/*!**********************!*\\\n !*** ./src/utils.js ***!\n \\**********************/\n/*! exports provided: randId, dtypeToTypedArray, loadRequirementsInWindow, loadRequirementsInWebworker, loadRequirements, normalizeConfig, typedArrayToDtypeMapping, typedArrayToDtype, cacheRequirements, setupServiceWorker, urlJoin, MessageEmitter */\n/***/ (function(module, __webpack_exports__, __webpack_require__) {\n\n"use strict";\n__webpack_require__.r(__webpack_exports__);\n/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "randId", function() { return randId; });\n/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "dtypeToTypedArray", function() { return dtypeToTypedArray; });\n/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "loadRequirementsInWindow", function() { return loadRequirementsInWindow; });\n/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "loadRequirementsInWebworker", function() { return loadRequirementsInWebworker; });\n/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "loadRequirements", function() { return loadRequirements; });\n/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "normalizeConfig", function() { return normalizeConfig; });\n/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "typedArrayToDtypeMapping", function() { return typedArrayToDtypeMapping; });\n/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "typedArrayToDtype", function() { return typedArrayToDtype; });\n/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "cacheRequirements", function() { return cacheRequirements; });\n/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "setupServiceWorker", function() { return setupServiceWorker; });\n/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "urlJoin", function() { return urlJoin; });\n/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "MessageEmitter", function() { return MessageEmitter; });\nfunction randId() {\n return Math.random().toString(36).substr(2, 10) + new Date().getTime();\n}\nconst dtypeToTypedArray = {\n int8: Int8Array,\n int16: Int16Array,\n int32: Int32Array,\n uint8: Uint8Array,\n uint16: Uint16Array,\n uint32: Uint32Array,\n float32: Float32Array,\n float64: Float64Array,\n array: Array\n};\nasync function loadRequirementsInWindow(requirements) {\n function _importScript(url) {\n //url is URL of external file, implementationCode is the code\n //to be called from the file, location is the location to\n //insert the \n *\n * @example\n * // Image from matrix of values\n * const [min, max] = d3.extent(temperatures)\n * const colorScaler = d3.scaleSequential([min, max], d3.interpolateRdYlBu);\n *\n * // size = rows * columns * channels\n * const data = new Uint8Array(2*3*3);\n * for (let i = 0; i < temperatures.length; i++) {\n * const {r, g, b} = d3.rgb(colorScaler(temperatures[i]));\n * data[i*3] = r;\n * data[i*3 + 1] = g;\n * data[i*3 + 2] = 
b;\n * }\n *\n * const image = new Image(2, 3, data, { kind: 'RGB' });\n * // or\n * const image = new Image({ width: 2, height: 3, data, kind: 'RGB'});\n */\nexport default class Image {\n constructor(width, height, data, options) {\n if (arguments.length === 1) {\n options = width;\n ({ width, height, data } = options);\n } else if (data && !data.length) {\n options = data;\n ({ data } = options);\n }\n if (width === undefined) width = 1;\n if (height === undefined) height = 1;\n if (options === undefined) options = {};\n\n if (typeof options !== 'object' || options === null) {\n throw new TypeError('options must be an object');\n }\n\n if (!Number.isInteger(width) || width <= 0) {\n throw new RangeError('width must be a positive integer');\n }\n if (!Number.isInteger(height) || height <= 0) {\n throw new RangeError('height must be a positive integer');\n }\n\n const { kind = RGBA } = options;\n if (typeof kind !== 'string') {\n throw new TypeError('kind must be a string');\n }\n const theKind = getKind(kind);\n const kindDefinition = Object.assign({}, options);\n for (const prop in theKind) {\n if (kindDefinition[prop] === undefined) {\n kindDefinition[prop] = theKind[prop];\n }\n }\n verifyKindDefinition(kindDefinition);\n\n const { components, bitDepth, colorModel } = kindDefinition;\n const alpha = kindDefinition.alpha + 0;\n const size = width * height;\n const channels = components + alpha;\n const maxValue = bitDepth === 32 ? Number.MAX_VALUE : 2 ** bitDepth - 1;\n\n if (data === undefined) {\n data = createPixelArray(\n size,\n components,\n alpha,\n channels,\n bitDepth,\n maxValue,\n );\n } else {\n const expectedLength = getTheoreticalPixelArraySize(\n size,\n channels,\n bitDepth,\n );\n if (data.length !== expectedLength) {\n throw new RangeError(\n `incorrect data size: ${data.length}. Should be ${expectedLength}`,\n );\n }\n }\n\n /**\n * Width of the image.\n * @member {number}\n */\n this.width = width;\n\n /**\n * Height of the image.\n * @member {number}\n */\n this.height = height;\n\n /**\n * Typed array holding the image data.\n * @member {TypedArray}\n */\n this.data = data;\n\n /**\n * Total number of pixels (width * height).\n * @member {number}\n */\n this.size = size;\n\n /**\n * Number of color channels in the image.\n * A grey image has 1 component. An RGB image has 3 components.\n * @member {number}\n */\n this.components = components;\n\n /**\n * Alpha is 1 if there is an alpha channel, 0 otherwise.\n * @member {number}\n */\n this.alpha = alpha;\n\n /**\n * Number of bits per value in each channel.\n * @member {number}\n */\n this.bitDepth = bitDepth;\n\n /**\n * Maximum value that a pixel can have.\n * @member {number}\n */\n this.maxValue = maxValue;\n\n /**\n * Color model of the image.\n * @member {ColorModel}\n */\n this.colorModel = colorModel;\n\n /**\n * Total number of channels. 
Is equal to `image.components + image.alpha`.\n * @member {number}\n */\n this.channels = channels;\n\n /**\n * Metadata associated with the image.\n * @member {object}\n */\n this.meta = options.meta || {};\n\n // TODO review those props\n Object.defineProperty(this, 'parent', {\n enumerable: false,\n writable: true,\n configurable: true,\n value: options.parent || null,\n });\n this.position = options.position || [0, 0];\n\n this.computed = null;\n this.sizes = [this.width, this.height];\n this.multiplierX = this.channels;\n this.multiplierY = this.channels * this.width;\n this.isClamped = this.bitDepth < 32;\n this.borderSizes = [0, 0]; // when a filter creates a border, it may have impact on future processing like Roi\n }\n\n get [Symbol.toStringTag]() {\n return 'IJSImage';\n }\n\n static isImage(object) {\n return objectToString.call(object) === '[object IJSImage]';\n }\n\n /**\n * Creates an image from an HTML Canvas object\n * @param {Canvas} canvas\n * @return {Image}\n */\n static fromCanvas(canvas) {\n const ctx = canvas.getContext('2d');\n const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);\n return new Image(imageData.width, imageData.height, imageData.data);\n }\n\n /**\n * Create a new Image based on the characteristics of another one.\n * @param {Image} other\n * @param {object} [options] - Override options to change some parameters\n * @return {Image}\n * @example\n * const newImage = Image.createFrom(image, { width: 100 });\n */\n static createFrom(other, options) {\n const newOptions = getImageParameters(other);\n Object.assign(\n newOptions,\n {\n parent: other,\n position: [0, 0],\n },\n options,\n );\n return new Image(newOptions);\n }\n\n /**\n * Create a new manager for regions of interest based on the current image.\n * @param {object} [options]\n * @return {RoiManager}\n */\n getRoiManager(options) {\n return new RoiManager(this, options);\n }\n\n /**\n * Create a copy a the current image, including its data.\n * @instance\n * @return {Image}\n */\n clone() {\n const newData = this.data.slice();\n return new Image(this.width, this.height, newData, this);\n }\n\n apply(filter) {\n for (let y = 0; y < this.height; y++) {\n for (let x = 0; x < this.width; x++) {\n let index = (y * this.width + x) * this.channels;\n filter.call(this, index);\n }\n }\n }\n}\n\nvalueMethods(Image);\nbitMethods(Image);\nexportMethods(Image);\n\nImage.prototype.checkProcessable = checkProcessable;\nImage.prototype.getRGBAData = getRGBAData;\n\nImage.load = load;\nImage.extendMethod = extendMethod;\nImage.extendProperty = extendProperty;\nextend(Image);\n","/**\n * This method checks if a process can be applied on the current image\n * @memberof Image\n * @instance\n * @param {string} processName\n * @param {object} [options]\n */\nexport default function checkProcessable(processName, options = {}) {\n let { bitDepth, alpha, colorModel, components, channels } = options;\n if (typeof processName !== 'string' || processName.length === 0) {\n throw new TypeError('processName must be a string');\n }\n if (bitDepth) {\n if (!Array.isArray(bitDepth)) {\n bitDepth = [bitDepth];\n }\n if (!bitDepth.includes(this.bitDepth)) {\n throw new TypeError(\n `The process: ${processName} can only be applied if bit depth is in: ${bitDepth}`,\n );\n }\n }\n if (alpha) {\n if (!Array.isArray(alpha)) {\n alpha = [alpha];\n }\n if (!alpha.includes(this.alpha)) {\n throw new TypeError(\n `The process: ${processName} can only be applied if alpha is in: ${alpha}`,\n );\n }\n }\n if (colorModel) 
{\n if (!Array.isArray(colorModel)) {\n colorModel = [colorModel];\n }\n if (!colorModel.includes(this.colorModel)) {\n throw new TypeError(\n `The process: ${processName} can only be applied if color model is in: ${colorModel}`,\n );\n }\n }\n if (components) {\n if (!Array.isArray(components)) {\n components = [components];\n }\n if (!components.includes(this.components)) {\n let errorMessage = `The process: ${processName} can only be applied if the number of components is in: ${components}`;\n if (components.length === 1 && components[0] === 1) {\n throw new TypeError(\n `${errorMessage}.\\rYou should transform your image using \"image.grey()\" before applying the algorithm.`,\n );\n } else {\n throw new TypeError(errorMessage);\n }\n }\n }\n if (channels) {\n if (!Array.isArray(channels)) {\n channels = [channels];\n }\n if (!channels.includes(this.channels)) {\n throw new TypeError(\n `The process: ${processName} can only be applied if the number of channels is in: ${channels}`,\n );\n }\n }\n}\n","import { RGB } from '../model/model';\n\n/**\n * Retrieve the data of the current image as RGBA 8 bits\n * The source image may be:\n * * a mask (binary image)\n * * a grey image (8 16 bits) with or without alpha channel\n * * a color image (8 or 16 bits) with or without alpha channel in with RGB model\n * @instance\n * @memberof Image\n * @param {object} [options]\n * @param {boolean} [options.clamped] - If true, the function will return a Uint8ClampedArray\n * @return {Uint8Array|Uint8ClampedArray} - Array with the data\n */\nexport default function getRGBAData(options = {}) {\n const { clamped } = options;\n this.checkProcessable('getRGBAData', {\n components: [1, 3],\n bitDepth: [1, 8, 16, 32],\n });\n const arrayLength = this.width * this.height * 4;\n let newData = clamped\n ? 
new Uint8ClampedArray(arrayLength)\n : new Uint8Array(arrayLength);\n if (this.bitDepth === 1) {\n fillDataFromBinary(this, newData);\n } else if (this.bitDepth === 32) {\n this.checkProcessable('getRGBAData', { alpha: 0 });\n if (this.components === 1) {\n fillDataFromGrey32(this, newData);\n } else if (this.components === 3) {\n this.checkProcessable('getRGBAData', { colorModel: [RGB] });\n fillDataFromRGB32(this, newData);\n }\n } else {\n if (this.components === 1) {\n fillDataFromGrey(this, newData);\n } else if (this.components === 3) {\n this.checkProcessable('getRGBAData', { colorModel: [RGB] });\n fillDataFromRGB(this, newData);\n }\n }\n if (this.alpha === 1) {\n this.checkProcessable('getRGBAData', { bitDepth: [8, 16] });\n copyAlpha(this, newData);\n } else {\n fillAlpha(this, newData);\n }\n return newData;\n}\n\nfunction fillDataFromBinary(image, newData) {\n for (let i = 0; i < image.size; i++) {\n const value = image.getBit(i);\n newData[i * 4] = value * 255;\n newData[i * 4 + 1] = value * 255;\n newData[i * 4 + 2] = value * 255;\n }\n}\n\nfunction fillDataFromGrey32(image, newData) {\n const min = image.min[0];\n const max = image.max[0];\n const range = max - min;\n for (let i = 0; i < image.size; i++) {\n const val = Math.floor((255 * (image.data[i] - min)) / range);\n newData[i * 4] = val;\n newData[i * 4 + 1] = val;\n newData[i * 4 + 2] = val;\n }\n}\n\nfunction fillDataFromRGB32(image, newData) {\n const min = Math.min(...image.min);\n const max = Math.max(...image.max);\n const range = max - min;\n for (let i = 0; i < image.size; i++) {\n const val1 = Math.floor((255 * (image.data[i * 3] - min)) / range);\n const val2 = Math.floor((255 * (image.data[i * 3 + 1] - min)) / range);\n const val3 = Math.floor((255 * (image.data[i * 3 + 2] - min)) / range);\n newData[i * 4] = val1;\n newData[i * 4 + 1] = val2;\n newData[i * 4 + 2] = val3;\n }\n}\n\nfunction fillDataFromGrey(image, newData) {\n for (let i = 0; i < image.size; i++) {\n newData[i * 4] = image.data[i * image.channels] >>> (image.bitDepth - 8);\n newData[i * 4 + 1] =\n image.data[i * image.channels] >>> (image.bitDepth - 8);\n newData[i * 4 + 2] =\n image.data[i * image.channels] >>> (image.bitDepth - 8);\n }\n}\n\nfunction fillDataFromRGB(image, newData) {\n for (let i = 0; i < image.size; i++) {\n newData[i * 4] = image.data[i * image.channels] >>> (image.bitDepth - 8);\n newData[i * 4 + 1] =\n image.data[i * image.channels + 1] >>> (image.bitDepth - 8);\n newData[i * 4 + 2] =\n image.data[i * image.channels + 2] >>> (image.bitDepth - 8);\n }\n}\n\nfunction copyAlpha(image, newData) {\n for (let i = 0; i < image.size; i++) {\n newData[i * 4 + 3] =\n image.data[i * image.channels + image.components] >> (image.bitDepth - 8);\n }\n}\n\nfunction fillAlpha(image, newData) {\n for (let i = 0; i < image.size; i++) {\n newData[i * 4 + 3] = 255;\n }\n}\n","/* eslint-disable import/order */\n\n// filters\nimport absFilter from './filter/abs';\nimport invertFilter from './filter/invert';\nimport flipXFilter from './filter/flipX';\nimport flipYFilter from './filter/flipY';\nimport blurFilter from './filter/blurFilter';\nimport medianFilter from './filter/medianFilter';\nimport gaussianFilter from './filter/gaussianFilter';\nimport sobelFilter from './filter/sobelFilter';\nimport scharrFilter from './filter/scharrFilter';\nimport gradientFilter from './filter/gradientFilter';\nimport levelFilter from './filter/level';\nimport addFilter from './filter/add';\nimport subtractFilter from './filter/subtract';\nimport 
subtractImage from './filter/subtractImage';\nimport hypotenuseFilter from './filter/hypotenuse';\nimport multiplyFilter from './filter/multiply';\nimport divideFilter from './filter/divide';\nimport backgroundFilter from './filter/background';\n\n// morphology transforms\nimport dilate from './morphology/dilate';\nimport erode from './morphology/erode';\nimport open from './morphology/open';\nimport close from './morphology/close';\nimport topHat from './morphology/topHat';\nimport blackHat from './morphology/blackHat';\nimport morphologicalGradient from './morphology/morphologicalGradient';\n\n// transforms\nimport warpingFourPoints from './transform/warping';\nimport crop from './transform/crop';\nimport cropAlpha from './transform/cropAlpha';\nimport resize from './transform/resize/resize';\nimport hsv from './transform/hsv';\nimport hsl from './transform/hsl';\nimport cmyk from './transform/cmyk';\nimport rgba8 from './transform/rgba8';\nimport grey from './transform/grey';\nimport mask from './transform/mask/mask';\nimport pad from './transform/pad';\nimport colorDepth from './transform/colorDepth';\nimport { rotate, rotateLeft, rotateRight } from './transform/rotate';\nimport insert from './transform/insert';\n\n// utility\nimport setBorder from './utility/setBorder';\nimport split from './utility/split';\nimport getChannel from './utility/getChannel';\nimport combineChannels from './utility/combineChannels';\nimport setChannel from './utility/setChannel';\nimport getSimilarity from './utility/getSimilarity';\nimport getPixelsGrid from './utility/getPixelsGrid';\nimport getBestMatch from './utility/getBestMatch';\nimport getRow from './utility/getRow';\nimport getColumn from './utility/getColumn';\nimport getMatrix from './utility/getMatrix';\nimport setMatrix from './utility/setMatrix';\nimport getPixelsArray from './utility/getPixelsArray';\nimport getIntersection from './utility/getIntersection';\nimport getClosestCommonParent from './utility/getClosestCommonParent';\nimport getThreshold from './utility/getThreshold';\n\n// operators\nimport cannyEdge from './operator/cannyEdge';\nimport convolution from './operator/convolution';\nimport extract from './operator/extract';\nimport floodFill from './operator/floodFill';\nimport paintLabels from './operator/paintLabels';\nimport paintMasks from './operator/paintMasks';\nimport paintPoints from './operator/paintPoints';\nimport paintPolyline from './operator/paintPolyline';\nimport paintPolylines from './operator/paintPolylines';\nimport paintPolygon from './operator/paintPolygon';\nimport paintPolygons from './operator/paintPolygons';\n\n// computers\nimport { getHistogram, getHistograms } from './compute/histogram';\nimport getColorHistogram from './compute/colorHistogram';\nimport getMin from './compute/min';\nimport getMax from './compute/max';\nimport getSum from './compute/sum';\nimport getMoment from './compute/moment';\nimport getLocalMaxima from './compute/localMaxima';\nimport getMean from './compute/mean';\nimport getMedian from './compute/median';\nimport getPoints from './compute/points';\nimport getExtendedPoints from './compute/extendedPoints';\nimport getRelativePosition from './compute/relativePosition';\nimport countAlphaPixels from './compute/countAlphaPixels';\nimport monotoneChainConvexHull from './compute/monotoneChainConvexHull';\nimport minimalBoundingRectangle from './compute/minimalBoundingRectangle';\n\nexport default function extend(Image) {\n let inPlace = { inPlace: true };\n\n 
Image.extendMethod('invert', invertFilter);\n Image.extendMethod('abs', absFilter);\n Image.extendMethod('level', levelFilter, inPlace);\n Image.extendMethod('add', addFilter, inPlace);\n Image.extendMethod('subtract', subtractFilter, inPlace);\n Image.extendMethod('subtractImage', subtractImage);\n Image.extendMethod('multiply', multiplyFilter, inPlace);\n Image.extendMethod('divide', divideFilter, inPlace);\n Image.extendMethod('hypotenuse', hypotenuseFilter);\n Image.extendMethod('background', backgroundFilter);\n Image.extendMethod('flipX', flipXFilter);\n Image.extendMethod('flipY', flipYFilter);\n\n Image.extendMethod('blurFilter', blurFilter);\n Image.extendMethod('medianFilter', medianFilter);\n Image.extendMethod('gaussianFilter', gaussianFilter);\n Image.extendMethod('sobelFilter', sobelFilter);\n Image.extendMethod('gradientFilter', gradientFilter);\n Image.extendMethod('scharrFilter', scharrFilter);\n\n Image.extendMethod('dilate', dilate);\n Image.extendMethod('erode', erode);\n Image.extendMethod('open', open);\n Image.extendMethod('close', close);\n Image.extendMethod('topHat', topHat);\n Image.extendMethod('blackHat', blackHat);\n Image.extendMethod('morphologicalGradient', morphologicalGradient);\n\n Image.extendMethod('warpingFourPoints', warpingFourPoints);\n Image.extendMethod('crop', crop);\n Image.extendMethod('cropAlpha', cropAlpha);\n Image.extendMethod('resize', resize).extendMethod('scale', resize);\n Image.extendMethod('hsv', hsv);\n Image.extendMethod('hsl', hsl);\n Image.extendMethod('cmyk', cmyk);\n Image.extendMethod('rgba8', rgba8);\n Image.extendMethod('grey', grey).extendMethod('gray', grey);\n Image.extendMethod('mask', mask);\n Image.extendMethod('pad', pad);\n Image.extendMethod('colorDepth', colorDepth);\n Image.extendMethod('setBorder', setBorder, inPlace);\n Image.extendMethod('rotate', rotate);\n Image.extendMethod('rotateLeft', rotateLeft);\n Image.extendMethod('rotateRight', rotateRight);\n Image.extendMethod('insert', insert);\n\n Image.extendMethod('getRow', getRow);\n Image.extendMethod('getColumn', getColumn);\n Image.extendMethod('getMatrix', getMatrix);\n Image.extendMethod('setMatrix', setMatrix);\n Image.extendMethod('getPixelsArray', getPixelsArray);\n Image.extendMethod('getIntersection', getIntersection);\n Image.extendMethod('getClosestCommonParent', getClosestCommonParent);\n Image.extendMethod('getThreshold', getThreshold);\n\n Image.extendMethod('split', split);\n Image.extendMethod('getChannel', getChannel);\n Image.extendMethod('combineChannels', combineChannels);\n Image.extendMethod('setChannel', setChannel);\n Image.extendMethod('getSimilarity', getSimilarity);\n Image.extendMethod('getPixelsGrid', getPixelsGrid);\n Image.extendMethod('getBestMatch', getBestMatch);\n\n Image.extendMethod('cannyEdge', cannyEdge);\n Image.extendMethod('convolution', convolution);\n Image.extendMethod('extract', extract);\n Image.extendMethod('floodFill', floodFill);\n Image.extendMethod('paintLabels', paintLabels, inPlace);\n Image.extendMethod('paintMasks', paintMasks, inPlace);\n Image.extendMethod('paintPoints', paintPoints, inPlace);\n Image.extendMethod('paintPolyline', paintPolyline, inPlace);\n Image.extendMethod('paintPolylines', paintPolylines, inPlace);\n Image.extendMethod('paintPolygon', paintPolygon, inPlace);\n Image.extendMethod('paintPolygons', paintPolygons, inPlace);\n\n Image.extendMethod('countAlphaPixels', countAlphaPixels);\n Image.extendMethod('monotoneChainConvexHull', monotoneChainConvexHull);\n 
Image.extendMethod('minimalBoundingRectangle', minimalBoundingRectangle);\n Image.extendMethod('getHistogram', getHistogram).extendProperty(\n 'histogram',\n getHistogram,\n );\n Image.extendMethod('getHistograms', getHistograms).extendProperty(\n 'histograms',\n getHistograms,\n );\n Image.extendMethod('getColorHistogram', getColorHistogram).extendProperty(\n 'colorHistogram',\n getColorHistogram,\n );\n Image.extendMethod('getMin', getMin).extendProperty('min', getMin);\n Image.extendMethod('getMax', getMax).extendProperty('max', getMax);\n Image.extendMethod('getSum', getSum).extendProperty('sum', getSum);\n Image.extendMethod('getMoment', getMoment).extendProperty(\n 'moment',\n getMoment,\n );\n Image.extendMethod('getLocalMaxima', getLocalMaxima);\n Image.extendMethod('getMedian', getMedian).extendProperty(\n 'median',\n getMedian,\n );\n Image.extendMethod('getMean', getMean).extendProperty('mean', getMean);\n Image.extendMethod('getPoints', getPoints).extendProperty(\n 'points',\n getPoints,\n );\n Image.extendMethod('getExtendedPoints', getExtendedPoints).extendProperty(\n 'extendedPoints',\n getExtendedPoints,\n );\n Image.extendMethod('getRelativePosition', getRelativePosition);\n}\n","import Image from '../../../image/Image';\n\nconst defaultOptions = {\n regression: {\n kernelType: 'polynomial',\n kernelOptions: { degree: 2, constant: 1 },\n },\n threshold: 0.02,\n roi: {\n minSurface: 100,\n positive: false,\n },\n sampling: 20,\n include: [],\n};\n\nfunction run(image, options, onStep) {\n options = Object.assign({}, defaultOptions, options);\n const manager = this.manager;\n if (Array.isArray(image)) {\n return Promise.all(\n image.map(function (img) {\n const run = runOnce(manager, img, options);\n if (typeof onStep === 'function') {\n run.then(onStep);\n }\n return run;\n }),\n );\n } else {\n return runOnce(manager, image, options);\n }\n}\n\nfunction runOnce(manager, image, options) {\n return manager.post('data', [image, options]).then(function (response) {\n for (let i in response) {\n response[i] = new Image(response[i]);\n }\n return response;\n });\n}\n\nfunction work() {\n worker.on('data', function (send, image, options) {\n image = new IJS(image);\n const result = {};\n const toTransfer = [];\n\n const grey = image.grey();\n\n const sobel = grey.sobelFilter();\n maybeInclude('sobel', sobel);\n\n const mask = sobel.level().mask({ threshold: options.threshold });\n maybeInclude('mask', mask);\n\n const roiManager = sobel.getRoiManager();\n roiManager.fromMask(mask);\n const realMask = roiManager.getMask(options.roi);\n maybeInclude('realMask', realMask);\n\n const pixels = grey.getPixelsGrid({\n sampling: options.sampling,\n mask: realMask,\n });\n\n const background = image.getBackground(\n pixels.xyS,\n pixels.zS,\n options.regression,\n );\n maybeInclude('background', background);\n\n const corrected = image.subtract(background);\n\n result.result = corrected;\n toTransfer.push(corrected.data.buffer);\n send(result, toTransfer);\n\n function maybeInclude(name, image) {\n if (options.include.includes(name)) {\n result[name] = image;\n toTransfer.push(image.data.buffer);\n }\n }\n });\n}\n\nconst background = { run, work };\nexport default background;\n","import WorkerManager from 'web-worker-manager';\n\nimport extend from './extend';\n\nclass Worker {\n constructor() {\n this._url = null;\n this._deps = [null];\n }\n checkUrl() {\n if (this._url === null) {\n throw new Error('image worker must be initialized with an URL');\n }\n }\n get url() {\n return 
this._url;\n }\n set url(value) {\n if (typeof value !== 'string') {\n throw new TypeError('worker URL must be a string');\n }\n this._url = value;\n this._deps[0] = value;\n }\n static extendMethod(name, method) {\n let manager;\n let url;\n let runner = {};\n\n function run(...args) {\n if (!manager) {\n this.checkUrl();\n url = this.url;\n manager = new WorkerManager(method.work, { deps: url });\n runner.manager = manager;\n }\n return method.run.call(runner, ...args);\n }\n\n run.reset = function () {\n if (manager) {\n manager.terminate();\n manager = new WorkerManager(method.work, { deps: url });\n runner.manager = manager;\n }\n };\n Worker.prototype[name] = run;\n }\n}\n\nextend(Worker);\n\nexport default new Worker();\n","import background from './process/background';\n\nexport default function extend(Worker) {\n Worker.extendMethod('background', background);\n}\n","import React from 'react';\nexport var ReactReduxContext = /*#__PURE__*/React.createContext(null);\n\nif (process.env.NODE_ENV !== 'production') {\n ReactReduxContext.displayName = 'ReactRedux';\n}\n\nexport default ReactReduxContext;","// Default to a dummy \"batch\" implementation that just runs the callback\nfunction defaultNoopBatch(callback) {\n callback();\n}\n\nvar batch = defaultNoopBatch; // Allow injecting another batching function later\n\nexport var setBatch = function setBatch(newBatch) {\n return batch = newBatch;\n}; // Supply a getter just to skip dealing with ESM bindings\n\nexport var getBatch = function getBatch() {\n return batch;\n};","import { getBatch } from './batch'; // encapsulates the subscription logic for connecting a component to the redux store, as\n// well as nesting subscriptions of descendant components, so that we can ensure the\n// ancestor components re-render before descendants\n\nfunction createListenerCollection() {\n var batch = getBatch();\n var first = null;\n var last = null;\n return {\n clear: function clear() {\n first = null;\n last = null;\n },\n notify: function notify() {\n batch(function () {\n var listener = first;\n\n while (listener) {\n listener.callback();\n listener = listener.next;\n }\n });\n },\n get: function get() {\n var listeners = [];\n var listener = first;\n\n while (listener) {\n listeners.push(listener);\n listener = listener.next;\n }\n\n return listeners;\n },\n subscribe: function subscribe(callback) {\n var isSubscribed = true;\n var listener = last = {\n callback: callback,\n next: null,\n prev: last\n };\n\n if (listener.prev) {\n listener.prev.next = listener;\n } else {\n first = listener;\n }\n\n return function unsubscribe() {\n if (!isSubscribed || first === null) return;\n isSubscribed = false;\n\n if (listener.next) {\n listener.next.prev = listener.prev;\n } else {\n last = listener.prev;\n }\n\n if (listener.prev) {\n listener.prev.next = listener.next;\n } else {\n first = listener.next;\n }\n };\n }\n };\n}\n\nvar nullListeners = {\n notify: function notify() {},\n get: function get() {\n return [];\n }\n};\nexport function createSubscription(store, parentSub) {\n var unsubscribe;\n var listeners = nullListeners;\n\n function addNestedSub(listener) {\n trySubscribe();\n return listeners.subscribe(listener);\n }\n\n function notifyNestedSubs() {\n listeners.notify();\n }\n\n function handleChangeWrapper() {\n if (subscription.onStateChange) {\n subscription.onStateChange();\n }\n }\n\n function isSubscribed() {\n return Boolean(unsubscribe);\n }\n\n function trySubscribe() {\n if (!unsubscribe) {\n unsubscribe = parentSub ? 
parentSub.addNestedSub(handleChangeWrapper) : store.subscribe(handleChangeWrapper);\n listeners = createListenerCollection();\n }\n }\n\n function tryUnsubscribe() {\n if (unsubscribe) {\n unsubscribe();\n unsubscribe = undefined;\n listeners.clear();\n listeners = nullListeners;\n }\n }\n\n var subscription = {\n addNestedSub: addNestedSub,\n notifyNestedSubs: notifyNestedSubs,\n handleChangeWrapper: handleChangeWrapper,\n isSubscribed: isSubscribed,\n trySubscribe: trySubscribe,\n tryUnsubscribe: tryUnsubscribe,\n getListeners: function getListeners() {\n return listeners;\n }\n };\n return subscription;\n}","import { useEffect, useLayoutEffect } from 'react'; // React currently throws a warning when using useLayoutEffect on the server.\n// To get around it, we can conditionally useEffect on the server (no-op) and\n// useLayoutEffect in the browser. We need useLayoutEffect to ensure the store\n// subscription callback always has the selector from the latest render commit\n// available, otherwise a store update may happen between render and the effect,\n// which may cause missed updates; we also must ensure the store subscription\n// is created synchronously, otherwise a store update may occur before the\n// subscription is created and an inconsistent state may be observed\n\nexport var useIsomorphicLayoutEffect = typeof window !== 'undefined' && typeof window.document !== 'undefined' && typeof window.document.createElement !== 'undefined' ? useLayoutEffect : useEffect;","import React, { useMemo } from 'react';\nimport PropTypes from 'prop-types';\nimport { ReactReduxContext } from './Context';\nimport { createSubscription } from '../utils/Subscription';\nimport { useIsomorphicLayoutEffect } from '../utils/useIsomorphicLayoutEffect';\n\nfunction Provider(_ref) {\n var store = _ref.store,\n context = _ref.context,\n children = _ref.children;\n var contextValue = useMemo(function () {\n var subscription = createSubscription(store);\n return {\n store: store,\n subscription: subscription\n };\n }, [store]);\n var previousState = useMemo(function () {\n return store.getState();\n }, [store]);\n useIsomorphicLayoutEffect(function () {\n var subscription = contextValue.subscription;\n subscription.onStateChange = subscription.notifyNestedSubs;\n subscription.trySubscribe();\n\n if (previousState !== store.getState()) {\n subscription.notifyNestedSubs();\n }\n\n return function () {\n subscription.tryUnsubscribe();\n subscription.onStateChange = null;\n };\n }, [contextValue, previousState]);\n var Context = context || ReactReduxContext;\n return /*#__PURE__*/React.createElement(Context.Provider, {\n value: contextValue\n }, children);\n}\n\nif (process.env.NODE_ENV !== 'production') {\n Provider.propTypes = {\n store: PropTypes.shape({\n subscribe: PropTypes.func.isRequired,\n dispatch: PropTypes.func.isRequired,\n getState: PropTypes.func.isRequired\n }),\n context: PropTypes.object,\n children: PropTypes.any\n };\n}\n\nexport default Provider;","import { useContext } from 'react';\nimport { ReactReduxContext } from '../components/Context';\n/**\r\n * A hook to access the value of the `ReactReduxContext`. This is a low-level\r\n * hook that you should usually not need to call directly.\r\n *\r\n * @returns {any} the value of the `ReactReduxContext`\r\n *\r\n * @example\r\n *\r\n * import React from 'react'\r\n * import { useReduxContext } from 'react-redux'\r\n *\r\n * export const CounterComponent = ({ value }) => {\r\n * const { store } = useReduxContext()\r\n * return
<div>{store.getState()}</div>
\r\n * }\r\n */\n\nexport function useReduxContext() {\n var contextValue = useContext(ReactReduxContext);\n\n if (process.env.NODE_ENV !== 'production' && !contextValue) {\n throw new Error('could not find react-redux context value; please ensure the component is wrapped in a ');\n }\n\n return contextValue;\n}","import { useContext } from 'react';\nimport { ReactReduxContext } from '../components/Context';\nimport { useReduxContext as useDefaultReduxContext } from './useReduxContext';\n/**\r\n * Hook factory, which creates a `useStore` hook bound to a given context.\r\n *\r\n * @param {React.Context} [context=ReactReduxContext] Context passed to your ``.\r\n * @returns {Function} A `useStore` hook bound to the specified context.\r\n */\n\nexport function createStoreHook(context) {\n if (context === void 0) {\n context = ReactReduxContext;\n }\n\n var useReduxContext = context === ReactReduxContext ? useDefaultReduxContext : function () {\n return useContext(context);\n };\n return function useStore() {\n var _useReduxContext = useReduxContext(),\n store = _useReduxContext.store;\n\n return store;\n };\n}\n/**\r\n * A hook to access the redux store.\r\n *\r\n * @returns {any} the redux store\r\n *\r\n * @example\r\n *\r\n * import React from 'react'\r\n * import { useStore } from 'react-redux'\r\n *\r\n * export const ExampleComponent = () => {\r\n * const store = useStore()\r\n * return
<div>{store.getState()}</div>
\r\n * }\r\n */\n\nexport var useStore = /*#__PURE__*/createStoreHook();","import { ReactReduxContext } from '../components/Context';\nimport { useStore as useDefaultStore, createStoreHook } from './useStore';\n/**\r\n * Hook factory, which creates a `useDispatch` hook bound to a given context.\r\n *\r\n * @param {React.Context} [context=ReactReduxContext] Context passed to your ``.\r\n * @returns {Function} A `useDispatch` hook bound to the specified context.\r\n */\n\nexport function createDispatchHook(context) {\n if (context === void 0) {\n context = ReactReduxContext;\n }\n\n var useStore = context === ReactReduxContext ? useDefaultStore : createStoreHook(context);\n return function useDispatch() {\n var store = useStore();\n return store.dispatch;\n };\n}\n/**\r\n * A hook to access the redux `dispatch` function.\r\n *\r\n * @returns {any|function} redux store's `dispatch` function\r\n *\r\n * @example\r\n *\r\n * import React, { useCallback } from 'react'\r\n * import { useDispatch } from 'react-redux'\r\n *\r\n * export const CounterComponent = ({ value }) => {\r\n * const dispatch = useDispatch()\r\n * const increaseCounter = useCallback(() => dispatch({ type: 'increase-counter' }), [])\r\n * return (\r\n *
<div>\r\n * <span>{value}</span>\r\n * <button onClick={increaseCounter}>Increase counter</button>\r\n * </div>
\r\n * )\r\n * }\r\n */\n\nexport var useDispatch = /*#__PURE__*/createDispatchHook();","import { useReducer, useRef, useMemo, useContext, useDebugValue } from 'react';\nimport { useReduxContext as useDefaultReduxContext } from './useReduxContext';\nimport { createSubscription } from '../utils/Subscription';\nimport { useIsomorphicLayoutEffect } from '../utils/useIsomorphicLayoutEffect';\nimport { ReactReduxContext } from '../components/Context';\n\nvar refEquality = function refEquality(a, b) {\n return a === b;\n};\n\nfunction useSelectorWithStoreAndSubscription(selector, equalityFn, store, contextSub) {\n var _useReducer = useReducer(function (s) {\n return s + 1;\n }, 0),\n forceRender = _useReducer[1];\n\n var subscription = useMemo(function () {\n return createSubscription(store, contextSub);\n }, [store, contextSub]);\n var latestSubscriptionCallbackError = useRef();\n var latestSelector = useRef();\n var latestStoreState = useRef();\n var latestSelectedState = useRef();\n var storeState = store.getState();\n var selectedState;\n\n try {\n if (selector !== latestSelector.current || storeState !== latestStoreState.current || latestSubscriptionCallbackError.current) {\n var newSelectedState = selector(storeState); // ensure latest selected state is reused so that a custom equality function can result in identical references\n\n if (latestSelectedState.current === undefined || !equalityFn(newSelectedState, latestSelectedState.current)) {\n selectedState = newSelectedState;\n } else {\n selectedState = latestSelectedState.current;\n }\n } else {\n selectedState = latestSelectedState.current;\n }\n } catch (err) {\n if (latestSubscriptionCallbackError.current) {\n err.message += \"\\nThe error may be correlated with this previous error:\\n\" + latestSubscriptionCallbackError.current.stack + \"\\n\\n\";\n }\n\n throw err;\n }\n\n useIsomorphicLayoutEffect(function () {\n latestSelector.current = selector;\n latestStoreState.current = storeState;\n latestSelectedState.current = selectedState;\n latestSubscriptionCallbackError.current = undefined;\n });\n useIsomorphicLayoutEffect(function () {\n function checkForUpdates() {\n try {\n var newStoreState = store.getState(); // Avoid calling selector multiple times if the store's state has not changed\n\n if (newStoreState === latestStoreState.current) {\n return;\n }\n\n var _newSelectedState = latestSelector.current(newStoreState);\n\n if (equalityFn(_newSelectedState, latestSelectedState.current)) {\n return;\n }\n\n latestSelectedState.current = _newSelectedState;\n latestStoreState.current = newStoreState;\n } catch (err) {\n // we ignore all errors here, since when the component\n // is re-rendered, the selectors are called again, and\n // will throw again, if neither props nor store state\n // changed\n latestSubscriptionCallbackError.current = err;\n }\n\n forceRender();\n }\n\n subscription.onStateChange = checkForUpdates;\n subscription.trySubscribe();\n checkForUpdates();\n return function () {\n return subscription.tryUnsubscribe();\n };\n }, [store, subscription]);\n return selectedState;\n}\n/**\r\n * Hook factory, which creates a `useSelector` hook bound to a given context.\r\n *\r\n * @param {React.Context} [context=ReactReduxContext] Context passed to your ``.\r\n * @returns {Function} A `useSelector` hook bound to the specified context.\r\n */\n\n\nexport function createSelectorHook(context) {\n if (context === void 0) {\n context = ReactReduxContext;\n }\n\n var useReduxContext = context === ReactReduxContext ? 
useDefaultReduxContext : function () {\n return useContext(context);\n };\n return function useSelector(selector, equalityFn) {\n if (equalityFn === void 0) {\n equalityFn = refEquality;\n }\n\n if (process.env.NODE_ENV !== 'production') {\n if (!selector) {\n throw new Error(\"You must pass a selector to useSelector\");\n }\n\n if (typeof selector !== 'function') {\n throw new Error(\"You must pass a function as a selector to useSelector\");\n }\n\n if (typeof equalityFn !== 'function') {\n throw new Error(\"You must pass a function as an equality function to useSelector\");\n }\n }\n\n var _useReduxContext = useReduxContext(),\n store = _useReduxContext.store,\n contextSub = _useReduxContext.subscription;\n\n var selectedState = useSelectorWithStoreAndSubscription(selector, equalityFn, store, contextSub);\n useDebugValue(selectedState);\n return selectedState;\n };\n}\n/**\r\n * A hook to access the redux store's state. This hook takes a selector function\r\n * as an argument. The selector is called with the store state.\r\n *\r\n * This hook takes an optional equality comparison function as the second parameter\r\n * that allows you to customize the way the selected state is compared to determine\r\n * whether the component needs to be re-rendered.\r\n *\r\n * @param {Function} selector the selector function\r\n * @param {Function=} equalityFn the function that will be used to determine equality\r\n *\r\n * @returns {any} the selected state\r\n *\r\n * @example\r\n *\r\n * import React from 'react'\r\n * import { useSelector } from 'react-redux'\r\n *\r\n * export const CounterComponent = () => {\r\n * const counter = useSelector(state => state.counter)\r\n * return
<div>{counter}</div>
\r\n * }\r\n */\n\nexport var useSelector = /*#__PURE__*/createSelectorHook();","export * from './exports';\nimport { unstable_batchedUpdates as batch } from './utils/reactBatchedUpdates';\nimport { setBatch } from './utils/batch'; // Enable batched updates in our subscriptions for use\n// with standard React renderers (ReactDOM, React Native)\n\nsetBatch(batch);\nexport { batch };","import Konva from \"konva\";\nimport { createContext } from \"react\";\n\nexport const StageContext = createContext | null>(\n null\n);\n","// Unique ID creation requires a high quality random # generator. In the browser we therefore\n// require the crypto API and do not support built-in fallback to lower quality random number\n// generators (like Math.random()).\nvar getRandomValues;\nvar rnds8 = new Uint8Array(16);\nexport default function rng() {\n // lazy load so that environments that need to polyfill have a chance to do so\n if (!getRandomValues) {\n // getRandomValues needs to be invoked in a context where \"this\" is a Crypto implementation. Also,\n // find the complete implementation of crypto (msCrypto) on IE11.\n getRandomValues = typeof crypto !== 'undefined' && crypto.getRandomValues && crypto.getRandomValues.bind(crypto) || typeof msCrypto !== 'undefined' && typeof msCrypto.getRandomValues === 'function' && msCrypto.getRandomValues.bind(msCrypto);\n\n if (!getRandomValues) {\n throw new Error('crypto.getRandomValues() not supported. See https://github.com/uuidjs/uuid#getrandomvalues-not-supported');\n }\n }\n\n return getRandomValues(rnds8);\n}","import { AnnotatorState } from \"store/types\";\nimport {\n AnnotationMode,\n AnnotationState,\n ToolType,\n} from \"utils/annotator/enums\";\n\nexport const selectAnnotationState = ({\n annotator,\n}: {\n annotator: AnnotatorState;\n}): AnnotationState => {\n return annotator.annotationState;\n};\n\nexport const selectAnnotationSelectionMode = ({\n annotator,\n}: {\n annotator: AnnotatorState;\n}): AnnotationMode => {\n return annotator.selectionMode;\n};\nexport const selectPenSelectionBrushSize = ({\n annotator,\n}: {\n annotator: AnnotatorState;\n}) => {\n return annotator.penSelectionBrushSize;\n};\n\nexport const selectQuickSelectionRegionSize = ({\n annotator,\n}: {\n annotator: AnnotatorState;\n}) => {\n return annotator.quickSelectionRegionSize;\n};\n\nexport const selectThresholdAnnotationValue = ({\n annotator,\n}: {\n annotator: AnnotatorState;\n}) => {\n return annotator.thresholdAnnotationValue;\n};\n\nexport const selectToolType = ({\n annotator,\n}: {\n annotator: AnnotatorState;\n}): ToolType => {\n return annotator.toolType;\n};\n","import * as ImageJS from \"image-js\";\nexport abstract class Tool {\n /**\n * Image-JS object of the active image (i.e. 
of the image that we are annotating on).\n * https://image-js.github.io/image-js/#image\n */\n image: ImageJS.Image;\n\n constructor(image: ImageJS.Image) {\n this.image = image;\n }\n}\n","/**\n * Based on https://github.com/mourner/tinyqueue\n * Copyright (c) 2017, Vladimir Agafonkin https://github.com/mourner/tinyqueue/blob/master/LICENSE\n *\n * Adapted for PathFinding needs by @anvaka\n * Copyright (c) 2017, Andrei Kashcha\n *\n * Further adapted for Piximi\n */\nimport { PiximiNode } from \".\";\n\nexport class NodeHeap {\n public data: Array = [];\n\n public length: number = 0;\n\n compare(a: PiximiNode, b: PiximiNode) {\n return a.fScore - b.fScore;\n }\n\n setNodeId(node: PiximiNode, heapIndex: number) {\n node.heapIndex = heapIndex;\n }\n\n push(item: any) {\n this.data.push(item);\n this.setNodeId(item, this.length);\n this.length++;\n this._up(this.length - 1);\n }\n\n pop() {\n if (this.length === 0) return undefined;\n\n const top = this.data[0];\n this.length--;\n\n if (this.length > 0) {\n this.data[0] = this.data[this.length];\n this.setNodeId(this.data[0], 0);\n this._down(0);\n }\n this.data.pop();\n\n return top;\n }\n\n updateItem(pos: number) {\n this._down(pos);\n this._up(pos);\n }\n\n _up(pos: number) {\n const data = this.data;\n const compare = this.compare;\n const setNodeId = this.setNodeId;\n const item = data[pos];\n\n while (pos > 0) {\n const parent = (pos - 1) >> 1;\n const current = data[parent];\n if (compare(item, current) >= 0) break;\n data[pos] = current;\n\n setNodeId(current, pos);\n pos = parent;\n }\n\n data[pos] = item;\n setNodeId(item, pos);\n }\n\n _down(pos: number) {\n const data = this.data;\n const compare = this.compare;\n const halfLength = this.length >> 1;\n const item = data[pos];\n const setNodeId = this.setNodeId;\n\n while (pos < halfLength) {\n let left = (pos << 1) + 1;\n const right = left + 1;\n let best = data[left];\n\n if (right < this.length && compare(data[right], best) < 0) {\n left = right;\n best = data[right];\n }\n if (compare(best, item) >= 0) break;\n\n data[pos] = best;\n setNodeId(best, pos);\n pos = left;\n }\n\n data[pos] = item;\n setNodeId(item, pos);\n }\n}\n","export default /^(?:[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}|00000000-0000-0000-0000-000000000000)$/i;","import validate from './validate.js';\n/**\n * Convert array of 16 byte values to UUID string format of the form:\n * XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\n */\n\nvar byteToHex = [];\n\nfor (var i = 0; i < 256; ++i) {\n byteToHex.push((i + 0x100).toString(16).substr(1));\n}\n\nfunction stringify(arr) {\n var offset = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 0;\n // Note: Be careful editing this code! It's been tuned for performance\n // and works in ways you may not expect. See https://github.com/uuidjs/uuid/pull/434\n var uuid = (byteToHex[arr[offset + 0]] + byteToHex[arr[offset + 1]] + byteToHex[arr[offset + 2]] + byteToHex[arr[offset + 3]] + '-' + byteToHex[arr[offset + 4]] + byteToHex[arr[offset + 5]] + '-' + byteToHex[arr[offset + 6]] + byteToHex[arr[offset + 7]] + '-' + byteToHex[arr[offset + 8]] + byteToHex[arr[offset + 9]] + '-' + byteToHex[arr[offset + 10]] + byteToHex[arr[offset + 11]] + byteToHex[arr[offset + 12]] + byteToHex[arr[offset + 13]] + byteToHex[arr[offset + 14]] + byteToHex[arr[offset + 15]]).toLowerCase(); // Consistency check for valid UUID. 
If this throws, it's likely due to one\n // of the following:\n // - One or more input array values don't map to a hex octet (leading to\n // \"undefined\" in the uuid)\n // - Invalid input values for the RFC `version` or `variant` fields\n\n if (!validate(uuid)) {\n throw TypeError('Stringified UUID is invalid');\n }\n\n return uuid;\n}\n\nexport default stringify;","import REGEX from './regex.js';\n\nfunction validate(uuid) {\n return typeof uuid === 'string' && REGEX.test(uuid);\n}\n\nexport default validate;","import rng from './rng.js';\nimport stringify from './stringify.js';\n\nfunction v4(options, buf, offset) {\n options = options || {};\n var rnds = options.random || (options.rng || rng)(); // Per 4.4, set bits for version and `clock_seq_hi_and_reserved`\n\n rnds[6] = rnds[6] & 0x0f | 0x40;\n rnds[8] = rnds[8] & 0x3f | 0x80; // Copy bytes to buffer, if provided\n\n if (buf) {\n offset = offset || 0;\n\n for (var i = 0; i < 16; ++i) {\n buf[offset + i] = rnds[i];\n }\n\n return buf;\n }\n\n return stringify(rnds);\n}\n\nexport default v4;","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Backend, DataToGPUOptions, GPUData, Tensor} from '../tensor';\nimport {DataId} from '../tensor_info';\nimport {BackendValues, DataType, WebGLData, WebGPUData} from '../types';\n\nexport const EPSILON_FLOAT32 = 1e-7;\nexport const EPSILON_FLOAT16 = 1e-4;\n\n// Required information for all backends.\nexport interface BackendTimingInfo {\n kernelMs: number|{error: string};\n getExtraProfileInfo?(): string; // a field for additional timing information\n // e.g. packing / unpacking for WebGL backend\n}\n\nexport interface TensorStorage {\n read(dataId: DataId): Promise;\n readSync(dataId: DataId): BackendValues;\n disposeData(dataId: DataId, force?: boolean): boolean;\n write(values: BackendValues, shape: number[], dtype: DataType): DataId;\n move(\n dataId: DataId, values: BackendValues, shape: number[], dtype: DataType,\n refCount: number): void;\n memory(): {unreliable: boolean;}; // Backend-specific information.\n /** Returns number of data ids currently in the storage. */\n numDataIds(): number;\n refCount(dataId: DataId): number;\n}\n\n/** Convenient class for storing tensor-related data. 
*/\nexport class DataStorage {\n private data = new WeakMap();\n private dataIdsCount = 0;\n\n constructor(private backend: KernelBackend, private dataMover: DataMover) {}\n\n get(dataId: DataId) {\n if (!this.data.has(dataId)) {\n this.dataMover.moveData(this.backend, dataId);\n }\n return this.data.get(dataId);\n }\n\n set(dataId: DataId, value: T): void {\n this.dataIdsCount++;\n this.data.set(dataId, value);\n }\n\n has(dataId: DataId): boolean {\n return this.data.has(dataId);\n }\n\n delete(dataId: DataId): boolean {\n this.dataIdsCount--;\n return this.data.delete(dataId);\n }\n\n numDataIds(): number {\n return this.dataIdsCount;\n }\n}\n\nexport interface DataMover {\n /**\n * To be called by backends whenever they see a dataId that they don't own.\n * Upon calling this method, the mover will fetch the tensor from another\n * backend and register it with the current active backend.\n */\n moveData(backend: KernelBackend, dataId: DataId): void;\n}\n\nexport interface BackendTimer {\n // check if backend timer is available\n timerAvailable(): boolean;\n time(f: () => void): Promise;\n}\n\n/**\n * The interface that defines the kernels that should be implemented when\n * adding a new backend. New backends don't need to implement every one of the\n * methods, this can be done gradually (throw an error for unimplemented\n * methods).\n */\nexport class KernelBackend implements TensorStorage, Backend, BackendTimer {\n refCount(dataId: DataId): number {\n return notYetImplemented('refCount');\n }\n incRef(dataId: DataId): void {\n return notYetImplemented('incRef');\n }\n timerAvailable(): boolean {\n return true;\n }\n time(f: () => void): Promise {\n return notYetImplemented('time');\n }\n read(dataId: object): Promise {\n return notYetImplemented('read');\n }\n readSync(dataId: object): BackendValues {\n return notYetImplemented('readSync');\n }\n readToGPU(dataId: object, options?: DataToGPUOptions): GPUData {\n return notYetImplemented('readToGPU');\n }\n numDataIds(): number {\n return notYetImplemented('numDataIds');\n }\n disposeData(dataId: object, force?: boolean): boolean {\n return notYetImplemented('disposeData');\n }\n write(values: BackendValues, shape: number[], dtype: DataType): DataId {\n return notYetImplemented('write');\n }\n move(\n dataId: DataId, values: BackendValues, shape: number[], dtype: DataType,\n refCount: number): void {\n return notYetImplemented('move');\n }\n\n createTensorFromGPUData(\n values: WebGLData|WebGPUData, shape: number[], dtype: DataType): Tensor {\n return notYetImplemented('createTensorFromGPUData');\n }\n\n memory(): {unreliable: boolean; reasons?: string[]} {\n return notYetImplemented('memory');\n }\n /** Returns the highest precision for floats in bits (e.g. 16 or 32) */\n floatPrecision(): 16|32 {\n return notYetImplemented('floatPrecision');\n }\n /** Returns the smallest representable number. */\n epsilon(): number {\n return this.floatPrecision() === 32 ? EPSILON_FLOAT32 : EPSILON_FLOAT16;\n }\n dispose(): void {\n return notYetImplemented('dispose');\n }\n}\n\nfunction notYetImplemented(kernelName: string): never {\n throw new Error(\n `'${kernelName}' not yet implemented or not found in the registry. ` +\n `This kernel may not be supported by the tfjs backend you have chosen`);\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {BackendValues, DataType, DataTypeMap, FlatVector, NumericDataType, TensorLike, TypedArray, WebGLData, WebGPUData} from './types';\n\n/**\n * Shuffles the array in-place using Fisher-Yates algorithm.\n *\n * ```js\n * const a = [1, 2, 3, 4, 5];\n * tf.util.shuffle(a);\n * console.log(a);\n * ```\n *\n * @param array The array to shuffle in-place.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\n// tslint:disable-next-line:no-any\nexport function shuffle(array: any[]|Uint32Array|Int32Array|\n Float32Array): void {\n let counter = array.length;\n let index = 0;\n // While there are elements in the array\n while (counter > 0) {\n // Pick a random index\n index = (Math.random() * counter) | 0;\n // Decrease counter by 1\n counter--;\n // And swap the last element with it\n swap(array, counter, index);\n }\n}\n\n/**\n * Shuffles two arrays in-place the same way using Fisher-Yates algorithm.\n *\n * ```js\n * const a = [1,2,3,4,5];\n * const b = [11,22,33,44,55];\n * tf.util.shuffleCombo(a, b);\n * console.log(a, b);\n * ```\n *\n * @param array The first array to shuffle in-place.\n * @param array2 The second array to shuffle in-place with the same permutation\n * as the first array.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function shuffleCombo(\n // tslint:disable-next-line:no-any\n array: any[]|Uint32Array|Int32Array|Float32Array,\n // tslint:disable-next-line:no-any\n array2: any[]|Uint32Array|Int32Array|Float32Array): void {\n if (array.length !== array2.length) {\n throw new Error(\n `Array sizes must match to be shuffled together ` +\n `First array length was ${array.length}` +\n `Second array length was ${array2.length}`);\n }\n let counter = array.length;\n let index = 0;\n // While there are elements in the array\n while (counter > 0) {\n // Pick a random index\n index = (Math.random() * counter) | 0;\n // Decrease counter by 1\n counter--;\n // And swap the last element of each array with it\n swap(array, counter, index);\n swap(array2, counter, index);\n }\n}\n\n/** Clamps a value to a specified range. */\nexport function clamp(min: number, x: number, max: number): number {\n return Math.max(min, Math.min(x, max));\n}\n\nexport function nearestLargerEven(val: number): number {\n return val % 2 === 0 ? 
val : val + 1;\n}\n\nexport function swap(\n object: {[index: number]: T}, left: number, right: number) {\n const temp = object[left];\n object[left] = object[right];\n object[right] = temp;\n}\n\nexport function sum(arr: number[]): number {\n let sum = 0;\n for (let i = 0; i < arr.length; i++) {\n sum += arr[i];\n }\n return sum;\n}\n\n/**\n * Returns a sample from a uniform [a, b) distribution.\n *\n * @param a The minimum support (inclusive).\n * @param b The maximum support (exclusive).\n * @return A pseudorandom number on the half-open interval [a,b).\n */\nexport function randUniform(a: number, b: number) {\n const r = Math.random();\n return (b * r) + (1 - r) * a;\n}\n\n/** Returns the squared Euclidean distance between two vectors. */\nexport function distSquared(a: FlatVector, b: FlatVector): number {\n let result = 0;\n for (let i = 0; i < a.length; i++) {\n const diff = Number(a[i]) - Number(b[i]);\n result += diff * diff;\n }\n return result;\n}\n\n/**\n * Asserts that the expression is true. Otherwise throws an error with the\n * provided message.\n *\n * ```js\n * const x = 2;\n * tf.util.assert(x === 2, 'x is not 2');\n * ```\n *\n * @param expr The expression to assert (as a boolean).\n * @param msg A function that returns the message to report when throwing an\n * error. We use a function for performance reasons.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function assert(expr: boolean, msg: () => string) {\n if (!expr) {\n throw new Error(typeof msg === 'string' ? msg : msg());\n }\n}\n\nexport function assertShapesMatch(\n shapeA: number[], shapeB: number[], errorMessagePrefix = ''): void {\n assert(\n arraysEqual(shapeA, shapeB),\n () => errorMessagePrefix + ` Shapes ${shapeA} and ${shapeB} must match`);\n}\n\nexport function assertNonNull(a: TensorLike): void {\n assert(\n a != null,\n () => `The input to the tensor constructor must be a non-null value.`);\n}\n\n/**\n * Returns the size (number of elements) of the tensor given its shape.\n *\n * ```js\n * const shape = [3, 4, 2];\n * const size = tf.util.sizeFromShape(shape);\n * console.log(size);\n * ```\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function sizeFromShape(shape: number[]): number {\n if (shape.length === 0) {\n // Scalar.\n return 1;\n }\n let size = shape[0];\n for (let i = 1; i < shape.length; i++) {\n size *= shape[i];\n }\n return size;\n}\n\nexport function isScalarShape(shape: number[]): boolean {\n return shape.length === 0;\n}\n\nexport function arraysEqual(n1: FlatVector, n2: FlatVector) {\n if (n1 === n2) {\n return true;\n }\n if (n1 == null || n2 == null) {\n return false;\n }\n\n if (n1.length !== n2.length) {\n return false;\n }\n for (let i = 0; i < n1.length; i++) {\n if (n1[i] !== n2[i]) {\n return false;\n }\n }\n return true;\n}\n\nexport function isInt(a: number): boolean {\n return a % 1 === 0;\n}\n\nexport function tanh(x: number): number {\n // tslint:disable-next-line:no-any\n if ((Math as any).tanh != null) {\n // tslint:disable-next-line:no-any\n return (Math as any).tanh(x);\n }\n if (x === Infinity) {\n return 1;\n } else if (x === -Infinity) {\n return -1;\n } else {\n const e2x = Math.exp(2 * x);\n return (e2x - 1) / (e2x + 1);\n }\n}\n\nexport function sizeToSquarishShape(size: number): [number, number] {\n const width = Math.ceil(Math.sqrt(size));\n return [width, Math.ceil(size / width)];\n}\n\n/**\n * Creates a new array with randomized indices to a given quantity.\n *\n * ```js\n * const randomTen = 
tf.util.createShuffledIndices(10);\n * console.log(randomTen);\n * ```\n *\n * @param number Quantity of how many shuffled indices to create.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function createShuffledIndices(n: number): Uint32Array {\n const shuffledIndices = new Uint32Array(n);\n for (let i = 0; i < n; ++i) {\n shuffledIndices[i] = i;\n }\n shuffle(shuffledIndices);\n return shuffledIndices;\n}\n\nexport function rightPad(a: string, size: number): string {\n if (size <= a.length) {\n return a;\n }\n return a + ' '.repeat(size - a.length);\n}\n\nexport function repeatedTry(\n checkFn: () => boolean, delayFn = (counter: number) => 0,\n maxCounter?: number,\n scheduleFn?: (functionRef: Function, delay: number) =>\n void): Promise {\n return new Promise((resolve, reject) => {\n let tryCount = 0;\n\n const tryFn = () => {\n if (checkFn()) {\n resolve();\n return;\n }\n\n tryCount++;\n\n const nextBackoff = delayFn(tryCount);\n\n if (maxCounter != null && tryCount >= maxCounter) {\n reject();\n return;\n }\n\n if (scheduleFn != null) {\n scheduleFn(tryFn, nextBackoff);\n } else {\n // google3 does not allow assigning another variable to setTimeout.\n // Don't refactor this so scheduleFn has a default value of setTimeout.\n setTimeout(tryFn, nextBackoff);\n }\n };\n\n tryFn();\n });\n}\n\n/**\n * Given the full size of the array and a shape that may contain -1 as the\n * implicit dimension, returns the inferred shape where -1 is replaced.\n * E.g. For shape=[2, -1, 3] and size=24, it will return [2, 4, 3].\n *\n * @param shape The shape, which may contain -1 in some dimension.\n * @param size The full size (number of elements) of the array.\n * @return The inferred shape where -1 is replaced with the inferred size.\n */\nexport function inferFromImplicitShape(\n shape: number[], size: number): number[] {\n let shapeProd = 1;\n let implicitIdx = -1;\n\n for (let i = 0; i < shape.length; ++i) {\n if (shape[i] >= 0) {\n shapeProd *= shape[i];\n } else if (shape[i] === -1) {\n if (implicitIdx !== -1) {\n throw Error(\n `Shapes can only have 1 implicit size. ` +\n `Found -1 at dim ${implicitIdx} and dim ${i}`);\n }\n implicitIdx = i;\n } else if (shape[i] < 0) {\n throw Error(`Shapes can not be < 0. Found ${shape[i]} at dim ${i}`);\n }\n }\n\n if (implicitIdx === -1) {\n if (size > 0 && size !== shapeProd) {\n throw Error(`Size(${size}) must match the product of shape ${shape}`);\n }\n return shape;\n }\n\n if (shapeProd === 0) {\n throw Error(\n `Cannot infer the missing size in [${shape}] when ` +\n `there are 0 elements`);\n }\n if (size % shapeProd !== 0) {\n throw Error(\n `The implicit shape can't be a fractional number. ` +\n `Got ${size} / ${shapeProd}`);\n }\n\n const newShape = shape.slice();\n newShape[implicitIdx] = size / shapeProd;\n return newShape;\n}\n\nexport function parseAxisParam(\n axis: number|number[], shape: number[]): number[] {\n const rank = shape.length;\n\n // Normalize input\n axis = axis == null ? shape.map((s, i) => i) : [].concat(axis);\n\n // Check for valid range\n assert(\n axis.every(ax => ax >= -rank && ax < rank),\n () =>\n `All values in axis param must be in range [-${rank}, ${rank}) but ` +\n `got axis ${axis}`);\n\n // Check for only integers\n assert(\n axis.every(ax => isInt(ax)),\n () => `All values in axis param must be integers but ` +\n `got axis ${axis}`);\n\n // Handle negative axis.\n return axis.map(a => a < 0 ? rank + a : a);\n}\n\n/** Reduces the shape by removing all dimensions of shape 1. 
*/\nexport function squeezeShape(shape: number[], axis?: number[]):\n {newShape: number[], keptDims: number[]} {\n const newShape: number[] = [];\n const keptDims: number[] = [];\n const isEmptyArray = axis != null && Array.isArray(axis) && axis.length === 0;\n const axes = (axis == null || isEmptyArray) ?\n null :\n parseAxisParam(axis, shape).sort();\n let j = 0;\n for (let i = 0; i < shape.length; ++i) {\n if (axes != null) {\n if (axes[j] === i && shape[i] !== 1) {\n throw new Error(\n `Can't squeeze axis ${i} since its dim '${shape[i]}' is not 1`);\n }\n if ((axes[j] == null || axes[j] > i) && shape[i] === 1) {\n newShape.push(shape[i]);\n keptDims.push(i);\n }\n if (axes[j] <= i) {\n j++;\n }\n }\n if (shape[i] !== 1) {\n newShape.push(shape[i]);\n keptDims.push(i);\n }\n }\n return {newShape, keptDims};\n}\n\nexport function getTypedArrayFromDType(\n dtype: D, size: number): DataTypeMap[D] {\n let values = null;\n if (dtype == null || dtype === 'float32') {\n values = new Float32Array(size);\n } else if (dtype === 'int32') {\n values = new Int32Array(size);\n } else if (dtype === 'bool') {\n values = new Uint8Array(size);\n } else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n return values as DataTypeMap[D];\n}\n\nexport function getArrayFromDType(\n dtype: D, size: number): DataTypeMap[D] {\n let values = null;\n if (dtype == null || dtype === 'float32') {\n values = new Float32Array(size);\n } else if (dtype === 'int32') {\n values = new Int32Array(size);\n } else if (dtype === 'bool') {\n values = new Uint8Array(size);\n } else if (dtype === 'string') {\n values = new Array<'string'>(size);\n } else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n return values as DataTypeMap[D];\n}\n\nexport function checkConversionForErrors(\n vals: DataTypeMap[D]|number[], dtype: D): void {\n for (let i = 0; i < vals.length; i++) {\n const num = vals[i] as number;\n if (isNaN(num) || !isFinite(num)) {\n throw Error(`A tensor of type ${dtype} being uploaded contains ${num}.`);\n }\n }\n}\n\n/** Returns true if the dtype is valid. */\nexport function isValidDtype(dtype: DataType): boolean {\n return dtype === 'bool' || dtype === 'complex64' || dtype === 'float32' ||\n dtype === 'int32' || dtype === 'string';\n}\n\n/**\n * Returns true if the new type can't encode the old type without loss of\n * precision.\n */\nexport function hasEncodingLoss(oldType: DataType, newType: DataType): boolean {\n if (newType === 'complex64') {\n return false;\n }\n if (newType === 'float32' && oldType !== 'complex64') {\n return false;\n }\n if (newType === 'int32' && oldType !== 'float32' && oldType !== 'complex64') {\n return false;\n }\n if (newType === 'bool' && oldType === 'bool') {\n return false;\n }\n return true;\n}\n\nexport function bytesPerElement(dtype: DataType): number {\n if (dtype === 'float32' || dtype === 'int32') {\n return 4;\n } else if (dtype === 'complex64') {\n return 8;\n } else if (dtype === 'bool') {\n return 1;\n } else {\n throw new Error(`Unknown dtype ${dtype}`);\n }\n}\n\n/**\n * Returns the approximate number of bytes allocated in the string array - 2\n * bytes per character. Computing the exact bytes for a native string in JS\n * is not possible since it depends on the encoding of the html page that\n * serves the website.\n */\nexport function bytesFromStringArray(arr: Uint8Array[]): number {\n if (arr == null) {\n return 0;\n }\n let bytes = 0;\n arr.forEach(x => bytes += x.length);\n return bytes;\n}\n\n/** Returns true if the value is a string. 
*/\nexport function isString(value: {}): value is string {\n return typeof value === 'string' || value instanceof String;\n}\n\nexport function isBoolean(value: {}): boolean {\n return typeof value === 'boolean';\n}\n\nexport function isNumber(value: {}): boolean {\n return typeof value === 'number';\n}\n\nexport function inferDtype(values: TensorLike|WebGLData|WebGPUData): DataType {\n if (Array.isArray(values)) {\n return inferDtype(values[0]);\n }\n if (values instanceof Float32Array) {\n return 'float32';\n } else if (\n values instanceof Int32Array || values instanceof Uint8Array ||\n values instanceof Uint8ClampedArray) {\n return 'int32';\n } else if (isNumber(values)) {\n return 'float32';\n } else if (isString(values)) {\n return 'string';\n } else if (isBoolean(values)) {\n return 'bool';\n }\n return 'float32';\n}\n\nexport function isFunction(f: Function) {\n return !!(f && f.constructor && f.call && f.apply);\n}\n\nexport function nearestDivisor(size: number, start: number): number {\n for (let i = start; i < size; ++i) {\n if (size % i === 0) {\n return i;\n }\n }\n return size;\n}\n\nexport function computeStrides(shape: number[]): number[] {\n const rank = shape.length;\n if (rank < 2) {\n return [];\n }\n\n // Last dimension has implicit stride of 1, thus having D-1 (instead of D)\n // strides.\n const strides = new Array(rank - 1);\n strides[rank - 2] = shape[rank - 1];\n for (let i = rank - 3; i >= 0; --i) {\n strides[i] = strides[i + 1] * shape[i + 1];\n }\n return strides;\n}\n\nfunction createNestedArray(\n offset: number, shape: number[], a: TypedArray, isComplex = false) {\n const ret = new Array();\n if (shape.length === 1) {\n const d = shape[0] * (isComplex ? 2 : 1);\n for (let i = 0; i < d; i++) {\n ret[i] = a[offset + i];\n }\n } else {\n const d = shape[0];\n const rest = shape.slice(1);\n const len = rest.reduce((acc, c) => acc * c) * (isComplex ? 2 : 1);\n for (let i = 0; i < d; i++) {\n ret[i] = createNestedArray(offset + i * len, rest, a, isComplex);\n }\n }\n return ret;\n}\n\n// Provide a nested array of TypedArray in given shape.\nexport function toNestedArray(\n shape: number[], a: TypedArray, isComplex = false) {\n if (shape.length === 0) {\n // Scalar type should return a single number.\n return a[0];\n }\n const size = shape.reduce((acc, c) => acc * c) * (isComplex ? 2 : 1);\n if (size === 0) {\n // A tensor with shape zero should be turned into empty list.\n return [];\n }\n if (size !== a.length) {\n throw new Error(`[${shape}] does not match the input size ${a.length}${\n isComplex ? ' for a complex tensor' : ''}.`);\n }\n\n return createNestedArray(0, shape, a, isComplex);\n}\n\nexport function convertBackendValuesAndArrayBuffer(\n data: BackendValues|ArrayBuffer, dtype: DataType) {\n // If is type Uint8Array[], return it directly.\n if (Array.isArray(data)) {\n return data;\n }\n if (dtype === 'float32') {\n return data instanceof Float32Array ? data : new Float32Array(data);\n } else if (dtype === 'int32') {\n return data instanceof Int32Array ? 
data : new Int32Array(data);\n } else if (dtype === 'bool' || dtype === 'string') {\n return Uint8Array.from(new Int32Array(data));\n } else {\n throw new Error(`Unknown dtype ${dtype}`);\n }\n}\n\nexport function makeOnesTypedArray(\n size: number, dtype: D): DataTypeMap[D] {\n const array = makeZerosTypedArray(size, dtype);\n for (let i = 0; i < array.length; i++) {\n array[i] = 1;\n }\n return array;\n}\n\nexport function makeZerosTypedArray(\n size: number, dtype: D): DataTypeMap[D] {\n if (dtype == null || dtype === 'float32' || dtype === 'complex64') {\n return new Float32Array(size) as DataTypeMap[D];\n } else if (dtype === 'int32') {\n return new Int32Array(size) as DataTypeMap[D];\n } else if (dtype === 'bool') {\n return new Uint8Array(size) as DataTypeMap[D];\n } else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\n\n/**\n * Make nested `TypedArray` filled with zeros.\n * @param shape The shape information for the nested array.\n * @param dtype dtype of the array element.\n */\nexport function makeZerosNestedTypedArray(\n shape: number[], dtype: D) {\n const size = shape.reduce((prev, curr) => prev * curr, 1);\n if (dtype == null || dtype === 'float32') {\n return toNestedArray(shape, new Float32Array(size));\n } else if (dtype === 'int32') {\n return toNestedArray(shape, new Int32Array(size));\n } else if (dtype === 'bool') {\n return toNestedArray(shape, new Uint8Array(size));\n } else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\n\nexport function assertNonNegativeIntegerDimensions(shape: number[]) {\n shape.forEach(dimSize => {\n assert(\n Number.isInteger(dimSize) && dimSize >= 0,\n () =>\n `Tensor must have a shape comprised of positive integers but got ` +\n `shape [${shape}].`);\n });\n}\n\n/**\n * Computes flat index for a given location (multidimentionsal index) in a\n * Tensor/multidimensional array.\n *\n * @param locs Location in the tensor.\n * @param rank Rank of the tensor.\n * @param strides Tensor strides.\n */\nexport function locToIndex(\n locs: number[], rank: number, strides: number[]): number {\n if (rank === 0) {\n return 0;\n } else if (rank === 1) {\n return locs[0];\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += strides[i] * locs[i];\n }\n return index;\n}\n\n/**\n * Computes the location (multidimensional index) in a\n * tensor/multidimentional array for a given flat index.\n *\n * @param index Index in flat array.\n * @param rank Rank of tensor.\n * @param strides Strides of tensor.\n */\nexport function indexToLoc(\n index: number, rank: number, strides: number[]): number[] {\n if (rank === 0) {\n return [];\n } else if (rank === 1) {\n return [index];\n }\n const locs: number[] = new Array(rank);\n for (let i = 0; i < locs.length - 1; ++i) {\n locs[i] = Math.floor(index / strides[i]);\n index -= locs[i] * strides[i];\n }\n locs[locs.length - 1] = index;\n return locs;\n}\n\n/**\n * This method asserts whether an object is a Promise instance.\n * @param object\n */\n// tslint:disable-next-line: no-any\nexport function isPromise(object: any): object is Promise {\n // We chose to not use 'obj instanceOf Promise' for two reasons:\n // 1. It only reliably works for es6 Promise, not other Promise\n // implementations.\n // 2. It doesn't work with framework that uses zone.js. 
zone.js monkey\n // patch the async calls, so it is possible the obj (patched) is\n // comparing to a pre-patched Promise.\n return object && object.then && typeof object.then === 'function';\n}\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Platform} from './platforms/platform';\nimport {isPromise} from './util_base';\n\n// Expects flags from URL in the format ?tfjsflags=FLAG1:1,FLAG2:true.\nconst TENSORFLOWJS_FLAGS_PREFIX = 'tfjsflags';\n\ntype FlagValue = number|boolean;\ntype FlagEvaluationFn = (() => FlagValue)|(() => Promise);\nexport type Flags = {\n [featureName: string]: FlagValue\n};\nexport type FlagRegistryEntry = {\n evaluationFn: FlagEvaluationFn;\n setHook?: (value: FlagValue) => void;\n};\n\n/**\n * The environment contains evaluated flags as well as the registered platform.\n * This is always used as a global singleton and can be retrieved with\n * `tf.env()`.\n *\n * @doc {heading: 'Environment'}\n */\nexport class Environment {\n private flags: Flags = {};\n private flagRegistry: {[flagName: string]: FlagRegistryEntry} = {};\n\n private urlFlags: Flags = {};\n\n platformName: string;\n platform: Platform;\n\n // Jasmine spies on this in 'environment_test.ts'\n getQueryParams = getQueryParams;\n\n // tslint:disable-next-line: no-any\n constructor(public global: any) {\n this.populateURLFlags();\n }\n\n setPlatform(platformName: string, platform: Platform) {\n if (this.platform != null) {\n if (!(env().getBool('IS_TEST') || env().getBool('PROD'))) {\n console.warn(\n `Platform ${this.platformName} has already been set. ` +\n `Overwriting the platform with ${platformName}.`);\n }\n }\n this.platformName = platformName;\n this.platform = platform;\n }\n\n registerFlag(\n flagName: string, evaluationFn: FlagEvaluationFn,\n setHook?: (value: FlagValue) => void) {\n this.flagRegistry[flagName] = {evaluationFn, setHook};\n\n // Override the flag value from the URL. This has to happen here because\n // the environment is initialized before flags get registered.\n if (this.urlFlags[flagName] != null) {\n const flagValue = this.urlFlags[flagName];\n if (!(env().getBool('IS_TEST') || env().getBool('PROD'))) {\n console.warn(\n `Setting feature override from URL ${flagName}: ${flagValue}.`);\n }\n this.set(flagName, flagValue);\n }\n }\n\n async getAsync(flagName: string): Promise {\n if (flagName in this.flags) {\n return this.flags[flagName];\n }\n\n this.flags[flagName] = await this.evaluateFlag(flagName);\n return this.flags[flagName];\n }\n\n get(flagName: string): FlagValue {\n if (flagName in this.flags) {\n return this.flags[flagName];\n }\n\n const flagValue = this.evaluateFlag(flagName);\n if (isPromise(flagValue)) {\n throw new Error(\n `Flag ${flagName} cannot be synchronously evaluated. 
` +\n `Please use getAsync() instead.`);\n }\n\n this.flags[flagName] = flagValue;\n return this.flags[flagName];\n }\n\n getNumber(flagName: string): number {\n return this.get(flagName) as number;\n }\n\n getBool(flagName: string): boolean {\n return this.get(flagName) as boolean;\n }\n\n getFlags(): Flags {\n return this.flags;\n }\n // For backwards compatibility.\n get features(): Flags {\n return this.flags;\n }\n\n set(flagName: string, value: FlagValue): void {\n if (this.flagRegistry[flagName] == null) {\n throw new Error(\n `Cannot set flag ${flagName} as it has not been registered.`);\n }\n this.flags[flagName] = value;\n if (this.flagRegistry[flagName].setHook != null) {\n this.flagRegistry[flagName].setHook(value);\n }\n }\n\n private evaluateFlag(flagName: string): FlagValue|Promise {\n if (this.flagRegistry[flagName] == null) {\n throw new Error(\n `Cannot evaluate flag '${flagName}': no evaluation function found.`);\n }\n return this.flagRegistry[flagName].evaluationFn();\n }\n\n setFlags(flags: Flags) {\n this.flags = Object.assign({}, flags);\n }\n\n reset() {\n this.flags = {};\n this.urlFlags = {};\n this.populateURLFlags();\n }\n\n private populateURLFlags(): void {\n if (typeof this.global === 'undefined' ||\n typeof this.global.location === 'undefined' ||\n typeof this.global.location.search === 'undefined') {\n return;\n }\n\n const urlParams = this.getQueryParams(this.global.location.search);\n if (TENSORFLOWJS_FLAGS_PREFIX in urlParams) {\n const keyValues = urlParams[TENSORFLOWJS_FLAGS_PREFIX].split(',');\n keyValues.forEach(keyValue => {\n const [key, value] = keyValue.split(':') as [string, string];\n this.urlFlags[key] = parseValue(key, value);\n });\n }\n }\n}\n\nexport function getQueryParams(queryString: string): {[key: string]: string} {\n const params = {};\n queryString.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g, (s, ...t) => {\n decodeParam(params, t[0], t[1]);\n return t.join('=');\n });\n return params;\n}\n\nfunction decodeParam(\n params: {[key: string]: string}, name: string, value?: string) {\n params[decodeURIComponent(name)] = decodeURIComponent(value || '');\n}\n\nfunction parseValue(flagName: string, value: string): FlagValue {\n value = value.toLowerCase();\n if (value === 'true' || value === 'false') {\n return value === 'true';\n } else if (`${+ value}` === value) {\n return +value;\n }\n throw new Error(\n `Could not parse value flag value ${value} for flag ${flagName}.`);\n}\n\n/**\n * Returns the current environment (a global singleton).\n *\n * The environment object contains the evaluated feature values as well as the\n * active platform.\n *\n * @doc {heading: 'Environment'}\n */\nexport function env() {\n return ENV;\n}\n\nexport let ENV: Environment = null;\nexport function setEnvironmentGlobal(environment: Environment) {\n ENV = environment;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// Note that the identifier globalNameSpace is scoped to this module, but will\n// always resolve to the same global object regardless of how the module is\n// resolved.\n// tslint:disable-next-line:no-any\nlet globalNameSpace: {_tfGlobals: Map};\n// tslint:disable-next-line:no-any\nexport function getGlobalNamespace(): {_tfGlobals: Map} {\n if (globalNameSpace == null) {\n // tslint:disable-next-line:no-any\n let ns: any;\n if (typeof (window) !== 'undefined') {\n ns = window;\n } else if (typeof (global) !== 'undefined') {\n ns = global;\n } else if (typeof (process) !== 'undefined') {\n ns = process;\n } else if (typeof (self) !== 'undefined') {\n ns = self;\n } else {\n throw new Error('Could not find a global object');\n }\n globalNameSpace = ns;\n }\n return globalNameSpace;\n}\n\n// tslint:disable-next-line:no-any\nfunction getGlobalMap(): Map {\n const ns = getGlobalNamespace();\n if (ns._tfGlobals == null) {\n ns._tfGlobals = new Map();\n }\n return ns._tfGlobals;\n}\n\n/**\n * Returns a globally accessible 'singleton' object.\n *\n * @param key the name of the object\n * @param init a function to initialize to initialize this object\n * the first time it is fetched.\n */\nexport function getGlobal(key: string, init: () => T): T {\n const globalMap = getGlobalMap();\n if (globalMap.has(key)) {\n return globalMap.get(key);\n } else {\n const singleton = init();\n globalMap.set(key, singleton);\n return globalMap.get(key);\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// Allow UpperCamelCase variable names\n// tslint:disable: variable-name\n// Unfortunately just enabling PascalCase per file (tslint:enable:\n// allow-pascal-case) doesn't work.\nimport { NamedTensorInfoMap } from './kernel_registry';\nimport { TensorInfo } from './tensor_info';\nimport {ExplicitPadding} from './ops/conv_util';\nimport {Activation} from './ops/fused_types';\nimport {DataType, PixelData} from './types';\n\nexport const Abs = 'Abs';\nexport type AbsInputs = UnaryInputs;\n\nexport const Acos = 'Acos';\nexport type AcosInputs = UnaryInputs;\n\nexport const Acosh = 'Acosh';\nexport type AcoshInputs = UnaryInputs;\n\nexport const Add = 'Add';\nexport type AddInputs = BinaryInputs;\n\nexport const AddN = 'AddN';\nexport type AddNInputs = TensorInfo[];\n\nexport const All = 'All';\nexport type AllInputs = Pick;\nexport interface AllAttrs {\n axis: number|number[];\n keepDims: boolean;\n}\n\nexport const Any = 'Any';\nexport type AnyInputs = Pick;\nexport interface AnyAttrs {\n axis: number|number[];\n keepDims: boolean;\n}\n\nexport const ArgMax = 'ArgMax';\nexport type ArgMaxInputs = Pick;\nexport interface ArgMaxAttrs {\n axis: number;\n}\n\nexport const ArgMin = 'ArgMin';\nexport type ArgMinInputs = Pick;\nexport interface ArgMinAttrs {\n axis: number;\n}\n\nexport const Asin = 'Asin';\nexport type AsinInputs = UnaryInputs;\n\nexport const Asinh = 'Asinh';\nexport type AsinhInputs = UnaryInputs;\n\nexport const Atan = 'Atan';\nexport type AtanInputs = UnaryInputs;\n\nexport const Atanh = 'Atanh';\nexport type AtanhInputs = UnaryInputs;\n\nexport const Atan2 = 'Atan2';\nexport type Atan2Inputs = BinaryInputs;\n\nexport const AvgPool = 'AvgPool';\nexport type AvgPoolInputs = Pick;\nexport interface AvgPoolAttrs {\n filterSize: [number, number]|number;\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const AvgPoolGrad = 'AvgPoolGrad';\nexport type AvgPoolGradInputs = Pick;\nexport interface AvgPoolGradAttrs {\n filterSize: [number, number]|number;\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n}\n\nexport const AvgPool3D = 'AvgPool3D';\nexport type AvgPool3DInputs = Pick;\nexport interface AvgPool3DAttrs {\n filterSize: [number, number, number]|number;\n strides: [number, number, number]|number;\n pad: 'valid'|'same'|number;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n dataFormat: 'NDHWC'|'NCDHW';\n}\n\nexport const AvgPool3DGrad = 'AvgPool3DGrad';\nexport type AvgPool3DGradInputs = Pick;\nexport interface AvgPool3DGradAttrs {\n filterSize: [number, number, number]|number;\n strides: [number, number, number]|number;\n pad: 'valid'|'same'|number;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const BatchMatMul = 'BatchMatMul';\nexport type BatchMatMulInputs = Pick;\nexport interface 
BatchMatMulAttrs {\n transposeA: boolean;\n transposeB: boolean;\n}\n\nexport const BatchToSpaceND = 'BatchToSpaceND';\nexport type BatchToSpaceNDInputs = Pick;\nexport interface BatchToSpaceNDAttrs {\n blockShape: number[];\n crops: number[][];\n}\n\nexport type BinaryInputs = Pick;\n\nexport const Bincount = 'Bincount';\nexport type BincountInputs = Pick;\nexport interface BincountAttrs {\n size: number;\n}\n\nexport const BroadcastTo = 'BroadcastTo';\nexport type BroadcastToInputs = Pick;\nexport interface BroadCastToAttrs {\n shape: number[];\n inputShape: number[]; // for gradient\n}\n\nexport const BroadcastArgs = 'BroadcastArgs';\nexport type BroadcastArgsInputs = Pick;\n\nexport const Cast = 'Cast';\nexport type CastInputs = UnaryInputs;\nexport interface CastAttrs {\n dtype: DataType;\n}\n\nexport const Ceil = 'Ceil';\nexport type CeilInputs = UnaryInputs;\n\nexport const ClipByValue = 'ClipByValue';\nexport type ClipByValueInputs = UnaryInputs;\nexport interface ClipByValueAttrs {\n clipValueMin: number;\n clipValueMax: number;\n}\n\nexport const Complex = 'Complex';\nexport type ComplexInputs = Pick;\n\nexport const ComplexAbs = 'ComplexAbs';\nexport type ComplexAbsInputs = UnaryInputs;\n\nexport const Concat = 'Concat';\nexport type ConcatInputs = TensorInfo[];\nexport interface ConcatAttrs {\n axis: number;\n}\n\nexport const Conv2D = 'Conv2D';\nexport type Conv2DInputs = Pick;\nexport interface Conv2DAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dataFormat: 'NHWC'|'NCHW';\n dilations: [number, number]|number;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const Conv2DBackpropFilter = 'Conv2DBackpropFilter';\nexport type Conv2DBackpropFilterInputs = Pick;\nexport interface Conv2DBackpropFilterAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dataFormat: 'NHWC'|'NCHW';\n dimRoundingMode?: 'floor'|'round'|'ceil';\n filterShape: [number, number, number, number];\n}\n\nexport const Conv2DBackpropInput = 'Conv2DBackpropInput';\nexport type Conv2DBackpropInputInputs = Pick;\nexport interface Conv2DBackpropInputAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dataFormat: 'NHWC'|'NCHW';\n dimRoundingMode?: 'floor'|'round'|'ceil';\n inputShape: [number, number, number, number];\n}\n\nexport const Conv3D = 'Conv3D';\nexport type Conv3DInputs = Pick;\nexport interface Conv3DAttrs {\n strides: [number, number, number]|number;\n pad: 'valid'|'same';\n dataFormat: 'NDHWC'|'NCDHW';\n dilations: [number, number, number]|number;\n}\n\nexport const Conv3DBackpropFilterV2 = 'Conv3DBackpropFilterV2';\nexport type Conv3DBackpropFilterV2Inputs = Pick;\n\nexport interface Conv3DBackpropFilterV2Attrs {\n strides: [number, number, number]|number;\n pad: 'valid'|'same';\n filterShape: [number, number, number, number, number];\n}\n\nexport const Conv3DBackpropInputV2 = 'Conv3DBackpropInputV2';\nexport type Conv3DBackpropInputV2Inputs =\n Pick;\nexport interface Conv3DBackpropInputV2Attrs {\n strides: [number, number, number]|number;\n pad: 'valid'|'same';\n inputShape: [number, number, number, number, number];\n}\n\nexport const Cos = 'Cos';\nexport type CosInputs = UnaryInputs;\n\nexport const Cosh = 'Cosh';\nexport type CoshInputs = UnaryInputs;\n\nexport const Cumprod = 'Cumprod';\nexport type CumprodInputs = Pick;\nexport interface CumprodAttrs {\n axis: number;\n exclusive: boolean;\n reverse: boolean;\n}\n\nexport const Cumsum = 'Cumsum';\nexport type 
CumsumInputs = Pick;\nexport interface CumsumAttrs {\n axis: number;\n exclusive: boolean;\n reverse: boolean;\n}\n\nexport const CropAndResize = 'CropAndResize';\nexport type CropAndResizeInputs =\n Pick;\nexport interface CropAndResizeAttrs {\n cropSize: [number, number];\n method: 'bilinear'|'nearest';\n extrapolationValue: number;\n}\n\nexport const DenseBincount = 'DenseBincount';\nexport type DenseBincountInputs = Pick;\nexport interface DenseBincountAttrs {\n size: number;\n binaryOutput?: boolean;\n}\n\nexport const DepthToSpace = 'DepthToSpace';\nexport type DepthToSpaceInputs = Pick;\nexport interface DepthToSpaceAttrs {\n blockSize: number;\n dataFormat: 'NHWC'|'NCHW';\n}\n\nexport const DepthwiseConv2dNative = 'DepthwiseConv2dNative';\nexport type DepthwiseConv2dNativeInputs =\n Pick;\nexport interface DepthwiseConv2dNativeAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dataFormat: 'NHWC'|'NCHW';\n dilations: [number, number]|number;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const DepthwiseConv2dNativeBackpropFilter =\n 'DepthwiseConv2dNativeBackpropFilter';\nexport type DepthwiseConv2dNativeBackpropFilterInputs =\n Pick;\nexport interface DepthwiseConv2dNativeBackpropFilterAttrs {\n strides: [number, number]|number;\n dilations: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n filterShape: [number, number, number, number];\n}\n\nexport const DepthwiseConv2dNativeBackpropInput =\n 'DepthwiseConv2dNativeBackpropInput';\nexport type DepthwiseConv2dNativeBackpropInputInputs =\n Pick;\nexport interface DepthwiseConv2dNativeBackpropInputAttrs {\n strides: [number, number]|number;\n dilations: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n inputShape: [number, number, number, number];\n}\n\nexport const Diag = 'Diag';\nexport type DiagInputs = Pick;\n\nexport const Dilation2D = 'Dilation2D';\nexport type Dilation2DInputs = Pick;\nexport interface Dilation2DAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number;\n dilations: [number, number]|number;\n}\n\nexport const Dilation2DBackpropInput = 'Dilation2DBackpropInput';\nexport type Dilation2DBackpropInputInputs =\n Pick;\n\nexport const Dilation2DBackpropFilter = 'Dilation2DBackpropFilter';\nexport type Dilation2DBackpropFilterInputs =\n Pick;\n\nexport const RealDiv = 'RealDiv';\nexport type RealDivInputs = BinaryInputs;\n\nexport const Einsum = 'Einsum';\nexport type EinsumInputs = TensorInfo[];\nexport interface EinsumAttrs {\n equation: string;\n}\n\nexport const Elu = 'Elu';\nexport type EluInputs = Pick;\n\nexport const EluGrad = 'EluGrad';\nexport type EluGradInputs = Pick;\n\nexport const Erf = 'Erf';\nexport type ErfInputs = UnaryInputs;\n\nexport const Equal = 'Equal';\nexport type EqualInputs = BinaryInputs;\n\nexport const Exp = 'Exp';\nexport type ExpInputs = UnaryInputs;\n\nexport const ExpandDims = 'ExpandDims';\nexport type ExpandDimsInputs = Pick;\nexport interface ExpandDimsAttrs {\n dim: number;\n}\n\nexport const Expm1 = 'Expm1';\nexport type Expm1Inputs = UnaryInputs;\n\nexport const FFT = 'FFT';\nexport type FFTInputs = Pick;\n\nexport const Fill = 'Fill';\nexport interface FillAttrs {\n shape: number[];\n value: number|string;\n dtype: DataType;\n}\n\nexport const FlipLeftRight = 'FlipLeftRight';\nexport type FlipLeftRightInputs = Pick;\n\nexport const Floor = 'Floor';\nexport type FloorInputs = 
UnaryInputs;\n\nexport const FloorDiv = 'FloorDiv';\nexport type FloorDivInputs = BinaryInputs;\n\nexport const FusedBatchNorm = 'FusedBatchNorm';\nexport type FusedBatchNormInputs =\n Pick;\nexport interface FusedBatchNormAttrs {\n varianceEpsilon: number;\n}\n\nexport const GatherV2 = 'GatherV2';\nexport type GatherV2Inputs = Pick;\nexport interface GatherV2Attrs {\n axis: number;\n batchDims: number;\n}\n\nexport const GatherNd = 'GatherNd';\nexport type GatherNdInputs = Pick;\n\nexport const Greater = 'Greater';\nexport type GreaterInputs = BinaryInputs;\n\nexport const GreaterEqual = 'GreaterEqual';\nexport type GreaterEqualInputs = BinaryInputs;\n\nexport const Identity = 'Identity';\nexport type IdentityInputs = Pick;\n\nexport const IFFT = 'IFFT';\nexport type IFFTInputs = Pick;\n\nexport const Imag = 'Imag';\nexport type ImagInputs = Pick;\n\nexport const IsFinite = 'IsFinite';\nexport type IsFiniteInputs = UnaryInputs;\n\nexport const IsInf = 'IsInf';\nexport type IsInfInputs = UnaryInputs;\n\nexport const IsNan = 'IsNan';\nexport type IsNanInputs = UnaryInputs;\n\nexport const LeakyRelu = 'LeakyRelu';\nexport type LeakyReluInputs = Pick;\nexport interface LeakyReluAttrs {\n alpha: number;\n}\n\nexport const Less = 'Less';\nexport type LessInputs = BinaryInputs;\n\nexport const LessEqual = 'LessEqual';\nexport type LessEqualInputs = BinaryInputs;\n\nexport const LinSpace = 'LinSpace';\nexport interface LinSpaceAttrs {\n start: number;\n stop: number;\n num: number;\n}\nexport const Log = 'Log';\nexport type LogInputs = UnaryInputs;\n\nexport const Log1p = 'Log1p';\nexport type Log1pInputs = UnaryInputs;\n\nexport const LogicalAnd = 'LogicalAnd';\nexport type LogicalAndInputs = BinaryInputs;\n\nexport const LogicalNot = 'LogicalNot';\nexport type LogicalNotInputs = Pick;\n\nexport const LogicalOr = 'LogicalOr';\nexport type LogicalOrInputs = BinaryInputs;\n\nexport const LogicalXor = 'LogicalXor';\nexport type LogicalXorInputs = BinaryInputs;\n\nexport const LogSoftmax = 'LogSoftmax';\nexport type LogSoftmaxInputs = Pick;\nexport interface LogSoftmaxAttrs {\n axis: number;\n}\n\nexport const LowerBound = 'LowerBound';\nexport type LowerBoundInputs =\n Pick;\n\nexport const LRN = 'LRN';\nexport type LRNInputs = Pick;\nexport interface LRNAttrs {\n depthRadius: number;\n bias: number;\n alpha: number;\n beta: number;\n}\n\nexport const LRNGrad = 'LRNGrad';\nexport type LRNGradInputs = Pick;\nexport interface LRNGradAttrs {\n depthRadius: number;\n bias: number;\n alpha: number;\n beta: number;\n}\n\nexport const Max = 'Max';\nexport type MaxInputs = Pick;\nexport interface MaxAttrs {\n reductionIndices: number|number[];\n keepDims: boolean;\n}\n\nexport const Maximum = 'Maximum';\nexport type MaximumInputs = BinaryInputs;\n\nexport const MaxPool = 'MaxPool';\nexport type MaxPoolInputs = Pick;\nexport interface MaxPoolAttrs {\n filterSize: [number, number]|number;\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const MaxPoolGrad = 'MaxPoolGrad';\nexport type MaxPoolGradInputs = Pick;\nexport interface MaxPoolGradAttrs {\n filterSize: [number, number]|number;\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const MaxPool3D = 'MaxPool3D';\nexport type MaxPool3DInputs = Pick;\nexport interface MaxPool3DAttrs {\n filterSize: [number, number, number]|number;\n strides: [number, number, number]|number;\n pad: 
'valid'|'same'|number;\n dataFormat: 'NDHWC'|'NCDHW';\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const MaxPool3DGrad = 'MaxPool3DGrad';\nexport type MaxPool3DGradInputs =\n Pick;\nexport interface MaxPool3DGradAttrs {\n filterSize: [number, number, number]|number;\n strides: [number, number, number]|number;\n pad: 'valid'|'same'|number;\n dimRoundingMode?: 'floor'|'round'|'ceil';\n}\n\nexport const MaxPoolWithArgmax = 'MaxPoolWithArgmax';\nexport type MaxPoolWithArgmaxInputs = Pick;\nexport interface MaxPoolWithArgmaxAttrs {\n filterSize: [number, number]|number;\n strides: [number, number]|number;\n pad: 'valid'|'same'|number;\n includeBatchInIndex: boolean;\n}\n\nexport const Mean = 'Mean';\nexport type MeanInputs = Pick;\nexport interface MeanAttrs {\n axis: number|number[];\n keepDims: boolean;\n}\n\nexport const Min = 'Min';\nexport type MinInputs = Pick;\nexport interface MinAttrs {\n axis: number|number[];\n keepDims: boolean;\n}\n\nexport const Minimum = 'Minimum';\nexport type MinimumInputs = BinaryInputs;\n\nexport const MirrorPad = 'MirrorPad';\nexport type MirrorPadInputs = Pick;\nexport interface MirrorPadAttrs {\n paddings: Array<[number, number]>;\n mode: 'reflect'|'symmetric';\n}\n\nexport const Mod = 'Mod';\nexport type ModInputs = BinaryInputs;\n\nexport const Multinomial = 'Multinomial';\nexport type MultinomialInputs = Pick;\nexport interface MultinomialAttrs {\n numSamples: number;\n seed: number;\n normalized: boolean;\n}\n\nexport const Multiply = 'Multiply';\nexport type MultiplyInputs = BinaryInputs;\n\nexport const Neg = 'Neg';\nexport type NegInputs = UnaryInputs;\n\nexport const NotEqual = 'NotEqual';\nexport type NotEqualInputs = BinaryInputs;\n\nexport const NonMaxSuppressionV3 = 'NonMaxSuppressionV3';\nexport type NonMaxSuppressionV3Inputs =\n Pick;\nexport interface NonMaxSuppressionV3Attrs {\n maxOutputSize: number;\n iouThreshold: number;\n scoreThreshold: number;\n}\n\nexport const NonMaxSuppressionV4 = 'NonMaxSuppressionV4';\nexport type NonMaxSuppressionV4Inputs =\n Pick;\nexport interface NonMaxSuppressionV4Attrs {\n maxOutputSize: number;\n iouThreshold: number;\n scoreThreshold: number;\n padToMaxOutputSize: boolean;\n}\n\nexport const NonMaxSuppressionV5 = 'NonMaxSuppressionV5';\nexport type NonMaxSuppressionV5Inputs =\n Pick;\nexport interface NonMaxSuppressionV5Attrs {\n maxOutputSize: number;\n iouThreshold: number;\n scoreThreshold: number;\n softNmsSigma: number;\n}\n\nexport const OnesLike = 'OnesLike';\nexport type OnesLikeInputs = UnaryInputs;\n\nexport const OneHot = 'OneHot';\nexport type OneHotInputs = Pick;\nexport interface OneHotAttrs {\n depth: number;\n onValue: number;\n offValue: number;\n dtype: DataType;\n}\n\nexport const Pack = 'Pack';\nexport type PackInputs = TensorInfo[];\nexport interface PackAttrs {\n axis: number;\n}\n\nexport const PadV2 = 'PadV2';\nexport type PadV2Inputs = Pick;\nexport interface PadV2Attrs {\n paddings: Array<[number, number]>;\n constantValue: number;\n}\n\nexport const Pool = 'Pool';\nexport type PoolInputs = Pick;\n\nexport const Pow = 'Pow';\nexport type PowInputs = BinaryInputs;\n\nexport const Prelu = 'Prelu';\nexport type PreluInputs = Pick;\n\nexport const Prod = 'Prod';\nexport type ProdInputs = Pick;\nexport interface ProdAttrs {\n axis: number|number[];\n keepDims: boolean;\n}\n\nexport const RaggedGather = 'RaggedGather';\nexport type RaggedGatherInputs = {\n paramsNestedSplits: TensorInfo[]\n}&Pick;\nexport interface RaggedGatherAttrs {\n outputRaggedRank: 
number;\n}\n\nexport const RaggedRange = 'RaggedRange';\nexport type RaggedRangeInputs =\n Pick;\n\nexport const RaggedTensorToTensor = 'RaggedTensorToTensor';\nexport type RaggedTensorToTensorInputs =\n Pick&\n {rowPartitionTensors: TensorInfo[]};\nexport interface RaggedTensorToTensorAttrs {\n rowPartitionTypes: string[];\n}\n\nexport const Range = 'Range';\nexport interface RangeAttrs {\n start: number;\n stop: number;\n step: number;\n dtype: 'float32'|'int32';\n}\n\nexport const Real = 'Real';\nexport type RealInputs = Pick;\n\nexport const Reciprocal = 'Reciprocal';\nexport type ReciprocalInputs = UnaryInputs;\n\nexport const Relu = 'Relu';\nexport type ReluInputs = Pick;\n\nexport const Reshape = 'Reshape';\nexport type ReshapeInputs = Pick;\nexport interface ReshapeAttrs {\n shape: number[];\n}\n\nexport const ResizeNearestNeighbor = 'ResizeNearestNeighbor';\nexport type ResizeNearestNeighborInputs = Pick;\nexport interface ResizeNearestNeighborAttrs {\n alignCorners: boolean;\n halfPixelCenters: boolean;\n size: [number, number];\n}\n\nexport const ResizeNearestNeighborGrad = 'ResizeNearestNeighborGrad';\nexport type ResizeNearestNeighborGradInputs =\n Pick;\nexport type ResizeNearestNeighborGradAttrs = ResizeNearestNeighborAttrs;\n\nexport const ResizeBilinear = 'ResizeBilinear';\nexport type ResizeBilinearInputs = Pick;\nexport interface ResizeBilinearAttrs {\n alignCorners: boolean;\n halfPixelCenters: boolean;\n size: [number, number];\n}\n\nexport const ResizeBilinearGrad = 'ResizeBilinearGrad';\nexport type ResizeBilinearGradInputs = Pick;\nexport type ResizeBilinearGradAttrs = ResizeBilinearAttrs;\n\nexport const Relu6 = 'Relu6';\nexport type Relu6Inputs = Pick;\n\nexport const Reverse = 'Reverse';\nexport type ReverseInputs = Pick;\nexport interface ReverseAttrs {\n dims: number|number[];\n}\n\nexport const Round = 'Round';\nexport type RoundInputs = UnaryInputs;\n\nexport const Rsqrt = 'Rsqrt';\nexport type RsqrtInputs = UnaryInputs;\n\nexport const ScatterNd = 'ScatterNd';\nexport type ScatterNdInputs = Pick;\nexport interface ScatterNdAttrs {\n shape: number[];\n}\n\nexport const SearchSorted = 'SearchSorted';\nexport type SearchSortedInputs =\n Pick;\nexport interface SearchSortedAttrs {\n side: 'left'|'right';\n}\n\nexport const Select = 'Select';\nexport type SelectInputs = Pick;\n\nexport const Selu = 'Selu';\nexport type SeluInputs = Pick;\n\nexport const Slice = 'Slice';\nexport type SliceInputs = Pick;\nexport interface SliceAttrs {\n begin: number|number[];\n size: number|number[];\n}\nexport const Sin = 'Sin';\nexport type SinInputs = UnaryInputs;\n\nexport const Sinh = 'Sinh';\nexport type SinhInputs = UnaryInputs;\n\nexport const Sign = 'Sign';\nexport type SignInputs = UnaryInputs;\n\nexport const Sigmoid = 'Sigmoid';\nexport type SigmoidInputs = UnaryInputs;\n\nexport const Softplus = 'Softplus';\nexport type SoftplusInputs = UnaryInputs;\n\nexport const Sqrt = 'Sqrt';\nexport type SqrtInputs = UnaryInputs;\n\nexport const Sum = 'Sum';\nexport type SumInputs = Pick;\nexport interface SumAttrs {\n axis: number|number[];\n keepDims: boolean;\n}\n\nexport const SpaceToBatchND = 'SpaceToBatchND';\nexport type SpaceToBatchNDInputs = Pick;\nexport interface SpaceToBatchNDAttrs {\n blockShape: number[];\n paddings: number[][];\n}\n\nexport const SplitV = 'SplitV';\nexport type SplitVInputs = Pick;\nexport interface SplitVAttrs {\n numOrSizeSplits: number[]|number;\n axis: number;\n}\n\nexport const Softmax = 'Softmax';\nexport type SoftmaxInputs = Pick;\nexport 
interface SoftmaxAttrs {\n dim: number;\n}\n\nexport const SparseFillEmptyRows = 'SparseFillEmptyRows';\nexport type SparseFillEmptyRowsInputs =\n Pick;\n\nexport const SparseReshape = 'SparseReshape';\nexport type SparseReshapeInputs =\n Pick;\n\nexport const SparseSegmentMean = 'SparseSegmentMean';\nexport type SparseSegmentMeanInputs =\n Pick;\n\nexport const SparseSegmentSum = 'SparseSegmentSum';\nexport type SparseSegmentSumInputs =\n Pick;\n\nexport const SparseToDense = 'SparseToDense';\nexport type SparseToDenseInputs =\n Pick;\nexport interface SparseToDenseAttrs {\n outputShape: number[];\n}\n\nexport const SquaredDifference = 'SquaredDifference';\nexport type SquaredDifferenceInputs = BinaryInputs;\n\nexport const Square = 'Square';\nexport type SquareInputs = Pick;\n\nexport const StridedSlice = 'StridedSlice';\nexport type StridedSliceInputs = Pick;\nexport interface StridedSliceAttrs {\n begin: number[];\n end: number[];\n strides: number[];\n beginMask: number;\n endMask: number;\n ellipsisMask: number;\n newAxisMask: number;\n shrinkAxisMask: number;\n}\n\nexport const StringNGrams = 'StringNGrams';\nexport type StringNGramsInputs = Pick;\nexport interface StringNGramsAttrs {\n separator: string;\n nGramWidths: number[];\n leftPad: string;\n rightPad: string;\n padWidth: number;\n preserveShortSequences: boolean;\n}\n\nexport const StringSplit = 'StringSplit';\nexport type StringSplitInputs = Pick;\nexport interface StringSplitAttrs {\n skipEmpty: boolean;\n}\n\nexport const StringToHashBucketFast = 'StringToHashBucketFast';\nexport type StringToHashBucketFastInputs = Pick;\nexport interface StringToHashBucketFastAttrs {\n numBuckets: number;\n}\n\nexport const Sub = 'Sub';\nexport type SubInputs = BinaryInputs;\n\nexport const Tan = 'Tan';\nexport type TanInputs = UnaryInputs;\n\nexport const Tanh = 'Tanh';\nexport type TanhInputs = UnaryInputs;\n\nexport const Tile = 'Tile';\nexport type TileInputs = Pick;\nexport interface TileAttrs {\n reps: number[];\n}\n\nexport const TopK = 'TopK';\nexport type TopKInputs = Pick;\nexport interface TopKAttrs {\n k: number;\n sorted: boolean;\n}\n\nexport const Transform = 'Transform';\nexport type TransformInputs = Pick;\nexport interface TransformAttrs {\n interpolation: 'nearest'|'bilinear';\n fillMode: 'constant'|'reflect'|'wrap'|'nearest';\n fillValue: number;\n outputShape?: [number, number];\n}\n\nexport const Transpose = 'Transpose';\nexport type TransposeInputs = Pick;\nexport interface TransposeAttrs {\n perm: number[];\n}\n\nexport const Unique = 'Unique';\nexport type UniqueInputs = Pick;\nexport interface UniqueAttrs {\n axis: number;\n}\n\nexport type UnaryInputs = Pick;\n\nexport const Unpack = 'Unpack';\nexport type UnpackInputs = Pick;\nexport interface UnpackAttrs {\n axis: number;\n}\n\nexport const UnsortedSegmentSum = 'UnsortedSegmentSum';\nexport type UnsortedSegmentSumInputs =\n Pick;\nexport interface UnsortedSegmentSumAttrs {\n numSegments: number;\n}\n\nexport const UpperBound = 'UpperBound';\nexport type UpperBoundInputs =\n Pick;\n\nexport const ZerosLike = 'ZerosLike';\nexport type ZerosLikeInputs = UnaryInputs;\n\n/**\n * TensorFlow.js-only kernels\n */\nexport const Step = 'Step';\nexport type StepInputs = UnaryInputs;\nexport interface StepAttrs {\n alpha: number;\n}\n\nexport const FromPixels = 'FromPixels';\nexport interface FromPixelsInputs {\n pixels: PixelData|ImageData|HTMLImageElement|HTMLCanvasElement|\n HTMLVideoElement|ImageBitmap;\n}\nexport interface FromPixelsAttrs {\n numChannels: 
number;\n}\n\nexport const RotateWithOffset = 'RotateWithOffset';\nexport type RotateWithOffsetInputs = Pick;\nexport interface RotateWithOffsetAttrs {\n radians: number;\n fillValue: number|[number, number, number];\n center: number|[number, number];\n}\n\nexport const _FusedMatMul = '_FusedMatMul';\n// tslint:disable-next-line: class-name\nexport interface _FusedMatMulInputs extends NamedTensorInfoMap {\n a: TensorInfo;\n b: TensorInfo;\n bias?: TensorInfo;\n preluActivationWeights?: TensorInfo;\n}\n// tslint:disable-next-line: class-name\nexport interface _FusedMatMulAttrs {\n transposeA: boolean;\n transposeB: boolean;\n activation: Activation;\n leakyreluAlpha?: number;\n}\n\nexport const FusedConv2D = 'FusedConv2D';\nexport interface FusedConv2DInputs extends NamedTensorInfoMap {\n x: TensorInfo;\n filter: TensorInfo;\n bias?: TensorInfo;\n preluActivationWeights?: TensorInfo;\n}\nexport interface FusedConv2DAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dataFormat: 'NHWC'|'NCHW';\n dilations: [number, number]|number;\n dimRoundingMode: 'floor'|'round'|'ceil';\n activation: Activation;\n leakyreluAlpha?: number;\n}\n\nexport const FusedDepthwiseConv2D = 'FusedDepthwiseConv2D';\nexport interface FusedDepthwiseConv2DInputs extends NamedTensorInfoMap {\n x: TensorInfo;\n filter: TensorInfo;\n bias?: TensorInfo;\n preluActivationWeights?: TensorInfo;\n}\nexport interface FusedDepthwiseConv2DAttrs {\n strides: [number, number]|number;\n pad: 'valid'|'same'|number|ExplicitPadding;\n dataFormat: 'NHWC'|'NCHW';\n dilations: [number, number]|number;\n dimRoundingMode: 'floor'|'round'|'ceil';\n activation: Activation;\n leakyreluAlpha?: number;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env} from './environment';\n\nexport function warn(...msg: Array<{}>): void {\n if (!(env().getBool('IS_TEST') || env().getBool('PROD'))) {\n console.warn(...msg);\n }\n}\n\nexport function log(...msg: Array<{}>): void {\n if (!(env().getBool('IS_TEST') || env().getBool('PROD'))) {\n console.log(...msg);\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {env} from './environment';\nimport {getGlobal} from './global_util';\nimport * as log from './log';\nimport {NamedGradientMap} from './tape';\nimport {Tensor} from './tensor';\nimport {TensorInfo} from './tensor_info';\nimport {RecursiveArray} from './types';\n\nconst kernelRegistry =\n getGlobal('kernelRegistry', () => new Map<`${string}_${string}`,\n KernelConfig>());\nconst gradRegistry =\n getGlobal('gradRegistry', () => new Map());\n\ntype AttributeValue =\n number | number[] | boolean | boolean[] | string | string[] | NamedAttrMap;\n\n/** These are extra non-tensor/primitive params passed to kernel functions. */\nexport type Attribute = AttributeValue | RecursiveArray;\n\n/** Specifies the code to run when executing a kernel. */\nexport type KernelFunc = (params: {\n inputs: NamedTensorInfoMap,\n backend: {},\n attrs?: NamedAttrMap,\n}) => TensorInfo | TensorInfo[];\n\n/** The function to run when computing a gradient during backprop. */\nexport type GradFunc =\n (dy: Tensor | Tensor[], saved: Tensor[], attrs: NamedAttrMap) =>\n NamedGradientMap;\n\n/** Function that gets called after the backend initializes. */\nexport type KernelSetupFunc = (backend: {}) => void;\n/** Function that gets called right before the backend is disposed. */\nexport type KernelDisposeFunc = KernelSetupFunc;\n\n/** Config object for registering a kernel in the global registry. */\nexport interface KernelConfig {\n kernelName: string;\n backendName: string;\n kernelFunc: KernelFunc;\n setupFunc?: KernelSetupFunc;\n disposeFunc?: KernelDisposeFunc;\n}\n\n/** Config object for registering a gradient in the global registry. */\nexport interface GradConfig {\n kernelName: string;\n inputsToSave?: string[];\n // When saveAllInputs is true, all inputs will be saved. 
Only use this flag\n // if inputs is an array of Tensors.\n saveAllInputs?: boolean;\n outputsToSave?: boolean[];\n gradFunc: GradFunc;\n}\n\nexport interface NamedTensorInfoMap {\n [name: string]: TensorInfo|undefined;\n}\n\nexport interface NamedAttrMap {\n [name: string]: Attribute;\n}\n\n/**\n * Returns the kernel function (code) associated with the provided names.\n *\n * @param kernelName The official name of the kernel.\n * @param backendName The official name of the backend.\n */\nexport function getKernel(\n kernelName: string, backendName: string): KernelConfig {\n const key = makeKey(kernelName, backendName);\n return kernelRegistry.get(key);\n}\n\n/**\n * Returns the registered gradient info associated with the provided kernel.\n * @param kernelName The official TF kernel name.\n */\nexport function getGradient(kernelName: string): GradConfig {\n return gradRegistry.get(kernelName);\n}\n\nexport function getKernelsForBackend(backendName: string): KernelConfig[] {\n const it = kernelRegistry.entries();\n const result: KernelConfig[] = [];\n\n while (true) {\n const {done, value} = it.next();\n if (done) {\n break;\n }\n const [key, config] = value;\n const [backend, ] = key.split('_');\n if (backend === backendName) {\n result.push(config);\n }\n }\n return result;\n}\n\n/**\n * Registers the function (forward pass) for the kernel in a global registry.\n *\n * @param config A config object with the following properties:\n * - `kernelName` The official name of the kernel.\n * - `backendName` The official name of the backend.\n * - `kernelFunc` The function to run during the forward pass of the kernel.\n * - `setupFunc` Optional. Gets called once, after the backend initializes.\n * - `disposeFunc` Optional. Gets called once, right before the backend is\n * disposed.\n */\nexport function registerKernel(config: KernelConfig) {\n const {kernelName, backendName} = config;\n const key = makeKey(kernelName, backendName);\n if (kernelRegistry.has(key)) {\n log.warn(\n `The kernel '${kernelName}' for backend ` +\n `'${backendName}' is already registered`);\n }\n kernelRegistry.set(key, config);\n}\n\n/**\n * Registers a gradient function for a given kernel in the global registry,\n * to be used during the back-propagation of that kernel.\n *\n * @param config An object with the following properties:\n * - `kernelName` The name of the kernel that the gradient function is for.\n * - `gradFunc` The function to run during back-propagation.\n */\nexport function registerGradient(config: GradConfig) {\n const {kernelName} = config;\n\n if (gradRegistry.has(kernelName)) {\n // TODO (yassogba) after 3.0 assess whether we need to keep this gated\n // to debug mode.\n if (env().getBool('DEBUG')) {\n log.warn(`Overriding the gradient for '${kernelName}'`);\n }\n }\n gradRegistry.set(kernelName, config);\n}\n\n/**\n * Removes the kernel function from the registry.\n *\n * @param kernelName The official name of the kernel.\n * @param backendName The official name of the backend.\n *\n */\nexport function unregisterKernel(\n kernelName: string, backendName: string): void {\n const key = makeKey(kernelName, backendName);\n if (!kernelRegistry.has(key)) {\n throw new Error(\n `The kernel '${kernelName}' for backend ` +\n `'${backendName}' is not registered`);\n }\n kernelRegistry.delete(key);\n}\n\n/** Removes the registered gradient from the global registry. 
*/\nexport function unregisterGradient(kernelName: string): void {\n if (!gradRegistry.has(kernelName)) {\n throw new Error(\n `The gradient '${kernelName}' for backend is not registered`);\n }\n gradRegistry.delete(kernelName);\n}\n\n/**\n * Finds kernels that have already been registered to a backend and re-registers\n * them for a new backend. Useful for registering custom backends.\n * @param registeredBackendName Already registered backend.\n * @param newBackendName New backend.\n */\nexport function copyRegisteredKernels(\n registeredBackendName: string, newBackendName: string): void {\n const kernels = getKernelsForBackend(registeredBackendName);\n kernels.forEach(kernelConfig => {\n const newKernelConfig =\n Object.assign({}, kernelConfig, {backendName: newBackendName});\n registerKernel(newKernelConfig);\n });\n}\n\nfunction makeKey(kernelName: string,\n backendName: string): `${string}_${string}` {\n return `${backendName}_${kernelName}`;\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// Workaround for allowing cjs module to be included in bundle created by\n// rollup.\nimport * as LongExports from 'long';\n// tslint:disable-next-line\nconst Long: LongExports.LongConstructor =\n // tslint:disable-next-line\n (LongExports as any).default || LongExports;\n\nexport function hexToLong(hex: string): Long {\n return Long.fromString(hex, true, 16);\n}\n\n// Some primes between 2^63 and 2^64 for various uses.\n// Hex 0xc3a5c85c97cb3127\nconst k0: Long = hexToLong('c3a5c85c97cb3127');\n// Hex 0xb492b66fbe98f273\nconst k1: Long = hexToLong('b492b66fbe98f273');\n// Hex 0x9ae16a3b2f90404f\nconst k2: Long = hexToLong('9ae16a3b2f90404f');\n\nfunction shiftMix(val: Long): Long {\n return val.xor(val.shru(47));\n}\n\nfunction fetch(s: Uint8Array, offset: number, numBytes: number): Long {\n const bytes = s.slice(offset, offset + numBytes);\n return Long.fromBytes(Array.from(bytes), true, true);\n}\n\nfunction fetch64(s: Uint8Array, offset: number): Long {\n return fetch(s, offset, 8);\n}\n\nfunction fetch32(s: Uint8Array, offset: number): Long {\n return fetch(s, offset, 4);\n}\n\nfunction rotate64(val: Long, shift: number): Long {\n // Avoid shifting by 64: doing so yields an undefined result.\n return shift === 0 ? val : val.shru(shift).or(val.shl(64 - shift));\n}\n\nfunction hashLen16(u: Long, v: Long, mul = hexToLong('9ddfea08eb382d69')) {\n // Murmur-inspired hashing.\n let a = u.xor(v).mul(mul);\n a = a.xor(a.shru(47));\n let b = v.xor(a).mul(mul);\n b = b.xor(b.shru(47));\n b = b.mul(mul);\n return b;\n}\n\n// Return a 16-byte hash for 48 bytes. 
Quick and dirty.\n// Callers do best to use \"random-looking\" values for a and b.\nfunction weakHashLen32WithSeeds(\n w: Long, x: Long, y: Long, z: Long, a: Long, b: Long) {\n a = a.add(w);\n b = rotate64(b.add(a).add(z), 21);\n const c = a;\n a = a.add(x);\n a = a.add(y);\n b = b.add(rotate64(a, 44));\n return [a.add(z), b.add(c)];\n}\n\nfunction weakHashLen32WithSeedsStr(\n s: Uint8Array, offset: number, a: Long, b: Long) {\n return weakHashLen32WithSeeds(\n fetch64(s, offset), fetch64(s, offset + 8), fetch64(s, offset + 16),\n fetch64(s, offset + 24), a, b);\n}\n\nfunction hashLen0to16(s: Uint8Array, len = s.length): Long {\n if (len >= 8) {\n const mul = k2.add(len * 2);\n const a = fetch64(s, 0).add(k2);\n const b = fetch64(s, len - 8);\n const c = rotate64(b, 37).mul(mul).add(a);\n const d = rotate64(a, 25).add(b).mul(mul);\n return hashLen16(c, d, mul);\n }\n if (len >= 4) {\n const mul = k2.add(len * 2);\n const a = fetch32(s, 0);\n return hashLen16(a.shl(3).add(len), fetch32(s, len - 4), mul);\n }\n if (len > 0) {\n const a = s[0];\n const b = s[len >> 1];\n const c = s[len - 1];\n const y = a + (b << 8);\n const z = len + (c << 2);\n return shiftMix(k2.mul(y).xor(k0.mul(z))).mul(k2);\n }\n return k2;\n}\n\nfunction hashLen17to32(s: Uint8Array, len = s.length): Long {\n const mul = k2.add(len * 2);\n const a = fetch64(s, 0).mul(k1);\n const b = fetch64(s, 8);\n const c = fetch64(s, len - 8).mul(mul);\n const d = fetch64(s, len - 16).mul(k2);\n return hashLen16(\n rotate64(a.add(b), 43).add(rotate64(c, 30)).add(d),\n a.add(rotate64(b.add(k2), 18)).add(c), mul);\n}\n\nfunction hashLen33to64(s: Uint8Array, len = s.length): Long {\n const mul = k2.add(len * 2);\n const a = fetch64(s, 0).mul(k2);\n const b = fetch64(s, 8);\n const c = fetch64(s, len - 8).mul(mul);\n const d = fetch64(s, len - 16).mul(k2);\n const y = rotate64(a.add(b), 43).add(rotate64(c, 30)).add(d);\n const z = hashLen16(y, a.add(rotate64(b.add(k2), 18)).add(c), mul);\n const e = fetch64(s, 16).mul(mul);\n const f = fetch64(s, 24);\n const g = y.add(fetch64(s, len - 32)).mul(mul);\n const h = z.add(fetch64(s, len - 24)).mul(mul);\n return hashLen16(\n rotate64(e.add(f), 43).add(rotate64(g, 30)).add(h),\n e.add(rotate64(f.add(a), 18)).add(g), mul);\n}\n\nexport function fingerPrint64(s: Uint8Array, len = s.length): Long {\n const seed: Long = Long.fromNumber(81, true);\n if (len <= 32) {\n if (len <= 16) {\n return hashLen0to16(s, len);\n } else {\n return hashLen17to32(s, len);\n }\n } else if (len <= 64) {\n return hashLen33to64(s, len);\n }\n\n // For strings over 64 bytes we loop. 
Internal state consists of\n // 56 bytes: v, w, x, y, and z.\n let x = seed;\n let y = seed.mul(k1).add(113);\n\n let z = shiftMix(y.mul(k2).add(113)).mul(k2);\n let v = [Long.UZERO, Long.UZERO];\n let w = [Long.UZERO, Long.UZERO];\n x = x.mul(k2).add(fetch64(s, 0));\n\n let offset = 0;\n // Set end so that after the loop we have 1 to 64 bytes left to process.\n const end = ((len - 1) >> 6) * 64;\n const last64 = end + ((len - 1) & 63) - 63;\n\n do {\n x = rotate64(x.add(y).add(v[0]).add(fetch64(s, offset + 8)), 37).mul(k1);\n y = rotate64(y.add(v[1]).add(fetch64(s, offset + 48)), 42).mul(k1);\n x = x.xor(w[1]);\n y = y.add(v[0]).add(fetch64(s, offset + 40));\n z = rotate64(z.add(w[0]), 33).mul(k1);\n v = weakHashLen32WithSeedsStr(s, offset, v[1].mul(k1), x.add(w[0]));\n w = weakHashLen32WithSeedsStr(\n s, offset + 32, z.add(w[1]), y.add(fetch64(s, offset + 16)));\n\n [z, x] = [x, z];\n offset += 64;\n } while (offset !== end);\n const mul = k1.add(z.and(0xff).shl(1));\n // Point to the last 64 bytes of input.\n offset = last64;\n\n w[0] = w[0].add((len - 1) & 63);\n v[0] = v[0].add(w[0]);\n w[0] = w[0].add(v[0]);\n\n x = rotate64(x.add(y).add(v[0]).add(fetch64(s, offset + 8)), 37).mul(mul);\n y = rotate64(y.add(v[1]).add(fetch64(s, offset + 48)), 42).mul(mul);\n x = x.xor(w[1].mul(9));\n y = y.add(v[0].mul(9).add(fetch64(s, offset + 40)));\n z = rotate64(z.add(w[0]), 33).mul(mul);\n v = weakHashLen32WithSeedsStr(s, offset, v[1].mul(mul), x.add(w[0]));\n w = weakHashLen32WithSeedsStr(\n s, offset + 32, z.add(w[1]), y.add(fetch64(s, offset + 16)));\n\n [z, x] = [x, z];\n\n return hashLen16(\n hashLen16(v[0], w[0], mul).add(shiftMix(y).mul(k0)).add(z),\n hashLen16(v[1], w[1], mul).add(x), mul);\n}\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env} from './environment';\nimport {BackendValues, DataType, RecursiveArray, TensorLike, TypedArray} from './types';\nimport * as base from './util_base';\nexport * from './util_base';\nexport * from './hash_util';\n\n/**\n * Create typed array for scalar value. 
Used for storing in `DataStorage`.\n */\nexport function createScalarValue(\n value: DataType, dtype: DataType): BackendValues {\n if (dtype === 'string') {\n return encodeString(value);\n }\n\n return toTypedArray([value], dtype);\n}\n\nfunction noConversionNeeded(a: TensorLike, dtype: DataType): boolean {\n return (a instanceof Float32Array && dtype === 'float32') ||\n (a instanceof Int32Array && dtype === 'int32') ||\n (a instanceof Uint8Array && dtype === 'bool');\n}\n\nexport function toTypedArray(a: TensorLike, dtype: DataType): TypedArray {\n if (dtype === 'string') {\n throw new Error('Cannot convert a string[] to a TypedArray');\n }\n if (Array.isArray(a)) {\n a = flatten(a);\n }\n\n if (env().getBool('DEBUG')) {\n base.checkConversionForErrors(a as number[], dtype);\n }\n if (noConversionNeeded(a, dtype)) {\n return a as TypedArray;\n }\n if (dtype == null || dtype === 'float32' || dtype === 'complex64') {\n return new Float32Array(a as number[]);\n } else if (dtype === 'int32') {\n return new Int32Array(a as number[]);\n } else if (dtype === 'bool') {\n const bool = new Uint8Array((a as number[]).length);\n for (let i = 0; i < bool.length; ++i) {\n if (Math.round((a as number[])[i]) !== 0) {\n bool[i] = 1;\n }\n }\n return bool;\n } else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n}\n\n/**\n * Returns the current high-resolution time in milliseconds relative to an\n * arbitrary time in the past. It works across different platforms (node.js,\n * browsers).\n *\n * ```js\n * console.log(tf.util.now());\n * ```\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function now(): number {\n return env().platform.now();\n}\n\n/**\n * Returns a platform-specific implementation of\n * [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).\n *\n * If `fetch` is defined on the global object (`window`, `process`, etc.),\n * `tf.util.fetch` returns that function.\n *\n * If not, `tf.util.fetch` returns a platform-specific solution.\n *\n * ```js\n * const resource = await tf.util.fetch('https://unpkg.com/@tensorflow/tfjs');\n * // handle response\n * ```\n *\n * @doc {heading: 'Util'}\n */\nexport function fetch(\n path: string, requestInits?: RequestInit): Promise {\n return env().platform.fetch(path, requestInits);\n}\n\n/**\n * Encodes the provided string into bytes using the provided encoding scheme.\n *\n * @param s The string to encode.\n * @param encoding The encoding scheme. Defaults to utf-8.\n *\n * @doc {heading: 'Util'}\n */\nexport function encodeString(s: string, encoding = 'utf-8'): Uint8Array {\n encoding = encoding || 'utf-8';\n return env().platform.encode(s, encoding);\n}\n\n/**\n * Decodes the provided bytes into a string using the provided encoding scheme.\n * @param bytes The bytes to decode.\n *\n * @param encoding The encoding scheme. 
Defaults to utf-8.\n *\n * @doc {heading: 'Util'}\n */\nexport function decodeString(bytes: Uint8Array, encoding = 'utf-8'): string {\n encoding = encoding || 'utf-8';\n return env().platform.decode(bytes, encoding);\n}\n\nexport function isTypedArray(a: {}): a is Float32Array|Int32Array|Uint8Array|\n Uint8ClampedArray {\n return env().platform.isTypedArray(a);\n}\n\n// NOTE: We explicitly type out what T extends instead of any so that\n// util.flatten on a nested array of number doesn't try to infer T as a\n// number[][], causing us to explicitly type util.flatten().\n/**\n * Flattens an arbitrarily nested array.\n *\n * ```js\n * const a = [[1, 2], [3, 4], [5, [6, [7]]]];\n * const flat = tf.util.flatten(a);\n * console.log(flat);\n * ```\n *\n * @param arr The nested array to flatten.\n * @param result The destination array which holds the elements.\n * @param skipTypedArray If true, avoids flattening the typed arrays. Defaults\n * to false.\n *\n * @doc {heading: 'Util', namespace: 'util'}\n */\nexport function\nflatten|TypedArray>(\n arr: T|RecursiveArray, result: T[] = [], skipTypedArray = false): T[] {\n if (result == null) {\n result = [];\n }\n if (typeof arr === 'boolean' || typeof arr === 'number' ||\n typeof arr === 'string' || base.isPromise(arr) || arr == null ||\n isTypedArray(arr) && skipTypedArray) {\n result.push(arr as T);\n } else if (Array.isArray(arr) || isTypedArray(arr)) {\n for (let i = 0; i < arr.length; ++i) {\n flatten(arr[i], result, skipTypedArray);\n }\n } else {\n let maxIndex = -1;\n for (const key of Object.keys(arr)) {\n // 0 or positive integer.\n if (/^([1-9]+[0-9]*|0)$/.test(key)) {\n maxIndex = Math.max(maxIndex, Number(key));\n }\n }\n for (let i = 0; i <= maxIndex; i++) {\n // tslint:disable-next-line: no-unnecessary-type-assertion\n flatten((arr as RecursiveArray)[i], result, skipTypedArray);\n }\n }\n return result;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {BackendTimer, BackendTimingInfo} from './backends/backend';\nimport {env} from './environment';\nimport {Tensor} from './tensor';\nimport {NamedTensorMap} from './tensor_types';\nimport {DataType, DataTypeMap, TypedArray} from './types';\nimport * as util from './util';\n\nexport type KernelProfile = {\n kernelName: string,\n outputs: Tensor[],\n inputs: NamedTensorMap,\n timeMs: Promise,\n extraInfo: Promise\n};\n\nexport class Profiler {\n constructor(private backendTimer: BackendTimer, private logger?: Logger) {\n if (logger == null) {\n this.logger = new Logger();\n }\n }\n\n profileKernel(kernelName: string, inputs: NamedTensorMap, f: () => Tensor[]):\n KernelProfile {\n let outputs: Tensor[];\n const holdResultWrapperFn = () => {\n outputs = f();\n };\n let timer: Promise;\n const start = util.now();\n if (this.backendTimer.timerAvailable()) {\n timer = this.backendTimer.time(holdResultWrapperFn);\n } else {\n holdResultWrapperFn();\n for (const output of outputs) {\n output.dataSync();\n }\n timer = Promise.resolve({kernelMs: util.now() - start});\n }\n if (env().getBool('CHECK_COMPUTATION_FOR_ERRORS')) {\n for (let i = 0; i < outputs.length; i++) {\n const output = outputs[i];\n // Dangling promise here because we don't want to propagate up\n // asynchronicity.\n output.data().then(tensorVals => {\n checkComputationForErrors(tensorVals, output.dtype, kernelName);\n });\n }\n }\n\n const kernelProfile = {\n kernelName,\n outputs,\n inputs,\n timeMs: timer.then(timing => timing.kernelMs),\n extraInfo: timer.then(\n timing => timing.getExtraProfileInfo != null ?\n timing.getExtraProfileInfo() :\n '')\n };\n return kernelProfile;\n }\n\n logKernelProfile(kernelProfile: KernelProfile): void {\n const {kernelName, outputs, timeMs, inputs, extraInfo} = kernelProfile;\n\n outputs.forEach(result => {\n Promise.all([result.data(), timeMs, extraInfo]).then(valueContainer => {\n this.logger.logKernelProfile(\n kernelName, result, valueContainer[0], valueContainer[1], inputs,\n valueContainer[2]);\n });\n });\n }\n}\n\nexport function checkComputationForErrors(\n vals: DataTypeMap[D], dtype: D, kernelName: string): boolean {\n if (dtype !== 'float32') {\n // Only floating point computations will generate NaN values\n return false;\n }\n for (let i = 0; i < vals.length; i++) {\n const num = vals[i] as number;\n if (isNaN(num) || !isFinite(num)) {\n // Throwing custom exception so behavior is testable.\n console.warn(`Found ${num} in the result of '${kernelName}'`);\n return true;\n }\n }\n return false;\n}\n\nexport class Logger {\n logKernelProfile(\n name: string, result: Tensor, vals: TypedArray,\n timeMs: number|{error: string}, inputs: NamedTensorMap,\n extraInfo?: string) {\n const time = typeof timeMs === 'number' ? 
util.rightPad(`${timeMs}ms`, 9) :\n timeMs['error'];\n const paddedName = util.rightPad(name, 25);\n const rank = result.rank;\n const size = result.size;\n const shape = util.rightPad(result.shape.toString(), 14);\n let inputShapesDescription = '';\n\n for (const name in inputs) {\n const input = inputs[name];\n if (input != null) {\n // The input might be a non-tensor (e.g HTMLImageElement), in which case\n // we claim the output shape as input shape.\n const inputShape = input.shape || result.shape;\n const inputRank = inputShape.length;\n inputShapesDescription +=\n `${name}: ${inputRank}D ${inputRank > 0 ? inputShape : ''} `;\n }\n }\n\n console.log(\n `%c${paddedName}\\t%c${time}\\t%c${rank}D ${shape}\\t%c${size}\\t%c${\n inputShapesDescription}\\t%c${extraInfo}`,\n 'font-weight:bold', 'color:red', 'color:blue', 'color: orange',\n 'color: green', 'color: steelblue');\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, TypedArray} from './types';\nimport {computeStrides, isString, rightPad, sizeFromShape} from './util';\n\n// Maximum number of values before we decide to show ellipsis.\nconst FORMAT_LIMIT_NUM_VALS = 20;\n// Number of first and last values to show when displaying a, b,...,y, z.\nconst FORMAT_NUM_FIRST_LAST_VALS = 3;\n// Number of significant digits to show.\nconst FORMAT_NUM_SIG_DIGITS = 7;\n\nexport function tensorToString(\n vals: TypedArray|string[], shape: number[], dtype: DataType,\n verbose: boolean) {\n const strides = computeStrides(shape);\n const padPerCol = computeMaxSizePerColumn(vals, shape, dtype, strides);\n const rank = shape.length;\n const valsLines = subTensorToString(vals, shape, dtype, strides, padPerCol);\n const lines = ['Tensor'];\n if (verbose) {\n lines.push(` dtype: ${dtype}`);\n lines.push(` rank: ${rank}`);\n lines.push(` shape: [${shape}]`);\n lines.push(` values:`);\n }\n lines.push(valsLines.map(l => ' ' + l).join('\\n'));\n return lines.join('\\n');\n}\n\nfunction computeMaxSizePerColumn(\n vals: TypedArray|string[], shape: number[], dtype: DataType,\n strides: number[]): number[] {\n const n = sizeFromShape(shape);\n const numCols = strides[strides.length - 1];\n const padPerCol = new Array(numCols).fill(0);\n const rank = shape.length;\n const valuesOrTuples =\n dtype === 'complex64' ? 
createComplexTuples(vals) : vals;\n\n if (rank > 1) {\n for (let row = 0; row < n / numCols; row++) {\n const offset = row * numCols;\n for (let j = 0; j < numCols; j++) {\n padPerCol[j] = Math.max(\n padPerCol[j],\n valToString(valuesOrTuples[offset + j], 0, dtype).length);\n }\n }\n }\n return padPerCol;\n}\n\nfunction valToString(\n val: number|string|[number, number], pad: number, dtype: DataType) {\n let valStr: string;\n if (Array.isArray(val)) {\n valStr = `${parseFloat(val[0].toFixed(FORMAT_NUM_SIG_DIGITS))} + ` +\n `${parseFloat(val[1].toFixed(FORMAT_NUM_SIG_DIGITS))}j`;\n } else if (isString(val)) {\n valStr = `'${val}'`;\n } else if (dtype === 'bool') {\n valStr = boolNumToString(val);\n } else {\n valStr = parseFloat(val.toFixed(FORMAT_NUM_SIG_DIGITS)).toString();\n }\n\n return rightPad(valStr, pad);\n}\n\nfunction boolNumToString(v: number): string {\n return v === 0 ? 'false' : 'true';\n}\n\nfunction subTensorToString(\n vals: TypedArray|string[], shape: number[], dtype: DataType,\n strides: number[], padPerCol: number[], isLast = true): string[] {\n const storagePerElement = dtype === 'complex64' ? 2 : 1;\n\n const size = shape[0];\n const rank = shape.length;\n if (rank === 0) {\n if (dtype === 'complex64') {\n const complexTuple = createComplexTuples(vals);\n return [valToString(complexTuple[0], 0, dtype)];\n }\n if (dtype === 'bool') {\n return [boolNumToString(vals[0] as number)];\n }\n return [vals[0].toString()];\n }\n\n if (rank === 1) {\n if (size > FORMAT_LIMIT_NUM_VALS) {\n const firstValsSize = FORMAT_NUM_FIRST_LAST_VALS * storagePerElement;\n\n let firstVals = Array.from(\n vals.slice(0, firstValsSize));\n let lastVals = Array.from(vals.slice(\n (size - FORMAT_NUM_FIRST_LAST_VALS) * storagePerElement,\n size * storagePerElement));\n if (dtype === 'complex64') {\n firstVals = createComplexTuples(firstVals);\n lastVals = createComplexTuples(lastVals);\n }\n return [\n '[' +\n firstVals.map((x, i) => valToString(x, padPerCol[i], dtype))\n .join(', ') +\n ', ..., ' +\n lastVals\n .map(\n (x, i) => valToString(\n x, padPerCol[size - FORMAT_NUM_FIRST_LAST_VALS + i], dtype))\n .join(', ') +\n ']'\n ];\n }\n const displayVals: Array =\n dtype === 'complex64' ? createComplexTuples(vals) :\n Array.from(vals);\n\n return [\n '[' +\n displayVals.map((x, i) => valToString(x, padPerCol[i], dtype))\n .join(', ') +\n ']'\n ];\n }\n\n // The array is rank 2 or more.\n const subshape = shape.slice(1);\n const substrides = strides.slice(1);\n const stride = strides[0] * storagePerElement;\n const lines: string[] = [];\n if (size > FORMAT_LIMIT_NUM_VALS) {\n for (let i = 0; i < FORMAT_NUM_FIRST_LAST_VALS; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(\n vals.slice(start, end), subshape, dtype, substrides, padPerCol,\n false /* isLast */));\n }\n lines.push('...');\n for (let i = size - FORMAT_NUM_FIRST_LAST_VALS; i < size; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(\n vals.slice(start, end), subshape, dtype, substrides, padPerCol,\n i === size - 1 /* isLast */));\n }\n } else {\n for (let i = 0; i < size; i++) {\n const start = i * stride;\n const end = start + stride;\n lines.push(...subTensorToString(\n vals.slice(start, end), subshape, dtype, substrides, padPerCol,\n i === size - 1 /* isLast */));\n }\n }\n const sep = rank === 2 ? ',' : '';\n lines[0] = '[' + (size > 0 ? 
lines[0] + sep : '');\n for (let i = 1; i < lines.length - 1; i++) {\n lines[i] = ' ' + lines[i] + sep;\n }\n let newLineSep = ',\\n';\n for (let i = 2; i < rank; i++) {\n newLineSep += '\\n';\n }\n lines[lines.length - 1] =\n ' ' + lines[lines.length - 1] + ']' + (isLast ? '' : newLineSep);\n return lines;\n}\n\nfunction createComplexTuples(vals: Array<{}>|\n TypedArray): Array<[number, number]> {\n const complexTuples: Array<[number, number]> = [];\n for (let i = 0; i < vals.length; i += 2) {\n complexTuples.push([vals[i], vals[i + 1]] as [number, number]);\n }\n return complexTuples;\n}\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// Workaround for: https://github.com/bazelbuild/rules_nodejs/issues/1265\n/// \n\nimport {getGlobal} from './global_util';\nimport {TensorInfo, DataId} from './tensor_info';\nimport {tensorToString} from './tensor_format';\nimport {ArrayMap, BackendValues, DataType, DataTypeMap, DataValues, NumericDataType, Rank, ShapeMap, SingleValueMap, TypedArray} from './types';\nimport * as util from './util';\nimport {computeStrides, toNestedArray} from './util';\n\nexport interface TensorData {\n dataId?: DataId;\n values?: DataTypeMap[D];\n}\n\n// This interface mimics KernelBackend (in backend.ts), which would create a\n// circular dependency if imported.\nexport interface Backend {}\n\n/**\n * A mutable object, similar to `tf.Tensor`, that allows users to set values\n * at locations before converting to an immutable `tf.Tensor`.\n *\n * See `tf.buffer` for creating a tensor buffer.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class TensorBuffer {\n size: number;\n shape: ShapeMap[R];\n strides: number[];\n values: DataTypeMap[D];\n\n constructor(shape: ShapeMap[R], public dtype: D, values?: DataTypeMap[D]) {\n this.shape = shape.slice() as ShapeMap[R];\n this.size = util.sizeFromShape(shape);\n\n if (values != null) {\n const n = values.length;\n util.assert(\n n === this.size,\n () => `Length of values '${n}' does not match the size ` +\n `inferred by the shape '${this.size}'.`);\n }\n if (dtype === 'complex64') {\n throw new Error(\n `complex64 dtype TensorBuffers are not supported. 
Please create ` +\n `a TensorBuffer for the real and imaginary parts separately and ` +\n `call tf.complex(real, imag).`);\n }\n this.values = values || util.getArrayFromDType(dtype, this.size);\n this.strides = computeStrides(shape);\n }\n\n /**\n * Sets a value in the buffer at a given location.\n *\n * @param value The value to set.\n * @param locs The location indices.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n set(value: SingleValueMap[D], ...locs: number[]): void {\n if (locs.length === 0) {\n locs = [0];\n }\n util.assert(\n locs.length === this.rank,\n () => `The number of provided coordinates (${locs.length}) must ` +\n `match the rank (${this.rank})`);\n\n const index = this.locToIndex(locs);\n this.values[index] = value as number;\n }\n\n /**\n * Returns the value in the buffer at the provided location.\n *\n * @param locs The location indices.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n get(...locs: number[]): SingleValueMap[D] {\n if (locs.length === 0) {\n locs = [0];\n }\n let i = 0;\n for (const loc of locs) {\n if (loc < 0 || loc >= this.shape[i]) {\n const msg = `Requested out of range element at ${locs}. ` +\n ` Buffer shape=${this.shape}`;\n throw new Error(msg);\n }\n i++;\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += this.strides[i] * locs[i];\n }\n return this.values[index] as SingleValueMap[D];\n }\n\n locToIndex(locs: number[]): number {\n if (this.rank === 0) {\n return 0;\n } else if (this.rank === 1) {\n return locs[0];\n }\n let index = locs[locs.length - 1];\n for (let i = 0; i < locs.length - 1; ++i) {\n index += this.strides[i] * locs[i];\n }\n return index;\n }\n\n indexToLoc(index: number): number[] {\n if (this.rank === 0) {\n return [];\n } else if (this.rank === 1) {\n return [index];\n }\n const locs: number[] = new Array(this.shape.length);\n for (let i = 0; i < locs.length - 1; ++i) {\n locs[i] = Math.floor(index / this.strides[i]);\n index -= locs[i] * this.strides[i];\n }\n locs[locs.length - 1] = index;\n return locs;\n }\n\n get rank() {\n return this.shape.length;\n }\n\n /**\n * Creates an immutable `tf.Tensor` object from the buffer.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\n toTensor(): Tensor {\n return trackerFn().makeTensor(this.values, this.shape, this.dtype) as\n Tensor;\n }\n}\n\nexport interface DataToGPUWebGLOption {\n customTexShape?: [number, number];\n}\n\nexport type DataToGPUOptions = DataToGPUWebGLOption;\n\nexport interface GPUData {\n tensorRef: Tensor;\n texture?: WebGLTexture;\n buffer?: GPUBuffer;\n texShape?: [number, number];\n bufSize?: number;\n}\n\nexport interface TensorTracker {\n makeTensor(\n values: DataValues, shape: number[], dtype: DataType,\n backend?: Backend): Tensor;\n makeVariable(\n initialValue: Tensor, trainable?: boolean, name?: string,\n dtype?: DataType): Variable;\n incRef(a: Tensor, backend: Backend): void;\n disposeTensor(t: Tensor): void;\n disposeVariable(v: Variable): void;\n read(dataId: DataId): Promise;\n readSync(dataId: DataId): BackendValues;\n readToGPU(dataId: DataId, options?: DataToGPUOptions): GPUData;\n}\n\n/**\n * The Tensor class calls into this handler to delegate chaining operations.\n */\nexport interface OpHandler {\n cast(x: T, dtype: DataType): T;\n buffer(\n shape: ShapeMap[R], dtype: D,\n values?: DataTypeMap[D]): TensorBuffer;\n print(x: T, verbose: boolean): void;\n clone(x: T): T;\n // TODO(yassogba) bring reshape back?\n}\n\n// For tracking tensor 
creation and disposal.\nlet trackerFn: () => TensorTracker = null;\n// Used by chaining methods to call into ops.\nlet opHandler: OpHandler = null;\n// Used to warn about deprecated methods.\nlet deprecationWarningFn: (msg: string) => void = null;\n// This here so that we can use this method on dev branches and keep the\n// functionality at master.\n// tslint:disable-next-line:no-unused-expression\n[deprecationWarningFn];\n\n/**\n * An external consumer can register itself as the tensor tracker. This way\n * the Tensor class can notify the tracker for every tensor created and\n * disposed.\n */\nexport function setTensorTracker(fn: () => TensorTracker) {\n trackerFn = fn;\n}\n\n/**\n * An external consumer can register itself as the op handler. This way the\n * Tensor class can have chaining methods that call into ops via the op\n * handler.\n */\nexport function setOpHandler(handler: OpHandler) {\n opHandler = handler;\n}\n\n/**\n * Sets the deprecation warning function to be used by this file. This way the\n * Tensor class can be a leaf but still use the environment.\n */\nexport function setDeprecationWarningFn(fn: (msg: string) => void) {\n deprecationWarningFn = fn;\n}\n\n// Declare this namespace to make Tensor class augmentation work in google3.\nexport declare namespace Tensor {}\n/**\n * A `tf.Tensor` object represents an immutable, multidimensional array of\n * numbers that has a shape and a data type.\n *\n * For performance reasons, functions that create tensors do not necessarily\n * perform a copy of the data passed to them (e.g. if the data is passed as a\n * `Float32Array`), and changes to the data will change the tensor. This is not\n * a feature and is not supported. To avoid this behavior, use the tensor before\n * changing the input data or create a copy with `copy = tf.add(yourTensor, 0)`.\n *\n * See `tf.tensor` for details on how to create a `tf.Tensor`.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class Tensor implements TensorInfo {\n /** Unique id of this tensor. */\n readonly id: number;\n /**\n * Id of the bucket holding the data for this tensor. Multiple arrays can\n * point to the same bucket (e.g. when calling array.reshape()).\n */\n dataId: DataId;\n /** The shape of the tensor. */\n readonly shape: ShapeMap[R];\n /** Number of elements in the tensor. */\n readonly size: number;\n /** The data type for the array. */\n readonly dtype: DataType;\n /** The rank type for the array (see `Rank` enum). */\n readonly rankType: R;\n\n /** Whether this tensor has been globally kept. */\n kept = false;\n /** The id of the scope this tensor is being tracked in. */\n scopeId: number;\n\n /**\n * Number of elements to skip in each dimension when indexing. See\n * https://docs.scipy.org/doc/numpy/reference/generated/\\\n * numpy.ndarray.strides.html\n */\n readonly strides: number[];\n\n constructor(shape: ShapeMap[R], dtype: DataType, dataId: DataId, id: number) {\n this.shape = shape.slice() as ShapeMap[R];\n this.dtype = dtype || 'float32';\n this.size = util.sizeFromShape(shape);\n this.strides = computeStrides(shape);\n this.dataId = dataId;\n this.id = id;\n this.rankType = (this.rank < 5 ? 
this.rank.toString() : 'higher') as R;\n }\n\n get rank(): number {\n return this.shape.length;\n }\n\n /**\n * Returns a promise of `tf.TensorBuffer` that holds the underlying data.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async buffer(): Promise> {\n const vals = await this.data();\n return opHandler.buffer(this.shape, this.dtype as D, vals);\n }\n\n /**\n * Returns a `tf.TensorBuffer` that holds the underlying data.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n bufferSync(): TensorBuffer {\n return opHandler.buffer(this.shape, this.dtype as D, this.dataSync());\n }\n\n /**\n * Returns the tensor data as a nested array. The transfer of data is done\n * asynchronously.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async array(): Promise {\n const vals = await this.data();\n return toNestedArray(this.shape, vals, this.dtype === 'complex64') as\n ArrayMap[R];\n }\n\n /**\n * Returns the tensor data as a nested array. The transfer of data is done\n * synchronously.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n arraySync(): ArrayMap[R] {\n return toNestedArray(\n this.shape, this.dataSync(), this.dtype === 'complex64') as\n ArrayMap[R];\n }\n\n /**\n * Asynchronously downloads the values from the `tf.Tensor`. Returns a\n * promise of `TypedArray` that resolves when the computation has finished.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n async data(): Promise {\n this.throwIfDisposed();\n const data = trackerFn().read(this.dataId);\n if (this.dtype === 'string') {\n const bytes = await data as Uint8Array[];\n try {\n return bytes.map(b => util.decodeString(b)) as DataTypeMap[D];\n } catch {\n throw new Error(\n 'Failed to decode the string bytes into utf-8. ' +\n 'To get the original bytes, call tensor.bytes().');\n }\n }\n return data as Promise;\n }\n\n /**\n * Copy the tensor's data to a new GPU resource. Comparing to the `dataSync()`\n * and `data()`, this method prevents data from being downloaded to CPU.\n *\n * For WebGL backend, the data will be stored on a densely packed texture.\n * This means that the texture will use the RGBA channels to store value.\n *\n * For WebGPU backend, the data will be stored on a buffer. There is no\n * parameter, so can not use a user-defined size to create the buffer.\n *\n * @param options:\n * For WebGL,\n * - customTexShape: Optional. If set, will use the user defined\n * texture shape to create the texture.\n *\n * @returns For WebGL backend, a GPUData contains the new texture and\n * its information.\n * {\n * tensorRef: The tensor that is associated with this texture,\n * texture: WebGLTexture,\n * texShape: [number, number] // [height, width]\n * }\n *\n * For WebGPU backend, a GPUData contains the new buffer and\n * its information.\n * {\n * tensorRef: The tensor that is associated with this buffer,\n * buffer: GPUBuffer,\n * bufSize: number\n * }\n *\n * Remember to dispose the GPUData after it is used by\n * `res.tensorRef.dispose()`.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n dataToGPU(options?: DataToGPUOptions): GPUData {\n this.throwIfDisposed();\n return trackerFn().readToGPU(this.dataId, options);\n }\n\n /**\n * Synchronously downloads the values from the `tf.Tensor`. 
This blocks the\n * UI thread until the values are ready, which can cause performance issues.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n dataSync(): DataTypeMap[D] {\n this.throwIfDisposed();\n const data = trackerFn().readSync(this.dataId);\n if (this.dtype === 'string') {\n try {\n return (data as Uint8Array[]).map(b => util.decodeString(b)) as\n DataTypeMap[D];\n } catch {\n throw new Error(\n 'Failed to decode the string bytes into utf-8. ' +\n 'To get the original bytes, call tensor.bytes().');\n }\n }\n return data as DataTypeMap[D];\n }\n\n /** Returns the underlying bytes of the tensor's data. */\n async bytes(): Promise {\n this.throwIfDisposed();\n const data = await trackerFn().read(this.dataId);\n if (this.dtype === 'string') {\n return data as Uint8Array[];\n } else {\n return new Uint8Array((data as TypedArray).buffer);\n }\n }\n\n /**\n * Disposes `tf.Tensor` from memory.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n dispose(): void {\n if (this.isDisposed) {\n return;\n }\n trackerFn().disposeTensor(this);\n this.isDisposedInternal = true;\n }\n\n protected isDisposedInternal = false;\n get isDisposed(): boolean {\n return this.isDisposedInternal;\n }\n\n throwIfDisposed() {\n if (this.isDisposed) {\n throw new Error(`Tensor is disposed.`);\n }\n }\n\n /**\n * Prints the `tf.Tensor`. See `tf.print` for details.\n *\n * @param verbose Whether to print verbose information about the tensor,\n * including dtype and size.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n print(verbose = false): void {\n return opHandler.print(this, verbose);\n }\n\n /**\n * Returns a copy of the tensor. See `tf.clone` for details.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n clone(this: T): T {\n this.throwIfDisposed();\n return opHandler.clone(this);\n }\n\n /**\n * Returns a human-readable description of the tensor. Useful for logging.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n toString(verbose = false): string {\n const vals = this.dataSync();\n return tensorToString(vals, this.shape, this.dtype, verbose);\n }\n\n cast(dtype: DataType): T {\n this.throwIfDisposed();\n return opHandler.cast(this as T, dtype);\n }\n variable(trainable = true, name?: string, dtype?: DataType): Variable {\n this.throwIfDisposed();\n return trackerFn().makeVariable(this, trainable, name, dtype) as\n Variable;\n }\n}\n\nObject.defineProperty(Tensor, Symbol.hasInstance, {\n value: (instance: Tensor) => {\n // Implementation note: we should use properties of the object that will be\n // defined before the constructor body has finished executing (methods).\n // This is because when this code is transpiled by babel, babel will call\n // classCallCheck before the constructor body is run.\n // See https://github.com/tensorflow/tfjs/issues/3384 for backstory.\n return !!instance && instance.data != null && instance.dataSync != null &&\n instance.throwIfDisposed != null;\n }\n});\n\nexport function getGlobalTensorClass() {\n // Use getGlobal so that we can augment the Tensor class across package\n // boundaries becase the node resolution alg may result in different modules\n // being returned for this file depending on the path they are loaded from.\n return getGlobal('Tensor', () => {\n return Tensor;\n });\n}\n\n// Global side effect. 
Cache global reference to Tensor class\ngetGlobalTensorClass();\n\nexport interface NumericTensor extends Tensor {\n dtype: NumericDataType;\n dataSync(): DataTypeMap[D];\n data(): Promise;\n dataToGPU(options?: DataToGPUOptions): GPUData;\n}\n\nexport interface StringTensor extends Tensor {\n dtype: 'string';\n dataSync(): DataTypeMap[D];\n data(): Promise;\n}\n\n/** @doclink Tensor */\nexport type Scalar = Tensor;\n/** @doclink Tensor */\nexport type Tensor1D = Tensor;\n/** @doclink Tensor */\nexport type Tensor2D = Tensor;\n/** @doclink Tensor */\nexport type Tensor3D = Tensor;\n/** @doclink Tensor */\nexport type Tensor4D = Tensor;\n/** @doclink Tensor */\nexport type Tensor5D = Tensor;\n/** @doclink Tensor */\nexport type Tensor6D = Tensor;\n\n/**\n * A mutable `tf.Tensor`, useful for persisting state, e.g. for training.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\nexport class Variable extends Tensor {\n name: string;\n\n constructor(\n initialValue: Tensor, public trainable: boolean, name: string,\n tensorId: number) {\n super(\n initialValue.shape, initialValue.dtype, initialValue.dataId, tensorId);\n this.name = name;\n }\n\n /**\n * Assign a new `tf.Tensor` to this variable. The new `tf.Tensor` must have\n * the same shape and dtype as the old `tf.Tensor`.\n *\n * @param newValue New tensor to be assigned to this variable.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\n assign(newValue: Tensor): void {\n if (newValue.dtype !== this.dtype) {\n throw new Error(\n `dtype of the new value (${newValue.dtype}) and ` +\n `previous value (${this.dtype}) must match`);\n }\n if (!util.arraysEqual(newValue.shape, this.shape)) {\n throw new Error(\n `shape of the new value (${newValue.shape}) and ` +\n `previous value (${this.shape}) must match`);\n }\n trackerFn().disposeTensor(this);\n this.dataId = newValue.dataId;\n trackerFn().incRef(this, null /* backend */);\n }\n\n override dispose(): void {\n trackerFn().disposeVariable(this);\n this.isDisposedInternal = true;\n }\n}\n\nObject.defineProperty(Variable, Symbol.hasInstance, {\n value: (instance: Variable) => {\n return instance instanceof Tensor && instance.assign != null &&\n instance.assign instanceof Function;\n }\n});\n","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/** @docalias number[] */\nexport interface ShapeMap {\n R0: number[];\n R1: [number];\n R2: [number, number];\n R3: [number, number, number];\n R4: [number, number, number, number];\n R5: [number, number, number, number, number];\n R6: [number, number, number, number, number, number];\n}\n\n/** @docalias number[] */\nexport interface ArrayMap {\n R0: number;\n R1: number[];\n R2: number[][];\n R3: number[][][];\n R4: number[][][][];\n R5: number[][][][][];\n R6: number[][][][][][];\n}\n\nexport interface DataTypeMap {\n float32: Float32Array;\n int32: Int32Array;\n bool: Uint8Array;\n complex64: Float32Array;\n string: string[];\n}\n\nexport interface SingleValueMap {\n bool: boolean;\n int32: number;\n float32: number;\n complex64: number;\n string: string;\n}\n\n/** @docalias 'float32'|'int32'|'bool'|'complex64'|'string' */\nexport type DataType = keyof DataTypeMap;\nexport type NumericDataType = 'float32'|'int32'|'bool'|'complex64';\nexport type TypedArray = Float32Array|Int32Array|Uint8Array;\n/** Tensor data used in tensor creation and user-facing API. */\nexport type DataValues = DataTypeMap[DataType];\n/** The underlying tensor data that gets stored in a backend. */\nexport type BackendValues = Float32Array|Int32Array|Uint8Array|Uint8Array[];\n\nexport enum Rank {\n R0 = 'R0',\n R1 = 'R1',\n R2 = 'R2',\n R3 = 'R3',\n R4 = 'R4',\n R5 = 'R5',\n R6 = 'R6'\n}\n\nexport type FlatVector = boolean[]|number[]|TypedArray;\nexport type RegularArray =\n T[]|T[][]|T[][][]|T[][][][]|T[][][][][]|T[][][][][][];\n\n// tslint:disable-next-line:no-any\nexport interface RecursiveArray {\n [index: number]: T|RecursiveArray;\n}\n\n// Looks for upcasting types. Used, for example, in operations with mixed dtype\n// inputs.\nenum UpcastInt32AndMap {\n 'float32' = 'float32',\n 'int32' = 'int32',\n 'bool' = 'int32',\n 'complex64' = 'complex64'\n}\n\nenum UpcastBoolAndMap {\n 'float32' = 'float32',\n 'int32' = 'int32',\n 'bool' = 'bool',\n 'complex64' = 'complex64'\n}\n\nenum UpcastFloat32AndMap {\n 'float32' = 'float32',\n 'int32' = 'float32',\n 'bool' = 'float32',\n 'complex64' = 'complex64'\n}\n\nenum UpcastComplex64AndMap {\n 'float32' = 'complex64',\n 'int32' = 'complex64',\n 'bool' = 'complex64',\n 'complex64' = 'complex64'\n}\n\nconst upcastTypeMap = {\n 'float32': UpcastFloat32AndMap,\n 'int32': UpcastInt32AndMap,\n 'bool': UpcastBoolAndMap,\n 'complex64': UpcastComplex64AndMap\n};\n\nexport function upcastType(typeA: DataType, typeB: DataType): DataType {\n if (typeA === 'string' || typeB === 'string') {\n if (typeA === 'string' && typeB === 'string') {\n return 'string';\n }\n throw new Error(`Can not upcast ${typeA} with ${typeB}`);\n }\n return upcastTypeMap[typeA][typeB];\n}\n\n/** Returns the output type after summation. 
*/\nexport function sumOutType(type: DataType): DataType {\n return upcastType(type, 'int32');\n}\n\n/** @docalias TypedArray|Array */\nexport type TensorLike =\n TypedArray|number|boolean|string|RecursiveArray|\n RecursiveArray|RecursiveArray|Uint8Array[];\nexport type ScalarLike = number|boolean|string|Uint8Array;\n/** @docalias TypedArray|Array */\nexport type TensorLike1D = TypedArray|number[]|boolean[]|string[]|Uint8Array[];\n/** @docalias TypedArray|Array */\nexport type TensorLike2D = TypedArray|number[]|number[][]|boolean[]|boolean[][]|\n string[]|string[][]|Uint8Array[]|Uint8Array[][];\n/** @docalias TypedArray|Array */\nexport type TensorLike3D = TypedArray|number[]|number[][][]|boolean[]|\n boolean[][][]|string[]|string[][][]|Uint8Array[]|Uint8Array[][][];\n/** @docalias TypedArray|Array */\nexport type TensorLike4D = TypedArray|number[]|number[][][][]|boolean[]|\n boolean[][][][]|string[]|string[][][][]|Uint8Array[]|Uint8Array[][][][];\n/** @docalias TypedArray|Array */\nexport type TensorLike5D =\n TypedArray|number[]|number[][][][][]|boolean[]|boolean[][][][][]|string[]|\n string[][][][][]|Uint8Array[]|Uint8Array[][][][][];\n/** @docalias TypedArray|Array */\nexport type TensorLike6D =\n TypedArray|number[]|number[][][][][][]|boolean[]|boolean[][][][][][]|\n string[]|string[][][][][][]|Uint8Array[]|Uint8Array[][][][][];\n\n/** Type for representing image data in Uint8Array type. */\nexport interface PixelData {\n width: number;\n height: number;\n data: Uint8Array;\n}\n\n/**\n * Type for representing all permutations and combinations of 'RGBA' channels.\n */\nexport type WebGLChannels = 'A'|'B'|'G'|'R'|'AB'|'AG'|'AR'|'BA'|'BG'|'BR'|'GA'|\n 'GB'|'GR'|'RA'|'RB'|'RG'|'ABG'|'ABR'|'AGB'|'AGR'|'ARB'|'ARG'|'BAG'|'BAR'|\n 'BGA'|'BGR'|'BRA'|'BRG'|'GAB'|'GAR'|'GBA'|'GBR'|'GRA'|'GRB'|'RAB'|'RAG'|\n 'RBA'|'RBG'|'RGA'|'RGB'|'ABGR'|'ABRG'|'AGBR'|'AGRB'|'ARBG'|'ARGB'|'BAGR'|\n 'BARG'|'BGAR'|'BGRA'|'BRAG'|'BRGA'|'GABR'|'GARB'|'GBAR'|'GBRA'|'GRAB'|\n 'GRBA'|'RABG'|'RAGB'|'RBAG'|'RBGA'|'RGAB'|'RGBA';\n\n/** Type for representing a texture data to create a tensor. */\nexport interface WebGLData {\n texture: WebGLTexture;\n height: number;\n width: number;\n channels: WebGLChannels;\n}\n\n/**\n * Type for representing a buffer data to create a tensor. Buffer usage should\n * at least support GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC. When\n * zeroCopy is false or undefined (default), this GPUBuffer will be copied to\n * the tensor's resource buffer. When zeroCopy is true, tensor will use this\n * GPUBuffer as tensor's resource buffer, user should not destroy this GPUBuffer\n * until all access is done. If not specified at creating a tensor, tensor type\n * is float32.\n */\nexport interface WebGPUData {\n buffer: GPUBuffer;\n zeroCopy?: boolean;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from './tensor';\nimport {TensorContainer, TensorContainerArray} from './tensor_types';\nimport {upcastType} from './types';\nimport {assert} from './util';\n\nexport function makeTypesMatch(a: T, b: T): [T, T] {\n if (a.dtype === b.dtype) {\n return [a, b];\n }\n const dtype = upcastType(a.dtype, b.dtype);\n return [a.cast(dtype), b.cast(dtype)];\n}\n\nexport function assertTypesMatch(a: Tensor, b: Tensor): void {\n assert(\n a.dtype === b.dtype,\n () => `The dtypes of the first(${a.dtype}) and` +\n ` second(${b.dtype}) input must match`);\n}\n\nexport function isTensorInList(tensor: Tensor, tensorList: Tensor[]): boolean {\n return tensorList.some(x => x.id === tensor.id);\n}\n\n/**\n * Extracts any `Tensor`s found within the provided object.\n *\n * @param container an object that may be a `Tensor` or may directly contain\n * `Tensor`s, such as a `Tensor[]` or `{key: Tensor, ...}`. In general it\n * is safe to pass any object here, except that `Promise`s are not\n * supported.\n * @returns An array of `Tensors` found within the passed object. If the\n * argument is simply a `Tensor', a list containing that `Tensor` is\n * returned. If the object is not a `Tensor` or does not\n * contain `Tensors`, an empty list is returned.\n */\nexport function getTensorsInContainer(result: TensorContainer): Tensor[] {\n const list: Tensor[] = [];\n const seen = new Set<{}|void>();\n walkTensorContainer(result, list, seen);\n return list;\n}\n\nfunction walkTensorContainer(\n container: TensorContainer, list: Tensor[], seen: Set<{}|void>): void {\n if (container == null) {\n return;\n }\n if (container instanceof Tensor) {\n list.push(container);\n return;\n }\n if (!isIterable(container)) {\n return;\n }\n // Iteration over keys works also for arrays.\n const iterable = container as TensorContainerArray;\n for (const k in iterable) {\n const val = iterable[k];\n if (!seen.has(val)) {\n seen.add(val);\n walkTensorContainer(val, list, seen);\n }\n }\n}\n\n// tslint:disable-next-line:no-any\nfunction isIterable(obj: any): boolean {\n return Array.isArray(obj) || typeof obj === 'object';\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {BackendTimingInfo, DataMover, KernelBackend} from './backends/backend';\nimport {Environment, setEnvironmentGlobal} from './environment';\nimport {getGlobalNamespace} from './global_util';\nimport {Add, Cast, Identity} from './kernel_names';\nimport { getGradient, getKernel, getKernelsForBackend, GradFunc, NamedAttrMap } from './kernel_registry';\nimport { TensorInfo } from './tensor_info';\nimport * as log from './log';\nimport {KernelProfile, Profiler} from './profiler';\nimport {backpropagateGradients, getFilteredNodesXToY, TapeNode} from './tape';\nimport {DataToGPUOptions, GPUData, setTensorTracker, Tensor, TensorTracker, Variable} from './tensor';\nimport {DataId} from './tensor_info';\nimport {GradSaveFunc, NamedTensorMap, NamedVariableMap, TensorContainer} from './tensor_types';\nimport {getTensorsInContainer} from './tensor_util';\nimport {BackendValues, DataType, DataValues} from './types';\nimport * as util from './util';\nimport {bytesFromStringArray, makeOnesTypedArray, now, sizeFromShape} from './util';\n\n/**\n * A function that computes an output. The save function is for saving tensors\n * computed in the forward pass, that we need in the backward pass.\n */\nexport type ForwardFunc = (backend: KernelBackend, save?: GradSaveFunc) => T;\n\n/**\n * @docalias (a: Tensor, b: Tensor,..., save?: Function) => {\n * value: Tensor,\n * gradFunc: (dy: Tensor, saved?: NamedTensorMap) => Tensor | Tensor[]\n * }\n */\nexport type CustomGradientFunc =\n (...inputs: Array) => {\n value: T;\n gradFunc: (dy: T, saved: Tensor[]) => Tensor | Tensor[];\n };\n\nexport type MemoryInfo = {\n numTensors: number; numDataBuffers: number; numBytes: number;\n unreliable?: boolean; reasons: string[];\n};\n\ntype KernelInfo = {\n name: string; bytesAdded: number; totalBytesSnapshot: number;\n tensorsAdded: number;\n totalTensorsSnapshot: number;\n inputShapes: number[][];\n outputShapes: number[][];\n kernelTimeMs: number | {error: string} | Promise;\n extraInfo: string | Promise;\n};\n\nexport type ProfileInfo = {\n newBytes: number; newTensors: number; peakBytes: number;\n kernels: KernelInfo[];\n result: TensorContainer;\n kernelNames: string[];\n};\n\nexport interface TimingInfo extends BackendTimingInfo {\n wallMs: number;\n}\n\n/** @docalias Function */\nexport type ScopeFn = () => T;\n\ninterface ScopeState {\n track: Tensor[];\n name: string;\n id: number;\n}\n\ninterface RegisteredKernelInvocation {\n kernelName: string;\n inputs: I;\n attrs?: NamedAttrMap;\n}\n\ninterface CustomGradKernelInvocation {\n forwardFunc: ForwardFunc;\n backwardsFunc: (dy: T, saved: Tensor[]) => {\n [P in keyof I]: () => I[P]\n };\n inputs: I;\n attrs?: NamedAttrMap;\n}\n\nfunction isRegisteredKernelInvocation(\n kernelInvocation: RegisteredKernelInvocation|\n CustomGradKernelInvocation):\n kernelInvocation is RegisteredKernelInvocation {\n 
return (kernelInvocation as RegisteredKernelInvocation).kernelName != null;\n}\n\nclass EngineState {\n // Public since optimizers will use it.\n registeredVariables: NamedVariableMap = {};\n\n nextTapeNodeId = 0;\n numBytes = 0;\n numTensors = 0;\n numStringTensors = 0;\n numDataBuffers = 0;\n\n activeTape: TapeNode[];\n // Number of nested tf.grad() statements when computing higher-order\n // gradients. E.g. `1` for first-order gradients and `2` for second-order\n // gradients. Used to track if the tape should be removed after a backprop.\n gradientDepth = 0;\n // Number of nested kernel calls. When kernel depth is greater than 1, we turn\n // off the tape.\n kernelDepth = 0;\n\n // Keep Tensors that parallel the tapes.\n activeScope: ScopeState;\n scopeStack: ScopeState[] = [];\n /**\n * Keeps track of the number of data moves during a kernel execution. We\n * maintain a stack since kernels can call other kernels, recursively.\n */\n numDataMovesStack: number[] = [];\n nextScopeId = 0;\n\n tensorInfo = new WeakMap();\n\n profiling = false;\n activeProfile: ProfileInfo = {\n newBytes: 0,\n newTensors: 0,\n peakBytes: 0,\n kernels: [],\n result: null,\n get kernelNames():\n string[] {\n return Array.from(new Set(this.kernels.map(k => k.name)));\n }\n };\n\n dispose() {\n for (const variableName in this.registeredVariables) {\n this.registeredVariables[variableName].dispose();\n }\n }\n}\n\nexport class Engine implements TensorTracker, DataMover {\n state: EngineState;\n backendName: string;\n registry: {[id: string]: KernelBackend} = {};\n registryFactory: {\n [id: string]: {\n factory: () => KernelBackend | Promise,\n priority: number\n }\n } = {};\n\n private profiler: Profiler;\n private backendInstance: KernelBackend;\n private pendingBackendInit: Promise;\n private pendingBackendInitId = 0;\n\n constructor(public ENV: Environment) {\n this.state = new EngineState();\n }\n\n async ready(): Promise {\n if (this.pendingBackendInit != null) {\n return this.pendingBackendInit.then(() => {});\n }\n if (this.backendInstance != null) {\n return;\n }\n const sortedBackends = this.getSortedBackends();\n\n for (let i = 0; i < sortedBackends.length; i++) {\n const backendName = sortedBackends[i];\n const success = await this.initializeBackend(backendName).success;\n if (success) {\n await this.setBackend(backendName);\n return;\n }\n }\n\n throw new Error(\n `Could not initialize any backends, all backend initializations ` +\n `failed.`);\n }\n\n get backend(): KernelBackend {\n if (this.pendingBackendInit != null) {\n throw new Error(\n `Backend '${this.backendName}' has not yet been initialized. Make ` +\n `sure to await tf.ready() or await tf.setBackend() before calling ` +\n `other methods`);\n }\n if (this.backendInstance == null) {\n const {name, asyncInit} = this.initializeBackendsAndReturnBest();\n if (asyncInit) {\n throw new Error(\n `The highest priority backend '${name}' has not yet been ` +\n `initialized. 
Make sure to await tf.ready() or ` +\n `await tf.setBackend() before calling other methods`);\n }\n this.setBackend(name);\n }\n return this.backendInstance;\n }\n\n backendNames(): string[] {\n return Object.keys(this.registryFactory);\n }\n\n findBackend(backendName: string): KernelBackend {\n if (!(backendName in this.registry)) {\n // If the backend hasn't been initialized but we have a registry entry for\n // it, initialize it and return it.\n if (backendName in this.registryFactory) {\n const {asyncInit} = this.initializeBackend(backendName);\n if (asyncInit) {\n // Backend is not ready yet.\n return null;\n }\n } else {\n return null;\n }\n }\n return this.registry[backendName];\n }\n\n findBackendFactory(backendName: string):\n () => KernelBackend | Promise {\n if (!(backendName in this.registryFactory)) {\n return null;\n }\n return this.registryFactory[backendName].factory;\n }\n\n registerBackend(\n backendName: string,\n factory: () => KernelBackend | Promise,\n priority = 1): boolean {\n if (backendName in this.registryFactory) {\n log.warn(\n `${backendName} backend was already registered. ` +\n `Reusing existing backend factory.`);\n return false;\n }\n this.registryFactory[backendName] = {factory, priority};\n return true;\n }\n\n async setBackend(backendName: string): Promise {\n if (this.registryFactory[backendName] == null) {\n throw new Error(`Backend name '${backendName}' not found in registry`);\n }\n this.backendName = backendName;\n if (this.registry[backendName] == null) {\n this.backendInstance = null;\n const {success, asyncInit} = this.initializeBackend(backendName);\n const result = asyncInit ? await success : success;\n if (!result) {\n return false;\n }\n }\n this.backendInstance = this.registry[backendName];\n this.setupRegisteredKernels();\n // Reset the profiler.\n this.profiler = new Profiler(this.backendInstance);\n\n return true;\n }\n\n private setupRegisteredKernels(): void {\n const kernels = getKernelsForBackend(this.backendName);\n kernels.forEach(kernel => {\n if (kernel.setupFunc != null) {\n kernel.setupFunc(this.backendInstance);\n }\n });\n }\n\n private disposeRegisteredKernels(backendName: string): void {\n const kernels = getKernelsForBackend(backendName);\n kernels.forEach(kernel => {\n if (kernel.disposeFunc != null) {\n kernel.disposeFunc(this.registry[backendName]);\n }\n });\n }\n\n /**\n * Initializes a backend by looking up the backend name in the factory\n * registry and calling the factory method. Returns a boolean representing\n * whether the initialization of the backend suceeded. Throws an error if\n * there is no backend in the factory registry.\n */\n private initializeBackend(backendName: string):\n {success: boolean|Promise, asyncInit: boolean} {\n const registryFactoryEntry = this.registryFactory[backendName];\n if (registryFactoryEntry == null) {\n throw new Error(\n `Cannot initialize backend ${backendName}, no registration found.`);\n }\n\n try {\n const backend = registryFactoryEntry.factory();\n /* Test if the factory returns a promise.\n Done in a more liberal way than\n previous 'Promise.resolve(backend)===backend'\n as we needed to account for custom Promise\n implementations (e.g. Angular) */\n if (backend && !(backend instanceof KernelBackend) &&\n typeof backend.then === 'function') {\n const promiseId = ++this.pendingBackendInitId;\n const success =\n backend\n .then(backendInstance => {\n // Outdated promise. 
Another backend was set in the meantime.\n if (promiseId < this.pendingBackendInitId) {\n return false;\n }\n this.registry[backendName] = backendInstance;\n this.pendingBackendInit = null;\n return true;\n })\n .catch(err => {\n // Outdated promise. Another backend was set in the meantime.\n if (promiseId < this.pendingBackendInitId) {\n return false;\n }\n this.pendingBackendInit = null;\n log.warn(`Initialization of backend ${backendName} failed`);\n log.warn(err.stack || err.message);\n return false;\n });\n this.pendingBackendInit = success;\n return {success, asyncInit: true};\n } else {\n this.registry[backendName] = backend as KernelBackend;\n return {success: true, asyncInit: false};\n }\n } catch (err) {\n log.warn(`Initialization of backend ${backendName} failed`);\n log.warn(err.stack || err.message);\n return {success: false, asyncInit: false};\n }\n }\n\n removeBackend(backendName: string): void {\n if (!(backendName in this.registryFactory)) {\n throw new Error(`${backendName} backend not found in registry`);\n }\n if (this.backendName === backendName && this.pendingBackendInit != null) {\n // There is a pending promise of the backend we want to remove. Make it\n // obsolete.\n this.pendingBackendInitId++;\n }\n\n if (backendName in this.registry) {\n this.disposeRegisteredKernels(backendName);\n this.registry[backendName].dispose();\n delete this.registry[backendName];\n }\n\n delete this.registryFactory[backendName];\n\n // Unset the backend if it is active.\n if (this.backendName === backendName) {\n this.pendingBackendInit = null;\n this.backendName = null;\n this.backendInstance = null;\n }\n }\n\n private getSortedBackends(): string[] {\n if (Object.keys(this.registryFactory).length === 0) {\n throw new Error('No backend found in registry.');\n }\n return Object.keys(this.registryFactory).sort((a: string, b: string) => {\n // Highest priority comes first.\n return this.registryFactory[b].priority -\n this.registryFactory[a].priority;\n });\n }\n\n private initializeBackendsAndReturnBest():\n {name: string, asyncInit: boolean} {\n const sortedBackends = this.getSortedBackends();\n\n for (let i = 0; i < sortedBackends.length; i++) {\n const backendName = sortedBackends[i];\n const {success, asyncInit} = this.initializeBackend(backendName);\n if (asyncInit || success) {\n return {name: backendName, asyncInit};\n }\n }\n throw new Error(\n `Could not initialize any backends, all backend initializations ` +\n `failed.`);\n }\n\n moveData(backend: KernelBackend, dataId: DataId) {\n const info = this.state.tensorInfo.get(dataId);\n const srcBackend = info.backend;\n const values = this.readSync(dataId);\n const refCount = srcBackend.refCount(dataId);\n // Delete the tensor from the old backend and move it to the new\n // backend.\n srcBackend.disposeData(dataId, true);\n info.backend = backend;\n backend.move(dataId, values, info.shape, info.dtype, refCount);\n if (this.shouldCheckForMemLeaks()) {\n // Track the number of moves during a kernel execution to correctly\n // detect memory leaks.\n this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1]++;\n }\n }\n\n tidy(nameOrFn: string|ScopeFn, fn?: ScopeFn):\n T {\n let name: string = null;\n if (fn == null) {\n // Called with only 1 argument.\n if (typeof nameOrFn !== 'function') {\n throw new Error('Please provide a function to tidy()');\n }\n fn = nameOrFn;\n } else {\n // Called with 2 arguments.\n if (typeof nameOrFn !== 'string' && !(nameOrFn instanceof String)) {\n throw new Error(\n 'When calling with 
two arguments, the first argument ' +\n 'to tidy() must be a string');\n }\n if (typeof fn !== 'function') {\n throw new Error(\n 'When calling with two arguments, the 2nd argument ' +\n 'to tidy() must be a function');\n }\n name = nameOrFn as string;\n // TODO(nsthorat,smilkov): Do operation logging and performance\n // profiling.\n }\n let result: T;\n return this.scopedRun(\n () => this.startScope(name), () => this.endScope(result), () => {\n result = fn();\n if (result instanceof Promise) {\n console.error('Cannot return a Promise inside of tidy.');\n }\n return result;\n });\n }\n\n private scopedRun(start: () => void, end: () => void, f: () => T): T {\n start();\n try {\n const res = f();\n end();\n return res;\n } catch (ex) {\n end();\n throw ex;\n }\n }\n\n private static nextTensorId = 0;\n private nextTensorId(): number {\n return Engine.nextTensorId++;\n }\n\n private static nextVariableId = 0;\n private nextVariableId(): number {\n return Engine.nextVariableId++;\n }\n\n /**\n * This method is called instead of the public-facing tensor.clone() when\n * saving a tensor for backwards pass. It makes sure to add the clone\n * operation to the tape regardless of being called inside a kernel\n * execution.\n */\n private clone(x: Tensor): Tensor {\n const y: Tensor = ENGINE.runKernel(Identity,\n {x} as unknown as NamedTensorMap);\n const inputs = {x};\n const grad = (dy: Tensor) => ({\n x: () => {\n const dtype = 'float32';\n const gradInputs = {x: dy};\n const attrs = {dtype};\n\n return ENGINE.runKernel(\n Cast, gradInputs as unknown as NamedTensorMap,\n // tslint:disable-next-line: no-unnecessary-type-assertion\n attrs as unknown as NamedAttrMap) as Tensor;\n }\n });\n const saved: Tensor[] = [];\n this.addTapeNode(this.state.activeScope.name, inputs, [y], grad, saved, {});\n return y;\n }\n\n /**\n * Execute a kernel with the given name and return the output tensor.\n *\n * @param kernelName The name of the kernel to execute.\n * @param inputs A map of input names to tensors.\n * @param attrs A map of attribute names to their values. An attribute is a\n * primitive (non-tensor) input to the kernel.\n * @param inputsToSave A list of tensors, inputs to save for the backprop\n * computation.\n * @param outputsToSave A list of booleans, specifying which output to save\n * for the backprop computation. 
These are booleans since the output\n * tensors are not visible to the user.\n */\n runKernel(\n kernelName: string, inputs: NamedTensorMap, attrs?: NamedAttrMap): T {\n if (this.backendName == null) {\n // backend has not been initialized yet (backend initialization is lazy\n // can be deferred until an op/ kernel is run).\n // The below getter has side effects that will try to initialize the\n // backend and set properties like this.backendName\n // tslint:disable-next-line: no-unused-expression\n this.backend;\n }\n const hasKernel = getKernel(kernelName, this.backendName) != null;\n if (!hasKernel) {\n throw new Error(`Kernel '${kernelName}' not registered for backend '${\n this.backendName}'`);\n }\n return this.runKernelFunc({kernelName, inputs, attrs});\n }\n\n private shouldCheckForMemLeaks(): boolean {\n return this.ENV.getBool('IS_TEST');\n }\n\n private checkKernelForMemLeak(\n kernelName: string, numDataIdsBefore: number,\n outInfos: TensorInfo[]): void {\n const numDataIdsAfter = this.backend.numDataIds();\n\n // Count the number of data ids associated with the result of the kernel.\n let numOutputDataIds = 0;\n outInfos.forEach(info => {\n // Complex numbers allocate 3 data ids, one for 'real', one for\n // 'imaginary', and one for the container that holds the former two.\n numOutputDataIds += (info.dtype === 'complex64' ? 3 : 1);\n });\n\n // Account for the number of moves during kernel execution. A \"data move\"\n // can happen in the middle of a kernel execution, placing a new (key,value)\n // pair in the data storage. Since data moves have net zero effect (we\n // always remove the data from the old backend), we have to cancel them out\n // when detecting memory leaks.\n const numMoves =\n this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1];\n const dataIdsLeaked =\n numDataIdsAfter - numDataIdsBefore - numOutputDataIds - numMoves;\n if (dataIdsLeaked > 0) {\n throw new Error(\n `Backend '${this.backendName}' has an internal memory leak ` +\n `(${dataIdsLeaked} data ids) after running '${kernelName}'`);\n }\n }\n\n /**\n * Internal helper method to execute a kernel Func\n *\n * Use `runKernel` to execute kernels from outside of engine.\n */\n private runKernelFunc(\n kernelParams: RegisteredKernelInvocation|\n CustomGradKernelInvocation): T {\n let outputs: Tensor[];\n let saved: Tensor[] = [];\n const isTapeOn = this.isTapeOn();\n\n const startingBytecount = this.state.numBytes;\n const startingNumTensors = this.state.numTensors;\n\n if (this.shouldCheckForMemLeaks()) {\n this.state.numDataMovesStack.push(0);\n }\n\n let kernelFunc: () => Tensor[];\n if (this.backendName == null) {\n // backend has not been initialized yet (backend initialization is lazy\n // can be deferred until an op/ kernel is run).\n // The below getter has side effects that will try to initialize the\n // backend and set properties like this.backendName\n // tslint:disable-next-line: no-unused-expression\n this.backend;\n }\n\n let out: TensorInfo|TensorInfo[];\n\n const kernelOrScopeName = isRegisteredKernelInvocation(kernelParams) ?\n kernelParams.kernelName :\n this.state.activeScope != null ? this.state.activeScope.name : '';\n\n // Create the kernelFunc from either a registered kernel OR passed in\n // forward/backward functions (used by custom grad). 
In this context a\n // kernelFunc wraps a kernel implementation with some bookkeeping.\n\n if (isRegisteredKernelInvocation(kernelParams)) {\n const {kernelName, inputs, attrs} = kernelParams;\n if (this.backendName == null) {\n // backend has not been initialized yet (backend initialization is lazy\n // can be deferred until an op/ kernel is run).\n // The below getter has side effects that will try to initialize the\n // backend and set properties like this.backendName\n // tslint:disable-next-line: no-unused-expression\n this.backend;\n }\n const kernel = getKernel(kernelName, this.backendName);\n util.assert(\n kernel != null,\n () => `Cannot find registered kernel '${kernelName}' for backend '${\n this.backendName}'`);\n\n kernelFunc = () => {\n const numDataIdsBefore = this.backend.numDataIds();\n out = kernel.kernelFunc({inputs, attrs, backend: this.backend});\n const outInfos = Array.isArray(out) ? out : [out];\n if (this.shouldCheckForMemLeaks()) {\n this.checkKernelForMemLeak(kernelName, numDataIdsBefore, outInfos);\n }\n\n const outTensors = outInfos.map((outInfo: TensorInfo|Tensor) => {\n // todo (yassogba) remove this option (Tensor) when node backend\n // methods have been modularized and they all return tensorInfo.\n // TensorInfos do not have a rank attribute.\n if ((outInfo as Tensor).rank != null) {\n return outInfo as Tensor;\n }\n return this.makeTensorFromTensorInfo(outInfo);\n });\n\n // Save any required inputs and outputs.\n\n // Do not save unless we are recording to the tape. Otherwise it would\n // cause a mem leak since there would be no backprop for these tensors\n // (which would otherwise dispose them).\n if (isTapeOn) {\n const tensorsToSave =\n this.getTensorsForGradient(kernelName, inputs, outTensors);\n saved = this.saveTensorsForBackwardMode(tensorsToSave);\n }\n return outTensors;\n };\n } else {\n const {forwardFunc} = kernelParams;\n // Running a customGrad op.\n const saveFunc: GradSaveFunc = (tensors) => {\n // Do not save unless we are recording to the tape. Otherwise it would\n // cause a mem leak since we would never run backprop, which disposes\n // the kept tensors.\n if (!isTapeOn) {\n return;\n }\n saved = tensors.map(tensor => this.keep(this.clone(tensor)));\n };\n\n kernelFunc = () => {\n const numDataIdsBefore = this.backend.numDataIds();\n out = this.tidy(() => forwardFunc(this.backend, saveFunc));\n const outs = (Array.isArray(out) ? out : [out]) as Tensor[];\n if (this.shouldCheckForMemLeaks()) {\n // Scope name is used to print a more helpful error message if needed.\n this.checkKernelForMemLeak(kernelOrScopeName, numDataIdsBefore, outs);\n }\n return outs;\n };\n }\n\n //\n // Run the kernelFunc. 
Optionally profiling it.\n //\n const {inputs, attrs} = kernelParams;\n const backwardsFunc = isRegisteredKernelInvocation(kernelParams) ?\n null :\n kernelParams.backwardsFunc;\n\n let kernelProfile: KernelProfile;\n this.scopedRun(\n // Stop recording to a tape when running a kernel.\n () => this.state.kernelDepth++, () => this.state.kernelDepth--, () => {\n if (!this.ENV.getBool('DEBUG') && !this.state.profiling) {\n outputs = kernelFunc();\n } else {\n kernelProfile = this.profiler.profileKernel(\n kernelOrScopeName, inputs, () => kernelFunc());\n if (this.ENV.getBool('DEBUG')) {\n this.profiler.logKernelProfile(kernelProfile);\n }\n outputs = kernelProfile.outputs;\n }\n });\n\n if (isTapeOn) {\n this.addTapeNode(\n kernelOrScopeName, inputs, outputs, backwardsFunc, saved, attrs);\n }\n\n if (this.state.profiling) {\n this.state.activeProfile.kernels.push({\n name: kernelOrScopeName,\n bytesAdded: this.state.numBytes - startingBytecount,\n totalBytesSnapshot: this.state.numBytes,\n tensorsAdded: this.state.numTensors - startingNumTensors,\n totalTensorsSnapshot: this.state.numTensors,\n inputShapes: Object.keys(inputs).map(\n key => inputs[key] != null ? inputs[key].shape : null),\n outputShapes: outputs.map(item => item.shape),\n kernelTimeMs: kernelProfile.timeMs,\n extraInfo: kernelProfile.extraInfo\n });\n }\n return (Array.isArray(out) ? outputs : outputs[0]) as T;\n }\n\n /**\n * Saves tensors used in forward mode for use in backward mode.\n *\n * @param tensors the list of tensors to save.\n */\n private saveTensorsForBackwardMode(tensors: Tensor[]): Tensor[] {\n const saved = tensors.map(tensor => this.keep(this.clone(tensor)));\n return saved;\n }\n\n /**\n * Returns a list of tensors to save for a given gradient calculation.\n *\n * @param kernelName name of kernel to look up gradient for.\n * @param inputs a map of input tensors.\n * @param outputs an array of output tensors from forward mode of kernel.\n */\n private getTensorsForGradient(\n kernelName: string, inputs: NamedTensorMap,\n outputs: Tensor[]): Tensor[]|null {\n const gradConfig = getGradient(kernelName);\n if (gradConfig != null) {\n const inputsToSave: string[] = gradConfig.inputsToSave || [];\n const outputsToSave: boolean[] = gradConfig.outputsToSave || [];\n\n // If saveAllInputs is true, all inputs will be saved. Otherwise, inputs\n // specified in inputsToSave will be saved.\n let inputTensorsToSave: Tensor[];\n if (gradConfig.saveAllInputs) {\n util.assert(\n Array.isArray(inputs),\n () => 'saveAllInputs is true, expected inputs to be an array.');\n\n inputTensorsToSave = Object.keys(inputs).map((key) => inputs[key]);\n } else {\n inputTensorsToSave = inputsToSave.map((inputName) => inputs[inputName]);\n }\n\n const outputTensorsToSave: Tensor[] =\n outputs.filter((_, i) => outputsToSave[i]);\n\n return inputTensorsToSave.concat(outputTensorsToSave);\n }\n // We return an empty list rather than throw an error because the kernel we\n // are looking up may not actually be relevant to backproping through the\n // overall function\n //\n // See 'does not error if irrelevant (pruned) ops are missing grads' test\n // in gradients_test.ts for an example.\n return [];\n }\n\n /**\n * Internal method used by public APIs for tensor creation. Makes a new\n * tensor with the provided shape, dtype and values. 
It always\n * creates a new data id and writes the values to the underlying backend.\n */\n makeTensor(\n values: DataValues, shape: number[], dtype: DataType,\n backend?: KernelBackend): Tensor {\n if (values == null) {\n throw new Error('Values passed to engine.makeTensor() are null');\n }\n dtype = dtype || 'float32';\n backend = backend || this.backend;\n let backendVals = values as BackendValues;\n if (dtype === 'string' && util.isString(values[0])) {\n backendVals = (values as string[]).map(d => util.encodeString(d));\n }\n const dataId = backend.write(backendVals, shape, dtype);\n const t = new Tensor(shape, dtype, dataId, this.nextTensorId());\n this.trackTensor(t, backend);\n\n // Count bytes for string tensors.\n if (dtype === 'string') {\n const info = this.state.tensorInfo.get(dataId);\n const newBytes = bytesFromStringArray(backendVals as Uint8Array[]);\n this.state.numBytes += newBytes - info.bytes;\n info.bytes = newBytes;\n }\n return t;\n }\n\n /**\n * Internal method used by backends. Makes a new tensor\n * that is a wrapper around an existing data id. It doesn't create\n * a new data id, only increments the ref count used in memory tracking.\n * @deprecated\n */\n makeTensorFromDataId(\n dataId: DataId, shape: number[], dtype: DataType,\n backend?: KernelBackend): Tensor {\n dtype = dtype || 'float32';\n const tensorInfo: TensorInfo = {dataId, shape, dtype};\n return this.makeTensorFromTensorInfo(tensorInfo, backend);\n }\n\n /**\n * Internal method used by backends. Makes a new tensor that is a wrapper\n * around an existing data id in TensorInfo. It doesn't create a new data id,\n * only increments the ref count used in memory tracking.\n */\n makeTensorFromTensorInfo(tensorInfo: TensorInfo, backend?: KernelBackend):\n Tensor {\n const {dataId, shape, dtype} = tensorInfo;\n const t = new Tensor(shape, dtype, dataId, this.nextTensorId());\n this.trackTensor(t, backend);\n return t;\n }\n\n makeVariable(\n initialValue: Tensor, trainable = true, name?: string,\n dtype?: DataType): Variable {\n name = name || this.nextVariableId().toString();\n if (dtype != null && dtype !== initialValue.dtype) {\n initialValue = initialValue.cast(dtype);\n }\n const v = new Variable(initialValue, trainable, name, this.nextTensorId());\n if (this.state.registeredVariables[v.name] != null) {\n throw new Error(`Variable with name ${v.name} was already registered`);\n }\n this.state.registeredVariables[v.name] = v;\n this.incRef(v, this.backend);\n return v;\n }\n\n trackTensor(a: Tensor, backend: KernelBackend): void {\n this.state.numTensors++;\n if (a.dtype === 'string') {\n this.state.numStringTensors++;\n }\n // Bytes for complex numbers are counted by their components. Bytes for\n // string tensors are counted when writing values.\n let bytes = 0;\n if (a.dtype !== 'complex64' && a.dtype !== 'string') {\n bytes = a.size * util.bytesPerElement(a.dtype);\n }\n this.state.numBytes += bytes;\n\n if (!this.state.tensorInfo.has(a.dataId)) {\n this.state.numDataBuffers++;\n this.state.tensorInfo.set(a.dataId, {\n backend: backend || this.backend,\n dtype: a.dtype,\n shape: a.shape,\n bytes\n });\n }\n\n if (!(a instanceof Variable)) {\n this.track(a);\n }\n }\n\n // Track the tensor by dataId and increase the refCount for the dataId in the\n // backend.\n // TODO(pyu10055): This is currently used by makeVariable method, to increase\n // refCount on the backend for the dataId. 
It can potentially be replaced with\n // Identity op indead of calling backend directly.\n incRef(a: Tensor, backend: KernelBackend): void {\n this.trackTensor(a, backend);\n this.backend.incRef(a.dataId);\n }\n\n removeDataId(dataId: DataId, backend: KernelBackend) {\n if (this.state.tensorInfo.has(dataId) &&\n this.state.tensorInfo.get(dataId).backend === backend) {\n this.state.tensorInfo.delete(dataId);\n this.state.numDataBuffers--;\n }\n }\n disposeTensor(a: Tensor): void {\n if (!this.state.tensorInfo.has(a.dataId)) {\n return;\n }\n const info = this.state.tensorInfo.get(a.dataId);\n\n this.state.numTensors--;\n if (a.dtype === 'string') {\n this.state.numStringTensors--;\n this.state.numBytes -= info.bytes;\n }\n // Don't count bytes for complex numbers as they are counted by their\n // components.\n if (a.dtype !== 'complex64' && a.dtype !== 'string') {\n const bytes = a.size * util.bytesPerElement(a.dtype);\n this.state.numBytes -= bytes;\n }\n\n // Remove the reference to dataId if backend dispose the data successfully\n if (info.backend.disposeData(a.dataId)) {\n this.removeDataId(a.dataId, info.backend);\n }\n\n // TODO(nsthorat): Construct an error and save the stack trace for\n // debugging when in debug mode. Creating a stack trace is too expensive\n // to do unconditionally.\n }\n\n disposeVariables(): void {\n for (const varName in this.state.registeredVariables) {\n const v = this.state.registeredVariables[varName];\n this.disposeVariable(v);\n }\n }\n\n disposeVariable(v: Variable): void {\n this.disposeTensor(v);\n if (this.state.registeredVariables[v.name] != null) {\n delete this.state.registeredVariables[v.name];\n }\n }\n\n memory(): MemoryInfo {\n const info = this.backend.memory() as MemoryInfo;\n info.numTensors = this.state.numTensors;\n info.numDataBuffers = this.state.numDataBuffers;\n info.numBytes = this.state.numBytes;\n if (this.state.numStringTensors > 0) {\n info.unreliable = true;\n if (info.reasons == null) {\n info.reasons = [];\n }\n info.reasons.push(\n 'Memory usage by string tensors is approximate ' +\n '(2 bytes per character)');\n }\n return info;\n }\n\n async profile(query: () => (TensorContainer | Promise)):\n Promise {\n this.state.profiling = true;\n\n const startBytes = this.state.numBytes;\n const startNumTensors = this.state.numTensors;\n\n this.state.activeProfile.kernels = [];\n this.state.activeProfile.result = await query();\n\n this.state.profiling = false;\n\n this.state.activeProfile.peakBytes = Math.max(\n ...this.state.activeProfile.kernels.map(d => d.totalBytesSnapshot));\n this.state.activeProfile.newBytes = this.state.numBytes - startBytes;\n this.state.activeProfile.newTensors =\n this.state.numTensors - startNumTensors;\n for (const kernel of this.state.activeProfile.kernels) {\n kernel.kernelTimeMs = await kernel.kernelTimeMs;\n kernel.extraInfo = await kernel.extraInfo;\n }\n return this.state.activeProfile;\n }\n\n isTapeOn(): boolean {\n return this.state.gradientDepth > 0 && this.state.kernelDepth === 0;\n }\n\n private addTapeNode(\n kernelName: string, inputs: NamedTensorMap, outputs: Tensor[],\n gradientsFunc: GradFunc, saved: Tensor[], attrs: NamedAttrMap): void {\n const tapeNode: TapeNode =\n {id: this.state.nextTapeNodeId++, kernelName, inputs, outputs, saved};\n\n const gradConfig = getGradient(kernelName);\n if (gradConfig != null) {\n gradientsFunc = gradConfig.gradFunc;\n }\n if (gradientsFunc != null) {\n tapeNode.gradient = (dys: Tensor[]) => {\n // TODO(smilkov): To optimize back-prop, pass dys that are 
not used in\n // the backprop graph to the user as null instead of zeros\n dys = dys.map((dy, i) => {\n if (dy == null) {\n const output = outputs[i];\n const vals = util.makeZerosTypedArray(output.size, output.dtype);\n return this.makeTensor(vals, output.shape, output.dtype);\n }\n return dy;\n });\n // Grad functions of ops with single outputs expect a dy, while ops\n // with multiple outputs expect dys (array of dy).\n return gradientsFunc(dys.length > 1 ? dys : dys[0], saved, attrs);\n };\n }\n this.state.activeTape.push(tapeNode);\n }\n\n keep(result: T): T {\n result.kept = true;\n return result;\n }\n\n private startTape() {\n if (this.state.gradientDepth === 0) {\n this.state.activeTape = [];\n }\n this.state.gradientDepth++;\n }\n\n private endTape() {\n this.state.gradientDepth--;\n }\n\n /**\n * Start a scope. Use this with endScope() to achieve the same functionality\n * as scope() without the need for a function closure.\n */\n startScope(name?: string) {\n const scopeInfo: ScopeState = {\n track: [],\n name: 'unnamed scope',\n id: this.state.nextScopeId++\n };\n if (name) {\n scopeInfo.name = name;\n }\n this.state.scopeStack.push(scopeInfo);\n this.state.activeScope = scopeInfo;\n }\n\n /**\n * End a scope. Use this with startScope() to achieve the same functionality\n * as scope() without the need for a function closure.\n */\n endScope(result?: TensorContainer) {\n const tensorsToTrackInParent = getTensorsInContainer(result);\n const tensorsToTrackInParentSet =\n new Set(tensorsToTrackInParent.map(t => t.id));\n\n // Dispose the arrays tracked in this scope.\n for (let i = 0; i < this.state.activeScope.track.length; i++) {\n const tensor = this.state.activeScope.track[i];\n if (!tensor.kept && !tensorsToTrackInParentSet.has(tensor.id)) {\n tensor.dispose();\n }\n }\n\n const oldScope = this.state.scopeStack.pop();\n this.state.activeScope = this.state.scopeStack.length === 0 ?\n null :\n this.state.scopeStack[this.state.scopeStack.length - 1];\n\n // Track the current result in the parent scope.\n tensorsToTrackInParent.forEach(tensor => {\n // Only track the tensor if was allocated in the inner scope and is not\n // globally kept.\n if (!tensor.kept && tensor.scopeId === oldScope.id) {\n this.track(tensor);\n }\n });\n }\n\n /**\n * Returns gradients of `f` with respect to each of the `xs`. The gradients\n * returned are of the same length as `xs`, but some might be null if `f`\n * was not a function of that `x`. It also takes optional dy to multiply the\n * gradient, which defaults to `1`.\n */\n gradients(\n f: () => T, xs: Tensor[], dy?: T,\n allowNoGradients = false): {value: T, grads: Tensor[]} {\n util.assert(\n xs.length > 0, () => 'gradients() received an empty list of xs.');\n if (dy != null && dy.dtype !== 'float32') {\n throw new Error(`dy must have 'float32' dtype, but has '${dy.dtype}'`);\n }\n\n const y = this.scopedRun(\n () => this.startTape(), () => this.endTape(),\n () => this.tidy('forward', f));\n\n util.assert(\n y instanceof Tensor,\n () => 'The result y returned by f() must be a tensor.');\n // Filter out the nodes that don't connect x => y.\n const filteredTape = getFilteredNodesXToY(this.state.activeTape, xs, y);\n if (!allowNoGradients && filteredTape.length === 0 && xs.length > 0) {\n throw new Error(\n 'Cannot compute gradient of y=f(x) with respect to x. 
Make sure ' +\n 'that the f you passed encloses all operations that lead from x ' +\n 'to y.');\n }\n\n return this.tidy('backward', () => {\n const accumulatedGradientMap: {[tensorId: number]: Tensor} = {};\n accumulatedGradientMap[y.id] = (dy == null) ? ones(y.shape) : dy;\n\n // Backprop gradients through the filtered nodes.\n backpropagateGradients(\n accumulatedGradientMap, filteredTape,\n // Pass the tidy function to avoid circular dep with `tape.ts`.\n f => this.tidy(f as ScopeFn),\n // Pass an add function to avoide a circular dep with `tape.ts`.\n add);\n const grads = xs.map(x => accumulatedGradientMap[x.id]);\n\n if (this.state.gradientDepth === 0) {\n // This means that we are not computing higher-order gradients\n // and can clean up the tape.\n this.state.activeTape.forEach(node => {\n for (const tensor of node.saved) {\n tensor.dispose();\n }\n });\n this.state.activeTape = null;\n }\n return {value: y, grads};\n });\n }\n\n customGrad(f: CustomGradientFunc):\n (...args: Array) => T {\n util.assert(\n util.isFunction(f),\n () => 'The f passed in customGrad(f) must be a function.');\n return (...inputs: Tensor[]): T => {\n util.assert(\n inputs.every(t => t instanceof Tensor),\n () => 'The args passed in customGrad(f)(x1, x2,...) must all be ' +\n 'tensors');\n\n let res: {\n value: T,\n gradFunc: (dy: T, saved: Tensor[]) => Tensor | Tensor[],\n };\n const inputMap: NamedTensorMap = {};\n inputs.forEach((input, i) => {\n inputMap[i] = input;\n });\n\n const forwardFunc: ForwardFunc = (_, save) => {\n res = f(...[...inputs, save]);\n util.assert(\n res.value instanceof Tensor,\n () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.value` is a tensor');\n util.assert(\n util.isFunction(res.gradFunc),\n () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function.');\n return res.value;\n };\n\n const backwardsFunc = (dy: T, saved: Tensor[]) => {\n const gradRes = res.gradFunc(dy, saved);\n const grads: Tensor[] = Array.isArray(gradRes) ? 
gradRes : [gradRes];\n util.assert(\n grads.length === inputs.length,\n () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function that returns ' +\n 'the same number of tensors as inputs passed to f(...).');\n util.assert(\n grads.every(t => t instanceof Tensor),\n () => 'The function f passed in customGrad(f) must return an ' +\n 'object where `obj.gradFunc` is a function that returns ' +\n 'a list of only tensors.');\n const gradMap: {[key: string]: () => Tensor} = {};\n grads.forEach((grad, i) => {\n gradMap[i] = () => grad;\n });\n return gradMap;\n };\n\n return this.runKernelFunc({\n forwardFunc,\n backwardsFunc,\n inputs: inputMap,\n });\n };\n }\n\n readSync(dataId: DataId): BackendValues {\n // Route the read to the correct backend.\n const info = this.state.tensorInfo.get(dataId);\n return info.backend.readSync(dataId);\n }\n read(dataId: DataId): Promise {\n // Route the read to the correct backend.\n const info = this.state.tensorInfo.get(dataId);\n return info.backend.read(dataId);\n }\n\n readToGPU(dataId: DataId, options?: DataToGPUOptions): GPUData {\n // Route the read to the correct backend.\n const info = this.state.tensorInfo.get(dataId);\n return info.backend.readToGPU(dataId, options);\n }\n\n async time(query: () => void): Promise {\n const start = now();\n const timingInfo = await this.backend.time(query) as TimingInfo;\n timingInfo.wallMs = now() - start;\n return timingInfo;\n }\n\n /**\n * Tracks a Tensor in the current scope to be automatically cleaned up\n * when the current scope ends, and returns the value.\n *\n * @param result The Tensor to track in the current scope.\n */\n private track(result: T): T {\n if (this.state.activeScope != null) {\n result.scopeId = this.state.activeScope.id;\n this.state.activeScope.track.push(result);\n }\n\n return result;\n }\n\n get registeredVariables(): NamedVariableMap {\n return this.state.registeredVariables;\n }\n\n /**\n * Resets the engine state. 
Removes all backends but does not remove\n * registered backend factories.\n */\n reset(): void {\n // Make any pending promise obsolete.\n this.pendingBackendInitId++;\n\n this.state.dispose();\n this.ENV.reset();\n this.state = new EngineState();\n\n for (const backendName in this.registry) {\n this.disposeRegisteredKernels(backendName);\n this.registry[backendName].dispose();\n delete this.registry[backendName];\n }\n this.backendName = null;\n this.backendInstance = null;\n this.pendingBackendInit = null;\n }\n}\n\nfunction ones(shape: number[]): Tensor {\n const values = makeOnesTypedArray(sizeFromShape(shape), 'float32');\n return ENGINE.makeTensor(values, shape, 'float32');\n}\n\nexport function getOrMakeEngine(): Engine {\n const ns = getGlobalNamespace() as unknown as {_tfengine: Engine};\n if (ns._tfengine == null) {\n const environment = new Environment(ns);\n ns._tfengine = new Engine(environment);\n }\n setEnvironmentGlobal(ns._tfengine.ENV);\n\n // Tell the current tensor interface that the global engine is responsible\n // for tracking.\n setTensorTracker(() => ns._tfengine);\n return ns._tfengine;\n}\n\nexport const ENGINE = getOrMakeEngine();\n\n/**\n * A implementation of the add op for use within engine and tape.\n *\n * This allows us to avoid a circular dependency between add.ts and engine.\n * It is exported to be available in tape tests.\n */\nexport function add(a: Tensor, b: Tensor): Tensor {\n // We duplicate Add here to avoid a circular dependency with add.ts.\n const inputs = {a, b};\n return ENGINE.runKernel(Add, inputs as unknown as NamedTensorMap);\n}\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from './tensor';\nimport {NamedTensorMap} from './tensor_types';\nimport * as util from './util';\n\nexport interface TapeNode {\n id: number;\n kernelName: string;\n outputs: Tensor[];\n inputs: NamedTensorMap;\n // Optional params, defined only for ops with gradient impl.\n gradient?: (dys: Tensor[]) => NamedGradientMap;\n saved?: Tensor[];\n}\n\nexport type NamedGradientMap = {\n [inputName: string]: () => Tensor;\n};\n\n/**\n * Computes a list of TapeNodes that connect x to y, filtering everything else\n * out and preserving the order of the original tape elements.\n *\n * @param tape The tape elements to filter.\n * @param xs The input Tensors.\n * @param y The output Tensor.\n */\nexport function getFilteredNodesXToY(\n tape: TapeNode[], xs: Tensor[], y: Tensor): TapeNode[] {\n // Forward pass to compute all the nodes and Tensors that are transitively a\n // function of x.\n const tensorsFromX: {[tensorId: number]: boolean} = {};\n const nodesFromX: {[nodeId: number]: boolean} = {};\n for (let i = 0; i < xs.length; i++) {\n tensorsFromX[xs[i].id] = true;\n }\n\n for (let i = 0; i < tape.length; i++) {\n const node = tape[i];\n const nodeInputs = node.inputs;\n for (const 
inputName in nodeInputs) {\n const input = nodeInputs[inputName];\n\n let anyInputFromX = false;\n for (let j = 0; j < xs.length; j++) {\n if (tensorsFromX[input.id]) {\n node.outputs.forEach(output => tensorsFromX[output.id] = true);\n anyInputFromX = true;\n nodesFromX[node.id] = true;\n break;\n }\n }\n\n if (anyInputFromX) {\n break;\n }\n }\n }\n\n // Backward pass to find all of the nodes and Tensors that lead to y.\n const tensorsLeadToY: {[tensorId: number]: boolean} = {};\n tensorsLeadToY[y.id] = true;\n const nodesToY: {[nodeId: number]: boolean} = {};\n\n for (let i = tape.length - 1; i >= 0; i--) {\n const node = tape[i];\n const nodeInputs = node.inputs;\n\n // If any of the outputs lead to y, mark all of the inputs as leading to y.\n for (let j = 0; j < node.outputs.length; j++) {\n if (tensorsLeadToY[node.outputs[j].id]) {\n for (const inputName in nodeInputs) {\n tensorsLeadToY[nodeInputs[inputName].id] = true;\n nodesToY[node.id] = true;\n }\n break;\n }\n }\n }\n\n // Return the paths that come from x and lead to y.\n const filteredTape: TapeNode[] = [];\n for (let i = 0; i < tape.length; i++) {\n const node = tape[i];\n\n if (nodesFromX[node.id] && nodesToY[node.id]) {\n // Prune the inputs from the node that aren't a function of x.\n const prunedInputs: {[inputName: string]: Tensor} = {};\n for (const inputName in node.inputs) {\n const nodeInput = node.inputs[inputName];\n if (tensorsFromX[nodeInput.id]) {\n prunedInputs[inputName] = nodeInput;\n }\n }\n\n // Copy the node and overwrite inputsAndArgs to the pruned version.\n const prunedNode = Object.assign({}, node);\n prunedNode.inputs = prunedInputs;\n prunedNode.outputs = node.outputs;\n\n filteredTape.push(prunedNode);\n }\n }\n\n return filteredTape;\n}\n\n/**\n * Backpropagate gradients through the filtered TapeNodes.\n *\n * @param tensorAccumulatedGradientMap A map of Tensor to its gradient. This map\n * is mutated by this method.\n * @param filteredTape The filtered TapeNodes to backprop through.\n */\nexport function backpropagateGradients(\n tensorAccumulatedGradientMap: {[tensorId: number]: Tensor},\n filteredTape: TapeNode[], tidy: (f: Function) => Tensor,\n add: (a: Tensor, b: Tensor) => Tensor) {\n // Walk the tape backward and keep a map of Tensor to its gradient.\n for (let i = filteredTape.length - 1; i >= 0; i--) {\n const node = filteredTape[i];\n\n const dys: Tensor[] = [];\n node.outputs.forEach(o => {\n const gradTensor = tensorAccumulatedGradientMap[o.id];\n if (gradTensor != null) {\n dys.push(gradTensor);\n } else {\n // This particular output is not in the back-propagation subgraph, so it\n // does not affect the final output, thus we put null for its dy.\n dys.push(null);\n }\n });\n\n if (node.gradient == null) {\n throw new Error(\n `Cannot compute gradient: gradient function not found ` +\n `for ${node.kernelName}.`);\n }\n\n // Backprop dy through this node and accumulate gradients over the inputs.\n const inputGradients = node.gradient(dys);\n\n for (const inputName in node.inputs) {\n if (!(inputName in inputGradients)) {\n throw new Error(\n `Cannot backprop through input ${inputName}. ` +\n `Available gradients found: ${Object.keys(inputGradients)}.`);\n }\n\n // Call the gradient function.\n const dx = tidy(() => inputGradients[inputName]());\n if (dx.dtype !== 'float32') {\n throw new Error(\n `Error in gradient for op ${\n node.kernelName}. 
The gradient of input ` +\n `${inputName} must have 'float32' dtype, but has '${dx.dtype}'`);\n }\n const x = node.inputs[inputName];\n if (!util.arraysEqual(dx.shape, x.shape)) {\n throw new Error(\n `Error in gradient for op ${\n node.kernelName}. The gradient of input ` +\n `'${inputName}' has shape '${dx.shape}', which does not match ` +\n `the shape of the input '${x.shape}'`);\n }\n\n if (tensorAccumulatedGradientMap[x.id] == null) {\n tensorAccumulatedGradientMap[x.id] = dx;\n } else {\n const curGradient = tensorAccumulatedGradientMap[x.id];\n tensorAccumulatedGradientMap[x.id] = add(curGradient, dx);\n curGradient.dispose();\n }\n }\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// tslint:disable-next-line:no-any\nfunction _isNavigatorDefined(): boolean {\n return typeof navigator !== 'undefined' && navigator != null;\n}\n\nlet isMobileMockValue: boolean|undefined;\n\nexport function mockIsMobile(value: boolean|undefined) {\n isMobileMockValue = value;\n}\n\nexport function isMobile(nav?: Navigator): boolean {\n if (isMobileMockValue !== undefined) {\n return isMobileMockValue;\n }\n if (nav || _isNavigatorDefined()) {\n if (!nav) {\n nav = navigator;\n }\n if (nav.product === 'ReactNative') {\n return true;\n }\n\n const a = nav.userAgent || nav.vendor ||\n // tslint:disable-next-line:no-any\n (typeof window !== 'undefined' ? 
(window as any).opera : '');\n // Use `navigator.userAgentData.mobile` as fallback.\n if (!a) {\n // tslint:disable-next-line:no-any\n const navAny = nav as any;\n return navAny.userAgentData && navAny.userAgentData.mobile;\n }\n // tslint:disable-next-line:max-line-length\n return /(android|bb\\d+|meego).+mobile|avantgo|bada\\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i\n .test(a) ||\n // tslint:disable-next-line:max-line-length\n /1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\\-(n|u)|c55\\/|capi|ccwa|cdm\\-|cell|chtm|cldc|cmd\\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\\-s|devi|dica|dmob|do(c|p)o|ds(12|\\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\\-|_)|g1 u|g560|gene|gf\\-5|g\\-mo|go(\\.w|od)|gr(ad|un)|haie|hcit|hd\\-(m|p|t)|hei\\-|hi(pt|ta)|hp( i|ip)|hs\\-c|ht(c(\\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\\-(20|go|ma)|i230|iac( |\\-|\\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\\/)|klon|kpt |kwc\\-|kyo(c|k)|le(no|xi)|lg( g|\\/(k|l|u)|50|54|\\-[a-w])|libw|lynx|m1\\-w|m3ga|m50\\/|ma(te|ui|xo)|mc(01|21|ca)|m\\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\\-2|po(ck|rt|se)|prox|psio|pt\\-g|qa\\-a|qc(07|12|21|32|60|\\-[2-7]|i\\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\\-|oo|p\\-)|sdk\\/|se(c(\\-|0|1)|47|mc|nd|ri)|sgh\\-|shar|sie(\\-|m)|sk\\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\\-|v\\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\\-|tdg\\-|tel(i|m)|tim\\-|t\\-mo|to(pl|sh)|ts(70|m\\-|m3|m5)|tx\\-9|up(\\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\\-|your|zeto|zte\\-/i\n .test(a.substr(0, 4));\n }\n return false;\n}\n\nexport function isBrowser(): boolean {\n return (typeof window !== 'undefined' && window.document != null) ||\n //@ts-ignore\n (typeof WorkerGlobalScope !== 'undefined');\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport './engine';\n\nimport * as device_util from './device_util';\nimport {env} from './environment';\n\nconst ENV = env();\n\n/**\n * This file contains environment-related flag registrations.\n */\n\n/** Whether to enable debug mode. 
*/\nENV.registerFlag('DEBUG', () => false, debugValue => {\n if (debugValue) {\n console.warn(\n 'Debugging mode is ON. The output of every math call will ' +\n 'be downloaded to CPU and checked for NaNs. ' +\n 'This significantly impacts performance.');\n }\n});\n\n/** Whether we are in a browser (as versus, say, node.js) environment. */\nENV.registerFlag('IS_BROWSER', () => device_util.isBrowser());\n\n/** Whether we are in a browser (as versus, say, node.js) environment. */\nENV.registerFlag(\n 'IS_NODE',\n () => (typeof process !== 'undefined') &&\n (typeof process.versions !== 'undefined') &&\n (typeof process.versions.node !== 'undefined'));\n\n/** Whether this browser is Chrome. */\nENV.registerFlag(\n 'IS_CHROME',\n () => typeof navigator !== 'undefined' && navigator != null &&\n navigator.userAgent != null && /Chrome/.test(navigator.userAgent) &&\n /Google Inc/.test(navigator.vendor));\n\n/**\n * True when the environment is \"production\" where we disable safety checks\n * to gain performance.\n */\nENV.registerFlag('PROD', () => false);\n\n/**\n * Whether to do sanity checks when inferring a shape from user-provided\n * values, used when creating a new tensor.\n */\nENV.registerFlag(\n 'TENSORLIKE_CHECK_SHAPE_CONSISTENCY', () => ENV.getBool('DEBUG'));\n\n/** Whether deprecation warnings are enabled. */\nENV.registerFlag('DEPRECATION_WARNINGS_ENABLED', () => true);\n\n/** True if running unit tests. */\nENV.registerFlag('IS_TEST', () => false);\n\n/** Whether to check computation result for errors. */\nENV.registerFlag('CHECK_COMPUTATION_FOR_ERRORS', () => true);\n\n/** Whether the backend needs to wrap input to imageBitmap. */\nENV.registerFlag('WRAP_TO_IMAGEBITMAP', () => false);\n\n/** Whether to enable canvas2d willReadFrequently for GPU backends */\nENV.registerFlag('CANVAS2D_WILL_READ_FREQUENTLY_FOR_GPU', () => false);\n\n/** Whether to use setTimeoutCustom */\nENV.registerFlag('USE_SETTIMEOUTCUSTOM', () => false);\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from './engine';\nimport {env} from './environment';\nimport {Tensor} from './tensor';\nimport {DataType, TensorLike, WebGLData, WebGPUData} from './types';\nimport {assert, flatten, inferDtype, isTypedArray, toTypedArray} from './util';\nimport {bytesPerElement} from './util_base';\n\nexport function inferShape(\n val: TensorLike|WebGLData|WebGPUData, dtype?: DataType): number[] {\n let firstElem: typeof val = val;\n\n if (isTypedArray(val)) {\n return dtype === 'string' ? [] : [val.length];\n }\n const isObject = typeof val === 'object';\n if (isObject) {\n if ('texture' in val) {\n const usedChannels = val.channels || 'RGBA';\n return [val.height, val.width * usedChannels.length];\n } else if ('buffer' in val && !(val.buffer instanceof ArrayBuffer)) {\n return [val.buffer.size / (dtype == null ? 
4 : bytesPerElement(dtype))];\n }\n }\n if (!Array.isArray(val)) {\n return []; // Scalar.\n }\n const shape: number[] = [];\n\n while (Array.isArray(firstElem) ||\n isTypedArray(firstElem) && dtype !== 'string') {\n shape.push(firstElem.length);\n firstElem = firstElem[0];\n }\n if (Array.isArray(val) &&\n env().getBool('TENSORLIKE_CHECK_SHAPE_CONSISTENCY')) {\n deepAssertShapeConsistency(val, shape, []);\n }\n\n return shape;\n}\n\nfunction deepAssertShapeConsistency(\n val: TensorLike, shape: number[], indices: number[]) {\n indices = indices || [];\n if (!(Array.isArray(val)) && !isTypedArray(val)) {\n assert(\n shape.length === 0,\n () => `Element arr[${indices.join('][')}] is a primitive, ` +\n `but should be an array/TypedArray of ${shape[0]} elements`);\n return;\n }\n assert(\n shape.length > 0,\n () => `Element arr[${indices.join('][')}] should be a primitive, ` +\n `but is an array of ${val.length} elements`);\n assert(\n val.length === shape[0],\n () => `Element arr[${indices.join('][')}] should have ${shape[0]} ` +\n `elements, but has ${val.length} elements`);\n const subShape = shape.slice(1);\n for (let i = 0; i < val.length; ++i) {\n deepAssertShapeConsistency(val[i], subShape, indices.concat(i));\n }\n}\n\nfunction assertDtype(\n expectedDtype: DataType|'numeric'|'string_or_numeric',\n actualDType: DataType, argName: string, functionName: string) {\n if (expectedDtype === 'string_or_numeric') {\n return;\n }\n if (expectedDtype == null) {\n throw new Error(`Expected dtype cannot be null.`);\n }\n if (expectedDtype !== 'numeric' && expectedDtype !== actualDType ||\n expectedDtype === 'numeric' && actualDType === 'string') {\n throw new Error(\n `Argument '${argName}' passed to '${functionName}' must ` +\n `be ${expectedDtype} tensor, but got ${actualDType} tensor`);\n }\n}\n\nexport function convertToTensor(\n x: T|TensorLike, argName: string, functionName: string,\n parseAsDtype: DataType|'numeric'|'string_or_numeric' = 'numeric'): T {\n if (x instanceof Tensor) {\n assertDtype(parseAsDtype, x.dtype, argName, functionName);\n return x;\n }\n let inferredDtype = inferDtype(x);\n // If the user expects a bool/int/float, use that info to update the\n // inferredDtype when it is not a string.\n if (inferredDtype !== 'string' &&\n ['bool', 'int32', 'float32'].indexOf(parseAsDtype) >= 0) {\n inferredDtype = parseAsDtype as DataType;\n }\n assertDtype(parseAsDtype, inferredDtype, argName, functionName);\n\n if ((x == null) ||\n (!isTypedArray(x) && !Array.isArray(x) && typeof x !== 'number' &&\n typeof x !== 'boolean' && typeof x !== 'string')) {\n const type = x == null ? 
'null' : (x as {}).constructor.name;\n throw new Error(\n `Argument '${argName}' passed to '${functionName}' must be a ` +\n `Tensor or TensorLike, but got '${type}'`);\n }\n const inferredShape = inferShape(x, inferredDtype);\n if (!isTypedArray(x) && !Array.isArray(x)) {\n x = [x] as number[];\n }\n const skipTypedArray = true;\n const values = inferredDtype !== 'string' ?\n toTypedArray(x, inferredDtype as DataType) :\n flatten(x as string[], [], skipTypedArray) as string[];\n return ENGINE.makeTensor(values, inferredShape, inferredDtype) as T;\n}\n\nexport function convertToTensorArray(\n arg: Array, argName: string, functionName: string,\n parseAsDtype: DataType|'numeric'|'string_or_numeric' = 'numeric'): T[] {\n if (!Array.isArray(arg)) {\n throw new Error(\n `Argument ${argName} passed to ${functionName} must be a ` +\n '`Tensor[]` or `TensorLike[]`');\n }\n const tensors = arg as T[];\n return tensors.map(\n (t, i) =>\n convertToTensor(t, `${argName}[${i}]`, functionName, parseAsDtype));\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {isPromise} from '../util';\n\nexport const OP_SCOPE_SUFFIX = '__op';\n\n/**\n * Used for wrapping functions that perform math operations on\n * Tensors. The function will be wrapped in a named scope that cleans all\n * memory usage after the function is done.\n */\nexport function op(f: {[name: string]: T}): T {\n const keys = Object.keys(f);\n if (keys.length !== 1) {\n throw new Error(\n `Please provide an object with a single key ` +\n `(operation name) mapping to a function. Got an object with ` +\n `${keys.length} keys.`);\n }\n\n let opName = keys[0];\n const fn = f[opName];\n\n // Strip the underscore from the end of the function name.\n if (opName.endsWith('_')) {\n opName = opName.substring(0, opName.length - 1);\n }\n\n // add an __op suffix to distinguish ops from kernels in tf.profile\n opName = opName + OP_SCOPE_SUFFIX;\n\n // tslint:disable-next-line:no-any\n const f2 = (...args: any[]) => {\n ENGINE.startScope(opName);\n try {\n const result = fn(...args);\n if (isPromise(result)) {\n console.error('Cannot return a Promise inside of tidy.');\n }\n ENGINE.endScope(result);\n return result;\n } catch (ex) {\n ENGINE.endScope(null);\n throw ex;\n }\n };\n Object.defineProperty(f2, 'name', {value: opName, configurable: true});\n\n // tslint:disable-next-line:no-any\n return f2 as any as T;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Complex, ComplexInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\n\n/**\n * Converts two real numbers to a complex number.\n *\n * Given a tensor `real` representing the real part of a complex number, and a\n * tensor `imag` representing the imaginary part of a complex number, this\n * operation returns complex numbers elementwise of the form [r0, i0, r1, i1],\n * where r represents the real part and i represents the imag part.\n *\n * The input tensors real and imag must have the same shape.\n *\n * ```js\n * const real = tf.tensor1d([2.25, 3.25]);\n * const imag = tf.tensor1d([4.75, 5.75]);\n * const complex = tf.complex(real, imag);\n *\n * complex.print();\n * ```\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction complex_(real: T|TensorLike, imag: T|TensorLike): T {\n const $real = convertToTensor(real, 'real', 'complex');\n const $imag = convertToTensor(imag, 'imag', 'complex');\n util.assertShapesMatch(\n $real.shape, $imag.shape,\n `real and imag shapes, ${$real.shape} and ${$imag.shape}, ` +\n `must match in call to tf.complex().`);\n\n const inputs: ComplexInputs = {real: $real, imag: $imag};\n return ENGINE.runKernel(Complex, inputs as unknown as NamedTensorMap);\n}\n\nexport const complex = /* @__PURE__ */ op({complex_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Tensor} from '../tensor';\nimport {TensorLike, TypedArray, WebGLData, WebGPUData} from '../types';\nimport {DataType} from '../types';\nimport {assert, assertNonNegativeIntegerDimensions, flatten, inferDtype, isTypedArray, sizeFromShape, toTypedArray} from '../util';\n\n/** This is shared code across all tensor creation methods. 
*/\nexport function makeTensor(\n values: TensorLike|WebGLData|WebGPUData, shape: number[],\n inferredShape: number[], dtype?: DataType): Tensor {\n if (dtype == null) {\n dtype = inferDtype(values);\n } else if (dtype === 'complex64') {\n throw new Error(\n `Cannot construct a complex64 tensor directly. ` +\n `Please use tf.complex(real, imag).`);\n }\n\n if (typeof values === 'object' &&\n ('texture' in values ||\n ('buffer' in values && !(values.buffer instanceof ArrayBuffer)))) {\n if (dtype !== 'float32' && dtype !== 'int32') {\n throw new Error(\n `Creating tensor from GPU data only supports ` +\n `'float32'|'int32' dtype, while the dtype is ${dtype}.`);\n }\n return ENGINE.backend.createTensorFromGPUData(\n values as WebGLData | WebGPUData, shape || inferredShape, dtype);\n }\n\n if (!isTypedArray(values) && !Array.isArray(values) &&\n typeof values !== 'number' && typeof values !== 'boolean' &&\n typeof values !== 'string') {\n throw new Error(\n 'values passed to tensor(values) must be a number/boolean/string or ' +\n 'an array of numbers/booleans/strings, or a TypedArray');\n }\n // Verify that the shape matches the inferred shape.\n if (shape != null) {\n assertNonNegativeIntegerDimensions(shape);\n\n const providedSize = sizeFromShape(shape);\n const inferredSize = sizeFromShape(inferredShape);\n assert(\n providedSize === inferredSize,\n () =>\n `Based on the provided shape, [${shape}], the tensor should have ` +\n `${providedSize} values but has ${inferredSize}`);\n\n for (let i = 0; i < inferredShape.length; ++i) {\n const inferred = inferredShape[i];\n const flatDimsDontMatch = i === inferredShape.length - 1 ?\n inferred !== sizeFromShape(shape.slice(i)) :\n true;\n assert(\n inferredShape[i] === shape[i] || !flatDimsDontMatch,\n () => `Error creating a new Tensor. Inferred shape ` +\n `(${inferredShape}) does not match the provided ` +\n `shape (${shape}). `);\n }\n }\n\n if (!isTypedArray(values) && !Array.isArray(values)) {\n values = [values] as number[];\n }\n\n shape = shape || inferredShape;\n values = dtype !== 'string' ?\n toTypedArray(values, dtype) :\n flatten(values as string[], [], true) as string[];\n return ENGINE.makeTensor(values as TypedArray, shape, dtype);\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {inferShape} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {DataType, Rank, ShapeMap, WebGLData, WebGPUData} from '../types';\n\nimport {makeTensor} from './tensor_ops_util';\n\n/**\n * Creates a `tf.Tensor` with the provided values, shape and dtype.\n *\n * ```js\n * // Pass an array of values to create a vector.\n * tf.tensor([1, 2, 3, 4]).print();\n * ```\n *\n * ```js\n * // Pass a nested array of values to make a matrix or a higher\n * // dimensional tensor.\n * tf.tensor([[1, 2], [3, 4]]).print();\n * ```\n *\n * ```js\n * // Pass a flat array and specify a shape yourself.\n * tf.tensor([1, 2, 3, 4], [2, 2]).print();\n * ```\n *\n * ```js\n * // Pass a `WebGLData` object and specify a shape yourself.\n *\n * // This makes it possible for TF.js applications to avoid GPU / CPU sync.\n * // For example, if your application includes a preprocessing step on the GPU,\n * // you could upload the GPU output directly to TF.js, rather than first\n * // downloading the values.\n *\n * // Example for WebGL2:\n * const customCanvas = document.createElement('canvas');\n * const customBackend = new tf.MathBackendWebGL(customCanvas);\n * tf.registerBackend('custom-webgl', () => customBackend);\n * await tf.setBackend('custom-webgl');\n * const gl = customBackend.gpgpu.gl;\n * const texture = gl.createTexture();\n * const tex2d = gl.TEXTURE_2D;\n * const width = 2;\n * const height = 2;\n *\n * gl.bindTexture(tex2d, texture);\n * gl.texParameteri(tex2d, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);\n * gl.texParameteri(tex2d, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);\n * gl.texParameteri(tex2d, gl.TEXTURE_MIN_FILTER, gl.NEAREST);\n * gl.texParameteri(tex2d, gl.TEXTURE_MAG_FILTER, gl.NEAREST);\n * gl.texImage2D(\n * tex2d, 0, gl.RGBA32F, // internalFormat\n * width, height, 0,\n * gl.RGBA, // textureFormat\n * gl.FLOAT, // textureType\n * new Float32Array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])\n * );\n *\n * // Currently, the `texture` has 4 pixels:\n * // Pixel0 is {R:0, G:1, B:2, A:3}\n * // Pixel1 is {R:4, G:5, B:6, A:7}\n * // Pixel2 is {R:8, G:9, B:10, A:11}\n * // Pixel3 is {R:12, G:13, B:14, A:15}\n *\n * const logicalShape = [height * width * 2];\n * const a = tf.tensor({texture, height, width, channels: 'BR'}, logicalShape);\n * // Tensor value will be [2, 0, 6, 4, 10, 8, 14, 12], since [2, 0] is the\n * // values of 'B' and 'R' channels of Pixel0, [6, 4] is the values of 'B' and\n * 'R'\n * // channels of Pixel1...\n *\n * // For postprocessing on the GPU, it's possible to retrieve the texture\n * // backing any tensor by calling the tensor's `dataToGPU` method like\n * // so:\n *\n * const tex = a.dataToGPU();\n * ```\n *\n * ```js\n * // Pass a `WebGPUData` object and specify a shape yourself.\n *\n * // This makes it possible for TF.js applications to 
avoid GPU / CPU sync.\n * // For example, if your application includes a preprocessing step on the GPU,\n * // you could upload the GPU output directly to TF.js, rather than first\n * // downloading the values. Unlike WebGL, this optionally supports zero copy\n * // by WebGPUData.zeroCopy. When zeroCopy is false or undefined(default), this\n * // passing GPUBuffer can be destroyed after tensor is created. When zeroCopy\n * // is true, this GPUBuffer is bound directly by the tensor, so do not destroy\n * // this GPUBuffer until all access is done.\n *\n * // Example for WebGPU:\n * function createGPUBufferFromData(device, data, dtype) {\n * const bytesPerElement = 4;\n * const sizeInBytes = data.length * bytesPerElement;\n *\n * const gpuWriteBuffer = device.createBuffer({\n * mappedAtCreation: true,\n * size: sizeInBytes,\n * usage: GPUBufferUsage.MAP_WRITE | GPUBufferUsage.COPY_SRC\n * });\n * const arrayBuffer = gpuWriteBuffer.getMappedRange();\n * if (dtype === 'float32') {\n * new Float32Array(arrayBuffer).set(data);\n * } else if (dtype === 'int32') {\n * new Int32Array(arrayBuffer).set(data);\n * } else {\n * throw new Error(\n * `Creating tensor from GPUBuffer only supports` +\n * `'float32'|'int32' dtype, while the dtype is ${dtype}.`);\n * }\n * gpuWriteBuffer.unmap();\n *\n * const gpuReadBuffer = device.createBuffer({\n * mappedAtCreation: false,\n * size: sizeInBytes,\n * usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE |\n * GPUBufferUsage.COPY_SRC\n * });\n *\n * const copyEncoder = device.createCommandEncoder();\n * copyEncoder.copyBufferToBuffer(\n * gpuWriteBuffer, 0, gpuReadBuffer, 0, sizeInBytes);\n * const copyCommands = copyEncoder.finish();\n * device.queue.submit([copyCommands]);\n * gpuWriteBuffer.destroy();\n * return gpuReadBuffer;\n * }\n *\n * const dtype = 'float32';\n * const device = tf.backend().device;\n * const aData = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];\n * const bData = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4];\n * const expected = [2, 4, 6, 8, 6, 8, 10, 12, 10, 12, 14, 16, 14, 16, 18, 20];\n * const aBuffer = createGPUBufferFromData(device, aData, dtype);\n * const shape = [aData.length];\n * // To use zeroCopy, use {buffer: aBuffer, zeroCopy: true} instead and destroy\n * // aBuffer untill all access is done.\n * const a = tf.tensor({buffer: aBuffer}, shape, dtype);\n * const b = tf.tensor(bData, shape, dtype);\n * const result = tf.add(a, b);\n * a.dispose();\n * b.dispose();\n * result.dispose();\n * aBuffer.destroy();\n * ```\n * @param values The values of the tensor. Can be nested array of numbers,\n * or a flat array, or a `TypedArray`, or a `WebGLData` object, or a\n * `WebGPUData` object. If the values are strings, they will be encoded as utf-8\n * and kept as `Uint8Array[]`. If the values is a `WebGLData` object, the dtype\n * could only be 'float32' or 'int32' and the object has to have: 1. texture, a\n * `WebGLTexture`, the texture must share the same `WebGLRenderingContext` with\n * TFJS's WebGL backend (you could create a custom WebGL backend from your\n * texture's canvas) and the internal texture format for the input texture must\n * be floating point or normalized integer; 2. height, the height of the\n * texture; 3. width, the width of the texture; 4. channels, a non-empty subset\n * of 'RGBA', indicating the values of which channels will be passed to the\n * tensor, such as 'R' or 'BR' (The order of the channels affect the order of\n * tensor values. ). 
(If the values passed from texture is less than the tensor\n * size, zeros will be padded at the rear.). If the values is a `WebGPUData`\n * object, the dtype could only be 'float32' or 'int32 and the object has to\n * have: buffer, a `GPUBuffer`. The buffer must: 1. share the same `GPUDevice`\n * with TFJS's WebGPU backend; 2. buffer.usage should at least support\n * GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC; 3. buffer.size should not\n * be smaller than the byte size of tensor shape. WebGPUData optionally supports\n * zero copy by flag zeroCopy. When zeroCopy is false or undefined(default),\n * this passing GPUBuffer can be destroyed after tensor is created. When\n * zeroCopy is true, this GPUBuffer is bound directly by the tensor, so do not\n * destroy this GPUBuffer until all access is done.\n * @param shape The shape of the tensor. Optional. If not provided,\n * it is inferred from `values`.\n * @param dtype The data type.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function tensor(\n values: TensorLike|WebGLData|WebGPUData, shape?: ShapeMap[R],\n dtype?: DataType): Tensor {\n const inferredShape = inferShape(values, dtype);\n return makeTensor(values, shape, inferredShape, dtype) as Tensor;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/* Type definitions for exporting and importing of models. */\n\n/**\n * A map from Tensor dtype to number of bytes per element of the Tensor.\n */\nexport const DTYPE_VALUE_SIZE_MAP: {[dtype: string]: number} = {\n 'float32': 4,\n 'float16': 2,\n 'int32': 4,\n 'uint16': 2,\n 'uint8': 1,\n 'bool': 1,\n 'complex64': 8\n};\n\n/**\n * A weight manifest.\n *\n * The weight manifest consists of an ordered list of weight-manifest groups.\n * Each weight-manifest group (\"group\" for short hereafter) consists of a\n * number of weight values stored in a number of paths.\n * See the documentation of `WeightManifestGroupConfig` below for more details.\n */\nexport declare type WeightsManifestConfig = WeightsManifestGroupConfig[];\n\n/**\n * A weight-manifest group.\n *\n * Consists of an ordered list of weight values encoded in binary format,\n * stored in an ordered list of paths.\n */\nexport declare interface WeightsManifestGroupConfig {\n /**\n * An ordered list of paths.\n *\n * Paths are intentionally abstract in order to be general. 
For example, they\n * can be relative URL paths or relative paths on the file system.\n */\n paths: string[];\n\n /**\n * Specifications of the weights stored in the paths.\n */\n weights: WeightsManifestEntry[];\n}\n\n/**\n * Group to which the weight belongs.\n *\n * - 'optimizer': Weight from a stateful optimizer.\n */\nexport type WeightGroup = 'model'|'optimizer';\n\n/**\n * An entry in the weight manifest.\n *\n * The entry contains specification of a weight.\n */\nexport declare interface WeightsManifestEntry {\n /**\n * Name of the weight, e.g., 'Dense_1/bias'\n */\n name: string;\n\n /**\n * Shape of the weight.\n */\n shape: number[];\n\n /**\n * Data type of the weight.\n */\n dtype: 'float32'|'int32'|'bool'|'string'|'complex64';\n\n /**\n * Type of the weight.\n *\n * Optional.\n *\n * The value 'optimizer' indicates the weight belongs to an optimizer\n * (i.e., used only during model training and not during inference).\n */\n group?: WeightGroup;\n\n /**\n * Information for dequantization of the weight.\n */\n quantization?: {\n scale?: number, // The scaling constant to multiply by.\n min?: number, // The (possibly nudged) minimum weight to add.\n dtype: 'uint16'|'uint8'|'float16' // The dtype of the quantized weights.\n };\n}\n\n/**\n * Options for saving a model.\n * @innamespace io\n */\nexport interface SaveConfig {\n /**\n * Whether to save only the trainable weights of the model, ignoring the\n * non-trainable ones.\n */\n trainableOnly?: boolean;\n\n /**\n * Whether the optimizer will be saved (if exists).\n *\n * Default: `false`.\n */\n includeOptimizer?: boolean;\n}\n\n/**\n * Result of a saving operation.\n */\nexport interface SaveResult {\n /**\n * Information about the model artifacts saved.\n */\n modelArtifactsInfo: ModelArtifactsInfo;\n\n /**\n * HTTP responses from the server that handled the model-saving request (if\n * any). This is applicable only to server-based saving routes.\n */\n responses?: Response[];\n\n /**\n * Error messages and related data (if any).\n */\n errors?: Array<{}|string>;\n}\n\nexport declare interface ModelArtifactsInfo {\n /**\n * Timestamp for when the model is saved.\n */\n dateSaved: Date;\n\n /**\n * TODO (cais,yassogba) consider removing GraphDef as GraphDefs now\n * come in a JSON format and none of our IOHandlers support a non json\n * format. We could conder replacing this with 'Binary' if we want to\n * allow future handlers to save to non json formats (though they will\n * probably want more information than 'Binary').\n * Type of the model topology\n *\n * Type of the model topology\n *\n * Possible values:\n * - JSON: JSON config (human-readable, e.g., Keras JSON).\n * - GraphDef: TensorFlow\n * [GraphDef](https://www.tensorflow.org/extend/tool_developers/#graphdef)\n * protocol buffer (binary).\n */\n modelTopologyType: 'JSON'|'GraphDef';\n\n /**\n * Size of model topology (Keras JSON or GraphDef), in bytes.\n */\n modelTopologyBytes?: number;\n\n /**\n * Size of weight specification or manifest, in bytes.\n */\n weightSpecsBytes?: number;\n\n /**\n * Size of weight value data, in bytes.\n */\n weightDataBytes?: number;\n}\n\n/** Model training configuration. */\nexport declare interface TrainingConfig {\n // TODO(cais): Tighten the typing once keras spec is available to tfjs-core.\n // See\n // tslint:disable-next-line:max-line-length\n // https://github.com/tensorflow/tfjs-layers/blob/master/src/keras_format/training_config.ts\n /** Optimizer used for the model training. 
*/\n optimizer_config: {};\n\n // TODO(cais): Tighten the typing once keras spec is available to tfjs-core.\n /** Loss function(s) for the model's output(s). */\n loss: string|string[]|{[key: string]: string};\n\n // TODO(cais): Tighten the typing once keras spec is available to tfjs-core.\n /** Metric function(s) for the model's output(s). */\n metrics?: string[]|{[key: string]: string};\n\n // TODO(cais): Tighten the typing once keras spec is available to tfjs-core.\n weighted_metrics?: string[];\n\n // TODO(cais): Tighten the typing once keras spec is available to tfjs-core.\n sample_weight_mode?: string;\n\n loss_weights?: number[]|{[key: string]: number};\n}\n\n/**\n * The serialized artifacts of a model, including topology and weights.\n *\n * The `modelTopology`, `trainingConfig`, `weightSpecs` and `weightData` fields\n * of this interface are optional, in order to support topology- or weights-only\n * saving and loading.\n *\n * Note this interface is used internally in IOHandlers. For the file format\n * written to disk as `model.json`, see `ModelJSON`.\n */\nexport declare interface ModelArtifacts {\n /**\n * Model topology.\n *\n * For Keras-style `tf.Model`s, this is a JSON object.\n * For TensorFlow-style models (e.g., `SavedModel`), this is the JSON\n * encoding of the `GraphDef` protocol buffer.\n */\n modelTopology?: {}|ArrayBuffer;\n\n /**\n * Serialized configuration for the model's training.\n */\n trainingConfig?: TrainingConfig;\n\n /**\n * Weight specifications.\n *\n * This corresponds to the weightsData below.\n */\n weightSpecs?: WeightsManifestEntry[];\n\n /**\n * Binary buffer for all weight values concatenated in the order specified\n * by `weightSpecs`.\n */\n weightData?: ArrayBuffer;\n\n /**\n * Hard-coded format name for models saved from TensorFlow.js or converted\n * by TensorFlow.js Converter.\n */\n format?: string;\n\n /**\n * What library is responsible for originally generating this artifact.\n *\n * Used for debugging purposes. E.g., 'TensorFlow.js v1.0.0'.\n */\n generatedBy?: string;\n\n /**\n * What library or tool is responsible for converting the original model\n * to this format, applicable only if the model is output by a converter.\n *\n * Used for debugging purposes. E.g., 'TensorFlow.js Converter v1.0.0'.\n *\n * A value of `null` means the model artifacts are generated without any\n * conversion process (e.g., saved directly from a TensorFlow.js\n * `tf.LayersModel` instance.)\n */\n convertedBy?: string|null;\n\n /**\n * Inputs and outputs signature for saved model.\n */\n signature?: {};\n\n /**\n * User-defined metadata about the model.\n */\n userDefinedMetadata?: {[key: string]: {}};\n\n /**\n * Initializer for the model.\n */\n modelInitializer?: {};\n\n /**\n * Inputs and outputs signature for model initializer.\n */\n initializerSignature?: {};\n}\n\n/**\n * The on-disk format of the `model.json` file.\n *\n * TF.js 1.0 always populates the optional fields when writing model.json.\n * Prior versions did not provide those fields.\n */\nexport declare interface ModelJSON {\n /**\n * Model topology.\n *\n * For Keras-style `tf.Model`s, this is a JSON object.\n * For TensorFlow-style models (e.g., `SavedModel`), this is the JSON\n * encoding of the `GraphDef` protocol buffer.\n */\n modelTopology: {};\n\n /** Model training configuration. */\n trainingConfig?: TrainingConfig;\n\n /**\n * Weights manifest.\n *\n * The weights manifest consists of an ordered list of weight-manifest\n * groups. 
Each weight-manifest group consists of a number of weight values\n * stored in a number of paths. See the documentation of\n * `WeightsManifestConfig` for more details.\n */\n weightsManifest: WeightsManifestConfig;\n\n /**\n * Hard-coded format name for models saved from TensorFlow.js or converted\n * by TensorFlow.js Converter.\n */\n format?: string;\n\n /**\n * What library is responsible for originally generating this artifact.\n *\n * Used for debugging purposes. E.g., 'TensorFlow.js v1.0.0'.\n */\n generatedBy?: string;\n\n /**\n * What library or tool is responsible for converting the original model\n * to this format, applicable only if the model is output by a converter.\n *\n * Used for debugging purposes. E.g., 'TensorFlow.js Converter v1.0.0'.\n *\n * A value of `null` means the model artifacts are generated without any\n * conversion process (e.g., saved directly from a TensorFlow.js\n * `tf.LayersModel` instance.)\n */\n convertedBy?: string|null;\n\n /**\n * Inputs and outputs signature for saved model.\n */\n signature?: {};\n\n /**\n * User-defined metadata about the model.\n */\n userDefinedMetadata?: {[key: string]: {}};\n\n /**\n * Initializer for the model.\n */\n modelInitializer?: {};\n\n /**\n * Inputs and outputs signature for model initializer.\n */\n initializerSignature?: {};\n}\n\n/**\n * Type definition for handlers of loading operations.\n */\nexport type LoadHandler = () => Promise;\n\n/**\n * Type definition for handlers of saving operations.\n */\nexport type SaveHandler = (modelArtifact: ModelArtifacts) =>\n Promise;\n\n/**\n * Interface for a model import/export handler.\n *\n * The `save` and `load` handlers are both optional, in order to allow handlers\n * that support only saving or loading.\n */\n// tslint:disable-next-line:interface-name\nexport interface IOHandler {\n save?: SaveHandler;\n load?: LoadHandler;\n}\n\n/**\n * Type definition for handlers of synchronous loading operations.\n */\nexport type LoadHandlerSync = () => ModelArtifacts;\n\n/**\n * Type definition for handlers of synchronous saving operations.\n */\nexport type SaveHandlerSync = (modelArtifact: ModelArtifacts) => SaveResult;\n\n/**\n * Interface for a synchronous model import/export handler.\n *\n * The `save` and `load` handlers are both optional, in order to allow handlers\n * that support only saving or loading.\n */\n// tslint:disable-next-line:interface-name\nexport type IOHandlerSync = {\n save?: SaveHandlerSync;\n load?: LoadHandlerSync;\n};\n\n/**\n * An interface for the manager of a model store.\n *\n * A model store is defined as a storage medium on which multiple models can\n * be stored. Each stored model has a unique `path` as its identifier.\n * A `ModelStoreManager` for the store allows actions including\n *\n * - Listing the models stored in the store.\n * - Deleting a model from the store.\n */\nexport interface ModelStoreManager {\n /**\n * List all models in the model store.\n *\n * @returns A dictionary mapping paths of existing models to their\n * model artifacts info. 
Model artifacts info include type of the model's\n * topology, byte sizes of the topology, weights, etc.\n */\n listModels(): Promise<{[path: string]: ModelArtifactsInfo}>;\n\n /**\n * Remove a model specified by `path`.\n *\n * @param path\n * @returns ModelArtifactsInfo of the deleted model (if and only if deletion\n * is successful).\n * @throws Error if deletion fails, e.g., if no model exists at `path`.\n */\n removeModel(path: string): Promise;\n}\n\n/**\n * Callback for the progress of a long-running action such as an HTTP\n * request for a large binary object.\n *\n * `fraction` should be a number in the [0, 1] interval, indicating how\n * much of the action has completed.\n */\nexport type OnProgressCallback = (fraction: number) => void;\n\n/** @innamespace io */\nexport interface LoadOptions {\n /**\n * RequestInit (options) for HTTP requests.\n *\n * For detailed information on the supported fields, see\n * [https://developer.mozilla.org/en-US/docs/Web/API/Request/Request](\n * https://developer.mozilla.org/en-US/docs/Web/API/Request/Request)\n */\n requestInit?: RequestInit;\n\n /**\n * Progress callback.\n */\n onProgress?: OnProgressCallback;\n\n /**\n * A function used to override the `window.fetch` function.\n */\n fetchFunc?: Function;\n\n /**\n * Strict loading model: whether extraneous weights or missing\n * weights should trigger an `Error`.\n *\n * If `true`, require that the provided weights exactly match those\n * required by the layers. `false` means that both extra weights\n * and missing weights will be silently ignored.\n *\n * Default: `true`.\n */\n strict?: boolean;\n\n /**\n * Path prefix for weight files, by default this is calculated from the\n * path of the model JSON file.\n *\n * For instance, if the path to the model JSON file is\n * `http://localhost/foo/model.json`, then the default path prefix will be\n * `http://localhost/foo/`. If a weight file has the path value\n * `group1-shard1of2` in the weight manifest, then the weight file will be\n * loaded from `http://localhost/foo/group1-shard1of2` by default. However,\n * if you provide a `weightPathPrefix` value of\n * `http://localhost/foo/alt-weights`, then the weight file will be loaded\n * from the path `http://localhost/foo/alt-weights/group1-shard1of2` instead.\n */\n weightPathPrefix?: string;\n\n /**\n * Whether the module or model is to be loaded from TF Hub.\n *\n * Setting this to `true` allows passing a TF-Hub module URL, omitting the\n * standard model file name and the query parameters.\n *\n * Default: `false`.\n */\n fromTFHub?: boolean;\n\n /**\n * An async function to convert weight file name to URL. The weight file\n * names are stored in model.json's weightsManifest.paths field. By default we\n * consider weight files are colocated with the model.json file. For example:\n * model.json URL: https://www.google.com/models/1/model.json\n * group1-shard1of1.bin url:\n * https://www.google.com/models/1/group1-shard1of1.bin\n *\n * With this func you can convert the weight file name to any URL.\n */\n weightUrlConverter?: (weightFileName: string) => Promise;\n}\n\n/**\n * Additional options for Platform.fetch\n */\nexport interface RequestDetails {\n /**\n * Is this request for a binary file (as opposed to a json file)\n */\n isBinary?: boolean;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {complex} from '../ops/complex';\nimport {tensor} from '../ops/tensor';\nimport {NamedTensor, NamedTensorMap} from '../tensor_types';\nimport {TypedArray} from '../types';\nimport {sizeFromShape} from '../util';\n\nimport {DTYPE_VALUE_SIZE_MAP, ModelArtifacts, ModelArtifactsInfo, ModelJSON, WeightGroup, WeightsManifestConfig, WeightsManifestEntry} from './types';\n\n/** Number of bytes reserved for the length of the string. (32bit integer). */\nconst NUM_BYTES_STRING_LENGTH = 4;\n\n/**\n * Encode a map from names to weight values as an ArrayBuffer, along with an\n * `Array` of `WeightsManifestEntry` as specification of the encoded weights.\n *\n * This function does not perform sharding.\n *\n * This function is the reverse of `decodeWeights`.\n *\n * @param tensors A map (\"dict\") from names to tensors.\n * @param group Group to which the weights belong (optional).\n * @returns A `Promise` of\n * - A flat `ArrayBuffer` with all the binary values of the `Tensor`s\n * concatenated.\n * - An `Array` of `WeightManifestEntry`s, carrying information including\n * tensor names, `dtype`s and shapes.\n * @throws Error: on unsupported tensor `dtype`.\n */\nexport async function encodeWeights(\n tensors: NamedTensorMap|NamedTensor[], group?: WeightGroup):\n Promise<{data: ArrayBuffer, specs: WeightsManifestEntry[]}> {\n // TODO(adarob, cais): Support quantization.\n const specs: WeightsManifestEntry[] = [];\n const dataPromises: Array> = [];\n\n const names: string[] = Array.isArray(tensors) ?\n tensors.map(tensor => tensor.name) :\n Object.keys(tensors);\n\n for (let i = 0; i < names.length; ++i) {\n const name = names[i];\n const t = Array.isArray(tensors) ? 
tensors[i].tensor : tensors[name];\n if (t.dtype !== 'float32' && t.dtype !== 'int32' && t.dtype !== 'bool' &&\n t.dtype !== 'string' && t.dtype !== 'complex64') {\n throw new Error(`Unsupported dtype in weight '${name}': ${t.dtype}`);\n }\n const spec: WeightsManifestEntry = {name, shape: t.shape, dtype: t.dtype};\n if (t.dtype === 'string') {\n const utf8bytes = new Promise(async resolve => {\n const vals = await t.bytes() as Uint8Array[];\n const totalNumBytes = vals.reduce((p, c) => p + c.length, 0) +\n NUM_BYTES_STRING_LENGTH * vals.length;\n const bytes = new Uint8Array(totalNumBytes);\n let offset = 0;\n for (let i = 0; i < vals.length; i++) {\n const val = vals[i];\n const bytesOfLength =\n new Uint8Array(new Uint32Array([val.length]).buffer);\n bytes.set(bytesOfLength, offset);\n offset += NUM_BYTES_STRING_LENGTH;\n bytes.set(val, offset);\n offset += val.length;\n }\n resolve(bytes);\n });\n dataPromises.push(utf8bytes);\n } else {\n dataPromises.push(t.data());\n }\n if (group != null) {\n spec.group = group;\n }\n specs.push(spec);\n }\n\n const tensorValues = await Promise.all(dataPromises);\n return {data: concatenateTypedArrays(tensorValues), specs};\n}\n\n/**\n * Decode flat ArrayBuffer as weights.\n *\n * This function does not handle sharding.\n *\n * This function is the reverse of `encodeWeights`.\n *\n * @param buffer A flat ArrayBuffer carrying the binary values of the tensors\n * concatenated in the order specified in `specs`.\n * @param specs Specifications of the names, dtypes and shapes of the tensors\n * whose value are encoded by `buffer`.\n * @return A map from tensor name to tensor value, with the names corresponding\n * to names in `specs`.\n * @throws Error, if any of the tensors has unsupported dtype.\n */\nexport function decodeWeights(\n buffer: ArrayBuffer, specs: WeightsManifestEntry[]): NamedTensorMap {\n // TODO(adarob, cais): Support quantization.\n const out: NamedTensorMap = {};\n let float16Decode: (buffer: Uint16Array) => Float32Array | undefined;\n let offset = 0;\n for (const spec of specs) {\n const name = spec.name;\n const dtype = spec.dtype;\n const shape = spec.shape;\n const size = sizeFromShape(shape);\n let values: TypedArray|string[]|Uint8Array[];\n\n if ('quantization' in spec) {\n const quantization = spec.quantization;\n if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {\n if (!('min' in quantization && 'scale' in quantization)) {\n throw new Error(\n `Weight ${spec.name} with quantization ${quantization.dtype} ` +\n `doesn't have corresponding metadata min and scale.`);\n }\n } else if (quantization.dtype === 'float16') {\n if (dtype !== 'float32') {\n throw new Error(\n `Weight ${spec.name} is quantized with ${quantization.dtype} ` +\n `which only supports weights of type float32 not ${dtype}.`);\n }\n } else {\n throw new Error(\n `Weight ${spec.name} has unknown ` +\n `quantization dtype ${quantization.dtype}. 
` +\n `Supported quantization dtypes are: ` +\n `'uint8', 'uint16', and 'float16'.`);\n }\n const quantizationSizeFactor = DTYPE_VALUE_SIZE_MAP[quantization.dtype];\n const byteBuffer =\n buffer.slice(offset, offset + size * quantizationSizeFactor);\n const quantizedArray = (quantization.dtype === 'uint8') ?\n new Uint8Array(byteBuffer) :\n new Uint16Array(byteBuffer);\n if (dtype === 'float32') {\n if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {\n values = new Float32Array(quantizedArray.length);\n for (let i = 0; i < quantizedArray.length; i++) {\n const v = quantizedArray[i];\n values[i] = v * quantization.scale + quantization.min;\n }\n } else if (quantization.dtype === 'float16') {\n if (float16Decode === undefined) {\n float16Decode = getFloat16Decoder();\n }\n values = float16Decode(quantizedArray as Uint16Array);\n } else {\n throw new Error(\n `Unsupported quantization type ${quantization.dtype} ` +\n `for weight type float32.`);\n }\n } else if (dtype === 'int32') {\n if (quantization.dtype !== 'uint8' && quantization.dtype !== 'uint16') {\n throw new Error(\n `Unsupported quantization type ${quantization.dtype} ` +\n `for weight type int32.`);\n }\n values = new Int32Array(quantizedArray.length);\n for (let i = 0; i < quantizedArray.length; i++) {\n const v = quantizedArray[i];\n values[i] = Math.round(v * quantization.scale + quantization.min);\n }\n } else {\n throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`);\n }\n offset += size * quantizationSizeFactor;\n } else if (dtype === 'string') {\n const size = sizeFromShape(spec.shape);\n values = [];\n for (let i = 0; i < size; i++) {\n const byteLength = new Uint32Array(\n buffer.slice(offset, offset + NUM_BYTES_STRING_LENGTH))[0];\n offset += NUM_BYTES_STRING_LENGTH;\n const bytes = new Uint8Array(buffer.slice(offset, offset + byteLength));\n (values as Uint8Array[]).push(bytes);\n offset += byteLength;\n }\n } else {\n const dtypeFactor = DTYPE_VALUE_SIZE_MAP[dtype];\n const byteBuffer = buffer.slice(offset, offset + size * dtypeFactor);\n\n if (dtype === 'float32') {\n values = new Float32Array(byteBuffer);\n } else if (dtype === 'int32') {\n values = new Int32Array(byteBuffer);\n } else if (dtype === 'bool') {\n values = new Uint8Array(byteBuffer);\n } else if (dtype === 'complex64') {\n values = new Float32Array(byteBuffer);\n const real = new Float32Array(values.length / 2);\n const image = new Float32Array(values.length / 2);\n for (let i = 0; i < real.length; i++) {\n real[i] = values[i * 2];\n image[i] = values[i * 2 + 1];\n }\n const realTensor = tensor(real, shape, 'float32');\n const imageTensor = tensor(image, shape, 'float32');\n out[name] = complex(realTensor, imageTensor);\n realTensor.dispose();\n imageTensor.dispose();\n } else {\n throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`);\n }\n offset += size * dtypeFactor;\n }\n if (dtype !== 'complex64') {\n out[name] = tensor(values, shape, dtype);\n }\n }\n return out;\n}\n\n/**\n * Concatenate TypedArrays into an ArrayBuffer.\n */\nexport function concatenateTypedArrays(xs: TypedArray[]): ArrayBuffer {\n // TODO(adarob, cais): Support quantization.\n if (xs === null) {\n throw new Error(`Invalid input value: ${JSON.stringify(xs)}`);\n }\n\n let totalByteLength = 0;\n\n // `normalizedXs` is here for this reason: a `TypedArray`'s `buffer'\n // can have a different byte length from that of the `TypedArray` itself,\n // for example, when the `TypedArray` is created from an offset in an\n // `ArrayBuffer`. 
`normliazedXs` holds `TypedArray`s whose `buffer`s match\n // the `TypedArray` in byte length. If an element of `xs` does not show\n // this property, a new `TypedArray` that satisfy this property will be\n // constructed and pushed into `normalizedXs`.\n const normalizedXs: TypedArray[] = [];\n xs.forEach((x: TypedArray) => {\n totalByteLength += x.byteLength;\n // tslint:disable:no-any\n normalizedXs.push(\n x.byteLength === x.buffer.byteLength ? x :\n new (x.constructor as any)(x));\n if (!(x as any instanceof Float32Array || x as any instanceof Int32Array ||\n x as any instanceof Uint8Array)) {\n throw new Error(`Unsupported TypedArray subtype: ${x.constructor.name}`);\n }\n // tslint:enable:no-any\n });\n\n const y = new Uint8Array(totalByteLength);\n let offset = 0;\n normalizedXs.forEach((x: TypedArray) => {\n y.set(new Uint8Array(x.buffer), offset);\n offset += x.byteLength;\n });\n\n return y.buffer;\n}\n\n// Use Buffer on Node.js instead of Blob/atob/btoa\nconst useNodeBuffer = typeof Buffer !== 'undefined' &&\n (typeof Blob === 'undefined' || typeof atob === 'undefined' ||\n typeof btoa === 'undefined');\n\n/**\n * Calculate the byte length of a JavaScript string.\n *\n * Note that a JavaScript string can contain wide characters, therefore the\n * length of the string is not necessarily equal to the byte length.\n *\n * @param str Input string.\n * @returns Byte length.\n */\nexport function stringByteLength(str: string): number {\n if (useNodeBuffer) {\n return Buffer.byteLength(str);\n }\n return new Blob([str]).size;\n}\n\n/**\n * Encode an ArrayBuffer as a base64 encoded string.\n *\n * @param buffer `ArrayBuffer` to be converted.\n * @returns A string that base64-encodes `buffer`.\n */\nexport function arrayBufferToBase64String(buffer: ArrayBuffer): string {\n if (useNodeBuffer) {\n return Buffer.from(buffer).toString('base64');\n }\n const buf = new Uint8Array(buffer);\n let s = '';\n for (let i = 0, l = buf.length; i < l; i++) {\n s += String.fromCharCode(buf[i]);\n }\n return btoa(s);\n}\n\n/**\n * Decode a base64 string as an ArrayBuffer.\n *\n * @param str Base64 string.\n * @returns Decoded `ArrayBuffer`.\n */\nexport function base64StringToArrayBuffer(str: string): ArrayBuffer {\n if (useNodeBuffer) {\n const buf = Buffer.from(str, 'base64');\n return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength);\n }\n const s = atob(str);\n const buffer = new Uint8Array(s.length);\n for (let i = 0; i < s.length; ++i) {\n buffer.set([s.charCodeAt(i)], i);\n }\n return buffer.buffer;\n}\n\n/**\n * Concatenate a number of ArrayBuffers into one.\n *\n * @param buffers A number of array buffers to concatenate.\n * @returns Result of concatenating `buffers` in order.\n */\nexport function concatenateArrayBuffers(buffers: ArrayBuffer[]): ArrayBuffer {\n if (buffers.length === 1) {\n return buffers[0];\n }\n\n let totalByteLength = 0;\n buffers.forEach((buffer: ArrayBuffer) => {\n totalByteLength += buffer.byteLength;\n });\n\n const temp = new Uint8Array(totalByteLength);\n let offset = 0;\n buffers.forEach((buffer: ArrayBuffer) => {\n temp.set(new Uint8Array(buffer), offset);\n offset += buffer.byteLength;\n });\n return temp.buffer;\n}\n\n/**\n * Get the basename of a path.\n *\n * Behaves in a way analogous to Linux's basename command.\n *\n * @param path\n */\nexport function basename(path: string): string {\n const SEPARATOR = '/';\n path = path.trim();\n while (path.endsWith(SEPARATOR)) {\n path = path.slice(0, path.length - 1);\n }\n const items = 
path.split(SEPARATOR);\n return items[items.length - 1];\n}\n\n/**\n * Create `ModelJSON` from `ModelArtifacts`.\n *\n * @param artifacts Model artifacts, describing the model and its weights.\n * @param manifest Weight manifest, describing where the weights of the\n * `ModelArtifacts` are stored, and some metadata about them.\n * @returns Object representing the `model.json` file describing the model\n * artifacts and weights\n */\nexport function getModelJSONForModelArtifacts(\n artifacts: ModelArtifacts, manifest: WeightsManifestConfig): ModelJSON {\n const result: ModelJSON = {\n modelTopology: artifacts.modelTopology,\n format: artifacts.format,\n generatedBy: artifacts.generatedBy,\n convertedBy: artifacts.convertedBy,\n weightsManifest: manifest\n };\n if (artifacts.signature != null) {\n result.signature = artifacts.signature;\n }\n if (artifacts.userDefinedMetadata != null) {\n result.userDefinedMetadata = artifacts.userDefinedMetadata;\n }\n if (artifacts.modelInitializer != null) {\n result.modelInitializer = artifacts.modelInitializer;\n }\n if (artifacts.initializerSignature != null) {\n result.initializerSignature = artifacts.initializerSignature;\n }\n if (artifacts.trainingConfig != null) {\n result.trainingConfig = artifacts.trainingConfig;\n }\n return result;\n}\n\n/**\n * Create `ModelArtifacts` from a JSON file and weights.\n *\n * @param modelJSON Object containing the parsed JSON of `model.json`\n * @param weightSpecs The list of WeightsManifestEntry for the model. Must be\n * passed if the modelJSON has a weightsManifest.\n * @param weightData An ArrayBuffer of weight data for the model corresponding\n * to the weights in weightSpecs. Must be passed if the modelJSON has a\n * weightsManifest.\n * @returns A Promise of the `ModelArtifacts`, as described by the JSON file.\n */\nexport function getModelArtifactsForJSONSync(\n modelJSON: ModelJSON, weightSpecs?: WeightsManifestEntry[],\n weightData?: ArrayBuffer): ModelArtifacts {\n\n const modelArtifacts: ModelArtifacts = {\n modelTopology: modelJSON.modelTopology,\n format: modelJSON.format,\n generatedBy: modelJSON.generatedBy,\n convertedBy: modelJSON.convertedBy\n };\n\n if (modelJSON.trainingConfig != null) {\n modelArtifacts.trainingConfig = modelJSON.trainingConfig;\n }\n if (modelJSON.weightsManifest != null) {\n if (!weightSpecs) {\n throw new Error('modelJSON has weightsManifest but weightSpecs is null');\n }\n if (!weightData) {\n throw new Error('modelJSON has weightsManifest but weightData is null');\n }\n modelArtifacts.weightSpecs = weightSpecs;\n modelArtifacts.weightData = weightData;\n }\n if (modelJSON.signature != null) {\n modelArtifacts.signature = modelJSON.signature;\n }\n if (modelJSON.userDefinedMetadata != null) {\n modelArtifacts.userDefinedMetadata = modelJSON.userDefinedMetadata;\n }\n if (modelJSON.modelInitializer != null) {\n modelArtifacts.modelInitializer = modelJSON.modelInitializer;\n }\n if (modelJSON.initializerSignature != null) {\n modelArtifacts.initializerSignature = modelJSON.initializerSignature;\n }\n\n return modelArtifacts;\n}\n\n/**\n * Create `ModelArtifacts` from a JSON file.\n *\n * @param modelJSON Object containing the parsed JSON of `model.json`\n * @param loadWeights Function that takes the JSON file's weights manifest,\n * reads weights from the listed path(s), and returns a Promise of the\n * weight manifest entries along with the weights data.\n * @returns A Promise of the `ModelArtifacts`, as described by the JSON file.\n */\nexport async function 
getModelArtifactsForJSON(\n modelJSON: ModelJSON,\n loadWeights: (weightsManifest: WeightsManifestConfig) => Promise<[\n /* weightSpecs */ WeightsManifestEntry[], /* weightData */ ArrayBuffer\n ]>): Promise {\n let weightSpecs: WeightsManifestEntry[] | undefined;\n let weightData: ArrayBuffer | undefined;\n\n if (modelJSON.weightsManifest != null) {\n [weightSpecs, weightData] = await loadWeights(modelJSON.weightsManifest);\n }\n\n return getModelArtifactsForJSONSync(modelJSON, weightSpecs, weightData);\n}\n\n/**\n * Populate ModelArtifactsInfo fields for a model with JSON topology.\n * @param modelArtifacts\n * @returns A ModelArtifactsInfo object.\n */\nexport function getModelArtifactsInfoForJSON(modelArtifacts: ModelArtifacts):\n ModelArtifactsInfo {\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error('Expected JSON model topology, received ArrayBuffer.');\n }\n\n return {\n dateSaved: new Date(),\n modelTopologyType: 'JSON',\n modelTopologyBytes: modelArtifacts.modelTopology == null ?\n 0 :\n stringByteLength(JSON.stringify(modelArtifacts.modelTopology)),\n weightSpecsBytes: modelArtifacts.weightSpecs == null ?\n 0 :\n stringByteLength(JSON.stringify(modelArtifacts.weightSpecs)),\n weightDataBytes: modelArtifacts.weightData == null ?\n 0 :\n modelArtifacts.weightData.byteLength,\n };\n}\n\n/**\n * Concatenate the weights stored in a WeightsManifestConfig into a list of\n * WeightsManifestEntry\n *\n * @param weightsManifest The WeightsManifestConfig to extract weights from.\n * @returns A list of WeightsManifestEntry of the weights in the weightsManifest\n */\nexport function getWeightSpecs(weightsManifest: WeightsManifestConfig):\n WeightsManifestEntry[] {\n const weightSpecs: WeightsManifestEntry[] = [];\n for (const entry of weightsManifest) {\n weightSpecs.push(...entry.weights);\n }\n return weightSpecs;\n}\n\n/**\n * Computes mantisa table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 2048 mantissa lookup values.\n */\nfunction computeFloat16MantisaTable(): Uint32Array {\n const convertMantissa = (i: number): number => {\n let m = i << 13;\n let e = 0;\n\n while ((m & 0x00800000) === 0) {\n e -= 0x00800000;\n m <<= 1;\n }\n m &= ~0x00800000;\n e += 0x38800000;\n\n return m | e;\n };\n\n const mantisaTable = new Uint32Array(2048);\n\n mantisaTable[0] = 0;\n for (let i = 1; i < 1024; i++) {\n mantisaTable[i] = convertMantissa(i);\n }\n for (let i = 1024; i < 2048; i++) {\n mantisaTable[i] = 0x38000000 + ((i - 1024) << 13);\n }\n\n return mantisaTable;\n}\n\n/**\n * Computes exponent table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 64 exponent lookup values.\n */\nfunction computeFloat16ExponentTable(): Uint32Array {\n const exponentTable = new Uint32Array(64);\n\n exponentTable[0] = 0;\n exponentTable[31] = 0x47800000;\n exponentTable[32] = 0x80000000;\n exponentTable[63] = 0xc7800000;\n for (let i = 1; i < 31; i++) {\n exponentTable[i] = i << 23;\n }\n for (let i = 33; i < 63; i++) {\n exponentTable[i] = 0x80000000 + ((i - 32) << 23);\n }\n\n return exponentTable;\n}\n\n/**\n * Computes offset table for casting Float16 to Float32\n * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n *\n * @returns Uint32Array, 6d offset values.\n */\nfunction computeFloat16OffsetTable(): Uint32Array {\n const offsetTable = new Uint32Array(64);\n\n for (let i = 0; i < 64; i++) {\n 
offsetTable[i] = 1024;\n }\n offsetTable[0] = offsetTable[32] = 0;\n\n return offsetTable;\n}\n\n/**\n * Retrieve a Float16 decoder which will decode a ByteArray of Float16 values\n * to a Float32Array.\n *\n * @returns Function (buffer: Uint16Array) => Float32Array which decodes\n * the Uint16Array of Float16 bytes to a Float32Array.\n */\nexport function getFloat16Decoder(): (buffer: Uint16Array) => Float32Array {\n // Algorithm is based off of\n // http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf\n\n // Cache lookup tables\n const mantisaTable = computeFloat16MantisaTable();\n const exponentTable = computeFloat16ExponentTable();\n const offsetTable = computeFloat16OffsetTable();\n\n return (quantizedArray: Uint16Array) => {\n const buffer = new ArrayBuffer(4 * quantizedArray.length);\n const bufferUint32View = new Uint32Array(buffer);\n for (let index = 0; index < quantizedArray.length; index++) {\n const float16Bits = quantizedArray[index];\n const float32Bits =\n mantisaTable[offsetTable[float16Bits >> 10] + (float16Bits & 0x3ff)] +\n exponentTable[float16Bits >> 10];\n bufferUint32View[index] = float32Bits;\n }\n return new Float32Array(buffer);\n };\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {IOHandler, LoadOptions} from './types';\n\nexport type IORouter = (url: string|string[], loadOptions?: LoadOptions) =>\n IOHandler;\n\nexport class IORouterRegistry {\n // Singleton instance.\n private static instance: IORouterRegistry;\n\n private saveRouters: IORouter[];\n private loadRouters: IORouter[];\n\n private constructor() {\n this.saveRouters = [];\n this.loadRouters = [];\n }\n\n private static getInstance(): IORouterRegistry {\n if (IORouterRegistry.instance == null) {\n IORouterRegistry.instance = new IORouterRegistry();\n }\n return IORouterRegistry.instance;\n }\n\n /**\n * Register a save-handler router.\n *\n * @param saveRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `save` method defined or `null`.\n */\n static registerSaveRouter(saveRouter: IORouter) {\n IORouterRegistry.getInstance().saveRouters.push(saveRouter);\n }\n\n /**\n * Register a load-handler router.\n *\n * @param loadRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `load` method defined or `null`.\n */\n static registerLoadRouter(loadRouter: IORouter) {\n IORouterRegistry.getInstance().loadRouters.push(loadRouter);\n }\n\n /**\n * Look up IOHandler for saving, given a URL-like string.\n *\n * @param url\n * @returns If only one match is found, an instance of IOHandler with the\n * `save` method defined. 
If no match is found, `null`.\n * @throws Error, if more than one match is found.\n */\n static getSaveHandlers(url: string|string[]): IOHandler[] {\n return IORouterRegistry.getHandlers(url, 'save');\n }\n\n /**\n * Look up IOHandler for loading, given a URL-like string.\n *\n * @param url\n * @param loadOptions Optional, custom load options.\n * @returns All valid handlers for `url`, given the currently registered\n * handler routers.\n */\n static getLoadHandlers(url: string|string[], loadOptions?: LoadOptions):\n IOHandler[] {\n return IORouterRegistry.getHandlers(url, 'load', loadOptions);\n }\n\n private static getHandlers(\n url: string|string[], handlerType: 'save'|'load',\n loadOptions?: LoadOptions): IOHandler[] {\n const validHandlers: IOHandler[] = [];\n const routers = handlerType === 'load' ?\n IORouterRegistry.getInstance().loadRouters :\n IORouterRegistry.getInstance().saveRouters;\n routers.forEach(router => {\n const handler = router(url, loadOptions);\n if (handler !== null) {\n validHandlers.push(handler);\n }\n });\n return validHandlers;\n }\n}\n\nexport const registerSaveRouter = (loudRouter: IORouter) =>\n IORouterRegistry.registerSaveRouter(loudRouter);\nexport const registerLoadRouter = (loudRouter: IORouter) =>\n IORouterRegistry.registerLoadRouter(loudRouter);\nexport const getSaveHandlers = (url: string|string[]) =>\n IORouterRegistry.getSaveHandlers(url);\nexport const getLoadHandlers =\n (url: string|string[], loadOptions?: LoadOptions) =>\n IORouterRegistry.getLoadHandlers(url, loadOptions);\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport '../flags';\n\nimport {env} from '../environment';\n\nimport {getModelArtifactsInfoForJSON} from './io_utils';\nimport {IORouter, IORouterRegistry} from './router_registry';\nimport {IOHandler, ModelArtifacts, ModelArtifactsInfo, ModelStoreManager, SaveResult} from './types';\n\nconst DATABASE_NAME = 'tensorflowjs';\nconst DATABASE_VERSION = 1;\n\n// Model data and ModelArtifactsInfo (metadata) are stored in two separate\n// stores for efficient access of the list of stored models and their metadata.\n// 1. The object store for model data: topology, weights and weight manifests.\nconst MODEL_STORE_NAME = 'models_store';\n// 2. 
The object store for ModelArtifactsInfo, including meta-information such\n// as the type of topology (JSON vs binary), byte size of the topology, byte\n// size of the weights, etc.\nconst INFO_STORE_NAME = 'model_info_store';\n\n/**\n * Delete the entire database for tensorflow.js, including the models store.\n */\nexport async function deleteDatabase(): Promise {\n const idbFactory = getIndexedDBFactory();\n\n return new Promise((resolve, reject) => {\n const deleteRequest = idbFactory.deleteDatabase(DATABASE_NAME);\n deleteRequest.onsuccess = () => resolve();\n deleteRequest.onerror = error => reject(error);\n });\n}\n\nfunction getIndexedDBFactory(): IDBFactory {\n if (!env().getBool('IS_BROWSER')) {\n // TODO(cais): Add more info about what IOHandler subtypes are available.\n // Maybe point to a doc page on the web and/or automatically determine\n // the available IOHandlers and print them in the error message.\n throw new Error(\n 'Failed to obtain IndexedDB factory because the current environment' +\n 'is not a web browser.');\n }\n // tslint:disable-next-line:no-any\n const theWindow: any = typeof window === 'undefined' ? self : window;\n const factory = theWindow.indexedDB || theWindow.mozIndexedDB ||\n theWindow.webkitIndexedDB || theWindow.msIndexedDB ||\n theWindow.shimIndexedDB;\n if (factory == null) {\n throw new Error(\n 'The current browser does not appear to support IndexedDB.');\n }\n return factory;\n}\n\nfunction setUpDatabase(openRequest: IDBRequest) {\n const db = openRequest.result as IDBDatabase;\n db.createObjectStore(MODEL_STORE_NAME, {keyPath: 'modelPath'});\n db.createObjectStore(INFO_STORE_NAME, {keyPath: 'modelPath'});\n}\n\n/**\n * IOHandler subclass: Browser IndexedDB.\n *\n * See the doc string of `browserIndexedDB` for more details.\n */\nexport class BrowserIndexedDB implements IOHandler {\n protected readonly indexedDB: IDBFactory;\n protected readonly modelPath: string;\n\n static readonly URL_SCHEME = 'indexeddb://';\n\n constructor(modelPath: string) {\n this.indexedDB = getIndexedDBFactory();\n\n if (modelPath == null || !modelPath) {\n throw new Error(\n 'For IndexedDB, modelPath must not be null, undefined or empty.');\n }\n this.modelPath = modelPath;\n }\n\n async save(modelArtifacts: ModelArtifacts): Promise {\n // TODO(cais): Support saving GraphDef models.\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error(\n 'BrowserLocalStorage.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n\n return this.databaseAction(this.modelPath, modelArtifacts) as\n Promise;\n }\n\n async load(): Promise {\n return this.databaseAction(this.modelPath) as Promise;\n }\n\n /**\n * Perform database action to put model artifacts into or read model artifacts\n * from IndexedDB object store.\n *\n * Whether the action is put or get depends on whether `modelArtifacts` is\n * specified. 
If it is specified, the action will be put; otherwise the action\n * will be get.\n *\n * @param modelPath A unique string path for the model.\n * @param modelArtifacts If specified, it will be the model artifacts to be\n * stored in IndexedDB.\n * @returns A `Promise` of `SaveResult`, if the action is put, or a `Promise`\n * of `ModelArtifacts`, if the action is get.\n */\n private databaseAction(modelPath: string, modelArtifacts?: ModelArtifacts):\n Promise {\n return new Promise((resolve, reject) => {\n const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n\n if (modelArtifacts == null) {\n // Read model out from object store.\n const modelTx = db.transaction(MODEL_STORE_NAME, 'readonly');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const getRequest = modelStore.get(this.modelPath);\n getRequest.onsuccess = () => {\n if (getRequest.result == null) {\n db.close();\n return reject(new Error(\n `Cannot find model with path '${this.modelPath}' ` +\n `in IndexedDB.`));\n } else {\n resolve(getRequest.result.modelArtifacts);\n }\n };\n getRequest.onerror = error => {\n db.close();\n return reject(getRequest.error);\n };\n modelTx.oncomplete = () => db.close();\n } else {\n // Put model into object store.\n const modelArtifactsInfo: ModelArtifactsInfo =\n getModelArtifactsInfoForJSON(modelArtifacts);\n // First, put ModelArtifactsInfo into info store.\n const infoTx = db.transaction(INFO_STORE_NAME, 'readwrite');\n let infoStore = infoTx.objectStore(INFO_STORE_NAME);\n const putInfoRequest =\n infoStore.put({modelPath: this.modelPath, modelArtifactsInfo});\n let modelTx: IDBTransaction;\n putInfoRequest.onsuccess = () => {\n // Second, put model data into model store.\n modelTx = db.transaction(MODEL_STORE_NAME, 'readwrite');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const putModelRequest = modelStore.put({\n modelPath: this.modelPath,\n modelArtifacts,\n modelArtifactsInfo\n });\n putModelRequest.onsuccess = () => resolve({modelArtifactsInfo});\n putModelRequest.onerror = error => {\n // If the put-model request fails, roll back the info entry as\n // well.\n infoStore = infoTx.objectStore(INFO_STORE_NAME);\n const deleteInfoRequest = infoStore.delete(this.modelPath);\n deleteInfoRequest.onsuccess = () => {\n db.close();\n return reject(putModelRequest.error);\n };\n deleteInfoRequest.onerror = error => {\n db.close();\n return reject(putModelRequest.error);\n };\n };\n };\n putInfoRequest.onerror = error => {\n db.close();\n return reject(putInfoRequest.error);\n };\n infoTx.oncomplete = () => {\n if (modelTx == null) {\n db.close();\n } else {\n modelTx.oncomplete = () => db.close();\n }\n };\n }\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n}\n\nexport const indexedDBRouter: IORouter = (url: string|string[]) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n } else {\n if (!Array.isArray(url) && url.startsWith(BrowserIndexedDB.URL_SCHEME)) {\n return browserIndexedDB(url.slice(BrowserIndexedDB.URL_SCHEME.length));\n } else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(indexedDBRouter);\nIORouterRegistry.registerLoadRouter(indexedDBRouter);\n\n/**\n * Creates a browser IndexedDB IOHandler for saving and loading models.\n *\n * ```js\n * const model = tf.sequential();\n * model.add(\n * tf.layers.dense({units: 1, inputShape: [100], activation: 
'sigmoid'}));\n *\n * const saveResult = await model.save('indexeddb://MyModel'));\n * console.log(saveResult);\n * ```\n *\n * @param modelPath A unique identifier for the model to be saved. Must be a\n * non-empty string.\n * @returns An instance of `BrowserIndexedDB` (sublcass of `IOHandler`),\n * which can be used with, e.g., `tf.Model.save`.\n */\nexport function browserIndexedDB(modelPath: string): IOHandler {\n return new BrowserIndexedDB(modelPath);\n}\n\nfunction maybeStripScheme(key: string) {\n return key.startsWith(BrowserIndexedDB.URL_SCHEME) ?\n key.slice(BrowserIndexedDB.URL_SCHEME.length) :\n key;\n}\n\nexport class BrowserIndexedDBManager implements ModelStoreManager {\n private indexedDB: IDBFactory;\n\n constructor() {\n this.indexedDB = getIndexedDBFactory();\n }\n\n async listModels(): Promise<{[path: string]: ModelArtifactsInfo}> {\n return new Promise<{[path: string]: ModelArtifactsInfo}>(\n (resolve, reject) => {\n const openRequest =\n this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n const tx = db.transaction(INFO_STORE_NAME, 'readonly');\n const store = tx.objectStore(INFO_STORE_NAME);\n // tslint:disable:max-line-length\n // Need to cast `store` as `any` here because TypeScript's DOM\n // library does not have the `getAll()` method even though the\n // method is supported in the latest version of most mainstream\n // browsers:\n // https://developer.mozilla.org/en-US/docs/Web/API/IDBObjectStore/getAll\n // tslint:enable:max-line-length\n // tslint:disable-next-line:no-any\n const getAllInfoRequest = (store as any).getAll() as IDBRequest;\n getAllInfoRequest.onsuccess = () => {\n const out: {[path: string]: ModelArtifactsInfo} = {};\n for (const item of getAllInfoRequest.result) {\n out[item.modelPath] = item.modelArtifactsInfo;\n }\n resolve(out);\n };\n getAllInfoRequest.onerror = error => {\n db.close();\n return reject(getAllInfoRequest.error);\n };\n tx.oncomplete = () => db.close();\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n\n async removeModel(path: string): Promise {\n path = maybeStripScheme(path);\n return new Promise((resolve, reject) => {\n const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);\n openRequest.onupgradeneeded = () => setUpDatabase(openRequest);\n\n openRequest.onsuccess = () => {\n const db = openRequest.result;\n const infoTx = db.transaction(INFO_STORE_NAME, 'readwrite');\n const infoStore = infoTx.objectStore(INFO_STORE_NAME);\n\n const getInfoRequest = infoStore.get(path);\n let modelTx: IDBTransaction;\n getInfoRequest.onsuccess = () => {\n if (getInfoRequest.result == null) {\n db.close();\n return reject(new Error(\n `Cannot find model with path '${path}' ` +\n `in IndexedDB.`));\n } else {\n // First, delete the entry in the info store.\n const deleteInfoRequest = infoStore.delete(path);\n const deleteModelData = () => {\n // Second, delete the entry in the model store.\n modelTx = db.transaction(MODEL_STORE_NAME, 'readwrite');\n const modelStore = modelTx.objectStore(MODEL_STORE_NAME);\n const deleteModelRequest = modelStore.delete(path);\n deleteModelRequest.onsuccess = () =>\n resolve(getInfoRequest.result.modelArtifactsInfo);\n deleteModelRequest.onerror = error =>\n reject(getInfoRequest.error);\n };\n // Proceed with deleting model data regardless of whether deletion\n // of info data succeeds or not.\n deleteInfoRequest.onsuccess = 
deleteModelData;\n deleteInfoRequest.onerror = error => {\n deleteModelData();\n db.close();\n return reject(getInfoRequest.error);\n };\n }\n };\n getInfoRequest.onerror = error => {\n db.close();\n return reject(getInfoRequest.error);\n };\n\n infoTx.oncomplete = () => {\n if (modelTx == null) {\n db.close();\n } else {\n modelTx.oncomplete = () => db.close();\n }\n };\n };\n openRequest.onerror = error => reject(openRequest.error);\n });\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport '../flags';\nimport {env} from '../environment';\n\nimport {assert} from '../util';\nimport {arrayBufferToBase64String, base64StringToArrayBuffer, getModelArtifactsInfoForJSON} from './io_utils';\nimport {IORouter, IORouterRegistry} from './router_registry';\nimport {IOHandler, ModelArtifacts, ModelArtifactsInfo, ModelJSON, ModelStoreManager, SaveResult} from './types';\n\nconst PATH_SEPARATOR = '/';\nconst PATH_PREFIX = 'tensorflowjs_models';\nconst INFO_SUFFIX = 'info';\nconst MODEL_TOPOLOGY_SUFFIX = 'model_topology';\nconst WEIGHT_SPECS_SUFFIX = 'weight_specs';\nconst WEIGHT_DATA_SUFFIX = 'weight_data';\nconst MODEL_METADATA_SUFFIX = 'model_metadata';\n\n/**\n * Purge all tensorflow.js-saved model artifacts from local storage.\n *\n * @returns Paths of the models purged.\n */\nexport function purgeLocalStorageArtifacts(): string[] {\n if (!env().getBool('IS_BROWSER') || typeof window === 'undefined' ||\n typeof window.localStorage === 'undefined') {\n throw new Error(\n 'purgeLocalStorageModels() cannot proceed because local storage is ' +\n 'unavailable in the current environment.');\n }\n const LS = window.localStorage;\n const purgedModelPaths: string[] = [];\n for (let i = 0; i < LS.length; ++i) {\n const key = LS.key(i);\n const prefix = PATH_PREFIX + PATH_SEPARATOR;\n if (key.startsWith(prefix) && key.length > prefix.length) {\n LS.removeItem(key);\n const modelName = getModelPathFromKey(key);\n if (purgedModelPaths.indexOf(modelName) === -1) {\n purgedModelPaths.push(modelName);\n }\n }\n }\n return purgedModelPaths;\n}\n\ntype LocalStorageKeys = {\n /** Key of the localStorage entry storing `ModelArtifactsInfo`. 
*/\n info: string,\n /**\n * Key of the localStorage entry storing the 'modelTopology' key of\n * `model.json`\n */\n topology: string,\n /**\n * Key of the localStorage entry storing the `weightsManifest.weights` entries\n * of `model.json`\n */\n weightSpecs: string,\n /** Key of the localStorage entry storing the weight data in Base64 */\n weightData: string,\n /**\n * Key of the localStorage entry storing the remaining fields of `model.json`\n * @see {@link ModelMetadata}\n */\n modelMetadata: string,\n};\n\ntype ModelMetadata = Omit;\n\nfunction getModelKeys(path: string): LocalStorageKeys {\n return {\n info: [PATH_PREFIX, path, INFO_SUFFIX].join(PATH_SEPARATOR),\n topology: [PATH_PREFIX, path, MODEL_TOPOLOGY_SUFFIX].join(PATH_SEPARATOR),\n weightSpecs: [PATH_PREFIX, path, WEIGHT_SPECS_SUFFIX].join(PATH_SEPARATOR),\n weightData: [PATH_PREFIX, path, WEIGHT_DATA_SUFFIX].join(PATH_SEPARATOR),\n modelMetadata:\n [PATH_PREFIX, path, MODEL_METADATA_SUFFIX].join(PATH_SEPARATOR)\n };\n}\n\nfunction removeItems(keys: LocalStorageKeys): void {\n for (const key of Object.values(keys)) {\n window.localStorage.removeItem(key);\n }\n}\n\n/**\n * Get model path from a local-storage key.\n *\n * E.g., 'tensorflowjs_models/my/model/1/info' --> 'my/model/1'\n *\n * @param key\n */\nfunction getModelPathFromKey(key: string) {\n const items = key.split(PATH_SEPARATOR);\n if (items.length < 3) {\n throw new Error(`Invalid key format: ${key}`);\n }\n return items.slice(1, items.length - 1).join(PATH_SEPARATOR);\n}\n\nfunction maybeStripScheme(key: string) {\n return key.startsWith(BrowserLocalStorage.URL_SCHEME) ?\n key.slice(BrowserLocalStorage.URL_SCHEME.length) :\n key;\n}\n\n/**\n * IOHandler subclass: Browser Local Storage.\n *\n * See the doc string to `browserLocalStorage` for more details.\n */\nexport class BrowserLocalStorage implements IOHandler {\n protected readonly LS: Storage;\n protected readonly modelPath: string;\n protected readonly keys: LocalStorageKeys;\n\n static readonly URL_SCHEME = 'localstorage://';\n\n constructor(modelPath: string) {\n if (!env().getBool('IS_BROWSER') || typeof window === 'undefined' ||\n typeof window.localStorage === 'undefined') {\n // TODO(cais): Add more info about what IOHandler subtypes are\n // available.\n // Maybe point to a doc page on the web and/or automatically determine\n // the available IOHandlers and print them in the error message.\n throw new Error(\n 'The current environment does not support local storage.');\n }\n this.LS = window.localStorage;\n\n if (modelPath == null || !modelPath) {\n throw new Error(\n 'For local storage, modelPath must not be null, undefined or empty.');\n }\n this.modelPath = modelPath;\n this.keys = getModelKeys(this.modelPath);\n }\n\n /**\n * Save model artifacts to browser local storage.\n *\n * See the documentation to `browserLocalStorage` for details on the saved\n * artifacts.\n *\n * @param modelArtifacts The model artifacts to be stored.\n * @returns An instance of SaveResult.\n */\n async save(modelArtifacts: ModelArtifacts): Promise {\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error(\n 'BrowserLocalStorage.save() does not support saving model topology ' +\n 'in binary formats yet.');\n } else {\n const topology = JSON.stringify(modelArtifacts.modelTopology);\n const weightSpecs = JSON.stringify(modelArtifacts.weightSpecs);\n\n const modelArtifactsInfo: ModelArtifactsInfo =\n getModelArtifactsInfoForJSON(modelArtifacts);\n\n try {\n this.LS.setItem(this.keys.info, 
JSON.stringify(modelArtifactsInfo));\n this.LS.setItem(this.keys.topology, topology);\n this.LS.setItem(this.keys.weightSpecs, weightSpecs);\n this.LS.setItem(\n this.keys.weightData,\n arrayBufferToBase64String(modelArtifacts.weightData));\n\n // Note that JSON.stringify doesn't write out keys that have undefined\n // values, so for some keys, we set undefined instead of a null-ish\n // value.\n const metadata: Required = {\n format: modelArtifacts.format,\n generatedBy: modelArtifacts.generatedBy,\n convertedBy: modelArtifacts.convertedBy,\n signature: modelArtifacts.signature != null ?\n modelArtifacts.signature :\n undefined,\n userDefinedMetadata: modelArtifacts.userDefinedMetadata != null ?\n modelArtifacts.userDefinedMetadata :\n undefined,\n modelInitializer: modelArtifacts.modelInitializer != null ?\n modelArtifacts.modelInitializer :\n undefined,\n initializerSignature: modelArtifacts.initializerSignature != null ?\n modelArtifacts.initializerSignature :\n undefined,\n trainingConfig: modelArtifacts.trainingConfig != null ?\n modelArtifacts.trainingConfig :\n undefined\n };\n this.LS.setItem(this.keys.modelMetadata, JSON.stringify(metadata));\n\n return {modelArtifactsInfo};\n } catch (err) {\n // If saving failed, clean up all items saved so far.\n removeItems(this.keys);\n\n throw new Error(\n `Failed to save model '${this.modelPath}' to local storage: ` +\n `size quota being exceeded is a possible cause of this failure: ` +\n `modelTopologyBytes=${modelArtifactsInfo.modelTopologyBytes}, ` +\n `weightSpecsBytes=${modelArtifactsInfo.weightSpecsBytes}, ` +\n `weightDataBytes=${modelArtifactsInfo.weightDataBytes}.`);\n }\n }\n }\n\n /**\n * Load a model from local storage.\n *\n * See the documentation to `browserLocalStorage` for details on the saved\n * artifacts.\n *\n * @returns The loaded model (if loading succeeds).\n */\n async load(): Promise {\n const info =\n JSON.parse(this.LS.getItem(this.keys.info)) as ModelArtifactsInfo;\n if (info == null) {\n throw new Error(\n `In local storage, there is no model with name '${this.modelPath}'`);\n }\n\n if (info.modelTopologyType !== 'JSON') {\n throw new Error(\n 'BrowserLocalStorage does not support loading non-JSON model ' +\n 'topology yet.');\n }\n\n const out: ModelArtifacts = {};\n\n // Load topology.\n const topology = JSON.parse(this.LS.getItem(this.keys.topology));\n if (topology == null) {\n throw new Error(\n `In local storage, the topology of model '${this.modelPath}' ` +\n `is missing.`);\n }\n out.modelTopology = topology;\n\n // Load weight specs.\n const weightSpecs = JSON.parse(this.LS.getItem(this.keys.weightSpecs));\n if (weightSpecs == null) {\n throw new Error(\n `In local storage, the weight specs of model '${this.modelPath}' ` +\n `are missing.`);\n }\n out.weightSpecs = weightSpecs;\n\n // Load meta-data fields.\n const metadataString = this.LS.getItem(this.keys.modelMetadata);\n if (metadataString != null) {\n const metadata = JSON.parse(metadataString) as ModelMetadata;\n out.format = metadata.format;\n out.generatedBy = metadata.generatedBy;\n out.convertedBy = metadata.convertedBy;\n if (metadata.signature != null) {\n out.signature = metadata.signature;\n }\n if (metadata.userDefinedMetadata != null) {\n out.userDefinedMetadata = metadata.userDefinedMetadata;\n }\n if (metadata.modelInitializer != null) {\n out.modelInitializer = metadata.modelInitializer;\n }\n if (metadata.initializerSignature != null) {\n out.initializerSignature = metadata.initializerSignature;\n }\n if (metadata.trainingConfig != 
null) {\n out.trainingConfig = metadata.trainingConfig;\n }\n }\n\n // Load weight data.\n const weightDataBase64 = this.LS.getItem(this.keys.weightData);\n if (weightDataBase64 == null) {\n throw new Error(\n `In local storage, the binary weight values of model ` +\n `'${this.modelPath}' are missing.`);\n }\n out.weightData = base64StringToArrayBuffer(weightDataBase64);\n\n return out;\n }\n}\n\nexport const localStorageRouter: IORouter = (url: string|string[]) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n } else {\n if (!Array.isArray(url) && url.startsWith(BrowserLocalStorage.URL_SCHEME)) {\n return browserLocalStorage(\n url.slice(BrowserLocalStorage.URL_SCHEME.length));\n } else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(localStorageRouter);\nIORouterRegistry.registerLoadRouter(localStorageRouter);\n\n/**\n * Factory function for local storage IOHandler.\n *\n * This `IOHandler` supports both `save` and `load`.\n *\n * For each model's saved artifacts, four items are saved to local storage.\n * - `${PATH_SEPARATOR}/${modelPath}/info`: Contains meta-info about the\n * model, such as date saved, type of the topology, size in bytes, etc.\n * - `${PATH_SEPARATOR}/${modelPath}/topology`: Model topology. For Keras-\n * style models, this is a stringized JSON.\n * - `${PATH_SEPARATOR}/${modelPath}/weight_specs`: Weight specs of the\n * model, can be used to decode the saved binary weight values (see\n * item below).\n * - `${PATH_SEPARATOR}/${modelPath}/weight_data`: Concatenated binary\n * weight values, stored as a base64-encoded string.\n *\n * Saving may throw an `Error` if the total size of the artifacts exceed the\n * browser-specific quota.\n *\n * @param modelPath A unique identifier for the model to be saved. Must be a\n * non-empty string.\n * @returns An instance of `IOHandler`, which can be used with, e.g.,\n * `tf.Model.save`.\n */\nexport function browserLocalStorage(modelPath: string): IOHandler {\n return new BrowserLocalStorage(modelPath);\n}\n\nexport class BrowserLocalStorageManager implements ModelStoreManager {\n private readonly LS: Storage;\n\n constructor() {\n assert(\n env().getBool('IS_BROWSER'),\n () => 'Current environment is not a web browser');\n assert(\n typeof window === 'undefined' ||\n typeof window.localStorage !== 'undefined',\n () => 'Current browser does not appear to support localStorage');\n this.LS = window.localStorage;\n }\n\n async listModels(): Promise<{[path: string]: ModelArtifactsInfo}> {\n const out: {[path: string]: ModelArtifactsInfo} = {};\n const prefix = PATH_PREFIX + PATH_SEPARATOR;\n const suffix = PATH_SEPARATOR + INFO_SUFFIX;\n for (let i = 0; i < this.LS.length; ++i) {\n const key = this.LS.key(i);\n if (key.startsWith(prefix) && key.endsWith(suffix)) {\n const modelPath = getModelPathFromKey(key);\n out[modelPath] = JSON.parse(this.LS.getItem(key)) as ModelArtifactsInfo;\n }\n }\n return out;\n }\n\n async removeModel(path: string): Promise {\n path = maybeStripScheme(path);\n const keys = getModelKeys(path);\n if (this.LS.getItem(keys.info) == null) {\n throw new Error(`Cannot find model at path '${path}'`);\n }\n const info = JSON.parse(this.LS.getItem(keys.info)) as ModelArtifactsInfo;\n removeItems(keys);\n return info;\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * Classes and functions for model management across multiple storage mediums.\n *\n * Supported client actions:\n * - Listing models on all registered storage mediums.\n * - Remove model by URL from any registered storage mediums, by using URL\n * string.\n * - Moving or copying model from one path to another in the same medium or from\n * one medium to another, by using URL strings.\n */\n\nimport {assert} from '../util';\n\nimport {IORouterRegistry} from './router_registry';\nimport {ModelArtifactsInfo, ModelStoreManager} from './types';\n\nconst URL_SCHEME_SUFFIX = '://';\n\nexport class ModelStoreManagerRegistry {\n // Singleton instance.\n private static instance: ModelStoreManagerRegistry;\n\n private managers: {[scheme: string]: ModelStoreManager};\n\n private constructor() {\n this.managers = {};\n }\n\n private static getInstance(): ModelStoreManagerRegistry {\n if (ModelStoreManagerRegistry.instance == null) {\n ModelStoreManagerRegistry.instance = new ModelStoreManagerRegistry();\n }\n return ModelStoreManagerRegistry.instance;\n }\n\n /**\n * Register a save-handler router.\n *\n * @param saveRouter A function that maps a URL-like string onto an instance\n * of `IOHandler` with the `save` method defined or `null`.\n */\n static registerManager(scheme: string, manager: ModelStoreManager) {\n assert(scheme != null, () => 'scheme must not be undefined or null.');\n if (scheme.endsWith(URL_SCHEME_SUFFIX)) {\n scheme = scheme.slice(0, scheme.indexOf(URL_SCHEME_SUFFIX));\n }\n assert(scheme.length > 0, () => 'scheme must not be an empty string.');\n const registry = ModelStoreManagerRegistry.getInstance();\n assert(\n registry.managers[scheme] == null,\n () => `A model store manager is already registered for scheme '${\n scheme}'.`);\n registry.managers[scheme] = manager;\n }\n\n static getManager(scheme: string): ModelStoreManager {\n const manager = ModelStoreManagerRegistry.getInstance().managers[scheme];\n if (manager == null) {\n throw new Error(`Cannot find model manager for scheme '${scheme}'`);\n }\n return manager;\n }\n\n static getSchemes(): string[] {\n return Object.keys(ModelStoreManagerRegistry.getInstance().managers);\n }\n}\n\n/**\n * Helper method for parsing a URL string into a scheme and a path.\n *\n * @param url E.g., 'localstorage://my-model'\n * @returns A dictionary with two fields: scheme and path.\n * Scheme: e.g., 'localstorage' in the example above.\n * Path: e.g., 'my-model' in the example above.\n */\nfunction parseURL(url: string): {scheme: string, path: string} {\n if (url.indexOf(URL_SCHEME_SUFFIX) === -1) {\n throw new Error(\n `The url string provided does not contain a scheme. 
` +\n `Supported schemes are: ` +\n `${ModelStoreManagerRegistry.getSchemes().join(',')}`);\n }\n return {\n scheme: url.split(URL_SCHEME_SUFFIX)[0],\n path: url.split(URL_SCHEME_SUFFIX)[1],\n };\n}\n\nasync function cloneModelInternal(\n sourceURL: string, destURL: string,\n deleteSource = false): Promise {\n assert(\n sourceURL !== destURL,\n () => `Old path and new path are the same: '${sourceURL}'`);\n\n const loadHandlers = IORouterRegistry.getLoadHandlers(sourceURL);\n assert(\n loadHandlers.length > 0,\n () => `Copying failed because no load handler is found for source URL ${\n sourceURL}.`);\n assert(\n loadHandlers.length < 2,\n () => `Copying failed because more than one (${loadHandlers.length}) ` +\n `load handlers for source URL ${sourceURL}.`);\n const loadHandler = loadHandlers[0];\n\n const saveHandlers = IORouterRegistry.getSaveHandlers(destURL);\n assert(\n saveHandlers.length > 0,\n () => `Copying failed because no save handler is found for destination ` +\n `URL ${destURL}.`);\n assert(\n saveHandlers.length < 2,\n () => `Copying failed because more than one (${loadHandlers.length}) ` +\n `save handlers for destination URL ${destURL}.`);\n const saveHandler = saveHandlers[0];\n\n const sourceScheme = parseURL(sourceURL).scheme;\n const sourcePath = parseURL(sourceURL).path;\n const sameMedium = sourceScheme === parseURL(sourceURL).scheme;\n\n const modelArtifacts = await loadHandler.load();\n\n // If moving within the same storage medium, remove the old model as soon as\n // the loading is done. Without doing this, it is possible that the combined\n // size of the two models will cause the cloning to fail.\n if (deleteSource && sameMedium) {\n await ModelStoreManagerRegistry.getManager(sourceScheme)\n .removeModel(sourcePath);\n }\n\n const saveResult = await saveHandler.save(modelArtifacts);\n\n // If moving between mediums, the deletion is done after the save succeeds.\n // This guards against the case in which saving to the destination medium\n // fails.\n if (deleteSource && !sameMedium) {\n await ModelStoreManagerRegistry.getManager(sourceScheme)\n .removeModel(sourcePath);\n }\n\n return saveResult.modelArtifactsInfo;\n}\n\n/**\n * List all models stored in registered storage mediums.\n *\n * For a web browser environment, the registered mediums are Local Storage and\n * IndexedDB.\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Delete the model.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n * ```\n *\n * @returns A `Promise` of a dictionary mapping URLs of existing models to\n * their model artifacts info. URLs include medium-specific schemes, e.g.,\n * 'indexeddb://my/model/1'. 
Model artifacts info include type of the\n * model's topology, byte sizes of the topology, weights, etc.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function listModels(): Promise<{[url: string]: ModelArtifactsInfo}> {\n const schemes = ModelStoreManagerRegistry.getSchemes();\n const out: {[url: string]: ModelArtifactsInfo} = {};\n for (const scheme of schemes) {\n const schemeOut =\n await ModelStoreManagerRegistry.getManager(scheme).listModels();\n for (const path in schemeOut) {\n const url = scheme + URL_SCHEME_SUFFIX + path;\n out[url] = schemeOut[path];\n }\n }\n return out;\n}\n\n/**\n * Remove a model specified by URL from a registered storage medium.\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Delete the model.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n * ```\n *\n * @param url A URL to a stored model, with a scheme prefix, e.g.,\n * 'localstorage://my-model-1', 'indexeddb://my/model/2'.\n * @returns ModelArtifactsInfo of the deleted model (if and only if deletion\n * is successful).\n * @throws Error if deletion fails, e.g., if no model exists at `path`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function removeModel(url: string): Promise {\n const schemeAndPath = parseURL(url);\n const manager = ModelStoreManagerRegistry.getManager(schemeAndPath.scheme);\n return manager.removeModel(schemeAndPath.path);\n}\n\n/**\n * Copy a model from one URL to another.\n *\n * This function supports:\n *\n * 1. Copying within a storage medium, e.g.,\n * `tf.io.copyModel('localstorage://model-1', 'localstorage://model-2')`\n * 2. 
Copying between two storage mediums, e.g.,\n * `tf.io.copyModel('localstorage://model-1', 'indexeddb://model-1')`\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Copy the model, from Local Storage to IndexedDB.\n * await tf.io.copyModel(\n * 'localstorage://demo/management/model1',\n * 'indexeddb://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Remove both models.\n * await tf.io.removeModel('localstorage://demo/management/model1');\n * await tf.io.removeModel('indexeddb://demo/management/model1');\n * ```\n *\n * @param sourceURL Source URL of copying.\n * @param destURL Destination URL of copying.\n * @returns ModelArtifactsInfo of the copied model (if and only if copying\n * is successful).\n * @throws Error if copying fails, e.g., if no model exists at `sourceURL`, or\n * if `oldPath` and `newPath` are identical.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function copyModel(\n sourceURL: string, destURL: string): Promise {\n const deleteSource = false;\n return cloneModelInternal(sourceURL, destURL, deleteSource);\n}\n\n/**\n * Move a model from one URL to another.\n *\n * This function supports:\n *\n * 1. Moving within a storage medium, e.g.,\n * `tf.io.moveModel('localstorage://model-1', 'localstorage://model-2')`\n * 2. Moving between two storage mediums, e.g.,\n * `tf.io.moveModel('localstorage://model-1', 'indexeddb://model-1')`\n *\n * ```js\n * // First create and save a model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * await model.save('localstorage://demo/management/model1');\n *\n * // Then list existing models.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Move the model, from Local Storage to IndexedDB.\n * await tf.io.moveModel(\n * 'localstorage://demo/management/model1',\n * 'indexeddb://demo/management/model1');\n *\n * // List models again.\n * console.log(JSON.stringify(await tf.io.listModels()));\n *\n * // Remove the moved model.\n * await tf.io.removeModel('indexeddb://demo/management/model1');\n * ```\n *\n * @param sourceURL Source URL of moving.\n * @param destURL Destination URL of moving.\n * @returns ModelArtifactsInfo of the copied model (if and only if copying\n * is successful).\n * @throws Error if moving fails, e.g., if no model exists at `sourceURL`, or\n * if `oldPath` and `newPath` are identical.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Management',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nasync function moveModel(\n sourceURL: string, destURL: string): Promise {\n const deleteSource = true;\n return cloneModelInternal(sourceURL, destURL, deleteSource);\n}\n\nexport {moveModel, copyModel, removeModel, listModels};\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport '../flags';\n\nimport {env} from '../environment';\nimport {BrowserIndexedDB, BrowserIndexedDBManager} from '../io/indexed_db';\nimport {BrowserLocalStorage, BrowserLocalStorageManager} from '../io/local_storage';\nimport {ModelStoreManagerRegistry} from '../io/model_management';\n\nimport {Platform} from './platform';\n\nexport class PlatformBrowser implements Platform {\n // According to the spec, the built-in encoder can do only UTF-8 encoding.\n // https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder/TextEncoder\n private textEncoder: TextEncoder;\n\n // For setTimeoutCustom\n private readonly messageName = 'setTimeoutCustom';\n private functionRefs: Function[] = [];\n private handledMessageCount = 0;\n private hasEventListener = false;\n\n fetch(path: string, init?: RequestInit): Promise {\n return fetch(path, init);\n }\n\n now(): number {\n return performance.now();\n }\n\n encode(text: string, encoding: string): Uint8Array {\n if (encoding !== 'utf-8' && encoding !== 'utf8') {\n throw new Error(\n `Browser's encoder only supports utf-8, but got ${encoding}`);\n }\n if (this.textEncoder == null) {\n this.textEncoder = new TextEncoder();\n }\n return this.textEncoder.encode(text);\n }\n decode(bytes: Uint8Array, encoding: string): string {\n return new TextDecoder(encoding).decode(bytes);\n }\n\n // If the setTimeout nesting level is greater than 5 and timeout is less\n // than 4ms, timeout will be clamped to 4ms, which hurts the perf.\n // Interleaving window.postMessage and setTimeout will trick the browser and\n // avoid the clamp.\n setTimeoutCustom(functionRef: Function, delay: number): void {\n if (typeof window === 'undefined' ||\n !env().getBool('USE_SETTIMEOUTCUSTOM')) {\n setTimeout(functionRef, delay);\n return;\n }\n\n this.functionRefs.push(functionRef);\n setTimeout(() => {\n window.postMessage(\n {name: this.messageName, index: this.functionRefs.length - 1}, '*');\n }, delay);\n\n if (!this.hasEventListener) {\n this.hasEventListener = true;\n window.addEventListener('message', (event: MessageEvent) => {\n if (event.source === window && event.data.name === this.messageName) {\n event.stopPropagation();\n const functionRef = this.functionRefs[event.data.index];\n functionRef();\n this.handledMessageCount++;\n if (this.handledMessageCount === this.functionRefs.length) {\n this.functionRefs = [];\n this.handledMessageCount = 0;\n }\n }\n }, true);\n }\n }\n\n isTypedArray(a: unknown): a is Uint8Array | Float32Array | Int32Array\n | Uint8ClampedArray {\n return a instanceof Float32Array || a instanceof Int32Array ||\n a instanceof Uint8Array || a instanceof Uint8ClampedArray;\n }\n}\n\nif (env().get('IS_BROWSER')) {\n env().setPlatform('browser', new PlatformBrowser());\n\n // Register LocalStorage IOHandler\n try {\n ModelStoreManagerRegistry.registerManager(\n BrowserLocalStorage.URL_SCHEME, 
new BrowserLocalStorageManager());\n } catch (err) {\n }\n\n // Register IndexedDB IOHandler\n try {\n ModelStoreManagerRegistry.registerManager(\n BrowserIndexedDB.URL_SCHEME, new BrowserIndexedDBManager());\n } catch (err) {\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {env} from '../environment';\nimport {Platform} from './platform';\n\n// We are wrapping this within an object so it can be stubbed by Jasmine.\nexport const getNodeFetch = {\n // tslint:disable-next-line:no-require-imports\n importFetch: () => require('node-fetch')\n};\n\ntype FetchFn = (url: string, init?: RequestInit) => Promise;\nlet systemFetch: FetchFn;\n// These getters and setters are for testing so we don't export a mutable\n// variable.\nexport function resetSystemFetch() {\n systemFetch = null;\n}\nexport function setSystemFetch(fetchFn: FetchFn) {\n systemFetch = fetchFn;\n}\nexport function getSystemFetch(): FetchFn {\n return systemFetch;\n}\n\nexport class PlatformNode implements Platform {\n private textEncoder: TextEncoder;\n // tslint:disable-next-line:no-any\n util: any;\n\n constructor() {\n // tslint:disable-next-line:no-require-imports\n this.util = require('util');\n // According to the spec, the built-in encoder can do only UTF-8 encoding.\n // https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder/TextEncoder\n this.textEncoder = new this.util.TextEncoder();\n }\n\n fetch(path: string, requestInits?: RequestInit): Promise {\n if (env().global.fetch != null) {\n return env().global.fetch(path, requestInits);\n }\n\n if (systemFetch == null) {\n systemFetch = getNodeFetch.importFetch();\n }\n return systemFetch(path, requestInits);\n }\n\n now(): number {\n const time = process.hrtime();\n return time[0] * 1000 + time[1] / 1000000;\n }\n\n encode(text: string, encoding: string): Uint8Array {\n if (encoding !== 'utf-8' && encoding !== 'utf8') {\n throw new Error(\n `Node built-in encoder only supports utf-8, but got ${encoding}`);\n }\n return this.textEncoder.encode(text);\n }\n decode(bytes: Uint8Array, encoding: string): string {\n if (bytes.length === 0) {\n return '';\n }\n return new this.util.TextDecoder(encoding).decode(bytes);\n }\n isTypedArray(a: unknown): a is Float32Array | Int32Array | Uint8Array\n | Uint8ClampedArray {\n return this.util.types.isFloat32Array(a)\n || this.util.types.isInt32Array(a)\n || this.util.types.isUint8Array(a)\n || this.util.types.isUint8ClampedArray(a);\n }\n}\n\nif (env().get('IS_NODE') && !env().get('IS_BROWSER')) {\n env().setPlatform('node', new PlatformNode());\n}\n","/**\n * @license\n * Copyright 2020 Google Inc. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {TensorBuffer} from '../tensor';\nimport {DataType, DataTypeMap, Rank, ShapeMap} from '../types';\nimport * as util from '../util';\n\n/**\n * Creates an empty `tf.TensorBuffer` with the specified `shape` and `dtype`.\n *\n * The values are stored in CPU as `TypedArray`. Fill the buffer using\n * `buffer.set()`, or by modifying directly `buffer.values`.\n *\n * When done, call `buffer.toTensor()` to get an immutable `tf.Tensor` with\n * those values.\n *\n * ```js\n * // Create a buffer and set values at particular indices.\n * const buffer = tf.buffer([2, 2]);\n * buffer.set(3, 0, 0);\n * buffer.set(5, 1, 0);\n *\n * // Convert the buffer back to a tensor.\n * buffer.toTensor().print();\n * ```\n *\n * @param shape An array of integers defining the output tensor shape.\n * @param dtype The dtype of the buffer. Defaults to 'float32'.\n * @param values The values of the buffer as `TypedArray`. Defaults to\n * zeros.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function buffer(\n shape: ShapeMap[R], dtype: D = 'float32' as D,\n values?: DataTypeMap[D]): TensorBuffer {\n dtype = dtype || 'float32' as D;\n util.assertNonNegativeIntegerDimensions(shape);\n return new TensorBuffer(shape, dtype, values);\n}\n","/**\n * @license\n * Copyright 2020 Google Inc. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Cast, CastAttrs, CastInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {DataType, TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\n\n/**\n * Casts a `tf.Tensor` to a new dtype.\n *\n * ```js\n * const x = tf.tensor1d([1.5, 2.5, 3]);\n * tf.cast(x, 'int32').print();\n * ```\n * @param x The input tensor to be casted.\n * @param dtype The dtype to cast the input tensor to.\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction cast_(x: T|TensorLike, dtype: DataType): T {\n const $x = convertToTensor(x, 'x', 'cast');\n\n // Sanity checks.\n if (!util.isValidDtype(dtype)) {\n throw new Error(`Failed to cast to unknown dtype ${dtype}`);\n }\n if (dtype === 'string' && $x.dtype !== 'string' ||\n dtype !== 'string' && $x.dtype === 'string') {\n throw new Error('Only strings can be casted to strings');\n }\n\n const inputs: CastInputs = {x: $x};\n const attrs: CastAttrs = {dtype};\n\n return ENGINE.runKernel(\n Cast, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const cast = /* @__PURE__ */ op({cast_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Identity, IdentityInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Creates a new tensor with the same values and shape as the specified\n * tensor.\n *\n * ```js\n * const x = tf.tensor([1, 2]);\n *\n * x.clone().print();\n * ```\n *\n * @param x The tensor to clone.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction clone_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'clone', 'string_or_numeric');\n const inputs: IdentityInputs = {x: $x};\n\n // Note this op is called tf.identity in python. 
Hence the kernel name used\n // here.\n return ENGINE.runKernel(Identity, inputs as unknown as NamedTensorMap);\n}\n\nexport const clone = /* @__PURE__ */ op({clone_});\n","/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\n\n/**\n * Prints information about the `tf.Tensor` including its data.\n *\n * ```js\n * const verbose = true;\n * tf.tensor2d([1, 2, 3, 4], [2, 2]).print(verbose);\n * ```\n * @param x The tensor to be printed.\n * @param verbose Whether to print verbose information about the ` Tensor`,\n * including dtype and size.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function print(x: T, verbose = false): void {\n console.log(x.toString(verbose));\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelBackend} from './backends/backend';\nimport {ENGINE, Engine, MemoryInfo, ProfileInfo, ScopeFn, TimingInfo} from './engine';\nimport {env} from './environment';\n\nimport {Platform} from './platforms/platform';\nimport {setDeprecationWarningFn, Tensor} from './tensor';\nimport {TensorContainer} from './tensor_types';\nimport {getTensorsInContainer} from './tensor_util';\n\n/**\n * Enables production mode which disables correctness checks in favor of\n * performance.\n *\n * @doc {heading: 'Environment'}\n */\nexport function enableProdMode(): void {\n env().set('PROD', true);\n}\n\n/**\n * Enables debug mode which will log information about all executed kernels:\n * the elapsed time of the kernel execution, as well as the rank, shape, and\n * size of the output tensor.\n *\n * Debug mode will significantly slow down your application as it will\n * download the result of every operation to the CPU. This should not be used in\n * production. 
Debug mode does not affect the timing information of the kernel\n * execution as we do not measure download time in the kernel execution time.\n *\n * See also: `tf.profile`, `tf.memory`.\n *\n * @doc {heading: 'Environment'}\n */\nexport function enableDebugMode(): void {\n env().set('DEBUG', true);\n}\n\n/** Globally disables deprecation warnings */\nexport function disableDeprecationWarnings(): void {\n env().set('DEPRECATION_WARNINGS_ENABLED', false);\n console.warn(`TensorFlow.js deprecation warnings have been disabled.`);\n}\n\n/** Warn users about deprecated functionality. */\nexport function deprecationWarn(msg: string) {\n if (env().getBool('DEPRECATION_WARNINGS_ENABLED')) {\n console.warn(\n msg + ' You can disable deprecation warnings with ' +\n 'tf.disableDeprecationWarnings().');\n }\n}\nsetDeprecationWarningFn(deprecationWarn);\n\n/**\n * Dispose all variables kept in backend engine.\n *\n * @doc {heading: 'Environment'}\n */\nexport function disposeVariables(): void {\n ENGINE.disposeVariables();\n}\n\n/**\n * It returns the global engine that keeps track of all tensors and backends.\n *\n * @doc {heading: 'Environment'}\n */\nexport function engine(): Engine {\n return ENGINE;\n}\n\n/**\n * Returns memory info at the current time in the program. The result is an\n * object with the following properties:\n *\n * - `numBytes`: Number of bytes allocated (undisposed) at this time.\n * - `numTensors`: Number of unique tensors allocated.\n * - `numDataBuffers`: Number of unique data buffers allocated\n * (undisposed) at this time, which is ≤ the number of tensors\n * (e.g. `a.reshape(newShape)` makes a new Tensor that shares the same\n * data buffer with `a`).\n * - `unreliable`: True if the memory usage is unreliable. See `reasons` when\n * `unreliable` is true.\n * - `reasons`: `string[]`, reasons why the memory is unreliable, present if\n * `unreliable` is true.\n *\n * WebGL Properties:\n * - `numBytesInGPU`: Number of bytes allocated (undisposed) in the GPU only at\n * this time.\n *\n * @doc {heading: 'Performance', subheading: 'Memory'}\n */\nexport function memory(): MemoryInfo {\n return ENGINE.memory();\n}\n\n/**\n * Executes the provided function `f()` and returns a promise that resolves\n * with information about the function's memory use:\n * - `newBytes`: the number of new bytes allocated\n * - `newTensors`: the number of new tensors created\n * - `peakBytes`: the peak number of bytes allocated\n * - `kernels`: an array of objects for each kernel involved that reports\n * their input and output shapes, number of bytes used, and number of new\n * tensors created.\n * - `kernelNames`: an array of unique strings with just the names of the\n * kernels in the `kernels` array.\n *\n * ```js\n * const profile = await tf.profile(() => {\n * const x = tf.tensor1d([1, 2, 3]);\n * let x2 = x.square();\n * x2.dispose();\n * x2 = x.square();\n * x2.dispose();\n * return x;\n * });\n *\n * console.log(`newBytes: ${profile.newBytes}`);\n * console.log(`newTensors: ${profile.newTensors}`);\n * console.log(`byte usage over all kernels: ${profile.kernels.map(k =>\n * k.totalBytesSnapshot)}`);\n * ```\n *\n *\n * @doc {heading: 'Performance', subheading: 'Profile'}\n */\nexport function profile(f: () => (TensorContainer | Promise)):\n Promise {\n return ENGINE.profile(f);\n}\n\n/**\n * Executes the provided function `fn` and after it is executed, cleans up all\n * intermediate tensors allocated by `fn` except those returned by `fn`.\n * `fn` must not return a Promise (async functions 
not allowed). The returned\n * result can be a complex object.\n *\n * Using this method helps avoid memory leaks. In general, wrap calls to\n * operations in `tf.tidy` for automatic memory cleanup.\n *\n * NOTE: Variables do *not* get cleaned up when inside a tidy(). If you want to\n * dispose variables, please use `tf.disposeVariables` or call dispose()\n * directly on variables.\n *\n * ```js\n * // y = 2 ^ 2 + 1\n * const y = tf.tidy(() => {\n * // a, b, and one will be cleaned up when the tidy ends.\n * const one = tf.scalar(1);\n * const a = tf.scalar(2);\n * const b = a.square();\n *\n * console.log('numTensors (in tidy): ' + tf.memory().numTensors);\n *\n * // The value returned inside the tidy function will return\n * // through the tidy, in this case to the variable y.\n * return b.add(one);\n * });\n *\n * console.log('numTensors (outside tidy): ' + tf.memory().numTensors);\n * y.print();\n * ```\n *\n * @param nameOrFn The name of the closure, or the function to execute.\n * If a name is provided, the 2nd argument should be the function.\n * If debug mode is on, the timing and the memory usage of the function\n * will be tracked and displayed on the console using the provided name.\n * @param fn The function to execute.\n *\n * @doc {heading: 'Performance', subheading: 'Memory'}\n */\nexport function tidy(\n nameOrFn: string|ScopeFn, fn?: ScopeFn): T {\n return ENGINE.tidy(nameOrFn, fn);\n}\n\n/**\n * Disposes any `tf.Tensor`s found within the provided object.\n *\n * @param container an object that may be a `tf.Tensor` or may directly\n * contain `tf.Tensor`s, such as a `Tensor[]` or `{key: Tensor, ...}`. If\n * the object is not a `tf.Tensor` or does not contain `Tensors`, nothing\n * happens. In general it is safe to pass any object here, except that\n * `Promise`s are not supported.\n *\n * @doc {heading: 'Performance', subheading: 'Memory'}\n */\nexport function dispose(container: TensorContainer) {\n const tensors = getTensorsInContainer(container);\n tensors.forEach(tensor => tensor.dispose());\n}\n\n/**\n * Keeps a `tf.Tensor` generated inside a `tf.tidy` from being disposed\n * automatically.\n *\n * ```js\n * let b;\n * const y = tf.tidy(() => {\n * const one = tf.scalar(1);\n * const a = tf.scalar(2);\n *\n * // b will not be cleaned up by the tidy. a and one will be cleaned up\n * // when the tidy ends.\n * b = tf.keep(a.square());\n *\n * console.log('numTensors (in tidy): ' + tf.memory().numTensors);\n *\n * // The value returned inside the tidy function will return\n * // through the tidy, in this case to the variable y.\n * return b.add(one);\n * });\n *\n * console.log('numTensors (outside tidy): ' + tf.memory().numTensors);\n * console.log('y:');\n * y.print();\n * console.log('b:');\n * b.print();\n * ```\n *\n * @param result The tensor to keep from being disposed.\n *\n * @doc {heading: 'Performance', subheading: 'Memory'}\n */\nexport function keep(result: T): T {\n return ENGINE.keep(result);\n}\n\n/**\n * Executes `f()` and returns a promise that resolves with timing\n * information.\n *\n * The result is an object with the following properties:\n *\n * - `wallMs`: Wall execution time.\n * - `kernelMs`: Kernel execution time, ignoring data transfer. 
If using the\n * WebGL backend and the query timer extension is not available, this will\n * return an error object.\n * - On `WebGL` The following additional properties exist:\n * - `uploadWaitMs`: CPU blocking time on texture uploads.\n * - `downloadWaitMs`: CPU blocking time on texture downloads (readPixels).\n *\n * ```js\n * const x = tf.randomNormal([20, 20]);\n * const time = await tf.time(() => x.matMul(x));\n *\n * console.log(`kernelMs: ${time.kernelMs}, wallTimeMs: ${time.wallMs}`);\n * ```\n *\n * @param f The function to execute and time.\n *\n * @doc {heading: 'Performance', subheading: 'Timing'}\n */\nexport function time(f: () => void): Promise {\n return ENGINE.time(f);\n}\n\n/**\n * Sets the backend (cpu, webgl, wasm, etc) responsible for creating tensors and\n * executing operations on those tensors. Returns a promise that resolves\n * to a boolean if the backend initialization was successful.\n *\n * Note this disposes the current backend, if any, as well as any tensors\n * associated with it. A new backend is initialized, even if it is of the\n * same type as the previous one.\n *\n * @param backendName The name of the backend. Currently supports\n * `'webgl'|'cpu'` in the browser, `'tensorflow'` under node.js\n * (requires tfjs-node), and `'wasm'` (requires tfjs-backend-wasm).\n *\n * @doc {heading: 'Backends'}\n */\nexport function setBackend(backendName: string): Promise {\n return ENGINE.setBackend(backendName);\n}\n\n/**\n * Returns a promise that resolves when the currently selected backend (or the\n * highest priority one) has initialized. Await this promise when you are using\n * a backend that has async initialization.\n *\n * @doc {heading: 'Backends'}\n */\nexport function ready(): Promise {\n return ENGINE.ready();\n}\n\n/**\n * Returns the current backend name (cpu, webgl, etc). The backend is\n * responsible for creating tensors and executing operations on those tensors.\n *\n * @doc {heading: 'Backends'}\n */\nexport function getBackend(): string {\n return ENGINE.backendName;\n}\n\n/**\n * Removes a backend and the registered factory.\n *\n * @doc {heading: 'Backends'}\n */\nexport function removeBackend(name: string): void {\n ENGINE.removeBackend(name);\n}\n\n/**\n * Finds the backend registered under the provided name. Returns null if the\n * name is not in the registry, or the registration hasn't finished yet.\n */\nexport function findBackend(name: string): KernelBackend {\n return ENGINE.findBackend(name);\n}\n\n/**\n * Finds the backend factory registered under the provided name. Returns a\n * function that produces a new backend when called. Returns null if the name\n * is not in the registry.\n */\nexport function findBackendFactory(name: string): () =>\n KernelBackend | Promise {\n return ENGINE.findBackendFactory(name);\n}\n\n/**\n * Registers a global backend. The registration should happen when importing\n * a module file (e.g. when importing `backend_webgl.ts`), and is used for\n * modular builds (e.g. custom tfjs bundle with only webgl support).\n *\n * @param factory The backend factory function. When called, it should\n * return a backend instance, or a promise of an instance.\n * @param priority The priority of the backend (higher = more important).\n * In case multiple backends are registered, the priority is used to find\n * the best backend. 
Defaults to 1.\n * @return False if there is already a registered backend under this name, true\n * if not.\n *\n * @doc {heading: 'Backends'}\n */\nexport function registerBackend(\n name: string, factory: () => KernelBackend | Promise,\n priority = 1): boolean {\n return ENGINE.registerBackend(name, factory, priority);\n}\n\n/**\n * Gets the current backend. If no backends have been initialized, this will\n * attempt to initialize the best backend. Will throw an error if the highest\n * priority backend has async initialization, in which case you should call\n * 'await tf.ready()' before running other code.\n *\n * @doc {heading: 'Backends'}\n */\nexport function backend(): KernelBackend {\n return ENGINE.backend;\n}\n\n/**\n * Sets the global platform.\n *\n * @param platformName The name of this platform.\n * @param platform A platform implementation.\n */\nexport function setPlatform(platformName: string, platform: Platform) {\n env().setPlatform(platformName, platform);\n}\n","/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// Required side effectful code for tfjs-core\n\n// Set up Engine and ENV\nimport {getOrMakeEngine} from './engine';\ngetOrMakeEngine();\n\n// Register backend-agnostic flags.\nimport './flags';\n// Register platforms\nimport './platforms/platform_browser';\nimport './platforms/platform_node';\n\n// Set up OpHandler\nimport {buffer} from './ops/buffer';\nimport {cast} from './ops/cast';\nimport {clone} from './ops/clone';\nimport {print} from './ops/print';\nimport {OpHandler, setOpHandler} from './tensor';\nconst opHandler: OpHandler = {\n buffer,\n cast,\n clone,\n print\n};\nsetOpHandler(opHandler);\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Add, AddInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Adds two `tf.Tensor`s element-wise, A + B. 
Supports broadcasting.\n *\n *\n * ```js\n * const a = tf.tensor1d([1, 2, 3, 4]);\n * const b = tf.tensor1d([10, 20, 30, 40]);\n *\n * a.add(b).print(); // or tf.add(a, b)\n * ```\n *\n * ```js\n * // Broadcast add a with b.\n * const a = tf.scalar(5);\n * const b = tf.tensor1d([10, 20, 30, 40]);\n *\n * a.add(b).print(); // or tf.add(a, b)\n * ```\n * @param a The first `tf.Tensor` to add.\n * @param b The second `tf.Tensor` to add. Must have the same type as `a`.\n *\n * @doc {heading: 'Operations', subheading: 'Arithmetic'}\n */\nfunction add_(a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n let $a = convertToTensor(a, 'a', 'add');\n let $b = convertToTensor(b, 'b', 'add');\n [$a, $b] = makeTypesMatch($a, $b);\n\n const inputs: AddInputs = {a: $a, b: $b};\n\n return ENGINE.runKernel(Add, inputs as unknown as NamedTensorMap);\n}\n\nexport const add = /* @__PURE__ */ op({add_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {FloorDiv, FloorDivInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting.\n * The result is rounded with floor function.\n *\n *\n * ```js\n * const a = tf.tensor1d([1, 4, 9, 16]);\n * const b = tf.tensor1d([1, 2, 3, 4]);\n *\n * a.floorDiv(b).print(); // or tf.div(a, b)\n * ```\n *\n * ```js\n * // Broadcast div a with b.\n * const a = tf.tensor1d([2, 4, 6, 8]);\n * const b = tf.scalar(2);\n *\n * a.floorDiv(b).print(); // or tf.floorDiv(a, b)\n * ```\n *\n * @param a The first tensor as the numerator.\n * @param b The second tensor as the denominator. Must have the same dtype as\n * `a`.\n *\n * @doc {heading: 'Operations', subheading: 'Arithmetic'}\n */\nfunction floorDiv_(\n a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n let $a = convertToTensor(a, 'a', 'floorDiv');\n let $b = convertToTensor(b, 'b', 'floorDiv');\n [$a, $b] = makeTypesMatch($a, $b);\n\n const inputs: FloorDivInputs = {a: $a, b: $b};\n\n return ENGINE.runKernel(FloorDiv, inputs as unknown as NamedTensorMap);\n}\n\nexport const floorDiv = /* @__PURE__ */ op({floorDiv_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {RealDiv, RealDivInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {floorDiv} from './floorDiv';\nimport {op} from './operation';\n\n/**\n * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting.\n *\n * ```js\n * const a = tf.tensor1d([1, 4, 9, 16]);\n * const b = tf.tensor1d([1, 2, 3, 4]);\n *\n * a.div(b).print(); // or tf.div(a, b)\n * ```\n *\n * ```js\n * // Broadcast div a with b.\n * const a = tf.tensor1d([2, 4, 6, 8]);\n * const b = tf.scalar(2);\n *\n * a.div(b).print(); // or tf.div(a, b)\n * ```\n *\n * @param a The first tensor as the numerator.\n * @param b The second tensor as the denominator. Must have the same dtype as\n * `a`.\n *\n * @doc {heading: 'Operations', subheading: 'Arithmetic'}\n */\nfunction div_(a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n let $a = convertToTensor(a, 'a', 'div');\n let $b = convertToTensor(b, 'b', 'div');\n [$a, $b] = makeTypesMatch($a, $b);\n\n if ($a.dtype === 'int32' && $b.dtype === 'int32') {\n return floorDiv($a, $b);\n }\n\n const inputs: RealDivInputs = {a: $a, b: $b};\n const attrs = {};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n return ENGINE.runKernel(RealDiv,\n inputs as unknown as NamedTensorMap, attrs) as T;\n}\n\nexport const div = /* @__PURE__ */ op({div_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Multiply, MultiplyInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Multiplies two `tf.Tensor`s element-wise, A * B. 
Supports broadcasting.\n *\n * We also expose `tf.mulStrict` which has the same signature as this op and\n * asserts that `a` and `b` are the same shape (does not broadcast).\n *\n * ```js\n * const a = tf.tensor1d([1, 2, 3, 4]);\n * const b = tf.tensor1d([2, 3, 4, 5]);\n *\n * a.mul(b).print(); // or tf.mul(a, b)\n * ```\n *\n * ```js\n * // Broadcast mul a with b.\n * const a = tf.tensor1d([1, 2, 3, 4]);\n * const b = tf.scalar(5);\n *\n * a.mul(b).print(); // or tf.mul(a, b)\n * ```\n * @param a The first tensor to multiply.\n * @param b The second tensor to multiply. Must have the same dtype as `a`.\n *\n * @doc {heading: 'Operations', subheading: 'Arithmetic'}\n */\nfunction mul_(a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n let $a = convertToTensor(a, 'a', 'mul');\n let $b = convertToTensor(b, 'b', 'mul');\n [$a, $b] = makeTypesMatch($a, $b);\n\n const inputs: MultiplyInputs = {a: $a, b: $b};\n\n return ENGINE.runKernel(Multiply, inputs as unknown as NamedTensorMap);\n}\nexport const mul = /* @__PURE__ */ op({mul_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Sqrt, SqrtInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes square root of the input `tf.Tensor` element-wise: `y = sqrt(x)`\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 4, -1]);\n *\n * x.sqrt().print(); // or tf.sqrt(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction sqrt_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'sqrt', 'float32');\n\n const inputs: SqrtInputs = {x: $x};\n\n return ENGINE.runKernel(Sqrt, inputs as unknown as NamedTensorMap);\n}\nexport const sqrt = /* @__PURE__ */ op({sqrt_});\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Tensor} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {op} from './operation';\n\n/**\n * Computes square of `x` element-wise: `x ^ 2`\n *\n * ```js\n * const x = tf.tensor1d([1, 2, Math.sqrt(2), -1]);\n *\n * x.square().print(); // or tf.square(x)\n * ```\n * @param x The input Tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction square_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'square');\n const attrs = {};\n return ENGINE.runKernel('Square', {x: $x}, attrs);\n}\n\nexport const square = /* @__PURE__ */ op({square_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {ZerosLike, ZerosLikeInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Creates a `tf.Tensor` with all elements set to 0 with the same shape as the\n * given tensor.\n *\n * ```js\n * const x = tf.tensor([1, 2]);\n * tf.zerosLike(x).print();\n * ```\n *\n * @param x The tensor of required shape.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction zerosLike_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'zerosLike');\n const inputs: ZerosLikeInputs = {x: $x};\n return ENGINE.runKernel(ZerosLike, inputs as unknown as NamedTensorMap);\n}\nexport const zerosLike = /* @__PURE__ */ op({zerosLike_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {CustomGradientFunc, ENGINE} from './engine';\nimport {Scalar, Tensor, Variable} from './tensor';\nimport {NamedTensorMap} from './tensor_types';\nimport {convertToTensor, convertToTensorArray} from './tensor_util_env';\nimport {TensorLike} from './types';\nimport * as util from './util';\n\n/**\n * Provided `f(x)`, returns another function `g(x, dy?)`, which gives the\n * gradient of `f(x)` with respect to `x`.\n *\n * If `dy` is provided, the gradient of `f(x).mul(dy).sum()` with respect to\n * `x` is computed instead. `f(x)` must take a single tensor `x` and return a\n * single tensor `y`. If `f()` takes multiple inputs, use `tf.grads` instead.\n *\n * ```js\n * // f(x) = x ^ 2\n * const f = x => x.square();\n * // f'(x) = 2x\n * const g = tf.grad(f);\n *\n * const x = tf.tensor1d([2, 3]);\n * g(x).print();\n * ```\n *\n * ```js\n * // f(x) = x ^ 3\n * const f = x => x.pow(tf.scalar(3, 'int32'));\n * // f'(x) = 3x ^ 2\n * const g = tf.grad(f);\n * // f''(x) = 6x\n * const gg = tf.grad(g);\n *\n * const x = tf.tensor1d([2, 3]);\n * gg(x).print();\n * ```\n *\n * @param f The function f(x), to compute gradient for.\n *\n * @doc {heading: 'Training', subheading: 'Gradients'}\n */\nfunction grad(f: (x: Tensor) => Tensor): (\n x: TensorLike|Tensor, dy?: TensorLike|Tensor) => Tensor {\n util.assert(\n util.isFunction(f), () => 'The f passed in grad(f) must be a function');\n return (x: TensorLike|Tensor, dy?: TensorLike|Tensor): Tensor => {\n // x can be of any dtype, thus null as the last argument.\n const $x = convertToTensor(x, 'x', 'tf.grad', 'string_or_numeric');\n const $dy: Tensor =\n (dy != null) ? convertToTensor(dy, 'dy', 'tf.grad') : null;\n return ENGINE.tidy(() => {\n const {value, grads} = ENGINE.gradients(() => f($x), [$x], $dy);\n if ($dy != null) {\n util.assertShapesMatch(\n value.shape, $dy.shape,\n 'The shape of dy passed in grad(f)(x, dy) must match the shape ' +\n 'returned by f(x)');\n }\n checkGrads(grads);\n return grads[0];\n });\n };\n}\n\n/**\n * Provided `f(x1, x2,...)`, returns another function `g([x1, x2,...], dy?)`,\n * which gives an array of gradients of `f()` with respect to each input\n * [`x1`,`x2`,...].\n *\n * If `dy` is passed when calling `g()`, the gradient of\n * `f(x1,...).mul(dy).sum()` with respect to each input is computed instead.\n * The provided `f` must take one or more tensors and return a single tensor\n * `y`. 
If `f()` takes a single input, we recommend using `tf.grad` instead.\n *\n * ```js\n * // f(a, b) = a * b\n * const f = (a, b) => a.mul(b);\n * // df / da = b, df / db = a\n * const g = tf.grads(f);\n *\n * const a = tf.tensor1d([2, 3]);\n * const b = tf.tensor1d([-2, -3]);\n * const [da, db] = g([a, b]);\n * console.log('da');\n * da.print();\n * console.log('db');\n * db.print();\n * ```\n *\n * @param f The function `f(x1, x2,...)` to compute gradients for.\n *\n * @doc {heading: 'Training', subheading: 'Gradients'}\n */\nfunction grads(f: (...args: Tensor[]) => Tensor): (\n args: Array, dy?: Tensor|TensorLike) => Tensor[] {\n util.assert(\n util.isFunction(f), () => 'The f passed in grads(f) must be a function');\n return (args: Array, dy?: Tensor|TensorLike): Tensor[] => {\n util.assert(\n Array.isArray(args),\n () => 'The args passed in grads(f)(args) must be an array ' +\n 'of `Tensor`s or `TensorLike`s');\n // args can be of any dtype, thus null as the last argument.\n const $args =\n convertToTensorArray(args, 'args', 'tf.grads', 'string_or_numeric');\n const $dy: Tensor =\n (dy != null) ? convertToTensor(dy, 'dy', 'tf.grads') : null;\n return ENGINE.tidy(() => {\n const {value, grads} = ENGINE.gradients(() => f(...$args), $args, $dy);\n if ($dy != null) {\n util.assertShapesMatch(\n value.shape, $dy.shape,\n 'The shape of dy passed in grads(f)([x1,...], dy) must ' +\n 'match the shape returned by f([x1,...])');\n }\n checkGrads(grads);\n return grads;\n });\n };\n}\n\n/**\n * Like `tf.grad`, but also returns the value of `f()`. Useful when `f()`\n * returns a metric you want to show.\n *\n * The result is a rich object with the following properties:\n * - grad: The gradient of `f(x)` w.r.t. `x` (result of `tf.grad`).\n * - value: The value returned by `f(x)`.\n *\n * ```js\n * // f(x) = x ^ 2\n * const f = x => x.square();\n * // f'(x) = 2x\n * const g = tf.valueAndGrad(f);\n *\n * const x = tf.tensor1d([2, 3]);\n * const {value, grad} = g(x);\n *\n * console.log('value');\n * value.print();\n * console.log('grad');\n * grad.print();\n * ```\n *\n * @doc {heading: 'Training', subheading: 'Gradients'}\n */\nfunction valueAndGrad(f: (x: I) => O): (\n x: I, dy?: O) => {\n value: O;\n grad: I;\n} {\n util.assert(\n util.isFunction(f),\n () => 'The f passed in valueAndGrad(f) must be a function');\n return (x: I, dy?: O) => {\n util.assert(\n x instanceof Tensor,\n () => 'The x passed in valueAndGrad(f)(x) must be a tensor');\n util.assert(\n dy == null || dy instanceof Tensor,\n () => 'The dy passed in valueAndGrad(f)(x, dy) must be a tensor');\n const {grads, value} = ENGINE.gradients(() => f(x), [x], dy);\n checkGrads(grads);\n return {grad: grads[0] as I, value};\n };\n}\n\n/**\n * Like `tf.grads`, but returns also the value of `f()`. Useful when `f()`\n * returns a metric you want to show.\n *\n * The result is a rich object with the following properties:\n * - grads: The gradients of `f()` w.r.t. 
each input (result of `tf.grads`).\n * - value: The value returned by `f(x)`.\n *\n * ```js\n * // f(a, b) = a * b\n * const f = (a, b) => a.mul(b);\n * // df/da = b, df/db = a\n * const g = tf.valueAndGrads(f);\n *\n * const a = tf.tensor1d([2, 3]);\n * const b = tf.tensor1d([-2, -3]);\n * const {value, grads} = g([a, b]);\n *\n * const [da, db] = grads;\n *\n * console.log('value');\n * value.print();\n *\n * console.log('da');\n * da.print();\n * console.log('db');\n * db.print();\n * ```\n *\n * @doc {heading: 'Training', subheading: 'Gradients'}\n */\nfunction valueAndGrads(f: (...args: Tensor[]) => O): (\n args: Tensor[], dy?: O) => {\n grads: Tensor[];\n value: O;\n} {\n util.assert(\n util.isFunction(f),\n () => 'The f passed in valueAndGrads(f) must be a function');\n return (args: Tensor[], dy?: O) => {\n util.assert(\n Array.isArray(args) && args.every(arg => arg instanceof Tensor),\n () => 'The args passed in valueAndGrads(f)(args) must be array of ' +\n 'tensors');\n util.assert(\n dy == null || dy instanceof Tensor,\n () => 'The dy passed in valueAndGrads(f)(args, dy) must be a tensor');\n const res = ENGINE.gradients(() => f(...args), args, dy);\n if (dy != null) {\n util.assertShapesMatch(\n res.value.shape, dy.shape,\n 'The shape of dy passed in valueAndGrads(f)([x1,...], dy) must ' +\n 'match the shape returned by f([x1,...])');\n }\n checkGrads(res.grads);\n return res;\n };\n}\n\n/**\n * Computes and returns the gradient of f(x) with respect to the list of\n * trainable variables provided by `varList`. If no list is provided, it\n * defaults to all trainable variables.\n *\n * ```js\n * const a = tf.variable(tf.tensor1d([3, 4]));\n * const b = tf.variable(tf.tensor1d([5, 6]));\n * const x = tf.tensor1d([1, 2]);\n *\n * // f(a, b) = a * x ^ 2 + b * x\n * const f = () => a.mul(x.square()).add(b.mul(x)).sum();\n * // df/da = x ^ 2, df/db = x\n * const {value, grads} = tf.variableGrads(f);\n *\n * Object.keys(grads).forEach(varName => grads[varName].print());\n * ```\n *\n * @param f The function to execute. f() should return a scalar.\n * @param varList The list of variables to compute the gradients with respect\n * to. Defaults to all trainable variables.\n * @returns An object with the following keys and values:\n * - `value`: The value of the function `f`.\n * - `grads`: A map from the names of the variables to the gradients.\n * If the `varList` argument is provided explicitly and contains a subset of\n * non-trainable variables, this map in the return value will contain keys\n * that map the names of the non-trainable variables to `null`.\n *\n * @doc {heading: 'Training', subheading: 'Gradients'}\n */\nfunction variableGrads(f: () => Scalar, varList?: Variable[]):\n {value: Scalar, grads: NamedTensorMap} {\n util.assert(\n util.isFunction(f),\n () => 'The f passed in variableGrads(f) must be a function');\n util.assert(\n varList == null ||\n Array.isArray(varList) && varList.every(v => v instanceof Variable),\n () =>\n 'The varList passed in variableGrads(f, varList) must be an array ' +\n 'of variables');\n\n const specifiedVarList = varList != null;\n if (!specifiedVarList) {\n // Get all of the trainable variables.\n varList = [];\n for (const varName in ENGINE.registeredVariables) {\n varList.push(ENGINE.registeredVariables[varName]);\n }\n }\n\n const specifiedNonTrainable: Variable[] =\n specifiedVarList ? 
varList.filter(variable => !variable.trainable) : null;\n\n // Prune non-trainable variables.\n const originalVarCount = varList.length;\n varList = varList.filter(variable => variable.trainable);\n util.assert(\n varList.length > 0,\n () => `variableGrads() expects at least one of the input variables to ` +\n `be trainable, but none of the ${originalVarCount} variables is ` +\n `trainable.`);\n\n const allowNoGradients = true;\n const {value, grads} = ENGINE.gradients(f, varList, null, allowNoGradients);\n\n util.assert(\n grads.some(g => g != null),\n () => 'Cannot find a connection between any variable and the result of ' +\n 'the loss function y=f(x). Please make sure the operations that ' +\n 'use variables are inside the function f passed to minimize().');\n util.assert(\n value.rank === 0,\n () => `The f passed in variableGrads(f) must return a scalar, but it ` +\n `returned a rank-${value.rank} tensor`);\n\n const namedGrads: NamedTensorMap = {};\n varList.forEach((v, i) => {\n if (grads[i] != null) {\n namedGrads[v.name] = grads[i];\n }\n });\n if (specifiedNonTrainable != null) {\n // If varList is explicitly provided and contains non-trainable values,\n // add them to the returned gradients with `null` values.\n specifiedNonTrainable.forEach(v => namedGrads[v.name] = null);\n }\n return {value, grads: namedGrads};\n}\n\n/**\n * Overrides the gradient computation of a function `f`.\n *\n * Takes a function\n * `f(...inputs, save) => {value: Tensor, gradFunc: (dy, saved) => Tensor[]}`\n * and returns another function `g(...inputs)` which takes the same inputs as\n * `f`. When called, `g` returns `f().value`. In backward mode, custom gradients\n * with respect to each input of `f` are computed using `f().gradFunc`.\n *\n * The `save` function passed to `f` should be used for saving tensors needed\n * in the gradient. And the `saved` passed to the `gradFunc` is a\n * `NamedTensorMap`, which contains those saved tensors.\n *\n * ```js\n * const customOp = tf.customGrad((x, save) => {\n * // Save x to make sure it's available later for the gradient.\n * save([x]);\n * // Override gradient of our custom x ^ 2 op to be dy * abs(x);\n * return {\n * value: x.square(),\n * // Note `saved.x` which points to the `x` we saved earlier.\n * gradFunc: (dy, saved) => [dy.mul(saved[0].abs())]\n * };\n * });\n *\n * const x = tf.tensor1d([-1, -2, 3]);\n * const dx = tf.grad(x => customOp(x));\n *\n * console.log(`f(x):`);\n * customOp(x).print();\n * console.log(`f'(x):`);\n * dx(x).print();\n * ```\n *\n * @param f The function to evaluate in forward mode, which should return\n * `{value: Tensor, gradFunc: (dy, saved) => Tensor[]}`, where `gradFunc`\n * returns the custom gradients of `f` with respect to its inputs.\n *\n * @doc {heading: 'Training', subheading: 'Gradients'}\n */\nfunction customGrad(f: CustomGradientFunc):\n (...args: Tensor[]) => T {\n return ENGINE.customGrad(f);\n}\n\nfunction checkGrads(grads: Tensor[]) {\n const numNullGradients = grads.filter(g => g == null).length;\n if (numNullGradients > 0) {\n throw new Error(\n `Cannot compute gradient of y=f(x) with respect to x. Make sure that\n the f you passed encloses all operations that lead from x to y.`);\n }\n}\n\nexport {\n customGrad,\n variableGrads,\n valueAndGrad,\n valueAndGrads,\n grad,\n grads,\n};\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Scalar} from '../tensor';\nimport {DataType} from '../types';\nimport {isTypedArray} from '../util';\nimport {makeTensor} from './tensor_ops_util';\n\n/**\n * Creates rank-0 `tf.Tensor` (scalar) with the provided value and dtype.\n *\n * The same functionality can be achieved with `tf.tensor`, but in general\n * we recommend using `tf.scalar` as it makes the code more readable.\n *\n * ```js\n * tf.scalar(3.14).print();\n * ```\n *\n * @param value The value of the scalar.\n * @param dtype The data type.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function scalar(\n value: number|boolean|string|Uint8Array, dtype?: DataType): Scalar {\n if (((isTypedArray(value) && dtype !== 'string') || Array.isArray(value)) &&\n dtype !== 'complex64') {\n throw new Error(\n 'Error creating a new Scalar: value must be a primitive ' +\n '(number|boolean|string)');\n }\n if (dtype === 'string' && isTypedArray(value) &&\n !(value instanceof Uint8Array)) {\n throw new Error(\n 'When making a scalar from encoded string, ' +\n 'the value must be `Uint8Array`.');\n }\n const shape: number[] = [];\n const inferredShape: number[] = [];\n return makeTensor(value, shape, inferredShape, dtype) as Scalar;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {assert} from './util';\n\n/**\n * Types to support JSON-esque data structures internally.\n *\n * Internally ConfigDict's use camelCase keys and values where the\n * values are class names to be instantiated. On the python side, these\n * will be snake_case. Internally we allow Enums into the values for better\n * type safety, but these need to be converted to raw primitives (usually\n * strings) for round-tripping with python.\n *\n * toConfig returns the TS-friendly representation. model.toJSON() returns\n * the pythonic version as that's the portable format. 
If you need to\n * python-ify a non-model level toConfig output, you'll need to use a\n * convertTsToPythonic from serialization_utils in -Layers.\n *\n */\nexport declare type ConfigDictValue =\n boolean | number | string | null | ConfigDictArray | ConfigDict;\nexport declare interface ConfigDict {\n [key: string]: ConfigDictValue;\n}\nexport declare interface ConfigDictArray extends Array {}\n\n/**\n * Type to represent the class-type of Serializable objects.\n *\n * Ie the class prototype with access to the constructor and any\n * static members/methods. Instance methods are not listed here.\n *\n * Source for this idea: https://stackoverflow.com/a/43607255\n */\nexport declare type SerializableConstructor = {\n // tslint:disable-next-line:no-any\n new (...args: any[]): T; className: string; fromConfig: FromConfigMethod;\n};\nexport declare type FromConfigMethod =\n (cls: SerializableConstructor, config: ConfigDict) => T;\n\n/**\n * Serializable defines the serialization contract.\n *\n * TFJS requires serializable classes to return their className when asked\n * to avoid issues with minification.\n */\nexport abstract class Serializable {\n /**\n * Return the class name for this class to use in serialization contexts.\n *\n * Generally speaking this will be the same thing that constructor.name\n * would have returned. However, the class name needs to be robust\n * against minification for serialization/deserialization to work properly.\n *\n * There's also places such as initializers.VarianceScaling, where\n * implementation details between different languages led to different\n * class hierarchies and a non-leaf node is used for serialization purposes.\n */\n getClassName(): string {\n return (this.constructor as SerializableConstructor)\n .className;\n }\n\n /**\n * Return all the non-weight state needed to serialize this object.\n */\n abstract getConfig(): ConfigDict;\n\n /**\n * Creates an instance of T from a ConfigDict.\n *\n * This works for most descendants of serializable. 
A few need to\n * provide special handling.\n * @param cls A Constructor for the class to instantiate.\n * @param config The Configuration for the object.\n */\n /** @nocollapse */\n static fromConfig(\n cls: SerializableConstructor, config: ConfigDict): T {\n return new cls(config);\n }\n}\n\n/**\n * Maps string keys to class constructors.\n *\n * Used during (de)serialization from the cross-language JSON format, which\n * requires the class name in the serialization format matches the class\n * names as used in Python, should it exist.\n */\nexport class SerializationMap {\n private static instance: SerializationMap;\n classNameMap: {\n [className: string]:\n [SerializableConstructor, FromConfigMethod]\n };\n\n private constructor() {\n this.classNameMap = {};\n }\n\n /**\n * Returns the singleton instance of the map.\n */\n static getMap(): SerializationMap {\n if (SerializationMap.instance == null) {\n SerializationMap.instance = new SerializationMap();\n }\n return SerializationMap.instance;\n }\n\n /**\n * Registers the class as serializable.\n */\n static register(cls: SerializableConstructor) {\n SerializationMap.getMap().classNameMap[cls.className] =\n [cls, cls.fromConfig];\n }\n}\n\n/**\n * Register a class with the serialization map of TensorFlow.js.\n *\n * This is often used for registering custom Layers, so they can be\n * serialized and deserialized.\n *\n * Example:\n *\n * ```js\n * class MyCustomLayer extends tf.layers.Layer {\n * static className = 'MyCustomLayer';\n *\n * constructor(config) {\n * super(config);\n * }\n * }\n * tf.serialization.registerClass(MyCustomLayer);\n * ```\n *\n * @param cls The class to be registered. It must have a public static member\n * called `className` defined and the value must be a non-empty string.\n *\n * @doc {heading: 'Models', subheading: 'Serialization', ignoreCI: true}\n */\nexport function registerClass(\n cls: SerializableConstructor) {\n assert(\n cls.className != null,\n () => `Class being registered does not have the static className ` +\n `property defined.`);\n assert(\n typeof cls.className === 'string',\n () => `className is required to be a string, but got type ` +\n typeof cls.className);\n assert(\n cls.className.length > 0,\n () => `Class being registered has an empty-string as its className, ` +\n `which is disallowed.`);\n\n SerializationMap.register(cls);\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {dispose} from '../globals';\nimport {variableGrads} from '../gradients';\nimport {scalar} from '../ops/ops';\nimport {Serializable} from '../serialization';\nimport {Scalar, Variable} from '../tensor';\nimport {NamedTensor, NamedTensorMap} from '../tensor_types';\n\n/**\n * A variable that belongs to an optimizer.\n *\n * The `originalName` field is required for keeping track of the canonical\n * name of the variable, which is usually the name of the model weight that\n * the variable is related to plus a suffix, e.g., 'dense1/kernel/momentum'.\n * The name of the `Variable` object itself cannot be used directly due to\n * possible deduplication: Every `Variable` must have a unique name but more\n * than one optimizer objects of the same type may be created for the same model\n * or the same `Variable`.\n */\nexport interface OptimizerVariable {\n originalName: string;\n variable: Variable;\n}\n\n/** @doc {heading: 'Training', subheading: 'Classes', namespace: 'train'} */\nexport abstract class Optimizer extends Serializable {\n protected iterations_: number;\n\n /**\n * Executes `f()` and minimizes the scalar output of `f()` by computing\n * gradients of y with respect to the list of trainable variables provided by\n * `varList`. If no list is provided, it defaults to all trainable variables.\n *\n * @param f The function to execute and whose output to minimize.\n * @param returnCost Whether to return the scalar cost value produced by\n * executing `f()`.\n * @param varList An optional list of variables to update. If specified, only\n * the trainable variables in varList will be updated by minimize. Defaults to\n * all trainable variables.\n *\n * @doc {heading: 'Training', subheading: 'Optimizers'}\n */\n minimize(f: () => Scalar, returnCost = false, varList?: Variable[]): Scalar\n |null {\n const {value, grads} = this.computeGradients(f, varList);\n\n if (varList != null) {\n const gradArray: NamedTensor[] =\n varList.map(v => ({name: v.name, tensor: grads[v.name]}));\n this.applyGradients(gradArray);\n } else {\n this.applyGradients(grads);\n }\n\n // Dispose gradients.\n dispose(grads);\n\n if (returnCost) {\n return value;\n } else {\n value.dispose();\n return null;\n }\n }\n\n /**\n * The number of iterations that this optimizer instance has been invoked for.\n */\n get iterations(): number {\n if (this.iterations_ == null) {\n this.iterations_ = 0;\n }\n return this.iterations_;\n }\n\n protected incrementIterations() {\n this.iterations_ = this.iterations + 1;\n }\n\n /**\n * Executes f() and computes the gradient of the scalar output of f() with\n * respect to the list of trainable variables provided by `varList`. 
If no\n * list is provided, it defaults to all trainable variables.\n *\n * @param f The function to execute and whose output to use for computing\n * gradients with respect to variables.\n * @param varList An optional list of variables to compute gradients with\n * respect to. If specified, only the trainable variables in varList will have\n * gradients computed with respect to. Defaults to all trainable variables.\n *\n * @doc {heading: 'Training', subheading: 'Optimizers'}\n */\n computeGradients(f: () => Scalar, varList?: Variable[]):\n {value: Scalar, grads: NamedTensorMap} {\n return variableGrads(f, varList);\n }\n\n /**\n * Updates variables by using the computed gradients.\n *\n * @param variableGradients A mapping of variable name to its gradient value.\n *\n * @doc {heading: 'Training', subheading: 'Optimizers'}\n */\n abstract applyGradients(variableGradients: NamedTensorMap|\n NamedTensor[]): void;\n\n /**\n * Dispose the variables (if any) owned by this optimizer instance.\n */\n dispose(): void {\n if (this.iterations_ != null) {\n dispose(this.iterations_);\n }\n }\n\n async saveIterations(): Promise {\n if (this.iterations_ == null) {\n this.iterations_ = 0;\n }\n return {\n name: 'iter', // Named for Python compatibility.\n // TODO(cais): Use 'int64' type when available.\n tensor: scalar(this.iterations_, 'int32')\n };\n }\n\n async getWeights(): Promise {\n throw new Error('getWeights() is not implemented for this optimizer yet.');\n }\n\n async setWeights(weightValues: NamedTensor[]): Promise {\n throw new Error(\n `setWeights() is not implemented for this optimizer class ` +\n `${this.getClassName()}`);\n }\n\n /**\n * Extract the first element of the weight values and set it\n * as the iterations counter variable of this instance of optimizer.\n *\n * @param weightValues\n * @returns Weight values with the first element consumed and excluded.\n */\n protected async extractIterations(weightValues: NamedTensor[]):\n Promise {\n this.iterations_ = (await weightValues[0].tensor.data())[0];\n return weightValues.slice(1);\n }\n}\n\nObject.defineProperty(Optimizer, Symbol.hasInstance, {\n value: (instance: Optimizer) => {\n return instance.minimize != null && instance.computeGradients != null &&\n instance.applyGradients != null;\n }\n});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {dispose, tidy} from '../globals';\nimport {add} from '../ops/add';\nimport {div} from '../ops/div';\nimport {mul} from '../ops/mul';\nimport {sqrt} from '../ops/ops';\nimport {square} from '../ops/square';\nimport {zerosLike} from '../ops/zeros_like';\nimport {ConfigDict, Serializable, SerializableConstructor} from '../serialization';\nimport {NamedTensor, NamedVariableMap} from '../tensor_types';\n\nimport {Optimizer, OptimizerVariable} from './optimizer';\n\n/** @doclink Optimizer */\nexport class AdadeltaOptimizer extends Optimizer {\n /** @nocollapse */\n static get className() {\n // Name matters for Python compatibility.\n // This is a getter instead of a property because when it's a property, it\n // prevents the entire class from being tree-shaken.\n return 'Adadelta';\n }\n private accumulatedGrads: OptimizerVariable[] = [];\n private accumulatedUpdates: OptimizerVariable[] = [];\n\n constructor(\n protected learningRate: number, protected rho: number,\n protected epsilon: number = null) {\n super();\n\n if (epsilon == null) {\n this.epsilon = ENGINE.backend.epsilon();\n }\n }\n\n applyGradients(variableGradients: NamedVariableMap|NamedTensor[]) {\n const variableNames = Array.isArray(variableGradients) ?\n variableGradients.map(item => item.name) :\n Object.keys(variableGradients);\n\n variableNames.forEach((name, i) => {\n const value = ENGINE.registeredVariables[name];\n const trainable = false;\n if (this.accumulatedGrads[i] == null) {\n this.accumulatedGrads[i] = {\n originalName: `${name}/accum_grad`,\n variable: tidy(() => zerosLike(value).variable(trainable))\n };\n }\n if (this.accumulatedUpdates[i] == null) {\n this.accumulatedUpdates[i] = {\n originalName: `${name}/accum_var`,\n variable: tidy(() => zerosLike(value).variable(trainable))\n };\n }\n\n const gradient = Array.isArray(variableGradients) ?\n variableGradients[i].tensor :\n variableGradients[name];\n if (gradient == null) {\n return;\n }\n\n const accumulatedGrad = this.accumulatedGrads[i].variable;\n const accumulatedUpdate = this.accumulatedUpdates[i].variable;\n\n tidy(() => {\n const newAccumulatedGrad =\n add(mul(accumulatedGrad, this.rho),\n mul(square(gradient), 1 - this.rho));\n\n const updates =\n mul(div(sqrt(add(accumulatedUpdate, this.epsilon)),\n sqrt(add(accumulatedGrad, this.epsilon))),\n gradient);\n\n const newAccumulatedUpdate =\n add(mul(accumulatedUpdate, this.rho),\n mul(square(updates), 1 - this.rho));\n\n accumulatedGrad.assign(newAccumulatedGrad);\n accumulatedUpdate.assign(newAccumulatedUpdate);\n\n const newValue = add(mul(updates, -this.learningRate), value);\n value.assign(newValue);\n });\n });\n this.incrementIterations();\n }\n\n override dispose(): void {\n if (this.accumulatedUpdates != null) {\n dispose(this.accumulatedGrads.map(v => v.variable));\n 
dispose(this.accumulatedUpdates.map(v => v.variable));\n }\n }\n\n override async getWeights(): Promise {\n // Order matters for Python compatibility.\n const variables: OptimizerVariable[] =\n [...this.accumulatedGrads, ...this.accumulatedUpdates];\n return [await this.saveIterations()].concat(\n variables.map(v => ({name: v.originalName, tensor: v.variable})));\n }\n\n override async setWeights(weightValues: NamedTensor[]): Promise {\n weightValues = await this.extractIterations(weightValues);\n const variableCount = weightValues.length / 2;\n const trainable = false;\n this.accumulatedGrads =\n weightValues.slice(0, variableCount).map(v => ({\n originalName: v.name,\n variable: v.tensor.variable(\n trainable)\n }));\n this.accumulatedUpdates =\n weightValues.slice(variableCount, variableCount * 2)\n .map(v => ({\n originalName: v.name,\n variable: v.tensor.variable(trainable)\n }));\n }\n\n getConfig(): ConfigDict {\n return {\n 'learningRate': this.learningRate,\n 'rho': this.rho,\n 'epsilon': this.epsilon\n };\n }\n\n /** @nocollapse */\n static override fromConfig(\n cls: SerializableConstructor, config: ConfigDict): T {\n return new cls(config['learningRate'], config['rho'], config['epsilon']);\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Fill, FillAttrs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {DataType, Rank, ShapeMap} from '../types';\nimport {assertNonNegativeIntegerDimensions} from '../util_base';\n\n/**\n * Creates a `tf.Tensor` filled with a scalar value.\n *\n * ```js\n * tf.fill([2, 2], 4).print();\n * ```\n *\n * @param shape An array of integers defining the output tensor shape.\n * @param value The scalar value to fill the tensor with.\n * @param dtype The type of an element in the resulting tensor. Defaults to\n * 'float'.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction fill(\n shape: ShapeMap[R], value: number|string, dtype?: DataType): Tensor {\n assertNonNegativeIntegerDimensions(shape);\n\n const attrs: FillAttrs = {shape, value, dtype};\n\n return ENGINE.runKernel(Fill, {}, attrs as unknown as NamedAttrMap);\n}\n\nexport {fill};\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {dispose, tidy} from '../globals';\nimport {add} from '../ops/add';\nimport {div} from '../ops/div';\nimport {fill} from '../ops/fill';\nimport {mul} from '../ops/mul';\nimport {sqrt} from '../ops/sqrt';\nimport {square} from '../ops/square';\nimport {ConfigDict, Serializable, SerializableConstructor} from '../serialization';\nimport {NamedTensor, NamedVariableMap} from '../tensor_types';\n\nimport {Optimizer, OptimizerVariable} from './optimizer';\n\n/** @doclink Optimizer */\nexport class AdagradOptimizer extends Optimizer {\n /** @nocollapse */\n static get className() {\n // Name matters for Python compatibility.\n // This is a getter instead of a property because when it's a property, it\n // prevents the entire class from being tree-shaken.\n return 'Adagrad';\n }\n\n private accumulatedGrads: OptimizerVariable[] = [];\n\n constructor(\n protected learningRate: number, private initialAccumulatorValue = 0.1) {\n super();\n }\n\n applyGradients(variableGradients: NamedVariableMap|NamedTensor[]) {\n const variableNames = Array.isArray(variableGradients) ?\n variableGradients.map(item => item.name) :\n Object.keys(variableGradients);\n\n variableNames.forEach((name, i) => {\n const value = ENGINE.registeredVariables[name];\n if (this.accumulatedGrads[i] == null) {\n const trainable = false;\n this.accumulatedGrads[i] = {\n originalName: `${name}/accumulator`,\n variable: tidy(\n () => fill(value.shape, this.initialAccumulatorValue)\n .variable(trainable))\n };\n }\n\n const gradient = Array.isArray(variableGradients) ?\n variableGradients[i].tensor :\n variableGradients[name];\n if (gradient == null) {\n return;\n }\n\n const accumulatedGrad = this.accumulatedGrads[i].variable;\n\n tidy(() => {\n const newAccumulatedGrad = add(accumulatedGrad, square(gradient));\n accumulatedGrad.assign(newAccumulatedGrad);\n\n const newValue = add(\n mul(div(gradient,\n sqrt(add(newAccumulatedGrad, ENGINE.backend.epsilon()))),\n -this.learningRate),\n value);\n value.assign(newValue);\n });\n });\n this.incrementIterations();\n }\n\n override dispose(): void {\n if (this.accumulatedGrads != null) {\n dispose(this.accumulatedGrads.map(v => v.variable));\n }\n }\n\n override async getWeights(): Promise {\n // Order matters for Python compatibility.\n return [await this.saveIterations()].concat(this.accumulatedGrads.map(\n v => ({name: v.originalName, tensor: v.variable})));\n }\n\n override async setWeights(weightValues: NamedTensor[]): Promise {\n weightValues = await this.extractIterations(weightValues);\n const trainable = false;\n this.accumulatedGrads = weightValues.map(\n v => ({originalName: v.name, variable: v.tensor.variable(trainable)}));\n }\n\n getConfig(): ConfigDict {\n return {\n 'learningRate': this.learningRate,\n 'initialAccumulatorValue': this.initialAccumulatorValue,\n };\n }\n\n 
/** @nocollapse */\n static override fromConfig(\n cls: SerializableConstructor, config: ConfigDict): T {\n return new cls(config['learningRate'], config['initialAccumulatorValue']);\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Pow, PowInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes the power of one `tf.Tensor` to another. Supports broadcasting.\n *\n * Given a `tf.Tensor` x and a `tf.Tensor` y, this operation computes x^y for\n * corresponding elements in x and y. The result's dtype will be the upcasted\n * type of the `base` and `exp` dtypes.\n *\n * ```js\n * const a = tf.tensor([[2, 3], [4, 5]])\n * const b = tf.tensor([[1, 2], [3, 0]]).toInt();\n *\n * a.pow(b).print(); // or tf.pow(a, b)\n * ```\n *\n * ```js\n * const a = tf.tensor([[1, 2], [3, 4]])\n * const b = tf.tensor(2).toInt();\n *\n * a.pow(b).print(); // or tf.pow(a, b)\n * ```\n * We also expose `powStrict` which has the same signature as this op and\n * asserts that `base` and `exp` are the same shape (does not broadcast).\n *\n * @param base The base `tf.Tensor` to pow element-wise.\n * @param exp The exponent `tf.Tensor` to pow element-wise.\n *\n * @doc {heading: 'Operations', subheading: 'Arithmetic'}\n */\nfunction pow_(\n base: Tensor|TensorLike, exp: Tensor|TensorLike): T {\n let $base = convertToTensor(base, 'base', 'pow');\n let $exp = convertToTensor(exp, 'exp', 'pow');\n [$base, $exp] = makeTypesMatch($base, $exp);\n\n const inputs: PowInputs = {a: $base, b: $exp};\n\n return ENGINE.runKernel(Pow, inputs as unknown as NamedTensorMap);\n}\n\nexport const pow = /* @__PURE__ */ op({pow_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Sub, SubInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Subtracts two `tf.Tensor`s element-wise, A - B. Supports broadcasting.\n *\n * ```js\n * const a = tf.tensor1d([10, 20, 30, 40]);\n * const b = tf.tensor1d([1, 2, 3, 4]);\n *\n * a.sub(b).print(); // or tf.sub(a, b)\n * ```\n *\n * ```js\n * // Broadcast subtract a with b.\n * const a = tf.tensor1d([10, 20, 30, 40]);\n * const b = tf.scalar(5);\n *\n * a.sub(b).print(); // or tf.sub(a, b)\n * ```\n * @param a The first `tf.Tensor` to subtract from.\n * @param b The second `tf.Tensor` to be subtracted. Must have the same dtype as\n * `a`.\n *\n * @doc {heading: 'Operations', subheading: 'Arithmetic'}\n */\nfunction sub_(a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n let $a = convertToTensor(a, 'a', 'sub');\n let $b = convertToTensor(b, 'b', 'sub');\n [$a, $b] = makeTypesMatch($a, $b);\n\n const inputs: SubInputs = {a: $a, b: $b};\n\n return ENGINE.runKernel(Sub, inputs as unknown as NamedTensorMap);\n}\n\nexport const sub = /* @__PURE__ */ op({sub_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {dispose, tidy} from '../globals';\nimport {add} from '../ops/add';\nimport {div} from '../ops/div';\nimport {mul} from '../ops/mul';\nimport {pow} from '../ops/pow';\nimport {scalar} from '../ops/scalar';\nimport {sqrt} from '../ops/sqrt';\nimport {square} from '../ops/square';\nimport {sub} from '../ops/sub';\nimport {zerosLike} from '../ops/zeros_like';\nimport {ConfigDict, Serializable, SerializableConstructor} from '../serialization';\nimport {Variable} from '../tensor';\nimport {NamedTensor, NamedVariableMap} from '../tensor_types';\n\nimport {Optimizer, OptimizerVariable} from './optimizer';\n\nexport class AdamOptimizer extends Optimizer {\n /** @nocollapse */\n static get className() {\n // Name matters for Python compatibility.\n // This is a getter instead of a property because when it's a property, it\n // prevents the entire class from being tree-shaken.\n return 'Adam';\n }\n private accBeta1: Variable;\n private accBeta2: Variable;\n\n private accumulatedFirstMoment: OptimizerVariable[] = [];\n private accumulatedSecondMoment: OptimizerVariable[] = [];\n\n constructor(\n protected learningRate: number, protected beta1: number,\n protected beta2: number, protected epsilon: number = null) {\n super();\n tidy(() => {\n // accB* will be updated by batch.\n this.accBeta1 = scalar(beta1).variable();\n this.accBeta2 = scalar(beta2).variable();\n });\n\n if (epsilon == null) {\n this.epsilon = ENGINE.backend.epsilon();\n }\n }\n\n applyGradients(variableGradients: NamedVariableMap|NamedTensor[]) {\n const varNames = Array.isArray(variableGradients) ?\n variableGradients.map(v => v.name) :\n Object.keys(variableGradients);\n tidy(() => {\n const oneMinusAccBeta1 = sub(1, this.accBeta1);\n const oneMinusAccBeta2 = sub(1, this.accBeta2);\n\n varNames.forEach((name, i) => {\n const value = ENGINE.registeredVariables[name];\n const trainable = false;\n if (this.accumulatedFirstMoment[i] == null) {\n this.accumulatedFirstMoment[i] = {\n originalName: `${name}/m`,\n variable: tidy(() => zerosLike(value).variable(trainable))\n };\n }\n if (this.accumulatedSecondMoment[i] == null) {\n this.accumulatedSecondMoment[i] = {\n originalName: `${name}/v`,\n variable: tidy(() => zerosLike(value).variable(trainable))\n };\n }\n\n const gradient = Array.isArray(variableGradients) ?\n variableGradients[i].tensor :\n variableGradients[name];\n if (gradient == null) {\n return;\n }\n\n const firstMoment = this.accumulatedFirstMoment[i].variable;\n const secondMoment = this.accumulatedSecondMoment[i].variable;\n\n const newFirstMoment =\n add(mul(firstMoment, this.beta1), mul(gradient, 1 - this.beta1));\n const newSecondMoment =\n add(mul(secondMoment, this.beta2),\n mul(square(gradient), 1 - this.beta2));\n\n const biasCorrectedFirstMoment = div(newFirstMoment, oneMinusAccBeta1);\n const 
biasCorrectedSecondMoment =\n div(newSecondMoment, oneMinusAccBeta2);\n\n firstMoment.assign(newFirstMoment);\n secondMoment.assign(newSecondMoment);\n\n const newValue =\n add(mul(div(biasCorrectedFirstMoment,\n add(sqrt(biasCorrectedSecondMoment), this.epsilon)),\n -this.learningRate),\n value);\n value.assign(newValue);\n });\n\n this.accBeta1.assign(mul(this.accBeta1, this.beta1));\n this.accBeta2.assign(mul(this.accBeta2, this.beta2));\n });\n this.incrementIterations();\n }\n\n override dispose(): void {\n this.accBeta1.dispose();\n this.accBeta2.dispose();\n\n if (this.accumulatedFirstMoment != null) {\n dispose(this.accumulatedFirstMoment.map(v => v.variable));\n }\n if (this.accumulatedSecondMoment != null) {\n dispose(this.accumulatedSecondMoment.map(v => v.variable));\n }\n }\n\n override async getWeights(): Promise {\n // Order matters for Python compatibility.\n const variables: OptimizerVariable[] =\n [...this.accumulatedFirstMoment, ...this.accumulatedSecondMoment];\n return [await this.saveIterations()].concat(\n variables.map(v => ({name: v.originalName, tensor: v.variable})));\n }\n\n override async setWeights(weightValues: NamedTensor[]): Promise {\n weightValues = await this.extractIterations(weightValues);\n tidy(() => {\n this.accBeta1.assign(pow(this.beta1, this.iterations_ + 1));\n this.accBeta2.assign(pow(this.beta2, this.iterations_ + 1));\n });\n\n const variableCount = weightValues.length / 2;\n const trainable = false;\n this.accumulatedFirstMoment =\n weightValues.slice(0, variableCount).map(v => ({\n originalName: v.name,\n variable: v.tensor.variable(\n trainable)\n }));\n this.accumulatedSecondMoment =\n weightValues.slice(variableCount, variableCount * 2)\n .map(v => ({\n originalName: v.name,\n variable: v.tensor.variable(trainable)\n }));\n }\n\n getConfig(): ConfigDict {\n return {\n 'learningRate': this.learningRate,\n 'beta1': this.beta1,\n 'beta2': this.beta2,\n 'epsilon': this.epsilon,\n };\n }\n\n /** @nocollapse */\n static override fromConfig(\n cls: SerializableConstructor, config: ConfigDict): T {\n return new cls(\n config['learningRate'], config['beta1'], config['beta2'],\n config['epsilon']);\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Abs, AbsInputs, ComplexAbs, ComplexAbsInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes absolute value element-wise: `abs(x)`\n *\n * ```js\n * const x = tf.tensor1d([-1, 2, -3, 4]);\n *\n * x.abs().print(); // or tf.abs(x)\n * ```\n * @param x The input `tf.Tensor`.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction abs_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'abs');\n\n if ($x.dtype === 'complex64') {\n const inputs: ComplexAbsInputs = {x: $x};\n return ENGINE.runKernel(ComplexAbs, inputs as unknown as NamedTensorMap);\n } else {\n const inputs: AbsInputs = {x: $x};\n return ENGINE.runKernel(Abs, inputs as unknown as NamedTensorMap);\n }\n}\n\nexport const abs = /* @__PURE__ */ op({abs_});\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * Returns the dimensions in the input shape that are broadcasted to\n * produce the provided output shape.\n *\n * The returned dimensions are 0-indexed and sorted. An example:\n * inShape = [4, 1, 3]\n * outShape = [5, 4, 3, 3]\n * result = [1]. 
Dimension 1 (2nd dimension of input) gets broadcasted 1 => 3.\n */\nexport function getBroadcastDims(\n inShape: number[], outShape: number[]): number[] {\n const inRank = inShape.length;\n const dims: number[] = [];\n for (let i = 0; i < inRank; i++) {\n const dim = inRank - 1 - i;\n const a = inShape[dim] || 1;\n const b = outShape[outShape.length - 1 - i] || 1;\n if (b > 1 && a === 1) {\n dims.unshift(dim);\n }\n }\n return dims;\n}\n\n/**\n * Returns the axes in the output space that should be reduced to produce\n * the input space.\n */\nexport function getReductionAxes(\n inShape: number[], outShape: number[]): number[] {\n const result: number[] = [];\n for (let i = 0; i < outShape.length; i++) {\n const inDim = inShape[inShape.length - i - 1];\n const outAxis = outShape.length - i - 1;\n const outDim = outShape[outAxis];\n if (inDim == null || (inDim === 1 && outDim > 1)) {\n result.unshift(outAxis);\n }\n }\n return result;\n}\n\nexport function assertAndGetBroadcastShape(\n shapeA: number[], shapeB: number[]): number[] {\n const result: number[] = [];\n const l = Math.max(shapeA.length, shapeB.length);\n\n for (let i = 0; i < l; i++) {\n let a = shapeA[shapeA.length - i - 1];\n if (a == null) {\n a = 1;\n }\n let b = shapeB[shapeB.length - i - 1];\n if (b == null) {\n b = 1;\n }\n if (a === 1) {\n result.unshift(b);\n } else if (b === 1) {\n result.unshift(a);\n } else if (a !== b) {\n const errMsg = `Operands could not be broadcast together with shapes ` +\n `${shapeA} and ${shapeB}.`;\n throw Error(errMsg);\n } else {\n result.unshift(a);\n }\n }\n return result;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Maximum, MaximumInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {assertAndGetBroadcastShape} from './broadcast_util';\nimport {cast} from './cast';\nimport {op} from './operation';\n\n/**\n * Returns the max of a and b (`a > b ? a : b`) element-wise.\n * Supports broadcasting.\n *\n * We also expose `tf.maximumStrict` which has the same signature as this op and\n * asserts that `a` and `b` are the same shape (does not broadcast).\n *\n * ```js\n * const a = tf.tensor1d([1, 4, 3, 16]);\n * const b = tf.tensor1d([1, 2, 9, 4]);\n *\n * a.maximum(b).print(); // or tf.maximum(a, b)\n * ```\n *\n * ```js\n * // Broadcast maximum a with b.\n * const a = tf.tensor1d([2, 4, 6, 8]);\n * const b = tf.scalar(5);\n *\n * a.maximum(b).print(); // or tf.maximum(a, b)\n * ```\n *\n * @param a The first tensor.\n * @param b The second tensor. 
Must have the same type as `a`.\n *\n * @doc {heading: 'Operations', subheading: 'Arithmetic'}\n */\nfunction maximum_(\n a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n let $a = convertToTensor(a, 'a', 'maximum');\n let $b = convertToTensor(b, 'b', 'maximum');\n [$a, $b] = makeTypesMatch($a, $b);\n\n if ($a.dtype === 'bool') {\n $a = cast($a, 'int32');\n $b = cast($b, 'int32');\n }\n assertAndGetBroadcastShape($a.shape, $b.shape);\n\n const inputs: MaximumInputs = {a: $a, b: $b};\n\n return ENGINE.runKernel(Maximum, inputs as unknown as NamedTensorMap);\n}\n\nexport const maximum = /* @__PURE__ */ op({maximum_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {dispose, tidy} from '../globals';\nimport {abs} from '../ops/abs';\nimport {add} from '../ops/add';\nimport {div} from '../ops/div';\nimport {maximum} from '../ops/maximum';\nimport {mul} from '../ops/mul';\nimport {scalar} from '../ops/scalar';\nimport {sub} from '../ops/sub';\nimport {zerosLike} from '../ops/zeros_like';\nimport {ConfigDict, Serializable, SerializableConstructor} from '../serialization';\nimport {Variable} from '../tensor';\nimport {NamedTensor, NamedVariableMap} from '../tensor_types';\n\nimport {Optimizer, OptimizerVariable} from './optimizer';\n\nexport class AdamaxOptimizer extends Optimizer {\n /** @nocollapse */\n static get className() {\n // Name matters for Python compatibility.\n // This is a getter instead of a property because when it's a property, it\n // prevents the entire class from being tree-shaken.\n return 'Adamax';\n }\n private accBeta1: Variable;\n private iteration: Variable;\n\n private accumulatedFirstMoment: OptimizerVariable[] = [];\n private accumulatedWeightedInfNorm: OptimizerVariable[] = [];\n\n constructor(\n protected learningRate: number, protected beta1: number,\n protected beta2: number, protected epsilon: number = null,\n protected decay = 0.0) {\n super();\n\n tidy(() => {\n this.iteration = scalar(0).variable();\n this.accBeta1 = scalar(beta1).variable();\n });\n\n if (epsilon == null) {\n this.epsilon = ENGINE.backend.epsilon();\n }\n }\n\n applyGradients(variableGradients: NamedVariableMap|NamedTensor[]) {\n const variableNames = Array.isArray(variableGradients) ?\n variableGradients.map(item => item.name) :\n Object.keys(variableGradients);\n\n tidy(() => {\n const oneMinusAccBeta1 = sub(1, this.accBeta1);\n const lr =\n div(-this.learningRate, add(mul(this.iteration, this.decay), 1));\n\n variableNames.forEach((name, i) => {\n const value = ENGINE.registeredVariables[name];\n const trainable = false;\n if (this.accumulatedFirstMoment[i] == null) {\n this.accumulatedFirstMoment[i] = {\n originalName: `${name}/m`,\n variable: zerosLike(value).variable(trainable)\n };\n }\n if (this.accumulatedWeightedInfNorm[i] == null) {\n this.accumulatedWeightedInfNorm[i] = 
{\n originalName: `${name}/v`,\n variable: zerosLike(value).variable(trainable)\n };\n }\n\n const gradient = Array.isArray(variableGradients) ?\n variableGradients[i].tensor :\n variableGradients[name];\n if (gradient == null) {\n return;\n }\n\n const firstMoment = this.accumulatedFirstMoment[i].variable;\n const weightedInfNorm = this.accumulatedWeightedInfNorm[i].variable;\n\n const newFirstMoment =\n add(mul(firstMoment, this.beta1), mul(gradient, 1 - this.beta1));\n\n const ut0 = mul(weightedInfNorm, this.beta2);\n const ut1 = abs(gradient);\n\n const newWeightedInfNorm = maximum(ut0, ut1);\n\n firstMoment.assign(newFirstMoment);\n weightedInfNorm.assign(newWeightedInfNorm);\n\n const newValue =\n add(mul(div(lr, oneMinusAccBeta1),\n div(newFirstMoment, add(newWeightedInfNorm, this.epsilon))),\n value);\n\n value.assign(newValue);\n });\n\n this.iteration.assign(add(this.iteration, 1));\n this.accBeta1.assign(mul(this.accBeta1, this.beta1));\n });\n this.incrementIterations();\n }\n\n override dispose(): void {\n this.accBeta1.dispose();\n this.iteration.dispose();\n\n if (this.accumulatedFirstMoment != null) {\n dispose(this.accumulatedFirstMoment.map(v => v.variable));\n }\n if (this.accumulatedWeightedInfNorm != null) {\n dispose(this.accumulatedWeightedInfNorm.map(v => v.variable));\n }\n }\n\n override async getWeights(): Promise {\n throw new Error('getWeights() is not implemented for Adamax yet.');\n }\n\n override async setWeights(weightValues: NamedTensor[]): Promise {\n throw new Error('setWeights() is not implemented for Adamax yet.');\n }\n\n getConfig(): ConfigDict {\n return {\n 'learningRate': this.learningRate,\n 'beta1': this.beta1,\n 'beta2': this.beta2,\n 'epsilon': this.epsilon,\n 'decay': this.decay\n };\n }\n\n /** @nocollapse */\n static override fromConfig(\n cls: SerializableConstructor, config: ConfigDict): T {\n return new cls(\n config['learningRate'], config['beta1'], config['beta2'],\n config['epsilon'], config['decay']);\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {keep, tidy} from '../globals';\nimport {add} from '../ops/add';\nimport {mul} from '../ops/mul';\nimport {scalar} from '../ops/scalar';\nimport {ConfigDict, Serializable, SerializableConstructor} from '../serialization';\nimport {Scalar} from '../tensor';\nimport {NamedTensor, NamedTensorMap} from '../tensor_types';\n\nimport {Optimizer} from './optimizer';\n\n/** @doclink Optimizer */\nexport class SGDOptimizer extends Optimizer {\n /** @nocollapse */\n static get className() {\n // Name matters for Python compatibility.\n // This is a getter instead of a property because when it's a property, it\n // prevents the entire class from being tree-shaken.\n return 'SGD';\n }\n protected c: Scalar;\n\n constructor(protected learningRate: number) {\n super();\n this.setLearningRate(learningRate);\n }\n\n applyGradients(variableGradients: NamedTensorMap|NamedTensor[]) {\n const varNames = Array.isArray(variableGradients) ?\n variableGradients.map(v => v.name) :\n Object.keys(variableGradients);\n varNames.forEach((name, i) => {\n const gradient = Array.isArray(variableGradients) ?\n variableGradients[i].tensor :\n variableGradients[name];\n if (gradient == null) {\n return;\n }\n const value = ENGINE.registeredVariables[name];\n tidy(() => {\n const newValue = add(mul(this.c, gradient), value);\n value.assign(newValue);\n });\n });\n this.incrementIterations();\n }\n\n /**\n * Sets the learning rate of the optimizer.\n */\n setLearningRate(learningRate: number) {\n this.learningRate = learningRate;\n if (this.c != null) {\n this.c.dispose();\n }\n this.c = keep(scalar(-learningRate));\n }\n\n override dispose() {\n this.c.dispose();\n }\n\n override async getWeights(): Promise {\n return [await this.saveIterations()];\n }\n\n override async setWeights(weightValues: NamedTensor[]): Promise {\n weightValues = await this.extractIterations(weightValues);\n if (weightValues.length !== 0) {\n throw new Error('SGD optimizer does not have settable weights.');\n }\n }\n\n getConfig(): ConfigDict {\n return {'learningRate': this.learningRate};\n }\n\n /** @nocollapse */\n static override fromConfig(\n cls: SerializableConstructor, config: ConfigDict): T {\n return new cls(config['learningRate']);\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {dispose, tidy} from '../globals';\nimport {add} from '../ops/add';\nimport {mul} from '../ops/mul';\nimport {scalar} from '../ops/scalar';\nimport {zerosLike} from '../ops/zeros_like';\nimport {ConfigDict, Serializable, SerializableConstructor} from '../serialization';\nimport {Scalar, Tensor} from '../tensor';\nimport {NamedTensor, NamedVariableMap} from '../tensor_types';\n\nimport {OptimizerVariable} from './optimizer';\nimport {SGDOptimizer} from './sgd_optimizer';\n\n/** @doclink Optimizer */\nexport class MomentumOptimizer extends SGDOptimizer {\n /** @nocollapse */\n // Name matters for Python compatibility.\n static override get className() {\n // Name matters for Python compatibility.\n // This is a getter instead of a property because when it's a property, it\n // prevents the entire class from being tree-shaken.\n return 'Momentum';\n }\n private m: Scalar;\n private accumulations: OptimizerVariable[] = [];\n\n constructor(\n protected override learningRate: number, private momentum: number,\n private useNesterov = false) {\n super(learningRate);\n this.m = scalar(this.momentum);\n }\n\n override applyGradients(variableGradients: NamedVariableMap|NamedTensor[]) {\n const variableNames = Array.isArray(variableGradients) ?\n variableGradients.map(item => item.name) :\n Object.keys(variableGradients);\n\n variableNames.forEach((name, i) => {\n const value = ENGINE.registeredVariables[name];\n if (this.accumulations[i] == null) {\n const trainable = false;\n this.accumulations[i] = {\n originalName: `${name}/momentum`,\n variable: tidy(() => zerosLike(value).variable(trainable))\n };\n }\n\n const accumulation = this.accumulations[i].variable;\n const gradient = Array.isArray(variableGradients) ?\n variableGradients[i].tensor :\n variableGradients[name];\n if (gradient == null) {\n return;\n }\n\n tidy(() => {\n let newValue: Tensor;\n const newAccumulation = add(mul(this.m, accumulation), gradient);\n if (this.useNesterov) {\n newValue = add(\n mul(this.c, add(gradient, mul(newAccumulation, this.m))), value);\n } else {\n newValue = add(mul(this.c, newAccumulation), value);\n }\n accumulation.assign(newAccumulation);\n value.assign(newValue);\n });\n });\n this.incrementIterations();\n }\n\n override dispose(): void {\n this.m.dispose();\n if (this.accumulations != null) {\n dispose(this.accumulations.map(v => v.variable));\n }\n }\n\n /**\n * Sets the momentum of the optimizer.\n *\n * @param momentum\n */\n setMomentum(momentum: number) {\n this.momentum = momentum;\n }\n\n override async getWeights(): Promise {\n // Order matters for Python compatibility.\n return [await this.saveIterations()].concat(this.accumulations.map(\n v => ({name: v.originalName, tensor: v.variable})));\n }\n\n override async setWeights(weightValues: NamedTensor[]): Promise {\n weightValues = await 
this.extractIterations(weightValues);\n const trainable = false;\n this.accumulations = weightValues.map(\n v => ({originalName: v.name, variable: v.tensor.variable(trainable)}));\n }\n\n override getConfig(): ConfigDict {\n return {\n 'learningRate': this.learningRate,\n 'momentum': this.momentum,\n 'useNesterov': this.useNesterov\n };\n }\n\n /** @nocollapse */\n static override fromConfig(\n cls: SerializableConstructor, config: ConfigDict): T {\n return new cls(\n config['learningRate'], config['momentum'], config['useNesterov']);\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {dispose, tidy} from '../globals';\nimport {add} from '../ops/add';\nimport {div} from '../ops/div';\nimport {mul} from '../ops/mul';\nimport {sqrt} from '../ops/sqrt';\nimport {square} from '../ops/square';\nimport {sub} from '../ops/sub';\nimport {zerosLike} from '../ops/zeros_like';\nimport {ConfigDict, Serializable, SerializableConstructor} from '../serialization';\nimport {NamedTensor, NamedTensorMap} from '../tensor_types';\n\nimport {Optimizer, OptimizerVariable} from './optimizer';\n\n/** @doclink Optimizer */\nexport class RMSPropOptimizer extends Optimizer {\n /** @nocollapse */\n static get className() {\n // Name matters for Python compatibility.\n // This is a getter instead of a property because when it's a property, it\n // prevents the entire class from being tree-shaken.\n return 'RMSProp';\n }\n private centered: boolean;\n\n private accumulatedMeanSquares: OptimizerVariable[] = [];\n private accumulatedMoments: OptimizerVariable[] = [];\n private accumulatedMeanGrads: OptimizerVariable[] = [];\n\n constructor(\n protected learningRate: number, protected decay = 0.9,\n protected momentum = 0.0, protected epsilon: number = null,\n centered = false) {\n super();\n\n this.centered = centered;\n\n if (epsilon == null) {\n this.epsilon = ENGINE.backend.epsilon();\n }\n if (learningRate == null) {\n throw new Error(`learningRate for RMSPropOptimizer must be defined.`);\n }\n }\n\n applyGradients(variableGradients: NamedTensorMap|NamedTensor[]) {\n const variableNames = Array.isArray(variableGradients) ?\n variableGradients.map(item => item.name) :\n Object.keys(variableGradients);\n\n variableNames.forEach((name, i) => {\n const value = ENGINE.registeredVariables[name];\n const trainable = false;\n if (this.accumulatedMeanSquares[i] == null) {\n this.accumulatedMeanSquares[i] = {\n originalName: `${name}/rms`,\n variable: tidy(() => zerosLike(value).variable(trainable))\n };\n }\n if (this.accumulatedMoments[i] == null) {\n this.accumulatedMoments[i] = {\n originalName: `${name}/momentum`,\n variable: tidy(() => zerosLike(value).variable(trainable))\n };\n }\n if (this.accumulatedMeanGrads[i] == null && this.centered) {\n this.accumulatedMeanGrads[i] = {\n originalName: 
`${name}/mg`,\n variable: tidy(() => zerosLike(value).variable(trainable))\n };\n }\n\n const gradient = Array.isArray(variableGradients) ?\n variableGradients[i].tensor :\n variableGradients[name];\n if (gradient == null) {\n return;\n }\n\n const accumulatedMeanSquare = this.accumulatedMeanSquares[i].variable;\n const accumulatedMoments = this.accumulatedMoments[i].variable;\n tidy(() => {\n const newAccumulatedMeanSquare =\n add(mul(accumulatedMeanSquare, this.decay),\n mul(square(gradient), 1 - this.decay));\n\n if (this.centered) {\n const accumulatedMeanGrad = this.accumulatedMeanGrads[i].variable;\n // Centered gradient\n const newAccumulatedMeanGrad =\n add(mul(accumulatedMeanGrad, this.decay),\n mul(gradient, 1 - this.decay));\n\n const gradContribution =\n div(mul(gradient, this.learningRate),\n sqrt(\n sub(newAccumulatedMeanSquare,\n add(square(newAccumulatedMeanGrad), this.epsilon))));\n const newAccumulatedMoments =\n add(mul(accumulatedMoments, this.momentum), gradContribution);\n\n accumulatedMeanSquare.assign(newAccumulatedMeanSquare);\n accumulatedMeanGrad.assign(newAccumulatedMeanGrad);\n accumulatedMoments.assign(newAccumulatedMoments);\n\n const newValue = sub(value, newAccumulatedMoments);\n value.assign(newValue);\n } else {\n // Plain gradient\n const newAccumulatedMeanSquare =\n add(mul(accumulatedMeanSquare, this.decay),\n mul(square(gradient), 1 - this.decay));\n\n const newAccumulatedMoments =\n add(mul(accumulatedMoments, this.momentum),\n div(mul(gradient, this.learningRate),\n sqrt(add(newAccumulatedMeanSquare, this.epsilon))));\n\n accumulatedMeanSquare.assign(newAccumulatedMeanSquare);\n accumulatedMoments.assign(newAccumulatedMoments);\n\n const newValue = sub(value, newAccumulatedMoments);\n value.assign(newValue);\n }\n });\n });\n this.incrementIterations();\n }\n\n override dispose(): void {\n if (this.accumulatedMeanSquares != null) {\n dispose(this.accumulatedMeanSquares.map(v => v.variable));\n }\n if (this.accumulatedMeanGrads != null && this.centered) {\n dispose(this.accumulatedMeanGrads.map(v => v.variable));\n }\n if (this.accumulatedMoments != null) {\n dispose(this.accumulatedMoments.map(v => v.variable));\n }\n }\n\n override async getWeights(): Promise {\n // Order matters for Python compatibility.\n const variables: OptimizerVariable[] =\n [...this.accumulatedMeanSquares, ...this.accumulatedMoments];\n if (this.centered) {\n variables.push(...this.accumulatedMeanGrads);\n }\n return [await this.saveIterations()].concat(\n variables.map(v => ({name: v.originalName, tensor: v.variable})));\n }\n\n override async setWeights(weightValues: NamedTensor[]): Promise {\n weightValues = await this.extractIterations(weightValues);\n const variableCount =\n this.centered ? 
weightValues.length / 3 : weightValues.length / 2;\n const trainable = false;\n this.accumulatedMeanSquares =\n weightValues.slice(0, variableCount).map(v => ({\n originalName: v.name,\n variable: v.tensor.variable(\n trainable)\n }));\n this.accumulatedMoments =\n weightValues.slice(variableCount, variableCount * 2)\n .map(v => ({\n originalName: v.name,\n variable: v.tensor.variable(trainable)\n }));\n if (this.centered) {\n this.accumulatedMeanGrads =\n weightValues.slice(variableCount * 2, variableCount * 3)\n .map(v => ({\n originalName: v.name,\n variable: v.tensor.variable(trainable)\n }));\n }\n }\n\n getConfig(): ConfigDict {\n return {\n 'learningRate': this.learningRate,\n 'decay': this.decay,\n 'momentum': this.momentum,\n 'epsilon': this.epsilon,\n 'centered': this.centered\n };\n }\n\n /** @nocollapse */\n static override fromConfig(\n cls: SerializableConstructor, config: ConfigDict): T {\n return new cls(\n config['learningRate'], config['decay'], config['momentum'],\n config['epsilon'], config['centered']);\n }\n}\n","/**\n * @license\n * Copyright 2022 Google LLC.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {AdadeltaOptimizer} from './adadelta_optimizer';\nimport {AdagradOptimizer} from './adagrad_optimizer';\nimport {AdamOptimizer} from './adam_optimizer';\nimport {AdamaxOptimizer} from './adamax_optimizer';\nimport {MomentumOptimizer} from './momentum_optimizer';\nimport {RMSPropOptimizer} from './rmsprop_optimizer';\nimport {SGDOptimizer} from './sgd_optimizer';\nimport {registerClass} from '../serialization';\n\nconst OPTIMIZERS = [\n AdadeltaOptimizer,\n AdagradOptimizer,\n AdamOptimizer,\n AdamaxOptimizer,\n MomentumOptimizer,\n RMSPropOptimizer,\n SGDOptimizer,\n];\n\nexport function registerOptimizers() {\n for (const optimizer of OPTIMIZERS) {\n registerClass(optimizer);\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * IOHandlers related to files, such as browser-triggered file downloads,\n * user-selected files in browser.\n */\n\nimport '../flags';\nimport {env} from '../environment';\n\nimport {basename, concatenateArrayBuffers, getModelArtifactsForJSON, getModelArtifactsInfoForJSON, getModelJSONForModelArtifacts} from './io_utils';\nimport {IORouter, IORouterRegistry} from './router_registry';\nimport {IOHandler, ModelArtifacts, ModelJSON, SaveResult, WeightsManifestConfig, WeightsManifestEntry} from './types';\n\nconst DEFAULT_FILE_NAME_PREFIX = 'model';\nconst DEFAULT_JSON_EXTENSION_NAME = '.json';\nconst DEFAULT_WEIGHT_DATA_EXTENSION_NAME = '.weights.bin';\n\nfunction defer(f: () => T): Promise {\n return new Promise(resolve => setTimeout(resolve)).then(f);\n}\n\nexport class BrowserDownloads implements IOHandler {\n private readonly modelJsonFileName: string;\n private readonly weightDataFileName: string;\n private readonly modelJsonAnchor: HTMLAnchorElement;\n private readonly weightDataAnchor: HTMLAnchorElement;\n\n static readonly URL_SCHEME = 'downloads://';\n\n constructor(fileNamePrefix?: string) {\n if (!env().getBool('IS_BROWSER')) {\n // TODO(cais): Provide info on what IOHandlers are available under the\n // current environment.\n throw new Error(\n 'browserDownloads() cannot proceed because the current environment ' +\n 'is not a browser.');\n }\n\n if (fileNamePrefix.startsWith(BrowserDownloads.URL_SCHEME)) {\n fileNamePrefix = fileNamePrefix.slice(BrowserDownloads.URL_SCHEME.length);\n }\n if (fileNamePrefix == null || fileNamePrefix.length === 0) {\n fileNamePrefix = DEFAULT_FILE_NAME_PREFIX;\n }\n\n this.modelJsonFileName = fileNamePrefix + DEFAULT_JSON_EXTENSION_NAME;\n this.weightDataFileName =\n fileNamePrefix + DEFAULT_WEIGHT_DATA_EXTENSION_NAME;\n }\n\n async save(modelArtifacts: ModelArtifacts): Promise {\n if (typeof (document) === 'undefined') {\n throw new Error(\n 'Browser downloads are not supported in ' +\n 'this environment since `document` is not present');\n }\n const weightsURL = window.URL.createObjectURL(new Blob(\n [modelArtifacts.weightData], {type: 'application/octet-stream'}));\n\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error(\n 'BrowserDownloads.save() does not support saving model topology ' +\n 'in binary formats yet.');\n } else {\n const weightsManifest: WeightsManifestConfig = [{\n paths: ['./' + this.weightDataFileName],\n weights: modelArtifacts.weightSpecs\n }];\n const modelJSON: ModelJSON =\n getModelJSONForModelArtifacts(modelArtifacts, weightsManifest);\n\n const modelJsonURL = window.URL.createObjectURL(\n new Blob([JSON.stringify(modelJSON)], {type: 'application/json'}));\n\n // If anchor elements are not provided, create them without attaching them\n // to parents, so that the downloaded file names can be controlled.\n const jsonAnchor = 
this.modelJsonAnchor == null ?\n document.createElement('a') :\n this.modelJsonAnchor;\n jsonAnchor.download = this.modelJsonFileName;\n jsonAnchor.href = modelJsonURL;\n // Trigger downloads by evoking a click event on the download anchors.\n // When multiple downloads are started synchronously, Firefox will only\n // save the last one.\n await defer(() => jsonAnchor.dispatchEvent(new MouseEvent('click')));\n\n if (modelArtifacts.weightData != null) {\n const weightDataAnchor = this.weightDataAnchor == null ?\n document.createElement('a') :\n this.weightDataAnchor;\n weightDataAnchor.download = this.weightDataFileName;\n weightDataAnchor.href = weightsURL;\n await defer(\n () => weightDataAnchor.dispatchEvent(new MouseEvent('click')));\n }\n\n return {modelArtifactsInfo: getModelArtifactsInfoForJSON(modelArtifacts)};\n }\n }\n}\n\nclass BrowserFiles implements IOHandler {\n private readonly jsonFile: File;\n private readonly weightsFiles: File[];\n\n constructor(files: File[]) {\n if (files == null || files.length < 1) {\n throw new Error(\n `When calling browserFiles, at least 1 file is required, ` +\n `but received ${files}`);\n }\n this.jsonFile = files[0];\n this.weightsFiles = files.slice(1);\n }\n\n async load(): Promise {\n return new Promise((resolve, reject) => {\n const jsonReader = new FileReader();\n jsonReader.onload = (event: Event) => {\n // tslint:disable-next-line:no-any\n const modelJSON = JSON.parse((event.target as any).result) as ModelJSON;\n\n const modelTopology = modelJSON.modelTopology;\n if (modelTopology == null) {\n reject(new Error(`modelTopology field is missing from file ${\n this.jsonFile.name}`));\n return;\n }\n\n const weightsManifest = modelJSON.weightsManifest;\n if (weightsManifest == null) {\n reject(new Error(`weightManifest field is missing from file ${\n this.jsonFile.name}`));\n return;\n }\n\n if (this.weightsFiles.length === 0) {\n resolve({modelTopology});\n return;\n }\n\n const modelArtifactsPromise = getModelArtifactsForJSON(\n modelJSON, (weightsManifest) => this.loadWeights(weightsManifest));\n resolve(modelArtifactsPromise);\n };\n\n jsonReader.onerror = error => reject(\n `Failed to read model topology and weights manifest JSON ` +\n `from file '${this.jsonFile.name}'. 
BrowserFiles supports loading ` +\n `Keras-style tf.Model artifacts only.`);\n jsonReader.readAsText(this.jsonFile);\n });\n }\n\n private loadWeights(weightsManifest: WeightsManifestConfig): Promise<[\n /* weightSpecs */ WeightsManifestEntry[], /* weightData */ ArrayBuffer\n ]> {\n const weightSpecs: WeightsManifestEntry[] = [];\n const paths: string[] = [];\n for (const entry of weightsManifest) {\n weightSpecs.push(...entry.weights);\n paths.push(...entry.paths);\n }\n\n const pathToFile: {[path: string]: File} =\n this.checkManifestAndWeightFiles(weightsManifest);\n\n const promises: Array> =\n paths.map(path => this.loadWeightsFile(path, pathToFile[path]));\n\n return Promise.all(promises).then(\n buffers => [weightSpecs, concatenateArrayBuffers(buffers)]);\n }\n\n private loadWeightsFile(path: string, file: File): Promise {\n return new Promise((resolve, reject) => {\n const weightFileReader = new FileReader();\n weightFileReader.onload = (event: Event) => {\n // tslint:disable-next-line:no-any\n const weightData = (event.target as any).result as ArrayBuffer;\n resolve(weightData);\n };\n weightFileReader.onerror = error =>\n reject(`Failed to weights data from file of path '${path}'.`);\n weightFileReader.readAsArrayBuffer(file);\n });\n }\n\n /**\n * Check the compatibility between weights manifest and weight files.\n */\n private checkManifestAndWeightFiles(manifest: WeightsManifestConfig):\n {[path: string]: File} {\n const basenames: string[] = [];\n const fileNames = this.weightsFiles.map(file => basename(file.name));\n const pathToFile: {[path: string]: File} = {};\n for (const group of manifest) {\n group.paths.forEach(path => {\n const pathBasename = basename(path);\n if (basenames.indexOf(pathBasename) !== -1) {\n throw new Error(\n `Duplicate file basename found in weights manifest: ` +\n `'${pathBasename}'`);\n }\n basenames.push(pathBasename);\n if (fileNames.indexOf(pathBasename) === -1) {\n throw new Error(\n `Weight file with basename '${pathBasename}' is not provided.`);\n } else {\n pathToFile[path] = this.weightsFiles[fileNames.indexOf(pathBasename)];\n }\n });\n }\n\n if (basenames.length !== this.weightsFiles.length) {\n throw new Error(\n `Mismatch in the number of files in weights manifest ` +\n `(${basenames.length}) and the number of weight files provided ` +\n `(${this.weightsFiles.length}).`);\n }\n return pathToFile;\n }\n}\n\nexport const browserDownloadsRouter: IORouter = (url: string|string[]) => {\n if (!env().getBool('IS_BROWSER')) {\n return null;\n } else {\n if (!Array.isArray(url) && url.startsWith(BrowserDownloads.URL_SCHEME)) {\n return browserDownloads(url.slice(BrowserDownloads.URL_SCHEME.length));\n } else {\n return null;\n }\n }\n};\nIORouterRegistry.registerSaveRouter(browserDownloadsRouter);\n\n/**\n * Creates an IOHandler that triggers file downloads from the browser.\n *\n * The returned `IOHandler` instance can be used as model exporting methods such\n * as `tf.Model.save` and supports only saving.\n *\n * ```js\n * const model = tf.sequential();\n * model.add(tf.layers.dense(\n * {units: 1, inputShape: [10], activation: 'sigmoid'}));\n * const saveResult = await model.save('downloads://mymodel');\n * // This will trigger downloading of two files:\n * // 'mymodel.json' and 'mymodel.weights.bin'.\n * console.log(saveResult);\n * ```\n *\n * @param fileNamePrefix Prefix name of the files to be downloaded. For use with\n * `tf.Model`, `fileNamePrefix` should follow either of the following two\n * formats:\n * 1. 
`null` or `undefined`, in which case the default file\n * names will be used:\n * - 'model.json' for the JSON file containing the model topology and\n * weights manifest.\n * - 'model.weights.bin' for the binary file containing the binary weight\n * values.\n * 2. A single string or an Array of a single string, as the file name prefix.\n * For example, if `'foo'` is provided, the downloaded JSON\n * file and binary weights file will be named 'foo.json' and\n * 'foo.weights.bin', respectively.\n * @param config Additional configuration for triggering downloads.\n * @returns An instance of `BrowserDownloads` `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function browserDownloads(fileNamePrefix = 'model'): IOHandler {\n return new BrowserDownloads(fileNamePrefix);\n}\n\n/**\n * Creates an IOHandler that loads model artifacts from user-selected files.\n *\n * This method can be used for loading from files such as user-selected files\n * in the browser.\n * When used in conjunction with `tf.loadLayersModel`, an instance of\n * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.\n *\n * ```js\n * // Note: This code snippet won't run properly without the actual file input\n * // elements in the HTML DOM.\n *\n * // Suppose there are two HTML file input (``)\n * // elements.\n * const uploadJSONInput = document.getElementById('upload-json');\n * const uploadWeightsInput = document.getElementById('upload-weights');\n * const model = await tf.loadLayersModel(tf.io.browserFiles(\n * [uploadJSONInput.files[0], uploadWeightsInput.files[0]]));\n * ```\n *\n * @param files `File`s to load from. Currently, this function supports only\n * loading from files that contain Keras-style models (i.e., `tf.Model`s), for\n * which an `Array` of `File`s is expected (in that order):\n * - A JSON file containing the model topology and weight manifest.\n * - Optionally, one or more binary files containing the binary weights.\n * These files must have names that match the paths in the `weightsManifest`\n * contained by the aforementioned JSON file, or errors will be thrown\n * during loading. These weights files have the same format as the ones\n * generated by `tensorflowjs_converter` that comes with the `tensorflowjs`\n * Python PIP package. If no weights files are provided, only the model\n * topology will be loaded from the JSON file above.\n * @returns An instance of `Files` `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function browserFiles(files: File[]): IOHandler {\n return new BrowserFiles(files);\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {assert} from '../util';\n\nimport {OnProgressCallback} from './types';\n\n/**\n * Monitor Promise.all progress, fire onProgress callback function.\n *\n * @param promises Promise list going to be monitored\n * @param onProgress Callback function. Fired when a promise resolved.\n * @param startFraction Optional fraction start. Default to 0.\n * @param endFraction Optional fraction end. Default to 1.\n */\nexport function monitorPromisesProgress(\n promises: Array>, onProgress: OnProgressCallback,\n startFraction?: number, endFraction?: number) {\n checkPromises(promises);\n startFraction = startFraction == null ? 0 : startFraction;\n endFraction = endFraction == null ? 1 : endFraction;\n checkFraction(startFraction, endFraction);\n let resolvedPromise = 0;\n\n const registerMonitor = (promise: Promise<{}>) => {\n promise.then(value => {\n const fraction = startFraction +\n ++resolvedPromise / promises.length * (endFraction - startFraction);\n // pass fraction as parameter to callback function.\n onProgress(fraction);\n return value;\n });\n return promise;\n };\n\n function checkPromises(promises: Array>): void {\n assert(\n promises != null && Array.isArray(promises) && promises.length > 0,\n () => 'promises must be a none empty array');\n }\n\n function checkFraction(startFraction: number, endFraction: number): void {\n assert(\n startFraction >= 0 && startFraction <= 1,\n () => `Progress fraction must be in range [0, 1], but ` +\n `got startFraction ${startFraction}`);\n assert(\n endFraction >= 0 && endFraction <= 1,\n () => `Progress fraction must be in range [0, 1], but ` +\n `got endFraction ${endFraction}`);\n assert(\n endFraction >= startFraction,\n () => `startFraction must be no more than endFraction, but ` +\n `got startFraction ${startFraction} and endFraction ` +\n `${endFraction}`);\n }\n\n return Promise.all(promises.map(registerMonitor));\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env} from '../environment';\n\nimport {NamedTensorMap} from '../tensor_types';\nimport * as util from '../util';\nimport {decodeWeights} from './io_utils';\nimport {monitorPromisesProgress} from './progress';\nimport {DTYPE_VALUE_SIZE_MAP, LoadOptions, WeightsManifestConfig, WeightsManifestEntry} from './types';\n\n/**\n * Reads binary weights data from a number of URLs.\n *\n * @param fetchURLs URLs to send the HTTP requests at, using `fetch` calls.\n * @param requestOptions RequestInit (options) for the HTTP requests.\n * @param fetchFunc Optional overriding value for the `window.fetch` function.\n * @param onProgress Optional, progress callback function, fired periodically\n * before the load is completed.\n * @returns A `Promise` of an Array of `ArrayBuffer`. The Array has the same\n * length as `fetchURLs`.\n */\nexport async function loadWeightsAsArrayBuffer(\n fetchURLs: string[], loadOptions?: LoadOptions): Promise {\n if (loadOptions == null) {\n loadOptions = {};\n }\n\n const fetchFunc = loadOptions.fetchFunc == null ? env().platform.fetch :\n loadOptions.fetchFunc;\n\n // Create the requests for all of the weights in parallel.\n const requests = fetchURLs.map(\n fetchURL =>\n fetchFunc(fetchURL, loadOptions.requestInit, {isBinary: true}));\n\n const fetchStartFraction = 0;\n const fetchEndFraction = 0.5;\n\n const responses = loadOptions.onProgress == null ?\n await Promise.all(requests) :\n await monitorPromisesProgress(\n requests, loadOptions.onProgress, fetchStartFraction,\n fetchEndFraction);\n\n const bufferPromises = responses.map(response => response.arrayBuffer());\n\n const bufferStartFraction = 0.5;\n const bufferEndFraction = 1;\n\n const buffers = loadOptions.onProgress == null ?\n await Promise.all(bufferPromises) :\n await monitorPromisesProgress(\n bufferPromises, loadOptions.onProgress, bufferStartFraction,\n bufferEndFraction);\n return buffers;\n}\n\n/**\n * Reads a weights manifest JSON configuration, fetches the weights and\n * returns them as `Tensor`s.\n *\n * @param manifest The weights manifest JSON.\n * @param filePathPrefix The path prefix for filenames given in the manifest.\n * Defaults to the empty string.\n * @param weightNames The names of the weights to be fetched.\n */\nexport async function loadWeights(\n manifest: WeightsManifestConfig, filePathPrefix = '',\n weightNames?: string[],\n requestInit?: RequestInit): Promise {\n // TODO(nsthorat): Groups are currently fetched atomically. If you need a\n // single weight from a group, the whole group will be fetched. 
At a future\n // date, we should support fetching only the individual shards within a\n // group that are needed to reconstruct the requested weight.\n // TODO(cais): Use `decodeWeights` for implementation.\n\n const fetchWeights = (fetchUrls: string[]) =>\n loadWeightsAsArrayBuffer(fetchUrls, {requestInit});\n const loadWeights = weightsLoaderFactory(fetchWeights);\n\n return loadWeights(manifest, filePathPrefix, weightNames);\n}\n\n/**\n * Creates a function, which reads a weights manifest JSON configuration,\n * fetches the weight files using the specified function and returns them as\n * `Tensor`s.\n *\n * ```js\n * // example for creating a nodejs weight loader, which reads the weight files\n * // from disk using fs.readFileSync\n *\n * import * as fs from 'fs'\n *\n * const fetchWeightsFromDisk = (filePaths: string[]) =>\n * filePaths.map(filePath => fs.readFileSync(filePath).buffer)\n *\n * const loadWeights = tf.io.weightsLoaderFactory(fetchWeightsFromDisk)\n *\n * const manifest = JSON.parse(\n * fs.readFileSync('./my_model-weights_manifest').toString()\n * )\n * const weightMap = await loadWeights(manifest, './')\n * ```\n * @param fetchWeightsFunction The function used for fetching the weight files.\n * @returns Weight loading function.\n */\nexport function weightsLoaderFactory(\n fetchWeightsFunction: (fetchUrls: string[]) => Promise):\n (manifest: WeightsManifestConfig, filePathPrefix?: string,\n weightNames?: string[]) => Promise {\n return async(\n manifest: WeightsManifestConfig, filePathPrefix = '',\n weightNames?: string[]): Promise => {\n // Collect all the groups, weights, and their relative offsets to be\n // fetched.\n const groupIndicesToFetchMap = manifest.map(() => false);\n const groupWeightsToFetch: {\n [group: number]: Array<{\n manifestEntry: WeightsManifestEntry; groupOffset: number;\n sizeBytes: number;\n }>\n } = {};\n const weightsFound =\n weightNames != null ? weightNames.map(() => false) : [];\n const allManifestWeightNames: string[] = [];\n manifest.forEach((manifestGroupConfig, groupIndex) => {\n let groupOffset = 0;\n manifestGroupConfig.weights.forEach(weightsEntry => {\n const rawDtype = ('quantization' in weightsEntry) ?\n weightsEntry.quantization.dtype :\n weightsEntry.dtype;\n\n const weightsBytes = DTYPE_VALUE_SIZE_MAP[rawDtype] *\n util.sizeFromShape(weightsEntry.shape);\n\n const enqueueWeightsForFetchingFn = () => {\n groupIndicesToFetchMap[groupIndex] = true;\n if (groupWeightsToFetch[groupIndex] == null) {\n groupWeightsToFetch[groupIndex] = [];\n }\n\n groupWeightsToFetch[groupIndex].push({\n manifestEntry: weightsEntry,\n groupOffset,\n sizeBytes: weightsBytes\n });\n };\n\n if (weightNames != null) {\n weightNames.forEach((weightName, weightIndex) => {\n if (weightName === weightsEntry.name) {\n enqueueWeightsForFetchingFn();\n weightsFound[weightIndex] = true;\n }\n });\n } else {\n enqueueWeightsForFetchingFn();\n }\n\n allManifestWeightNames.push(weightsEntry.name);\n groupOffset += weightsBytes;\n });\n });\n\n if (!weightsFound.every(found => found)) {\n const weightsNotFound = weightNames.filter((_, i) => !weightsFound[i]);\n throw new Error(\n `Could not find weights in manifest with names: ` +\n `${weightsNotFound.join(', ')}. 
\\n` +\n `Manifest JSON has weights with names: ` +\n `${allManifestWeightNames.join(', ')}.`);\n }\n\n // Convert the one-hot boolean groupId => shouldFetch map to a list of group\n // IDs.\n const groupIndicesToFetch =\n groupIndicesToFetchMap.reduce((accumulator, shouldFetch, i) => {\n if (shouldFetch) {\n accumulator.push(i);\n }\n return accumulator;\n }, []);\n\n const fetchUrls: string[] = [];\n groupIndicesToFetch.forEach(i => {\n manifest[i].paths.forEach(filepath => {\n const fetchUrl = filePathPrefix +\n (!filePathPrefix.endsWith('/') ? '/' : '') + filepath;\n fetchUrls.push(fetchUrl);\n });\n });\n const buffers = await fetchWeightsFunction(fetchUrls);\n\n const weightsTensorMap: NamedTensorMap = {};\n let bufferIndexOffset = 0;\n groupIndicesToFetch.forEach(i => {\n const numBuffers = manifest[i].paths.length;\n\n let groupBytes = 0;\n for (let i = 0; i < numBuffers; i++) {\n groupBytes += buffers[bufferIndexOffset + i].byteLength;\n }\n\n // Create a buffer for the whole group.\n const groupBuffer = new ArrayBuffer(groupBytes);\n const groupByteBuffer = new Uint8Array(groupBuffer);\n let groupBufferOffset = 0;\n for (let i = 0; i < numBuffers; i++) {\n const buffer = new Uint8Array(buffers[bufferIndexOffset + i]);\n groupByteBuffer.set(buffer, groupBufferOffset);\n groupBufferOffset += buffer.byteLength;\n }\n\n const weightsEntries = groupWeightsToFetch[i];\n weightsEntries.forEach(weightsEntry => {\n const byteBuffer = groupBuffer.slice(\n weightsEntry.groupOffset,\n weightsEntry.groupOffset + weightsEntry.sizeBytes);\n const nameToTensorMap =\n decodeWeights(byteBuffer, [weightsEntry.manifestEntry]);\n for (const name in nameToTensorMap) {\n weightsTensorMap[name] = nameToTensorMap[name];\n }\n });\n\n bufferIndexOffset += numBuffers;\n });\n\n return weightsTensorMap;\n };\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * IOHandler implementations based on HTTP requests in the web browser.\n *\n * Uses [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).\n */\n\nimport {env} from '../environment';\n\nimport {assert} from '../util';\nimport {concatenateArrayBuffers, getModelArtifactsForJSON, getModelArtifactsInfoForJSON, getModelJSONForModelArtifacts, getWeightSpecs} from './io_utils';\nimport {IORouter, IORouterRegistry} from './router_registry';\nimport {IOHandler, LoadOptions, ModelArtifacts, ModelJSON, OnProgressCallback, SaveResult, WeightsManifestConfig, WeightsManifestEntry} from './types';\nimport {loadWeightsAsArrayBuffer} from './weights_loader';\n\nconst OCTET_STREAM_MIME_TYPE = 'application/octet-stream';\nconst JSON_TYPE = 'application/json';\nexport class HTTPRequest implements IOHandler {\n protected readonly path: string;\n protected readonly requestInit: RequestInit;\n\n private readonly fetch: Function;\n private readonly weightUrlConverter: (weightName: string) => Promise;\n\n readonly DEFAULT_METHOD = 'POST';\n\n static readonly URL_SCHEME_REGEX = /^https?:\\/\\//;\n\n private readonly weightPathPrefix: string;\n private readonly onProgress: OnProgressCallback;\n\n constructor(path: string, loadOptions?: LoadOptions) {\n if (loadOptions == null) {\n loadOptions = {};\n }\n this.weightPathPrefix = loadOptions.weightPathPrefix;\n this.onProgress = loadOptions.onProgress;\n this.weightUrlConverter = loadOptions.weightUrlConverter;\n\n if (loadOptions.fetchFunc != null) {\n assert(\n typeof loadOptions.fetchFunc === 'function',\n () => 'Must pass a function that matches the signature of ' +\n '`fetch` (see ' +\n 'https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)');\n this.fetch = loadOptions.fetchFunc;\n } else {\n this.fetch = env().platform.fetch;\n }\n\n assert(\n path != null && path.length > 0,\n () => 'URL path for http must not be null, undefined or ' +\n 'empty.');\n\n if (Array.isArray(path)) {\n assert(\n path.length === 2,\n () => 'URL paths for http must have a length of 2, ' +\n `(actual length is ${path.length}).`);\n }\n this.path = path;\n\n if (loadOptions.requestInit != null &&\n loadOptions.requestInit.body != null) {\n throw new Error(\n 'requestInit is expected to have no pre-existing body, but has one.');\n }\n this.requestInit = loadOptions.requestInit || {};\n }\n\n async save(modelArtifacts: ModelArtifacts): Promise {\n if (modelArtifacts.modelTopology instanceof ArrayBuffer) {\n throw new Error(\n 'BrowserHTTPRequest.save() does not support saving model topology ' +\n 'in binary formats yet.');\n }\n\n const init = Object.assign({method: this.DEFAULT_METHOD}, this.requestInit);\n init.body = new FormData();\n\n const weightsManifest: WeightsManifestConfig = [{\n paths: ['./model.weights.bin'],\n weights: modelArtifacts.weightSpecs,\n }];\n const 
modelTopologyAndWeightManifest: ModelJSON =\n getModelJSONForModelArtifacts(modelArtifacts, weightsManifest);\n\n init.body.append(\n 'model.json',\n new Blob(\n [JSON.stringify(modelTopologyAndWeightManifest)],\n {type: JSON_TYPE}),\n 'model.json');\n\n if (modelArtifacts.weightData != null) {\n init.body.append(\n 'model.weights.bin',\n new Blob([modelArtifacts.weightData], {type: OCTET_STREAM_MIME_TYPE}),\n 'model.weights.bin');\n }\n\n const response = await this.fetch(this.path, init);\n\n if (response.ok) {\n return {\n modelArtifactsInfo: getModelArtifactsInfoForJSON(modelArtifacts),\n responses: [response],\n };\n } else {\n throw new Error(\n `BrowserHTTPRequest.save() failed due to HTTP response status ` +\n `${response.status}.`);\n }\n }\n\n /**\n * Load model artifacts via HTTP request(s).\n *\n * See the documentation to `tf.io.http` for details on the saved\n * artifacts.\n *\n * @returns The loaded model artifacts (if loading succeeds).\n */\n async load(): Promise {\n const modelConfigRequest = await this.fetch(this.path, this.requestInit);\n\n if (!modelConfigRequest.ok) {\n throw new Error(\n `Request to ${this.path} failed with status code ` +\n `${modelConfigRequest.status}. Please verify this URL points to ` +\n `the model JSON of the model to load.`);\n }\n let modelJSON: ModelJSON;\n try {\n modelJSON = await modelConfigRequest.json();\n } catch (e) {\n let message = `Failed to parse model JSON of response from ${this.path}.`;\n // TODO(nsthorat): Remove this after some time when we're comfortable that\n // .pb files are mostly gone.\n if (this.path.endsWith('.pb')) {\n message += ' Your path contains a .pb file extension. ' +\n 'Support for .pb models have been removed in TensorFlow.js 1.0 ' +\n 'in favor of .json models. You can re-convert your Python ' +\n 'TensorFlow model using the TensorFlow.js 1.0 conversion scripts ' +\n 'or you can convert your.pb models with the \\'pb2json\\'' +\n 'NPM script in the tensorflow/tfjs-converter repository.';\n } else {\n message += ' Please make sure the server is serving valid ' +\n 'JSON for this request.';\n }\n throw new Error(message);\n }\n\n // We do not allow both modelTopology and weightsManifest to be missing.\n const modelTopology = modelJSON.modelTopology;\n const weightsManifest = modelJSON.weightsManifest;\n if (modelTopology == null && weightsManifest == null) {\n throw new Error(\n `The JSON from HTTP path ${this.path} contains neither model ` +\n `topology or manifest for weights.`);\n }\n\n return getModelArtifactsForJSON(\n modelJSON, (weightsManifest) => this.loadWeights(weightsManifest));\n }\n\n private async loadWeights(weightsManifest: WeightsManifestConfig):\n Promise<[WeightsManifestEntry[], ArrayBuffer]> {\n const weightPath = Array.isArray(this.path) ? 
this.path[1] : this.path;\n const [prefix, suffix] = parseUrl(weightPath);\n const pathPrefix = this.weightPathPrefix || prefix;\n\n const weightSpecs = getWeightSpecs(weightsManifest);\n\n const fetchURLs: string[] = [];\n const urlPromises: Array> = [];\n for (const weightsGroup of weightsManifest) {\n for (const path of weightsGroup.paths) {\n if (this.weightUrlConverter != null) {\n urlPromises.push(this.weightUrlConverter(path));\n } else {\n fetchURLs.push(pathPrefix + path + suffix);\n }\n }\n }\n\n if (this.weightUrlConverter) {\n fetchURLs.push(...await Promise.all(urlPromises));\n }\n\n const buffers = await loadWeightsAsArrayBuffer(fetchURLs, {\n requestInit: this.requestInit,\n fetchFunc: this.fetch,\n onProgress: this.onProgress\n });\n return [weightSpecs, concatenateArrayBuffers(buffers)];\n }\n}\n\n/**\n * Extract the prefix and suffix of the url, where the prefix is the path before\n * the last file, and suffix is the search params after the last file.\n * ```\n * const url = 'http://tfhub.dev/model/1/tensorflowjs_model.pb?tfjs-format=file'\n * [prefix, suffix] = parseUrl(url)\n * // prefix = 'http://tfhub.dev/model/1/'\n * // suffix = '?tfjs-format=file'\n * ```\n * @param url the model url to be parsed.\n */\nexport function parseUrl(url: string): [string, string] {\n const lastSlash = url.lastIndexOf('/');\n const lastSearchParam = url.lastIndexOf('?');\n const prefix = url.substring(0, lastSlash);\n const suffix =\n lastSearchParam > lastSlash ? url.substring(lastSearchParam) : '';\n return [prefix + '/', suffix];\n}\n\nexport function isHTTPScheme(url: string): boolean {\n return url.match(HTTPRequest.URL_SCHEME_REGEX) != null;\n}\n\nexport const httpRouter: IORouter =\n (url: string, loadOptions?: LoadOptions) => {\n if (typeof fetch === 'undefined' &&\n (loadOptions == null || loadOptions.fetchFunc == null)) {\n // `http` uses `fetch` or `node-fetch`, if one wants to use it in\n // an environment that is not the browser or node they have to setup a\n // global fetch polyfill.\n return null;\n } else {\n let isHTTP = true;\n if (Array.isArray(url)) {\n isHTTP = url.every(urlItem => isHTTPScheme(urlItem));\n } else {\n isHTTP = isHTTPScheme(url);\n }\n if (isHTTP) {\n return http(url, loadOptions);\n }\n }\n return null;\n };\nIORouterRegistry.registerSaveRouter(httpRouter);\nIORouterRegistry.registerLoadRouter(httpRouter);\n\n/**\n * Creates an IOHandler subtype that sends model artifacts to HTTP server.\n *\n * An HTTP request of the `multipart/form-data` mime type will be sent to the\n * `path` URL. The form data includes artifacts that represent the topology\n * and/or weights of the model. 
In the case of Keras-style `tf.Model`, two\n * blobs (files) exist in form-data:\n * - A JSON file consisting of `modelTopology` and `weightsManifest`.\n * - A binary weights file consisting of the concatenated weight values.\n * These files are in the same format as the one generated by\n * [tfjs_converter](https://js.tensorflow.org/tutorials/import-keras.html).\n *\n * The following code snippet exemplifies the client-side code that uses this\n * function:\n *\n * ```js\n * const model = tf.sequential();\n * model.add(\n * tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));\n *\n * const saveResult = await model.save(tf.io.http(\n * 'http://model-server:5000/upload', {requestInit: {method: 'PUT'}}));\n * console.log(saveResult);\n * ```\n *\n * If the default `POST` method is to be used, without any custom parameters\n * such as headers, you can simply pass an HTTP or HTTPS URL to `model.save`:\n *\n * ```js\n * const saveResult = await model.save('http://model-server:5000/upload');\n * ```\n *\n * The following GitHub Gist\n * https://gist.github.com/dsmilkov/1b6046fd6132d7408d5257b0976f7864\n * implements a server based on [flask](https://github.com/pallets/flask) that\n * can receive the request. Upon receiving the model artifacts via the requst,\n * this particular server reconstitutes instances of [Keras\n * Models](https://keras.io/models/model/) in memory.\n *\n *\n * @param path A URL path to the model.\n * Can be an absolute HTTP path (e.g.,\n * 'http://localhost:8000/model-upload)') or a relative path (e.g.,\n * './model-upload').\n * @param requestInit Request configurations to be used when sending\n * HTTP request to server using `fetch`. It can contain fields such as\n * `method`, `credentials`, `headers`, `mode`, etc. See\n * https://developer.mozilla.org/en-US/docs/Web/API/Request/Request\n * for more information. `requestInit` must not have a body, because the\n * body will be set by TensorFlow.js. File blobs representing the model\n * topology (filename: 'model.json') and the weights of the model (filename:\n * 'model.weights.bin') will be appended to the body. If `requestInit` has a\n * `body`, an Error will be thrown.\n * @param loadOptions Optional configuration for the loading. It includes the\n * following fields:\n * - weightPathPrefix Optional, this specifies the path prefix for weight\n * files, by default this is calculated from the path param.\n * - fetchFunc Optional, custom `fetch` function. E.g., in Node.js,\n * the `fetch` from node-fetch can be used here.\n * - onProgress Optional, progress callback function, fired periodically\n * before the load is completed.\n * @returns An instance of `IOHandler`.\n *\n * @doc {\n * heading: 'Models',\n * subheading: 'Loading',\n * namespace: 'io',\n * ignoreCI: true\n * }\n */\nexport function http(path: string, loadOptions?: LoadOptions): IOHandler {\n return new HTTPRequest(path, loadOptions);\n}\n\n/**\n * Deprecated. Use `tf.io.http`.\n * @param path\n * @param loadOptions\n */\nexport function browserHTTPRequest(\n path: string, loadOptions?: LoadOptions): IOHandler {\n return http(path, loadOptions);\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * IOHandlers that pass through the in-memory ModelArtifacts format.\n */\n\nimport {IOHandler, IOHandlerSync, LoadHandler, ModelArtifacts, SaveHandler, SaveResult, TrainingConfig, WeightsManifestEntry} from './types';\n\nclass PassthroughLoader implements IOHandlerSync {\n constructor(private readonly modelArtifacts?: ModelArtifacts) {}\n\n load(): ModelArtifacts {\n return this.modelArtifacts;\n }\n}\n\nclass PassthroughSaver> {\n constructor(\n private readonly saveHandler: (artifacts: ModelArtifacts) => R) {}\n\n save(modelArtifacts: ModelArtifacts): R {\n return this.saveHandler(modelArtifacts);\n }\n}\n\nclass PassthroughAsync implements IOHandler {\n load?: LoadHandler;\n save?: SaveHandler;\n\n constructor(handler: IOHandlerSync) {\n if (handler.load) {\n this.load = () => Promise.resolve(handler.load());\n }\n if (handler.save) {\n this.save = (modelArtifacts: ModelArtifacts) =>\n Promise.resolve(handler.save(modelArtifacts));\n }\n }\n}\n\n/**\n * Creates an IOHandler that loads model artifacts from memory.\n *\n * When used in conjunction with `tf.loadLayersModel`, an instance of\n * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.\n *\n * ```js\n * const model = await tf.loadLayersModel(tf.io.fromMemory(\n * modelTopology, weightSpecs, weightData));\n * ```\n *\n * @param modelArtifacts a object containing model topology (i.e., parsed from\n * the JSON format).\n * @param weightSpecs An array of `WeightsManifestEntry` objects describing the\n * names, shapes, types, and quantization of the weight data. Optional.\n * @param weightData A single `ArrayBuffer` containing the weight data,\n * concatenated in the order described by the weightSpecs. Optional.\n * @param trainingConfig Model training configuration. Optional.\n *\n * @returns A passthrough `IOHandler` that simply loads the provided data.\n */\nexport function fromMemory(\n modelArtifacts: {}|ModelArtifacts, weightSpecs?: WeightsManifestEntry[],\n weightData?: ArrayBuffer, trainingConfig?: TrainingConfig): IOHandler {\n\n const args = arguments as unknown as Parameters;\n return new PassthroughAsync(fromMemorySync(...args));\n}\n\n/**\n * Creates an IOHandler that loads model artifacts from memory.\n *\n * When used in conjunction with `tf.loadLayersModel`, an instance of\n * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.\n *\n * ```js\n * const model = await tf.loadLayersModel(tf.io.fromMemory(\n * modelTopology, weightSpecs, weightData));\n * ```\n *\n * @param modelArtifacts a object containing model topology (i.e., parsed from\n * the JSON format).\n * @param weightSpecs An array of `WeightsManifestEntry` objects describing the\n * names, shapes, types, and quantization of the weight data. 
Optional.\n * @param weightData A single `ArrayBuffer` containing the weight data,\n * concatenated in the order described by the weightSpecs. Optional.\n * @param trainingConfig Model training configuration. Optional.\n *\n * @returns A passthrough `IOHandlerSync` that simply loads the provided data.\n */\nexport function fromMemorySync(\n modelArtifacts: {}|ModelArtifacts, weightSpecs?: WeightsManifestEntry[],\n weightData?: ArrayBuffer, trainingConfig?: TrainingConfig): IOHandlerSync {\n if (arguments.length === 1) {\n const isModelArtifacts =\n (modelArtifacts as ModelArtifacts).modelTopology != null ||\n (modelArtifacts as ModelArtifacts).weightSpecs != null;\n if (isModelArtifacts) {\n return new PassthroughLoader(modelArtifacts as ModelArtifacts);\n } else {\n // Legacy support: with only modelTopology.\n // TODO(cais): Remove this deprecated API.\n console.warn(\n 'Please call tf.io.fromMemory() with only one argument. ' +\n 'The argument should be of type ModelArtifacts. ' +\n 'The multi-argument signature of tf.io.fromMemory() has been ' +\n 'deprecated and will be removed in a future release.');\n return new PassthroughLoader({modelTopology: modelArtifacts as {}});\n }\n } else {\n // Legacy support.\n // TODO(cais): Remove this deprecated API.\n console.warn(\n 'Please call tf.io.fromMemory() with only one argument. ' +\n 'The argument should be of type ModelArtifacts. ' +\n 'The multi-argument signature of tf.io.fromMemory() has been ' +\n 'deprecated and will be removed in a future release.');\n return new PassthroughLoader({\n modelTopology: modelArtifacts as {},\n weightSpecs,\n weightData,\n trainingConfig\n });\n }\n}\n\n/**\n * Creates an IOHandler that passes saved model artifacts to a callback.\n *\n * ```js\n * function handleSave(artifacts) {\n * // ... do something with the artifacts ...\n * return {modelArtifactsInfo: {...}, ...};\n * }\n *\n * const saveResult = model.save(tf.io.withSaveHandler(handleSave));\n * ```\n *\n * @param saveHandler A function that accepts a `ModelArtifacts` and returns a\n * promise that resolves to a `SaveResult`.\n */\nexport function withSaveHandler(\n saveHandler: (artifacts: ModelArtifacts) =>\n Promise): IOHandler {\n return new PassthroughSaver(saveHandler);\n}\n\n/**\n * Creates an IOHandlerSync that passes saved model artifacts to a callback.\n *\n * ```js\n * function handleSave(artifacts) {\n * // ... do something with the artifacts ...\n * return {modelArtifactsInfo: {...}, ...};\n * }\n *\n * const saveResult = model.save(tf.io.withSaveHandler(handleSave));\n * ```\n *\n * @param saveHandler A function that accepts a `ModelArtifacts` and returns a\n * `SaveResult`.\n */\nexport function withSaveHandlerSync(\n saveHandler: (artifacts: ModelArtifacts) => SaveResult): IOHandlerSync {\n return new PassthroughSaver(saveHandler);\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
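A small round-trip sketch combining the two passthrough handlers documented above, assuming `tf` is the `@tensorflow/tfjs` namespace and `model` is an existing `tf.LayersModel`: `withSaveHandler` captures the `ModelArtifacts`, and the single-argument (non-deprecated) form of `fromMemory` reloads them.

```js
async function roundTripThroughMemory(model) {
  let captured = null;
  await model.save(tf.io.withSaveHandler(async (artifacts) => {
    captured = artifacts;
    return {
      modelArtifactsInfo: {dateSaved: new Date(), modelTopologyType: 'JSON'}
    };
  }));
  // Single-argument call: `captured` is a complete ModelArtifacts object.
  return tf.loadLayersModel(tf.io.fromMemory(captured));
}
```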
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {BatchMatMul, BatchMatMulAttrs, BatchMatMulInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes the dot product of two matrices, A * B. These must be matrices.\n *\n * ```js\n * const a = tf.tensor2d([1, 2], [1, 2]);\n * const b = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n *\n * a.matMul(b).print(); // or tf.matMul(a, b)\n * ```\n * @param a First matrix in dot product operation.\n * @param b Second matrix in dot product operation.\n * @param transposeA If true, `a` is transposed before multiplication.\n * @param transposeB If true, `b` is transposed before multiplication.\n *\n * @doc {heading: 'Operations', subheading: 'Matrices'}\n */\nfunction matMul_(\n a: Tensor|TensorLike, b: Tensor|TensorLike, transposeA = false,\n transposeB = false): T {\n let $a = convertToTensor(a, 'a', 'matMul');\n let $b = convertToTensor(b, 'b', 'matMul');\n [$a, $b] = makeTypesMatch($a, $b);\n\n const inputs: BatchMatMulInputs = {a: $a, b: $b};\n const attrs: BatchMatMulAttrs = {transposeA, transposeB};\n\n return ENGINE.runKernel(\n BatchMatMul, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const matMul = /* @__PURE__ */ op({matMul_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {OneHot, OneHotAttrs, OneHotInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {DataType, TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Creates a one-hot `tf.Tensor`. The locations represented by `indices` take\n * value `onValue` (defaults to 1), while all other locations take value\n * `offValue` (defaults to 0). 
If `indices` is rank `R`, the output has rank\n * `R+1` with the last axis of size `depth`.\n * `indices` used to encode prediction class must start from 0. For example,\n * if you have 3 classes of data, class 1 should be encoded as 0, class 2\n * should be 1, and class 3 should be 2.\n *\n * ```js\n * tf.oneHot(tf.tensor1d([0, 1], 'int32'), 3).print();\n * ```\n *\n * @param indices `tf.Tensor` of indices with dtype `int32`. Indices must\n * start from 0.\n * @param depth The depth of the one hot dimension.\n * @param onValue A number used to fill in the output when the index matches\n * the location.\n * @param offValue A number used to fill in the output when the index does\n * not match the location.\n * @param dtype The dtype of the output tensor, default to 'int32'.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction oneHot_(\n indices: Tensor|TensorLike, depth: number, onValue = 1, offValue = 0,\n dtype: DataType = 'int32'): Tensor {\n if (depth < 2) {\n throw new Error(`Error in oneHot: depth must be >=2, but it is ${depth}`);\n }\n const $indices = convertToTensor(indices, 'indices', 'oneHot', 'int32');\n\n const inputs: OneHotInputs = {indices: $indices};\n const attrs: OneHotAttrs = {dtype, depth, onValue, offValue};\n\n return ENGINE.runKernel(\n OneHot, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const oneHot = /* @__PURE__ */ op({oneHot_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Imag, ImagInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {op} from './operation';\n/**\n * Returns the imaginary part of a complex (or real) tensor.\n *\n * Given a tensor input, this operation returns a tensor of type float that is\n * the imaginary part of each element in input considered as a complex number.\n * If input is real, a tensor of all zeros is returned.\n *\n * ```js\n * const x = tf.complex([-2.25, 3.25], [4.75, 5.75]);\n * tf.imag(x).print();\n * ```\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction imag_(input: T|TensorLike): T {\n const $input = convertToTensor(input, 'input', 'imag');\n\n const inputs: ImagInputs = {input: $input};\n return ENGINE.runKernel(Imag, inputs as unknown as NamedTensorMap);\n}\n\nexport const imag = /* @__PURE__ */ op({imag_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
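A short illustration of the `onValue`, `offValue`, and `dtype` parameters described above (values are purely illustrative; `tf` is the `@tensorflow/tfjs` namespace):

```js
const indices = tf.tensor1d([0, 2, 1], 'int32');

// Defaults: onValue = 1, offValue = 0, dtype = 'int32'; rank 1 in, rank 2 out.
tf.oneHot(indices, 3).print();
// [[1, 0, 0],
//  [0, 0, 1],
//  [0, 1, 0]]

// Custom fill values and a float output dtype.
tf.oneHot(indices, 3, 0.9, 0.1, 'float32').print();
```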
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Neg, NegInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes `-1 * x` element-wise.\n *\n * ```js\n * const x = tf.tensor2d([1, 2, -2, 0], [2, 2]);\n *\n * x.neg().print(); // or tf.neg(x)\n * ```\n *\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction neg_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'neg');\n\n const inputs: NegInputs = {x: $x};\n return ENGINE.runKernel(Neg, inputs as unknown as NamedTensorMap);\n}\nexport const neg = /* @__PURE__ */ op({neg_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Real, RealInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {op} from './operation';\n\n/**\n * Returns the real part of a complex (or real) tensor.\n *\n * Given a tensor input, this operation returns a tensor of type float that is\n * the real part of each element in input considered as a complex number.\n *\n * If the input is real, it simply makes a clone.\n *\n * ```js\n * const x = tf.complex([-2.25, 3.25], [4.75, 5.75]);\n * tf.real(x).print();\n * ```\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction real_(input: T|TensorLike): T {\n const $input = convertToTensor(input, 'input', 'real');\n\n const inputs: RealInputs = {input: $input};\n return ENGINE.runKernel(Real, inputs as unknown as NamedTensorMap);\n}\n\nexport const real = /* @__PURE__ */ op({real_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {tidy} from '../globals';\nimport {Transpose, TransposeAttrs, TransposeInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\nimport {complex} from './complex';\nimport {imag} from './imag';\nimport {neg} from './neg';\nimport {op} from './operation';\nimport {real} from './real';\n\n/**\n * Transposes the `tf.Tensor`. Permutes the dimensions according to `perm`.\n *\n * The returned `tf.Tensor`'s dimension `i` will correspond to the input\n * dimension `perm[i]`. If `perm` is not given, it is set to `[n-1...0]`,\n * where `n` is the rank of the input `tf.Tensor`. Hence by default, this\n * operation performs a regular matrix transpose on 2-D input `tf.Tensor`s.\n *\n * ```js\n * const a = tf.tensor2d([1, 2, 3, 4, 5, 6], [2, 3]);\n *\n * a.transpose().print(); // or tf.transpose(a)\n * ```\n *\n * @param x The tensor to transpose.\n * @param perm The permutation of the dimensions of a.\n * @param conjugate Will conjugate complex input if true.\n *\n * @doc {heading: 'Operations', subheading: 'Matrices'}\n */\nfunction transpose_(\n x: T|TensorLike, perm?: number[], conjugate?: boolean): T {\n const $x = convertToTensor(x, 'x', 'transpose');\n\n if (perm == null) {\n perm = $x.shape.map((s, i) => i).reverse();\n }\n util.assert(\n $x.rank === perm.length,\n () => `Error in transpose: rank of input ${$x.rank} ` +\n `must match length of perm ${perm}.`);\n perm.forEach(axis => {\n util.assert(\n axis >= 0 && axis < $x.rank,\n () => `All entries in 'perm' must be between 0 and ${$x.rank - 1}` +\n ` but got ${perm}`);\n });\n\n if ($x.rank <= 1) {\n return $x.clone();\n }\n\n const inputs: TransposeInputs = {x: $x};\n const attrs: TransposeAttrs = {perm};\n\n if ($x.dtype === 'complex64') {\n return tidy(() => {\n let $real = real($x);\n let $imag = imag($x);\n $real = ENGINE.runKernel(\n Transpose, {x: $real} as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n $imag = ENGINE.runKernel(\n Transpose, {x: $imag} as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n if (conjugate) {\n $imag = neg($imag);\n }\n return complex($real, $imag);\n });\n }\n\n return ENGINE.runKernel(\n Transpose, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const transpose = /* @__PURE__ */ op({transpose_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
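A quick sketch of the `perm` argument documented above: without it the dimensions are fully reversed; with an explicit permutation only the named axes move.

```js
const t = tf.ones([2, 3, 4]);

console.log(tf.transpose(t).shape);             // [4, 3, 2]  (default perm [2, 1, 0])
console.log(tf.transpose(t, [0, 2, 1]).shape);  // [2, 4, 3]  (swap the last two axes)
```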
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor1D, Tensor2D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {cast} from './cast';\nimport {matMul} from './mat_mul';\nimport {oneHot} from './one_hot';\nimport {op} from './operation';\nimport {transpose} from './transpose';\n\n/**\n * Computes the confusion matrix from true labels and predicted labels.\n *\n * ```js\n * const labels = tf.tensor1d([0, 1, 2, 1, 0], 'int32');\n * const predictions = tf.tensor1d([0, 2, 2, 1, 0], 'int32');\n * const numClasses = 3;\n * const out = tf.math.confusionMatrix(labels, predictions, numClasses);\n * out.print();\n * // Expected output matrix:\n * // [[2, 0, 0],\n * // [0, 1, 1],\n * // [0, 0, 1]]\n * ```\n *\n * @param labels The target labels, assumed to be 0-based integers\n * for the classes. The shape is `[numExamples]`, where\n * `numExamples` is the number of examples included.\n * @param predictions The predicted classes, assumed to be\n * 0-based integers for the classes. Must have the same shape as `labels`.\n * @param numClasses Number of all classes, as an integer.\n * Its value must be larger than the largest element in `labels` and\n * `predictions`.\n * @returns The confusion matrix as a int32-type 2D tensor. The value at\n * row `r` and column `c` is the number of times examples of actual class\n * `r` were predicted as class `c`.\n *\n * @doc {heading: 'Operations', subheading: 'Evaluation'}\n */\nexport function confusionMatrix_(\n labels: Tensor1D|TensorLike, predictions: Tensor1D|TensorLike,\n numClasses: number): Tensor2D {\n const $labels = convertToTensor(labels, 'labels', 'confusionMatrix');\n const $predictions =\n convertToTensor(predictions, 'predictions', 'confusionMatrix');\n\n util.assert(\n numClasses == null || numClasses > 0 && Number.isInteger(numClasses),\n () => `If provided, numClasses must be a positive integer, ` +\n `but got ${numClasses}`);\n util.assert(\n $labels.rank === 1,\n () => `Expected the rank of labels to be 1, but got ${$labels.rank}`);\n util.assert(\n $predictions.rank === 1,\n () => `Expected the rank of predictions to be 1, ` +\n `but got ${$predictions.rank}`);\n util.assert(\n $labels.shape[0] === $predictions.shape[0],\n () => `Mismatch in the number of examples: ` +\n `${$labels.shape[0]} vs. ${$predictions.shape[0]}. 
` +\n `Labels and predictions should have the same number of elements.`);\n util.assert(\n numClasses > 0 && Number.isInteger(numClasses),\n () => `numClasses is required to be a positive integer, but got ` +\n `${numClasses}`);\n // TODO(cais): In the future, if oneHot supports tensors inputs for\n // `numClasses`, `confusionMatrix` can make `numClasses` optional.\n\n const oneHotLabels = oneHot(cast($labels, 'int32'), numClasses) as Tensor2D;\n const oneHotPredictions =\n oneHot(cast($predictions, 'int32'), numClasses) as Tensor2D;\n const oneHotLabelsT: Tensor2D = transpose(oneHotLabels);\n const product: Tensor2D = matMul(oneHotLabelsT, oneHotPredictions);\n return cast(product, 'int32');\n}\n\nexport const confusionMatrix = /* @__PURE__ */ op({confusionMatrix_});\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {env} from '../environment';\nimport {FromPixels, FromPixelsAttrs, FromPixelsInputs} from '../kernel_names';\nimport {getKernel, NamedAttrMap} from '../kernel_registry';\nimport {Tensor, Tensor2D, Tensor3D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {PixelData, TensorLike} from '../types';\n\nimport {cast} from './cast';\nimport {op} from './operation';\nimport {tensor3d} from './tensor3d';\n\nlet fromPixels2DContext: CanvasRenderingContext2D;\n\n/**\n * Creates a `tf.Tensor` from an image.\n *\n * ```js\n * const image = new ImageData(1, 1);\n * image.data[0] = 100;\n * image.data[1] = 150;\n * image.data[2] = 200;\n * image.data[3] = 255;\n *\n * tf.browser.fromPixels(image).print();\n * ```\n *\n * @param pixels The input image to construct the tensor from. The\n * supported image types are all 4-channel. You can also pass in an image\n * object with following attributes:\n * `{data: Uint8Array; width: number; height: number}`\n * @param numChannels The number of channels of the output tensor. A\n * numChannels value less than 4 allows you to ignore channels. Defaults to\n * 3 (ignores alpha channel of input image).\n *\n * @returns A Tensor3D with the shape `[height, width, numChannels]`.\n *\n * Note: fromPixels can be lossy in some cases, same image may result in\n * slightly different tensor values, if rendered by different rendering\n * engines. This means that results from different browsers, or even same\n * browser with CPU and GPU rendering engines can be different. 
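The implementation above reduces the confusion matrix to `oneHot(labels)ᵀ · oneHot(predictions)`; a hand-rolled sketch of that identity, reusing the sample data from the documentation:

```js
const labels = tf.tensor1d([0, 1, 2, 1, 0], 'int32');
const predictions = tf.tensor1d([0, 2, 2, 1, 0], 'int32');
const numClasses = 3;

// Entry (r, c) counts examples whose true class is r and predicted class is c.
const manual = tf.cast(
    tf.matMul(tf.oneHot(labels, numClasses).transpose(),
              tf.oneHot(predictions, numClasses)),
    'int32');
manual.print();  // same values as tf.math.confusionMatrix(labels, predictions, 3)
```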
See discussion\n * in details:\n * https://github.com/tensorflow/tfjs/issues/5482\n *\n * @doc {heading: 'Browser', namespace: 'browser', ignoreCI: true}\n */\nfunction fromPixels_(\n pixels: PixelData|ImageData|HTMLImageElement|HTMLCanvasElement|\n HTMLVideoElement|ImageBitmap,\n numChannels = 3): Tensor3D {\n // Sanity checks.\n if (numChannels > 4) {\n throw new Error(\n 'Cannot construct Tensor with more than 4 channels from pixels.');\n }\n if (pixels == null) {\n throw new Error('pixels passed to tf.browser.fromPixels() can not be null');\n }\n let isPixelData = false;\n let isImageData = false;\n let isVideo = false;\n let isImage = false;\n let isCanvasLike = false;\n let isImageBitmap = false;\n if ((pixels as PixelData).data instanceof Uint8Array) {\n isPixelData = true;\n } else if (\n typeof (ImageData) !== 'undefined' && pixels instanceof ImageData) {\n isImageData = true;\n } else if (\n typeof (HTMLVideoElement) !== 'undefined' &&\n pixels instanceof HTMLVideoElement) {\n isVideo = true;\n } else if (\n typeof (HTMLImageElement) !== 'undefined' &&\n pixels instanceof HTMLImageElement) {\n isImage = true;\n // tslint:disable-next-line: no-any\n } else if ((pixels as any).getContext != null) {\n isCanvasLike = true;\n } else if (\n typeof (ImageBitmap) !== 'undefined' && pixels instanceof ImageBitmap) {\n isImageBitmap = true;\n } else {\n throw new Error(\n 'pixels passed to tf.browser.fromPixels() must be either an ' +\n `HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData ` +\n `in browser, or OffscreenCanvas, ImageData in webworker` +\n ` or {data: Uint32Array, width: number, height: number}, ` +\n `but was ${(pixels as {}).constructor.name}`);\n }\n // If the current backend has 'FromPixels' registered, it has a more\n // efficient way of handling pixel uploads, so we call that.\n const kernel = getKernel(FromPixels, ENGINE.backendName);\n if (kernel != null) {\n const inputs: FromPixelsInputs = {pixels};\n const attrs: FromPixelsAttrs = {numChannels};\n return ENGINE.runKernel(\n FromPixels, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n }\n\n const [width, height] = isVideo ?\n [\n (pixels as HTMLVideoElement).videoWidth,\n (pixels as HTMLVideoElement).videoHeight\n ] :\n [pixels.width, pixels.height];\n let vals: Uint8ClampedArray|Uint8Array;\n\n if (isCanvasLike) {\n vals =\n // tslint:disable-next-line:no-any\n (pixels as any).getContext('2d').getImageData(0, 0, width, height).data;\n } else if (isImageData || isPixelData) {\n vals = (pixels as PixelData | ImageData).data;\n } else if (isImage || isVideo || isImageBitmap) {\n if (fromPixels2DContext == null) {\n if (typeof document === 'undefined') {\n if (typeof OffscreenCanvas !== 'undefined' &&\n typeof OffscreenCanvasRenderingContext2D !== 'undefined') {\n // @ts-ignore\n fromPixels2DContext = new OffscreenCanvas(1, 1).getContext('2d');\n } else {\n throw new Error(\n 'Cannot parse input in current context. 
' +\n 'Reason: OffscreenCanvas Context2D rendering is not supported.');\n }\n } else {\n fromPixels2DContext =\n document.createElement('canvas').getContext(\n '2d', {willReadFrequently: true});\n }\n }\n fromPixels2DContext.canvas.width = width;\n fromPixels2DContext.canvas.height = height;\n fromPixels2DContext.drawImage(\n pixels as HTMLVideoElement, 0, 0, width, height);\n vals = fromPixels2DContext.getImageData(0, 0, width, height).data;\n }\n let values: Int32Array;\n if (numChannels === 4) {\n values = new Int32Array(vals);\n } else {\n const numPixels = width * height;\n values = new Int32Array(numPixels * numChannels);\n for (let i = 0; i < numPixels; i++) {\n for (let channel = 0; channel < numChannels; ++channel) {\n values[i * numChannels + channel] = vals[i * 4 + channel];\n }\n }\n }\n const outShape: [number, number, number] = [height, width, numChannels];\n return tensor3d(values, outShape, 'int32');\n}\n\n// Helper functions for |fromPixelsAsync| to check whether the input can\n// be wrapped into imageBitmap.\nfunction isPixelData(pixels: PixelData|ImageData|HTMLImageElement|\n HTMLCanvasElement|HTMLVideoElement|\n ImageBitmap): pixels is PixelData {\n return (pixels != null) && ((pixels as PixelData).data instanceof Uint8Array);\n}\n\nfunction isImageBitmapFullySupported() {\n return typeof window !== 'undefined' &&\n typeof (ImageBitmap) !== 'undefined' &&\n window.hasOwnProperty('createImageBitmap');\n}\n\nfunction isNonEmptyPixels(pixels: PixelData|ImageData|HTMLImageElement|\n HTMLCanvasElement|HTMLVideoElement|ImageBitmap) {\n return pixels != null && pixels.width !== 0 && pixels.height !== 0;\n}\n\nfunction canWrapPixelsToImageBitmap(pixels: PixelData|ImageData|\n HTMLImageElement|HTMLCanvasElement|\n HTMLVideoElement|ImageBitmap) {\n return isImageBitmapFullySupported() && !(pixels instanceof ImageBitmap) &&\n isNonEmptyPixels(pixels) && !isPixelData(pixels);\n}\n\n/**\n * Creates a `tf.Tensor` from an image in async way.\n *\n * ```js\n * const image = new ImageData(1, 1);\n * image.data[0] = 100;\n * image.data[1] = 150;\n * image.data[2] = 200;\n * image.data[3] = 255;\n *\n * (await tf.browser.fromPixelsAsync(image)).print();\n * ```\n * This API is the async version of fromPixels. The API will first\n * check |WRAP_TO_IMAGEBITMAP| flag, and try to wrap the input to\n * imageBitmap if the flag is set to true.\n *\n * @param pixels The input image to construct the tensor from. The\n * supported image types are all 4-channel. You can also pass in an image\n * object with following attributes:\n * `{data: Uint8Array; width: number; height: number}`\n * @param numChannels The number of channels of the output tensor. A\n * numChannels value less than 4 allows you to ignore channels. 
Defaults to\n * 3 (ignores alpha channel of input image).\n *\n * @doc {heading: 'Browser', namespace: 'browser', ignoreCI: true}\n */\nexport async function fromPixelsAsync(\n pixels: PixelData|ImageData|HTMLImageElement|HTMLCanvasElement|\n HTMLVideoElement|ImageBitmap,\n numChannels = 3) {\n let inputs: PixelData|ImageData|HTMLImageElement|HTMLCanvasElement|\n HTMLVideoElement|ImageBitmap = null;\n\n // Check whether the backend needs to wrap |pixels| to imageBitmap and\n // whether |pixels| can be wrapped to imageBitmap.\n if (env().getBool('WRAP_TO_IMAGEBITMAP') &&\n canWrapPixelsToImageBitmap(pixels)) {\n // Force the imageBitmap creation to not do any premultiply alpha\n // ops.\n let imageBitmap;\n\n try {\n // wrap in try-catch block, because createImageBitmap may not work\n // properly in some browsers, e.g.\n // https://bugzilla.mozilla.org/show_bug.cgi?id=1335594\n // tslint:disable-next-line: no-any\n imageBitmap = await (createImageBitmap as any)(\n pixels as ImageBitmapSource, {premultiplyAlpha: 'none'});\n } catch (e) {\n imageBitmap = null;\n }\n\n // createImageBitmap will clip the source size.\n // In some cases, the input will have larger size than its content.\n // E.g. new Image(10, 10) but with 1 x 1 content. Using\n // createImageBitmap will clip the size from 10 x 10 to 1 x 1, which\n // is not correct. We should avoid wrapping such resouce to\n // imageBitmap.\n if (imageBitmap != null && imageBitmap.width === pixels.width &&\n imageBitmap.height === pixels.height) {\n inputs = imageBitmap;\n } else {\n inputs = pixels;\n }\n } else {\n inputs = pixels;\n }\n\n return fromPixels_(inputs, numChannels);\n}\n\n/**\n * Draws a `tf.Tensor` of pixel values to a byte array or optionally a\n * canvas.\n *\n * When the dtype of the input is 'float32', we assume values in the range\n * [0-1]. Otherwise, when input is 'int32', we assume values in the range\n * [0-255].\n *\n * Returns a promise that resolves when the canvas has been drawn to.\n *\n * @param img A rank-2 tensor with shape `[height, width]`, or a rank-3 tensor\n * of shape `[height, width, numChannels]`. If rank-2, draws grayscale. If\n * rank-3, must have depth of 1, 3 or 4. When depth of 1, draws\n * grayscale. When depth of 3, we draw with the first three components of\n * the depth dimension corresponding to r, g, b and alpha = 1. When depth of\n * 4, all four components of the depth dimension correspond to r, g, b, a.\n * @param canvas The canvas to draw to.\n *\n * @doc {heading: 'Browser', namespace: 'browser'}\n */\nexport async function toPixels(\n img: Tensor2D|Tensor3D|TensorLike,\n canvas?: HTMLCanvasElement): Promise {\n let $img = convertToTensor(img, 'img', 'toPixels');\n if (!(img instanceof Tensor)) {\n // Assume int32 if user passed a native array.\n const originalImgTensor = $img;\n $img = cast(originalImgTensor, 'int32');\n originalImgTensor.dispose();\n }\n if ($img.rank !== 2 && $img.rank !== 3) {\n throw new Error(\n `toPixels only supports rank 2 or 3 tensors, got rank ${$img.rank}.`);\n }\n const [height, width] = $img.shape.slice(0, 2);\n const depth = $img.rank === 2 ? 1 : $img.shape[2];\n\n if (depth > 4 || depth === 2) {\n throw new Error(\n `toPixels only supports depth of size ` +\n `1, 3 or 4 but got ${depth}`);\n }\n\n if ($img.dtype !== 'float32' && $img.dtype !== 'int32') {\n throw new Error(\n `Unsupported type for toPixels: ${$img.dtype}.` +\n ` Please use float32 or int32 tensors.`);\n }\n\n const data = await $img.data();\n const multiplier = $img.dtype === 'float32' ? 
255 : 1;\n const bytes = new Uint8ClampedArray(width * height * 4);\n\n for (let i = 0; i < height * width; ++i) {\n const rgba = [0, 0, 0, 255];\n\n for (let d = 0; d < depth; d++) {\n const value = data[i * depth + d];\n\n if ($img.dtype === 'float32') {\n if (value < 0 || value > 1) {\n throw new Error(\n `Tensor values for a float32 Tensor must be in the ` +\n `range [0 - 1] but encountered ${value}.`);\n }\n } else if ($img.dtype === 'int32') {\n if (value < 0 || value > 255) {\n throw new Error(\n `Tensor values for a int32 Tensor must be in the ` +\n `range [0 - 255] but encountered ${value}.`);\n }\n }\n\n if (depth === 1) {\n rgba[0] = value * multiplier;\n rgba[1] = value * multiplier;\n rgba[2] = value * multiplier;\n } else {\n rgba[d] = value * multiplier;\n }\n }\n\n const j = i * 4;\n bytes[j + 0] = Math.round(rgba[0]);\n bytes[j + 1] = Math.round(rgba[1]);\n bytes[j + 2] = Math.round(rgba[2]);\n bytes[j + 3] = Math.round(rgba[3]);\n }\n\n if (canvas != null) {\n canvas.width = width;\n canvas.height = height;\n const ctx = canvas.getContext('2d');\n const imageData = new ImageData(bytes, width, height);\n ctx.putImageData(imageData, 0, 0);\n }\n if ($img !== img) {\n $img.dispose();\n }\n return bytes;\n}\n\nexport const fromPixels = /* @__PURE__ */ op({fromPixels_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor3D} from '../tensor';\nimport {inferShape} from '../tensor_util_env';\nimport {TensorLike3D} from '../types';\nimport {DataType} from '../types';\nimport {assertNonNull} from '../util';\nimport {makeTensor} from './tensor_ops_util';\n\n/**\n * Creates rank-3 `tf.Tensor` with the provided values, shape and dtype.\n *\n * The same functionality can be achieved with `tf.tensor`, but in general\n * we recommend using `tf.tensor3d` as it makes the code more readable.\n *\n * ```js\n * // Pass a nested array.\n * tf.tensor3d([[[1], [2]], [[3], [4]]]).print();\n * ```\n * ```js\n * // Pass a flat array and specify a shape.\n * tf.tensor3d([1, 2, 3, 4], [2, 2, 1]).print();\n * ```\n *\n * @param values The values of the tensor. Can be nested array of numbers,\n * or a flat array, or a `TypedArray`.\n * @param shape The shape of the tensor. 
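The `toPixels` documentation above carries no inline snippet; a minimal sketch of drawing an int32 grayscale tensor into a caller-supplied canvas (the canvas element is assumed to already exist in the page):

```js
async function renderPreview(canvas) {
  // int32 values must lie in [0, 255]; float32 values in [0, 1], per the checks above.
  const img = tf.tensor2d([[0, 85], [170, 255]], [2, 2], 'int32');
  const bytes = await tf.browser.toPixels(img, canvas);
  console.log(bytes.length);  // 2 * 2 * 4 = 16 RGBA bytes
  img.dispose();
}
```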
If not provided, it is inferred from\n * `values`.\n * @param dtype The data type.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function tensor3d(\n values: TensorLike3D, shape?: [number, number, number],\n dtype?: DataType): Tensor3D {\n assertNonNull(values);\n if (shape != null && shape.length !== 3) {\n throw new Error('tensor3d() requires shape to have three numbers');\n }\n const inferredShape = inferShape(values, dtype);\n if (inferredShape.length !== 3 && inferredShape.length !== 1) {\n throw new Error(\n 'tensor3d() requires values to be number[][][] or flat/TypedArray');\n }\n if (inferredShape.length === 1 && shape == null) {\n throw new Error(\n 'tensor3d() requires shape to be provided when `values` ' +\n 'are a flat array');\n }\n return makeTensor(values, shape, inferredShape, dtype) as Tensor3D;\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport { TensorInfo } from '../tensor_info';\nimport * as util from '../util';\n\nconst NEW_AXIS = -2;\nconst SHRINK_AXIS = -1;\n\n// Sparse slicing specification\n// if one does foo[3:5, ..., -3], the begin, end and strides will have length\n// of 3.\ninterface StridedSliceSparseSpec {\n dims: number;\n numAddAxisAfterEllipsis: number;\n begin: number[];\n end: number[];\n strides: number[];\n beginMask: number;\n endMask: number;\n ellipsisMask: number;\n newAxisMask: number;\n shrinkAxisMask: number;\n}\n\n// Dense slicing specification\n// all ellipses and newaxis are expanded out. So if foo[3:5, ..., -3] where foo\n// is 10 dimensional, each array of begin, end, strides will have 10 entries\n// where as the sparse can have length less than the rank of foo.\ninterface StridedSliceDenseSpec {\n dims: number;\n beginMask?: number;\n endMask?: number;\n beginValid: boolean;\n endValid: boolean;\n begin?: number[];\n end?: number[];\n strides?: number[];\n // This array helps construct the final shape of the slice.\n // The final tensor is reduced in rank whenever a single index e.g. foo[3]\n // is called for. The final tensor increases in rank with newAxis entries.\n // If an index in this array is positive, the size of the dimension is\n // obtained from canonical end-begin. Otherwise, if it is a NEW_AXIS, it will\n // be 1. A shrunk dimension is skipped.\n finalShapeGatherIndices?: number[];\n // This array has the same size as finalShapeGatherIndices, but it remembers\n // the sparse index that a dimension comes from, instead of dense index.\n // A -1 in this vector means the index is not from the sparse input.\n finalShapeGatherIndicesSparse?: number[];\n inputShapeGatherIndicesSparse?: number[];\n // The dense indexed shrink mask is which processing dimensions should be\n // shrunk. 
For example, if foo.shape = [10, 10, 10, 10], foo[3, ..., 5] has\n // sparseShrinkAxisMask of 5 (0101) and denseShrinkAxisMask of 9 (1001),\n // yielding a final shape [10, 10].\n shrinkAxisMask?: number;\n}\n\nexport type SliceInfo = {\n finalShapeSparse: number[],\n finalShape: number[],\n isIdentity: boolean,\n sliceDim0: boolean,\n isSimpleSlice: boolean,\n begin: number[],\n end: number[],\n strides: number[]\n};\n\nexport function assertParamsValid(\n input: TensorInfo, begin: number[], size: number[]): void {\n const inputRank = input.shape.length;\n util.assert(\n inputRank === begin.length,\n () => `Error in slice${inputRank}D: Length of begin ${begin} must ` +\n `match the rank of the array (${inputRank}).`);\n util.assert(\n inputRank === size.length,\n () => `Error in slice${inputRank}D: Length of size ${size} must ` +\n `match the rank of the array (${inputRank}).`);\n\n for (let i = 0; i < inputRank; ++i) {\n util.assert(\n begin[i] + size[i] <= input.shape[i],\n () => `Error in slice${inputRank}D: begin[${i}] + size[${i}] ` +\n `(${begin[i] + size[i]}) would overflow input.shape[${i}] (${\n input.shape[i]})`);\n }\n}\n\n/** Converts a binary mask to an array of axes. Used in stridedSlice(). */\nexport function maskToAxes(mask: number): number[] {\n const axes = [];\n let axis = 0;\n while (mask > 0) {\n if (mask & 1) {\n axes.push(axis);\n }\n mask /= 2;\n axis++;\n }\n return axes;\n}\n\n/** Computes the output shape given the strided slice params. */\nexport function computeOutShape(\n begin: number[], end: number[], strides: number[]): number[] {\n const size = [];\n for (let axis = 0; axis < begin.length; axis++) {\n size[axis] = Math.ceil((end[axis] - begin[axis]) / strides[axis]);\n }\n return size;\n}\n\n// Creates full selection at the elided dimensions. If the dimension matches\n// the ellipsis mask, override the current stride value. 
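A standalone worked example mirroring the two helpers above (`maskToAxes`, `computeOutShape`). They are internal slice utilities rather than public API, so the functions below are illustrative copies of the same logic:

```js
// Illustrative mirror of maskToAxes: bit i set in the mask selects axis i.
function maskToAxesSketch(mask) {
  const axes = [];
  for (let axis = 0; mask > 0; mask >>= 1, axis++) {
    if (mask & 1) axes.push(axis);
  }
  return axes;
}

// Illustrative mirror of computeOutShape: each axis keeps ceil((end - begin) / stride) elements.
function computeOutShapeSketch(begin, end, strides) {
  return begin.map((b, axis) => Math.ceil((end[axis] - b) / strides[axis]));
}

console.log(maskToAxesSketch(0b101));                         // [0, 2]
console.log(computeOutShapeSketch([0, 2], [10, 8], [3, 1]));  // [4, 6]
```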
Otherwise, insert.\nexport function stridesWithElidedDims(\n strides: number[], ellipsisInsertionIndex: number, numElidedAxes: number,\n inputShape: number[]): number[] {\n const newStrides = [...strides];\n for (let i = newStrides.length; i < inputShape.length; i++) {\n newStrides.push(1);\n }\n for (let i = 0; i < numElidedAxes; i++) {\n if (i === 0) {\n newStrides[ellipsisInsertionIndex] = 1;\n } else {\n newStrides.splice(\n ellipsisInsertionIndex, 0 /* num elements to delete */,\n 1 /* element to add */);\n newStrides.pop();\n }\n }\n return newStrides;\n}\n\nfunction unnormalizeAxis(\n ellipsisInsertionIndex: number, numElidedAxes: number,\n normalizedAxis: number): number {\n if (normalizedAxis <= ellipsisInsertionIndex) {\n return normalizedAxis;\n }\n\n return normalizedAxis - (numElidedAxes - 1);\n}\n\nfunction getElidedAxes(numElidedAxes: number, ellipsisInsertionIndex: number) {\n const elidedAxes = [];\n for (let i = 0; i < numElidedAxes; i++) {\n elidedAxes.push(ellipsisInsertionIndex + i);\n }\n return elidedAxes;\n}\n\n// Normalize the start, end and strides.\nexport function getNormalizedAxes(\n inputShape: number[], ellipsisAxes: number[], numInterpolatedAxes: number,\n begin: number[], end: number[], strides: number[], beginMask: number,\n endMask: number,\n ellipsisMask: number): {begin: number[], end: number[], strides: number[]} {\n const inputRank = inputShape.length;\n let normalizedBegin = new Array(inputRank),\n normalizedEnd = new Array(inputRank),\n normalizedStrides = new Array(inputRank);\n if (ellipsisAxes.length && numInterpolatedAxes > 0) {\n const fullIndex = ellipsisAxes[0];\n\n // The ellipsis applies to the masked index as well as any dimensions\n // that are interpolated.\n const numElidedAxes = numInterpolatedAxes + 1;\n normalizedBegin = startIndicesWithElidedDims(\n beginMask, fullIndex, numElidedAxes, begin, inputShape);\n normalizedEnd = stopIndicesWithElidedDims(\n endMask, fullIndex, numElidedAxes, end, inputShape);\n normalizedStrides =\n stridesWithElidedDims(strides, fullIndex, numElidedAxes, inputShape);\n } else {\n for (let axis = 0; axis < inputRank; axis++) {\n normalizedBegin[axis] = startForAxis(\n beginMask, begin, strides, inputShape, axis, ellipsisMask);\n normalizedEnd[axis] =\n stopForAxis(endMask, end, strides, inputShape, axis, ellipsisMask);\n normalizedStrides[axis] = stridesForAxis(strides, axis, ellipsisMask);\n }\n }\n\n return {\n begin: normalizedBegin,\n end: normalizedEnd,\n strides: normalizedStrides\n };\n}\n\n// Creates full selection at the elided dimensions. If the dimension matches\n// the ellipsis mask, override the current start value. Otherwise, insert.\nexport function startIndicesWithElidedDims(\n beginMask: number, ellipsisInsertionIndex: number, numElidedAxes: number,\n originalBegin: number[], inputShape: number[]): number[] {\n const newIndices = [...inputShape];\n const elidedAxes = getElidedAxes(numElidedAxes, ellipsisInsertionIndex);\n\n for (let axis = 0; axis < newIndices.length; axis++) {\n if (elidedAxes.indexOf(axis) > -1) {\n newIndices[axis] = 0;\n } else {\n const originalAxis =\n unnormalizeAxis(ellipsisInsertionIndex, numElidedAxes, axis);\n let originalValue = originalBegin[originalAxis];\n if (beginMask & 1 << originalAxis) {\n originalValue = 0;\n }\n\n newIndices[axis] = originalValue;\n }\n }\n return newIndices;\n}\n\n// Creates full selection at the elided dimensions. If the dimension matches\n// the ellipsis mask, override the current stop value. 
Otherwise, insert.\nexport function stopIndicesWithElidedDims(\n endMask: number, ellipsisInsertionIndex: number, numElidedAxes: number,\n originalEnd: number[], inputShape: number[]): number[] {\n const newIndices = [...inputShape];\n const elidedAxes = getElidedAxes(numElidedAxes, ellipsisInsertionIndex);\n\n for (let axis = 0; axis < newIndices.length; axis++) {\n if (elidedAxes.indexOf(axis) > -1) {\n newIndices[axis] = Number.MAX_SAFE_INTEGER;\n } else {\n const originalAxis =\n unnormalizeAxis(ellipsisInsertionIndex, numElidedAxes, axis);\n let originalValue = originalEnd[originalAxis];\n if (endMask & 1 << originalAxis) {\n originalValue = Number.MAX_SAFE_INTEGER;\n }\n newIndices[axis] = originalValue;\n }\n }\n\n for (let i = 0; i < newIndices.length; i++) {\n // Handle negative indices\n const axisSize = inputShape[i];\n if (newIndices[i] < 0) {\n newIndices[i] += axisSize;\n }\n newIndices[i] = util.clamp(0, newIndices[i], inputShape[i]);\n }\n return newIndices;\n}\n\nexport function stridesForAxis(\n strides: number[], axis: number, ellipsisMask: number): number {\n let stride = strides[axis];\n if (ellipsisMask & (1 << axis) || stride == null) {\n stride = 1;\n }\n\n return stride;\n}\n\nexport function startForAxis(\n beginMask: number, startIndices: number[], strides: number[],\n inputShape: number[], axis: number, ellipsisMask: number): number {\n // Begin with the specified index\n let start = startIndices[axis];\n const stride = strides[axis] || 1;\n\n // Check the axis bit from right of masked axes, or the begin index is not set\n // for the axis.\n if (beginMask & 1 << axis || ellipsisMask & 1 << axis || start == null) {\n if (stride > 0) {\n // Forward iteration - use the first element. These values will get\n // clamped below (Note: We could have set them to 0 and axis_size-1, but\n // use lowest() and max() to maintain symmetry with StopForAxis())\n start = Number.MIN_SAFE_INTEGER;\n } else {\n // Backward iteration - use the last element.\n start = Number.MAX_SAFE_INTEGER;\n }\n }\n\n // Handle negative indices\n const axisSize = inputShape[axis];\n if (start < 0) {\n start += axisSize;\n }\n\n // Clamping\n start = util.clamp(0, start, axisSize - 1);\n\n return start;\n}\n\nexport function stopForAxis(\n endMask: number, stopIndices: number[], strides: number[],\n inputShape: number[], axis: number, ellipsisMask: number): number {\n // Begin with the specified index\n let stop = stopIndices[axis];\n const stride = strides[axis] || 1;\n\n // Check the axis bit from right of masked axes, or if the stop index is not\n // set for this axis.\n if (endMask & (1 << axis) || ellipsisMask & (1 << axis) || stop == null) {\n if (stride > 0) {\n // Forward iteration - use the last element. 
These values will get\n // clamped below\n stop = Number.MAX_SAFE_INTEGER;\n } else {\n // Backward iteration - use the first element.\n stop = Number.MIN_SAFE_INTEGER;\n }\n }\n\n // Handle negative indices\n const axisSize = inputShape[axis];\n if (stop < 0) {\n stop += axisSize;\n }\n\n // Clamping\n // Because the end index points one past the last element, we need slightly\n // different clamping ranges depending on the direction.\n if (stride > 0) {\n // Forward iteration\n stop = util.clamp(0, stop, axisSize);\n } else {\n // Backward iteration\n stop = util.clamp(-1, stop, axisSize - 1);\n }\n\n return stop;\n}\n\n/**\n * Returns true if the slice occupies a continous set of elements in the\n * 'flat' space.\n */\nexport function isSliceContinous(\n shape: number[], begin: number[], size: number[]) {\n // Index of the first axis that has size > 1.\n let firstNonOneAxis = size.length;\n for (let i = 0; i < size.length; i++) {\n if (size[i] > 1) {\n firstNonOneAxis = i;\n break;\n }\n }\n\n for (let i = firstNonOneAxis + 1; i < size.length; i++) {\n if (begin[i] > 0 || size[i] !== shape[i]) {\n return false;\n }\n }\n return true;\n}\n\nexport function computeFlatOffset(begin: number[], strides: number[]): number {\n let flatOffset = begin.length > 0 ? begin[begin.length - 1] : 1;\n for (let i = 0; i < begin.length - 1; i++) {\n flatOffset += begin[i] * strides[i];\n }\n return flatOffset;\n}\n\nexport function parseSliceParams(\n x: TensorInfo, begin: number|number[], size?: number|number[]) {\n // The following logic allows for more ergonomic calls.\n let begin_: number[];\n const xRank = x.shape.length;\n if (typeof begin === 'number') {\n begin_ = [begin, ...new Array(xRank - 1).fill(0)];\n } else if (begin.length < xRank) {\n begin_ = begin.concat(new Array(xRank - begin.length).fill(0));\n } else {\n begin_ = begin.slice();\n }\n begin_.forEach(d => {\n util.assert(\n d !== -1, () => 'slice() does not support negative begin indexing.');\n });\n let size_: number[];\n if (size == null) {\n size_ = new Array(xRank).fill(-1);\n } else if (typeof size === 'number') {\n size_ = [size, ...new Array(xRank - 1).fill(-1)];\n } else if (size.length < xRank) {\n size_ = size.concat(new Array(xRank - size.length).fill(-1));\n } else {\n size_ = size;\n }\n size_ = size_.map((d, i) => {\n if (d >= 0) {\n return d;\n } else {\n util.assert(\n d === -1,\n () => `Negative size values should be exactly -1 but got ` +\n `${d} for the slice() size at index ${i}.`);\n return x.shape[i] - begin_[i];\n }\n });\n return [begin_, size_];\n}\n\n// Convert the slicing specification from a sparse representation to a dense\n// representation. This means that all ellipses and newaxis are expanded out.\nexport function sliceInfo(\n xShape: number[], begin: number[], end: number[], strides: number[],\n beginMask: number, endMask: number, ellipsisMask: number,\n newAxisMask: number, shrinkAxisMask: number): SliceInfo {\n let stridesNonNull;\n if (strides == null) {\n stridesNonNull = new Array(begin.length);\n stridesNonNull.fill(1);\n } else {\n stridesNonNull = strides;\n }\n\n // Only one non-zero bit is allowed in ellipsisMask, which means ellipsisMask\n // is a power of 2. Use bit compares to ensure ellipsisMask is 0 or a power\n // of 2. 
When i is a power of 2, i & (i - 1) is always 0.\n // Also ref:\n // https://stackoverflow.com/questions/600293/how-to-check-if-a-number-is-a-power-of-2\n if (ellipsisMask != null && (ellipsisMask & (ellipsisMask - 1)) !== 0) {\n throw new Error('Multiple ellipses in slice is not allowed.');\n }\n\n // Step 1: Account for ellipsis and new axis.\n // Check for ellipsis and count how many non-newaxis there are after.\n let ellipsisSeen = false;\n\n const sparseSpec: StridedSliceSparseSpec = {\n dims: stridesNonNull.length,\n numAddAxisAfterEllipsis: 0,\n begin: begin.slice(),\n end: end.slice(),\n strides: stridesNonNull.slice(),\n beginMask,\n endMask,\n ellipsisMask,\n newAxisMask,\n shrinkAxisMask\n };\n\n for (let i = 0; i < sparseSpec.dims; i++) {\n if (ellipsisSeen && ((1 << i) & newAxisMask) !== 0) {\n sparseSpec.numAddAxisAfterEllipsis++;\n }\n if ((1 << i) & ellipsisMask) {\n ellipsisSeen = true;\n }\n }\n // If no ellipsis insert one at the end.\n if (!ellipsisSeen) {\n sparseSpec.ellipsisMask |= (1 << sparseSpec.dims);\n sparseSpec.dims++; // this effects loop iteration below\n }\n\n // Step 2: Make a sparse spec into a full index spec.\n //\n // The sparse spec deos not correspond to the number of dimensions.\n // Make a dense spec that cooresponds to the number of dimensions.\n //\n // For example suppose foo[...,3:] on foo.shape = [2, 2, 3] then we need to\n // produce the missing beginMask for the first two dimensions i.e. from\n // beginMaskSpec = 0, endMaskSpec = 2, we achieve beginMask = 6 (110),\n // endMask = 7 (111).\n const denseSpec: StridedSliceDenseSpec = {\n dims: xShape.length,\n beginMask: 0,\n endMask: 0,\n beginValid: false,\n endValid: false\n };\n\n buildDenseSpec(sparseSpec, denseSpec);\n\n // Step 3: Make implicit ranges (non-zero beginMasks and endMasks) explicit\n // and bounds check.\n let isIdentity = true;\n let sliceDim0 = true;\n let isSimpleSlice = true;\n const processingShape = [];\n const finalShape = [];\n\n for (let i = 0; i < xShape.length; ++i) {\n if (denseSpec.strides[i] === 0) {\n throw Error(`strides[${i}] must be non-zero`);\n }\n const shrinkI = !!(denseSpec.shrinkAxisMask & (1 << i));\n const dimI = xShape[i];\n if (dimI === -1) {\n processingShape.push(shrinkI ? 1 : -1);\n continue;\n }\n\n const masks =\n [denseSpec.beginMask & (1 << i), denseSpec.endMask & (1 << i)];\n const validRange = [\n denseSpec.strides[i] > 0 ? 0 : -1,\n denseSpec.strides[i] > 0 ? dimI : dimI - 1\n ];\n\n if (shrinkI && denseSpec.strides[i] <= 0) {\n throw Error('only stride 1 allowed on non-range indexing.');\n }\n\n isSimpleSlice = isSimpleSlice && (denseSpec.strides[i] === 1);\n\n const beginAndEndMasked =\n !!((denseSpec.beginMask & (1 << i)) && (denseSpec.endMask & (1 << i)));\n\n if (denseSpec.beginValid && denseSpec.endValid) {\n if (shrinkI) {\n // If we are shrinking, the end index is now possibly incorrect. In\n // particular foo[-1] produces sparseBegin = -1, sparseEnd = 0.\n // and canonical puts these to n-1 and 0, which implies a degenerate\n // interval. Fortunately, it is now safe to re-create end as begin + 1.\n const xFwd = denseSpec.begin[i] < 0 ? 
dimI + denseSpec.begin[i] :\n denseSpec.begin[i];\n denseSpec.begin[i] = xFwd;\n denseSpec.end[i] = denseSpec.begin[i] + 1;\n if (xFwd < 0 || xFwd >= dimI) {\n throw Error(`slice index ${denseSpec.begin[i]} of dimension ${\n i} out of bounds.`);\n }\n } else {\n denseSpec.begin[i] = canonical(\n denseSpec.begin[i], 0, denseSpec.strides[i], dimI, masks,\n validRange);\n denseSpec.end[i] = canonical(\n denseSpec.end[i], 1, denseSpec.strides[i], dimI, masks, validRange);\n }\n // Update optimization values\n const takeAllInDimension = denseSpec.strides[i] === 1 &&\n denseSpec.begin[i] === 0 && denseSpec.end[i] === dimI;\n isIdentity = isIdentity && takeAllInDimension;\n sliceDim0 = sliceDim0 &&\n ((i === 0 && denseSpec.strides[i] === 1) || takeAllInDimension);\n } else {\n isIdentity =\n isIdentity && ((denseSpec.strides[i] === 1) && beginAndEndMasked);\n sliceDim0 = sliceDim0 &&\n ((i === 0 && denseSpec.strides[i] === 1) || beginAndEndMasked);\n }\n // Compute the processing shape (the intermediate Eigen will produce)\n let intervalLength;\n let knownInterval = false;\n if (denseSpec.beginValid && denseSpec.endValid) {\n intervalLength = denseSpec.end[i] - denseSpec.begin[i];\n knownInterval = true;\n } else if (shrinkI) {\n // The dimension is still known as 1 for the processingShape, but will be\n // discarded for the final shape.\n intervalLength = 1;\n knownInterval = true;\n } else if (beginAndEndMasked) {\n // Even if we don't have values for begin or end, we do know that this\n // dimension covers the whole interval. If we have shape information for\n // this dimension, that tells us the interval length.\n if (dimI >= 0) {\n if (denseSpec.strides[i] < 0) {\n intervalLength = -dimI;\n } else {\n intervalLength = dimI;\n }\n knownInterval = true;\n }\n }\n if (knownInterval) {\n let sizeI;\n // Hold zero if the interval is degenerate, otherwise account for\n // remainder\n if (intervalLength === 0 ||\n ((intervalLength < 0) !== (denseSpec.strides[i] < 0))) {\n sizeI = 0;\n } else {\n sizeI = Math.trunc(intervalLength / denseSpec.strides[i]) +\n (intervalLength % denseSpec.strides[i] !== 0 ? 1 : 0);\n }\n processingShape.push(sizeI);\n } else {\n processingShape.push(-1);\n }\n }\n\n // Step 4: Compute the final shape\n //\n // newAxis will increase dimension by 1 (with a one-size dimension)\n // slices like foo[3, ...] 
will reduce dimension by 1.\n // This cannot be done earlier, because it depends on Step 3.\n for (let denseDim = 0; denseDim < denseSpec.finalShapeGatherIndices.length;\n ++denseDim) {\n const gatherIndex = denseSpec.finalShapeGatherIndices[denseDim];\n if (gatherIndex >= 0) {\n finalShape.push(processingShape[gatherIndex]);\n } else if (gatherIndex === NEW_AXIS) {\n finalShape.push(1);\n }\n }\n\n const finalShapeSparse = finalShape.filter(\n (dim, i) => denseSpec.finalShapeGatherIndices[i] !== NEW_AXIS);\n\n return {\n finalShapeSparse,\n finalShape,\n isIdentity,\n sliceDim0,\n isSimpleSlice,\n begin: denseSpec.begin,\n end: denseSpec.end,\n strides: denseSpec.strides\n };\n}\n\nfunction buildDenseSpec(\n sparse: StridedSliceSparseSpec, dense: StridedSliceDenseSpec) {\n dense.beginMask = 0;\n dense.endMask = 0;\n dense.shrinkAxisMask = 0;\n\n let fullIndex = 0;\n dense.beginValid = sparse.begin != null;\n dense.endValid = sparse.end != null;\n\n dense.begin = new Array(dense.dims);\n dense.end = new Array(dense.dims);\n dense.strides = new Array(dense.dims);\n dense.finalShapeGatherIndices = [];\n dense.finalShapeGatherIndicesSparse = [];\n dense.inputShapeGatherIndicesSparse = new Array(dense.dims);\n\n for (let i = 0; i < sparse.dims; i++) {\n if ((1 << i) & sparse.ellipsisMask) {\n // Only the bit that has ellipsis will fall in this condition.\n // Expand the ellipsis into the appropriate indices\n // Note: this only works because we guaranteed one ellipsis.\n const nextIndex = Math.min(\n dense.dims - (sparse.dims - i) + 1 + sparse.numAddAxisAfterEllipsis,\n dense.dims);\n for (; fullIndex < nextIndex; fullIndex++) {\n // newAxis aren't real axis so you have to skip.\n dense.begin[fullIndex] = 0;\n dense.end[fullIndex] = 0;\n dense.strides[fullIndex] = 1;\n dense.beginMask |= (1 << fullIndex);\n dense.endMask |= (1 << fullIndex);\n dense.finalShapeGatherIndices.push(fullIndex);\n dense.finalShapeGatherIndicesSparse.push(-1);\n dense.inputShapeGatherIndicesSparse[fullIndex] = i;\n }\n } else if ((1 << i) & sparse.newAxisMask) {\n // Only the bit that has newAxis will fall in this condition.\n dense.finalShapeGatherIndices.push(NEW_AXIS);\n dense.finalShapeGatherIndicesSparse.push(-1);\n } else {\n if (fullIndex === dense.begin.length) {\n throw Error(\n `Index out of range using input dim ${fullIndex}; input ` +\n `has only ${dense.dims} dims, ${dense.begin.length}.`);\n }\n\n // Gather slicing spec into appropriate index.\n if (sparse.begin != null) {\n dense.begin[fullIndex] = sparse.begin[i];\n }\n if (sparse.end != null) {\n dense.end[fullIndex] = sparse.end[i];\n }\n dense.strides[fullIndex] = sparse.strides[i];\n if (sparse.beginMask & (1 << i)) {\n dense.beginMask |= (1 << fullIndex);\n }\n if (sparse.endMask & (1 << i)) {\n dense.endMask |= (1 << fullIndex);\n }\n // If shrink, record where to get the dimensionality from (i.e. newAxis)\n // creates a fake 1 size dimension. 
Also remember shrink axis (now in\n // dense form) so we can ignore dense.end below.\n if (sparse.shrinkAxisMask & (1 << i)) {\n dense.finalShapeGatherIndices.push(SHRINK_AXIS);\n dense.finalShapeGatherIndicesSparse.push(-1);\n dense.shrinkAxisMask |= (1 << fullIndex);\n } else {\n dense.finalShapeGatherIndices.push(fullIndex);\n // Remember that where in the sparse shape the dense dim comes from.\n dense.finalShapeGatherIndicesSparse.push(i);\n }\n dense.inputShapeGatherIndicesSparse[fullIndex] = i;\n fullIndex++;\n }\n }\n}\n\nfunction canonical(\n x: number, c: number, strideI: number, dimI: number, masks: number[],\n validRange: number[]) {\n if (masks[c]) {\n return strideI > 0 ? validRange[c] : validRange[(c + 1) & 1];\n } else {\n const xFwd = x < 0 ? dimI + x : x; // make negative indices positive\n return xFwd < validRange[0] ? validRange[0] :\n xFwd > validRange[1] ? validRange[1] : xFwd;\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Acos, AcosInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes acos of the input `tf.Tensor` element-wise: `acos(x)`\n *\n * ```js\n * const x = tf.tensor1d([0, 1, -1, .7]);\n *\n * x.acos().print(); // or tf.acos(x)\n * ```\n * @param x The input tensor.\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction acos_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'acos');\n const inputs: AcosInputs = {x: $x};\n\n return ENGINE.runKernel(Acos, inputs as unknown as NamedTensorMap);\n}\nexport const acos = /* @__PURE__ */ op({acos_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Acosh, AcoshInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes the inverse hyperbolic cos of the input `tf.Tensor` element-wise:\n * `acosh(x)`\n *\n * ```js\n * const x = tf.tensor1d([10, 1, 3, 5.7]);\n *\n * x.acosh().print(); // or tf.acosh(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction acosh_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'acosh');\n const inputs: AcoshInputs = {x: $x};\n\n return ENGINE.runKernel(Acosh, inputs as unknown as NamedTensorMap);\n}\nexport const acosh = /* @__PURE__ */ op({acosh_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {AddN, AddNInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\n\n/**\n * Adds a list of `tf.Tensor`s element-wise, each with the same shape and dtype.\n *\n * ```js\n * const a = tf.tensor1d([1, 2]);\n * const b = tf.tensor1d([3, 4]);\n * const c = tf.tensor1d([5, 6]);\n *\n * tf.addN([a, b, c]).print();\n * ```\n * @param tensors A list of tensors with the same shape and dtype.\n * @doc {heading: 'Operations', subheading: 'Arithmetic'}\n */\nfunction addN_(tensors: Array): T {\n util.assert(\n Array.isArray(tensors),\n () => 'The argument passed to tf.addN() must be a list of tensors');\n util.assert(\n tensors.length >= 1,\n () => `Must pass at least one tensor to tf.addN(), but got ` +\n `${tensors.length}`);\n\n const $tensors =\n tensors.map((t, i) => convertToTensor(t, `tensors${i}`, 'addN'));\n\n const firstTensor = $tensors[0];\n $tensors.forEach(t => {\n if (t.dtype !== firstTensor.dtype) {\n throw new Error(\n 'All tensors passed to tf.addN() must have the same dtype');\n }\n });\n\n 
$tensors.forEach(t => {\n if (!util.arraysEqual(t.shape, firstTensor.shape)) {\n throw new Error(\n 'All tensors passed to tf.addN() must have the same shape');\n }\n });\n\n const inputs: AddNInputs = $tensors;\n\n return ENGINE.runKernel(AddN, inputs as unknown as NamedTensorMap);\n}\n\nexport const addN = /* @__PURE__ */ op({addN_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {All, AllAttrs, AllInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes the logical and of elements across dimensions of a `tf.Tensor`.\n *\n * Reduces the input along the dimensions given in `axes`. Unless `keepDims`\n * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in\n * `axes`. If `keepDims` is true, the reduced dimensions are retained with\n * length 1. If `axes` has no entries, all dimensions are reduced, and a\n * `tf.Tensor` with a single element is returned.\n *\n * ```js\n * const x = tf.tensor1d([1, 1, 1], 'bool');\n *\n * x.all().print(); // or tf.all(x)\n * ```\n *\n * ```js\n * const x = tf.tensor2d([1, 1, 0, 0], [2, 2], 'bool');\n *\n * const axis = 1;\n * x.all(axis).print(); // or tf.all(x, axis)\n * ```\n *\n * @param x The input tensor. Must be of dtype bool.\n * @param axis The dimension(s) to reduce. By default it reduces\n * all dimensions.\n * @param keepDims If true, retains reduced dimensions with size 1.\n *\n * @doc {heading: 'Operations', subheading: 'Reduction'}\n */\nfunction all_(\n x: Tensor|TensorLike, axis: number|number[] = null, keepDims = false): T {\n const $x = convertToTensor(x, 'x', 'all', 'bool');\n\n const inputs: AllInputs = {x: $x};\n const attrs: AllAttrs = {axis, keepDims};\n\n return ENGINE.runKernel(\n All, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const all = /* @__PURE__ */ op({all_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Any, AnyAttrs, AnyInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes the logical or of elements across dimensions of a `tf.Tensor`.\n *\n * Reduces the input along the dimensions given in `axes`. Unless `keepDims`\n * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in\n * `axes`. If `keepDims` is true, the reduced dimensions are retained with\n * length 1. If `axes` has no entries, all dimensions are reduced, and a\n * `tf.Tensor` with a single element is returned.\n *\n * ```js\n * const x = tf.tensor1d([1, 1, 1], 'bool');\n *\n * x.any().print(); // or tf.any(x)\n * ```\n *\n * ```js\n * const x = tf.tensor2d([1, 1, 0, 0], [2, 2], 'bool');\n *\n * const axis = 1;\n * x.any(axis).print(); // or tf.any(x, axis)\n * ```\n *\n * @param x The input tensor. Must be of dtype bool.\n * @param axis The dimension(s) to reduce. By default it reduces\n * all dimensions.\n * @param keepDims If true, retains reduced dimensions with size 1.\n *\n * @doc {heading: 'Operations', subheading: 'Reduction'}\n */\nfunction any_(\n x: Tensor|TensorLike, axis: number|number[] = null, keepDims = false): T {\n const $x = convertToTensor(x, 'x', 'any', 'bool');\n\n const inputs: AnyInputs = {x: $x};\n const attrs: AnyAttrs = {axis, keepDims};\n\n return ENGINE.runKernel(\n Any, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\n// tslint:disable-next-line:variable-name\nexport const any = /* @__PURE__ */ op({any_});\n","/**\n * @license\n * Copyright 2020 Google Inc. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {ArgMax, ArgMaxAttrs, ArgMaxInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Returns the indices of the maximum values along an `axis`.\n *\n * The result has the same shape as `input` with the dimension along `axis`\n * removed.\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3]);\n *\n * x.argMax().print(); // or tf.argMax(x)\n * ```\n *\n * ```js\n * const x = tf.tensor2d([1, 2, 4, 3], [2, 2]);\n *\n * const axis = 1;\n * x.argMax(axis).print(); // or tf.argMax(x, axis)\n * ```\n *\n * @param x The input tensor.\n * @param axis The dimension to reduce. Defaults to 0 (outer-most dimension).\n *\n * @doc {heading: 'Operations', subheading: 'Reduction'}\n */\nfunction argMax_(x: Tensor|TensorLike, axis = 0): T {\n const $x = convertToTensor(x, 'x', 'argMax');\n\n const inputs: ArgMaxInputs = {x: $x};\n const attrs: ArgMaxAttrs = {axis};\n\n return ENGINE.runKernel(\n ArgMax, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const argMax = /* @__PURE__ */ op({argMax_});\n","/**\n * @license\n * Copyright 2020 Google Inc. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {ArgMin, ArgMinAttrs, ArgMinInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Returns the indices of the minimum values along an `axis`.\n *\n * The result has the same shape as `input` with the dimension along `axis`\n * removed.\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3]);\n *\n * x.argMin().print(); // or tf.argMin(x)\n * ```\n *\n * ```js\n * const x = tf.tensor2d([1, 2, 4, 3], [2, 2]);\n *\n * const axis = 1;\n * x.argMin(axis).print(); // or tf.argMin(x, axis)\n * ```\n *\n * @param x The input tensor.\n * @param axis The dimension to reduce. Defaults to 0 (outer-most dimension).\n *\n * @doc {heading: 'Operations', subheading: 'Reduction'}\n */\nfunction argMin_(x: Tensor|TensorLike, axis = 0): T {\n const $x = convertToTensor(x, 'x', 'argMin');\n\n const inputs: ArgMinInputs = {x: $x};\n const attrs: ArgMinAttrs = {axis};\n\n return ENGINE.runKernel(\n ArgMin, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const argMin = /* @__PURE__ */ op({argMin_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Asin, AsinInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes asin of the input `tf.Tensor` element-wise: `asin(x)`\n *\n * ```js\n * const x = tf.tensor1d([0, 1, -1, .7]);\n *\n * x.asin().print(); // or tf.asin(x)\n * ```\n * @param x The input tensor.\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction asin_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'asin');\n const inputs: AsinInputs = {x: $x};\n\n return ENGINE.runKernel(Asin, inputs as unknown as NamedTensorMap);\n}\nexport const asin = /* @__PURE__ */ op({asin_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Asinh, AsinhInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes inverse hyperbolic sin of the input `tf.Tensor` element-wise:\n * `asinh(x)`\n *\n * ```js\n * const x = tf.tensor1d([0, 1, -1, .7]);\n *\n * x.asinh().print(); // or tf.asinh(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction asinh_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'asinh');\n\n const inputs: AsinhInputs = {x: $x};\n\n return ENGINE.runKernel(Asinh, inputs as unknown as NamedTensorMap);\n}\nexport const asinh = /* @__PURE__ */ op({asinh_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Atan, AtanInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes atan of the input `tf.Tensor` element-wise: `atan(x)`\n *\n * ```js\n * const x = tf.tensor1d([0, 1, -1, .7]);\n *\n * x.atan().print(); // or tf.atan(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction atan_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'atan');\n\n const inputs: AtanInputs = {x: $x};\n\n return ENGINE.runKernel(Atan, inputs as unknown as NamedTensorMap);\n}\nexport const atan = /* @__PURE__ */ op({atan_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Atan2, Atan2Inputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes arctangent of `tf.Tensor`s a / b element-wise: `atan2(a, b)`.\n * Supports broadcasting.\n *\n * ```js\n * const a = tf.tensor1d([1.0, 1.0, -1.0, .7]);\n * const b = tf.tensor1d([2.0, 13.0, 3.5, .21]);\n *\n * tf.atan2(a, b).print()\n * ```\n *\n * @param a The first tensor.\n * @param b The second tensor. Must have the same dtype as `a`.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction atan2_(\n a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n let $a = convertToTensor(a, 'a', 'atan2');\n let $b = convertToTensor(b, 'b', 'atan2');\n [$a, $b] = makeTypesMatch($a, $b);\n\n const inputs: Atan2Inputs = {a: $a, b: $b};\n\n return ENGINE.runKernel(Atan2, inputs as unknown as NamedTensorMap);\n}\n\nexport const atan2 = /* @__PURE__ */ op({atan2_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Atanh, AtanhInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes inverse hyperbolic tan of the input `tf.Tensor` element-wise:\n * `atanh(x)`\n *\n * ```js\n * const x = tf.tensor1d([0, .1, -.1, .7]);\n *\n * x.atanh().print(); // or tf.atanh(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction atanh_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'atanh');\n\n const inputs: AtanhInputs = {x: $x};\n\n return ENGINE.runKernel(Atanh, inputs as unknown as NamedTensorMap);\n}\nexport const atanh = /* @__PURE__ */ op({atanh_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport * as util from '../util';\n\ntype PadType = 'SAME'|'VALID'|'NUMBER'|'EXPLICIT';\n\n// For NHWC should be in the following form:\n// [[0, 0], [pad_top,pad_bottom], [pad_left, pad_right], [0, 0]]\n// For NCHW should be in the following form:\n// [[0, 0], [0, 0], [pad_top,pad_bottom], [pad_left, pad_right]]\n// Reference: https://www.tensorflow.org/api_docs/python/tf/nn/conv2d\nexport type ExplicitPadding =\n [[number, number], [number, number], [number, number], [number, number]];\n\nexport type PadInfo = {\n top: number,\n left: number,\n right: number,\n bottom: number,\n type: PadType\n};\n\nexport type PadInfo3D = {\n top: number,\n left: number,\n right: number,\n bottom: number,\n front: number,\n back: number,\n type: PadType\n};\n\n/**\n * Information about the forward pass of a convolution/pooling operation.\n * It includes input and output shape, strides, filter size and padding\n * information.\n */\nexport type Conv2DInfo = {\n batchSize: number,\n inHeight: number,\n inWidth: number,\n inChannels: number,\n outHeight: number,\n outWidth: number,\n outChannels: number,\n dataFormat: 'channelsFirst'|'channelsLast',\n strideHeight: number,\n strideWidth: number,\n dilationHeight: number,\n dilationWidth: number,\n filterHeight: number,\n filterWidth: 
number,\n effectiveFilterHeight: number,\n effectiveFilterWidth: number,\n padInfo: PadInfo,\n inShape: [number, number, number, number],\n outShape: [number, number, number, number],\n filterShape: [number, number, number, number]\n};\n\n/**\n *\n * @param inputShape Input tensor shape is of the following dimensions:\n * `[batch, height, width, inChannels]`.\n * @param filterShape The filter shape is of the following dimensions:\n * `[filterHeight, filterWidth, depth]`.\n * @param strides The strides of the sliding window for each dimension of the\n * input tensor: `[strideHeight, strideWidth]`.\n * If `strides` is a single number,\n * then `strideHeight == strideWidth`.\n * @param pad The type of padding algorithm.\n * - `same` and stride 1: output will be of same size as input,\n * regardless of filter size.\n * - `valid`: output will be smaller than input if filter is larger\n * than 1*1x1.\n * - For more info, see this guide:\n * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](\n * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)\n * @param dataFormat The data format of the input and output data.\n * Defaults to 'NHWC'.\n * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`.\n * Defaults to `[1, 1]`. If `dilations` is a single number, then\n * `dilationHeight == dilationWidth`.\n */\nexport function computeDilation2DInfo(\n inputShape: [number, number, number, number],\n filterShape: [number, number, number], strides: number|[number, number],\n pad: 'same'|'valid'|number, dataFormat: 'NHWC' = 'NHWC',\n dilations: number|[number, number]) {\n // `computerConv2DInfo` require filterShape to be in the dimension of:\n // `[filterHeight, filterWidth, depth, outDepth]`, dilation2d doesn't have\n // outDepth, it should have the same depth as the input.\n // Input shape: [batch, height, width, inChannels]\n const inputChannels = inputShape[3];\n const $filterShape =\n [...filterShape, inputChannels] as [number, number, number, number];\n const $dataFormat = convertConv2DDataFormat(dataFormat);\n\n return computeConv2DInfo(\n inputShape, $filterShape, strides, dilations, pad,\n null /* roundingMode */, null /* depthWise */, $dataFormat);\n}\n\nexport function computePool2DInfo(\n inShape: [number, number, number, number],\n filterSize: [number, number]|number, strides: number|[number, number],\n dilations: number|[number, number],\n pad: 'same'|'valid'|number|ExplicitPadding,\n roundingMode?: 'floor'|'round'|'ceil',\n dataFormat: 'channelsFirst'|'channelsLast' = 'channelsLast'): Conv2DInfo {\n const [filterHeight, filterWidth] = parseTupleParam(filterSize);\n\n let filterShape: [number, number, number, number];\n if (dataFormat === 'channelsLast') {\n filterShape = [filterHeight, filterWidth, inShape[3], inShape[3]];\n } else if (dataFormat === 'channelsFirst') {\n filterShape = [filterHeight, filterWidth, inShape[1], inShape[1]];\n } else {\n throw new Error(`Unknown dataFormat ${dataFormat}`);\n }\n\n return computeConv2DInfo(\n inShape, filterShape, strides, dilations, pad, roundingMode, false,\n dataFormat);\n}\n\n/**\n * Computes the information for a forward pass of a pooling3D operation.\n */\nexport function computePool3DInfo(\n inShape: [number, number, number, number, number],\n filterSize: number|[number, number, number],\n strides: number|[number, number, number],\n dilations: number|[number, number, number], pad: 'same'|'valid'|number,\n roundingMode?: 'floor'|'round'|'ceil',\n dataFormat: 'NDHWC'|'NCDHW' = 'NDHWC'): Conv3DInfo {\n 
const [filterDepth, filterHeight, filterWidth] = parse3TupleParam(filterSize);\n\n let filterShape: [number, number, number, number, number];\n let $dataFormat: 'channelsFirst'|'channelsLast';\n if (dataFormat === 'NDHWC') {\n $dataFormat = 'channelsLast';\n filterShape =\n [filterDepth, filterHeight, filterWidth, inShape[4], inShape[4]];\n } else if (dataFormat === 'NCDHW') {\n $dataFormat = 'channelsFirst';\n filterShape =\n [filterDepth, filterHeight, filterWidth, inShape[1], inShape[1]];\n } else {\n throw new Error(`Unknown dataFormat ${dataFormat}`);\n }\n\n return computeConv3DInfo(\n inShape, filterShape, strides, dilations, pad, false, $dataFormat,\n roundingMode);\n}\n\n/**\n * Computes the information for a forward pass of a convolution/pooling\n * operation.\n */\nexport function computeConv2DInfo(\n inShape: [number, number, number, number],\n filterShape: [number, number, number, number],\n strides: number|[number, number], dilations: number|[number, number],\n pad: 'same'|'valid'|number|ExplicitPadding,\n roundingMode?: 'floor'|'round'|'ceil', depthwise = false,\n dataFormat: 'channelsFirst'|'channelsLast' = 'channelsLast'): Conv2DInfo {\n let [batchSize, inHeight, inWidth, inChannels] = [-1, -1, -1, -1];\n if (dataFormat === 'channelsLast') {\n [batchSize, inHeight, inWidth, inChannels] = inShape;\n } else if (dataFormat === 'channelsFirst') {\n [batchSize, inChannels, inHeight, inWidth] = inShape;\n } else {\n throw new Error(`Unknown dataFormat ${dataFormat}`);\n }\n\n const [filterHeight, filterWidth, , filterChannels] = filterShape;\n const [strideHeight, strideWidth] = parseTupleParam(strides);\n const [dilationHeight, dilationWidth] = parseTupleParam(dilations);\n\n const effectiveFilterHeight =\n getEffectiveFilterSize(filterHeight, dilationHeight);\n const effectiveFilterWidth =\n getEffectiveFilterSize(filterWidth, dilationWidth);\n const {padInfo, outHeight, outWidth} = getPadAndOutInfo(\n pad, inHeight, inWidth, strideHeight, strideWidth, effectiveFilterHeight,\n effectiveFilterWidth, roundingMode, dataFormat);\n\n const outChannels = depthwise ? 
filterChannels * inChannels : filterChannels;\n\n let outShape: [number, number, number, number];\n if (dataFormat === 'channelsFirst') {\n outShape = [batchSize, outChannels, outHeight, outWidth];\n } else if (dataFormat === 'channelsLast') {\n outShape = [batchSize, outHeight, outWidth, outChannels];\n }\n\n return {\n batchSize,\n dataFormat,\n inHeight,\n inWidth,\n inChannels,\n outHeight,\n outWidth,\n outChannels,\n padInfo,\n strideHeight,\n strideWidth,\n filterHeight,\n filterWidth,\n effectiveFilterHeight,\n effectiveFilterWidth,\n dilationHeight,\n dilationWidth,\n inShape,\n outShape,\n filterShape\n };\n}\n\n/**\n * Information about the forward pass of a 3D convolution/pooling operation.\n * It includes input and output shape, strides, filter size and padding\n * information.\n */\nexport type Conv3DInfo = {\n batchSize: number,\n inDepth: number,\n inHeight: number,\n inWidth: number,\n inChannels: number,\n outDepth: number,\n outHeight: number,\n outWidth: number,\n outChannels: number,\n dataFormat: 'channelsFirst'|'channelsLast',\n strideDepth: number,\n strideHeight: number,\n strideWidth: number,\n dilationDepth: number,\n dilationHeight: number,\n dilationWidth: number,\n filterDepth: number,\n filterHeight: number,\n filterWidth: number,\n effectiveFilterDepth: number,\n effectiveFilterHeight: number,\n effectiveFilterWidth: number,\n padInfo: PadInfo3D,\n inShape: [number, number, number, number, number],\n outShape: [number, number, number, number, number],\n filterShape: [number, number, number, number, number]\n};\n\n/**\n * Computes the information for a forward pass of a 3D convolution/pooling\n * operation.\n */\nexport function computeConv3DInfo(\n inShape: [number, number, number, number, number],\n filterShape: [number, number, number, number, number],\n strides: number|[number, number, number],\n dilations: number|[number, number, number], pad: 'same'|'valid'|number,\n depthwise = false,\n dataFormat: 'channelsFirst'|'channelsLast' = 'channelsLast',\n roundingMode?: 'floor'|'round'|'ceil'): Conv3DInfo {\n let [batchSize, inDepth, inHeight, inWidth, inChannels] =\n [-1, -1, -1, -1, -1];\n if (dataFormat === 'channelsLast') {\n [batchSize, inDepth, inHeight, inWidth, inChannels] = inShape;\n } else if (dataFormat === 'channelsFirst') {\n [batchSize, inChannels, inDepth, inHeight, inWidth] = inShape;\n } else {\n throw new Error(`Unknown dataFormat ${dataFormat}`);\n }\n\n const [filterDepth, filterHeight, filterWidth, , filterChannels] =\n filterShape;\n const [strideDepth, strideHeight, strideWidth] = parse3TupleParam(strides);\n const [dilationDepth, dilationHeight, dilationWidth] =\n parse3TupleParam(dilations);\n\n const effectiveFilterDepth =\n getEffectiveFilterSize(filterDepth, dilationDepth);\n const effectiveFilterHeight =\n getEffectiveFilterSize(filterHeight, dilationHeight);\n const effectiveFilterWidth =\n getEffectiveFilterSize(filterWidth, dilationWidth);\n const {padInfo, outDepth, outHeight, outWidth} = get3DPadAndOutInfo(\n pad, inDepth, inHeight, inWidth, strideDepth, strideHeight, strideWidth,\n effectiveFilterDepth, effectiveFilterHeight, effectiveFilterWidth,\n roundingMode);\n\n const outChannels = depthwise ? 
filterChannels * inChannels : filterChannels;\n\n let outShape: [number, number, number, number, number];\n if (dataFormat === 'channelsFirst') {\n outShape = [batchSize, outChannels, outDepth, outHeight, outWidth];\n } else if (dataFormat === 'channelsLast') {\n outShape = [batchSize, outDepth, outHeight, outWidth, outChannels];\n }\n\n return {\n batchSize,\n dataFormat,\n inDepth,\n inHeight,\n inWidth,\n inChannels,\n outDepth,\n outHeight,\n outWidth,\n outChannels,\n padInfo,\n strideDepth,\n strideHeight,\n strideWidth,\n filterDepth,\n filterHeight,\n filterWidth,\n effectiveFilterDepth,\n effectiveFilterHeight,\n effectiveFilterWidth,\n dilationDepth,\n dilationHeight,\n dilationWidth,\n inShape,\n outShape,\n filterShape\n };\n}\n\nfunction computeOutputShape2D(\n inShape: [number, number], fieldSize: number, stride: number,\n zeroPad?: number, roundingMode?: 'floor'|'round'|'ceil'): [number, number] {\n if (zeroPad == null) {\n zeroPad = computeDefaultPad(inShape, fieldSize, stride);\n }\n const inputRows = inShape[0];\n const inputCols = inShape[1];\n\n const outputRows =\n round((inputRows - fieldSize + 2 * zeroPad) / stride + 1, roundingMode);\n const outputCols =\n round((inputCols - fieldSize + 2 * zeroPad) / stride + 1, roundingMode);\n\n return [outputRows, outputCols];\n}\n\nfunction computeOutputShape4D(\n inShape: [number, number, number, number],\n filterShape: [number, number, number], outChannels: number,\n strides: [number, number, number], zeroPad?: number,\n roundingMode?: 'floor'|'round'|'ceil'): [number, number, number, number] {\n if (zeroPad == null) {\n zeroPad = computeDefaultPad(inShape, filterShape[0], strides[0]);\n }\n const outShape: [number, number, number, number] = [0, 0, 0, outChannels];\n for (let index = 0; index < 3; index++) {\n if (inShape[index] + 2 * zeroPad >= filterShape[index]) {\n outShape[index] = round(\n (inShape[index] - filterShape[index] + 2 * zeroPad) / strides[index] +\n 1,\n roundingMode);\n }\n }\n return outShape;\n}\n\nexport function computeDefaultPad(\n inputShape: [number, number]|[number, number, number, number],\n fieldSize: number, stride: number, dilation = 1): number {\n const effectiveFieldSize = getEffectiveFilterSize(fieldSize, dilation);\n return Math.floor(\n (inputShape[0] * (stride - 1) - stride + effectiveFieldSize) / 2);\n}\n\nfunction parseTupleParam(param: number|number[]): [number, number, number] {\n if (typeof param === 'number') {\n return [param, param, param];\n }\n if (param.length === 2) {\n return [param[0], param[1], 1];\n }\n return param as [number, number, number];\n}\n\nfunction parse3TupleParam(param: number|[number, number, number]):\n [number, number, number] {\n return typeof param === 'number' ? 
[param, param, param] : param;\n}\n\n/* See https://www.tensorflow.org/api_docs/python/tf/nn/atrous_conv2d\n * Atrous convolution is equivalent to standard convolution with upsampled\n * filters with effective_filter_height =\n * filter_height + (filter_height - 1) * (dilation - 1)\n * and effective_filter_width =\n * filter_width + (filter_width - 1) * (dilation - 1),\n * produced by inserting dilation - 1 zeros along consecutive elements across\n * the filters' spatial dimensions.\n * When there is a dilation, this converts a filter dimension to the\n * effective filter dimension, so it can be used in a standard convolution.\n */\nfunction getEffectiveFilterSize(filterSize: number, dilation: number) {\n if (dilation <= 1) {\n return filterSize;\n }\n\n return filterSize + (filterSize - 1) * (dilation - 1);\n}\n\nfunction getPadAndOutInfo(\n pad: 'same'|'valid'|number|ExplicitPadding, inHeight: number,\n inWidth: number, strideHeight: number, strideWidth: number,\n filterHeight: number, filterWidth: number,\n roundingMode: 'floor'|'round'|'ceil',\n dataFormat: 'channelsFirst'|\n 'channelsLast'): {padInfo: PadInfo, outHeight: number, outWidth: number} {\n let padInfo: PadInfo;\n let outHeight: number;\n let outWidth: number;\n\n if (typeof pad === 'number') {\n const padType = (pad === 0) ? 'VALID' : 'NUMBER';\n padInfo = {top: pad, bottom: pad, left: pad, right: pad, type: padType};\n const outShape = computeOutputShape2D(\n [inHeight, inWidth], filterHeight, strideHeight, pad, roundingMode);\n outHeight = outShape[0];\n outWidth = outShape[1];\n } else if (pad === 'same') {\n outHeight = Math.ceil(inHeight / strideHeight);\n outWidth = Math.ceil(inWidth / strideWidth);\n const padAlongHeight =\n Math.max(0, (outHeight - 1) * strideHeight + filterHeight - inHeight);\n const padAlongWidth =\n Math.max(0, (outWidth - 1) * strideWidth + filterWidth - inWidth);\n const top = Math.floor(padAlongHeight / 2);\n const bottom = padAlongHeight - top;\n const left = Math.floor(padAlongWidth / 2);\n const right = padAlongWidth - left;\n padInfo = {top, bottom, left, right, type: 'SAME'};\n } else if (pad === 'valid') {\n padInfo = {top: 0, bottom: 0, left: 0, right: 0, type: 'VALID'};\n outHeight = Math.ceil((inHeight - filterHeight + 1) / strideHeight);\n outWidth = Math.ceil((inWidth - filterWidth + 1) / strideWidth);\n } else if (typeof pad === 'object') {\n const top = dataFormat === 'channelsLast' ? pad[1][0] : pad[2][0];\n const bottom = dataFormat === 'channelsLast' ? pad[1][1] : pad[2][1];\n const left = dataFormat === 'channelsLast' ? pad[2][0] : pad[3][0];\n const right = dataFormat === 'channelsLast' ? 
pad[2][1] : pad[3][1];\n const padType = (top === 0 && bottom === 0 && left === 0 && right === 0) ?\n 'VALID' :\n 'EXPLICIT';\n padInfo = {top, bottom, left, right, type: padType};\n outHeight = round(\n (inHeight - filterHeight + top + bottom) / strideHeight + 1,\n roundingMode);\n outWidth = round(\n (inWidth - filterWidth + left + right) / strideWidth + 1, roundingMode);\n } else {\n throw Error(`Unknown padding parameter: ${pad}`);\n }\n return {padInfo, outHeight, outWidth};\n}\n\nfunction get3DPadAndOutInfo(\n pad: 'same'|'valid'|number, inDepth: number, inHeight: number,\n inWidth: number, strideDepth: number, strideHeight: number,\n strideWidth: number, filterDepth: number, filterHeight: number,\n filterWidth: number, roundingMode?: 'floor'|'round'|'ceil'): {\n padInfo: PadInfo3D,\n outDepth: number,\n outHeight: number,\n outWidth: number\n} {\n let padInfo: PadInfo3D;\n let outDepth: number;\n let outHeight: number;\n let outWidth: number;\n\n if (pad === 'valid') {\n pad = 0;\n }\n\n if (typeof pad === 'number') {\n const padType = (pad === 0) ? 'VALID' : 'NUMBER';\n padInfo = {\n top: pad,\n bottom: pad,\n left: pad,\n right: pad,\n front: pad,\n back: pad,\n type: padType\n };\n const outShape = computeOutputShape4D(\n [inDepth, inHeight, inWidth, 1],\n [filterDepth, filterHeight, filterWidth], 1,\n [strideDepth, strideHeight, strideWidth], pad, roundingMode);\n outDepth = outShape[0];\n outHeight = outShape[1];\n outWidth = outShape[2];\n } else if (pad === 'same') {\n outDepth = Math.ceil(inDepth / strideDepth);\n outHeight = Math.ceil(inHeight / strideHeight);\n outWidth = Math.ceil(inWidth / strideWidth);\n const padAlongDepth = (outDepth - 1) * strideDepth + filterDepth - inDepth;\n const padAlongHeight =\n (outHeight - 1) * strideHeight + filterHeight - inHeight;\n const padAlongWidth = (outWidth - 1) * strideWidth + filterWidth - inWidth;\n const front = Math.floor(padAlongDepth / 2);\n const back = padAlongDepth - front;\n const top = Math.floor(padAlongHeight / 2);\n const bottom = padAlongHeight - top;\n const left = Math.floor(padAlongWidth / 2);\n const right = padAlongWidth - left;\n\n padInfo = {top, bottom, left, right, front, back, type: 'SAME'};\n } else {\n throw Error(`Unknown padding parameter: ${pad}`);\n }\n return {padInfo, outDepth, outHeight, outWidth};\n}\n\n/**\n * Rounds a value depending on the rounding mode\n * @param value\n * @param roundingMode A string from: 'ceil', 'round', 'floor'. 
If none is\n * provided, it will default to truncate.\n */\nfunction round(value: number, roundingMode?: 'floor'|'round'|'ceil') {\n if (!roundingMode) {\n return Math.trunc(value);\n }\n switch (roundingMode) {\n case 'round':\n // used for Caffe Conv\n return Math.round(value);\n case 'ceil':\n // used for Caffe Pool\n return Math.ceil(value);\n case 'floor':\n return Math.floor(value);\n default:\n throw new Error(`Unknown roundingMode ${roundingMode}`);\n }\n}\n\nexport function tupleValuesAreOne(param: number|number[]): boolean {\n const [dimA, dimB, dimC] = parseTupleParam(param);\n return dimA === 1 && dimB === 1 && dimC === 1;\n}\n\nexport function eitherStridesOrDilationsAreOne(\n strides: number|number[], dilations: number|number[]): boolean {\n return tupleValuesAreOne(strides) || tupleValuesAreOne(dilations);\n}\n\nexport function stridesOrDilationsArePositive(values: number|\n number[]): boolean {\n return parseTupleParam(values).every(value => value > 0);\n}\n\n/**\n * Convert Conv2D dataFormat from 'NHWC'|'NCHW' to\n * 'channelsLast'|'channelsFirst'\n * @param dataFormat in 'NHWC'|'NCHW' mode\n * @return dataFormat in 'channelsLast'|'channelsFirst' mode\n * @throws unknown dataFormat\n */\nexport function convertConv2DDataFormat(dataFormat: 'NHWC'|'NCHW'):\n 'channelsLast'|'channelsFirst' {\n if (dataFormat === 'NHWC') {\n return 'channelsLast';\n } else if (dataFormat === 'NCHW') {\n return 'channelsFirst';\n } else {\n throw new Error(`Unknown dataFormat ${dataFormat}`);\n }\n}\n\n/**\n * Check validity of pad when using dimRoundingMode.\n * @param opDesc A string of op description\n * @param pad The type of padding algorithm.\n * - `same` and stride 1: output will be of same size as input,\n * regardless of filter size.\n * - `valid` output will be smaller than input if filter is larger\n * than 1x1.\n * - For more info, see this guide:\n * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](\n * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)\n * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is\n * provided, it will default to truncate.\n * @throws unknown padding parameter\n */\nexport function checkPadOnDimRoundingMode(\n opDesc: string, pad: 'valid'|'same'|number|ExplicitPadding,\n dimRoundingMode?: 'floor'|'round'|'ceil') {\n if (dimRoundingMode != null) {\n if (typeof pad === 'string') {\n throw Error(\n `Error in ${opDesc}: pad must be an integer when using ` +\n `dimRoundingMode ${dimRoundingMode} but got pad ${pad}.`);\n } else if (typeof pad === 'number') {\n util.assert(\n util.isInt(pad),\n () => `Error in ${opDesc}: pad must be an integer when using ` +\n `dimRoundingMode ${dimRoundingMode} but got pad ${pad}.`);\n } else if (typeof pad === 'object') {\n (pad as ExplicitPadding).forEach(p => {\n p.forEach(v => {\n util.assert(\n util.isInt(v),\n () => `Error in ${opDesc}: pad must be an integer when using ` +\n `dimRoundingMode ${dimRoundingMode} but got pad ${v}.`);\n });\n });\n } else {\n throw Error(`Error in ${opDesc}: Unknown padding parameter: ${pad}`);\n }\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Reshape, ReshapeAttrs, ReshapeInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {Rank, ShapeMap, TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Reshapes a `tf.Tensor` to a given shape.\n *\n * Given an input tensor, returns a new tensor with the same values as the\n * input tensor with shape `shape`.\n *\n * If one component of shape is the special value -1, the size of that\n * dimension is computed so that the total size remains constant. In\n * particular, a shape of [-1] flattens into 1-D. At most one component of\n * shape can be -1.\n *\n * If shape is 1-D or higher, then the operation returns a tensor with shape\n * shape filled with the values of tensor. In this case, the number of\n * elements implied by shape must be the same as the number of elements in\n * tensor.\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3, 4]);\n * x.reshape([2, 2]).print();\n * ```\n *\n * @param x The input tensor to be reshaped.\n * @param shape An array of integers defining the output tensor shape.\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction reshape_(\n x: Tensor|TensorLike, shape: ShapeMap[R]): Tensor {\n const $x = convertToTensor(x, 'x', 'reshape', 'string_or_numeric');\n\n const inputs: ReshapeInputs = {x: $x};\n const attrs: ReshapeAttrs = {shape};\n return ENGINE.runKernel(\n Reshape, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\nexport const reshape = /* @__PURE__ */ op({reshape_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {AvgPool, AvgPoolAttrs, AvgPoolInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor3D, Tensor4D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {cast} from './cast';\nimport * as conv_util from './conv_util';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Computes the 2D average pooling of an image.\n *\n * @param x The input tensor, of rank 4 or rank 3 of shape\n * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.\n * @param filterSize The filter size: `[filterHeight, filterWidth]`. If\n * `filterSize` is a single number, then `filterHeight == filterWidth`.\n * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If\n * `strides` is a single number, then `strideHeight == strideWidth`.\n * @param pad The type of padding algorithm:\n * - `same` and stride 1: output will be of same size as input,\n * regardless of filter size.\n * - `valid`: output will be smaller than input if filter is larger\n * than 1x1.\n * - For more info, see this guide:\n * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](\n * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)\n * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is\n * provided, it will default to truncate.\n *\n * @doc {heading: 'Operations', subheading: 'Convolution'}\n */\nfunction avgPool_(\n x: T|TensorLike, filterSize: [number, number]|number,\n strides: [number, number]|number,\n pad: 'valid'|'same'|number|conv_util.ExplicitPadding,\n dimRoundingMode?: 'floor'|'round'|'ceil'): T {\n const $x = convertToTensor(x, 'x', 'avgPool', 'float32');\n const dilations = 1;\n\n util.assert(\n conv_util.eitherStridesOrDilationsAreOne(strides, dilations),\n () => 'Error in avgPool: Either strides or dilations must be 1. 
' +\n `Got strides ${strides} and dilations '${dilations}'`);\n\n let x4D = $x as Tensor4D;\n let reshapedTo4D = false;\n if ($x.rank === 3) {\n reshapedTo4D = true;\n x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);\n }\n\n util.assert(\n x4D.rank === 4,\n () => `Error in avgPool: x must be rank 4 but got rank ${x4D.rank}.`);\n conv_util.checkPadOnDimRoundingMode('avgPool', pad, dimRoundingMode);\n const inputs: AvgPoolInputs = {x: x4D};\n const attrs: AvgPoolAttrs = {filterSize, strides, pad, dimRoundingMode};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n let res = ENGINE.runKernel(\n AvgPool, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as T;\n\n res = cast(res, $x.dtype);\n\n if (reshapedTo4D) {\n return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) as T;\n }\n\n return res;\n}\n\nexport const avgPool = /* @__PURE__ */ op({avgPool_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {AvgPool3D, AvgPool3DAttrs, AvgPool3DInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor4D, Tensor5D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {cast} from './cast';\nimport {checkPadOnDimRoundingMode} from './conv_util';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Computes the 3D average pooling.\n *\n * ```js\n * const x = tf.tensor5d([1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 2, 2, 1]);\n * const result = tf.avgPool3d(x, 2, 1, 'valid');\n * result.print();\n * ```\n *\n * @param x The input tensor, of rank 5 or rank 4 of shape\n * `[batch, depth, height, width, inChannels]`.\n * @param filterSize The filter size:\n * `[filterDepth, filterHeight, filterWidth]`.\n * If `filterSize` is a single number,\n * then `filterDepth == filterHeight == filterWidth`.\n * @param strides The strides of the pooling:\n * `[strideDepth, strideHeight, strideWidth]`.\n * If `strides` is a single number,\n * then `strideDepth == strideHeight == strideWidth`.\n * @param pad The type of padding algorithm.\n * - `same` and stride 1: output will be of same size as input,\n * regardless of filter size.\n * - `valid`: output will be smaller than input if filter is larger\n * than 1*1x1.\n * - For more info, see this guide:\n * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](\n * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)\n * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is\n * provided, it will default to truncate.\n * @param dataFormat An optional string from: \"NDHWC\", \"NCDHW\". Defaults to\n * \"NDHWC\". Specify the data format of the input and output data. 
With the\n * default format \"NDHWC\", the data is stored in the order of: [batch,\n * depth, height, width, channels]. Only \"NDHWC\" is currently supported.\n *\n * @doc {heading: 'Operations', subheading: 'Convolution'}\n */\nfunction avgPool3d_(\n x: T|TensorLike, filterSize: [number, number, number]|number,\n strides: [number, number, number]|number, pad: 'valid'|'same'|number,\n dimRoundingMode?: 'floor'|'round'|'ceil',\n dataFormat: 'NDHWC'|'NCDHW' = 'NDHWC'): T {\n const $x = convertToTensor(x, 'x', 'avgPool3d', 'float32');\n\n let x5D = $x as Tensor5D;\n let reshapedTo5D = false;\n if ($x.rank === 4) {\n reshapedTo5D = true;\n x5D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2], $x.shape[3]]);\n }\n\n util.assert(\n x5D.rank === 5,\n () => `Error in avgPool3d: x must be rank 5 but got rank ${x5D.rank}.`);\n util.assert(\n dataFormat === 'NDHWC',\n () => `Error in avgPool3d: Only NDHWC is currently supported, ` +\n `but got dataFormat of ${dataFormat}`);\n util.assert(\n (typeof strides === 'number' && strides > 0) ||\n (Array.isArray(strides) && strides[0] > 0 && strides[1] > 0 &&\n strides[2] > 0),\n () => `Error in avgPool3d: Stride must be > 0, but got '${strides}'`);\n checkPadOnDimRoundingMode('avgPool3d', pad, dimRoundingMode);\n const inputs: AvgPool3DInputs = {x: x5D};\n const attrs:\n AvgPool3DAttrs = {filterSize, strides, pad, dimRoundingMode, dataFormat};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n let res = ENGINE.runKernel(\n AvgPool3D, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as T;\n\n res = cast(res, x5D.dtype);\n\n if (reshapedTo5D) {\n return reshape(\n res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]) as\n T;\n }\n\n return res;\n}\n\nexport const avgPool3d = /* @__PURE__ */ op({avgPool3d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Concat, ConcatAttrs, ConcatInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensorArray} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {assert} from '../util';\n\nimport {clone} from './clone';\nimport {op} from './operation';\n\n/**\n * Concatenates a list of `tf.Tensor`s along a given axis.\n *\n * The tensors ranks and types must match, and their sizes must match in all\n * dimensions except `axis`.\n *\n * Also available are stricter rank-specific methods that assert that\n * `tensors` are of the given rank:\n * - `tf.concat1d`\n * - `tf.concat2d`\n * - `tf.concat3d`\n * - `tf.concat4d`\n *\n * Except `tf.concat1d` (which does not have axis param), all methods have\n * same signature as this method.\n *\n * ```js\n * const a = tf.tensor1d([1, 2]);\n * const b = tf.tensor1d([3, 4]);\n * a.concat(b).print(); // or a.concat(b)\n * ```\n *\n * ```js\n * const a = tf.tensor1d([1, 2]);\n * const b = tf.tensor1d([3, 4]);\n * const c = tf.tensor1d([5, 6]);\n * tf.concat([a, b, c]).print();\n * ```\n *\n * ```js\n * const a = tf.tensor2d([[1, 2], [10, 20]]);\n * const b = tf.tensor2d([[3, 4], [30, 40]]);\n * const axis = 1;\n * tf.concat([a, b], axis).print();\n * ```\n * @param tensors A list of tensors to concatenate.\n * @param axis The axis to concatenate along. Defaults to 0 (the first dim).\n *\n * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}\n */\nfunction concat_(tensors: Array, axis = 0): T {\n assert(tensors.length >= 1, () => 'Pass at least one tensor to concat');\n\n const $tensors =\n convertToTensorArray(tensors, 'tensors', 'concat', 'string_or_numeric');\n\n if ($tensors[0].dtype === 'complex64') {\n $tensors.forEach(tensor => {\n if (tensor.dtype !== 'complex64') {\n throw new Error(`Cannot concatenate complex64 tensors with a tensor\n with dtype ${tensor.dtype}. `);\n }\n });\n }\n\n if ($tensors.length === 1) {\n return clone($tensors[0]);\n }\n\n const inputs: ConcatInputs = $tensors;\n const attr: ConcatAttrs = {axis};\n\n return ENGINE.runKernel(\n Concat, inputs as unknown as NamedTensorMap,\n attr as unknown as NamedAttrMap);\n}\n\nexport const concat = /* @__PURE__ */ op({concat_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Sigmoid, SigmoidInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes sigmoid element-wise, `1 / (1 + exp(-x))`\n *\n * ```js\n * const x = tf.tensor1d([0, -1, 2, -3]);\n *\n * x.sigmoid().print(); // or tf.sigmoid(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction sigmoid_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'sigmoid', 'float32');\n\n const inputs: SigmoidInputs = {x: $x};\n\n return ENGINE.runKernel(Sigmoid, inputs as unknown as NamedTensorMap);\n}\nexport const sigmoid = /* @__PURE__ */ op({sigmoid_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Slice, SliceAttrs, SliceInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {Rank, TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Extracts a slice from a `tf.Tensor` starting at coordinates `begin`\n * and is of size `size`.\n *\n * Also available are stricter rank-specific methods with the same signature\n * as this method that assert that `x` is of the given rank:\n * - `tf.slice1d`\n * - `tf.slice2d`\n * - `tf.slice3d`\n * - `tf.slice4d`\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3, 4]);\n *\n * x.slice([1], [2]).print();\n * ```\n *\n * ```js\n * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n *\n * x.slice([1, 0], [1, 2]).print();\n * ```\n * @param x The input `tf.Tensor` to slice from.\n * @param begin The coordinates to start the slice from. The length can be\n * less than the rank of x - the rest of the axes will have implicit 0 as\n * start. Can also be a single number, in which case it specifies the\n * first axis.\n * @param size The size of the slice. 
The length can be less than the rank of\n * x - the rest of the axes will have implicit -1. A value of -1 requests\n * the rest of the dimensions in the axis. Can also be a single number,\n * in which case it specifies the size of the first axis.\n *\n * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}\n */\nfunction slice_>(\n x: T|TensorLike, begin: number|number[], size?: number|number[]): T {\n const $x = convertToTensor(x, 'x', 'slice', 'string_or_numeric');\n\n if ($x.rank === 0) {\n throw new Error('Slicing scalar is not possible');\n }\n\n const inputs: SliceInputs = {x: $x};\n const attrs: SliceAttrs = {begin, size};\n\n return ENGINE.runKernel(\n Slice, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const slice = /* @__PURE__ */ op({slice_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Tanh, TanhInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes hyperbolic tangent of the input `tf.Tensor` element-wise: `tanh(x)`\n *\n * ```js\n * const x = tf.tensor1d([0, 1, -1, 70]);\n *\n * x.tanh().print(); // or tf.tanh(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction tanh_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'tanh', 'float32');\n\n const inputs: TanhInputs = {x: $x};\n\n return ENGINE.runKernel(Tanh, inputs as unknown as NamedTensorMap);\n}\nexport const tanh = /* @__PURE__ */ op({tanh_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Scalar, Tensor1D, Tensor2D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {add} from './add';\nimport {concat} from './concat';\nimport {matMul} from './mat_mul';\nimport {mul} from './mul';\nimport {op} from './operation';\nimport {sigmoid} from './sigmoid';\nimport {slice} from './slice';\nimport {tanh} from './tanh';\n\n/**\n * Computes the next state and output of a BasicLSTMCell.\n *\n * Returns `[newC, newH]`.\n *\n * Derived from tf.contrib.rnn.BasicLSTMCell.\n *\n * @param forgetBias Forget bias for the cell.\n * @param lstmKernel The weights for the cell.\n * @param lstmBias The bias for the cell.\n * @param data The input to the cell.\n * @param c Previous cell state.\n * @param h Previous cell output.\n *\n * @doc {heading: 'Operations', subheading: 'RNN'}\n */\nfunction basicLSTMCell_(\n forgetBias: Scalar|TensorLike, lstmKernel: Tensor2D|TensorLike,\n lstmBias: Tensor1D|TensorLike, data: Tensor2D|TensorLike,\n c: Tensor2D|TensorLike, h: Tensor2D|TensorLike): [Tensor2D, Tensor2D] {\n const $forgetBias =\n convertToTensor(forgetBias, 'forgetBias', 'basicLSTMCell');\n const $lstmKernel =\n convertToTensor(lstmKernel, 'lstmKernel', 'basicLSTMCell');\n const $lstmBias = convertToTensor(lstmBias, 'lstmBias', 'basicLSTMCell');\n const $data = convertToTensor(data, 'data', 'basicLSTMCell');\n const $c = convertToTensor(c, 'c', 'basicLSTMCell');\n const $h = convertToTensor(h, 'h', 'basicLSTMCell');\n\n const combined = concat([$data, $h], 1);\n const weighted = matMul(combined, $lstmKernel);\n const res: Tensor2D = add(weighted, $lstmBias);\n\n // i = input_gate, j = new_input, f = forget_gate, o = output_gate\n const batchSize = res.shape[0];\n const sliceCols = res.shape[1] / 4;\n const sliceSize: [number, number] = [batchSize, sliceCols];\n const i = slice(res, [0, 0], sliceSize);\n const j = slice(res, [0, sliceCols], sliceSize);\n const f = slice(res, [0, sliceCols * 2], sliceSize);\n const o = slice(res, [0, sliceCols * 3], sliceSize);\n\n const newC: Tensor2D =\n add(mul(sigmoid(i), tanh(j)),\n mul($c, sigmoid(add($forgetBias, f)) as Tensor2D));\n const newH: Tensor2D = mul(tanh(newC), sigmoid(o));\n return [newC, newH];\n}\n\nexport const basicLSTMCell = /* @__PURE__ */ op({basicLSTMCell_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {BatchToSpaceND, BatchToSpaceNDAttrs, BatchToSpaceNDInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\n\n/**\n * This operation reshapes the \"batch\" dimension 0 into `M + 1` dimensions of\n * shape `blockShape + [batch]`, interleaves these blocks back into the grid\n * defined by the spatial dimensions `[1, ..., M]`, to obtain a result with\n * the same rank as the input. The spatial dimensions of this intermediate\n * result are then optionally cropped according to `crops` to produce the\n * output. This is the reverse of `tf.spaceToBatchND`. See below for a precise\n * description.\n *\n * ```js\n * const x = tf.tensor4d([1, 2, 3, 4], [4, 1, 1, 1]);\n * const blockShape = [2, 2];\n * const crops = [[0, 0], [0, 0]];\n *\n * x.batchToSpaceND(blockShape, crops).print();\n * ```\n *\n * @param x A `tf.Tensor`. N-D with `x.shape` = `[batch] + spatialShape +\n * remainingShape`, where spatialShape has `M` dimensions.\n * @param blockShape A 1-D array. Must have shape `[M]`, all values must\n * be >= 1.\n * @param crops A 2-D array. Must have shape `[M, 2]`, all values must be >= 0.\n * `crops[i] = [cropStart, cropEnd]` specifies the amount to crop from input\n * dimension `i + 1`, which corresponds to spatial dimension `i`. It is required\n * that `cropStart[i] + cropEnd[i] <= blockShape[i] * inputShape[i + 1]`\n *\n * This operation is equivalent to the following steps:\n *\n * 1. Reshape `x` to `reshaped` of shape: `[blockShape[0], ...,\n * blockShape[M-1], batch / prod(blockShape), x.shape[1], ...,\n * x.shape[N-1]]`\n *\n * 2. Permute dimensions of `reshaped` to produce `permuted` of shape `[batch /\n * prod(blockShape),x.shape[1], blockShape[0], ..., x.shape[M],\n * blockShape[M-1],x.shape[M+1], ..., x.shape[N-1]]`\n *\n * 3. Reshape `permuted` to produce `reshapedPermuted` of shape `[batch /\n * prod(blockShape),x.shape[1] * blockShape[0], ..., x.shape[M] *\n * blockShape[M-1],x.shape[M+1], ..., x.shape[N-1]]`\n *\n * 4. 
Crop the start and end of dimensions `[1, ..., M]` of `reshapedPermuted`\n * according to `crops` to produce the output of shape: `[batch /\n * prod(blockShape),x.shape[1] * blockShape[0] - crops[0,0] - crops[0,1],\n * ..., x.shape[M] * blockShape[M-1] - crops[M-1,0] -\n * crops[M-1,1],x.shape[M+1], ..., x.shape[N-1]]`\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction batchToSpaceND_(\n x: T|TensorLike, blockShape: number[], crops: number[][]): T {\n const $x = convertToTensor(x, 'x', 'batchToSpaceND');\n const prod = blockShape.reduce((a, b) => a * b);\n\n util.assert(\n $x.rank >= 1 + blockShape.length,\n () => `input rank is ${$x.rank} but should be > than blockShape.length ${\n blockShape.length}`);\n\n util.assert(\n crops.length === blockShape.length,\n () => `crops.length is ${\n crops.length} but should be equal to blockShape.length ${\n blockShape.length}`);\n\n util.assert(\n $x.shape[0] % prod === 0,\n () => `input tensor batch is ${\n $x.shape[0]} but is not divisible by the product of ` +\n `the elements of blockShape ${blockShape.join(' * ')} === ${prod}`);\n\n const inputs: BatchToSpaceNDInputs = {x: $x};\n const attrs: BatchToSpaceNDAttrs = {blockShape, crops};\n\n return ENGINE.runKernel(\n BatchToSpaceND, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const batchToSpaceND = /* @__PURE__ */ op({batchToSpaceND_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {FusedBatchNorm, FusedBatchNormAttrs, FusedBatchNormInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor, Tensor1D, Tensor4D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {Rank, TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {xAs4D} from './batchnorm_util';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Batch normalization.\n *\n * As described in\n * [http://arxiv.org/abs/1502.03167](http://arxiv.org/abs/1502.03167).\n *\n * Mean, variance, scale, and offset can be of two shapes:\n * - The same shape as the input.\n * - In the common case, the depth dimension is the last dimension of x, so\n * the values would be a `tf.Tensor1D` of shape [depth].\n *\n * Also available are stricter rank-specific methods with the same signature\n * as this method that assert that parameters passed are of given rank\n * - `tf.batchNorm2d`\n * - `tf.batchNorm3d`\n * - `tf.batchNorm4d`\n *\n * @param x The input Tensor.\n * @param mean A mean Tensor.\n * @param variance A variance Tensor.\n * @param offset An offset Tensor.\n * @param scale A scale Tensor.\n * @param varianceEpsilon A small float number to avoid dividing by 0.\n *\n * @doc {heading: 'Operations', subheading: 
'Normalization'}\n */\nfunction batchNorm_(\n x: Tensor|TensorLike, mean: Tensor|Tensor1D|TensorLike,\n variance: Tensor|Tensor1D|TensorLike,\n offset?: Tensor|Tensor1D|TensorLike,\n scale?: Tensor|Tensor1D|TensorLike,\n varianceEpsilon?: number): Tensor {\n if (varianceEpsilon == null) {\n varianceEpsilon = 0.001;\n }\n const $x = convertToTensor(x, 'x', 'batchNorm');\n const $mean = convertToTensor(mean, 'mean', 'batchNorm');\n const $variance = convertToTensor(variance, 'variance', 'batchNorm');\n let $scale: Tensor|Tensor1D;\n if (scale != null) {\n $scale = convertToTensor(scale, 'scale', 'batchNorm');\n }\n let $offset: Tensor|Tensor1D;\n if (offset != null) {\n $offset = convertToTensor(offset, 'offset', 'batchNorm');\n }\n\n util.assert(\n $mean.rank === $variance.rank,\n () => 'Batch normalization gradient requires mean and variance to have ' +\n 'equal ranks.');\n util.assert(\n $offset == null || $mean.rank === $offset.rank,\n () => 'Batch normalization gradient requires mean and offset to have ' +\n 'equal ranks.');\n util.assert(\n $scale == null || $mean.rank === $scale.rank,\n () => 'Batch normalization gradient requires mean and scale to have ' +\n 'equal ranks.');\n\n const x4D: Tensor4D = xAs4D($x);\n\n const inputs: FusedBatchNormInputs = {\n x: x4D,\n scale: $scale,\n offset: $offset,\n mean: $mean,\n variance: $variance\n };\n\n const attrs: FusedBatchNormAttrs = {varianceEpsilon};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const res = ENGINE.runKernel(\n FusedBatchNorm, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as Tensor;\n\n return reshape(res, $x.shape);\n}\n\nexport const batchNorm = /* @__PURE__ */ op({batchNorm_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor, Tensor4D} from '../tensor';\nimport {Rank} from '../types';\nimport {reshape} from './reshape';\n\nexport function xAs4D(x: Tensor) {\n let x4D: Tensor4D;\n if (x.rank === 0 || x.rank === 1) {\n x4D = reshape(x, [1, 1, 1, x.size]);\n } else if (x.rank === 2) {\n x4D = reshape(x, [1, 1, x.shape[0], x.shape[1]]);\n } else if (x.rank === 3) {\n x4D = reshape(x, [1, x.shape[0], x.shape[1], x.shape[2]]);\n } else {\n x4D = x as Tensor4D;\n }\n\n return x4D;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor1D, Tensor2D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {batchNorm} from './batchnorm';\nimport {op} from './operation';\n\n/**\n * Batch normalization, strictly for 2D. For the more relaxed version, see\n * `tf.batchNorm`.\n *\n * @param x The input Tensor.\n * @param mean A mean Tensor.\n * @param variance A variance Tensor.\n * @param offset An offset Tensor.\n * @param scale A scale Tensor.\n * @param varianceEpsilon A small float number to avoid dividing by 0.\n */\nfunction batchNorm2d_(\n x: Tensor2D|TensorLike, mean: Tensor2D|Tensor1D|TensorLike,\n variance: Tensor2D|Tensor1D|TensorLike,\n offset?: Tensor2D|Tensor1D|TensorLike, scale?: Tensor2D|Tensor1D|TensorLike,\n varianceEpsilon?: number): Tensor2D {\n const $x = convertToTensor(x, 'x', 'batchNorm');\n const $mean = convertToTensor(mean, 'mean', 'batchNorm');\n const $variance = convertToTensor(variance, 'variance', 'batchNorm');\n let $scale: Tensor2D|Tensor1D;\n if (scale != null) {\n $scale = convertToTensor(scale, 'scale', 'batchNorm');\n }\n let $offset: Tensor2D|Tensor1D;\n if (offset != null) {\n $offset = convertToTensor(offset, 'offset', 'batchNorm');\n }\n util.assert(\n $x.rank === 2,\n () => `Error in batchNorm2D: x must be rank 2 but got rank ` +\n `${$x.rank}.`);\n util.assert(\n $mean.rank === 2 || $mean.rank === 1,\n () => `Error in batchNorm2D: mean must be rank 2 or rank 1 but ` +\n `got rank ${$mean.rank}.`);\n util.assert(\n $variance.rank === 2 || $variance.rank === 1,\n () => `Error in batchNorm2D: variance must be rank 2 or rank 1 ` +\n `but got rank ${$variance.rank}.`);\n if ($scale != null) {\n util.assert(\n $scale.rank === 2 || $scale.rank === 1,\n () => `Error in batchNorm2D: scale must be rank 2 or rank 1 ` +\n `but got rank ${$scale.rank}.`);\n }\n if ($offset != null) {\n util.assert(\n $offset.rank === 2 || $offset.rank === 1,\n () => `Error in batchNorm2D: offset must be rank 2 or rank 1 ` +\n `but got rank ${$offset.rank}.`);\n }\n\n return batchNorm($x, $mean, $variance, $offset, $scale, varianceEpsilon);\n}\n\nexport const batchNorm2d = /* @__PURE__ */ op({batchNorm2d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor1D, Tensor3D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {batchNorm} from './batchnorm';\nimport {op} from './operation';\n\n/**\n * Batch normalization, strictly for 3D. For the more relaxed version, see\n * `tf.batchNorm`.\n *\n * @param x The input Tensor.\n * @param mean A mean Tensor.\n * @param variance A variance Tensor.\n * @param offset An offset Tensor.\n * @param scale A scale Tensor.\n * @param varianceEpsilon A small float number to avoid dividing by 0.\n */\nfunction batchNorm3d_(\n x: Tensor3D|TensorLike, mean: Tensor3D|Tensor1D|TensorLike,\n variance: Tensor3D|Tensor1D|TensorLike,\n offset?: Tensor3D|Tensor1D|TensorLike, scale?: Tensor3D|Tensor1D|TensorLike,\n varianceEpsilon?: number): Tensor3D {\n const $x = convertToTensor(x, 'x', 'batchNorm');\n const $mean = convertToTensor(mean, 'mean', 'batchNorm');\n const $variance = convertToTensor(variance, 'variance', 'batchNorm');\n let $scale: Tensor3D|Tensor1D;\n if (scale != null) {\n $scale = convertToTensor(scale, 'scale', 'batchNorm');\n }\n let $offset: Tensor3D|Tensor1D;\n if (offset != null) {\n $offset = convertToTensor(offset, 'offset', 'batchNorm');\n }\n util.assert(\n $x.rank === 3,\n () => `Error in batchNorm3D: x must be rank 3 but got rank ` +\n `${$x.rank}.`);\n util.assert(\n $mean.rank === 3 || $mean.rank === 1,\n () => `Error in batchNorm3D: mean must be rank 3 or rank 1 but ` +\n `got rank ${$mean.rank}.`);\n util.assert(\n $variance.rank === 3 || $variance.rank === 1,\n () => `Error in batchNorm3D: variance must be rank 3 or rank 1 ` +\n `but got rank ${$variance.rank}.`);\n if ($scale != null) {\n util.assert(\n $scale.rank === 3 || $scale.rank === 1,\n () => `Error in batchNorm3D: scale must be rank 3 or rank 1 ` +\n `but got rank ${$scale.rank}.`);\n }\n if ($offset != null) {\n util.assert(\n $offset.rank === 3 || $offset.rank === 1,\n () => `Error in batchNorm3D: offset must be rank 3 or rank 1 ` +\n `but got rank ${$offset.rank}.`);\n }\n\n return batchNorm($x, $mean, $variance, $offset, $scale, varianceEpsilon);\n}\n\nexport const batchNorm3d = /* @__PURE__ */ op({batchNorm3d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor1D, Tensor4D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {batchNorm} from './batchnorm';\nimport {op} from './operation';\n\n/**\n * Batch normalization, strictly for 4D. For the more relaxed version, see\n * `tf.batchNorm`.\n *\n * @param x The input Tensor.\n * @param mean A mean Tensor.\n * @param variance A variance Tensor.\n * @param offset An offset Tensor.\n * @param scale A scale Tensor.\n * @param varianceEpsilon A small float number to avoid dividing by 0.\n */\nfunction batchNorm4d_(\n x: Tensor4D|TensorLike, mean: Tensor4D|Tensor1D|TensorLike,\n variance: Tensor4D|Tensor1D|TensorLike,\n offset?: Tensor4D|Tensor1D|TensorLike, scale?: Tensor4D|Tensor1D|TensorLike,\n varianceEpsilon?: number): Tensor4D {\n const $x = convertToTensor(x, 'x', 'batchNorm');\n const $mean = convertToTensor(mean, 'mean', 'batchNorm');\n const $variance = convertToTensor(variance, 'variance', 'batchNorm');\n let $scale: Tensor4D|Tensor1D;\n if (scale != null) {\n $scale = convertToTensor(scale, 'scale', 'batchNorm');\n }\n let $offset: Tensor4D|Tensor1D;\n if (offset != null) {\n $offset = convertToTensor(offset, 'offset', 'batchNorm');\n }\n util.assert(\n $x.rank === 4,\n () => `Error in batchNorm4D: x must be rank 4 but got rank ` +\n `${$x.rank}.`);\n util.assert(\n $mean.rank === 4 || $mean.rank === 1,\n () => `Error in batchNorm4D: mean must be rank 4 or rank 1 but ` +\n `got rank ${$mean.rank}.`);\n util.assert(\n $variance.rank === 4 || $variance.rank === 1,\n () => `Error in batchNorm4D: variance must be rank 4 or rank 1 ` +\n `but got rank ${$variance.rank}.`);\n if ($scale != null) {\n util.assert(\n $scale.rank === 4 || $scale.rank === 1,\n () => `Error in batchNorm4D: scale must be rank 4 or rank 1 ` +\n `but got rank ${$scale.rank}.`);\n }\n if ($offset != null) {\n util.assert(\n $offset.rank === 4 || $offset.rank === 1,\n () => `Error in batchNorm4D: offset must be rank 4 or rank 1 ` +\n `but got rank ${$offset.rank}.`);\n }\n return batchNorm($x, $mean, $variance, $offset, $scale, varianceEpsilon);\n}\n\nexport const batchNorm4d = /* @__PURE__ */ op({batchNorm4d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Bincount, BincountAttrs, BincountInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor1D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\n\n/**\n * Outputs a vector with length `size` and the same dtype as `weights`.\n *\n * If `weights` are empty, then index `i` stores the number of times the value\n * `i` is counted in `x`. If `weights` are non-empty, then index `i` stores the\n * sum of the value in `weights` at each index where the corresponding value in\n * `x` is `i`.\n *\n * Values in `x` outside of the range [0, size) are ignored.\n *\n * @param x The input int tensor, rank 1.\n * @param weights The weights tensor, must have the same shape as x, or a\n * length-0 Tensor, in which case it acts as all weights equal to 1.\n * @param size Non-negative integer.\n *\n * @doc {heading: 'Operations', subheading: 'Reduction'}\n */\nfunction bincount_(\n x: T|TensorLike, weights: T|TensorLike, size: number): T {\n const $x = convertToTensor(x, 'x', 'bincount');\n const $weights = convertToTensor(weights, 'weights', 'bincount');\n\n util.assert(\n $x.dtype === 'int32',\n () => `Error in bincount: input ` +\n `dtype must be int32, but got ${$x.dtype}`);\n util.assert(size >= 0, () => `size must be non-negative, but got ${size}.`);\n util.assert(\n $weights.size === $x.size || $weights.size === 0,\n () => `Error in bincount: weights must have the same size as input or` +\n `0-length, but got input shape: ${$x.shape}, weights shape: ` +\n `${$weights.shape}.`);\n\n const inputs: BincountInputs = {x: $x, weights: $weights};\n const attrs: BincountAttrs = {size};\n\n return ENGINE.runKernel(\n Bincount, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const bincount = /* @__PURE__ */ op({bincount_});\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport { NamedTensorMap } from '../tensor_types';\nimport { ENGINE } from '../engine';\nimport { BroadcastArgs, BroadcastArgsInputs } from '../kernel_names';\nimport { Tensor } from '../tensor';\nimport { convertToTensor } from '../tensor_util_env';\nimport { Rank, TensorLike } from '../types';\n\nimport { op } from './operation';\n\n/**\n * Return the shape of s0 op s1 with broadcast.\n *\n * compute r0, the broadcasted shape as a tensor.\n * s0, s1 and r0 are all integer vectors.\n *\n * This function returns the shape of the result of an operation between\n * two tensors of size s0 and s1 performed with broadcast.\n *\n * @param s0 A tensor representing a shape\n * @param s1 A tensor representing a shape\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction broadcastArgs_(\n s0: Tensor | TensorLike, s1: Tensor | TensorLike): Tensor {\n const shape1Input = convertToTensor(s0, 's0', 'broadcastArgs', 'int32');\n const shape2Input = convertToTensor(s1, 's1', 'broadcastArgs', 'int32');\n\n if (shape1Input.rank !== 1) {\n throw new Error(\n 'broadcastArgs(): first input must be a vector (rank=1). ' +\n `Has rank ${shape1Input.rank}`);\n }\n\n if (shape2Input.rank !== 1) {\n throw new Error(\n 'broadcastArgs(): second input must be a vector (rank=1). ' +\n `Has rank ${shape2Input.rank}`);\n }\n\n const inputs: BroadcastArgsInputs = { s0: shape1Input, s1: shape2Input };\n return ENGINE.runKernel(BroadcastArgs, inputs as unknown as NamedTensorMap);\n}\n\nexport const broadcastArgs = /* @__PURE__ */ op({ broadcastArgs_ });\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Tile, TileAttrs, TileInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {Rank, ShapeMap, TensorLike} from '../types';\nimport {assertNonNegativeIntegerDimensions} from '../util_base';\n\nimport {clone} from './clone';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Broadcast an array to a compatible shape NumPy-style.\n *\n * The tensor's shape is compared to the broadcast shape from end to beginning.\n * Ones are prepended to the tensor's shape until it has the same length as\n * the broadcast shape. If input.shape[i]==shape[i], the (i+1)-th axis is\n * already broadcast-compatible. If input.shape[i]==1 and shape[i]==N, then\n * the input tensor is tiled N times along that axis (using tf.tile).\n *\n * @param input The tensor that is to be broadcasted.\n * @param shape The input is to be broadcast to this shape.\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction broadcastTo_(\n x: Tensor|TensorLike, shape: ShapeMap[R]): Tensor {\n let input = convertToTensor(x, 'broadcastTo', 'x');\n const xShape = input.shape;\n\n assertNonNegativeIntegerDimensions(shape);\n\n if (shape.length < input.rank) {\n throw new Error(`broadcastTo(): shape.length=${shape.length} < input.rank=${\n input.rank}.`);\n }\n\n if (shape.length > input.rank) {\n const newShape = input.shape.slice();\n while (newShape.length < shape.length) {\n newShape.unshift(1);\n }\n input = reshape(input, newShape);\n }\n\n const inputShape = input.shape;\n const reps: number[] = Array.from(shape);\n for (let i = shape.length - 1; i >= 0; i--) {\n if (inputShape[i] === shape[i]) {\n reps[i] = 1;\n } else if (input.shape[i] !== 1) {\n throw new Error(\n `broadcastTo(): [${xShape}] cannot be broadcast to [${shape}].`);\n }\n }\n const axes = reps.map((n, i) => n > 1 ? i : -1).filter(i => i >= 0);\n\n if (axes.length === 0) {\n return clone(input) as Tensor;\n }\n\n // TODO call broadcastTo kernel directly once backends implement broadcstTo\n const inputs: TileInputs = {x: input};\n const attrs: TileAttrs = {reps};\n return ENGINE.runKernel(\n Tile, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const broadcastTo = /* @__PURE__ */ op({broadcastTo_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Ceil, CeilInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes ceiling of input `tf.Tensor` element-wise: `ceil(x)`\n *\n * ```js\n * const x = tf.tensor1d([.6, 1.1, -3.3]);\n *\n * x.ceil().print(); // or tf.ceil(x)\n * ```\n * @param x The input Tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction ceil_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'ceil', 'float32');\n\n const inputs: CeilInputs = {x: $x};\n return ENGINE.runKernel(Ceil, inputs as unknown as NamedTensorMap);\n}\nexport const ceil = /* @__PURE__ */ op({ceil_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {ClipByValue, ClipByValueAttrs, ClipByValueInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\nimport {fill} from './fill';\n\nimport {op} from './operation';\n\n/**\n * Clips values element-wise. 
`max(min(x, clipValueMax), clipValueMin)`\n *\n * ```js\n * const x = tf.tensor1d([-1, 2, -3, 4]);\n *\n * x.clipByValue(-2, 3).print(); // or tf.clipByValue(x, -2, 3)\n * ```\n * @param x The input tensor.\n * @param clipValueMin Lower bound of range to be clipped to.\n * @param clipValueMax Upper bound of range to be clipped to.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction clipByValue_(\n x: T|TensorLike, clipValueMin: number, clipValueMax: number): T {\n const $x = convertToTensor(x, 'x', 'clipByValue');\n util.assert(\n (clipValueMin <= clipValueMax),\n () => `Error in clip: min (${clipValueMin}) must be ` +\n `less than or equal to max (${clipValueMax}).`);\n\n if (clipValueMin === clipValueMax) {\n return fill($x.shape, clipValueMin, $x.dtype) as T;\n }\n\n const inputs: ClipByValueInputs = {x: $x};\n const attrs: ClipByValueAttrs = {clipValueMin, clipValueMax};\n\n return ENGINE.runKernel(\n ClipByValue, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const clipByValue = /* @__PURE__ */ op({clipByValue_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor1D} from '../tensor';\nimport {TensorLike} from '../types';\n\nimport {concat} from './concat';\nimport {op} from './operation';\n\n/**\n * Concatenates a list of`tf.Tensor1D`s along an axis. See `concat` for details.\n *\n * For example, if:\n * A: shape(3) = |r1, g1, b1|\n * B: shape(2) = |r2, g2|\n * C = tf.concat1d([A, B]) == |r1, g1, b1, r2, g2|\n *\n * @param tensors A list of`tf.Tensor`s to concatenate.\n * @return The concatenated array.\n */\nfunction concat1d_(tensors: Array): Tensor1D {\n return concat(tensors, 0 /* axis */);\n}\n\nexport const concat1d = /* @__PURE__ */ op({concat1d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor2D} from '../tensor';\nimport {TensorLike} from '../types';\n\nimport {concat} from './concat';\nimport {op} from './operation';\n\n/**\n * Concatenates a list of`tf.Tensor2D`s along an axis. 
See `concat` for details.\n *\n * For example, if:\n * A: shape(2, 3) = | r1, g1, b1 |\n * | r2, g2, b2 |\n *\n * B: shape(2, 3) = | r3, g3, b3 |\n * | r4, g4, b4 |\n *\n * C = tf.concat2d([A, B], axis)\n *\n * if axis = 0:\n * C: shape(4, 3) = | r1, g1, b1 |\n * | r2, g2, b2 |\n * | r3, g3, b3 |\n * | r4, g4, b4 |\n *\n * if axis = 1:\n * C = shape(2, 6) = | r1, g1, b1, r3, g3, b3 |\n * | r2, g2, b2, r4, g4, b4 |\n *\n *\n * @param tensors A list of `tf.Tensor`s to concatenate.\n * @param axis The axis to concatenate along.\n * @return The concatenated array.\n */\nfunction concat2d_(\n tensors: Array, axis: number): Tensor2D {\n return concat(tensors, axis);\n}\n\nexport const concat2d = /* @__PURE__ */ op({concat2d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor3D} from '../tensor';\nimport {TensorLike} from '../types';\n\nimport {concat} from './concat';\nimport {op} from './operation';\n\n/**\n * Concatenates a list of `tf.Tensor3D`s along an axis.\n * See `concat` for details.\n *\n * For example, if:\n * A: shape(2, 1, 3) = | r1, g1, b1 |\n * | r2, g2, b2 |\n *\n * B: shape(2, 1, 3) = | r3, g3, b3 |\n * | r4, g4, b4 |\n *\n * C = tf.concat3d([A, B], axis)\n *\n * if axis = 0:\n * C: shape(4, 1, 3) = | r1, g1, b1 |\n * | r2, g2, b2 |\n * | r3, g3, b3 |\n * | r4, g4, b4 |\n *\n * if axis = 1:\n * C: shape(2, 2, 3) = | r1, g1, b1, r3, g3, b3 |\n * | r2, g2, b2, r4, g4, b4 |\n *\n * if axis = 2:\n * C = shape(2, 1, 6) = | r1, g1, b1, r3, g3, b3 |\n * | r2, g2, b2, r4, g4, b4 |\n *\n * @param tensors A list of`tf.Tensor`s to concatenate.\n * @param axis The axis to concate along.\n * @return The concatenated array.\n */\nfunction concat3d_(\n tensors: Array, axis: number): Tensor3D {\n return concat(tensors, axis);\n}\n\nexport const concat3d = /* @__PURE__ */ op({concat3d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor4D} from '../tensor';\nimport {TensorLike} from '../types';\n\nimport {concat} from './concat';\nimport {op} from './operation';\n\n/**\n * Concatenates a list of `tf.Tensor4D`s along an axis.\n * See `concat` for details.\n *\n * @param tensors A list of `tf.Tensor`s to concatenate.\n * @param axis The axis to concate along.\n * @return The concatenated array.\n */\nfunction concat4d_(\n tensors: Array, axis: number): Tensor4D {\n return concat(tensors, axis);\n}\n\nexport const concat4d = /* @__PURE__ */ op({concat4d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Conv2D, Conv2DAttrs, Conv2DInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor3D, Tensor4D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport * as conv_util from './conv_util';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Computes a 2D convolution over the input x.\n *\n * @param x The input tensor, of rank 4 or rank 3, of shape\n * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is\n * assumed.\n * @param filter The filter, rank 4, of shape\n * `[filterHeight, filterWidth, inDepth, outDepth]`.\n * @param strides The strides of the convolution: `[strideHeight,\n * strideWidth]`.\n * @param pad The type of padding algorithm.\n * - `same` and stride 1: output will be of same size as input,\n * regardless of filter size.\n * - `valid`: output will be smaller than input if filter is larger\n * than 1x1.\n * - For more info, see this guide:\n * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](\n * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)\n * @param dataFormat: An optional string from: \"NHWC\", \"NCHW\". Defaults to\n * \"NHWC\". Specify the data format of the input and output data. 
With the\n * default format \"NHWC\", the data is stored in the order of: [batch,\n * height, width, channels].\n * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`\n * in which we sample input values across the height and width dimensions\n * in atrous convolution. Defaults to `[1, 1]`. If `dilations` is a single\n * number, then `dilationHeight == dilationWidth`. If it is greater than\n * 1, then all values of `strides` must be 1.\n * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is\n * provided, it will default to truncate.\n *\n * @doc {heading: 'Operations', subheading: 'Convolution'}\n */\nfunction conv2d_(\n x: T|TensorLike, filter: Tensor4D|TensorLike,\n strides: [number, number]|number,\n pad: 'valid'|'same'|number|conv_util.ExplicitPadding,\n dataFormat: 'NHWC'|'NCHW' = 'NHWC',\n dilations: [number, number]|number = [1, 1],\n dimRoundingMode?: 'floor'|'round'|'ceil'): T {\n const $x = convertToTensor(x, 'x', 'conv2d', 'float32');\n const $filter = convertToTensor(filter, 'filter', 'conv2d', 'float32');\n\n let x4D = $x as Tensor4D;\n let reshapedTo4D = false;\n\n if ($x.rank === 3) {\n reshapedTo4D = true;\n x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);\n }\n\n util.assert(\n x4D.rank === 4,\n () => `Error in conv2d: input must be rank 4, but got rank ${x4D.rank}.`);\n util.assert(\n $filter.rank === 4,\n () => `Error in conv2d: filter must be rank 4, but got rank ` +\n `${$filter.rank}.`);\n conv_util.checkPadOnDimRoundingMode('conv2d', pad, dimRoundingMode);\n const inDepth = dataFormat === 'NHWC' ? x4D.shape[3] : x4D.shape[1];\n util.assert(\n inDepth === $filter.shape[2],\n () => `Error in conv2d: depth of input (${inDepth}) must match ` +\n `input depth for filter ${$filter.shape[2]}.`);\n util.assert(\n conv_util.eitherStridesOrDilationsAreOne(strides, dilations),\n () => 'Error in conv2D: Either strides or dilations must be 1. ' +\n `Got strides ${strides} and dilations '${dilations}'`);\n util.assert(\n conv_util.stridesOrDilationsArePositive(dilations),\n () => 'Error in conv2D: Dilated rates should be larger than 0.');\n util.assert(\n conv_util.stridesOrDilationsArePositive(strides),\n () => 'Error in conv2D: Strides should be larger than 0.');\n\n const inputs: Conv2DInputs = {x: x4D, filter: $filter};\n const attrs:\n Conv2DAttrs = {strides, pad, dataFormat, dilations, dimRoundingMode};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const res = ENGINE.runKernel(\n Conv2D, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as T;\n\n if (reshapedTo4D) {\n return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) as T;\n }\n return res;\n}\n\nexport const conv2d = /* @__PURE__ */ op({conv2d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor2D, Tensor3D, Tensor4D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {conv2d} from './conv2d';\nimport * as conv_util from './conv_util';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Computes a 1D convolution over the input x.\n *\n * @param x The input tensor, of rank 3 or rank 2, of shape\n * `[batch, width, inChannels]`. If rank 2, batch of 1 is assumed.\n * @param filter The filter, rank 3, of shape\n * `[filterWidth, inDepth, outDepth]`.\n * @param stride The number of entries by which the filter is moved right at\n * each step.\n * @param pad The type of padding algorithm.\n * - `same` and stride 1: output will be of same size as input,\n * regardless of filter size.\n * - `valid`: output will be smaller than input if filter is larger\n * than 1x1.\n * - For more info, see this guide:\n * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](\n * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)\n * @param dataFormat An optional string from \"NWC\", \"NCW\". Defaults to \"NWC\",\n * the data is stored in the order of [batch, in_width, in_channels]. Only\n * \"NWC\" is currently supported.\n * @param dilation The dilation rate in which we sample input values in\n * atrous convolution. Defaults to `1`. If it is greater than 1, then\n * stride must be `1`.\n * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is\n * provided, it will default to truncate.\n *\n * @doc {heading: 'Operations', subheading: 'Convolution'}\n */\nfunction conv1d_(\n x: T|TensorLike, filter: Tensor3D|TensorLike, stride: number,\n pad: 'valid'|'same'|number|conv_util.ExplicitPadding,\n dataFormat: 'NWC'|'NCW' = 'NWC', dilation = 1,\n dimRoundingMode?: 'floor'|'round'|'ceil'): T {\n const $x = convertToTensor(x, 'x', 'conv1d');\n const $filter = convertToTensor(filter, 'filter', 'conv1d');\n\n let x3D = $x as Tensor3D;\n let reshapedTo3D = false;\n if ($x.rank === 2) {\n reshapedTo3D = true;\n x3D = reshape($x, [1, $x.shape[0], $x.shape[1]]);\n }\n\n util.assert(\n x3D.rank === 3,\n () => `Error in conv1d: input must be rank 3, but got rank ${x3D.rank}.`);\n util.assert(\n $filter.rank === 3,\n () => `Error in conv1d: filter must be rank 3, but got rank ` +\n `${$filter.rank}.`);\n conv_util.checkPadOnDimRoundingMode('conv1d', pad, dimRoundingMode);\n util.assert(\n x3D.shape[2] === $filter.shape[1],\n () => `Error in conv1d: depth of input (${x3D.shape[2]}) must match ` +\n `input depth for filter ${$filter.shape[1]}.`);\n util.assert(\n conv_util.eitherStridesOrDilationsAreOne(stride, dilation),\n () => 'Error in conv1D: Either stride or dilation must be 1. 
' +\n `Got stride ${stride} and dilation '${dilation}'`);\n util.assert(\n conv_util.stridesOrDilationsArePositive(dilation),\n () => 'Error in conv1D: Dilated rates should be larger than 0.');\n util.assert(\n conv_util.stridesOrDilationsArePositive(stride),\n () => 'Error in conv1D: Stride should be larger than 0.');\n util.assert(\n dataFormat === 'NWC',\n () => `Error in conv1d: got dataFormat of ${\n dataFormat} but only NWC is currently supported.`);\n\n const filter4D = reshape(\n $filter, [1, $filter.shape[0], $filter.shape[1], $filter.shape[2]]);\n const input4D = reshape(x3D, [x3D.shape[0], 1, x3D.shape[1], x3D.shape[2]]);\n const strides: [number, number] = [1, stride];\n const dilations: [number, number] = [1, dilation];\n\n const conv2dDataFormat = 'NHWC';\n\n const res = conv2d(\n (input4D as Tensor4D), (filter4D as Tensor4D), strides, pad,\n conv2dDataFormat, dilations, dimRoundingMode);\n\n if (reshapedTo3D) {\n return reshape(res, [res.shape[2], res.shape[3]]) as T;\n }\n\n return reshape(res, [res.shape[0], res.shape[2], res.shape[3]]) as T;\n}\n\nexport const conv1d = /* @__PURE__ */ op({conv1d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Conv2DBackpropInput, Conv2DBackpropInputAttrs, Conv2DBackpropInputInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor3D, Tensor4D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport * as util from '../util';\n\nimport * as conv_util from './conv_util';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Computes the derivative of the input of a 2D convolution.\n *\n * @param xShape The shape of the input: [batch, height, width, inDepth].\n * If length of 3, batch of 1 is assumed.\n * @param dy The derivative of the output, of rank 4 or rank 3 of shape\n * `[batch, outHeight, outWidth, outDepth]`. If rank 3, batch of 1 is\n * assumed.\n * @param filter The filter, rank 4, of shape\n * `[filterHeight, filterWidth, inDepth, outDepth]`.\n * @param strides The strides of the convolution: `[strideHeight,\n * strideWidth]`.\n * @param pad The type of padding algorithm used:\n * - `same` and stride 1: output will be of same size as input,\n * regardless of filter size.\n * - `valid`: output will be smaller than input if filter is larger\n * than 1x1.\n * @param dataFormat: An optional string from: \"NHWC\", \"NCHW\". Defaults to\n * \"NHWC\". Specify the data format of the input and output data. With the\n * default format \"NHWC\", the data is stored in the order of: [batch,\n * height, width, channels].\n * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. 
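For the `conv1d_` op above, which reshapes its input to 4D and defers to `conv2d` internally, a small hedged example (values chosen for illustration; `tf` global assumed):

```js
const x = tf.tensor3d([1, 2, 3, 4], [1, 4, 1]);  // [batch, width, inChannels]
const f = tf.tensor3d([1, 1], [2, 1, 1]);        // [filterWidth, inDepth, outDepth]
tf.conv1d(x, f, 1, 'valid').print();             // sliding sums: [[[3], [5], [7]]]
```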
If none is\n * provided, it will default to truncate.\n */\nfunction conv2DBackpropInput_(\n xShape: [number, number, number, number]|[number, number, number], dy: T,\n filter: Tensor4D, strides: [number, number]|number,\n pad: 'valid'|'same'|number|conv_util.ExplicitPadding,\n dataFormat: 'NHWC'|'NCHW' = 'NHWC',\n dimRoundingMode?: 'floor'|'round'|'ceil'): T {\n util.assert(\n xShape.length === dy.rank,\n () => `Length of inShape ` +\n `(${xShape.length}) and rank of dy (${dy.rank}) must match`);\n\n let xShape4D = xShape as [number, number, number, number];\n let dy4D = dy as Tensor4D;\n let reshapedTo4D = false;\n if (dy.rank === 3) {\n reshapedTo4D = true;\n dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]);\n xShape4D = [1, xShape[0], xShape[1], xShape[2]];\n }\n\n util.assert(\n xShape4D.length === 4,\n () =>\n `Error in conv2dDerInput: inShape must be length 4, but got length ` +\n `${xShape4D.length}.`);\n util.assert(\n dy4D.rank === 4,\n () => `Error in conv2dDerInput: dy must be rank 4, but got ` +\n `rank ${dy4D.rank}`);\n util.assert(\n filter.rank === 4,\n () => `Error in conv2dDerInput: filter must be rank 4, but got ` +\n `rank ${filter.rank}`);\n const inDepth = dataFormat === 'NHWC' ? xShape4D[3] : xShape4D[1];\n const outDepth = dataFormat === 'NHWC' ? dy4D.shape[3] : dy4D.shape[1];\n util.assert(\n inDepth === filter.shape[2],\n () => `Error in conv2dDerInput: depth of input (${inDepth}) must ` +\n `match input depth for filter ${filter.shape[2]}.`);\n util.assert(\n outDepth === filter.shape[3],\n () => `Error in conv2dDerInput: depth of output (${outDepth}) must ` +\n `match output depth for filter ${filter.shape[3]}.`);\n conv_util.checkPadOnDimRoundingMode('conv2dDerInput', pad, dimRoundingMode);\n const inputs: Conv2DBackpropInputInputs = {dy: dy4D, filter};\n const attrs: Conv2DBackpropInputAttrs =\n {strides, pad, dataFormat, dimRoundingMode, inputShape: xShape4D};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const res = ENGINE.runKernel(\n Conv2DBackpropInput, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as T;\n\n if (reshapedTo4D) {\n return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) as T;\n }\n return res;\n}\n\nexport const conv2DBackpropInput = /* @__PURE__ */ op({conv2DBackpropInput_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor3D, Tensor4D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {conv2DBackpropInput} from './conv2d_backprop_input';\nimport {ExplicitPadding} from './conv_util';\nimport {op} from './operation';\n\n/**\n * Computes the transposed 2D convolution of an image, also known as a\n * deconvolution.\n *\n * @param x The input image, of rank 4 or rank 3, of shape\n * `[batch, height, width, inDepth]`. 
If rank 3, batch of 1 is assumed.\n * @param filter The filter, rank 4, of shape\n * `[filterHeight, filterWidth, outDepth, inDepth]`.\n * `inDepth` must match `inDepth` in `x`.\n * @param outputShape Output shape, of rank 4 or rank 3:\n * `[batch, height, width, outDepth]`. If rank 3, batch of 1 is assumed.\n * @param strides The strides of the original convolution:\n * `[strideHeight, strideWidth]`.\n * @param pad The type of padding algorithm used in the non-transpose version\n * of the op.\n * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is\n * provided, it will default to truncate.\n *\n * @doc {heading: 'Operations', subheading: 'Convolution'}\n */\nfunction conv2dTranspose_(\n x: T|TensorLike, filter: Tensor4D|TensorLike,\n outputShape: [number, number, number, number]|[number, number, number],\n strides: [number, number]|number,\n pad: 'valid'|'same'|number|ExplicitPadding,\n dimRoundingMode?: 'floor'|'round'|'ceil'): T {\n const $x = convertToTensor(x, 'x', 'conv2dTranspose');\n const $filter = convertToTensor(filter, 'filter', 'conv2dTranspose');\n\n return conv2DBackpropInput(\n outputShape, $x, $filter, strides, pad, 'NHWC', dimRoundingMode);\n}\n\nexport const conv2dTranspose = /* @__PURE__ */ op({conv2dTranspose_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Conv3D, Conv3DAttrs, Conv3DInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor4D, Tensor5D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {eitherStridesOrDilationsAreOne, stridesOrDilationsArePositive} from './conv_util';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Computes a 3D convolution over the input x.\n *\n * @param x The input tensor, of rank 5 or rank 4, of shape\n * `[batch, depth, height, width, channels]`. If rank 4,\n * batch of 1 is assumed.\n * @param filter The filter, rank 5, of shape\n * `[filterDepth, filterHeight, filterWidth, inChannels, outChannels]`.\n * inChannels must match between input and filter.\n * @param strides The strides of the convolution: `[strideDepth, strideHeight,\n * strideWidth]`.\n * @param pad The type of padding algorithm.\n * - `same` and stride 1: output will be of same size as input,\n * regardless of filter size.\n * - `valid`: output will be smaller than input if filter is larger\n * than 1x1.\n * - For more info, see this guide:\n * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](\n * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)\n * @param dataFormat: An optional string from: \"NDHWC\", \"NCDHW\". Defaults to\n * \"NDHWC\". 
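As the source above shows, `conv2dTranspose_` simply forwards to `conv2DBackpropInput` with an `NHWC` layout. A hedged sketch of the public call (shapes are assumptions chosen so the result can be checked by hand; `tf` global assumed):

```js
const y = tf.ones([1, 2, 2, 1]);       // pretend this is the output of a 'valid' 2x2 conv
const f = tf.ones([2, 2, 1, 1]);       // [filterHeight, filterWidth, outDepth, inDepth]
tf.conv2dTranspose(y, f, [1, 3, 3, 1], 1, 'valid').print();
// Each recovered input pixel receives one contribution per window that covered it:
// [[1, 2, 1], [2, 4, 2], [1, 2, 1]]
```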
Specify the data format of the input and output data. With the\n * default format \"NDHWC\", the data is stored in the order of: [batch,\n * depth, height, width, channels]. Only \"NDHWC\" is currently supported.\n * @param dilations The dilation rates: `[dilationDepth, dilationHeight,\n * dilationWidth]` in which we sample input values across the height\n * and width dimensions in atrous convolution. Defaults to `[1, 1, 1]`.\n * If `dilations` is a single number, then\n * `dilationDepth == dilationHeight == dilationWidth`. If it is greater\n * than 1, then all values of `strides` must be 1.\n *\n * @doc {heading: 'Operations', subheading: 'Convolution'}\n */\nfunction conv3d_(\n x: T|TensorLike, filter: Tensor5D|TensorLike,\n strides: [number, number, number]|number, pad: 'valid'|'same',\n dataFormat: 'NDHWC'|'NCDHW' = 'NDHWC',\n dilations: [number, number, number]|number = [1, 1, 1]): T {\n const $x = convertToTensor(x, 'x', 'conv3d');\n const $filter = convertToTensor(filter, 'filter', 'conv3d');\n\n let x5D = $x as Tensor5D;\n let reshapedTo5D = false;\n\n if ($x.rank === 4) {\n reshapedTo5D = true;\n x5D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2], $x.shape[3]]);\n }\n util.assert(\n x5D.rank === 5,\n () => `Error in conv3d: input must be rank 5, but got rank ${x5D.rank}.`);\n util.assert(\n $filter.rank === 5,\n () => `Error in conv3d: filter must be rank 5, but got rank ` +\n `${$filter.rank}.`);\n util.assert(\n x5D.shape[4] === $filter.shape[3],\n () => `Error in conv3d: depth of input (${x5D.shape[4]}) must match ` +\n `input depth for filter ${$filter.shape[3]}.`);\n util.assert(\n eitherStridesOrDilationsAreOne(strides, dilations),\n () => 'Error in conv3D: Either strides or dilations must be 1. ' +\n `Got strides ${strides} and dilations '${dilations}'`);\n util.assert(\n dataFormat === 'NDHWC',\n () => `Error in conv3d: got dataFormat of ${\n dataFormat} but only NDHWC is currently supported.`);\n util.assert(\n stridesOrDilationsArePositive(dilations),\n () => 'Error in conv3D: Dilated rates should be larger than 0.');\n util.assert(\n stridesOrDilationsArePositive(strides),\n () => 'Error in conv3D: Strides should be larger than 0.');\n\n const inputs: Conv3DInputs = {x: x5D, filter: $filter};\n\n const attrs: Conv3DAttrs = {strides, pad, dataFormat, dilations};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const res = ENGINE.runKernel(\n Conv3D, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as T;\n\n if (reshapedTo5D) {\n return reshape(\n res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]) as\n T;\n }\n return res;\n}\n\nexport const conv3d = /* @__PURE__ */ op({conv3d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
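A minimal `tf.conv3d` sketch matching the signature above (illustrative shapes only; `tf` global assumed):

```js
const x = tf.ones([1, 2, 2, 2, 1]);    // [batch, depth, height, width, channels]
const f = tf.ones([2, 2, 2, 1, 1]);    // [fd, fh, fw, inChannels, outChannels]
tf.conv3d(x, f, 1, 'valid').print();   // one 2x2x2 window of ones -> value 8, shape [1,1,1,1,1]
```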
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Conv3DBackpropInputV2, Conv3DBackpropInputV2Attrs, Conv3DBackpropInputV2Inputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor4D, Tensor5D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport * as util from '../util';\n\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Computes the derivative of the input of a 3D convolution.\n *\n * @param xShape The shape of the input: [batch, depth, height, width,\n * in_channels]. If length of 4, batch of 1 is assumed.\n * @param dy The derivative of the output, of rank 5 or rank 4 of shape\n * `[batch, outDepth, outHeight, outWidth, in_channels]`.\n * If rank 4, batch of 1 is assumed.\n * @param filter The filter, rank 5, of shape\n * `[filterDepth, filterHeight, filterWidth, inDepth, outDepth]`.\n * @param strides The strides of the convolution: `[strideDepth, strideHeight,\n * strideWidth]`.\n * @param pad The type of padding algorithm used:\n * - `same` and stride 1: output will be of same size as input,\n * regardless of filter size.\n * - `valid`: output will be smaller than input if filter is larger\n * than 1x1.\n */\nfunction conv3DBackpropInput_(\n xShape:\n [number, number, number, number,\n number]|[number, number, number, number],\n dy: T, filter: Tensor5D, strides: [number, number, number]|number,\n pad: 'valid'|'same'): T {\n util.assert(\n xShape.length === dy.rank,\n () => `Length of inShape ` +\n `(${xShape.length}) and rank of dy (${dy.rank}) must match`);\n\n let xShape5D = xShape as [number, number, number, number, number];\n let dy5D = dy as Tensor5D;\n let reshapedTo5D = false;\n if (dy.rank === 4) {\n reshapedTo5D = true;\n dy5D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2], dy.shape[3]]);\n xShape5D = [1, xShape[0], xShape[1], xShape[2], xShape[3]];\n }\n\n const inDepth = xShape5D[4];\n const outDepth = dy5D.shape[4];\n util.assert(\n xShape5D.length === 5,\n () =>\n `Error in conv3dDerInput: inShape must be length 5, but got length ` +\n `${xShape5D.length}.`);\n util.assert(\n dy5D.rank === 5,\n () => `Error in conv3dDerInput: dy must be rank 5, but got ` +\n `rank ${dy5D.rank}`);\n util.assert(\n filter.rank === 5,\n () => `Error in conv3dDerInput: filter must be rank 5, but got ` +\n `rank ${filter.rank}`);\n util.assert(\n inDepth === filter.shape[3],\n () => `Error in conv3dDerInput: depth of input (${inDepth}) must ` +\n `match input depth for filter ${filter.shape[3]}.`);\n util.assert(\n outDepth === filter.shape[4],\n () => `Error in conv3dDerInput: depth of output (${outDepth}) must ` +\n `match output depth for filter ${filter.shape[4]}.`);\n\n const inputs: Conv3DBackpropInputV2Inputs = {dy: dy5D, filter};\n\n const attrs:\n Conv3DBackpropInputV2Attrs = {pad, strides, inputShape: xShape5D};\n\n // 
tslint:disable-next-line: no-unnecessary-type-assertion\n const res = ENGINE.runKernel(\n Conv3DBackpropInputV2, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as T;\n\n if (reshapedTo5D) {\n return reshape(\n res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]) as\n T;\n }\n return res;\n}\n\nexport const conv3DBackpropInput = /* @__PURE__ */ op({conv3DBackpropInput_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor4D, Tensor5D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {conv3DBackpropInput} from './conv3d_backprop_input';\nimport {op} from './operation';\n\n/**\n * Computes the transposed 3D convolution of a volume, also known as a\n * deconvolution.\n *\n * @param x The input image, of rank 5 or rank 4, of shape\n * `[batch, depth, height, width, inDepth]`. If rank 4, batch of 1 is assumed.\n * @param filter The filter, rank 4, of shape\n * `[depth, filterHeight, filterWidth, outDepth, inDepth]`.\n * `inDepth` must match `inDepth` in `x`.\n * @param outputShape Output shape, of rank 5 or rank 4:\n * `[batch, depth, height, width, outDepth]`. If rank 3, batch of 1 is\n * assumed.\n * @param strides The strides of the original convolution:\n * `[strideDepth, strideHeight, strideWidth]`.\n * @param pad The type of padding algorithm used in the non-transpose version\n * of the op.\n *\n * @doc {heading: 'Operations', subheading: 'Convolution'}\n */\nfunction conv3dTranspose_(\n x: T|TensorLike, filter: Tensor5D|TensorLike,\n outputShape:\n [number, number, number, number,\n number]|[number, number, number, number],\n strides: [number, number, number]|number, pad: 'valid'|'same'): T {\n const $x = convertToTensor(x, 'x', 'conv3dTranspose');\n const $filter = convertToTensor(filter, 'filter', 'conv3dTranspose');\n\n return conv3DBackpropInput(outputShape, $x, $filter, strides, pad);\n}\n\nexport const conv3dTranspose = /* @__PURE__ */ op({conv3dTranspose_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
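`conv3dTranspose_` above delegates to `conv3DBackpropInput` in the same way its 2D counterpart does. A hedged example (assumed shapes; `tf` global assumed):

```js
const y = tf.ones([1, 1, 1, 1, 1]);    // a single output voxel
const f = tf.ones([2, 2, 2, 1, 1]);    // [depth, filterHeight, filterWidth, outDepth, inDepth]
tf.conv3dTranspose(y, f, [1, 2, 2, 2, 1], 1, 'valid').print(); // all ones, shape [1,2,2,2,1]
```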
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Cos, CosInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes cos of the input `tf.Tensor` element-wise: `cos(x)`\n *\n * ```js\n * const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);\n *\n * x.cos().print(); // or tf.cos(x)\n * ```\n * @param x The input tensor. Must be float32 type.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction cos_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'cos', 'float32');\n\n const inputs: CosInputs = {x: $x};\n\n return ENGINE.runKernel(Cos, inputs as unknown as NamedTensorMap);\n}\nexport const cos = /* @__PURE__ */ op({cos_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Cosh, CoshInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes hyperbolic cos of the input `tf.Tensor` element-wise: `cosh(x)`\n *\n * ```js\n * const x = tf.tensor1d([0, 1, -1, .7]);\n *\n * x.cosh().print(); // or tf.cosh(x)\n * ```\n * @param x The input tensor. Must be float32 type.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction cosh_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'cosh', 'float32');\n const inputs: CoshInputs = {x: $x};\n\n return ENGINE.runKernel(Cosh, inputs as unknown as NamedTensorMap);\n}\nexport const cosh = /* @__PURE__ */ op({cosh_});\n","/**\n * @license\n * Copyright 2022 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the 'License');\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an 'AS IS' BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport { ENGINE } from '../engine';\nimport { Cumprod, CumprodAttrs, CumprodInputs } from '../kernel_names';\nimport { NamedAttrMap } from '../kernel_registry';\nimport { Tensor } from '../tensor';\nimport { NamedTensorMap } from '../tensor_types';\nimport { convertToTensor } from '../tensor_util_env';\nimport { TensorLike } from '../types';\n\nimport { op } from './operation';\n\n/**\n * Computes the cumulative product of a `tf.Tensor` along `axis`.\n *\n * ```js\n * const x = tf.tensor([1, 2, 3, 4]);\n * x.cumprod().print();\n * ```\n * ```js\n * const x = tf.tensor([[1, 2], [3, 4]]);\n * x.cumprod().print();\n * ```\n *\n * @param x The input tensor to cumulatively multiply.\n * @param axis The axis along which to multiply. Optional. Defaults to 0.\n * @param exclusive Whether to perform exclusive cumulative product. Optional.\n * Defaults to false. If set to true then the product of each tensor entry\n * does not include its own value, but only the values previous to it\n * along the specified axis.\n * @param reverse Whether to multiply in the opposite direction. Optional.\n * Defaults to false.\n *\n * @doc {heading: 'Operations', subheading: 'Scan'}\n */\nfunction cumprod_(\n x: Tensor | TensorLike,\n axis = 0,\n exclusive = false,\n reverse = false\n): T {\n const $x = convertToTensor(x, 'x', 'cumprod');\n\n const inputs: CumprodInputs = { x: $x };\n const attrs: CumprodAttrs = { axis, exclusive, reverse };\n\n return ENGINE.runKernel(\n Cumprod,\n inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap\n );\n}\n\nexport const cumprod = /* @__PURE__ */ op({ cumprod_ });\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Cumsum, CumsumAttrs, CumsumInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes the cumulative sum of a `tf.Tensor` along `axis`.\n *\n * ```js\n * const x = tf.tensor([1, 2, 3, 4]);\n * x.cumsum().print();\n * ```\n * ```js\n * const x = tf.tensor([[1, 2], [3, 4]]);\n * x.cumsum().print();\n * ```\n *\n * @param x The input tensor to be summed.\n * @param axis The axis along which to sum. Optional. Defaults to 0.\n * @param exclusive Whether to perform exclusive cumulative sum. Optional.\n * Defaults to false. If set to true then the sum of each tensor entry\n * does not include its own value, but only the values previous to it\n * along the specified axis.\n * @param reverse Whether to sum in the opposite direction. Optional.\n * Defaults to false.\n *\n * @doc {heading: 'Operations', subheading: 'Scan'}\n */\nfunction cumsum_(\n x: Tensor|TensorLike, axis = 0, exclusive = false, reverse = false): T {\n const $x = convertToTensor(x, 'x', 'cumsum');\n\n const inputs: CumsumInputs = {x: $x};\n const attrs: CumsumAttrs = {axis, exclusive, reverse};\n\n return ENGINE.runKernel(\n Cumsum, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const cumsum = /* @__PURE__ */ op({cumsum_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {DenseBincount, DenseBincountAttrs, DenseBincountInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor1D, Tensor2D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\n\n/**\n * Outputs a vector with length `size` and the same dtype as `weights`.\n *\n * If `weights` are empty, then index `i` stores the number of times the value\n * `i` is counted in `x`. If `weights` are non-empty, then index `i` stores the\n * sum of the value in `weights` at each index where the corresponding value in\n * `x` is `i`.\n *\n * Values in `x` outside of the range [0, size) are ignored.\n *\n * @param x The input int tensor, rank 1 or rank 2.\n * @param weights The weights tensor, must have the same shape as x, or a\n * length-0 Tensor, in which case it acts as all weights equal to 1.\n * @param size Non-negative integer.\n * @param binaryOutput Optional. Whether the kernel should count the appearance\n * or number of occurrences. Defaults to False.\n *\n * @doc {heading: 'Operations', subheading: 'Reduction'}\n */\nfunction denseBincount_(\n x: T|TensorLike, weights: T|TensorLike, size: number,\n binaryOutput = false): T {\n const $x = convertToTensor(x, 'x', 'denseBincount');\n const $weights = convertToTensor(weights, 'weights', 'denseBincount');\n\n util.assert(\n $x.dtype === 'int32',\n () => `Error in denseBincount: input ` +\n `dtype must be int32, but got ${$x.dtype}`);\n util.assert(\n $x.rank <= 2,\n () => `Error in denseBincount: input must be at most rank 2, but got ` +\n `rank ${$x.rank}.`);\n util.assert(size >= 0, () => `size must be non-negative, but got ${size}.`);\n util.assert(\n $weights.size === $x.size || $weights.size === 0,\n () =>\n `Error in denseBincount: weights must have the same shape as x or ` +\n `0-length, but got x shape: ${$x.shape}, weights shape: ` +\n `${$weights.shape}.`);\n\n const inputs: DenseBincountInputs = {x: $x, weights: $weights};\n const attrs: DenseBincountAttrs = {size, binaryOutput};\n\n return ENGINE.runKernel(\n DenseBincount, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const denseBincount = /* @__PURE__ */ op({denseBincount_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
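For `denseBincount_` above, a small hedged example (values are assumptions; a length-0 `weights` tensor makes every occurrence count as 1):

```js
const x = tf.tensor1d([1, 1, 2, 3], 'int32');
const weights = tf.tensor1d([]);                 // length 0: plain occurrence counts
tf.denseBincount(x, weights, 4).print();         // [0, 2, 1, 1]
```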
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {DepthToSpace, DepthToSpaceAttrs, DepthToSpaceInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor4D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike4D} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\n\n/**\n * Rearranges data from depth into blocks of spatial data. More specifically,\n * this op outputs a copy of the input tensor where values from the `depth`\n * dimension are moved in spatial blocks to the `height` and `width` dimensions.\n * The attr `blockSize` indicates the input block size and how the data is\n * moved.\n *\n * - Chunks of data of size `blockSize * blockSize` from depth are rearranged\n * into non-overlapping blocks of size `blockSize x blockSize`\n *\n * - The width the output tensor is `inputWidth * blockSize`, whereas the\n * height is `inputHeight * blockSize`\n *\n * - The Y, X coordinates within each block of the output image are determined\n * by the high order component of the input channel index\n *\n * - The depth of the input tensor must be divisible by `blockSize *\n * blockSize`\n *\n * The `dataFormat` attr specifies the layout of the input and output tensors\n * with the following options: \"NHWC\": [ `batch, height, width, channels` ]\n * \"NCHW\": [ `batch, channels, height, width` ]\n *\n * ```js\n * const x = tf.tensor4d([1, 2, 3, 4], [1, 1, 1, 4]);\n * const blockSize = 2;\n * const dataFormat = \"NHWC\";\n *\n * tf.depthToSpace(x, blockSize, dataFormat).print();\n * ```\n *\n * @param x The input tensor of rank 4\n * @param blockSIze An `int` that is `>= 2`. The size of the spatial block\n * @param dataFormat An optional string from: \"NHWC\", \"NCHW\". Defaults to \"NHWC\"\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction depthToSpace_(\n x: Tensor4D|TensorLike4D, blockSize: number,\n dataFormat: 'NHWC'|'NCHW' = 'NHWC'): Tensor4D {\n const $x = convertToTensor(x, 'x', 'depthToSpace', 'float32') as Tensor4D;\n\n const inputHeight = (dataFormat === 'NHWC') ? $x.shape[1] : $x.shape[2];\n const inputWidth = (dataFormat === 'NHWC') ? $x.shape[2] : $x.shape[3];\n const inputDepth = (dataFormat === 'NHWC') ? 
$x.shape[3] : $x.shape[1];\n\n util.assert(\n blockSize > 1,\n () => `blockSize should be > 1 for depthToSpace, but was: ${blockSize}`);\n\n util.assert(\n inputHeight * blockSize >= 0,\n () => `Negative dimension size caused by overflow when multiplying\n ${inputHeight} and ${blockSize} for depthToSpace with input shape\n ${$x.shape}`);\n\n util.assert(\n inputWidth * blockSize >= 0,\n () => `Negative dimension size caused by overflow when multiplying\n ${inputWidth} and ${blockSize} for depthToSpace with input shape\n ${$x.shape}`);\n\n util.assert(\n (inputDepth % (blockSize * blockSize) === 0),\n () => `Dimension size must be evenly divisible by ${\n blockSize * blockSize} but is ${\n inputDepth} for depthToSpace with input shape ${$x.shape}`);\n\n const inputs: DepthToSpaceInputs = {x: $x};\n const attrs: DepthToSpaceAttrs = {blockSize, dataFormat};\n\n return ENGINE.runKernel(\n DepthToSpace, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const depthToSpace = /* @__PURE__ */ op({depthToSpace_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {DepthwiseConv2dNative, DepthwiseConv2dNativeAttrs, DepthwiseConv2dNativeInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor3D, Tensor4D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport * as conv_util from './conv_util';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Depthwise 2D convolution.\n *\n * Given a 4D `input` array and a `filter` array of shape\n * `[filterHeight, filterWidth, inChannels, channelMultiplier]` containing\n * `inChannels` convolutional filters of depth 1, this op applies a\n * different filter to each input channel (expanding from 1 channel to\n * `channelMultiplier` channels for each), then concatenates the results\n * together. The output has `inChannels * channelMultiplier` channels.\n *\n * See\n * [https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d](\n * https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d)\n * for more details.\n *\n * @param x The input tensor, of rank 4 or rank 3, of shape\n * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is\n * assumed.\n * @param filter The filter tensor, rank 4, of shape\n * `[filterHeight, filterWidth, inChannels, channelMultiplier]`.\n * @param strides The strides of the convolution: `[strideHeight,\n * strideWidth]`. 
If strides is a single number, then `strideHeight ==\n * strideWidth`.\n * @param pad The type of padding algorithm.\n * - `same` and stride 1: output will be of same size as input,\n * regardless of filter size.\n * - `valid`: output will be smaller than input if filter is larger\n * than 1x1.\n * - For more info, see this guide:\n * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](\n * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)\n * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`\n * in which we sample input values across the height and width dimensions\n * in atrous convolution. Defaults to `[1, 1]`. If `rate` is a single\n * number, then `dilationHeight == dilationWidth`. If it is greater than\n * 1, then all values of `strides` must be 1.\n * @param dataFormat: An optional string from: \"NHWC\", \"NCHW\". Defaults to\n * \"NHWC\". Specify the data format of the input and output data. With the\n * default format \"NHWC\", the data is stored in the order of: [batch,\n * height, width, channels]. Only \"NHWC\" is currently supported.\n * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is\n * provided, it will default to truncate.\n *\n * @doc {heading: 'Operations', subheading: 'Convolution'}\n */\nfunction depthwiseConv2d_(\n x: T|TensorLike, filter: Tensor4D|TensorLike,\n strides: [number, number]|number,\n pad: 'valid'|'same'|number|conv_util.ExplicitPadding,\n dataFormat: 'NHWC'|'NCHW' = 'NHWC',\n dilations: [number, number]|number = [1, 1],\n dimRoundingMode?: 'floor'|'round'|'ceil'): T {\n const $x = convertToTensor(x, 'x', 'depthwiseConv2d', 'float32');\n const $filter =\n convertToTensor(filter, 'filter', 'depthwiseConv2d', 'float32');\n\n let x4D = $x as Tensor4D;\n let reshapedTo4D = false;\n if ($x.rank === 3) {\n reshapedTo4D = true;\n x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);\n }\n util.assert(\n x4D.rank === 4,\n () => `Error in depthwiseConv2d: input must be rank 4, but got ` +\n `rank ${x4D.rank}.`);\n util.assert(\n $filter.rank === 4,\n () => `Error in depthwiseConv2d: filter must be rank 4, but got rank ` +\n `${$filter.rank}.`);\n const inChannels = dataFormat === 'NHWC' ? x4D.shape[3] : x4D.shape[1];\n util.assert(\n inChannels === $filter.shape[2],\n () => `Error in depthwiseConv2d: number of input channels ` +\n `(${inChannels}) must match the inChannels dimension in ` +\n `filter ${$filter.shape[2]}.`);\n conv_util.checkPadOnDimRoundingMode('depthwiseConv2d', pad, dimRoundingMode);\n const inputs: DepthwiseConv2dNativeInputs = {x: x4D, filter: $filter};\n const attrs: DepthwiseConv2dNativeAttrs =\n {strides, pad, dataFormat, dilations, dimRoundingMode};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const res = ENGINE.runKernel(\n DepthwiseConv2dNative, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as T;\n\n if (reshapedTo4D) {\n return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) as T;\n }\n return res;\n}\n\nexport const depthwiseConv2d = /* @__PURE__ */ op({depthwiseConv2d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
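A hedged `tf.depthwiseConv2d` sketch matching the signature above (shapes assumed; each input channel is filtered independently):

```js
const x = tf.ones([1, 3, 3, 2]);       // [batch, height, width, inChannels]
const f = tf.ones([2, 2, 2, 1]);       // [fh, fw, inChannels, channelMultiplier]
tf.depthwiseConv2d(x, f, 1, 'valid').print();   // shape [1, 2, 2, 2], every value 4
```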
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Diag, DiagInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\n\nimport {op} from './operation';\n\n/**\n * Returns a diagonal tensor with given diagonal values.\n *\n * Given a diagonal, this operation returns a tensor with the diagonal and\n * everything else padded with zeros.\n *\n * Assume the input has dimensions `[D1,..., Dk]`, then the output is a tensor\n * of rank 2k with dimensions `[D1,..., Dk, D1,..., Dk]`\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3, 4]);\n *\n * tf.diag(x).print()\n * ```\n * ```js\n * const x = tf.tensor2d([1, 2, 3, 4, 5, 6, 7, 8], [4, 2])\n *\n * tf.diag(x).print()\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction diag_(x: Tensor): Tensor {\n const $x = convertToTensor(x, 'x', 'diag');\n\n const inputs: DiagInputs = {x: $x};\n\n return ENGINE.runKernel(Diag, inputs as unknown as NamedTensorMap);\n}\n\nexport const diag = /* @__PURE__ */ op({diag_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Dilation2D, Dilation2DAttrs, Dilation2DInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor3D, Tensor4D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Computes the grayscale dilation over the input `x`.\n *\n * @param x The input tensor, rank 3 or rank 4 of shape\n * `[batch, height, width, depth]`. 
If rank 3, batch of 1 is assumed.\n * @param filter The filter tensor, rank 3, of shape\n * `[filterHeight, filterWidth, depth]`.\n * @param strides The strides of the sliding window for each dimension of the\n * input tensor: `[strideHeight, strideWidth]`.\n * If `strides` is a single number,\n * then `strideHeight == strideWidth`.\n * @param pad The type of padding algorithm.\n * - `same` and stride 1: output will be of same size as input,\n * regardless of filter size.\n * - `valid`: output will be smaller than input if filter is larger\n * than 1*1x1.\n * - For more info, see this guide:\n * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](\n * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)\n * @param dataFormat Specify the data format of the input and output data.\n * Defaults to 'NHWC'. Only 'NHWC' is currently supported. With the\n * default format \"NHWC\", the data is stored in the order of: [batch,\n * height, width, channels].\n * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`\n * in which we sample input values across the height and width dimensions\n * for atrous morphological dilation. Defaults to `[1, 1]`. If `dilations`\n * is a single number, then `dilationHeight == dilationWidth`. If it is\n * greater than 1, then all values of `strides` must be 1.\n *\n * @doc {heading: 'Operations', subheading: 'Convolution'}\n */\nfunction dilation2d_(\n x: T|TensorLike, filter: Tensor3D|TensorLike,\n strides: [number, number]|number, pad: 'valid'|'same',\n dilations: [number, number]|number = [1, 1],\n dataFormat: 'NHWC' = 'NHWC'): T {\n const $x = convertToTensor(x, 'x', 'dilation2d');\n const $filter = convertToTensor(filter, 'filter', 'dilation2d');\n\n util.assert(\n $x.rank === 3 || $x.rank === 4,\n () => `Error in dilation2d: input must be rank 3 or 4, but got rank ` +\n `${$x.rank}.`);\n util.assert(\n $filter.rank === 3,\n () => `Error in dilation2d: filter must be rank 3, but got rank ` +\n `${$filter.rank}.`);\n util.assert(\n dataFormat === 'NHWC',\n () => `Error in dilation2d: Only NHWC is currently supported, ` +\n `but got dataFormat of ${dataFormat}`);\n\n let x4D = $x as Tensor4D;\n let reshapedTo4D = false;\n\n if ($x.rank === 3) {\n x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);\n reshapedTo4D = true;\n }\n\n util.assert(\n x4D.shape[3] === $filter.shape[2],\n () => `Error in dilation2d: input and filter must have the same depth: ${\n x4D.shape[3]} vs ${$filter.shape[2]}`);\n\n const inputs: Dilation2DInputs = {x: x4D, filter: $filter};\n const attrs: Dilation2DAttrs = {strides, pad, dilations};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const res = ENGINE.runKernel(\n Dilation2D, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as T;\n\n if (reshapedTo4D) {\n return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) as T;\n }\n\n return res;\n}\n\nexport const dilation2d = /* @__PURE__ */ op({dilation2d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
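For `dilation2d_` above, a hedged example (assumed values; with an all-zero filter, grayscale dilation reduces to a plain max over each window):

```js
const x = tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]);
const f = tf.zeros([2, 2, 1]);                    // [filterHeight, filterWidth, depth]
tf.dilation2d(x, f, 1, 'valid').print();          // max of the single 2x2 window: 4
```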
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Equal, EqualInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {assertAndGetBroadcastShape} from './broadcast_util';\nimport {op} from './operation';\n\n/**\n * Returns the truth value of (a == b) element-wise. Supports broadcasting.\n *\n * ```js\n * const a = tf.tensor1d([1, 2, 3]);\n * const b = tf.tensor1d([2, 2, 2]);\n *\n * a.equal(b).print();\n * ```\n *\n * @param a The first input tensor.\n * @param b The second input tensor. Must have the same dtype as `a`.\n *\n * @doc {heading: 'Operations', subheading: 'Logical'}\n */\nfunction equal_(\n a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n let $a = convertToTensor(a, 'a', 'equal', 'string_or_numeric');\n let $b = convertToTensor(b, 'b', 'equal', 'string_or_numeric');\n [$a, $b] = makeTypesMatch($a, $b);\n\n assertAndGetBroadcastShape($a.shape, $b.shape);\n\n const inputs: EqualInputs = {a: $a, b: $b};\n\n return ENGINE.runKernel(Equal, inputs as unknown as NamedTensorMap);\n}\n\nexport const equal = /* @__PURE__ */ op({equal_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Select, SelectInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {broadcastTo} from './broadcast_to';\nimport {assertAndGetBroadcastShape} from './broadcast_util';\nimport {op} from './operation';\n\n/**\n * Returns the elements, either `a` or `b` depending on the `condition`.\n *\n * If the condition is true, select from `a`, otherwise select from `b`.\n *\n * ```js\n * const cond = tf.tensor1d([false, false, true], 'bool');\n * const a = tf.tensor1d([1 , 2, 3]);\n * const b = tf.tensor1d([-1, -2, -3]);\n *\n * a.where(cond, b).print();\n * ```\n *\n * @param condition The input condition. 
Must be of dtype bool.\n * @param a If `condition` is rank 1, `a` may have a higher rank but\n * its first dimension must match the size of `condition`.\n * @param b A tensor with the same dtype as `a` and with shape that is\n * compatible with `a`.\n * @return A tensor with same dtype as `a` and `b`, and shape that is\n * broadcastable from `a` and `b`.\n *\n * @doc {heading: 'Operations', subheading: 'Logical'}\n */\nfunction where_(\n condition: Tensor|TensorLike, a: T|TensorLike, b: T|TensorLike): T {\n const $a = convertToTensor(a, 'a', 'where');\n const $b = convertToTensor(b, 'b', 'where');\n const $condition = convertToTensor(condition, 'condition', 'where', 'bool');\n // TODO: move this logic to forward function when the broadcastTo op is\n // implemented in WASM.\n // Find the broadcastable shape for $condition, $a, and $b.\n const broadcastShape = assertAndGetBroadcastShape(\n assertAndGetBroadcastShape($condition.shape, $a.shape), $b.shape);\n const $broadcastedCondition = broadcastTo($condition, broadcastShape);\n const $broadcastedA = broadcastTo($a, broadcastShape);\n const $broadcastedB = broadcastTo($b, broadcastShape);\n\n const inputs: SelectInputs = {\n condition: $broadcastedCondition,\n t: $broadcastedA,\n e: $broadcastedB\n };\n return ENGINE.runKernel(Select, inputs as unknown as NamedTensorMap);\n}\n\nexport const where = /* @__PURE__ */ op({where_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {div} from './div';\nimport {equal} from './equal';\nimport {op} from './operation';\nimport {where} from './where';\nimport {zerosLike} from './zeros_like';\n\n/**\n * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting. Return 0\n * if denominator is 0.\n *\n *\n * ```js\n * const a = tf.tensor1d([1, 4, 9, 16]);\n * const b = tf.tensor1d([1, 2, 3, 4]);\n * const c = tf.tensor1d([0, 0, 0, 0]);\n *\n * a.divNoNan(b).print(); // or tf.divNoNan(a, b)\n * a.divNoNan(c).print(); // or tf.divNoNan(a, c)\n * ```\n *\n * ```js\n * // Broadcast div a with b.\n * const a = tf.tensor1d([2, 4, 6, 8]);\n * const b = tf.scalar(2);\n * const c = tf.scalar(0);\n *\n * a.divNoNan(b).print(); // or tf.divNoNan(a, b)\n * a.divNoNan(c).print(); // or tf.divNoNan(a, c)\n * ```\n *\n * @param a The first tensor as the numerator.\n * @param b The second tensor as the denominator. 
Must have the same dtype as\n * `a`.\n *\n * @doc {heading: 'Operations', subheading: 'Arithmetic'}\n */\nfunction divNoNan_(\n a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n // TODO: Make this into its own kernel.\n let $a = convertToTensor(a, 'a', 'div');\n let $b = convertToTensor(b, 'b', 'div');\n [$a, $b] = makeTypesMatch($a, $b);\n\n const divResult = div($a, $b);\n const zeros = zerosLike(divResult);\n const bEqualsZero = equal($b, zeros);\n return where(bEqualsZero, zeros, divResult) as T;\n}\n\nexport const divNoNan = /* @__PURE__ */ op({divNoNan_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor,} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {matMul} from './mat_mul';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Computes the dot product of two matrices and/or vectors, `t1` and `t2`.\n *\n * ```js\n * const a = tf.tensor1d([1, 2]);\n * const b = tf.tensor2d([[1, 2], [3, 4]]);\n * const c = tf.tensor2d([[1, 2, 3], [4, 5, 6]]);\n *\n * a.dot(b).print(); // or tf.dot(a, b)\n * b.dot(a).print();\n * b.dot(c).print();\n * ```\n * @param t1 The first tensor in the dot operation.\n * @param t2 The second tensor in the dot operation.\n *\n * @doc {heading: 'Operations', subheading: 'Matrices'}\n */\nfunction dot_(t1: Tensor|TensorLike, t2: Tensor|TensorLike): Tensor {\n const $t1 = convertToTensor(t1, 't1', 'dot');\n const $t2 = convertToTensor(t2, 't2', 'dot');\n\n util.assert(\n ($t1.rank === 1 || $t1.rank === 2) && ($t2.rank === 1 || $t2.rank === 2),\n () => `Error in dot: inputs must all be rank 1 or 2, but got ranks ` +\n `${$t1.rank} and ${$t2.rank}.`);\n\n const t1Inner = ($t1.rank === 1 ? $t1.size : $t1.shape[1]);\n const t2Inner = ($t2.rank === 1 ? $t2.size : $t2.shape[0]);\n\n util.assert(\n t1Inner === t2Inner,\n () => `Error in dot: inner dimensions of inputs must match, but got ` +\n `${t1Inner} and ${t2Inner}.`);\n\n if ($t1.rank === 1 && $t2.rank === 1) {\n const t12D = reshape($t1, [1, -1]);\n const t22D = reshape($t2, [-1, 1]);\n const t1t2 = matMul(t12D, t22D);\n return reshape(t1t2, []);\n } else if ($t1.rank === 1 && $t2.rank === 2) {\n const t12D = reshape($t1, [1, -1]);\n const t22D = reshape($t2, [$t2.shape[0], $t2.shape[1]]);\n const t1t2 = matMul(t12D, t22D);\n return reshape(t1t2, [t1t2.size]);\n } else if ($t1.rank === 2 && $t2.rank === 1) {\n const t22D = reshape($t2, [-1, 1]);\n const t1t2 = matMul($t1, t22D);\n return reshape(t1t2, [t1t2.size]);\n } else {\n const t22D = reshape($t2, [$t2.shape[0], $t2.shape[1]]);\n const t1t2 = matMul($t1, t22D);\n return t1t2;\n }\n}\n\nexport const dot = /* @__PURE__ */ op({dot_});\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Einsum, EinsumAttrs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\n\nimport {op} from './operation';\n\n/**\n * Tensor contraction over specified indices and outer product.\n *\n * `einsum` allows defining Tensors by defining their element-wise computation.\n * This computation is based on\n * [Einstein summation](https://en.wikipedia.org/wiki/Einstein_notation).\n *\n * Some special cases include:\n *\n * Matrix multiplication:\n * ```js\n * const x = tf.tensor2d([[1, 2, 3], [4, 5, 6]]);\n * const y = tf.tensor2d([[0, 1], [2, 3], [4, 5]]);\n * x.print();\n * y.print();\n * tf.einsum('ij,jk->ik', x, y).print();\n * ```\n *\n * Dot product:\n * ```js\n * const x = tf.tensor1d([1, 2, 3]);\n * const y = tf.tensor1d([0, 1, 2]);\n * x.print();\n * y.print();\n * tf.einsum('i,i->', x, y).print();\n * ```\n *\n * Batch dot product:\n * ```js\n * const x = tf.tensor2d([[1, 2, 3], [4, 5, 6]]);\n * const y = tf.tensor2d([[0, 1, 2], [3, 4, 5]]);\n * x.print();\n * y.print();\n * tf.einsum('bi,bi->b', x, y).print();\n * ```\n *\n * Outer prouduct:\n * ```js\n * const x = tf.tensor1d([1, 3, 5]);\n * const y = tf.tensor1d([2, 4, 6]);\n * x.print();\n * y.print();\n * tf.einsum('i,j->ij', x, y).print();\n * ```\n *\n * Matrix transpose:\n * ```js\n * const x = tf.tensor2d([[1, 2], [3, 4]]);\n * x.print();\n * tf.einsum('ij->ji', x).print();\n * ```\n *\n * Batch matrix transpose:\n * ```js\n * const x = tf.tensor3d([[[1, 2], [3, 4]], [[-1, -2], [-3, -4]]]);\n * x.print();\n * tf.einsum('bij->bji', x).print();\n * ```\n *\n * Limitations:\n *\n * This implementation of einsum has the following limitations:\n *\n * - Does not support >2 input tensors.\n * - Does not support duplicate axes for any given input tensor. E.g., equation\n * 'ii->' is not supported.\n * - The `...` notation is not supported.\n *\n * @param equation a string describing the contraction, in the same format as\n * [numpy.einsum](https://numpy.org/doc/stable/reference/generated/numpy.einsum.html).\n * @param tensors the input(s) to contract (each one a Tensor), whose shapes\n * should be consistent with equation.\n * @returns The output tensor.\n *\n * @doc {heading: 'Tensors', subheading: 'Matrices'}\n */\nexport function einsum_(equation: string, ...tensors: Tensor[]): Tensor {\n const $tensors =\n tensors.map((t, i) => convertToTensor(t, `tensors${i}`, 'einsum'));\n const attrs: EinsumAttrs = {equation};\n return ENGINE.runKernel(\n Einsum, $tensors as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const einsum = /* @__PURE__ */ op({einsum_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Elu, EluInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes exponential linear element-wise: `x > 0 ? x : (e ^ x) - 1`.\n *\n * ```js\n * const x = tf.tensor1d([-1, 1, -3, 2]);\n *\n * x.elu().print(); // or tf.elu(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction elu_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'elu', 'float32');\n\n const inputs: EluInputs = {x: $x};\n\n return ENGINE.runKernel(Elu, inputs as unknown as NamedTensorMap);\n}\n\nexport const elu = /* @__PURE__ */ op({elu_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Erf, ErfInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {cast} from './cast';\nimport {op} from './operation';\n\n/**\n * Computes Gauss error function of the input `tf.Tensor` element-wise:\n * `erf(x)`\n *\n * ```js\n * const x = tf.tensor1d([0, .1, -.1, .7]);\n *\n * x.erf().print(); // or tf.erf(x);\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction erf_(x: T|TensorLike): T {\n let $x = convertToTensor(x, 'x', 'erf');\n util.assert(\n $x.dtype === 'int32' || $x.dtype === 'float32',\n () => 'Input dtype must be `int32` or `float32`.');\n\n if ($x.dtype === 'int32') {\n $x = cast($x, 'float32');\n }\n\n const inputs: ErfInputs = {x: $x};\n return ENGINE.runKernel(Erf, inputs as unknown as NamedTensorMap);\n}\nexport const erf = /* @__PURE__ */ op({erf_});\n","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport * as util from '../util';\n\n/**\n * Returns true if the axis specifies the inner most dimensions of the\n * array.\n */\nexport function axesAreInnerMostDims(axes: number[], rank: number): boolean {\n for (let i = 0; i < axes.length; ++i) {\n if (axes[axes.length - i - 1] !== rank - 1 - i) {\n return false;\n }\n }\n return true;\n}\n\nexport function combineLocations(\n outputLoc: number[], reduceLoc: number[], axes: number[]): number[] {\n const rank = outputLoc.length + reduceLoc.length;\n const loc = [];\n let outIdx = 0;\n let reduceIdx = 0;\n   for (let dim = 0; dim < rank; dim++) {\n if (axes.indexOf(dim) === -1) {\n loc.push(outputLoc[outIdx++]);\n } else {\n loc.push(reduceLoc[reduceIdx++]);\n }\n }\n return loc;\n}\n\nexport function computeOutAndReduceShapes(\n aShape: number[], axes: number[]): [number[], number[]] {\n const outShape = [];\n const rank = aShape.length;\n for (let dim = 0; dim < rank; dim++) {\n if (axes.indexOf(dim) === -1) {\n outShape.push(aShape[dim]);\n }\n }\n const reduceShape = axes.map(dim => aShape[dim]);\n return [outShape, reduceShape];\n}\n\nexport function expandShapeToKeepDim(\n shape: number[], axes: number[]): number[] {\n const reduceSubShape = axes.map(x => 1);\n return combineLocations(shape, reduceSubShape, axes);\n}\n\nexport function assertAxesAreInnerMostDims(\n msg: string, axes: number[], rank: number): void {\n util.assert(\n axesAreInnerMostDims(axes, rank),\n () => `${msg} supports only inner-most axes for now. ` +\n `Got axes ${axes} and rank-${rank} input.`);\n}\n\n/**\n * Returns the axes permutation to be used with `tf.transpose`, if such\n * permutation is necessary. Otherwise it returns null. This method is used by\n * operations that operate only on inner-most axes.\n */\nexport function getAxesPermutation(axes: number[], rank: number): number[]|\n null {\n if (axesAreInnerMostDims(axes, rank)) {\n return null;\n }\n const result: number[] = [];\n for (let i = 0; i < rank; ++i) {\n if (axes.indexOf(i) === -1) {\n result.push(i);\n }\n }\n axes.forEach(axis => result.push(axis));\n return result;\n}\n\n/** Returns the axes permutation that undoes the original permutation. */\nexport function getUndoAxesPermutation(axes: number[]): number[] {\n return axes.map((axis, i) => [i, axis])\n .sort((a, b) => a[1] - b[1])\n .map(x => x[0]);\n}\n\nexport function getInnerMostAxes(numAxes: number, rank: number): number[] {\n const res: number[] = [];\n for (let i = rank - numAxes; i < rank; ++i) {\n res.push(i);\n }\n return res;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Max, MaxAttrs, MaxInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes the maximum of elements across dimensions of a `tf.Tensor`.\n *\n * Reduces the input along the dimensions given in `axes`. Unless `keepDims`\n * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in\n * `axes`. If `keepDims` is true, the reduced dimensions are retained with\n * length 1. If `axes` has no entries, all dimensions are reduced, and a\n * `tf.Tensor` with a single element is returned.\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3]);\n *\n * x.max().print(); // or tf.max(x)\n * ```\n *\n * ```js\n * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n *\n * const axis = 1;\n * x.max(axis).print(); // or tf.max(x, axis)\n * ```\n *\n * @param x The input tensor.\n * @param axis The dimension(s) to reduce. By default it reduces\n * all dimensions.\n * @param keepDims If true, retains reduced dimensions with size 1.\n *\n * @doc {heading: 'Operations', subheading: 'Reduction'}\n */\nfunction max_(\n x: Tensor|TensorLike, axis: number|number[] = null, keepDims = false): T {\n const $x = convertToTensor(x, 'x', 'max');\n\n const inputs: MaxInputs = {x: $x};\n const attrs: MaxAttrs = {reductionIndices: axis, keepDims};\n\n return ENGINE.runKernel(\n Max, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const max = /* @__PURE__ */ op({max_});\n","/**\n * @license\n * Copyright 2020 Google Inc. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Min, MinAttrs, MinInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes the minimum value from the input.\n *\n * Reduces the input along the dimensions given in `axes`. Unless `keepDims`\n * is true, the rank of the array is reduced by 1 for each entry in `axes`.\n * If `keepDims` is true, the reduced dimensions are retained with length 1.\n * If `axes` has no entries, all dimensions are reduced, and an array with a\n * single element is returned.\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3]);\n *\n * x.min().print(); // or tf.min(x)\n * ```\n *\n * ```js\n * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n *\n * const axis = 1;\n * x.min(axis).print(); // or tf.min(x, axis)\n * ```\n *\n * @param x The input Tensor.\n * @param axis The dimension(s) to reduce. By default it reduces\n * all dimensions.\n * @param keepDims If true, retains reduced dimensions with size 1.\n *\n * @doc {heading: 'Operations', subheading: 'Reduction'}\n */\nfunction min_(\n x: Tensor|TensorLike, axis: number|number[] = null, keepDims = false): T {\n const $x = convertToTensor(x, 'x', 'min');\n\n const inputs: MinInputs = {x: $x};\n const attrs: MinAttrs = {axis, keepDims};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n return ENGINE.runKernel(\n Min, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as T;\n}\n\nexport const min = /* @__PURE__ */ op({min_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Sum, SumAttrs, SumInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {cast} from './cast';\nimport {op} from './operation';\n\n/**\n * Computes the sum of elements across dimensions of a `tf.Tensor`.\n *\n * Reduces the input along the dimensions given in `axes`. Unless `keepDims`\n * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in\n * `axes`. If `keepDims` is true, the reduced dimensions are retained with\n * length 1. If axes has no entries, all dimensions are reduced, and a\n * `tf.Tensor` with a single element is returned.\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3]);\n *\n * x.sum().print(); // or tf.sum(x)\n * ```\n *\n * ```js\n * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n *\n * const axis = 1;\n * x.sum(axis).print(); // or tf.sum(x, axis)\n * ```\n *\n * @param x The input tensor to compute the sum over. If the dtype is `bool`\n * it will be converted to `int32` and the output dtype will be `int32`.\n * @param axis The dimension(s) to reduce. By default it reduces\n * all dimensions.\n * @param keepDims If true, retains reduced dimensions with size 1.\n *\n * @doc {heading: 'Operations', subheading: 'Reduction'}\n */\nfunction sum_(\n x: Tensor|TensorLike, axis: number|number[] = null, keepDims = false): T {\n let $x = convertToTensor(x, 'x', 'sum');\n if ($x.dtype === 'bool') {\n $x = cast($x, 'int32');\n }\n\n const inputs: SumInputs = {x: $x};\n const attrs: SumAttrs = {axis, keepDims};\n\n return ENGINE.runKernel(\n Sum, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const sum = /* @__PURE__ */ op({sum_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {parseAxisParam} from '../util';\n\nimport {abs} from './abs';\nimport * as axis_util from './axis_util';\nimport {max} from './max';\nimport {min} from './min';\nimport {op} from './operation';\nimport {pow} from './pow';\nimport {reshape} from './reshape';\nimport {scalar} from './scalar';\nimport {sqrt} from './sqrt';\nimport {square} from './square';\nimport {sum} from './sum';\n\n/**\n * Computes the norm of scalar, vectors, and matrices.\n * This function can compute several different vector norms (the 1-norm, the\n * Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0)\n * and matrix norms (Frobenius, 1-norm, and inf-norm).\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3, 4]);\n *\n * x.norm().print(); // or tf.norm(x)\n * ```\n *\n * @param x The input array.\n * @param ord Optional. Order of the norm. Supported norm types are\n * following:\n *\n * | ord | norm for matrices | norm for vectors\n * |------------|---------------------------|---------------------\n * |'euclidean' |Frobenius norm |2-norm\n * |'fro' |Frobenius norm\t |\n * |Infinity |max(sum(abs(x), axis=1)) |max(abs(x))\n * |-Infinity |min(sum(abs(x), axis=1)) |min(abs(x))\n * |1 |max(sum(abs(x), axis=0)) |sum(abs(x))\n * |2 | |sum(abs(x)^2)^(1/2)\n *\n * @param axis Optional. If axis is null (the default), the input is\n * considered a vector and a single vector norm is computed over the entire\n * set of values in the Tensor, i.e. norm(x, ord) is equivalent\n * to norm(x.reshape([-1]), ord). If axis is an integer, the input\n * is considered a batch of vectors, and axis determines the axis in x\n * over which to compute vector norms. If axis is a 2-tuple of integer it is\n * considered a batch of matrices and axis determines the axes in NDArray\n * over which to compute a matrix norm.\n * @param keepDims Optional. 
If true, the norm has the same dimensionality\n * as the input.\n *\n * @doc {heading: 'Operations', subheading: 'Matrices'}\n */\nfunction norm_(\n x: Tensor|TensorLike, ord: number|'euclidean'|'fro' = 'euclidean',\n axis: number|number[] = null, keepDims = false): Tensor {\n x = convertToTensor(x, 'x', 'norm');\n\n const norm = normImpl(x, ord, axis);\n let keepDimsShape = norm.shape;\n if (keepDims) {\n const axes = parseAxisParam(axis, x.shape);\n keepDimsShape = axis_util.expandShapeToKeepDim(norm.shape, axes);\n }\n return reshape(norm, keepDimsShape);\n}\n\nfunction normImpl(\n x: Tensor, p: number|string, axis: number|number[] = null): Tensor {\n if (x.rank === 0) {\n return abs(x);\n }\n\n // consider vector when no axis is specified\n if (x.rank !== 1 && axis === null) {\n return normImpl(reshape(x, [-1]), p, axis);\n }\n\n // vector\n if (x.rank === 1 || typeof axis === 'number' ||\n Array.isArray(axis) && axis.length === 1) {\n if (p === 1) {\n return sum(abs(x), axis);\n }\n if (p === Infinity) {\n return max(abs(x), axis);\n }\n if (p === -Infinity) {\n return min(abs(x), axis);\n }\n if (p === 'euclidean' || p === 2) {\n // norm(x, 2) = sum(abs(xi) ^ 2) ^ 1/2\n return sqrt(sum(pow(abs(x), scalar(2, 'int32')), axis));\n }\n\n throw new Error(`Error in norm: invalid ord value: ${p}`);\n }\n\n // matrix (assumption axis[0] < axis[1])\n if (Array.isArray(axis) && axis.length === 2) {\n if (p === 1) {\n return max(sum(abs(x), axis[0]), axis[1] - 1);\n }\n if (p === Infinity) {\n return max(sum(abs(x), axis[1]), axis[0]);\n }\n if (p === -Infinity) {\n return min(sum(abs(x), axis[1]), axis[0]);\n }\n if (p === 'fro' || p === 'euclidean') {\n // norm(x) = sqrt(sum(pow(x, 2)))\n return sqrt(sum(square(x), axis));\n }\n\n throw new Error(`Error in norm: invalid ord value: ${p}`);\n }\n\n throw new Error(`Error in norm: invalid axis: ${axis}`);\n}\n\nexport const norm = /* @__PURE__ */ op({norm_});\n","/**\n * @license\n * Copyright 2022 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {TensorLike} from '../types';\n\nimport {norm} from './norm';\nimport {op} from './operation';\n\n/**\n * Computes the Euclidean norm of scalar, vectors, and matrices.\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3, 4]);\n *\n * x.euclideanNorm().print(); // or tf.euclideanNorm(x)\n * ```\n *\n * @param x The input array.\n * @param axis Optional. If axis is null (the default), the input is\n * considered a vector and a single vector norm is computed over the entire\n * set of values in the Tensor, i.e. euclideanNorm(x) is equivalent\n * to euclideanNorm(x.reshape([-1])). If axis is an integer, the input\n * is considered a batch of vectors, and axis determines the axis in x\n * over which to compute vector norms. 
If axis is a 2-tuple of integer it is\n * considered a batch of matrices and axis determines the axes in NDArray\n * over which to compute a matrix norm.\n * @param keepDims Optional. If true, the norm has the same dimensionality\n * as the input.\n *\n * @doc {heading: 'Operations', subheading: 'Matrices'}\n */\nfunction euclideanNorm_(\n x: Tensor|TensorLike, axis: number|number[] = null,\n keepDims = false): Tensor {\n return norm(x, 'euclidean', axis, keepDims);\n}\n\nexport const euclideanNorm = /* @__PURE__ */ op({euclideanNorm_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Exp, ExpInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes exponential of the input `tf.Tensor` element-wise. `e ^ x`\n *\n * ```js\n * const x = tf.tensor1d([1, 2, -3]);\n *\n * x.exp().print(); // or tf.exp(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction exp_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'exp');\n\n const inputs: ExpInputs = {x: $x};\n return ENGINE.runKernel(Exp, inputs as unknown as NamedTensorMap);\n}\nexport const exp = /* @__PURE__ */ op({exp_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {ExpandDims, ExpandDimsAttrs, ExpandDimsInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\n\n/**\n * Returns a `tf.Tensor` that has expanded rank, by inserting a dimension\n * into the tensor's shape.\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3, 4]);\n * const axis = 1;\n * x.expandDims(axis).print();\n * ```\n *\n * @param x The input tensor whose dimensions are to be expanded.\n * @param axis The dimension index at which to insert shape of `1`. Defaults\n * to 0 (the first dimension).\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction expandDims_(x: Tensor|TensorLike, axis = 0): T {\n const $x = convertToTensor(x, 'x', 'expandDims', 'string_or_numeric');\n\n util.assert(axis <= $x.rank, () => 'Axis must be <= rank of the tensor');\n\n const inputs: ExpandDimsInputs = {input: $x};\n const attrs: ExpandDimsAttrs = {dim: axis};\n\n return ENGINE.runKernel(\n ExpandDims, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const expandDims = /* @__PURE__ */ op({expandDims_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Expm1, Expm1Inputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes exponential of the input `tf.Tensor` minus one element-wise.\n * `e ^ x - 1`\n *\n * ```js\n * const x = tf.tensor1d([1, 2, -3]);\n *\n * x.expm1().print(); // or tf.expm1(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction expm1_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'expm1');\n\n const inputs: Expm1Inputs = {x: $x};\n return ENGINE.runKernel(Expm1, inputs as unknown as NamedTensorMap);\n}\nexport const expm1 = /* @__PURE__ */ op({expm1_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Tile, TileAttrs, TileInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\n\n/**\n * Construct a tensor by repeating it the number of times given by reps.\n *\n * This operation creates a new tensor by replicating `input` `reps`\n * times. The output tensor's `i`th dimension has `input.shape[i] *\n * reps[i]` elements, and the values of `input` are replicated\n * `reps[i]` times along the `i`th dimension. 
For example, tiling\n * `[a, b, c, d]` by `[2]` produces `[a, b, c, d, a, b, c, d]`.\n *\n * ```js\n * const a = tf.tensor1d([1, 2]);\n *\n * a.tile([2]).print(); // or a.tile([2])\n * ```\n *\n * ```js\n * const a = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n *\n * a.tile([1, 2]).print(); // or a.tile([1, 2])\n * ```\n * @param x The tensor to tile.\n * @param reps Determines the number of replications per dimension.\n *\n * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}\n */\nfunction tile_(x: T|TensorLike, reps: number[]): T {\n const $x = convertToTensor(x, 'x', 'tile', 'string_or_numeric');\n util.assert(\n $x.rank === reps.length,\n () => `Error in transpose: rank of input ${$x.rank} ` +\n `must match length of reps ${reps}.`);\n\n const inputs: TileInputs = {x: $x};\n const attrs: TileAttrs = {reps};\n\n return ENGINE.runKernel(\n Tile, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const tile = /* @__PURE__ */ op({tile_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor2D} from '../tensor';\nimport {DataType} from '../types';\n\nimport {buffer} from './buffer';\nimport {expandDims} from './expand_dims';\nimport {op} from './operation';\nimport {reshape} from './reshape';\nimport {tile} from './tile';\n\n/**\n * Create an identity matrix.\n *\n * @param numRows Number of rows.\n * @param numColumns Number of columns. Defaults to `numRows`.\n * @param batchShape If provided, will add the batch shape to the beginning\n * of the shape of the returned `tf.Tensor` by repeating the identity\n * matrix.\n * @param dtype Data type.\n * @returns Identity matrix of the specified size and data type, possibly\n * with batch repetition if `batchShape` is specified.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction eye_(\n numRows: number, numColumns?: number,\n batchShape?:\n [\n number\n ]|[number,\n number]|[number, number, number]|[number, number, number, number],\n dtype: DataType = 'float32'): Tensor2D {\n if (numColumns == null) {\n numColumns = numRows;\n }\n const buff = buffer([numRows, numColumns], dtype);\n const n = numRows <= numColumns ? 
numRows : numColumns;\n for (let i = 0; i < n; ++i) {\n buff.set(1, i, i);\n }\n const out: Tensor2D = reshape(buff.toTensor(), [numRows, numColumns]);\n if (batchShape == null) {\n return out;\n } else {\n if (batchShape.length === 1) {\n return tile(expandDims(out, 0), [batchShape[0], 1, 1]) as Tensor2D;\n } else if (batchShape.length === 2) {\n // tslint:disable-next-line:no-unnecessary-type-assertion\n return tile(\n expandDims(expandDims(out, 0), 0),\n [batchShape[0], batchShape[1], 1, 1]) as Tensor2D;\n } else if (batchShape.length === 3) {\n // tslint:disable-next-line:no-unnecessary-type-assertion\n return tile(expandDims(expandDims(expandDims(out, 0), 0), 0), [\n batchShape[0], batchShape[1], batchShape[2], 1, 1\n ]) as Tensor2D;\n } else {\n throw new Error(\n `eye() currently supports only 1D and 2D ` +\n // tslint:disable-next-line:no-any\n `batchShapes, but received ${(batchShape as any).length}D.`);\n }\n }\n}\n\nexport const eye = /* @__PURE__ */ op({eye_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Floor, FloorInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes floor of input `tf.Tensor` element-wise: `floor(x)`.\n *\n * ```js\n * const x = tf.tensor1d([.6, 1.1, -3.3]);\n *\n * x.floor().print(); // or tf.floor(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction floor_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'floor', 'float32');\n\n const inputs: FloorInputs = {x: $x};\n return ENGINE.runKernel(Floor, inputs as unknown as NamedTensorMap);\n}\nexport const floor = /* @__PURE__ */ op({floor_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {GatherV2, GatherV2Attrs, GatherV2Inputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Gather slices from tensor `x`'s axis `axis` according to `indices`.\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3, 4]);\n * const indices = tf.tensor1d([1, 3, 3], 'int32');\n *\n * x.gather(indices).print();\n * ```\n *\n * ```js\n * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n * const indices = tf.tensor1d([1, 1, 0], 'int32');\n *\n * x.gather(indices).print();\n * ```\n * @param x The input tensor whose slices are to be gathered.\n * @param indices The indices of the values to extract.\n * @param axis The axis over which to select values. Defaults to 0.\n * @param batchDims Optional. The number of batch dimensions. It must be less\n * than or equal to rank(indices). Defaults to 0.\n * The output tensor will have shape of\n * `x.shape[:axis] + indices.shape[batchDims:] + x.shape[axis + 1:]`\n *\n * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}\n */\nfunction gather_(\n x: T|TensorLike, indices: Tensor|TensorLike, axis = 0, batchDims = 0): T {\n const $x = convertToTensor(x, 'x', 'gather');\n const $indices = convertToTensor(indices, 'indices', 'gather', 'int32');\n\n const inputs: GatherV2Inputs = {x: $x, indices: $indices};\n const attrs: GatherV2Attrs = {axis, batchDims};\n\n return ENGINE.runKernel(\n GatherV2, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const gather = /* @__PURE__ */ op({gather_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Greater, GreaterInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {assertAndGetBroadcastShape} from './broadcast_util';\nimport {op} from './operation';\n\n/**\n * Returns the truth value of (a > b) element-wise. Supports broadcasting.\n *\n * ```js\n * const a = tf.tensor1d([1, 2, 3]);\n * const b = tf.tensor1d([2, 2, 2]);\n *\n * a.greater(b).print();\n * ```\n *\n * @param a The first input tensor.\n * @param b The second input tensor. Must have the same dtype as `a`.\n *\n * @doc {heading: 'Operations', subheading: 'Logical'}\n */\nfunction greater_(\n a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n let $a = convertToTensor(a, 'a', 'greater', 'string_or_numeric');\n let $b = convertToTensor(b, 'b', 'greater', 'string_or_numeric');\n [$a, $b] = makeTypesMatch($a, $b);\n\n assertAndGetBroadcastShape($a.shape, $b.shape);\n\n const inputs: GreaterInputs = {a: $a, b: $b};\n\n return ENGINE.runKernel(Greater, inputs as unknown as NamedTensorMap);\n}\n\nexport const greater = /* @__PURE__ */ op({greater_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {GreaterEqual, GreaterEqualInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {assertAndGetBroadcastShape} from './broadcast_util';\nimport {op} from './operation';\n\n/**\n * Returns the truth value of (a >= b) element-wise. Supports broadcasting.\n *\n * ```js\n * const a = tf.tensor1d([1, 2, 3]);\n * const b = tf.tensor1d([2, 2, 2]);\n *\n * a.greaterEqual(b).print();\n * ```\n *\n * @param a The first input tensor.\n * @param b The second input tensor. 
Must have the same dtype as `a`.\n *\n * @doc {heading: 'Operations', subheading: 'Logical'}\n */\nfunction greaterEqual_(\n a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n let $a = convertToTensor(a, 'a', 'greaterEqual', 'string_or_numeric');\n let $b = convertToTensor(b, 'b', 'greaterEqual', 'string_or_numeric');\n [$a, $b] = makeTypesMatch($a, $b);\n\n assertAndGetBroadcastShape($a.shape, $b.shape);\n\n const inputs: GreaterEqualInputs = {a: $a, b: $b};\n\n return ENGINE.runKernel(GreaterEqual, inputs as unknown as NamedTensorMap);\n}\n\nexport const greaterEqual = /* @__PURE__ */ op({greaterEqual_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {IsFinite, IsFiniteInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Returns which elements of x are finite.\n *\n * ```js\n * const x = tf.tensor1d([NaN, Infinity, -Infinity, 0, 1]);\n *\n * x.isFinite().print(); // or tf.isNaN(x)\n * ```\n * @param x The input Tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction isFinite_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'isFinite');\n\n const inputs: IsFiniteInputs = {x: $x};\n\n return ENGINE.runKernel(IsFinite, inputs as unknown as NamedTensorMap);\n}\nexport const isFinite = /* @__PURE__ */ op({isFinite_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {IsInf, IsInfInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Returns which elements of x are Infinity or -Infinity.\n *\n * ```js\n * const x = tf.tensor1d([NaN, Infinity, -Infinity, 0, 1]);\n *\n * x.isInf().print(); // or tf.isNaN(x)\n * ```\n * @param x The input Tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction isInf_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'isInf');\n\n const inputs: IsInfInputs = {x: $x};\n\n return ENGINE.runKernel(IsInf, inputs as unknown as NamedTensorMap);\n}\nexport const isInf = /* @__PURE__ */ op({isInf_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {IsNan, IsNanInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Returns which elements of x are NaN.\n *\n * ```js\n * const x = tf.tensor1d([NaN, Infinity, -Infinity, 0, 1]);\n *\n * x.isNaN().print(); // or tf.isNaN(x)\n * ```\n * @param x The input Tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction isNaN_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'isNaN');\n const inputs: IsNanInputs = {x: $x};\n\n return ENGINE.runKernel(IsNan, inputs as unknown as NamedTensorMap);\n}\nexport const isNaN = /* @__PURE__ */ op({isNaN_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {LeakyRelu, LeakyReluAttrs, LeakyReluInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes leaky rectified linear element-wise.\n *\n * See\n * [http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf](\n * http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf)\n *\n * ```js\n * const x = tf.tensor1d([-1, 2, -3, 4]);\n *\n * x.leakyRelu(0.1).print(); // or tf.leakyRelu(x, 0.1)\n * ```\n * @param x The input tensor.\n * @param alpha The scaling factor for negative values, defaults to 0.2.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction leakyRelu_(x: T|TensorLike, alpha = 0.2): T {\n const $x = convertToTensor(x, 'x', 'leakyRelu');\n\n const inputs: LeakyReluInputs = {x: $x};\n const attrs: LeakyReluAttrs = {alpha};\n\n return ENGINE.runKernel(\n LeakyRelu, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const leakyRelu = /* @__PURE__ */ op({leakyRelu_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Less, LessInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {assertAndGetBroadcastShape} from './broadcast_util';\nimport {op} from './operation';\n\n/**\n * Returns the truth value of (a < b) element-wise. Supports broadcasting.\n *\n * ```js\n * const a = tf.tensor1d([1, 2, 3]);\n * const b = tf.tensor1d([2, 2, 2]);\n *\n * a.less(b).print();\n * ```\n * @param a The first input tensor.\n * @param b The second input tensor. 
Must have the same dtype as `a`.\n *\n * @doc {heading: 'Operations', subheading: 'Logical'}\n */\nfunction less_(\n a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n let $a = convertToTensor(a, 'a', 'less', 'string_or_numeric');\n let $b = convertToTensor(b, 'b', 'less', 'string_or_numeric');\n [$a, $b] = makeTypesMatch($a, $b);\n\n assertAndGetBroadcastShape($a.shape, $b.shape);\n\n const inputs: LessInputs = {a: $a, b: $b};\n\n return ENGINE.runKernel(Less, inputs as unknown as NamedTensorMap);\n}\n\nexport const less = /* @__PURE__ */ op({less_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {LessEqual, LessEqualInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {assertAndGetBroadcastShape} from './broadcast_util';\nimport {op} from './operation';\n\n/**\n * Returns the truth value of (a <= b) element-wise. Supports broadcasting.\n *\n * ```js\n * const a = tf.tensor1d([1, 2, 3]);\n * const b = tf.tensor1d([2, 2, 2]);\n *\n * a.lessEqual(b).print();\n * ```\n *\n * @param a The first input tensor.\n * @param b The second input tensor. Must have the same dtype as `a`.\n *\n * @doc {heading: 'Operations', subheading: 'Logical'}\n */\nfunction lessEqual_(\n a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n let $a = convertToTensor(a, 'a', 'lessEqual', 'string_or_numeric');\n let $b = convertToTensor(b, 'b', 'lessEqual', 'string_or_numeric');\n [$a, $b] = makeTypesMatch($a, $b);\n\n assertAndGetBroadcastShape($a.shape, $b.shape);\n\n const inputs: LessEqualInputs = {a: $a, b: $b};\n\n return ENGINE.runKernel(LessEqual, inputs as unknown as NamedTensorMap);\n}\n\nexport const lessEqual = /* @__PURE__ */ op({lessEqual_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {LinSpace, LinSpaceAttrs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor1D} from '../tensor';\n\n/**\n * Return an evenly spaced sequence of numbers over the given interval.\n *\n * ```js\n * tf.linspace(0, 9, 10).print();\n * ```\n * @param start The start value of the sequence.\n * @param stop The end value of the sequence.\n * @param num The number of values to generate.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function linspace(start: number, stop: number, num: number): Tensor1D {\n if (num <= 0) {\n throw new Error('The number of values should be positive.');\n }\n\n const attrs: LinSpaceAttrs = {start, stop, num};\n return ENGINE.runKernel(LinSpace, {}, attrs as unknown as NamedAttrMap);\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {LRN, LRNAttrs, LRNInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor3D, Tensor4D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Normalizes the activation of a local neighborhood across or within\n * channels.\n *\n * @param x The input tensor. 
The 4-D input tensor is treated as a 3-D array\n * of 1D vectors (along the last dimension), and each vector is\n * normalized independently.\n * @param depthRadius The number of adjacent channels in the 1D normalization\n * window.\n * @param bias A constant bias term for the basis.\n * @param alpha A scale factor, usually positive.\n * @param beta An exponent.\n *\n * @doc {heading: 'Operations', subheading: 'Normalization'}\n */\nfunction localResponseNormalization_(\n x: T|TensorLike, depthRadius = 5, bias = 1, alpha = 1, beta = 0.5): T {\n const $x = convertToTensor(x, 'x', 'localResponseNormalization');\n util.assert(\n $x.rank === 4 || $x.rank === 3,\n () => `Error in localResponseNormalization: x must be rank 3 or 4 but got\n rank ${$x.rank}.`);\n util.assert(\n util.isInt(depthRadius),\n () => `Error in localResponseNormalization: depthRadius must be an ` +\n `integer but got depthRadius ${depthRadius}.`);\n\n let x4D = $x as Tensor4D;\n let reshapedTo4D = false;\n if ($x.rank === 3) {\n reshapedTo4D = true;\n x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);\n }\n\n const inputs: LRNInputs = {x: x4D};\n\n const attrs: LRNAttrs = {depthRadius, bias, alpha, beta};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const res = ENGINE.runKernel(\n LRN, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as T;\n\n if (reshapedTo4D) {\n return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) as T;\n } else {\n return res;\n }\n}\n\nexport const localResponseNormalization = /* @__PURE__ */ op({localResponseNormalization_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Log, LogInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes natural logarithm of the input `tf.Tensor` element-wise: `ln(x)`\n *\n * ```js\n * const x = tf.tensor1d([1, 2, Math.E]);\n *\n * x.log().print(); // or tf.log(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction log_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'log', 'float32');\n\n const inputs: LogInputs = {x: $x};\n return ENGINE.runKernel(Log, inputs as unknown as NamedTensorMap);\n}\nexport const log = /* @__PURE__ */ op({log_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Log1p, Log1pInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes natural logarithm of the input `tf.Tensor` plus one\n * element-wise: `ln(1 + x)`\n *\n * ```js\n * const x = tf.tensor1d([1, 2, Math.E - 1]);\n *\n * x.log1p().print(); // or tf.log1p(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction log1p_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'log1p');\n\n const inputs: Log1pInputs = {x: $x};\n return ENGINE.runKernel(Log1p, inputs as unknown as NamedTensorMap);\n}\nexport const log1p = /* @__PURE__ */ op({log1p_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Softplus, SoftplusInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes softplus of the input `tf.Tensor` element-wise: `log(exp(x) + 1)`\n *\n * ```js\n * const x = tf.tensor1d([0, 1, -1, .7]);\n *\n * x.softplus().print(); // or tf.softplus(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction softplus_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'softplus');\n\n const inputs: SoftplusInputs = {x: $x};\n return ENGINE.runKernel(Softplus, inputs as unknown as NamedTensorMap);\n}\nexport const softplus = /* @__PURE__ */ op({softplus_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {customGrad} from '../gradients';\nimport {Tensor} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {mul} from './mul';\nimport {neg} from './neg';\nimport {op} from './operation';\nimport {sigmoid} from './sigmoid';\nimport {softplus} from './softplus';\n\n/**\n * Computes log sigmoid of the input `tf.Tensor` element-wise:\n * `logSigmoid(x)`. For numerical stability, we use `-tf.softplus(-x)`.\n *\n * ```js\n * const x = tf.tensor1d([0, 1, -1, .7]);\n *\n * x.logSigmoid().print(); // or tf.logSigmoid(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction logSigmoid_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'logSigmoid');\n\n // Use a custom gradient to maintain previous implementation.\n // There is no LogSigmoid kernel in TF so we can't use engine.runKernel\n // directly\n const customOp = customGrad((x: Tensor) => {\n // TODO(yassogba) we can remove the chained softplus call here only\n // after backends have modualrized softplus at which point we can call\n // engine runKernel(..., Sotfplus, ...) directly.\n const value = neg(softplus(neg(x)));\n\n const gradFunc = (dy: T) => {\n const derX = mul(dy, sigmoid(neg(x)));\n return derX;\n };\n return {value, gradFunc};\n });\n\n return customOp($x) as T;\n}\nexport const logSigmoid = /* @__PURE__ */ op({logSigmoid_});\n","/**\n * @license\n * Copyright 2020 Google Inc. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {customGrad} from '../gradients';\n\nimport {Tensor} from '../tensor';\nimport {GradSaveFunc} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {cast} from './cast';\nimport {exp} from './exp';\nimport {log} from './log';\nimport {max} from './max';\nimport {mul} from './mul';\nimport {op} from './operation';\nimport {sub} from './sub';\nimport {sum} from './sum';\n\n/**\n * Computes the log softmax.\n *\n * ```js\n * const a = tf.tensor1d([1, 2, 3]);\n *\n * a.logSoftmax().print(); // or tf.logSoftmax(a)\n * ```\n *\n * ```js\n * const a = tf.tensor2d([2, 4, 6, 1, 2, 3], [2, 3]);\n *\n * a.logSoftmax().print(); // or tf.logSoftmax(a)\n * ```\n *\n * @param logits The logits array.\n * @param axis The dimension softmax would be performed on. Defaults to `-1`\n * which indicates the last dimension.\n *\n * @doc {heading: 'Operations', subheading: 'Normalization'}\n */\nfunction logSoftmax_(logits: T|TensorLike, axis = -1): T {\n const $logits = convertToTensor(logits, 'logits', 'logSoftmax');\n\n if (axis === -1) {\n axis = $logits.rank - 1;\n }\n if (axis !== $logits.rank - 1) {\n throw Error(\n 'Log Softmax along a non-last dimension is not yet supported. ' +\n `Logits was rank ${$logits.rank} and axis was ${axis}`);\n }\n\n // const forward: ForwardFunc = (backend, save) => {\n // const keepDims = true;\n // const xMax = max(logits, axis, true);\n // const shifted = sub(logits, xMax);\n // const value =\n // sub(cast(shifted, 'float32'), log(sum(exp(shifted), axis,\n // keepDims)));\n // save([value]);\n // return value;\n // };\n\n // Use a custom gradient for numerical stability.\n const customOp = customGrad((logits: Tensor, save: GradSaveFunc) => {\n const keepDims = true;\n const xMax = max(logits, axis, true);\n const shifted = sub(logits, xMax);\n const value =\n sub(cast(shifted, 'float32'), log(sum(exp(shifted), axis, keepDims)));\n save([value]);\n\n const gradFunc = (dy: Tensor, saved: Tensor[]) => {\n const [value] = saved;\n const keepDims = true;\n const softmax = exp(value);\n return sub(dy, mul(sum(dy, axis, keepDims), softmax));\n };\n return {value, gradFunc};\n });\n\n return customOp($logits) as T;\n\n // TODO Use Engine.runKernel when CPU/WebGL/WASM backends implement this.\n // const inputs: LogSoftmaxInputs = {logits: $logits};\n // const attrs: LogSoftmaxAttrs = {axis};\n // return ENGINE.runKernel(\n // LogSoftmax, inputs as unknown as NamedTensorMap,\n // attrs as unknown as NamedAttrMap);\n}\n\nexport const logSoftmax = /* @__PURE__ */ op({logSoftmax_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {parseAxisParam} from '../util';\n\nimport {add} from './add';\nimport {expandShapeToKeepDim} from './axis_util';\nimport {exp} from './exp';\nimport {log} from './log';\nimport {max} from './max';\nimport {op} from './operation';\nimport {reshape} from './reshape';\nimport {sub} from './sub';\nimport {sum} from './sum';\n\n/**\n * Computes the log(sum(exp(elements across the reduction dimensions))).\n *\n * Reduces the input along the dimensions given in `axis`. Unless `keepDims`\n * is true, the rank of the array is reduced by 1 for each entry in `axis`.\n * If `keepDims` is true, the reduced dimensions are retained with length 1.\n * If `axis` has no entries, all dimensions are reduced, and an array with a\n * single element is returned.\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3]);\n *\n * x.logSumExp().print(); // or tf.logSumExp(x)\n * ```\n *\n * ```js\n * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n *\n * const axis = 1;\n * x.logSumExp(axis).print(); // or tf.logSumExp(a, axis)\n * ```\n * @param x The input tensor.\n * @param axis The dimension(s) to reduce. If null (the default),\n * reduces all dimensions.\n * @param keepDims If true, retains reduced dimensions with length\n * of 1. Defaults to false.\n *\n * @doc {heading: 'Operations', subheading: 'Reduction'}\n */\nfunction logSumExp_(\n x: Tensor|TensorLike, axis: number|number[] = null, keepDims = false): T {\n const $x = convertToTensor(x, 'x', 'logSumExp');\n\n const axes = parseAxisParam(axis, $x.shape);\n const xMax = max($x, axes, true /* keepDims */);\n const a = sub($x, xMax);\n const b = exp(a);\n const c = sum(b, axes);\n const d = log(c);\n const res = add(reshape(xMax, d.shape), d);\n\n if (keepDims) {\n const newShape = expandShapeToKeepDim(res.shape, axes);\n return reshape(res, newShape) as T;\n }\n return res as T;\n}\n\nexport const logSumExp = /* @__PURE__ */ op({logSumExp_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {LogicalAnd, LogicalAndInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {assertAndGetBroadcastShape} from './broadcast_util';\nimport {op} from './operation';\n\n/**\n * Returns the truth value of `a AND b` element-wise. Supports broadcasting.\n *\n * ```js\n * const a = tf.tensor1d([false, false, true, true], 'bool');\n * const b = tf.tensor1d([false, true, false, true], 'bool');\n *\n * a.logicalAnd(b).print();\n * ```\n *\n * @param a The first input tensor. Must be of dtype bool.\n * @param b The second input tensor. Must be of dtype bool.\n *\n * @doc {heading: 'Operations', subheading: 'Logical'}\n */\nfunction logicalAnd_(\n a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n const $a = convertToTensor(a, 'a', 'logicalAnd', 'bool');\n const $b = convertToTensor(b, 'b', 'logicalAnd', 'bool');\n assertAndGetBroadcastShape($a.shape, $b.shape);\n\n const inputs: LogicalAndInputs = {a: $a, b: $b};\n\n return ENGINE.runKernel(LogicalAnd, inputs as unknown as NamedTensorMap);\n}\n\nexport const logicalAnd = /* @__PURE__ */ op({logicalAnd_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {LogicalNot, LogicalNotInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {op} from './operation';\n\n/**\n * Returns the truth value of `NOT x` element-wise.\n *\n * ```js\n * const a = tf.tensor1d([false, true], 'bool');\n *\n * a.logicalNot().print();\n * ```\n *\n * @param x The input tensor. 
Must be of dtype 'bool'.\n *\n * @doc {heading: 'Operations', subheading: 'Logical'}\n */\nfunction logicalNot_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'logicalNot', 'bool');\n const inputs: LogicalNotInputs = {x: $x};\n return ENGINE.runKernel(LogicalNot, inputs as unknown as NamedTensorMap);\n}\n\nexport const logicalNot = /* @__PURE__ */ op({logicalNot_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {LogicalOr, LogicalOrInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {assertAndGetBroadcastShape} from './broadcast_util';\nimport {op} from './operation';\n\n/**\n * Returns the truth value of `a OR b` element-wise. Supports broadcasting.\n *\n * ```js\n * const a = tf.tensor1d([false, false, true, true], 'bool');\n * const b = tf.tensor1d([false, true, false, true], 'bool');\n *\n * a.logicalOr(b).print();\n * ```\n * @param a The first input tensor. Must be of dtype bool.\n * @param b The second input tensor. Must be of dtype bool.\n *\n * @doc {heading: 'Operations', subheading: 'Logical'}\n */\nfunction logicalOr_(\n a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n const $a = convertToTensor(a, 'a', 'logicalOr', 'bool');\n const $b = convertToTensor(b, 'b', 'logicalOr', 'bool');\n assertAndGetBroadcastShape($a.shape, $b.shape);\n\n const inputs: LogicalOrInputs = {a: $a, b: $b};\n return ENGINE.runKernel(LogicalOr, inputs as unknown as NamedTensorMap);\n}\nexport const logicalOr = /* @__PURE__ */ op({logicalOr_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {assertAndGetBroadcastShape} from './broadcast_util';\nimport {logicalAnd} from './logical_and';\nimport {logicalNot} from './logical_not';\nimport {logicalOr} from './logical_or';\nimport {op} from './operation';\n\n/**\n * Returns the truth value of `a XOR b` element-wise. 
Supports broadcasting.\n *\n * ```js\n * const a = tf.tensor1d([false, false, true, true], 'bool');\n * const b = tf.tensor1d([false, true, false, true], 'bool');\n *\n * a.logicalXor(b).print();\n * ```\n *\n * @param a The first input tensor. Must be of dtype bool.\n * @param b The second input tensor. Must be of dtype bool.\n *\n * @doc {heading: 'Operations', subheading: 'Logical'}\n */\nfunction logicalXor_(\n a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n const $a = convertToTensor(a, 'a', 'logicalXor', 'bool');\n const $b = convertToTensor(b, 'b', 'logicalXor', 'bool');\n assertAndGetBroadcastShape($a.shape, $b.shape);\n\n // x ^ y = (x | y) & ~(x & y)\n return logicalAnd(logicalOr(a, b), logicalNot(logicalAnd(a, b)));\n}\n\nexport const logicalXor = /* @__PURE__ */ op({logicalXor_});\n","/**\n * @license\n * Copyright 2022 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {SearchSorted, SearchSortedAttrs, SearchSortedInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {sizeFromShape} from '../util_base';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\nconst INT32_MAX = 2147483648;\n/**\n * Searches for where a value would go in a sorted sequence.\n *\n * This is not a method for checking containment (like javascript in).\n *\n * The typical use case for this operation is \"binning\", \"bucketing\", or\n * \"discretizing\". The values are assigned to bucket-indices based on the edges\n * listed in 'sortedSequence'. This operation returns the bucket-index for each\n * value.\n *\n * The side argument controls which index is returned if a value lands exactly\n * on an edge.\n *\n * The axis is not settable for this operation. It always operates on the\n * innermost dimension (axis=-1). The operation will accept any number of outer\n * dimensions.\n *\n * Note: This operation assumes that 'sortedSequence' is sorted along the\n * innermost axis, maybe using 'sort(..., axis=-1)'. 
If the sequence is not\n * sorted no error is raised and the content of the returned tensor is not well\n * defined.\n *\n * ```js\n * const edges = tf.tensor1d([-1, 3.3, 9.1, 10.0]);\n * let values = tf.tensor1d([0.0, 4.1, 12.0]);\n * const result1 = tf.searchSorted(edges, values, 'left');\n * result1.print(); // [1, 2, 4]\n *\n * const seq = tf.tensor1d([0, 3, 9, 10, 10]);\n * values = tf.tensor1d([0, 4, 10]);\n * const result2 = tf.searchSorted(seq, values, 'left');\n * result2.print(); // [0, 2, 3]\n * const result3 = tf.searchSorted(seq, values, 'right');\n * result3.print(); // [1, 2, 5]\n *\n * const sortedSequence = tf.tensor2d([[0., 3., 8., 9., 10.],\n * [1., 2., 3., 4., 5.]]);\n * values = tf.tensor2d([[9.8, 2.1, 4.3],\n * [0.1, 6.6, 4.5, ]]);\n * const result4 = tf.searchSorted(sortedSequence, values, 'left');\n * result4.print(); // [[4, 1, 2], [0, 5, 4]]\n * ```\n * @param sortedSequence: N-D. Sorted sequence.\n * @param values: N-D. Search values.\n * @param side: 'left'|'right'. Defaults to 'left'. 'left' corresponds to lower\n * bound and 'right' to upper bound.\n * @return An N-D int32 tensor the size of values containing the result of\n * applying either lower bound or upper bound (depending on side) to each\n * value. The result is not a global index to the entire Tensor, but the\n * index in the last dimension.\n * @doc {heading: 'Operations', subheading: 'Evaluation'}\n */\nfunction searchSorted_(\n sortedSequence: Tensor|TensorLike, values: Tensor|TensorLike,\n side: 'left'|'right' = 'left'): Tensor {\n const $sortedSequence =\n convertToTensor(sortedSequence, 'sortedSequence', 'searchSorted');\n const $values = convertToTensor(values, 'values', 'searchSorted');\n\n const sequenceSize = $sortedSequence.shape[$sortedSequence.shape.length - 1];\n const valuesSize = $values.shape[$values.shape.length - 1];\n const $sortedSequence2D = reshape($sortedSequence, [-1, sequenceSize]);\n const $values2D = reshape($values, [-1, valuesSize]);\n\n if ($sortedSequence2D.rank < 2) {\n throw new Error(`Sorted input argument must be at least 2-dimensional`);\n }\n if ($sortedSequence2D.shape[0] !== $values2D.shape[0]) {\n throw new Error(\n `Leading dimension of 'sortedSequence' and 'values' must match.`);\n }\n if (sizeFromShape($values2D.shape) >= INT32_MAX) {\n throw new Error(`values tensor size must less than ${INT32_MAX}`);\n }\n if ($sortedSequence2D.shape[1] >= INT32_MAX) {\n throw new Error(`trailing dim_size must less than ${\n INT32_MAX} for int32 output type, was ${$sortedSequence2D.shape[1]}`);\n }\n\n const inputs: SearchSortedInputs = {\n sortedSequence: $sortedSequence2D,\n values: $values2D,\n };\n const attrs: SearchSortedAttrs = {side};\n\n return ENGINE.runKernel(SearchSorted, inputs as {}, attrs as {});\n}\n\nexport const searchSorted = /* @__PURE__ */ op({searchSorted_});\n","/**\n * @license\n * Copyright 2022 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {TensorLike} from '../types';\nimport {searchSorted} from './search_sorted';\n\n/**\n * Searches for where a value would go in a sorted sequence.\n *\n * This is not a method for checking containment (like javascript in).\n *\n * The typical use case for this operation is \"binning\", \"bucketing\", or\n * \"discretizing\". The values are assigned to bucket-indices based on the edges\n * listed in 'sortedSequence'. This operation returns the bucket-index for each\n * value.\n *\n * The index returned corresponds to the first edge greater than or equal to the\n * value.\n *\n * The axis is not settable for this operation. It always operates on the\n * innermost dimension (axis=-1). The operation will accept any number of outer\n * dimensions.\n *\n * Note: This operation assumes that 'lowerBound' is sorted along the\n * innermost axis, maybe using 'sort(..., axis=-1)'. If the sequence is not\n * sorted no error is raised and the content of the returned tensor is not well\n * defined.\n *\n * ```js\n * const edges = tf.tensor1d([-1, 3.3, 9.1, 10.0]);\n * let values = tf.tensor1d([0.0, 4.1, 12.0]);\n * const result1 = tf.lowerBound(edges, values);\n * result1.print(); // [1, 2, 4]\n *\n * const seq = tf.tensor1d([0, 3, 9, 10, 10]);\n * values = tf.tensor1d([0, 4, 10]);\n * const result2 = tf.lowerBound(seq, values);\n * result2.print(); // [0, 2, 3]\n *\n * const sortedSequence = tf.tensor2d([[0., 3., 8., 9., 10.],\n * [1., 2., 3., 4., 5.]]);\n * values = tf.tensor2d([[9.8, 2.1, 4.3],\n * [0.1, 6.6, 4.5, ]]);\n * const result3 = tf.lowerBound(sortedSequence, values);\n * result3.print(); // [[4, 1, 2], [0, 5, 4]]\n * ```\n * @param sortedSequence: N-D. Sorted sequence.\n * @param values: N-D. Search values.\n * @return An N-D int32 tensor the size of values containing the result of\n * applying lower bound to each value. The result is not a global index to\n * the entire Tensor, but the index in the last dimension.\n * @doc {heading: 'Operations', subheading: 'Evaluation'}\n */\nexport function lowerBound(\n sortedSequence: Tensor|TensorLike, values: Tensor|TensorLike): Tensor {\n return searchSorted(sortedSequence, values, 'left');\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {MaxPool, MaxPoolAttrs, MaxPoolInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor3D, Tensor4D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport * as conv_util from './conv_util';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Computes the 2D max pooling of an image.\n *\n * @param x The input tensor, of rank 4 or rank 3 of shape\n * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.\n * @param filterSize The filter size: `[filterHeight, filterWidth]`. If\n * `filterSize` is a single number, then `filterHeight == filterWidth`.\n * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If\n * `strides` is a single number, then `strideHeight == strideWidth`.\n * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`\n * in which we sample input values across the height and width dimensions\n * in dilated pooling. Defaults to `[1, 1]`. If `dilations` is a single\n * number, then `dilationHeight == dilationWidth`. If it is greater than\n * 1, then all values of `strides` must be 1.\n * @param pad The type of padding algorithm.\n * - `same` and stride 1: output will be of same size as input,\n * regardless of filter size.\n * - `valid`: output will be smaller than input if filter is larger\n * than 1x1.\n * - For more info, see this guide:\n * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](\n * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)\n * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is\n * provided, it will default to truncate.\n */\nfunction maxPool_(\n x: T|TensorLike, filterSize: [number, number]|number,\n strides: [number, number]|number,\n pad: 'valid'|'same'|number|conv_util.ExplicitPadding,\n dimRoundingMode?: 'floor'|'round'|'ceil'): T {\n const $x = convertToTensor(x, 'x', 'maxPool');\n const dilations = 1;\n\n let x4D = $x as Tensor4D;\n let reshapedTo4D = false;\n if ($x.rank === 3) {\n reshapedTo4D = true;\n x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);\n }\n\n util.assert(\n x4D.rank === 4,\n () => `Error in maxPool: input must be rank 4 but got rank ${x4D.rank}.`);\n util.assert(\n conv_util.eitherStridesOrDilationsAreOne(strides, dilations),\n () => 'Error in maxPool: Either strides or dilations must be 1. 
' +\n `Got strides ${strides} and dilations '${dilations}'`);\n conv_util.checkPadOnDimRoundingMode('maxPool', pad, dimRoundingMode);\n const inputs: MaxPoolInputs = {x: x4D};\n const attrs: MaxPoolAttrs = {filterSize, strides, pad, dimRoundingMode};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const res = ENGINE.runKernel(\n MaxPool, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as T;\n\n if (reshapedTo4D) {\n return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) as T;\n }\n return res;\n}\n\nexport const maxPool = /* @__PURE__ */ op({maxPool_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {MaxPool3D, MaxPool3DAttrs, MaxPool3DInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor4D, Tensor5D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {checkPadOnDimRoundingMode} from './conv_util';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Computes the 3D max pooling.\n *\n * ```js\n * const x = tf.tensor5d([1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 2, 2, 1]);\n * const result = tf.maxPool3d(x, 2, 1, 'valid');\n * result.print();\n * ```\n *\n * @param x The input tensor, of rank 5 or rank 4 of shape\n * `[batch, depth, height, width, inChannels]`.\n * @param filterSize The filter size:\n * `[filterDepth, filterHeight, filterWidth]`.\n * If `filterSize` is a single number,\n * then `filterDepth == filterHeight == filterWidth`.\n * @param strides The strides of the pooling:\n * `[strideDepth, strideHeight, strideWidth]`.\n * If `strides` is a single number,\n * then `strideDepth == strideHeight == strideWidth`.\n * @param pad The type of padding algorithm.\n * - `same` and stride 1: output will be of same size as input,\n * regardless of filter size.\n * - `valid`: output will be smaller than input if filter is larger\n * than 1*1x1.\n * - For more info, see this guide:\n * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](\n * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)\n * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is\n * provided, it will default to truncate.\n * @param dataFormat An optional string from: \"NDHWC\", \"NCDHW\". Defaults to\n * \"NDHWC\". Specify the data format of the input and output data. With the\n * default format \"NDHWC\", the data is stored in the order of: [batch,\n * depth, height, width, channels]. 
Only \"NDHWC\" is currently supported.\n * @doc {heading: 'Operations', subheading: 'Convolution'}\n */\nfunction maxPool3d_(\n x: T|TensorLike, filterSize: [number, number, number]|number = [1, 1, 1],\n strides: [number, number, number]|number, pad: 'valid'|'same'|number,\n dimRoundingMode?: 'floor'|'round'|'ceil',\n dataFormat: 'NDHWC'|'NCDHW' = 'NDHWC'): T {\n const $x = convertToTensor(x, 'x', 'maxPool3d');\n\n let x5D = $x as Tensor5D;\n let reshapedTo5D = false;\n if ($x.rank === 4) {\n reshapedTo5D = true;\n x5D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2], $x.shape[3]]);\n }\n\n util.assert(\n x5D.rank === 5,\n () => `Error in maxPool3d: x must be rank 5 but got rank ${x5D.rank}.`);\n util.assert(\n dataFormat === 'NDHWC',\n () => `Error in maxPool3d: Only NDHWC is currently supported, ` +\n `but got dataFormat of ${dataFormat}`);\n checkPadOnDimRoundingMode('maxPool3d', pad, dimRoundingMode);\n const inputs: MaxPool3DInputs = {x: x5D};\n const attrs:\n MaxPool3DAttrs = {filterSize, strides, pad, dimRoundingMode, dataFormat};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const res = ENGINE.runKernel(\n MaxPool3D, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as T;\n\n if (reshapedTo5D) {\n return reshape(\n res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]) as\n T;\n }\n\n return res;\n}\n\nexport const maxPool3d = /* @__PURE__ */ op({maxPool3d_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {MaxPoolWithArgmax, MaxPoolWithArgmaxAttrs, MaxPoolWithArgmaxInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor, Tensor4D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes the 2D max pooling of an image with Argmax index.\n * The indices in argmax are flattened, so that a maximum value at position `[b,\n * y, x, c]` becomes flattened index: `(y * width + x) * channels + c` if\n * include_batch_in_index is False; `((b * height + y) * width + x) * channels\n * +c` if include_batch_in_index is True.\n *\n * The indices returned are always in `[0, height) x [0, width)` before\n * flattening.\n *\n * @param x The input tensor, of rank 4 or rank 3 of shape\n * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.\n * @param filterSize The filter size: `[filterHeight, filterWidth]`. If\n * `filterSize` is a single number, then `filterHeight == filterWidth`.\n * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If\n * `strides` is a single number, then `strideHeight == strideWidth`.\n * @param dataFormat An optional string from: \"NDHWC\", \"NCDHW\". 
Defaults to\n * \"NDHWC\". Specify the data format of the input and output data. With the\n * default format \"NDHWC\", the data is stored in the order of: [batch,\n * depth, height, width, channels]. Only \"NDHWC\" is currently supported.\n * @param pad The type of padding algorithm.\n * - `same` and stride 1: output will be of same size as input,\n * regardless of filter size.\n * - `valid`: output will be smaller than input if filter is larger\n * than 1x1.\n * - For more info, see this guide:\n * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](\n * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)\n * @param includeBatchIndex Defaults to False. Whether to include batch\n * dimension in flattened index of argmax.\n *\n * @doc {heading: 'Operations', subheading: 'Convolution'}\n */\nfunction maxPoolWithArgmax_(\n x: T|TensorLike, filterSize: [number, number]|number,\n strides: [number, number]|number, pad: 'valid'|'same'|number,\n includeBatchInIndex = false): NamedTensorMap {\n const $x = convertToTensor(x, 'x', 'maxPoolWithArgmax');\n\n const inputs: MaxPoolWithArgmaxInputs = {x: $x};\n const attrs:\n MaxPoolWithArgmaxAttrs = {filterSize, strides, pad, includeBatchInIndex};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const result = ENGINE.runKernel(\n MaxPoolWithArgmax, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as Tensor[];\n\n return {result: result[0], indexes: result[1]};\n}\n\nexport const maxPoolWithArgmax = /* @__PURE__ */ op({maxPoolWithArgmax_});\n","/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Mean, MeanAttrs, MeanInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes the mean of elements across dimensions of a `tf.Tensor`.\n *\n * Reduces `x` along the dimensions given in `axis`. Unless `keepDims` is\n * true, the rank of the `tf.Tensor` is reduced by 1 for each entry in `axis`.\n * If `keepDims` is true, the reduced dimensions are retained with length 1.\n * If `axis` has no entries, all dimensions are reduced, and a `tf.Tensor` with\n * a single element is returned.\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3]);\n *\n * x.mean().print(); // or tf.mean(a)\n * ```\n *\n * ```js\n * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n *\n * const axis = 1;\n * x.mean(axis).print(); // or tf.mean(x, axis)\n * ```\n *\n * @param x The input tensor.\n * @param axis The dimension(s) to reduce. 
By default it reduces\n * all dimensions.\n * @param keepDims If true, retains reduced dimensions with size 1.\n *\n * @doc {heading: 'Operations', subheading: 'Reduction'}\n */\nfunction mean_(\n x: Tensor|TensorLike, axis: number|number[] = null, keepDims = false): T {\n const $x = convertToTensor(x, 'x', 'mean');\n\n const inputs: MeanInputs = {x: $x};\n const attrs: MeanAttrs = {axis, keepDims};\n\n return ENGINE.runKernel(\n Mean, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const mean = /* @__PURE__ */ op({mean_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Tensor} from '../tensor';\nimport {DataType, Rank, ShapeMap} from '../types';\nimport {assertNonNegativeIntegerDimensions, makeZerosTypedArray, sizeFromShape} from '../util';\n\nimport {complex} from './complex';\n\n/**\n * Creates a `tf.Tensor` with all elements set to 0.\n *\n * ```js\n * tf.zeros([2, 2]).print();\n * ```\n *\n * @param shape An array of integers defining the output tensor shape.\n * @param dtype The type of an element in the resulting tensor. Can\n * be 'float32', 'int32' or 'bool'. Defaults to 'float'.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function zeros(\n shape: ShapeMap[R], dtype: DataType = 'float32'): Tensor {\n assertNonNegativeIntegerDimensions(shape);\n if (dtype === 'complex64') {\n const real = zeros(shape, 'float32');\n const imag = zeros(shape, 'float32');\n return complex(real, imag);\n }\n const values = makeZerosTypedArray(sizeFromShape(shape), dtype);\n return ENGINE.makeTensor(values, shape, dtype) as Tensor;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Tensor} from '../tensor';\nimport {DataType, Rank, ShapeMap} from '../types';\nimport {makeOnesTypedArray, sizeFromShape} from '../util';\nimport {assertNonNegativeIntegerDimensions} from '../util_base';\nimport {complex} from './complex';\nimport {zeros} from './zeros';\n\n/**\n * Creates a `tf.Tensor` with all elements set to 1.\n *\n * ```js\n * tf.ones([2, 2]).print();\n * ```\n *\n * @param shape An array of integers defining the output tensor shape.\n * @param dtype The type of an element in the resulting tensor. Defaults to\n * 'float'.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function ones(\n shape: ShapeMap[R], dtype: DataType = 'float32'): Tensor {\n assertNonNegativeIntegerDimensions(shape);\n if (dtype === 'complex64') {\n const real = ones(shape, 'float32');\n const imag = zeros(shape, 'float32');\n return complex(real, imag);\n }\n const values = makeOnesTypedArray(sizeFromShape(shape), dtype);\n return ENGINE.makeTensor(values, shape, dtype) as Tensor;\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {matMul} from './mat_mul';\nimport {ones} from './ones';\nimport {reshape} from './reshape';\nimport {Tensor} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {sizeFromShape} from '../util_base';\n\n/**\n * Broadcasts parameters for evaluation on an N-D grid.\n *\n * Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`\n * of N-D coordinate arrays for evaluating expressions on an N-D grid.\n *\n * Notes:\n * `meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.\n * When the `indexing` argument is set to 'xy' (the default), the broadcasting\n * instructions for the first two dimensions are swapped.\n * Examples:\n * Calling `const [X, Y] = meshgrid(x, y)` with the tensors\n *\n * ```javascript\n * const x = [1, 2, 3];\n * const y = [4, 5, 6];\n * const [X, Y] = tf.meshgrid(x, y);\n * // X = [[1, 2, 3],\n * // [1, 2, 3],\n * // [1, 2, 3]]\n * // Y = [[4, 4, 4],\n * // [5, 5, 5],\n * // [6, 6, 6]]\n * ```\n *\n * @param x Tensor with rank geq 1.\n * @param y Tensor with rank geq 1.\n * @param indexing\n *\n * @doc {heading: 'Operations', subheading: 'Slicing and Joining'}\n */\nexport function meshgrid(\n x?: T|TensorLike, y?: T|TensorLike, {indexing = 'xy'} = {}): T[] {\n if (indexing !== 'xy' && indexing !== 'ij') {\n throw new TypeError(\n `${indexing} is not a valid third argument to meshgrid`);\n }\n if (x === undefined) {\n return [];\n }\n let $x = convertToTensor(\n x, 'x', 'meshgrid', x instanceof Tensor ? x.dtype : 'float32');\n\n if (y === undefined) {\n return [$x];\n }\n let $y = convertToTensor(\n y, 'y', 'meshgrid', y instanceof Tensor ? y.dtype : 'float32');\n\n const w = sizeFromShape($x.shape);\n const h = sizeFromShape($y.shape);\n\n if (indexing === 'xy') {\n $x = reshape($x, [1, -1]) as T;\n $y = reshape($y, [-1, 1]) as T;\n return [\n matMul(ones([h, 1], $x.dtype), $x),\n matMul($y, ones([1, w], $y.dtype)),\n ];\n }\n\n $x = reshape($x, [-1, 1]) as T;\n $y = reshape($y, [1, -1]) as T;\n return [\n matMul($x, ones([1, h], $x.dtype)),\n matMul(ones([w, 1], $y.dtype), $y),\n ];\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Minimum, MinimumInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {assertAndGetBroadcastShape} from './broadcast_util';\nimport {cast} from './cast';\nimport {op} from './operation';\n\n/**\n * Returns the min of a and b (`a < b ? a : b`) element-wise.\n * Supports broadcasting.\n *\n * We also expose `minimumStrict` which has the same signature as this op and\n * asserts that `a` and `b` are the same shape (does not broadcast).\n *\n * ```js\n * const a = tf.tensor1d([1, 4, 3, 16]);\n * const b = tf.tensor1d([1, 2, 9, 4]);\n *\n * a.minimum(b).print(); // or tf.minimum(a, b)\n * ```\n *\n * ```js\n * // Broadcast minimum a with b.\n * const a = tf.tensor1d([2, 4, 6, 8]);\n * const b = tf.scalar(5);\n *\n * a.minimum(b).print(); // or tf.minimum(a, b)\n * ```\n *\n * @param a The first tensor.\n * @param b The second tensor. Must have the same type as `a`.\n *\n * @doc {heading: 'Operations', subheading: 'Arithmetic'}\n */\nfunction minimum_(\n a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n let $a = convertToTensor(a, 'a', 'minimum');\n let $b = convertToTensor(b, 'b', 'minimum');\n [$a, $b] = makeTypesMatch($a, $b);\n\n if ($a.dtype === 'bool') {\n $a = cast($a, 'int32');\n $b = cast($b, 'int32');\n }\n\n assertAndGetBroadcastShape($a.shape, $b.shape);\n\n const inputs: MinimumInputs = {a: $a, b: $b};\n\n return ENGINE.runKernel(Minimum, inputs as unknown as NamedTensorMap);\n}\n\nexport const minimum = /* @__PURE__ */ op({minimum_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {MirrorPad, MirrorPadAttrs, MirrorPadInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\n\n/**\n * Pads a `tf.Tensor` using mirror padding.\n *\n * This operation implements the `REFLECT` and `SYMMETRIC` modes of pad.\n *\n * ```js\n * const x = tf.range(0, 9).reshape([1, 1, 3, 3]);\n * x.mirrorPad([[0, 0], [0, 0], [2, 2], [2, 2]], 'reflect').print();\n * ```\n * @param x The tensor to pad.\n * @param paddings An array of length `R` (the rank of the tensor), where\n * each element is a length-2 tuple of ints `[padBefore, padAfter]`,\n * specifying how much to pad along each dimension of the tensor.\n * In \"reflect\" mode, the padded regions do not include the borders,\n * while in \"symmetric\" mode the padded regions do include the borders.\n * For example, if the input is `[1, 2, 3]` and paddings is `[0, 2]`,\n * then the output is `[1, 2, 3, 2, 1]` in \"reflect\" mode, and\n * `[1, 2, 3, 3, 2]` in \"symmetric\" mode.\n * If `mode` is \"reflect\" then both `paddings[D, 0]` and `paddings[D, 1]`\n * must be no greater than `x.shape[D] - 1`. If mode is \"symmetric\"\n * then both `paddings[D, 0]` and `paddings[D, 1]` must be no greater than\n * `x.shape[D]`\n * @param mode String to specify padding mode. Can be `'reflect' | 'symmetric'`\n */\n/** @doc {heading: 'Tensors', subheading: 'Transformations'} */\nfunction mirrorPad_(\n x: T|TensorLike, paddings: Array<[number, number]>,\n mode: 'reflect'|'symmetric'): T {\n util.assert(\n mode === 'reflect' || mode === 'symmetric',\n () => `Invalid mode. Mode must be either reflect or symmetric. ` +\n `Got ${mode}.`);\n\n const $x = convertToTensor(x, 'x', 'mirrorPad');\n if ($x.rank === 0) {\n throw new Error(\n 'mirrorPad(scalar) is not defined. ' +\n 'Pass non-scalar to mirrorPad');\n }\n util.assert(\n paddings.length === $x.rank,\n () => `Padding doesn't match input. Must be ${$x.rank}. ` +\n `Got ${paddings.length}.`);\n const shapeOffset = mode === 'reflect' ? 1 : 0;\n for (let i = 0; i < $x.rank; i++) {\n util.assert(\n paddings[i].length === 2,\n () => `Invalid number of paddings. 
Must be length of 2 each.`);\n util.assert(\n paddings[i][0] >= 0 && paddings[i][0] <= $x.shape[i] - shapeOffset &&\n paddings[i][1] >= 0 && paddings[i][1] <= $x.shape[i] - shapeOffset,\n () => `Padding in dimension ${i} cannot be greater than or equal ` +\n `to ${$x.shape[i] - shapeOffset} or less than 0 for input of ` +\n `shape ${$x.shape}`);\n }\n\n const attrs: MirrorPadAttrs = {paddings, mode};\n const inputs: MirrorPadInputs = {x: $x};\n return ENGINE.runKernel(\n MirrorPad, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const mirrorPad = /* @__PURE__ */ op({mirrorPad_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Mod, ModInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Returns the mod of a and b element-wise.\n * `floor(x / y) * y + mod(x, y) = x`\n * Supports broadcasting.\n *\n * We also expose `tf.modStrict` which has the same signature as this op and\n * asserts that `a` and `b` are the same shape (does not broadcast).\n *\n * ```js\n * const a = tf.tensor1d([1, 4, 3, 16]);\n * const b = tf.tensor1d([1, 2, 9, 4]);\n *\n * a.mod(b).print(); // or tf.mod(a, b)\n * ```\n *\n * ```js\n * // Broadcast a mod b.\n * const a = tf.tensor1d([2, 4, 6, 8]);\n * const b = tf.scalar(5);\n *\n * a.mod(b).print(); // or tf.mod(a, b)\n * ```\n *\n * @param a The first tensor.\n * @param b The second tensor. Must have the same type as `a`.\n *\n * @doc {heading: 'Operations', subheading: 'Arithmetic'}\n */\nfunction mod_(a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n let $a = convertToTensor(a, 'a', 'mod');\n let $b = convertToTensor(b, 'b', 'mod');\n [$a, $b] = makeTypesMatch($a, $b);\n\n const inputs: ModInputs = {a: $a, b: $b};\n\n return ENGINE.runKernel(Mod, inputs as unknown as NamedTensorMap);\n}\n\nexport const mod = /* @__PURE__ */ op({mod_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {parseAxisParam} from '../util';\n\nimport {expandShapeToKeepDim} from './axis_util';\nimport {cast} from './cast';\nimport {mean} from './mean';\nimport {op} from './operation';\nimport {reshape} from './reshape';\nimport {square} from './square';\nimport {sub} from './sub';\n\n/**\n * Calculates the mean and variance of `x`. The mean and variance are\n * calculated by aggregating the contents of `x` across `axes`. If `x` is\n * 1-D and `axes = [0]` this is just the mean and variance of a vector.\n *\n * @param x The input tensor.\n * @param axis The dimension(s) along with to compute mean and\n * variance. By default it reduces all dimensions.\n * @param keepDims If true, the moments have the same dimensionality as the\n * input.\n * @return An object with two keys: `mean` and `variance`.\n *\n * @doc {heading: 'Operations', subheading: 'Normalization'}\n */\nfunction moments_(\n x: Tensor|TensorLike, axis: number|number[] = null,\n keepDims = false): {mean: Tensor, variance: Tensor} {\n x = convertToTensor(x, 'x', 'moments');\n const axes = parseAxisParam(axis, x.shape);\n const xMean = mean(x, axes, keepDims);\n let keepDimsShape = xMean.shape;\n if (!keepDims) {\n keepDimsShape = expandShapeToKeepDim(xMean.shape, axes);\n }\n const devSquared =\n square(sub(cast(x, 'float32'), reshape(xMean, keepDimsShape)));\n const variance = mean(devSquared, axes, keepDims);\n return {mean: xMean, variance};\n}\n\nexport const moments = /* @__PURE__ */ op({moments_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor2D} from '../tensor';\nimport {convertToTensor, convertToTensorArray} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {op} from './operation';\n\n/**\n * @docalias (data: Tensor2D, c: Tensor2D, h: Tensor2D): [Tensor2D, Tensor2D]\n */\nexport type LSTMCellFunc = {\n (data: Tensor2D, c: Tensor2D, h: Tensor2D): [Tensor2D, Tensor2D];\n};\n\n/**\n * Computes the next states and outputs of a stack of LSTMCells.\n *\n * Each cell output is used as input to the next cell.\n *\n * Returns `[cellState, cellOutput]`.\n *\n * Derived from tf.contrib.rn.MultiRNNCell.\n *\n * @param lstmCells Array of LSTMCell functions.\n * @param data The input to the cell.\n * @param c Array of previous cell states.\n * @param h Array of previous cell outputs.\n *\n * @doc {heading: 'Operations', subheading: 'RNN'}\n */\nfunction multiRNNCell_(\n lstmCells: LSTMCellFunc[], data: Tensor2D|TensorLike,\n c: Array,\n h: Array): [Tensor2D[], Tensor2D[]] {\n const $data = convertToTensor(data, 'data', 'multiRNNCell');\n const $c = convertToTensorArray(c, 'c', 'multiRNNCell');\n const $h = convertToTensorArray(h, 'h', 'multiRNNCell');\n\n let input = $data;\n const newStates = [];\n for (let i = 0; i < lstmCells.length; i++) {\n const output = lstmCells[i](input, $c[i], $h[i]);\n newStates.push(output[0]);\n newStates.push(output[1]);\n input = output[1];\n }\n const newC: Tensor2D[] = [];\n const newH: Tensor2D[] = [];\n for (let i = 0; i < newStates.length; i += 2) {\n newC.push(newStates[i]);\n newH.push(newStates[i + 1]);\n }\n return [newC, newH];\n}\nexport const multiRNNCell = /* @__PURE__ */ op({multiRNNCell_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Multinomial, MultinomialAttrs, MultinomialInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor1D, Tensor2D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Creates a `tf.Tensor` with values drawn from a multinomial distribution.\n *\n * ```js\n * const probs = tf.tensor([.75, .25]);\n * tf.multinomial(probs, 3).print();\n * ```\n *\n * @param logits 1D array with unnormalized log-probabilities, or\n * 2D array of shape `[batchSize, numOutcomes]`. See the `normalized`\n * parameter.\n * @param numSamples Number of samples to draw for each row slice.\n * @param seed The seed number.\n * @param normalized Whether the provided `logits` are normalized true\n * probabilities (sum to 1). Defaults to false.\n * @return 1D array of shape `[numSamples]`, or 2D array of shape\n * `[batchSize, numSamples]`, depending on the rank of the input.\n *\n * @doc {heading: 'Tensors', subheading: 'Random'}\n */\nfunction multinomial_(\n logits: Tensor1D|Tensor2D|TensorLike, numSamples: number, seed?: number,\n normalized = false): Tensor1D|Tensor2D {\n const $logits = convertToTensor(logits, 'logits', 'multinomial');\n const numOutcomes = $logits.size;\n const origRank = $logits.rank;\n if (numOutcomes < 2) {\n throw new Error(\n `Error in multinomial: you need at least 2 outcomes, but got ` +\n `${numOutcomes}.`);\n }\n if (origRank > 2) {\n throw new Error(`Rank of probabilities must be 1 or 2, but is ${origRank}`);\n }\n // TODO(lina128): Investigate correct seed behavior. The code seems not allow\n // setting see to 0.\n seed = seed || Math.random();\n\n // The kernel only accepts (and returns) rank 2 tensors.\n const logits2D: Tensor2D =\n origRank === 1 ? reshape($logits, [1, -1]) : $logits as Tensor2D;\n\n const inputs: MultinomialInputs = {logits: logits2D};\n const attrs: MultinomialAttrs = {numSamples, seed, normalized};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const res = ENGINE.runKernel(\n Multinomial, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as Tensor2D;\n\n // tslint:disable-next-line:no-unnecessary-type-assertion\n return origRank === 1 ? reshape(res, [res.size]) as Tensor1D : res;\n}\n\nexport const multinomial = /* @__PURE__ */ op({multinomial_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {NotEqual, NotEqualInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {assertAndGetBroadcastShape} from './broadcast_util';\nimport {op} from './operation';\n\n/**\n * Returns the truth value of (a != b) element-wise. Supports broadcasting.\n *\n * ```js\n * const a = tf.tensor1d([1, 2, 3]);\n * const b = tf.tensor1d([0, 2, 3]);\n *\n * a.notEqual(b).print();\n * ```\n * @param a The first input tensor.\n * @param b The second input tensor. Must have the same dtype as `a`.\n *\n * @doc {heading: 'Operations', subheading: 'Logical'}\n */\nfunction notEqual_(\n a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n let $a = convertToTensor(a, 'a', 'notEqual', 'string_or_numeric');\n let $b = convertToTensor(b, 'b', 'notEqual', 'string_or_numeric');\n [$a, $b] = makeTypesMatch($a, $b);\n\n assertAndGetBroadcastShape($a.shape, $b.shape);\n\n const inputs: NotEqualInputs = {a: $a, b: $b};\n\n return ENGINE.runKernel(NotEqual, inputs as unknown as NamedTensorMap);\n}\n\nexport const notEqual = /* @__PURE__ */ op({notEqual_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {OnesLike, OnesLikeInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Creates a `tf.Tensor` with all elements set to 1 with the same shape as the\n * given tensor.\n *\n * ```js\n * const x = tf.tensor([1, 2]);\n * tf.onesLike(x).print();\n * ```\n * @param x A tensor.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction onesLike_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'onesLike');\n\n const inputs: OnesLikeInputs = {x: $x};\n return ENGINE.runKernel(OnesLike, inputs as unknown as NamedTensorMap);\n}\n\nexport const onesLike = /* @__PURE__ */ op({onesLike_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor1D, Tensor2D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {matMul} from './mat_mul';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Computes the outer product of two vectors, `v1` and `v2`.\n *\n * ```js\n * const a = tf.tensor1d([1, 2, 3]);\n * const b = tf.tensor1d([3, 4, 5]);\n *\n * tf.outerProduct(a, b).print();\n * ```\n * @param v1 The first vector in the outer product operation.\n * @param v2 The second vector in the outer product operation.\n *\n * @doc {heading: 'Operations', subheading: 'Matrices'}\n */\nfunction outerProduct_(\n v1: Tensor1D|TensorLike, v2: Tensor1D|TensorLike): Tensor2D {\n const $v1 = convertToTensor(v1, 'v1', 'outerProduct');\n const $v2 = convertToTensor(v2, 'v2', 'outerProduct');\n\n util.assert(\n $v1.rank === 1 && $v2.rank === 1,\n () => `Error in outerProduct: inputs must be rank 1, but got ranks ` +\n `${$v1.rank} and ${$v2.rank}.`);\n\n const v12D = reshape($v1, [-1, 1]);\n const v22D = reshape($v2, [1, -1]);\n return matMul(v12D, v22D);\n}\n\nexport const outerProduct = /* @__PURE__ */ op({outerProduct_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {PadV2, PadV2Attrs, PadV2Inputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Pads a `tf.Tensor` with a given value and paddings.\n *\n * This operation implements `CONSTANT` mode. For `REFLECT` and `SYMMETRIC`,\n * refer to `tf.mirrorPad`.\n *\n * Also available are stricter rank-specific methods with the same signature\n * as this method that assert that `paddings` is of given length.\n * - `tf.pad1d`\n * - `tf.pad2d`\n * - `tf.pad3d`\n * - `tf.pad4d`\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3, 4]);\n * x.pad([[1, 2]]).print();\n * ```\n * @param x The tensor to pad.\n * @param paddings An array of length `R` (the rank of the tensor), where\n * each element is a length-2 tuple of ints `[padBefore, padAfter]`,\n * specifying how much to pad along each dimension of the tensor.\n * @param constantValue The pad value to use. Defaults to 0.\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction pad_(\n x: T|TensorLike, paddings: Array<[number, number]>, constantValue = 0): T {\n const $x = convertToTensor(x, 'x', 'pad');\n if ($x.rank === 0) {\n throw new Error('pad(scalar) is not defined. Pass non-scalar to pad');\n }\n\n const attrs: PadV2Attrs = {paddings, constantValue};\n const inputs: PadV2Inputs = {x: $x};\n return ENGINE.runKernel(\n PadV2, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const pad = /* @__PURE__ */ op({pad_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor1D} from '../tensor';\nimport {TensorLike} from '../types';\nimport {assert} from '../util';\nimport {op} from './operation';\nimport {pad} from './pad';\n\n/**\n * Pads a `tf.Tensor1D` with a given value and paddings. 
See `pad` for details.\n */\nfunction pad1d_(\n x: Tensor1D|TensorLike, paddings: [number, number],\n constantValue = 0): Tensor1D {\n assert(\n paddings.length === 2,\n () => 'Invalid number of paddings. Must be length of 2.');\n return pad(x, [paddings], constantValue);\n}\n\nexport const pad1d = /* @__PURE__ */ op({pad1d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor2D} from '../tensor';\nimport {TensorLike} from '../types';\nimport {assert} from '../util';\nimport {op} from './operation';\nimport {pad} from './pad';\n\n/**\n * Pads a `tf.Tensor2D` with a given value and paddings. See `pad` for details.\n */\nfunction pad2d_(\n x: Tensor2D|TensorLike, paddings: [[number, number], [number, number]],\n constantValue = 0): Tensor2D {\n assert(\n paddings.length === 2 && paddings[0].length === 2 &&\n paddings[1].length === 2,\n () => 'Invalid number of paddings. Must be length of 2 each.');\n return pad(x, paddings, constantValue);\n}\n\nexport const pad2d = /* @__PURE__ */ op({pad2d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor3D} from '../tensor';\nimport {TensorLike} from '../types';\nimport {assert} from '../util';\nimport {op} from './operation';\nimport {pad} from './pad';\n\n/**\n * Pads a `tf.Tensor3D` with a given value and paddings. See `pad` for details.\n */\nfunction pad3d_(\n x: Tensor3D|TensorLike,\n paddings: [[number, number], [number, number], [number, number]],\n constantValue = 0): Tensor3D {\n assert(\n paddings.length === 3 && paddings[0].length === 2 &&\n paddings[1].length === 2 && paddings[2].length === 2,\n () => 'Invalid number of paddings. Must be length of 2 each.');\n return pad(x, paddings, constantValue);\n}\n\nexport const pad3d = /* @__PURE__ */ op({pad3d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor4D} from '../tensor';\nimport {TensorLike} from '../types';\nimport {assert} from '../util';\nimport {op} from './operation';\nimport {pad} from './pad';\n\n/**\n * Pads a `tf.Tensor4D` with a given value and paddings. See `pad` for details.\n */\nfunction pad4d_(\n x: Tensor4D|TensorLike,\n paddings:\n [\n [number, number], [number, number], [number, number], [number, number]\n ],\n constantValue = 0): Tensor4D {\n assert(\n paddings.length === 4 && paddings[0].length === 2 &&\n paddings[1].length === 2 && paddings[2].length === 2 &&\n paddings[3].length === 2,\n () => 'Invalid number of paddings. Must be length of 2 each.');\n return pad(x, paddings, constantValue);\n}\n\nexport const pad4d = /* @__PURE__ */ op({pad4d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {SpaceToBatchND, SpaceToBatchNDAttrs, SpaceToBatchNDInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\n\n/**\n * This operation divides \"spatial\" dimensions `[1, ..., M]` of the input into\n * a grid of blocks of shape `blockShape`, and interleaves these blocks with\n * the \"batch\" dimension (0) such that in the output, the spatial\n * dimensions `[1, ..., M]` correspond to the position within the grid,\n * and the batch dimension combines both the position within a spatial block\n * and the original batch position. Prior to division into blocks,\n * the spatial dimensions of the input are optionally zero padded\n * according to `paddings`. See below for a precise description.\n *\n * ```js\n * const x = tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]);\n * const blockShape = [2, 2];\n * const paddings = [[0, 0], [0, 0]];\n *\n * x.spaceToBatchND(blockShape, paddings).print();\n * ```\n *\n * @param x A `tf.Tensor`. N-D with `x.shape` = `[batch] + spatialShape +\n * remainingShape`, where spatialShape has `M` dimensions.\n * @param blockShape A 1-D array. 
Must have shape `[M]`, all values must\n * be >= 1.\n * @param paddings A 2-D array. Must have shape `[M, 2]`, all values must be >=\n * 0. `paddings[i] = [padStart, padEnd]` specifies the amount to zero-pad\n * from input dimension `i + 1`, which corresponds to spatial dimension `i`. It\n * is required that\n * `(inputShape[i + 1] + padStart + padEnd) % blockShape[i] === 0`\n *\n * This operation is equivalent to the following steps:\n *\n * 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the input\n * according to `paddings` to produce `padded` of shape paddedShape.\n *\n * 2. Reshape `padded` to `reshapedPadded` of shape:\n * `[batch] + [paddedShape[1] / blockShape[0], blockShape[0], ...,\n * paddedShape[M] / blockShape[M-1], blockShape[M-1]] + remainingShape`\n *\n * 3. Permute dimensions of `reshapedPadded` to produce `permutedReshapedPadded`\n * of shape: `blockShape + [batch] + [paddedShape[1] / blockShape[0], ...,\n * paddedShape[M] / blockShape[M-1]] + remainingShape`\n *\n * 4. Reshape `permutedReshapedPadded` to flatten `blockShape` into the\n * batch dimension, producing an output tensor of shape:\n * `[batch * prod(blockShape)] + [paddedShape[1] / blockShape[0], ...,\n * paddedShape[M] / blockShape[M-1]] + remainingShape`\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction spaceToBatchND_(\n x: T|TensorLike, blockShape: number[], paddings: number[][]): T {\n const $x = convertToTensor(x, 'x', 'spaceToBatchND');\n\n util.assert(\n $x.rank >= 1 + blockShape.length,\n () => `input rank ${$x.rank} should be > than [blockShape] ${\n blockShape.length}`);\n\n util.assert(\n paddings.length === blockShape.length,\n () => `paddings.shape[0] ${\n paddings.length} must be equal to [blockShape] ${blockShape.length}`);\n\n util.assert(\n $x.shape.reduce(\n (a, b, i) => {\n if (i > 0 && i <= blockShape.length) {\n return a &&\n ((b + paddings[i - 1][0] + paddings[i - 1][1]) %\n blockShape[i - 1] ===\n 0);\n }\n return a;\n },\n true),\n () => `input spatial dimensions ${$x.shape.slice(1)} with paddings ${\n paddings.toString()} must be divisible by blockShapes ${\n blockShape.toString()}`);\n\n const inputs: SpaceToBatchNDInputs = {x: $x};\n const attrs: SpaceToBatchNDAttrs = {blockShape, paddings};\n\n return ENGINE.runKernel(\n SpaceToBatchND, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const spaceToBatchND = /* @__PURE__ */ op({spaceToBatchND_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor3D, Tensor4D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {avgPool} from './avg_pool';\nimport {batchToSpaceND} from './batch_to_space_nd';\nimport * as conv_util from './conv_util';\nimport {maxPool} from './max_pool';\nimport {op} from './operation';\nimport {reshape} from './reshape';\nimport {spaceToBatchND} from './space_to_batch_nd';\n\n/**\n * Performs an N-D pooling operation\n *\n * @param input The input tensor, of rank 4 or rank 3 of shape\n * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.\n * @param windowShape The filter size: `[filterHeight, filterWidth]`. If\n * `filterSize` is a single number, then `filterHeight == filterWidth`.\n * @param poolingType The type of pooling, either 'max' or 'avg'.\n * @param pad The type of padding algorithm:\n * - `same` and stride 1: output will be of same size as input,\n * regardless of filter size.\n * - `valid`: output will be smaller than input if filter is larger\n * than 1x1.\n * - For more info, see this guide:\n * [https://www.tensorflow.org/api_guides/python/nn#Convolution](\n * https://www.tensorflow.org/api_guides/python/nn#Convolution)\n * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`\n * in which we sample input values across the height and width dimensions\n * in dilated pooling. Defaults to `[1, 1]`. If `dilationRate` is a single\n * number, then `dilationHeight == dilationWidth`. If it is greater than\n * 1, then all values of `strides` must be 1.\n * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If\n * `strides` is a single number, then `strideHeight == strideWidth`.\n * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is\n * provided, it will default to truncate.\n *\n * @doc {heading: 'Operations', subheading: 'Convolution'}\n */\nfunction pool_(\n input: T|TensorLike, windowShape: [number, number]|number,\n poolingType: 'avg'|'max',\n pad: 'valid'|'same'|number|conv_util.ExplicitPadding,\n dilations?: [number, number]|number, strides?: [number, number]|number,\n dimRoundingMode?: 'floor'|'round'|'ceil') {\n if (dilations == null) {\n dilations = [1, 1];\n }\n if (strides == null) {\n strides = 1;\n }\n if (pad === 0) {\n pad = 'valid';\n }\n\n const $x = convertToTensor(input, 'x', 'maxPool');\n let x4D = $x as Tensor4D;\n let reshapedTo4D = false;\n\n if ($x.rank === 3) {\n reshapedTo4D = true;\n x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);\n }\n\n util.assert(\n conv_util.eitherStridesOrDilationsAreOne(strides, dilations),\n () => 'Error in pool: Either strides or dilations must be 1. 
' +\n `Got strides ${strides} and dilations '${dilations}'`);\n\n const convInfo = conv_util.computePool2DInfo(\n x4D.shape, windowShape, strides, dilations, pad);\n const dilation: [number, number] =\n [convInfo.dilationHeight, convInfo.dilationWidth];\n\n // The following implementation does batchToSpace(pool(spaceToBatch(x)))\n // whenever dilation > 1 since the TF kernels do not support dilation > 1.\n // tslint:disable-next-line:max-line-length\n // https://github.com/tensorflow/tensorflow/blob/50f6bb67dc98c9b74630b6047aae7a4f8a40fd02/tensorflow/python/ops/nn_ops.py#L1037\n\n let basePadding: number[][];\n if (pad === 'same') {\n basePadding = withSpaceToBatchBasePaddings(\n [convInfo.filterHeight, convInfo.filterWidth], dilation);\n } else {\n basePadding = [[0, 0], [0, 0]];\n }\n\n const isDilationOne = dilation[0] === 1 && dilation[1] === 1;\n const [adjustedPadding, adjustedCrops] = requiredSpaceToBatchPaddings(\n [convInfo.inHeight, convInfo.inWidth], dilation, basePadding);\n const convertedPad = isDilationOne ? pad : 'valid';\n const convertedX =\n isDilationOne ? x4D : spaceToBatchND(x4D, dilation, adjustedPadding);\n\n const forwardOp = poolingType === 'avg' ?\n () => avgPool(convertedX, windowShape, strides, convertedPad,\n dimRoundingMode) :\n () => maxPool(convertedX, windowShape, strides, convertedPad,\n dimRoundingMode);\n const y = forwardOp();\n\n const res = isDilationOne ? y : batchToSpaceND(y, dilation, adjustedCrops);\n\n if (reshapedTo4D) {\n return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) as T;\n }\n\n return res as T;\n}\n\n// Helper function to compute crops and paddings for pool with dilation > 1.\n// tslint:disable-next-line:max-line-length\n// https://github.com/tensorflow/tensorflow/blob/50f6bb67dc98c9b74630b6047aae7a4f8a40fd02/tensorflow/python/ops/array_ops.py#L2184\nfunction requiredSpaceToBatchPaddings(\n inputShape: [number, number], blockShape: [number, number],\n basePadding: number[][]) {\n const padStart = basePadding.map(b => b[0]);\n const origPadEnd = basePadding.map(b => b[1]);\n const fullInputShape = inputShape.concat(padStart, origPadEnd);\n const padEndExtra = blockShape.map((b, i) => (b - fullInputShape[i] % b) % b);\n const padEnd = origPadEnd.map((s, i) => s + padEndExtra[i]);\n const paddings = blockShape.map((_, i) => [padStart[i], padEnd[i]]);\n const crops = blockShape.map((_, i) => [0, padEndExtra[i]]);\n return [paddings, crops];\n}\n\n// Helper function to compute base paddings for pool with dilation > 1.\n// tslint:disable-next-line:max-line-length\n// https://github.com/tensorflow/tensorflow/blob/50f6bb67dc98c9b74630b6047aae7a4f8a40fd02/tensorflow/python/ops/nn_ops.py#L524\nfunction withSpaceToBatchBasePaddings(\n filterShape: [number, number], dilation: [number, number]) {\n // Spatial dimensions of the filters and the upsampled filters in which we\n // introduce (rate - 1) zeros between consecutive filter values.\n const dilatedFilterShape = filterShape.map((s, i) => {\n return s + (s - 1) * (dilation[i] - 1);\n });\n const padExtraShape = dilatedFilterShape.map(s => s - 1);\n\n // When padding is odd, we pad more at end, following the same\n // convention as conv2d.\n const padExtraStart = padExtraShape.map(s => Math.floor(s / 2));\n const padExtraEnd = padExtraShape.map((s, i) => s - padExtraStart[i]);\n return padExtraShape.map((_, i) => {\n return [padExtraStart[i], padExtraEnd[i]];\n });\n}\n\nexport const pool = /* @__PURE__ */ op({pool_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Prelu, PreluInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes leaky rectified linear element-wise with parametric alphas.\n *\n * `x < 0 ? alpha * x : f(x) = x`\n *\n * ```js\n * const x = tf.tensor1d([-1, 2, -3, 4]);\n * const alpha = tf.scalar(0.1);\n *\n * x.prelu(alpha).print(); // or tf.prelu(x, alpha)\n * ```\n * @param x The input tensor.\n * @param alpha Scaling factor for negative values.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction prelu_(x: T|TensorLike, alpha: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'prelu');\n const $alpha = convertToTensor(alpha, 'alpha', 'prelu');\n\n const inputs: PreluInputs = {x: $x, alpha: $alpha};\n return ENGINE.runKernel(Prelu, inputs as unknown as NamedTensorMap);\n}\n\nexport const prelu = /* @__PURE__ */ op({prelu_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Prod, ProdAttrs, ProdInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {cast} from './cast';\nimport {op} from './operation';\n\n/**\n * Computes the product of elements across dimensions of a `tf.Tensor`.\n *\n * Reduces the input along the dimensions given in `axes`. Unless `keepDims`\n * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in\n * `axes`. If `keepDims` is true, the reduced dimensions are retained with\n * length 1. 
If `axes` has no entries, all dimensions are reduced, and a\n * `tf.Tensor` with a single element is returned.\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3]);\n *\n * x.prod().print(); // or tf.prod(x)\n * ```\n *\n * ```js\n * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n *\n * const axis = 1;\n * x.prod(axis).print(); // or tf.prod(x, axis)\n * ```\n *\n * @param x The input tensor to compute the product over. If the dtype is `bool`\n * it will be converted to `int32` and the output dtype will be `int32`.\n * @param axis The dimension(s) to reduce. By default it reduces\n * all dimensions.\n * @param keepDims If true, retains reduced dimensions with size 1.\n *\n * @doc {heading: 'Operations', subheading: 'Reduction'}\n */\nfunction prod_(\n x: Tensor|TensorLike, axis: number|number[] = null, keepDims = false): T {\n let $x = convertToTensor(x, 'x', 'prod');\n\n if ($x.dtype === 'bool') {\n // bool is not an allowed type for the underlying kernel.\n $x = cast($x, 'int32');\n }\n\n const inputs: ProdInputs = {x: $x};\n const attrs: ProdAttrs = {axis, keepDims};\n\n return ENGINE.runKernel(\n Prod, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const prod = /* @__PURE__ */ op({prod_});\n","/**\n * @license\n * Copyright 2022 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {RaggedGather, RaggedGatherAttrs, RaggedGatherInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {op} from './operation';\n\n/**\n * Gather ragged slices from params axis 0 according to indices.\n *\n * @param paramsNestedSplits: A list of at least 1 Tensor with type 'int32' The\n * nestedRowSplits tensors that define the row-partitioning for the params\n * RaggedTensor input.\n * @param paramsDenseValues: A Tensor. The flatValues for the params\n * RaggedTensor.\n * @param indices: A Tensor. Must be one of type: int32. Indices in the\n * outermost dimension of params of the values that should be gathered.\n * @param outputRaggedRank: An int that is >= 0. The ragged rank of the output\n * RaggedTensor. outputNestedSplits will contain this number of rowSplits\n * tensors. This value should equal indices.shape.ndims + params.raggedRank\n * - 1.\n * @return A map with the following properties:\n * - outputNestedSplits: A list of outputRaggedRank Tensor objects with the\n * same type as paramsNestedSplits.\n * - outputDenseValues: A Tensor. 
Has the same type as paramsDenseValues.\n * @doc {heading: 'Operations', subheading: 'Ragged'}\n */\n\ninterface RaggedGatherMap {\n outputNestedSplits: Tensor[];\n outputDenseValues: Tensor;\n}\n\nfunction raggedGather_(\n paramsNestedSplits: Tensor[], paramsDenseValues: Tensor|TensorLike,\n indices: Tensor|TensorLike, outputRaggedRank: number): RaggedGatherMap {\n const $paramsNestedSplits = paramsNestedSplits.map(\n (t, i) => convertToTensor(t, `tensors${i}`, 'raggedGather', 'int32'));\n const $paramsDenseValues =\n convertToTensor(paramsDenseValues, 'paramsDenseValues', 'raggedGather');\n const $indices = convertToTensor(indices, 'indices', 'raggedGather', 'int32');\n\n const inputs: RaggedGatherInputs = {\n paramsNestedSplits: $paramsNestedSplits,\n paramsDenseValues: $paramsDenseValues,\n indices: $indices,\n };\n const attrs: RaggedGatherAttrs = {outputRaggedRank};\n\n const result: Tensor[] =\n ENGINE.runKernel(RaggedGather, inputs as {}, attrs as {});\n return {\n outputNestedSplits: result.slice(0, result.length - 1),\n outputDenseValues: result[result.length - 1],\n };\n}\n\nexport const raggedGather = /* @__PURE__ */ op({raggedGather_});\n","/**\n * @license\n * Copyright 2022 Google LLC.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {RaggedRange, RaggedRangeInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {op} from './operation';\n\n/**\n * Returns a RaggedTensor result composed from rtDenseValues and rtNestedSplits,\n * such that result[i] = [starts[i], starts[i] + deltas[i], ..., limits[i]]).\n *\n * @param starts: A Tensor. Must be one of the following types:\n * 'float32', 'int32'. The starts of each range.\n * @param limits: A Tensor. Must have the same type as starts. The limits of\n * each range.\n * @param deltas: A Tensor. Must have the same type as starts. The deltas of\n * each range.\n * @return A map with the following properties:\n * - rtNestedSplits: A Tensor of type 'int32'.\n * - rtDenseValues: A Tensor. 
Has the same type as starts.\n */\n\nfunction raggedRange_(\n starts: Tensor|TensorLike, limits: Tensor|TensorLike,\n deltas: Tensor|TensorLike): NamedTensorMap {\n const $starts = convertToTensor(starts, 'starts', 'raggedRange');\n const $limits =\n convertToTensor(limits, 'limits', 'raggedRange', $starts.dtype);\n const $deltas =\n convertToTensor(deltas, 'deltas', 'raggedRange', $starts.dtype);\n\n const inputs: RaggedRangeInputs = {\n starts: $starts,\n limits: $limits,\n deltas: $deltas,\n };\n\n const result: Tensor[] = ENGINE.runKernel(RaggedRange, inputs as {});\n return {\n rtNestedSplits: result[0],\n rtDenseValues: result[1],\n };\n}\n\nexport const raggedRange = /* @__PURE__ */ op({raggedRange_});\n","/**\n * @license\n * Copyright 2022 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {RaggedTensorToTensor, RaggedTensorToTensorAttrs, RaggedTensorToTensorInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {op} from './operation';\n\n/**\n * Create a dense tensor from a ragged tensor, possibly altering its shape.\n *\n * The raggedTensorToTensor op creates a dense tensor from am array of row\n * partition tensors, a value vector, and default values. If the shape is\n * unspecified, the minimal shape required to contain all the elements in the\n * ragged tensor (the natural shape) will be used. If some dimensions are left\n * unspecified, then the size of the natural shape is used in that dimension.\n *\n * The defaultValue will be broadcast to the output shape. After that, the\n * values from the ragged tensor overwrite the default values. Note that the\n * defaultValue must have less dimensions than the value.\n *\n * The row partition tensors are in the order of the dimensions. At present, the\n * types can be: \"ROW_SPLITS\": the row_splits tensor from the ragged tensor.\n * \"VALUE_ROWIDS\": the value_rowids tensor from the ragged tensor.\n * \"FIRST_DIM_SIZE\": if value_rowids is used for the first dimension, then it\n * is preceded by \"FIRST_DIM_SIZE\".\n * ```\n * @param shape: A Tensor. Must be one of the following types: 'int32'. The\n * desired shape of the output tensor. If left unspecified (empty), the\n * minimal shape required to contain all the elements in the ragged tensor\n * (the natural shape) will be used. 
If some dimensions are left\n * unspecified, then the size of the natural shape is used in that\n * dimension.\n *\n * Note that dense dimensions cannot be modified by the shape argument.\n * Trying to change the size of a dense dimension will cause the op to fail.\n * Examples: natural shape: [4, 5, 6] shape: -1 output shape: [4, 5, 6]\n *\n * natural shape: [4, 5, 6] shape: [3, -1, 2] output shape: [3, 5, 2]\n *\n * natural shape: [4, 5, 6] shape: [3, 7, 2] output shape: [3, 7, 2]\n * @param values: A Tensor. A 1D tensor representing the values of the ragged\n * tensor.\n * @param defaultValue: A Tensor. Must have the same type as values. The\n * defaultValue when the shape is larger than the ragged tensor. The\n * defaultValue is broadcast until it is the shape of the output tensor,\n * and then overwritten by values in the ragged tensor. The default value\n * must be compatible with this broadcast operation, and must have fewer\n * dimensions than the value tensor.\n * @param rowPartitionTensors: A list of at least 1 Tensor objects with the same\n * type in: 'int32'.\n * @param rowPartitionTypes: A list of strings. The types of the row partition\n * tensors. At present, these can be:\n * \"ROW_SPLITS\": the row_splits tensor from the ragged tensor.\n * \"VALUE_ROWIDS\": the value_rowids tensor from the ragged tensor.\n * \"FIRST_DIM_SIZE\": if value_rowids is used for the first dimension, then\n * it is preceeded by \"FIRST_DIM_SIZE\". The tensors are in the order of\n * the dimensions.\n * @return A Tensor. Has the same type as values.\n * @doc {heading: 'Operations', subheading: 'Ragged'}\n */\nfunction raggedTensorToTensor_(\n shape: Tensor|TensorLike, values: Tensor|TensorLike,\n defaultValue: Tensor|TensorLike, rowPartitionTensors: Tensor[],\n rowPartitionTypes: string[]): Tensor {\n const $shape =\n convertToTensor(shape, 'shape', 'raggedTensorToTensor', 'int32');\n const $values = convertToTensor(values, 'values', 'raggedTensorToTensor');\n const $defaultValue = convertToTensor(\n defaultValue, 'defaultValue', 'raggedTensorToTensor', $values.dtype);\n const $rowPartitionTensors = rowPartitionTensors.map(\n (t, i) =>\n convertToTensor(t, `tensors${i}`, 'raggedTensorToTensor', 'int32'));\n\n const inputs: RaggedTensorToTensorInputs = {\n shape: $shape,\n values: $values,\n defaultValue: $defaultValue,\n rowPartitionTensors: $rowPartitionTensors\n };\n const attrs: RaggedTensorToTensorAttrs = {rowPartitionTypes};\n\n return ENGINE.runKernel(RaggedTensorToTensor, inputs as {}, attrs as {});\n}\n\nexport const raggedTensorToTensor = /* @__PURE__ */ op({raggedTensorToTensor_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Tensor} from '../tensor';\nimport {DataType, Rank, ShapeMap} from '../types';\nimport {sizeFromShape} from '../util';\nimport {assertNonNegativeIntegerDimensions} from '../util_base';\n\nimport {op} from './operation';\n\n/**\n * Creates a `tf.Tensor` with values sampled from a random number generator\n * function defined by the user.\n *\n * @param shape An array of integers defining the output tensor shape.\n * @param randFunction A random number generator function which is called\n * for each element in the output tensor.\n * @param dtype The data type of the output tensor. Defaults to 'float32'.\n *\n * @doc {heading: 'Tensors', subheading: 'Random'}\n */\nfunction rand_(\n shape: ShapeMap[R], randFunction: () => number,\n dtype?: DataType): Tensor {\n assertNonNegativeIntegerDimensions(shape);\n const size = sizeFromShape(shape);\n let values = null;\n if (dtype == null || dtype === 'float32') {\n values = new Float32Array(size);\n } else if (dtype === 'int32') {\n values = new Int32Array(size);\n } else if (dtype === 'bool') {\n values = new Uint8Array(size);\n } else {\n throw new Error(`Unknown data type ${dtype}`);\n }\n for (let i = 0; i < size; i++) {\n values[i] = randFunction();\n }\n return ENGINE.makeTensor(values, shape, dtype) as Tensor;\n}\n\nexport const rand = /* @__PURE__ */ op({rand_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport * as seedrandom from 'seedrandom';\n\nimport {expectNumbersClose, testEpsilon} from '../test_util';\nimport {TypedArray} from '../types';\n\nexport interface RandomBase {\n nextValue(): number;\n}\n\nexport interface RandomGamma {\n nextValue(): number;\n}\n\nexport interface RandNormalDataTypes {\n float32: Float32Array;\n int32: Int32Array;\n}\n\nexport interface RandGammaDataTypes {\n float32: Float32Array;\n int32: Int32Array;\n}\n\n// https://en.wikipedia.org/wiki/Marsaglia_polar_method\nexport class MPRandGauss implements RandomBase {\n private mean: number;\n private stdDev: number;\n private nextVal: number;\n private dtype?: keyof RandNormalDataTypes;\n private truncated?: boolean;\n private upper?: number;\n private lower?: number;\n private random: seedrandom.prng;\n\n constructor(\n mean: number, stdDeviation: number, dtype?: keyof RandNormalDataTypes,\n truncated?: boolean, seed?: number) {\n this.mean = mean;\n this.stdDev = stdDeviation;\n this.dtype = dtype;\n this.nextVal = NaN;\n this.truncated = truncated;\n if (this.truncated) {\n this.upper = this.mean + this.stdDev * 2;\n this.lower = this.mean - this.stdDev * 2;\n }\n const seedValue = seed ? seed : Math.random();\n this.random = seedrandom.alea(seedValue.toString());\n }\n\n /** Returns next sample from a Gaussian distribution. */\n public nextValue(): number {\n if (!isNaN(this.nextVal)) {\n const value = this.nextVal;\n this.nextVal = NaN;\n return value;\n }\n\n let resultX: number, resultY: number;\n let isValid = false;\n while (!isValid) {\n let v1: number, v2: number, s: number;\n do {\n v1 = 2 * this.random() - 1;\n v2 = 2 * this.random() - 1;\n s = v1 * v1 + v2 * v2;\n } while (s >= 1 || s === 0);\n\n const mul = Math.sqrt(-2.0 * Math.log(s) / s);\n resultX = this.mean + this.stdDev * v1 * mul;\n resultY = this.mean + this.stdDev * v2 * mul;\n\n if (!this.truncated || this.isValidTruncated(resultX)) {\n isValid = true;\n }\n }\n\n if (!this.truncated || this.isValidTruncated(resultY)) {\n this.nextVal = this.convertValue(resultY);\n }\n return this.convertValue(resultX);\n }\n\n /** Handles proper rounding for non-floating-point numbers. */\n private convertValue(value: number): number {\n if (this.dtype == null || this.dtype === 'float32') {\n return value;\n }\n return Math.round(value);\n }\n\n /** Returns true if less than 2-standard-deviations from the mean. */\n private isValidTruncated(value: number): boolean {\n return value <= this.upper && value >= this.lower;\n }\n}\n\n// Marsaglia, George, and Wai Wan Tsang. 2000. 
\"A Simple Method for Generating\n// Gamma Variables.\"\nexport class RandGamma implements RandomGamma {\n private alpha: number;\n private beta: number;\n private d: number;\n private c: number;\n private dtype?: keyof RandGammaDataTypes;\n private randu: seedrandom.prng;\n private randn: MPRandGauss;\n\n constructor(\n alpha: number, beta: number, dtype: keyof RandGammaDataTypes,\n seed?: number) {\n this.alpha = alpha;\n this.beta = 1 / beta; // convert rate to scale parameter\n this.dtype = dtype;\n\n const seedValue = seed ? seed : Math.random();\n this.randu = seedrandom.alea(seedValue.toString());\n this.randn = new MPRandGauss(0, 1, dtype, false, this.randu());\n\n if (alpha < 1) {\n this.d = alpha + (2 / 3);\n } else {\n this.d = alpha - (1 / 3);\n }\n this.c = 1 / Math.sqrt(9 * this.d);\n }\n\n /** Returns next sample from a gamma distribution. */\n public nextValue(): number {\n let x2: number, v0: number, v1: number, x: number, u: number, v: number;\n while (true) {\n do {\n x = this.randn.nextValue();\n v = 1 + (this.c * x);\n } while (v <= 0);\n v *= v * v;\n x2 = x * x;\n v0 = 1 - (0.331 * x2 * x2);\n v1 = (0.5 * x2) + (this.d * (1 - v + Math.log(v)));\n u = this.randu();\n if (u < v0 || Math.log(u) < v1) {\n break;\n }\n }\n v = (1 / this.beta) * this.d * v;\n if (this.alpha < 1) {\n v *= Math.pow(this.randu(), 1 / this.alpha);\n }\n return this.convertValue(v);\n }\n /** Handles proper rounding for non-floating-point numbers. */\n private convertValue(value: number): number {\n if (this.dtype === 'float32') {\n return value;\n }\n return Math.round(value);\n }\n}\n\nexport class UniformRandom implements RandomBase {\n private min: number;\n private range: number;\n private random: seedrandom.prng;\n private dtype?: keyof RandNormalDataTypes;\n\n constructor(\n min = 0, max = 1, dtype?: keyof RandNormalDataTypes,\n seed?: string|number) {\n this.min = min;\n this.range = max - min;\n this.dtype = dtype;\n if (seed == null) {\n seed = Math.random();\n }\n if (typeof seed === 'number') {\n seed = seed.toString();\n }\n\n if (!this.canReturnFloat() && this.range <= 1) {\n throw new Error(\n `The difference between ${min} - ${max} <= 1 and dtype is not float`);\n }\n this.random = seedrandom.alea(seed);\n }\n\n /** Handles proper rounding for non floating point numbers. 
*/\n private canReturnFloat = () =>\n (this.dtype == null || this.dtype === 'float32');\n\n private convertValue(value: number): number {\n if (this.canReturnFloat()) {\n return value;\n }\n return Math.round(value);\n }\n\n nextValue() {\n return this.convertValue(this.min + this.range * this.random());\n }\n}\n\nexport function jarqueBeraNormalityTest(values: TypedArray|number[]) {\n // https://en.wikipedia.org/wiki/Jarque%E2%80%93Bera_test\n const n = values.length;\n const s = skewness(values);\n const k = kurtosis(values);\n const jb = n / 6 * (Math.pow(s, 2) + 0.25 * Math.pow(k - 3, 2));\n // JB test requires 2-degress of freedom from Chi-Square @ 0.95:\n // http://www.itl.nist.gov/div898/handbook/eda/section3/eda3674.htm\n const CHI_SQUARE_2DEG = 5.991;\n if (jb > CHI_SQUARE_2DEG) {\n throw new Error(`Invalid p-value for JB: ${jb}`);\n }\n}\n\nexport function expectArrayInMeanStdRange(\n actual: TypedArray|number[], expectedMean: number, expectedStdDev: number,\n epsilon?: number) {\n if (epsilon == null) {\n epsilon = testEpsilon();\n }\n const actualMean = mean(actual);\n expectNumbersClose(actualMean, expectedMean, epsilon);\n expectNumbersClose(\n standardDeviation(actual, actualMean), expectedStdDev, epsilon);\n}\n\nfunction mean(values: TypedArray|number[]) {\n let sum = 0;\n for (let i = 0; i < values.length; i++) {\n sum += values[i];\n }\n return sum / values.length;\n}\n\nfunction standardDeviation(values: TypedArray|number[], mean: number) {\n let squareDiffSum = 0;\n for (let i = 0; i < values.length; i++) {\n const diff = values[i] - mean;\n squareDiffSum += diff * diff;\n }\n return Math.sqrt(squareDiffSum / values.length);\n}\n\nfunction kurtosis(values: TypedArray|number[]) {\n // https://en.wikipedia.org/wiki/Kurtosis\n const valuesMean = mean(values);\n const n = values.length;\n let sum2 = 0;\n let sum4 = 0;\n for (let i = 0; i < n; i++) {\n const v = values[i] - valuesMean;\n sum2 += Math.pow(v, 2);\n sum4 += Math.pow(v, 4);\n }\n return (1 / n) * sum4 / Math.pow((1 / n) * sum2, 2);\n}\n\nfunction skewness(values: TypedArray|number[]) {\n // https://en.wikipedia.org/wiki/Skewness\n const valuesMean = mean(values);\n const n = values.length;\n let sum2 = 0;\n let sum3 = 0;\n for (let i = 0; i < n; i++) {\n const v = values[i] - valuesMean;\n sum2 += Math.pow(v, 2);\n sum3 += Math.pow(v, 3);\n }\n return (1 / n) * sum3 / Math.pow((1 / (n - 1)) * sum2, 3 / 2);\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {Rank, ShapeMap} from '../types';\nimport {assertNonNegativeIntegerDimensions} from '../util_base';\n\nimport {buffer} from './buffer';\nimport {op} from './operation';\nimport {RandGamma} from './rand_util';\n\n/**\n * Creates a `tf.Tensor` with values sampled from a gamma distribution.\n *\n * ```js\n * tf.randomGamma([2, 2], 1).print();\n * ```\n *\n * @param shape An array of integers defining the output tensor shape.\n * @param alpha The shape parameter of the gamma distribution.\n * @param beta The inverse scale parameter of the gamma distribution. Defaults\n * to 1.\n * @param dtype The data type of the output. Defaults to float32.\n * @param seed The seed for the random number generator.\n *\n * @doc {heading: 'Tensors', subheading: 'Random'}\n */\nfunction randomGamma_(\n shape: ShapeMap[R], alpha: number, beta = 1,\n dtype: 'float32'|'int32' = 'float32', seed?: number): Tensor {\n assertNonNegativeIntegerDimensions(shape);\n if (beta == null) {\n beta = 1;\n }\n if (dtype == null) {\n dtype = 'float32';\n }\n if (dtype !== 'float32' && dtype !== 'int32') {\n throw new Error(`Unsupported data type ${dtype}`);\n }\n const rgamma = new RandGamma(alpha, beta, dtype, seed);\n const res = buffer(shape, dtype);\n for (let i = 0; i < res.values.length; i++) {\n res.values[i] = rgamma.nextValue();\n }\n return res.toTensor();\n}\n\nexport const randomGamma = /* @__PURE__ */ op({randomGamma_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {DataType, Rank, ShapeMap} from '../types';\nimport {assertNonNegativeIntegerDimensions} from '../util_base';\n\nimport {buffer} from './buffer';\nimport {op} from './operation';\nimport {MPRandGauss} from './rand_util';\n\n/**\n * Creates a `tf.Tensor` with values sampled from a normal distribution.\n *\n * ```js\n * tf.randomNormal([2, 2]).print();\n * ```\n *\n * @param shape An array of integers defining the output tensor shape.\n * @param mean The mean of the normal distribution.\n * @param stdDev The standard deviation of the normal distribution.\n * @param dtype The data type of the output.\n * @param seed The seed for the random number generator.\n *\n * @doc {heading: 'Tensors', subheading: 'Random'}\n */\nfunction randomNormal_(\n shape: ShapeMap[R], mean = 0, stdDev = 1, dtype?: 'float32'|'int32',\n seed?: number): Tensor {\n assertNonNegativeIntegerDimensions(shape);\n if (dtype != null && (dtype as DataType) === 'bool') {\n throw new Error(`Unsupported data type ${dtype}`);\n }\n const randGauss =\n new MPRandGauss(mean, stdDev, dtype, false /* truncated */, seed);\n const res = buffer(shape, dtype);\n for (let i = 0; i < res.values.length; i++) {\n res.values[i] = randGauss.nextValue();\n }\n return res.toTensor();\n}\n\nexport const randomNormal = /* @__PURE__ */ op({randomNormal_});\n","/**\n * @license\n * Copyright 2022 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {DataType, Rank, ShapeMap} from '../types';\n\nimport {op} from './operation';\nimport {randomNormal} from './random_normal';\n\n/**\n * Creates a `tf.Tensor` with values sampled from a normal distribution.\n *\n * The generated values will have mean 0 and standard deviation 1.\n *\n * ```js\n * tf.randomStandardNormal([2, 2]).print();\n * ```\n *\n * @param shape An array of integers defining the output tensor shape.\n * @param dtype The data type of the output.\n * @param seed The seed for the random number generator.\n *\n * @doc {heading: 'Tensors', subheading: 'Random'}\n */\nfunction randomStandardNormal_(\n shape: ShapeMap[R], dtype?: 'float32'|'int32', seed?: number): Tensor {\n if (dtype != null && (dtype as DataType) === 'bool') {\n throw new Error(`Unsupported data type ${dtype}`);\n }\n return randomNormal(shape, 0, 1, dtype, seed);\n}\n\nexport const randomStandardNormal = /* @__PURE__ */ op({randomStandardNormal_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {DataType, Rank, ShapeMap} from '../types';\nimport {assertNonNegativeIntegerDimensions} from '../util_base';\n\nimport {buffer} from './buffer';\nimport {op} from './operation';\nimport {UniformRandom} from './rand_util';\n\n/**\n * Creates a `tf.Tensor` with values sampled from a uniform distribution.\n *\n * The generated values follow a uniform distribution in the range [minval,\n * maxval). The lower bound minval is included in the range, while the upper\n * bound maxval is excluded.\n *\n * ```js\n * tf.randomUniform([2, 2]).print();\n * ```\n *\n * @param shape An array of integers defining the output tensor shape.\n * @param minval The lower bound on the range of random values to generate.\n * Defaults to 0.\n * @param maxval The upper bound on the range of random values to generate.\n * Defaults to 1.\n * @param dtype The data type of the output tensor. 
Defaults to 'float32'.\n *\n * @doc {heading: 'Tensors', subheading: 'Random'}\n */\nfunction randomUniform_(\n shape: ShapeMap[R], minval = 0, maxval = 1, dtype: DataType = 'float32',\n seed?: number|string): Tensor {\n assertNonNegativeIntegerDimensions(shape);\n const res = buffer(shape, dtype);\n const random = new UniformRandom(minval, maxval, null, seed);\n for (let i = 0; i < res.values.length; i++) {\n res.values[i] = random.nextValue();\n }\n return res.toTensor();\n}\n\nexport const randomUniform = /* @__PURE__ */ op({randomUniform_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Range, RangeAttrs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor1D} from '../tensor';\n\n/**\n * Creates a new `tf.Tensor1D` filled with the numbers in the range provided.\n *\n * The tensor is a half-open interval meaning it includes start, but\n * excludes stop. Decrementing ranges and negative step values are also\n * supported.\n *\n *\n * ```js\n * tf.range(0, 9, 2).print();\n * ```\n *\n * @param start An integer start value\n * @param stop An integer stop value\n * @param step An integer increment (will default to 1 or -1)\n * @param dtype The data type of the output tensor. Defaults to 'float32'.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function range(\n start: number, stop: number, step = 1,\n dtype: 'float32'|'int32' = 'float32'): Tensor1D {\n if (step === 0) {\n throw new Error('Cannot have a step of zero');\n }\n\n const attrs: RangeAttrs = {start, stop, step, dtype};\n\n return ENGINE.runKernel(Range, {} /* inputs */,\n attrs as unknown as NamedAttrMap);\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Reciprocal, ReciprocalInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes reciprocal of x element-wise: `1 / x`\n *\n * ```js\n * const x = tf.tensor1d([0, 1, 2]);\n *\n * x.reciprocal().print(); // or tf.reciprocal(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction reciprocal_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'reciprocal');\n\n const inputs: ReciprocalInputs = {x: $x};\n return ENGINE.runKernel(Reciprocal, inputs as unknown as NamedTensorMap);\n}\nexport const reciprocal = /* @__PURE__ */ op({reciprocal_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Relu, ReluInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes rectified linear element-wise: `max(x, 0)`.\n *\n * ```js\n * const x = tf.tensor1d([-1, 2, -3, 4]);\n *\n * x.relu().print(); // or tf.relu(x)\n * ```\n * @param x The input tensor. If the dtype is `bool`, the output dtype will be\n * `int32`.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction relu_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'relu');\n\n const inputs: ReluInputs = {x: $x};\n\n return ENGINE.runKernel(Relu, inputs as unknown as NamedTensorMap);\n}\n\nexport const relu = /* @__PURE__ */ op({relu_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Relu6, Relu6Inputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes rectified linear 6 element-wise: `min(max(x, 0), 6)`.\n *\n * ```js\n * const x = tf.tensor1d([-1, 2, -3, 8]);\n *\n * x.relu6().print(); // or tf.relu6(x)\n * ```\n * @param x The input tensor. If the dtype is `bool`, the output dtype will be\n * `int32`.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction relu6_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'relu6');\n\n const inputs: Relu6Inputs = {x: $x};\n\n return ENGINE.runKernel(Relu6, inputs as unknown as NamedTensorMap);\n}\n\nexport const relu6 = /* @__PURE__ */ op({relu6_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Reverse, ReverseAttrs, ReverseInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Reverses a `tf.Tensor` along a specified axis.\n *\n * Also available are stricter rank-specific methods that assert that `x` is\n * of the given rank:\n * - `tf.reverse1d`\n * - `tf.reverse2d`\n * - `tf.reverse3d`\n * - `tf.reverse4d`\n *\n * Except `tf.reverse1d` (which does not have axis param), all methods have\n * same signature as this method.\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3, 4]);\n *\n * x.reverse().print();\n * ```\n *\n * ```js\n * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n *\n * const axis = 1;\n * x.reverse(axis).print();\n * ```\n * @param x The input tensor to be reversed.\n * @param axis The set of dimensions to reverse. Must be in the\n * range [-rank(x), rank(x)). 
Defaults to all axes.\n *\n * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}\n */\nfunction reverse_(\n x: T|TensorLike, axis?: number|number[]): T {\n const $x = convertToTensor(x, 'x', 'reverse');\n\n const inputs: ReverseInputs = {x: $x};\n const attrs: ReverseAttrs = {dims: axis};\n\n return ENGINE.runKernel(\n Reverse, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const reverse = /* @__PURE__ */ op({reverse_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor1D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\nimport {op} from './operation';\nimport {reverse} from './reverse';\n\n/**\n * Reverses a `tf.Tensor1D`.\n *\n * @param x The input tensor.\n */\nfunction reverse1d_(x: Tensor1D|TensorLike): Tensor1D {\n const $x = convertToTensor(x, 'x', 'reverse');\n util.assert(\n $x.rank === 1,\n () => `Error in reverse1D: x must be rank 1 but got rank ${$x.rank}.`);\n return reverse($x, 0);\n}\n\nexport const reverse1d = /* @__PURE__ */ op({reverse1d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor2D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\nimport {op} from './operation';\nimport {reverse} from './reverse';\n\n/**\n * Reverses a `tf.Tensor2D` along a specified axis.\n *\n * @param x The input tensor.\n * @param axis The set of dimensions to reverse. Must be in the\n * range [-rank(x), rank(x)). Defaults to all axes.\n */\nfunction reverse2d_(x: Tensor2D|TensorLike, axis?: number|number[]): Tensor2D {\n const $x = convertToTensor(x, 'x', 'reverse');\n util.assert(\n $x.rank === 2,\n () => `Error in reverse2D: x must be rank 2 but got rank ${$x.rank}.`);\n return reverse($x, axis);\n}\n\nexport const reverse2d = /* @__PURE__ */ op({reverse2d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor3D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\nimport {op} from './operation';\nimport {reverse} from './reverse';\n\n/**\n * Reverses a `tf.Tensor3D` along a specified axis.\n *\n * @param x The input tensor.\n * @param axis The set of dimensions to reverse. Must be in the\n * range [-rank(x), rank(x)). Defaults to all axes.\n */\nfunction reverse3d_(x: Tensor3D|TensorLike, axis?: number|number[]): Tensor3D {\n const $x = convertToTensor(x, 'x', 'reverse');\n util.assert(\n $x.rank === 3,\n () => `Error in reverse3D: x must be rank 3 but got rank ${$x.rank}.`);\n return reverse($x, axis);\n}\n\nexport const reverse3d = /* @__PURE__ */ op({reverse3d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor4D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\nimport {op} from './operation';\nimport {reverse} from './reverse';\n\n/**\n * Reverses a `tf.Tensor4D` along a specified axis.\n *\n * @param x The input tensor.\n * @param axis The set of dimensions to reverse. Must be in the\n * range [-rank(x), rank(x)). Defaults to all axes.\n */\nfunction reverse4d_(x: Tensor4D|TensorLike, axis?: number|number[]): Tensor4D {\n const $x = convertToTensor(x, 'x', 'reverse');\n util.assert(\n $x.rank === 4,\n () => `Error in reverse4D: x must be rank 4 but got rank ${$x.rank}.`);\n return reverse($x, axis);\n}\n\nexport const reverse4d = /* @__PURE__ */ op({reverse4d_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Round, RoundInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes round of input `tf.Tensor` element-wise: `round(x)`.\n * It implements banker's rounding.\n *\n * ```js\n * const x = tf.tensor1d([.6, 1.1, -3.3]);\n *\n * x.round().print(); // or tf.round(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction round_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'round');\n const inputs: RoundInputs = {x: $x};\n\n return ENGINE.runKernel(Round, inputs as unknown as NamedTensorMap);\n}\n\nexport const round = /* @__PURE__ */ op({round_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Rsqrt, RsqrtInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes reciprocal of square root of the input `tf.Tensor` element-wise:\n * `y = 1 / sqrt(x)`\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 4, -1]);\n *\n * x.rsqrt().print(); // or tf.rsqrt(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction rsqrt_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'rsqrt', 'float32');\n\n const inputs: RsqrtInputs = {x: $x};\n\n return ENGINE.runKernel(Rsqrt, inputs as unknown as NamedTensorMap);\n}\nexport const rsqrt = /* @__PURE__ */ op({rsqrt_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Selu, SeluInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes scaled exponential linear element-wise.\n *\n * `x < 0 ? scale * alpha * (exp(x) - 1) : scale * x`\n *\n * ```js\n * const x = tf.tensor1d([-1, 2, -3, 4]);\n *\n * x.selu().print(); // or tf.selu(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction selu_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'selu');\n\n const inputs: SeluInputs = {x: $x};\n\n return ENGINE.runKernel(Selu, inputs as unknown as NamedTensorMap);\n}\n\nexport const selu = /* @__PURE__ */ op({selu_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor3D, Tensor4D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {conv2d} from './conv2d';\nimport {depthwiseConv2d} from './depthwise_conv2d';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * 2-D convolution with separable filters.\n *\n * Performs a depthwise convolution that acts separately on channels followed\n * by a pointwise convolution that mixes channels. Note that this is\n * separability between dimensions [1, 2] and 3, not spatial separability\n * between dimensions 1 and 2.\n *\n * See\n * [https://www.tensorflow.org/api_docs/python/tf/nn/separable_conv2d](\n * https://www.tensorflow.org/api_docs/python/tf/nn/separable_conv2d)\n * for more details.\n *\n * @param x The input tensor, of rank 4 or rank 3, of shape\n * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is\n * assumed.\n * @param depthwiseFilter The depthwise filter tensor, rank 4, of shape\n * `[filterHeight, filterWidth, inChannels, channelMultiplier]`. 
This is\n * the filter used in the first step.\n * @param pointwiseFilter The pointwise filter tensor, rank 4, of shape\n * `[1, 1, inChannels * channelMultiplier, outChannels]`. This is\n * the filter used in the second step.\n * @param strides The strides of the convolution: `[strideHeight,\n * strideWidth]`. If strides is a single number, then `strideHeight ==\n * strideWidth`.\n * @param pad The type of padding algorithm.\n * - `same` and stride 1: output will be of same size as input,\n * regardless of filter size.\n * - `valid`: output will be smaller than input if filter is larger\n * than 1x1.\n * - For more info, see this guide:\n * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](\n * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)\n * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`\n * in which we sample input values across the height and width dimensions\n * in atrous convolution. Defaults to `[1, 1]`. If `rate` is a single\n * number, then `dilationHeight == dilationWidth`. If it is greater than\n * 1, then all values of `strides` must be 1.\n * @param dataFormat: An optional string from: \"NHWC\", \"NCHW\". Defaults to\n * \"NHWC\". Specify the data format of the input and output data. With the\n * default format \"NHWC\", the data is stored in the order of: [batch,\n * height, width, channels]. Only \"NHWC\" is currently supported.\n *\n * @doc {heading: 'Operations', subheading: 'Convolution'}\n */\nfunction separableConv2d_(\n x: T|TensorLike, depthwiseFilter: Tensor4D|TensorLike,\n pointwiseFilter: Tensor4D|TensorLike, strides: [number, number]|number,\n pad: 'valid'|'same', dilation: [number, number]|number = [1, 1],\n dataFormat: 'NHWC'|'NCHW' = 'NHWC'): T {\n const $x = convertToTensor(x, 'x', 'separableConv2d');\n const $depthwiseFilter =\n convertToTensor(depthwiseFilter, 'depthwiseFilter', 'separableConv2d');\n const $pointwiseFilter =\n convertToTensor(pointwiseFilter, 'pointwiseFilter', 'separableConv2d');\n\n let x4D = $x as Tensor4D;\n let reshapedTo4D = false;\n if ($x.rank === 3) {\n reshapedTo4D = true;\n x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);\n }\n\n if (dataFormat === 'NCHW') {\n throw new Error(\n 'separableConv2d currently does not support dataFormat NCHW; only ' +\n 'NHWC is supported');\n }\n\n util.assert(\n x4D.rank === 4,\n () => `Error in separableConv2d: input must be rank 4, but got ` +\n `rank ${x4D.rank}.`);\n util.assert(\n $depthwiseFilter.rank === 4,\n () => `Error in separableConv2d: depthwise filter must be rank 4, but ` +\n `got rank ${$depthwiseFilter.rank}.`);\n util.assert(\n $pointwiseFilter.rank === 4,\n () => `Error in separableConv2d: pointwise filter must be rank 4, but ` +\n `got rank ${$depthwiseFilter.rank}.`);\n util.assert(\n $pointwiseFilter.shape[0] === 1,\n () =>\n `Error in separableConv2d: the first dimension of pointwise filter ` +\n ` must be 1, but got ${$pointwiseFilter.shape[0]}.`);\n util.assert(\n $pointwiseFilter.shape[1] === 1,\n () => `Error in separableConv2d: the second dimension of pointwise ` +\n `filter must be 1, but got ${$pointwiseFilter.shape[1]}.`);\n\n const inChannels = $depthwiseFilter.shape[2];\n const channelMultiplier = $depthwiseFilter.shape[3];\n util.assert(\n $pointwiseFilter.shape[2] === inChannels * channelMultiplier,\n () =>\n `Error in separableConv2d: the third dimension of pointwise filter ` +\n `must be ${inChannels * channelMultiplier}, ` +\n `but got ${$pointwiseFilter.shape[2]}.`);\n\n const depthwise = 
depthwiseConv2d(\n x4D, $depthwiseFilter, strides, pad, dataFormat, dilation);\n const pointwiseStride = 1;\n const res =\n conv2d(depthwise, $pointwiseFilter, pointwiseStride, 'valid', dataFormat);\n\n if (reshapedTo4D) {\n return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) as T;\n }\n return res as T;\n}\n\nexport const separableConv2d = /* @__PURE__ */ op({separableConv2d_});\n","/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor, TensorBuffer} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\n/**\n * Computes the difference between two lists of numbers.\n *\n * Given a Tensor `x` and a Tensor `y`, this operation returns a Tensor `out`\n * that represents all values that are in `x` but not in `y`. The returned\n * Tensor `out` is sorted in the same order that the numbers appear in `x`\n * (duplicates are preserved). This operation also returns a Tensor indices that\n * represents the position of each out element in `x`. In other words:\n *\n * `out[i] = x[idx[i]] for i in [0, 1, ..., out.length - 1]`\n *\n * ```js\n * const x = [1, 2, 3, 4, 5, 6];\n * const y = [1, 3, 5];\n *\n * const [out, indices] = await tf.setdiff1dAsync(x, y);\n * out.print(); // [2, 4, 6]\n * indices.print(); // [1, 3, 5]\n * ```\n *\n * @param x 1-D Tensor. Values to keep.\n * @param y 1-D Tensor. Must have the same type as x. Values to exclude in the\n * output.\n * @returns Promise of Tensor tuple [out, indices].\n * out: Tensor with the same type as x.\n * indices: A Tensor of type int32.\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nasync function setdiff1dAsync_(\n x: Tensor|TensorLike, y: Tensor|TensorLike): Promise<[Tensor, Tensor]> {\n const $x = convertToTensor(x, 'x', 'setdiff1d');\n const $y = convertToTensor(y, 'y', 'setdiff1d');\n\n util.assert(\n $x.dtype === $y.dtype,\n () => `x and y should have the same dtype, but got x (${\n $x.dtype}) and y (${$y.dtype}).`);\n\n util.assert(\n $x.rank === 1, () => `x should be 1D tensor, but got x (${$x.shape}).`);\n\n util.assert(\n $y.rank === 1, () => `y should be 1D tensor, but got y (${$y.shape}).`);\n\n const xVals = await $x.data();\n const yVals = await $y.data();\n const ySet = new Set(yVals);\n\n let outputSize = 0;\n for (let i = 0; i < xVals.length; i++) {\n if (!ySet.has(xVals[i])) {\n outputSize++;\n }\n }\n\n const buffer = new TensorBuffer([outputSize], $x.dtype);\n const indices = new TensorBuffer([outputSize], 'int32');\n for (let i = 0, p = 0; i < xVals.length; i++) {\n if (!ySet.has(xVals[i])) {\n buffer.values[p] = xVals[i];\n indices.values[p] = i;\n p++;\n }\n }\n return [buffer.toTensor(), indices.toTensor()];\n}\nexport const setdiff1dAsync = setdiff1dAsync_;\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Sign, SignInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Returns an element-wise indication of the sign of a number.\n *\n * ```js\n * const x = tf.tensor1d([.6, 1.1, -3.3, NaN, 0]);\n *\n * x.sign().print(); // or tf.sign(x)\n * ```\n * @param x The input Tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction sign_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'sign');\n const inputs: SignInputs = {x: $x};\n return ENGINE.runKernel(Sign, inputs as unknown as NamedTensorMap);\n}\nexport const sign = /* @__PURE__ */ op({sign_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Sin, SinInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes sin of the input Tensor element-wise: `sin(x)`\n *\n * ```js\n * const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);\n *\n * x.sin().print(); // or tf.sin(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction sin_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'sin', 'float32');\n\n const inputs: SinInputs = {x: $x};\n\n return ENGINE.runKernel(Sin, inputs as unknown as NamedTensorMap);\n}\nexport const sin = /* @__PURE__ */ op({sin_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Sinh, SinhInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes hyperbolic sin of the input `tf.Tensor` element-wise: `sinh(x)`\n *\n * ```js\n * const x = tf.tensor1d([0, 1, -1, .7]);\n *\n * x.sinh().print(); // or tf.sinh(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction sinh_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'sinh');\n const inputs: SinhInputs = {x: $x};\n\n return ENGINE.runKernel(Sinh, inputs as unknown as NamedTensorMap);\n}\nexport const sinh = /* @__PURE__ */ op({sinh_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor1D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\nimport {slice} from './slice';\n\n/**\n * Extracts a 1D slice from 1D array starting at coordinates `begin` and is\n * of length `size`. See `slice` for details.\n */\nfunction slice1d_(\n x: Tensor1D|TensorLike, begin: number, size: number): Tensor1D {\n const $x = convertToTensor(x, 'x', 'slice1d');\n util.assert(\n $x.rank === 1,\n () =>\n `slice1d expects a rank-1 tensor, but got a rank-${$x.rank} tensor`);\n return slice($x, [begin], [size]);\n}\nexport const slice1d = /* @__PURE__ */ op({slice1d_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor2D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\nimport {slice} from './slice';\n\n/**\n * Extracts a 2D slice from a 2D array starting at coordinates `begin` and\n * is of size `size`. See `slice` for details.\n */\nfunction slice2d_(\n x: Tensor2D|TensorLike, begin: [number, number],\n size: [number, number]): Tensor2D {\n const $x = convertToTensor(x, 'x', 'slice2d');\n util.assert(\n $x.rank === 2,\n () =>\n `slice2d expects a rank-2 tensor, but got a rank-${$x.rank} tensor`);\n return slice($x, begin, size);\n}\nexport const slice2d = /* @__PURE__ */ op({slice2d_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor3D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\nimport {slice} from './slice';\n\n/**\n * Extracts a 3D slice from a 3D array starting at coordinates `begin` and\n * is of size `size`. See `slice` for details.\n */\nfunction slice3d_(\n x: Tensor3D|TensorLike, begin: [number, number, number],\n size: [number, number, number]): Tensor3D {\n const $x = convertToTensor(x, 'x', 'slice3d');\n util.assert(\n $x.rank === 3,\n () =>\n `slice3d expects a rank-3 tensor, but got a rank-${$x.rank} tensor`);\n return slice($x, begin, size);\n}\nexport const slice3d = /* @__PURE__ */ op({slice3d_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor4D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\nimport {slice} from './slice';\n\n/**\n * Extracts a 4D slice from a 4D array starting at coordinates `begin` and\n * is of size `size`. See `slice` for details.\n */\nfunction slice4d_(\n x: Tensor4D|TensorLike, begin: [number, number, number, number],\n size: [number, number, number, number]): Tensor4D {\n const $x = convertToTensor(x, 'x', 'slice4d');\n util.assert(\n $x.rank === 4,\n () =>\n `slice4d expects a rank-4 tensor, but got a rank-${$x.rank} tensor`);\n return slice($x, begin, size);\n}\nexport const slice4d = /* @__PURE__ */ op({slice4d_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Softmax, SoftmaxAttrs, SoftmaxInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes the softmax normalized vector given the logits.\n *\n * ```js\n * const a = tf.tensor1d([1, 2, 3]);\n *\n * a.softmax().print(); // or tf.softmax(a)\n * ```\n *\n * ```js\n * const a = tf.tensor2d([2, 4, 6, 1, 2, 3], [2, 3]);\n *\n * a.softmax().print(); // or tf.softmax(a)\n * ```\n *\n * @param logits The logits array.\n * @param dim The dimension softmax would be performed on. Defaults to `-1`\n * which indicates the last dimension.\n *\n * @doc {heading: 'Operations', subheading: 'Normalization'}\n */\nfunction softmax_(logits: T|TensorLike, dim = -1): T {\n const $logits = convertToTensor(logits, 'logits', 'softmax', 'float32');\n\n if (dim === -1) {\n dim = $logits.rank - 1;\n }\n if (dim !== $logits.rank - 1) {\n throw Error(\n 'Softmax along a non-last dimension is not yet supported. 
' +\n `Logits was rank ${$logits.rank} and dim was ${dim}`);\n }\n\n const inputs: SoftmaxInputs = {logits: $logits};\n const attrs: SoftmaxAttrs = {dim};\n\n return ENGINE.runKernel(\n Softmax, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const softmax = /* @__PURE__ */ op({softmax_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {FFT, FFTInputs} from '../../kernel_names';\nimport {Tensor} from '../../tensor';\nimport {NamedTensorMap} from '../../tensor_types';\nimport {assert} from '../../util';\nimport {op} from '../operation';\n\n/**\n * Fast Fourier transform.\n *\n * Computes the 1-dimensional discrete Fourier transform over the inner-most\n * dimension of input.\n *\n * ```js\n * const real = tf.tensor1d([1, 2, 3]);\n * const imag = tf.tensor1d([1, 2, 3]);\n * const x = tf.complex(real, imag);\n *\n * x.fft().print(); // tf.spectral.fft(x).print();\n * ```\n * @param input The complex input to compute an fft over.\n *\n * @doc {heading: 'Operations', subheading: 'Spectral', namespace: 'spectral'}\n */\nfunction fft_(input: Tensor): Tensor {\n assert(\n input.dtype === 'complex64',\n () => `The dtype for tf.spectral.fft() must be complex64 ` +\n `but got ${input.dtype}.`);\n\n const inputs: FFTInputs = {input};\n\n return ENGINE.runKernel(FFT, inputs as unknown as NamedTensorMap);\n}\n\nexport const fft = /* @__PURE__ */ op({fft_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {IFFT, IFFTInputs} from '../../kernel_names';\nimport {Tensor} from '../../tensor';\nimport {NamedTensorMap} from '../../tensor_types';\nimport {assert} from '../../util';\nimport {op} from '../operation';\n\n/**\n * Inverse fast Fourier transform.\n *\n * Computes the inverse 1-dimensional discrete Fourier transform over the\n * inner-most dimension of input.\n *\n * ```js\n * const real = tf.tensor1d([1, 2, 3]);\n * const imag = tf.tensor1d([1, 2, 3]);\n * const x = tf.complex(real, imag);\n *\n * x.ifft().print(); // tf.spectral.ifft(x).print();\n * ```\n * @param input The complex input to compute an ifft over.\n *\n * @doc {heading: 'Operations', subheading: 'Spectral', namespace: 'spectral'}\n */\nfunction ifft_(input: Tensor): Tensor {\n assert(\n input.dtype === 'complex64',\n () => `The dtype for tf.spectral.ifft() must be complex64 ` +\n `but got ${input.dtype}.`);\n\n const inputs: IFFTInputs = {input};\n\n return ENGINE.runKernel(IFFT, inputs as unknown as NamedTensorMap);\n}\n\nexport const ifft = /* @__PURE__ */ op({ifft_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor, Tensor2D} from '../../tensor';\nimport {complex} from '../complex';\nimport {concat} from '../concat';\nimport {imag} from '../imag';\nimport {mul} from '../mul';\nimport {op} from '../operation';\nimport {real} from '../real';\nimport {reshape} from '../reshape';\nimport {reverse} from '../reverse';\nimport {scalar} from '../scalar';\nimport {slice} from '../slice';\n\nimport {ifft} from './ifft';\n\n/**\n * Inversed real value input fast Fourier transform.\n *\n * Computes the 1-dimensional inversed discrete Fourier transform over the\n * inner-most dimension of the real input.\n *\n * ```js\n * const real = tf.tensor1d([1, 2, 3]);\n * const imag = tf.tensor1d([0, 0, 0]);\n * const x = tf.complex(real, imag);\n *\n * x.irfft().print();\n * ```\n * @param input The real value input to compute an irfft over.\n *\n * @doc {heading: 'Operations', subheading: 'Spectral', namespace: 'spectral'}\n */\nfunction irfft_(input: Tensor): Tensor {\n const innerDimensionSize = input.shape[input.shape.length - 1];\n const batch = input.size / innerDimensionSize;\n let ret: Tensor;\n if (innerDimensionSize <= 2) {\n const complexInput = reshape(input, [batch, innerDimensionSize]);\n ret = ifft(complexInput);\n } else {\n // The length of unique components of the DFT of a real-valued signal\n // is 2 * (input_len - 1)\n const outputShape = [batch, 2 * (innerDimensionSize - 1)];\n const realInput = reshape(real(input), [batch, innerDimensionSize]);\n const imagInput = reshape(imag(input), [batch, innerDimensionSize]);\n\n const realConjugate =\n reverse(slice(realInput, [0, 1], [batch, innerDimensionSize - 2]), 1);\n const imagConjugate: Tensor2D = mul(\n reverse(slice(imagInput, [0, 1], [batch, innerDimensionSize - 2]), 1),\n scalar(-1));\n\n const r = concat([realInput, realConjugate], 1);\n const i = concat([imagInput, imagConjugate], 1);\n const complexInput =\n reshape(complex(r, i), [outputShape[0], outputShape[1]]);\n ret = ifft(complexInput);\n }\n ret = real(ret);\n // reshape the result if the input is 3D tensor.\n if (input.rank === 3 && input.shape[0] !== 0) {\n const temp = ret;\n const batch = input.shape[0];\n ret = reshape(ret, [batch, ret.shape[0] / batch, ret.shape[1]]);\n temp.dispose();\n }\n return ret;\n}\n\nexport const irfft = /* @__PURE__ */ op({irfft_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {SplitV, SplitVAttrs, SplitVInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Splits a `tf.Tensor` into sub tensors.\n *\n * If `numOrSizeSplits` is a number, splits `x` along dimension `axis`\n * into `numOrSizeSplits` smaller tensors.\n * Requires that `numOrSizeSplits` evenly divides `x.shape[axis]`.\n *\n * If `numOrSizeSplits` is a number array, splits `x` into\n * `numOrSizeSplits.length` pieces. The shape of the `i`-th piece has the\n * same size as `x` except along dimension `axis` where the size is\n * `numOrSizeSplits[i]`.\n *\n * ```js\n * const x = tf.tensor2d([1, 2, 3, 4, 5, 6, 7, 8], [2, 4]);\n * const [a, b] = tf.split(x, 2, 1);\n * a.print();\n * b.print();\n *\n * const [c, d, e] = tf.split(x, [1, 2, 1], 1);\n * c.print();\n * d.print();\n * e.print();\n * ```\n *\n * @param x The input tensor to split.\n * @param numOrSizeSplits Either an integer indicating the number of\n * splits along the axis or an array of integers containing the sizes of\n * each output tensor along the axis. If a number then it must evenly divide\n * `x.shape[axis]`; otherwise the sum of sizes must match `x.shape[axis]`.\n * Can contain one -1 indicating that dimension is to be inferred.\n * @param axis The dimension along which to split. Defaults to 0 (the first\n * dim).\n *\n * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}\n */\nfunction split_(\n x: Tensor|TensorLike, numOrSizeSplits: number[]|number, axis = 0): T[] {\n const $x = convertToTensor(x, 'x', 'split');\n\n const inputs: SplitVInputs = {x: $x};\n const attr: SplitVAttrs = {numOrSizeSplits, axis};\n\n return ENGINE.runKernel(\n SplitV, inputs as unknown as NamedTensorMap,\n attr as unknown as NamedAttrMap) as unknown as T[];\n}\n\nexport const split = /* @__PURE__ */ op({split_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../../tensor';\nimport {assert} from '../../util';\nimport {complex} from '../complex';\nimport {concat} from '../concat';\nimport {imag} from '../imag';\nimport {op} from '../operation';\nimport {real} from '../real';\nimport {reshape} from '../reshape';\nimport {slice} from '../slice';\nimport {split} from '../split';\nimport {zeros} from '../zeros';\nimport {zerosLike} from '../zeros_like';\n\nimport {fft} from './fft';\n\n/**\n * Real value input fast Fourier transform.\n *\n * Computes the 1-dimensional discrete Fourier transform over the\n * inner-most dimension of the real input.\n *\n * ```js\n * const real = tf.tensor1d([1, 2, 3]);\n *\n * real.rfft().print();\n * ```\n * @param input The real value input to compute an rfft over.\n *\n * @doc {heading: 'Operations', subheading: 'Spectral', namespace: 'spectral'}\n */\nfunction rfft_(input: Tensor, fftLength?: number): Tensor {\n assert(\n input.dtype === 'float32',\n () => `The dtype for rfft() must be real value but got ${input.dtype}`);\n\n let innerDimensionSize = input.shape[input.shape.length - 1];\n const batch = input.size / innerDimensionSize;\n\n let adjustedInput: Tensor;\n if (fftLength != null && fftLength < innerDimensionSize) {\n // Need to crop\n const begin = input.shape.map(v => 0);\n const size = input.shape.map(v => v);\n size[input.shape.length - 1] = fftLength;\n adjustedInput = slice(input, begin, size);\n innerDimensionSize = fftLength;\n } else if (fftLength != null && fftLength > innerDimensionSize) {\n // Need to pad with zeros\n const zerosShape = input.shape.map(v => v);\n zerosShape[input.shape.length - 1] = fftLength - innerDimensionSize;\n adjustedInput = concat([input, zeros(zerosShape)], input.shape.length - 1);\n innerDimensionSize = fftLength;\n } else {\n adjustedInput = input;\n }\n\n // Complement the input with zero imaginary numbers.\n const zerosInput = zerosLike(adjustedInput);\n const complexInput =\n reshape(complex(adjustedInput, zerosInput), [batch, innerDimensionSize]);\n\n const ret = fft(complexInput);\n\n // Exclude complex conjugations. These conjugations are put symmetrically.\n const half = Math.floor(innerDimensionSize / 2) + 1;\n const realValues = real(ret);\n const imagValues = imag(ret);\n const realComplexConjugate = split(\n realValues, [half, innerDimensionSize - half],\n realValues.shape.length - 1);\n const imagComplexConjugate = split(\n imagValues, [half, innerDimensionSize - half],\n imagValues.shape.length - 1);\n\n const outputShape = adjustedInput.shape.slice();\n outputShape[adjustedInput.shape.length - 1] = half;\n\n return reshape(\n complex(realComplexConjugate[0], imagComplexConjugate[0]), outputShape);\n}\n\nexport const rfft = /* @__PURE__ */ op({rfft_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {SquaredDifference, SquaredDifferenceInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {makeTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {assertAndGetBroadcastShape} from './broadcast_util';\nimport {op} from './operation';\n\n/**\n * Returns (a - b) * (a - b) element-wise.\n * Supports broadcasting.\n *\n * ```js\n * const a = tf.tensor1d([1, 4, 3, 16]);\n * const b = tf.tensor1d([1, 2, 9, 4]);\n *\n * a.squaredDifference(b).print(); // or tf.squaredDifference(a, b)\n * ```\n *\n * ```js\n * // Broadcast squared difference a with b.\n * const a = tf.tensor1d([2, 4, 6, 8]);\n * const b = tf.scalar(5);\n *\n * a.squaredDifference(b).print(); // or tf.squaredDifference(a, b)\n * ```\n *\n * @param a The first tensor.\n * @param b The second tensor. Must have the same type as `a`.\n *\n * @doc {heading: 'Operations', subheading: 'Arithmetic'}\n */\nfunction squaredDifference_(\n a: Tensor|TensorLike, b: Tensor|TensorLike): T {\n let $a = convertToTensor(a, 'a', 'squaredDifference');\n let $b = convertToTensor(b, 'b', 'squaredDifference');\n [$a, $b] = makeTypesMatch($a, $b);\n\n assertAndGetBroadcastShape($a.shape, $b.shape);\n\n const inputs: SquaredDifferenceInputs = {a: $a, b: $b};\n const attrs = {};\n\n return ENGINE.runKernel(\n SquaredDifference, inputs as unknown as NamedTensorMap, attrs);\n}\n\nexport const squaredDifference = /* @__PURE__ */ op({squaredDifference_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {squeezeShape} from '../util';\n\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Removes dimensions of size 1 from the shape of a `tf.Tensor`.\n *\n * ```js\n * const x = tf.tensor([1, 2, 3, 4], [1, 1, 4]);\n * x.squeeze().print();\n * ```\n *\n * @param x The input tensor to be squeezed.\n * @param axis An optional list of numbers. 
If specified, only\n * squeezes the dimensions listed. The dimension index starts at 0. It\n * is an error to squeeze a dimension that is not 1.\n *\n * @doc {heading: 'Tensors', subheading: 'Transformations'}\n */\nfunction squeeze_(x: Tensor|TensorLike, axis?: number[]): T {\n const $x = convertToTensor(x, 'x', 'squeeze', 'string_or_numeric');\n return reshape($x, squeezeShape($x.shape, axis).newShape) as T;\n}\n\nexport const squeeze = /* @__PURE__ */ op({squeeze_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Pack, PackAttrs, PackInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensorArray} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\n\n/**\n * Stacks a list of rank-`R` `tf.Tensor`s into one rank-`(R+1)` `tf.Tensor`.\n *\n * ```js\n * const a = tf.tensor1d([1, 2]);\n * const b = tf.tensor1d([3, 4]);\n * const c = tf.tensor1d([5, 6]);\n * tf.stack([a, b, c]).print();\n * ```\n *\n * @param tensors A list of tensor objects with the same shape and dtype.\n * @param axis The axis to stack along. Defaults to 0 (the first dim).\n *\n * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}\n */\nfunction stack_(\n tensors: Array, axis = 0): Tensor {\n const $tensors =\n convertToTensorArray(tensors, 'tensors', 'stack', 'string_or_numeric');\n\n util.assert(\n $tensors.length >= 1, () => 'Pass at least one tensor to tf.stack');\n\n if ($tensors.length > 0) {\n util.assert(\n axis <= $tensors[0].rank, () => 'Axis must be <= rank of the tensor');\n }\n\n const inputs: PackInputs = $tensors;\n const attrs: PackAttrs = {axis};\n\n return ENGINE.runKernel(\n Pack, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const stack = /* @__PURE__ */ op({stack_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Step, StepAttrs, StepInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes step of the input `tf.Tensor` element-wise: `x > 0 ? 1 : alpha`\n *\n * ```js\n * const x = tf.tensor1d([0, 2, -1, -3]);\n *\n * x.step(.5).print(); // or tf.step(x, .5)\n * ```\n * @param x The input tensor.\n * @param alpha The gradient when input is negative. Defaults to 0.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction step_(x: T|TensorLike, alpha = 0.0): T {\n const $x = convertToTensor(x, 'x', 'step');\n\n const inputs: StepInputs = {x: $x};\n const attrs: StepAttrs = {alpha};\n\n return ENGINE.runKernel(\n Step, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\nexport const step = /* @__PURE__ */ op({step_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {StridedSlice, StridedSliceAttrs, StridedSliceInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Extracts a strided slice of a tensor.\n *\n * Roughly speaking, this op extracts a slice of size (end-begin)/stride from\n * the given input tensor (x). Starting at the location specified by begin the\n * slice continues by adding stride to the index until all dimensions are not\n * less than end. 
Note that a stride can be negative, which causes a reverse\n * slice.\n *\n * ```js\n * const t = tf.tensor3d([1, 1, 1 ,2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],\n * [3, 2, 3]);\n * t.stridedSlice([1, 0, 0], [2, 1, 3], [1, 1, 1]).print() // [[[3, 3, 3]]]\n * t.stridedSlice([1, 0, 0], [2, 2, 3], [1, 1, 1]).print() // [[[3, 3, 3],\n * // [4, 4, 4]]]\n * t.stridedSlice([1, -1, 0], [2, -3, 3], [1, -1, 1]).print() // [[[4, 4, 4],\n * // [3, 3, 3]]]\n * ```\n *\n * @param x The tensor to stride slice.\n * @param begin The coordinates to start the slice from.\n * @param end: The coordinates to end the slice at.\n * @param strides: The size of the slice.\n * @param beginMask: If the ith bit of beginMask is set, begin[i] is ignored\n * and the fullest possible range in that dimension is used instead.\n * @param endMask: If the ith bit of endMask is set, end[i] is ignored\n * and the fullest possible range in that dimension is used instead.\n * @param shrinkAxisMask: a bitmask where bit i implies that\n * the ith specification should shrink the dimensionality. begin and end must\n * imply a slice of size 1 in the dimension.\n *\n * @doc {heading: 'Operations', subheading: 'Slicing and Joining'}\n */\nfunction stridedSlice_(\n x: Tensor|TensorLike, begin: number[], end: number[], strides?: number[],\n beginMask = 0, endMask = 0, ellipsisMask = 0, newAxisMask = 0,\n shrinkAxisMask = 0): Tensor {\n const $x = convertToTensor(x, 'x', 'stridedSlice', 'string_or_numeric');\n\n const inputs: StridedSliceInputs = {x: $x};\n const attrs: StridedSliceAttrs = {\n begin,\n end,\n strides,\n beginMask,\n endMask,\n ellipsisMask,\n newAxisMask,\n shrinkAxisMask\n };\n\n return ENGINE.runKernel(\n StridedSlice, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const stridedSlice = /* @__PURE__ */ op({stridedSlice_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Tan, TanInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Computes tan of the input `tf.Tensor` element-wise, `tan(x)`\n *\n * ```js\n * const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]);\n *\n * x.tan().print(); // or tf.tan(x)\n * ```\n * @param x The input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Basic math'}\n */\nfunction tan_(x: T|TensorLike): T {\n const $x = convertToTensor(x, 'x', 'tan', 'float32');\n\n const inputs: TanInputs = {x: $x};\n\n return ENGINE.runKernel(Tan, inputs as unknown as NamedTensorMap);\n}\nexport const tan = /* @__PURE__ */ op({tan_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor1D} from '../tensor';\nimport {inferShape} from '../tensor_util_env';\nimport {TensorLike1D} from '../types';\nimport {DataType} from '../types';\nimport {assertNonNull} from '../util';\nimport {makeTensor} from './tensor_ops_util';\n\n/**\n * Creates rank-1 `tf.Tensor` with the provided values, shape and dtype.\n *\n * The same functionality can be achieved with `tf.tensor`, but in general\n * we recommend using `tf.tensor1d` as it makes the code more readable.\n *\n * ```js\n * tf.tensor1d([1, 2, 3]).print();\n * ```\n *\n * @param values The values of the tensor. Can be array of numbers,\n * or a `TypedArray`.\n * @param dtype The data type.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function tensor1d(values: TensorLike1D, dtype?: DataType): Tensor1D {\n assertNonNull(values);\n const inferredShape = inferShape(values, dtype);\n if (inferredShape.length !== 1) {\n throw new Error('tensor1d() requires values to be a flat/TypedArray');\n }\n const shape: number[] = null;\n return makeTensor(values, shape, inferredShape, dtype) as Tensor1D;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor2D} from '../tensor';\nimport {inferShape} from '../tensor_util_env';\nimport {TensorLike2D} from '../types';\nimport {DataType} from '../types';\nimport {assertNonNull} from '../util';\nimport {makeTensor} from './tensor_ops_util';\n\n/**\n * Creates rank-2 `tf.Tensor` with the provided values, shape and dtype.\n *\n * The same functionality can be achieved with `tf.tensor`, but in general\n * we recommend using `tf.tensor2d` as it makes the code more readable.\n *\n * ```js\n * // Pass a nested array.\n * tf.tensor2d([[1, 2], [3, 4]]).print();\n * ```\n * ```js\n * // Pass a flat array and specify a shape.\n * tf.tensor2d([1, 2, 3, 4], [2, 2]).print();\n * ```\n *\n * @param values The values of the tensor. Can be nested array of numbers,\n * or a flat array, or a `TypedArray`.\n * @param shape The shape of the tensor. 
If not provided, it is inferred from\n * `values`.\n * @param dtype The data type.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function tensor2d(\n values: TensorLike2D, shape?: [number, number],\n dtype?: DataType): Tensor2D {\n assertNonNull(values);\n if (shape != null && shape.length !== 2) {\n throw new Error('tensor2d() requires shape to have two numbers');\n }\n const inferredShape = inferShape(values, dtype);\n if (inferredShape.length !== 2 && inferredShape.length !== 1) {\n throw new Error(\n 'tensor2d() requires values to be number[][] or flat/TypedArray');\n }\n if (inferredShape.length === 1 && shape == null) {\n throw new Error(\n 'tensor2d() requires shape to be provided when `values` ' +\n 'are a flat/TypedArray');\n }\n return makeTensor(values, shape, inferredShape, dtype) as Tensor2D;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor4D} from '../tensor';\nimport {inferShape} from '../tensor_util_env';\nimport {TensorLike4D} from '../types';\nimport {DataType} from '../types';\nimport {assertNonNull} from '../util';\nimport {makeTensor} from './tensor_ops_util';\n\n/**\n * Creates rank-4 `tf.Tensor` with the provided values, shape and dtype.\n *\n * The same functionality can be achieved with `tf.tensor`, but in general\n * we recommend using `tf.tensor4d` as it makes the code more readable.\n *\n * ```js\n * // Pass a nested array.\n * tf.tensor4d([[[[1], [2]], [[3], [4]]]]).print();\n * ```\n * ```js\n * // Pass a flat array and specify a shape.\n * tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]).print();\n * ```\n *\n * @param values The values of the tensor. Can be nested array of numbers,\n * or a flat array, or a `TypedArray`.\n * @param shape The shape of the tensor. Optional. If not provided,\n * it is inferred from `values`.\n * @param dtype The data type.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function tensor4d(\n values: TensorLike4D, shape?: [number, number, number, number],\n dtype?: DataType): Tensor4D {\n assertNonNull(values);\n if (shape != null && shape.length !== 4) {\n throw new Error('tensor4d() requires shape to have four numbers');\n }\n const inferredShape = inferShape(values, dtype);\n if (inferredShape.length !== 4 && inferredShape.length !== 1) {\n throw new Error(\n 'tensor4d() requires values to be number[][][][] or flat/TypedArray');\n }\n if (inferredShape.length === 1 && shape == null) {\n throw new Error(\n 'tensor4d() requires shape to be provided when `values` ' +\n 'are a flat array');\n }\n return makeTensor(values, shape, inferredShape, dtype) as Tensor4D;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor5D} from '../tensor';\nimport {inferShape} from '../tensor_util_env';\nimport {TensorLike5D} from '../types';\nimport {DataType} from '../types';\nimport {assertNonNull} from '../util';\nimport {makeTensor} from './tensor_ops_util';\n\n/**\n * Creates rank-5 `tf.Tensor` with the provided values, shape and dtype.\n *\n * The same functionality can be achieved with `tf.tensor`, but in general\n * we recommend using `tf.tensor5d` as it makes the code more readable.\n *\n * ```js\n * // Pass a nested array.\n * tf.tensor5d([[[[[1],[2]],[[3],[4]]],[[[5],[6]],[[7],[8]]]]]).print();\n * ```\n * ```js\n * // Pass a flat array and specify a shape.\n * tf.tensor5d([1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 2, 2, 1]).print();\n * ```\n *\n * @param values The values of the tensor. Can be nested array of numbers,\n * or a flat array, or a `TypedArray`.\n * @param shape The shape of the tensor. Optional. If not provided,\n * it is inferred from `values`.\n * @param dtype The data type.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function tensor5d(\n values: TensorLike5D, shape?: [number, number, number, number, number],\n dtype?: DataType): Tensor5D {\n assertNonNull(values);\n if (shape != null && shape.length !== 5) {\n throw new Error('tensor5d() requires shape to have five numbers');\n }\n const inferredShape = inferShape(values, dtype);\n if (inferredShape.length !== 5 && inferredShape.length !== 1) {\n throw new Error(\n 'tensor5d() requires values to be ' +\n 'number[][][][][] or flat/TypedArray');\n }\n if (inferredShape.length === 1 && shape == null) {\n throw new Error(\n 'tensor5d() requires shape to be provided when `values` ' +\n 'are a flat array');\n }\n return makeTensor(values, shape, inferredShape, dtype) as Tensor5D;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor6D} from '../tensor';\nimport {inferShape} from '../tensor_util_env';\nimport {TensorLike6D} from '../types';\nimport {DataType} from '../types';\nimport {assertNonNull} from '../util';\nimport {makeTensor} from './tensor_ops_util';\n\n/**\n * Creates rank-6 `tf.Tensor` with the provided values, shape and dtype.\n *\n * The same functionality can be achieved with `tf.tensor`, but in general\n * we recommend using `tf.tensor6d` as it makes the code more readable.\n *\n * ```js\n * // Pass a nested array.\n * tf.tensor6d([[[[[[1],[2]],[[3],[4]]],[[[5],[6]],[[7],[8]]]]]]).print();\n * ```\n * ```js\n * // Pass a flat array and specify a shape.\n * tf.tensor6d([1, 2, 3, 4, 5, 6, 7, 8], [1, 1, 2, 2, 2, 1]).print();\n * ```\n *\n * @param values The values of the tensor. Can be nested array of numbers,\n * or a flat array, or a `TypedArray`.\n * @param shape The shape of the tensor. Optional. If not provided,\n * it is inferred from `values`.\n * @param dtype The data type.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function tensor6d(\n values: TensorLike6D,\n shape?: [number, number, number, number, number, number],\n dtype?: DataType): Tensor6D {\n assertNonNull(values);\n if (shape != null && shape.length !== 6) {\n throw new Error('tensor6d() requires shape to have six numbers');\n }\n const inferredShape = inferShape(values, dtype);\n if (inferredShape.length !== 6 && inferredShape.length !== 1) {\n throw new Error(\n 'tensor6d() requires values to be number[][][][][][] or ' +\n 'flat/TypedArray');\n }\n if (inferredShape.length === 1 && shape == null) {\n throw new Error(\n 'tensor6d() requires shape to be provided when `values` ' +\n 'are a flat array');\n }\n shape = shape ||\n inferredShape as [number, number, number, number, number, number];\n return makeTensor(values, shape, inferredShape, dtype) as Tensor6D;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {TopK, TopKAttrs, TopKInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\nimport {op} from './operation';\n\n/**\n * Finds the values and indices of the `k` largest entries along the last\n * dimension.\n *\n * If the input is a vector (rank=1), finds the k largest entries in the vector\n * and outputs their values and indices as vectors. Thus values[j] is the j-th\n * largest entry in input, and its index is indices[j].\n * For higher rank inputs, computes the top k entries along the last dimension.\n *\n * If two elements are equal, the lower-index element appears first.\n *\n * ```js\n * const a = tf.tensor2d([[1, 5], [4, 3]]);\n * const {values, indices} = tf.topk(a);\n * values.print();\n * indices.print();\n * ```\n * @param x 1-D or higher `tf.Tensor` with last dimension being at least `k`.\n * @param k Number of top elements to look for along the last dimension.\n * @param sorted If true, the resulting `k` elements will be sorted by the\n * values in descending order.\n *\n * @doc {heading: 'Operations', subheading: 'Evaluation'}\n */\nfunction topk_(\n x: T|TensorLike, k = 1, sorted = true): {values: T, indices: T} {\n const $x = convertToTensor(x, 'x', 'topk');\n if ($x.rank === 0) {\n throw new Error('topk() expects the input to be of rank 1 or higher');\n }\n const lastDim = $x.shape[$x.shape.length - 1];\n\n if (k < 0) {\n throw new Error(`'k' passed to topk() must be >= 0 but got ${k}`);\n }\n\n if (k > lastDim) {\n throw new Error(\n `'k' passed to topk() must be <= the last dimension (${lastDim}) ` +\n `but got ${k}`);\n }\n\n const inputs: TopKInputs = {x: $x};\n const attrs: TopKAttrs = {k, sorted};\n\n const [values, indices] = ENGINE.runKernel(\n TopK, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as [T, T];\n\n return {values, indices};\n}\n\nexport const topk = /* @__PURE__ */ op({topk_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {DataType, Rank, ShapeMap} from '../types';\nimport {assertNonNegativeIntegerDimensions} from '../util_base';\n\nimport {buffer} from './buffer';\nimport {op} from './operation';\nimport {MPRandGauss} from './rand_util';\n\n/**\n * Creates a `tf.Tensor` with values sampled from a truncated normal\n * distribution.\n *\n * ```js\n * tf.truncatedNormal([2, 2]).print();\n * ```\n *\n * The generated values follow a normal distribution with specified mean and\n * standard deviation, except that values whose magnitude is more than 2\n * standard deviations from the mean are dropped and re-picked.\n *\n * @param shape An array of integers defining the output tensor shape.\n * @param mean The mean of the normal distribution.\n * @param stdDev The standard deviation of the normal distribution.\n * @param dtype The data type of the output tensor.\n * @param seed The seed for the random number generator.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nfunction truncatedNormal_(\n shape: ShapeMap[R], mean = 0, stdDev = 1, dtype?: 'float32'|'int32',\n seed?: number): Tensor {\n assertNonNegativeIntegerDimensions(shape);\n if (dtype != null && (dtype as DataType) === 'bool') {\n throw new Error(`Unsupported data type $ { dtype }`);\n }\n const randGauss =\n new MPRandGauss(mean, stdDev, dtype, true /* truncated */, seed);\n const res = buffer(shape, dtype);\n for (let i = 0; i < res.values.length; i++) {\n res.values[i] = randGauss.nextValue();\n }\n return res.toTensor();\n}\n\nexport const truncatedNormal = /* @__PURE__ */ op({truncatedNormal_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Unique, UniqueAttrs, UniqueInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor, Tensor1D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {assert} from '../util';\n\nimport {op} from './operation';\n\n/**\n * Finds unique elements along an axis of a tensor.\n *\n * It returns a tensor `values` containing all of the unique elements along the\n * `axis` of the given tensor `x` in the same order that they occur along the\n * `axis` in `x`; `x` does not need to be sorted. It also returns a tensor\n * `indices` the same size as the number of the elements in `x` along the `axis`\n * dimension. It contains the index in the unique output `values`.\n *\n * ```js\n * // A 1-D tensor\n * const a = tf.tensor1d([1, 1, 2, 4, 4, 4, 7, 8, 8]);\n * const {values, indices} = tf.unique(a);\n * values.print(); // [1, 2, 4, 7, 8,]\n * indices.print(); // [0, 0, 1, 2, 2, 2, 3, 4, 4]\n * ```\n *\n * ```js\n * // A 2-D tensor with axis=0\n * //\n * // 'a' is: [[1, 0, 0],\n * // [1, 0, 0],\n * // [2, 0, 0]]\n * const a = tf.tensor2d([[1, 0, 0], [1, 0, 0], [2, 0, 0]]);\n * const {values, indices} = tf.unique(a, 0)\n * values.print(); // [[1, 0, 0],\n * // [2, 0, 0]]\n * indices.print(); // [0, 0, 1]\n * ```\n *\n * ```js\n * // A 2-D tensor with axis=1\n * //\n * // 'a' is: [[1, 0, 0],\n * // [1, 0, 0],\n * // [2, 0, 0]]\n * const a = tf.tensor2d([[1, 0, 0], [1, 0, 0], [2, 0, 0]]);\n * const {values, indices} = tf.unique(a, 1)\n * values.print(); // [[1, 0],\n * // [1, 0],\n * // [2, 0]]\n * indices.print(); // [0, 1, 1]\n * ```\n * @param x A tensor (int32, string, bool).\n * @param axis The axis of the tensor to find the unique elements.\n * @returns [uniqueElements, indices] (see above for details)\n *\n * @doc {heading: 'Operations', subheading: 'Evaluation'}\n */\nfunction unique_(\n x: T|TensorLike, axis = 0): {values: T, indices: Tensor1D} {\n const $x = convertToTensor(x, 'x', 'unique', 'string_or_numeric');\n assert($x.rank > 0, () => 'The input tensor must be at least 1D');\n\n const inputs: UniqueInputs = {x: $x};\n const attrs: UniqueAttrs = {axis};\n const [values, indices] = ENGINE.runKernel(\n Unique, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as [T, Tensor1D];\n return {values, indices};\n}\n\nexport const unique = /* @__PURE__ */ op({unique_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {UnsortedSegmentSum, UnsortedSegmentSumAttrs, UnsortedSegmentSumInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor, Tensor1D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {assert, isInt} from '../util';\n\nimport {op} from './operation';\n\n/**\n * Computes the sum along segments of a `tf.Tensor`.\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 3, 4]);\n * const segmentIds = tf.tensor1d([1, 2, 0, 1], 'int32');\n * const numSegments = 3;\n *\n * x.unsortedSegmentSum(segmentIds, numSegments).print()\n * //or tf.unsortedSegmentSum(x, segmentIds, numSegments)\n * ```\n * @param x The `tf.Tensor` that will be summed along its segments.\n * @param segmentIds A `tf.Tensor1D` whose rank is equal to the rank of `x`'s\n * dimension along the `axis`. Maps each element of `x` to a segment.\n * @param numSegments The number of distinct `segmentIds`.\n *\n * @doc {heading: 'Operations', subheading: 'Segment'}\n */\nfunction unsortedSegmentSum_(\n x: T|TensorLike, segmentIds: Tensor1D|TensorLike, numSegments: number): T {\n const $x = convertToTensor(x, 'x', 'unsortedSegmentSum');\n const $segmentIds =\n convertToTensor(segmentIds, 'segmentIds', 'unsortedSegmentSum', 'int32');\n assert(isInt(numSegments), () => 'numSegments must be of dtype int');\n\n const inputs: UnsortedSegmentSumInputs = {x: $x, segmentIds: $segmentIds};\n const attrs: UnsortedSegmentSumAttrs = {numSegments};\n\n return ENGINE.runKernel(\n UnsortedSegmentSum, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const unsortedSegmentSum = /* @__PURE__ */ op({unsortedSegmentSum_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Unpack, UnpackAttrs, UnpackInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {op} from './operation';\n\n/**\n * Unstacks a `tf.Tensor` of rank-`R` into a list of rank-`(R-1)` `tf.Tensor`s.\n *\n * ```js\n * const a = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n *\n * tf.unstack(a).forEach(tensor => tensor.print());\n * ```\n *\n * @param x A tensor object.\n * @param axis The axis to unstack along. Defaults to 0 (the first dim).\n *\n * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}\n */\nfunction unstack_(x: Tensor|TensorLike, axis = 0): Tensor[] {\n const $x = convertToTensor(x, 'x', 'unstack', 'string_or_numeric');\n util.assert(\n axis >= -$x.shape.length && axis < $x.shape.length,\n () =>\n `Axis = ${axis} is not in [-${$x.shape.length}, ${$x.shape.length})`);\n\n const inputs: UnpackInputs = {value: $x};\n const attrs: UnpackAttrs = {axis};\n\n return ENGINE.runKernel(\n Unpack, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const unstack = /* @__PURE__ */ op({unstack_});\n","/**\n * @license\n * Copyright 2022 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {TensorLike} from '../types';\nimport {searchSorted} from './search_sorted';\n\n/**\n * Searches for where a value would go in a sorted sequence.\n *\n * This is not a method for checking containment (like javascript in).\n *\n * The typical use case for this operation is \"binning\", \"bucketing\", or\n * \"discretizing\". The values are assigned to bucket-indices based on the edges\n * listed in 'sortedSequence'. This operation returns the bucket-index for each\n * value.\n *\n * The index returned corresponds to the first edge greater than the value.\n *\n * The axis is not settable for this operation. It always operates on the\n * innermost dimension (axis=-1). 
The operation will accept any number of outer\n * dimensions.\n *\n * Note: This operation assumes that 'upperBound' is sorted along the\n * innermost axis, maybe using 'sort(..., axis=-1)'. If the sequence is not\n * sorted no error is raised and the content of the returned tensor is not well\n * defined.\n *\n * ```js\n * const seq = tf.tensor1d([0, 3, 9, 10, 10]);\n * const values = tf.tensor1d([0, 4, 10]);\n * const result = tf.upperBound(seq, values);\n * result.print(); // [1, 2, 5]\n * ```\n * @param sortedSequence: N-D. Sorted sequence.\n * @param values: N-D. Search values.\n * @return An N-D int32 tensor the size of values containing the result of\n * applying upper bound to each value. The result is not a global index to\n * the entire Tensor, but the index in the last dimension.\n * @doc {heading: 'Operations', subheading: 'Evaluation'}\n */\nexport function upperBound(\n sortedSequence: Tensor|TensorLike, values: Tensor|TensorLike): Tensor {\n return searchSorted(sortedSequence, values, 'right');\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {Tensor, Variable} from '../tensor';\nimport {DataType, Rank} from '../types';\n\n/**\n * Creates a new variable with the provided initial value.\n * ```js\n * const x = tf.variable(tf.tensor([1, 2, 3]));\n * x.assign(tf.tensor([4, 5, 6]));\n *\n * x.print();\n * ```\n *\n * @param initialValue Initial value for the tensor.\n * @param trainable If true, optimizers are allowed to update it.\n * @param name Name of the variable. Defaults to a unique id.\n * @param dtype If set, initialValue will be converted to the given type.\n *\n * @doc {heading: 'Tensors', subheading: 'Creation'}\n */\nexport function variable(\n initialValue: Tensor, trainable = true, name?: string,\n dtype?: DataType): Variable {\n return ENGINE.makeVariable(initialValue, trainable, name, dtype) as\n Variable;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/** An implementation of the Where kernel shared between cpu and webgl */\n\nimport {buffer} from '../ops/buffer';\nimport {Tensor2D} from '../tensor';\nimport {TypedArray} from '../types';\n\nexport function whereImpl(condShape: number[], condVals: TypedArray): Tensor2D {\n const indices = [];\n for (let i = 0; i < condVals.length; i++) {\n if (condVals[i]) {\n indices.push(i);\n }\n }\n\n const inBuffer = buffer(condShape, 'int32');\n\n const out = buffer([indices.length, condShape.length], 'int32');\n for (let i = 0; i < indices.length; i++) {\n const loc = inBuffer.indexToLoc(indices[i]);\n const offset = i * condShape.length;\n out.values.set(loc, offset);\n }\n return out.toTensor() as Tensor2D;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {whereImpl} from '../backends/where_impl';\nimport {Tensor, Tensor2D} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\n\n/**\n * Returns the coordinates of true elements of condition.\n *\n * The coordinates are returned in a 2-D tensor where the first dimension (rows)\n * represents the number of true elements, and the second dimension (columns)\n * represents the coordinates of the true elements. Keep in mind, the shape of\n * the output tensor can vary depending on how many true values there are in\n * input. Indices are output in row-major order. The resulting tensor has the\n * shape `[numTrueElems, condition.rank]`.\n *\n * This is analogous to calling the python `tf.where(cond)` without an x or y.\n *\n * ```js\n * const cond = tf.tensor1d([false, false, true], 'bool');\n * const result = await tf.whereAsync(cond);\n * result.print();\n * ```\n *\n * @doc {heading: 'Operations', subheading: 'Logical'}\n */\nasync function whereAsync_(condition: Tensor|TensorLike): Promise {\n const $condition =\n convertToTensor(condition, 'condition', 'whereAsync', 'bool');\n const vals = await $condition.data();\n const res = whereImpl($condition.shape, vals);\n if (condition !== $condition) {\n $condition.dispose();\n }\n return res;\n}\n\nexport const whereAsync = whereAsync_;\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {gather} from './gather';\nimport {reshape} from './reshape';\nimport {squeeze} from './squeeze';\nimport {whereAsync} from './where_async';\n\n/**\n * Apply boolean mask to tensor.\n *\n * ```js\n * const tensor = tf.tensor2d([1, 2, 3, 4, 5, 6], [3, 2]);\n * const mask = tf.tensor1d([1, 0, 1], 'bool');\n * const result = await tf.booleanMaskAsync(tensor, mask);\n * result.print();\n * ```\n *\n * @param tensor N-D tensor.\n * @param mask K-D boolean tensor, K <= N and K must be known statically.\n * @param axis A 0-D int Tensor representing the axis in tensor to mask from.\n * By default, axis is 0 which will mask from the first dimension.\n * Otherwise K + axis <= N.\n *\n * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}\n */\nasync function booleanMaskAsync_(\n tensor: Tensor|TensorLike, mask: Tensor|TensorLike,\n axis?: number): Promise {\n const $tensor = convertToTensor(tensor, 'tensor', 'boolMask');\n const $mask = convertToTensor(mask, 'mask', 'boolMask', 'bool');\n\n const axisFrom = axis == null ? 0 : axis;\n const maskDim = $mask.rank;\n const tensorShape = $tensor.shape;\n\n util.assert(maskDim > 0, () => 'mask cannot be scalar');\n util.assertShapesMatch(\n tensorShape.slice(axisFrom, axisFrom + maskDim), $mask.shape,\n `mask's shape must match the first K dimensions of tensor's shape,`);\n\n let leadingSize = 1;\n for (let i = axisFrom; i < axisFrom + maskDim; i++) {\n leadingSize *= tensorShape[i];\n }\n const targetTensorShape =\n tensorShape.slice(0, axisFrom)\n .concat([leadingSize], tensorShape.slice(axisFrom + maskDim));\n const reshapedTensor = reshape($tensor, targetTensorShape);\n const reshapedMask = reshape($mask, [-1]);\n const positivePositions = await whereAsync(reshapedMask);\n const indices = squeeze(positivePositions, [1]);\n\n const res = gather(reshapedTensor, indices, axisFrom);\n\n // Ensure no memory leak.\n if (tensor !== $tensor) {\n $tensor.dispose();\n }\n if (mask !== $mask) {\n $mask.dispose();\n }\n indices.dispose();\n reshapedTensor.dispose();\n reshapedMask.dispose();\n positivePositions.dispose();\n\n return res;\n}\n\nexport const booleanMaskAsync = booleanMaskAsync_;\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Scalar, Tensor} from '../tensor';\nimport {assertTypesMatch} from '../tensor_util';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {add} from './add';\nimport {div} from './div';\nimport {mul} from './mul';\nimport {op} from './operation';\nimport {pow} from './pow';\nimport {scalar} from './scalar';\nimport {sub} from './sub';\n\n/**\n * Compute the moving average of a variable.\n *\n * Without zeroDebias, the moving average operation is defined by:\n * `v += delta`\n * where\n * `delta = (1 - decay) * (x - v)`\n *\n * With zeroDebias (default), the `delta` term is scaled to debias the\n * effect of the (assumed) zero-initialization of `v`.\n * `delta /= (1 - decay ^ step)`\n *\n * For more details on the zero-debiasing algorithm, see:\n * https://arxiv.org/abs/1412.6980\n *\n * Note that this function is completely stateless and does not keep track of\n * step count. The step count needs to be maintained by the caller and passed\n * in as `step`.\n *\n * @param v The current moving average value.\n * @param x New input value, must have the same shape and dtype as `v`.\n * @param decay The decay factor. Typical values are 0.95 and 0.99.\n * @param step Step count.\n * @param zeroDebias: Whether zeroDebias is to be performed (default: `true`).\n * @returns The new moving average value.\n *\n * @doc {heading: 'Operations', subheading: 'Moving Average'}\n */\nfunction movingAverage_(\n v: T|TensorLike, x: T|TensorLike, decay: number|Scalar,\n step?: number|Scalar, zeroDebias = true): T {\n const $v = convertToTensor(v, 'v', 'movingAverage');\n const $x = convertToTensor(x, 'x', 'movingAverage');\n const $decay = convertToTensor(decay, 'decay', 'movingAverage');\n\n assertTypesMatch($v, $x);\n util.assert(\n util.arraysEqual($v.shape, $x.shape), () => 'Shape mismatch in v and x');\n\n const one = scalar(1);\n const oneMinusDecay = sub(one, $decay);\n\n let update = mul(sub($x, $v), oneMinusDecay);\n if (zeroDebias) {\n util.assert(\n step != null, () => 'When using zeroDebias: true, step is required.');\n const $step = convertToTensor(step, 'step', 'movingAverage');\n update = div(update, sub(one, pow($decay, $step)));\n }\n return add($v, update);\n}\n\nexport const movingAverage = /* @__PURE__ */ op({movingAverage_});\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { TensorInfo } from '../tensor_info';\nimport {Tensor} from '../tensor';\nimport {computeStrides, sizeFromShape} from '../util';\n\n/**\n * Check whether updates.shape = indices.shape[:batchDim] +\n * shape[sliceDim:]\n *\n * @param x The input tensor.\n */\nexport function validateUpdateShape(\n shape: number[], indices: Tensor, updates: Tensor) {\n const sliceDim = (indices.rank > 1) ? indices.shape[indices.rank - 1] : 1;\n const batchDim = (indices.rank > 1) ? indices.rank - 1 : 1;\n\n const shapeError = 'Must have updates.shape = indices.shape[:batchDim] + ' +\n `shape[sliceDim:], got updates.shape: ${updates.shape}` +\n `, indices.shape: ${indices.shape}, shape: ${shape}` +\n `, sliceDim: ${sliceDim}, and batchDim: ${batchDim}.`;\n\n if (updates.rank < batchDim) {\n throw new Error(shapeError + ` update.rank < ${batchDim}. `);\n }\n if (shape.length < sliceDim + (updates.rank - batchDim)) {\n throw new Error(\n shapeError +\n ` Output shape length < ${sliceDim + (updates.rank - batchDim)}`);\n }\n if (updates.rank !== batchDim + shape.length - sliceDim) {\n throw new Error(\n shapeError + ` update.rank != ${batchDim + shape.length - sliceDim}`);\n }\n for (let d = 0; d < batchDim; ++d) {\n if (updates.shape[d] !== indices.shape[d]) {\n throw new Error(\n shapeError +\n ` updates.shape[${d}] (${updates.shape[d]}) != indices.shape[${d}] (${\n indices.shape[d]}).`);\n }\n }\n for (let d = 0; d < updates.rank - batchDim; ++d) {\n if (updates.shape[d + batchDim] !== shape[d + sliceDim]) {\n throw new Error(\n shapeError +\n ` updates.shape[${d + batchDim}] (${\n updates.shape[d + batchDim]}) != shape[${d + batchDim}] (${\n shape[d + batchDim]})`);\n }\n }\n}\n\nexport interface ScatterShapeInfo {\n sliceRank: number;\n numUpdates: number;\n sliceSize: number;\n strides: number[];\n outputSize: number;\n}\n/**\n * Validate scatter nd inputs.\n *\n * @param update The tensor contains the update values.\n * @param indices The tensor contains the indices for the update values.\n * @param shape The shape of the output tensor.\n */\nexport function validateInput(\n updates: Tensor, indices: Tensor, shape: number[]) {\n if (indices.rank < 1) {\n throw new Error(\n 'tf.scatterND() expects the indices to be rank 1 or higher,' +\n ` but the rank was ${indices.rank}.`);\n }\n if (updates.rank < 1) {\n throw new Error(\n 'tf.scatterND() expects the updates to be rank 1 or higher,' +\n ` but the rank was ${updates.rank}.`);\n }\n if (indices.dtype !== 'int32') {\n throw new Error(`The dtype of 'indices' should be int32, but got dtype: ${\n indices.dtype}`);\n }\n if (shape.length < 1) {\n throw new Error(\n `Output rank must be greater or equal to 1, but got shape: ${shape}`);\n }\n\n if (shape.length === 0) {\n if (indices.size === 0) {\n throw new Error(`Indices specified for empty output. 
indices shape: ${\n indices.shape}`);\n }\n if (updates.size === 0) {\n throw new Error(`Updates specified for empty output. updates shape: ${\n updates.shape}`);\n }\n }\n\n validateUpdateShape(shape, indices, updates);\n}\n\n/**\n * Calculate the shape information for the output.\n *\n * @param update The tensor contains the update values.\n * @param indices The tensor contains the indices for the update values.\n * @param shape The shape of the output tensor.\n *\n * @returns ScatterShapeInfo\n */\nexport function calculateShapes(\n updates: TensorInfo, indices: TensorInfo,\n shape: number[]): ScatterShapeInfo {\n // Calculate the number of dimensions in indices\n const indicesRank = indices.shape.length;\n const sliceRank = (indicesRank > 1) ? indices.shape[indicesRank - 1] : 1;\n\n // Calculate the number of elements that make up each slice of our updated\n // tensor. This allows us to work with flattened tensors and copy over whole\n // slices at a time.\n const totalNd = shape.length;\n\n let sliceSize = 1;\n for (let i = sliceRank; i < totalNd; ++i) {\n sliceSize *= shape[i];\n }\n\n const safeSliceDim = (sliceRank < 1) ? 1 : sliceRank;\n const numUpdates = sizeFromShape(indices.shape) / safeSliceDim;\n\n const strides = [...computeStrides(shape.slice(0, sliceRank)), 1];\n const outputSize = sizeFromShape(shape);\n return {sliceRank, numUpdates, sliceSize, strides, outputSize};\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {ScatterNd, ScatterNdAttrs, ScatterNdInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {Rank, ShapeMap, TensorLike} from '../types';\nimport {assertNonNegativeIntegerDimensions} from '../util_base';\n\nimport {op} from './operation';\nimport * as scatter_nd_util from './scatter_nd_util';\n\n/**\n * Creates a new tensor by applying sparse updates to individual\n * values or slices within a zero tensor of the given shape tensor according to\n * indices. 
This operator is the inverse of the `tf.gatherND` operator which\n * extracts values or slices from a given tensor.\n *\n * ```js\n * const indices = tf.tensor2d([4, 3, 1, 7], [4, 1], 'int32');\n * const updates = tf.tensor1d([9, 10, 11, 12]);\n * const shape = [8];\n * tf.scatterND(indices, updates, shape).print() //[0, 11, 0, 10, 9, 0, 0, 12]\n * ```\n *\n * @param indices The tensor contains the indices into the output tensor.\n * @param updates The tensor contains the value for the indices.\n * @param shape: The shape of the output tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Slicing and Joining'}\n */\nfunction scatterND_(\n indices: Tensor|TensorLike, updates: Tensor|TensorLike,\n shape: ShapeMap[R]): Tensor {\n assertNonNegativeIntegerDimensions(shape);\n const $indices = convertToTensor(indices, 'indices', 'scatterND', 'int32');\n const $updates = convertToTensor(updates, 'updates', 'scatterND');\n scatter_nd_util.validateInput($updates, $indices, shape);\n\n const inputs: ScatterNdInputs = {indices: $indices, updates: $updates};\n const attrs: ScatterNdAttrs = {shape};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n return ENGINE.runKernel(\n ScatterNd, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as Tensor;\n}\n\nexport const scatterND = /* @__PURE__ */ op({scatterND_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {SparseToDense, SparseToDenseAttrs, SparseToDenseInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport * as sparse_to_dense from '../ops/sparse_to_dense_util';\nimport {Scalar, Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {Rank, ScalarLike, ShapeMap, TensorLike} from '../types';\nimport {assertNonNegativeIntegerDimensions} from '../util_base';\n\nimport {op} from './operation';\n\n/**\n * Converts a sparse representation into a dense tensor.\n *\n * Builds an array dense with shape outputShape such that:\n *\n * // If sparseIndices is scalar\n * dense[i] = (i == sparseIndices ? sparseValues : defaultValue)\n *\n * // If sparseIndices is a vector, then for each i\n * dense[sparseIndices[i]] = sparseValues[i]\n *\n * // If sparseIndices is an n by d matrix, then for each i in [0, n)\n * dense[sparseIndices[i][0], ..., sparseIndices[i][d-1]] = sparseValues[i]\n * All other values in dense are set to defaultValue. 
If sparseValues is a\n * scalar, all sparse indices are set to this single value.\n *\n * If indices are repeated the final value is summed over all values for those\n * indices.\n *\n * ```js\n * const indices = tf.tensor1d([4, 5, 6, 1, 2, 3], 'int32');\n * const values = tf.tensor1d([10, 11, 12, 13, 14, 15], 'float32');\n * const shape = [8];\n * tf.sparseToDense(indices, values, shape).print();\n * ```\n *\n * @param sparseIndices A 0-D, 1-D, or 2-D Tensor of type int32.\n * sparseIndices[i] contains the complete index where sparseValues[i] will be\n * placed.\n * @param sparseValues A 0-D or 1-D Tensor. Values\n * corresponding to each row of sparseIndices, or a scalar value to be used for\n * all sparse indices.\n * @param outputShape Shape of the dense output tensor. The type is inferred.\n * @param defaultValue Scalar. Value to set for indices not specified in\n * sparseIndices. Defaults to zero.\n *\n * @doc {heading: 'Operations', subheading: 'Normalization'}\n */\nfunction sparseToDense_(\n sparseIndices: Tensor|TensorLike, sparseValues: Tensor|TensorLike,\n outputShape: ShapeMap[R], defaultValue: Scalar|ScalarLike = 0): Tensor {\n assertNonNegativeIntegerDimensions(outputShape);\n\n const $sparseIndices =\n convertToTensor(sparseIndices, 'sparseIndices', 'sparseToDense', 'int32');\n const $sparseValues = convertToTensor(\n sparseValues, 'sparseValues', 'sparseToDense', 'string_or_numeric');\n const $defaultValue = convertToTensor(\n defaultValue, 'defaultValue', 'sparseToDense', $sparseValues.dtype);\n\n sparse_to_dense.validateInput(\n $sparseIndices, $sparseValues, outputShape, $defaultValue);\n\n const inputs: SparseToDenseInputs = {\n sparseIndices: $sparseIndices,\n sparseValues: $sparseValues,\n defaultValue: $defaultValue\n };\n\n const attrs: SparseToDenseAttrs = {outputShape};\n\n return ENGINE.runKernel(\n SparseToDense, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const sparseToDense = /* @__PURE__ */ op({sparseToDense_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor} from '../tensor';\n\n/**\n * Validate sparseToDense inputs.\n *\n * @param sparseIndices A 0-D, 1-D, or 2-D Tensor of type int32.\n * sparseIndices[i] contains the complete index where sparseValues[i] will be\n * placed.\n * @param sparseValues A 0-D or 1-D Tensor. Values\n * corresponding to each row of sparseIndices, or a scalar value to be used for\n * all sparse indices.\n * @param outputShape number[]. Shape of the dense output tensor.\n * @param validateIndices boolean. 
indice validation is not supported, error\n * will be thrown if it is set.\n */\nexport function validateInput(\n sparseIndices: Tensor, sparseValues: Tensor, outputShape: number[],\n defaultValues: Tensor) {\n if (sparseIndices.dtype !== 'int32') {\n throw new Error(\n 'tf.sparseToDense() expects the indices to be int32 type,' +\n ` but the dtype was ${sparseIndices.dtype}.`);\n }\n if (sparseIndices.rank > 2) {\n throw new Error(\n 'sparseIndices should be a scalar, vector, or matrix,' +\n ` but got shape ${sparseIndices.shape}.`);\n }\n\n const numElems = sparseIndices.rank > 0 ? sparseIndices.shape[0] : 1;\n const numDims = sparseIndices.rank > 1 ? sparseIndices.shape[1] : 1;\n\n if (outputShape.length !== numDims) {\n throw new Error(\n 'outputShape has incorrect number of elements:,' +\n ` ${outputShape.length}, should be: ${numDims}.`);\n }\n\n const numValues = sparseValues.size;\n if (!(sparseValues.rank === 0 ||\n sparseValues.rank === 1 && numValues === numElems)) {\n throw new Error(\n 'sparseValues has incorrect shape ' +\n `${sparseValues.shape}, should be [] or [${numElems}]`);\n }\n\n if (sparseValues.dtype !== defaultValues.dtype) {\n throw new Error('sparseValues.dtype must match defaultValues.dtype');\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {GatherNd, GatherNdInputs} from '../kernel_names';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {op} from './operation';\n\n/**\n * Gather slices from input tensor into a Tensor with shape specified by\n * `indices`.\n *\n * `indices` is a K-dimensional integer tensor, best thought of as a\n * (K-1)-dimensional tensor of indices into input, where each element defines a\n * slice of input:\n * output[\\\\(i_0, ..., i_{K-2}\\\\)] = input[indices[\\\\(i_0, ..., i_{K-2}\\\\)]]\n *\n * Whereas in `tf.gather`, `indices` defines slices into the first dimension of\n * input, in `tf.gatherND`, `indices` defines slices into the first N dimensions\n * of input, where N = indices.shape[-1].\n *\n * The last dimension of indices can be at most the rank of input:\n * indices.shape[-1] <= input.rank\n *\n * The last dimension of `indices` corresponds to elements\n * (if indices.shape[-1] == input.rank) or slices\n * (if indices.shape[-1] < input.rank) along dimension indices.shape[-1] of\n * input.\n * The output tensor has shape\n * indices.shape[:-1] + input.shape[indices.shape[-1]:]\n *\n * Note that on CPU, if an out of bound index is found, an error is returned. 
On\n * GPU, if an out of bound index is found, a 0 is stored in the corresponding\n * output value.\n *\n * ```js\n * const indices = tf.tensor2d([0, 1, 1, 0], [2,2], 'int32');\n * const input = tf.tensor2d([9, 10, 11, 12], [2, 2]);\n * tf.gatherND(input, indices).print() // [10, 11]\n * ```\n *\n * @param x The tensor from which to gather values.\n * @param indices Index tensor, must be of type int32.\n *\n * @doc {heading: 'Operations', subheading: 'Slicing and Joining'}\n */\nfunction gatherND_(x: Tensor|TensorLike, indices: Tensor|TensorLike): Tensor {\n const $indices = convertToTensor(indices, 'indices', 'gatherND', 'int32');\n const $x = convertToTensor(x, 'x', 'gatherND', 'string_or_numeric');\n\n const inputs: GatherNdInputs = {params: $x, indices: $indices};\n\n return ENGINE.runKernel(GatherNd, inputs as unknown as NamedTensorMap);\n}\n\nexport const gatherND = /* @__PURE__ */ op({gatherND_});\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {add} from './add';\nimport {div} from './div';\nimport {getNoiseShape} from './dropout_util';\nimport {floor} from './floor';\nimport {mul} from './mul';\nimport {op} from './operation';\nimport {randomUniform} from './random_uniform';\n\n/**\n * Computes dropout.\n *\n * ```js\n * const x = tf.tensor1d([1, 2, 2, 1]);\n * const rate = 0.75;\n * const output = tf.dropout(x, rate);\n * output.print();\n * ```\n *\n * @param x A floating point Tensor or TensorLike.\n * @param rate A float in the range [0, 1). The probability that each element\n * of x is discarded.\n * @param noiseShape An array of numbers of type int32, representing the\n * shape for randomly generated keep/drop flags. If the noiseShape has null\n * value, it will be automatically replaced with the x's relative dimension\n * size. Optional.\n * @param seed Used to create random seeds. Optional.\n * @returns A Tensor of the same shape of x.\n *\n * @doc {heading: 'Operations', subheading: 'Dropout'}\n */\nfunction dropout_(\n x: Tensor|TensorLike, rate: number, noiseShape?: number[],\n seed?: number|string): Tensor {\n const $x = convertToTensor(x, 'x', 'dropout');\n\n util.assert(\n $x.dtype === 'float32',\n () => `x has to be a floating point tensor since it's going to be ` +\n `scaled, but got a ${$x.dtype} tensor instead.`);\n util.assert(\n rate >= 0 && rate < 1,\n () => `rate must be a float in the range [0, 1), but got ${rate}.`);\n\n if (rate === 0) {\n return x instanceof Tensor ? 
$x.clone() : $x;\n }\n\n const $noiseShape = getNoiseShape($x, noiseShape);\n const keepProb = 1 - rate;\n const multiplier = div(\n floor(add(randomUniform($noiseShape, 0, 1, 'float32', seed), keepProb)),\n keepProb);\n\n return mul($x, multiplier);\n}\n\nexport const dropout = /* @__PURE__ */ op({dropout_});\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport * as util from '../util';\n\n/**\n * Normalize noise shape based on provided tensor and noise shape.\n *\n * @param x Tensor.\n * @param noiseShape The shape for the randomly generated keep/drop flags, as\n * an array of numbers. Optional.\n * @returns Normalized noise shape.\n */\nexport function getNoiseShape(x: Tensor, noiseShape?: number[]): number[] {\n if (noiseShape == null) {\n return x.shape.slice();\n }\n if (util.arraysEqual(x.shape, noiseShape)) {\n return noiseShape;\n }\n if (x.shape.length === noiseShape.length) {\n const newDimension: number[] = [];\n for (let i = 0; i < x.shape.length; i++) {\n if (noiseShape[i] == null && x.shape[i] != null) {\n newDimension.push(x.shape[i]);\n } else {\n newDimension.push(noiseShape[i]);\n }\n }\n return newDimension;\n }\n\n return noiseShape;\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor1D} from '../tensor';\nimport {tensor1d} from './tensor1d';\n\nexport function enclosingPowerOfTwo(value: number) {\n // Return 2**N for integer N such that 2**N >= value.\n return Math.floor(Math.pow(2, Math.ceil(Math.log(value) / Math.log(2.0))));\n}\n\nexport function cosineWindow(\n windowLength: number, a: number, b: number): Tensor1D {\n const even = 1 - windowLength % 2;\n const newValues = new Float32Array(windowLength);\n for (let i = 0; i < windowLength; ++i) {\n const cosArg = (2.0 * Math.PI * i) / (windowLength + even - 1);\n newValues[i] = a - b * Math.cos(cosArg);\n }\n return tensor1d(newValues, 'float32');\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport {assert, assertShapesMatch, getTypedArrayFromDType} from '../util';\nimport {tensor} from './tensor';\n\n/**\n * Returns whether the targets are in the top K predictions.\n *\n * ```js\n * const predictions = tf.tensor2d([[20, 10, 40, 30], [30, 50, -20, 10]]);\n * const targets = tf.tensor1d([2, 0]);\n * const precision = await tf.inTopKAsync(predictions, targets);\n * precision.print();\n * ```\n * @param predictions 2-D or higher `tf.Tensor` with last dimension being\n * at least `k`.\n * @param targets 1-D or higher `tf.Tensor`.\n * @param k Optional Number of top elements to look at for computing precision,\n * default to 1.\n *\n * @doc {heading: 'Operations', subheading: 'Evaluation'}\n */\nasync function inTopKAsync_(\n predictions: T|TensorLike, targets: U|TensorLike, k = 1): Promise {\n const $predictions = convertToTensor(predictions, 'predictions', 'inTopK');\n const $targets = convertToTensor(targets, 'targets', 'inTopK');\n\n assert(\n $predictions.rank > 1,\n () => 'inTopK() expects the predictions to be of rank 2 or higher, ' +\n `but got ${$predictions.rank}`);\n assert(\n $predictions.rank - 1 === $targets.rank,\n () => `predictions rank should be 1 larger than ` +\n `targets rank, but got predictions rank ` +\n `${$predictions.rank} and targets rank ${$targets.rank}`);\n assertShapesMatch(\n $predictions.shape.slice(0, $predictions.shape.length - 1),\n $targets.shape,\n `predictions's shape should be align with the targets' shape, ` +\n 'except the last dimension.');\n const lastDim = $predictions.shape[$predictions.shape.length - 1];\n assert(\n k > 0 && k <= lastDim,\n () => `'k' passed to inTopK() must be > 0 && <= the predictions last ` +\n `dimension (${lastDim}), but got ${k}`);\n\n const predictionsVals = await $predictions.data();\n const targetsVals = await $targets.data();\n\n // Reshape predictionsVals into a 2d tensor [batch, lastDim]\n // and look up topK along lastDim.\n const [batch, size] = [predictionsVals.length / lastDim, lastDim];\n const precision = getTypedArrayFromDType('bool', batch);\n\n for (let b = 0; b < batch; b++) {\n const offset = b * size;\n const vals = predictionsVals.subarray(offset, offset + size);\n const valAndInd: Array<{value: number, index: number}> = [];\n for (let i = 0; i < vals.length; i++) {\n valAndInd.push({value: vals[i], index: i});\n }\n valAndInd.sort((a, b) => b.value - a.value);\n\n precision[b] = 0;\n for (let i = 0; i < k; i++) {\n if (valAndInd[i].index === targetsVals[b]) {\n precision[b] = 1;\n break;\n }\n }\n }\n\n if (predictions !== $predictions) {\n $predictions.dispose();\n }\n if (targets !== $targets) {\n $targets.dispose();\n }\n\n // Output precision has the same shape as targets.\n return tensor(precision, 
$targets.shape, 'bool') as U;\n}\n\nexport const inTopKAsync = inTopKAsync_;\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Conv2DBackpropFilter, Conv2DBackpropFilterAttrs, Conv2DBackpropFilterInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor3D, Tensor4D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport * as util from '../util';\n\nimport * as conv_util from './conv_util';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Computes the derivative of the filter of a 2D convolution.\n *\n * @param x The input tensor, of rank 4 or rank 3 of shape\n * [batch, height, width, inChannels]. If rank 3, batch of 1 is assumed.\n * @param dy The dy image, of rank 4 or rank 3, of shape\n * [batch, height, width, outDepth]. If rank 3, batch of 1 is assumed.\n * @param filterShape The shape of the filter, length 4,\n * [filterHeight, filterWidth, inDepth, outDepth].\n * @param strides The strides of the convolution: [strideHeight,\n * strideWidth].\n * @param pad A string from: 'same', 'valid'. The type of padding algorithm\n * used in the forward prop of the op.\n * @param dataFormat: An optional string from: \"NHWC\", \"NCHW\". Defaults to\n * \"NHWC\". Specify the data format of the input and output data. With the\n * default format \"NHWC\", the data is stored in the order of: [batch,\n * height, width, channels].\n * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is\n * provided, it will default to truncate.\n */\nfunction conv2DBackpropFilter_(\n x: T, dy: T, filterShape: [number, number, number, number],\n strides: [number, number]|number,\n pad: 'valid'|'same'|number|conv_util.ExplicitPadding,\n dataFormat: 'NHWC'|'NCHW' = 'NHWC',\n dimRoundingMode?: 'floor'|'round'|'ceil'): Tensor4D {\n let x4D = x as Tensor4D;\n if (x.rank === 3) {\n x4D = reshape(x, [1, x.shape[0], x.shape[1], x.shape[2]]);\n }\n let dy4D = dy as Tensor4D;\n if (dy4D.rank === 3) {\n dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]);\n }\n util.assert(\n x4D.rank === 4,\n () => `Error in conv2dDerFilter: input must be rank 4, but got shape ` +\n `${x4D.shape}.`);\n util.assert(\n dy4D.rank === 4,\n () => `Error in conv2dDerFilter: dy must be rank 4, but got shape ` +\n `${dy4D.shape}.`);\n util.assert(\n filterShape.length === 4,\n () => `Error in conv2dDerFilter: filterShape must be length 4, but got ` +\n `${filterShape}.`);\n const inDepth = dataFormat === 'NHWC' ? x4D.shape[3] : x4D.shape[1];\n const outDepth = dataFormat === 'NHWC' ? 
dy4D.shape[3] : dy4D.shape[1];\n util.assert(\n inDepth === filterShape[2],\n () => `Error in conv2dDerFilter: depth of input ${inDepth}) must ` +\n `match input depth in filter (${filterShape[2]}.`);\n util.assert(\n outDepth === filterShape[3],\n () => `Error in conv2dDerFilter: depth of dy (${outDepth}) must ` +\n `match output depth for filter (${filterShape[3]}).`);\n conv_util.checkPadOnDimRoundingMode('conv2dDerFilter', pad, dimRoundingMode);\n const inputs: Conv2DBackpropFilterInputs = {x: x4D, dy: dy4D};\n const attrs: Conv2DBackpropFilterAttrs =\n {strides, pad, dataFormat, dimRoundingMode, filterShape};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n return ENGINE.runKernel(\n Conv2DBackpropFilter, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as Tensor4D;\n}\n\nexport const conv2DBackpropFilter = /* @__PURE__ */ op({conv2DBackpropFilter_});\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../tensor';\n\nimport * as broadcast_util from './broadcast_util';\nimport {elu} from './elu';\nimport {Activation} from './fused_types';\nimport {leakyRelu} from './leaky_relu';\nimport {mul} from './mul';\nimport {prelu} from './prelu';\nimport {relu} from './relu';\nimport {relu6} from './relu6';\nimport {reshape} from './reshape';\nimport {sigmoid} from './sigmoid';\nimport {step} from './step';\nimport {sum} from './sum';\n\n// Returns gradient for fused activation.\nexport function getFusedDyActivation(\n dy: Tensor, y: Tensor, activation: Activation): Tensor {\n if (activation == null || activation === 'linear') {\n return dy;\n }\n if (activation === 'relu') {\n return mul(dy, step(y));\n }\n throw new Error(\n `Cannot compute gradient for fused activation ${activation}.`);\n}\n\n// Returns gradient for fused bias.\nexport function getFusedBiasGradient(\n bias: Tensor, dyActivation: Tensor): Tensor {\n let res = dyActivation;\n const reduceAxes =\n broadcast_util.getReductionAxes(bias.shape, dyActivation.shape);\n if (reduceAxes.length > 0) {\n res = sum(res, reduceAxes);\n }\n return reshape(res, bias.shape);\n}\n\nexport function applyActivation(\n x: Tensor, activation: Activation, preluActivationWeights?: Tensor,\n leakyreluAlpha?: number): Tensor {\n if (activation === 'linear') {\n return x;\n } else if (activation === 'relu') {\n return relu(x);\n } else if (activation === 'elu') {\n return elu(x);\n } else if (activation === 'relu6') {\n return relu6(x);\n } else if (activation === 'prelu') {\n return prelu(x, preluActivationWeights);\n } else if (activation === 'leakyrelu') {\n return leakyRelu(x, leakyreluAlpha);\n } else if (activation === 'sigmoid') {\n return sigmoid(x);\n }\n throw new Error(`Unknown fused activation ${activation}.`);\n}\n\n// Whether we should call fused ops.\nexport const shouldFuse = (gradientDepth: number, activation: 
Activation) => {\n const gradientMode = gradientDepth > 0;\n return !gradientMode || activation === 'linear';\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {customGrad} from '../../gradients';\nimport {FusedConv2D, FusedConv2DAttrs, FusedConv2DInputs} from '../../kernel_names';\nimport {NamedAttrMap} from '../../kernel_registry';\nimport {Tensor, Tensor3D, Tensor4D} from '../../tensor';\nimport {GradSaveFunc, NamedTensorMap} from '../../tensor_types';\nimport {makeTypesMatch} from '../../tensor_util';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport * as util from '../../util';\nimport {add} from '../add';\nimport * as broadcast_util from '../broadcast_util';\nimport {conv2d as unfusedConv2d} from '../conv2d';\nimport {conv2DBackpropFilter} from '../conv2d_backprop_filter';\nimport {conv2DBackpropInput} from '../conv2d_backprop_input';\nimport * as conv_util from '../conv_util';\nimport {Activation} from '../fused_types';\nimport {applyActivation, getFusedBiasGradient, getFusedDyActivation, shouldFuse} from '../fused_util';\nimport {op} from '../operation';\nimport {reshape} from '../reshape';\n\n/**\n * Computes a 2D convolution over the input x, optionally fused with adding a\n * bias and applying an activation.\n *\n * ```js\n * const inputDepth = 2;\n * const inShape = [2, 2, 2, inputDepth];\n * const outputDepth = 2;\n * const fSize = 1;\n * const pad = 0;\n * const strides = 1;\n *\n * const x = tf.tensor4d( [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,\n * 16], inShape);\n * const w = tf.tensor4d([-1, 1, -2, 0.5], [fSize, fSize, inputDepth,\n * outputDepth]);\n *\n * tf.fused.conv2d({ x, filter: w, strides, pad, dataFormat: 'NHWC',\n * dilations: [1, 1], bias: tf.scalar(5), activation: 'relu' }).print();\n * ```\n *\n * @param obj An object with the following properties:\n * @param x The input tensor, of rank 4 or rank 3, of shape\n * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is\n * assumed.\n * @param filter The filter, rank 4, of shape\n * `[filterHeight, filterWidth, inDepth, outDepth]`.\n * @param strides The strides of the convolution: `[strideHeight,\n * strideWidth]`.\n * @param pad The type of padding algorithm.\n * - `same` and stride 1: output will be of same size as input,\n * regardless of filter size.\n * - `valid` output will be smaller than input if filter is larger\n * than 1x1.\n * - For more info, see this guide:\n * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](\n * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)\n * @param dataFormat An optional string from: \"NHWC\", \"NCHW\". Defaults to\n * \"NHWC\". Specify the data format of the input and output data. 
With the\n * default format \"NHWC\", the data is stored in the order of: [batch,\n * height, width, channels]. Only \"NHWC\" is currently supported.\n * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`\n * in which we sample input values across the height and width dimensions\n * in atrous convolution. Defaults to `[1, 1]`. If `dilations` is a single\n * number, then `dilationHeight == dilationWidth`. If it is greater than\n * 1, then all values of `strides` must be 1.\n * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is\n * provided, it will default to truncate.\n * @param bias Tensor to be added to the result.\n * @param activation Name of activation kernel (defaults to `linear`) to be\n * applied\n * after biasAdd.\n * @param preluActivationWeights Tensor of prelu weights to be applied as part\n * of a `prelu` activation, typically the same shape as `x`.\n * @param leakyreluAlpha Optional. Alpha to be applied as part of a `leakyrelu`\n * activation.\n */\nfunction fusedConv2d_({\n x,\n filter,\n strides,\n pad,\n dataFormat = 'NHWC',\n dilations = [1, 1],\n dimRoundingMode,\n bias,\n activation = 'linear',\n preluActivationWeights,\n leakyreluAlpha\n}: {\n x: T|TensorLike,\n filter: Tensor4D|TensorLike,\n strides: [number, number]|number,\n pad: 'valid'|'same'|number|conv_util.ExplicitPadding,\n dataFormat?: 'NHWC'|'NCHW',\n dilations?: [number, number]|number,\n dimRoundingMode?: 'floor'|'round'|'ceil',\n bias?: Tensor|TensorLike,\n activation?: Activation,\n preluActivationWeights?: Tensor,\n leakyreluAlpha?: number\n}): T {\n activation = activation || 'linear';\n\n if (shouldFuse(ENGINE.state.gradientDepth, activation) === false) {\n // TODO: Transpose bias and preluActivationWeights properly for NCHW\n // format before computation.\n util.assert(\n dataFormat === 'NHWC',\n () => `Error in fused conv2d: got dataFormat of ${dataFormat} but ` +\n `only NHWC is currently supported for the case of gradient depth ` +\n `is 0 and the activation is not linear.`);\n\n let result = unfusedConv2d(\n x, filter, strides, pad, dataFormat, dilations, dimRoundingMode);\n if (bias != null) {\n result = add(result, bias);\n }\n\n return applyActivation(\n result, activation, preluActivationWeights, leakyreluAlpha) as T;\n }\n\n const $x = convertToTensor(x, 'x', 'conv2d', 'float32');\n const $filter = convertToTensor(filter, 'filter', 'conv2d', 'float32');\n\n let x4D = $x as Tensor4D;\n let reshapedTo4D = false;\n\n if ($x.rank === 3) {\n reshapedTo4D = true;\n x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);\n }\n util.assert(\n x4D.rank === 4,\n () => `Error in fused conv2d: input must be rank 4, but got rank ` +\n `${x4D.rank}.`);\n util.assert(\n $filter.rank === 4,\n () => `Error in fused conv2d: filter must be rank 4, but got rank ` +\n `${$filter.rank}.`);\n conv_util.checkPadOnDimRoundingMode('fused conv2d', pad, dimRoundingMode);\n const inputChannels = dataFormat === 'NHWC' ? x4D.shape[3] : x4D.shape[1];\n util.assert(\n $filter.shape[2] === inputChannels,\n () => `Error in conv2d: depth of input (${inputChannels}) must match ` +\n `input depth for filter ${$filter.shape[2]}.`);\n util.assert(\n conv_util.eitherStridesOrDilationsAreOne(strides, dilations),\n () => 'Error in conv2D: Either strides or dilations must be 1. 
' +\n `Got strides ${strides} and dilations '${dilations}'`);\n\n const convInfo = conv_util.computeConv2DInfo(\n x4D.shape, $filter.shape, strides, dilations, pad, dimRoundingMode);\n\n let $bias: Tensor;\n if (bias != null) {\n $bias = convertToTensor(bias, 'bias', 'fused conv2d');\n [$bias] = makeTypesMatch($bias, $x);\n\n // According to TensorFlow, the bias is supposed be a 1-D tensor or a\n // scalar.\n //\n // 3-D or 4-D bias is not disabled for NHWC format, because they are\n // currently being used in some cases. For examplem in our code base,\n // https://github.com/tensorflow/tfjs/blob/b53bd47e880367ae57493f0ea628abaf08db2d5d/tfjs-core/src/ops/fused/fused_conv2d_test.ts#L1972.\n if (dataFormat === 'NHWC') {\n broadcast_util.assertAndGetBroadcastShape(convInfo.outShape, $bias.shape);\n } else {\n util.assert(\n $bias.shape.length <= 1,\n () => `Error in fused conv2d: only supports scalar or 1-D Tensor ` +\n `bias for NCHW format but got the bias of ` +\n `rank-${$bias.shape.length}.`);\n\n util.assert(\n $bias.shape.length === 0 || $bias.shape[0] === convInfo.outChannels ||\n $bias.shape[0] === 1,\n () => `Error in fused conv2d: bias shape (${$bias.shape}) is not ` +\n `compatible with the number of output channels ` +\n `(${convInfo.outChannels})`);\n }\n }\n\n let $preluActivationWeights: Tensor;\n if (preluActivationWeights != null) {\n // PReLU's activation weights could be a scalar, a 1-D tensor or a 3-D\n // tensor.\n const alphaShape = preluActivationWeights.shape;\n util.assert(\n alphaShape.length <= 1 || alphaShape.length === 3,\n () => `Error in fused conv2d: only supports scalar, 1-D Tensor or ` +\n `3-D Tensor PReLU activation weights but got a tensor of ` +\n `rank-${alphaShape.length}.`);\n\n if (alphaShape.length === 1) {\n // Whether the data format is NCHW or NHWC, the 1-D PReLU activation\n // weights tensor should be aligned with the output channels of conv2d\n // result.\n util.assert(\n alphaShape[0] === 1 || alphaShape[0] === convInfo.outChannels,\n () => `Error in fused conv2d: PReLU activation weights ` +\n `(${alphaShape}) is not compatible with the number of output ` +\n `channels (${convInfo.outChannels}).`);\n } else if (alphaShape.length === 3) {\n // Whether the data format is NCHW or NHWC, the PReLU activation weights\n // tensor should has the compatible shape with the result of conv2d.\n try {\n broadcast_util.assertAndGetBroadcastShape(\n alphaShape, convInfo.outShape);\n } catch (e) {\n const errMsg =\n `Error in fused conv2d: PReLU activation weights (${alphaShape}) ` +\n `is not compatible with the output shape of the conv2d ` +\n `(${convInfo.outShape}).`;\n throw Error(errMsg);\n }\n }\n\n $preluActivationWeights = convertToTensor(\n preluActivationWeights, 'prelu weights', 'fused conv2d');\n }\n\n const grad = (dy: Tensor4D, saved: Tensor[]) => {\n util.assert(\n dataFormat === 'NHWC',\n () => `Error in gradient of fused conv2D: got dataFormat of ${\n dataFormat} but only NHWC is currently supported.`);\n\n const [$filter, x4D, y, $bias] =\n saved as [Tensor4D, Tensor4D, Tensor4D, Tensor];\n\n const dyActivation = getFusedDyActivation(dy, y, activation) as Tensor4D;\n\n util.assert(\n conv_util.tupleValuesAreOne(dilations),\n () => 'Error in gradient of fused conv2D: ' +\n `dilation rates greater than 1 ` +\n `are not yet supported in gradients. 
Got dilations '${dilations}'`);\n\n const xDer =\n conv2DBackpropInput(x4D.shape, dyActivation, $filter, strides, pad);\n const filterDer =\n conv2DBackpropFilter(x4D, dyActivation, $filter.shape, strides, pad);\n const der: Tensor[] = [xDer, filterDer];\n\n if ($bias != null) {\n const biasDer = getFusedBiasGradient($bias, dyActivation);\n der.push(biasDer);\n }\n return der;\n };\n\n const inputs: FusedConv2DInputs = {\n x: x4D,\n filter: $filter,\n bias: $bias,\n preluActivationWeights: $preluActivationWeights\n };\n\n const attrs: FusedConv2DAttrs = {\n strides,\n pad,\n dataFormat,\n dilations,\n dimRoundingMode,\n activation,\n leakyreluAlpha\n };\n\n // Depending on the the params passed in we will have different number of\n // inputs and thus a a different number of elements in the gradient.\n if (bias == null) {\n const customOp =\n customGrad((x4D: Tensor4D, filter: Tensor4D, save: GradSaveFunc) => {\n let res: Tensor4D|Tensor3D =\n // tslint:disable-next-line: no-unnecessary-type-assertion\n ENGINE.runKernel(\n FusedConv2D, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n\n save([filter, x4D, res]);\n\n if (reshapedTo4D) {\n // tslint:disable-next-line: no-unnecessary-type-assertion\n res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) as\n Tensor3D;\n }\n\n return {value: res, gradFunc: grad};\n });\n return customOp(x4D, $filter) as T;\n } else {\n const customOpWithBias = customGrad(\n (x4D: Tensor4D, filter: Tensor4D, bias: Tensor, save: GradSaveFunc) => {\n let res: Tensor4D|Tensor3D = ENGINE.runKernel(\n FusedConv2D, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n\n save([filter, x4D, res, bias]);\n\n if (reshapedTo4D) {\n // tslint:disable-next-line: no-unnecessary-type-assertion\n res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) as\n Tensor3D;\n }\n\n return {value: res, gradFunc: grad};\n });\n\n return customOpWithBias(x4D, $filter, $bias) as T;\n }\n}\nexport const conv2d = /* @__PURE__ */ op({fusedConv2d_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {DepthwiseConv2dNativeBackpropFilter, DepthwiseConv2dNativeBackpropFilterAttrs, DepthwiseConv2dNativeBackpropFilterInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor3D, Tensor4D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\n\nimport {ExplicitPadding} from './conv_util';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\nfunction depthwiseConv2dNativeBackpropFilter_(\n x: T, dy: T, filterShape: [number, number, number, number],\n strides: [number, number]|number,\n pad: 'valid'|'same'|number|ExplicitPadding,\n dilations: [number, number]|number = [1, 1],\n dimRoundingMode?: 'floor'|'round'|'ceil'): Tensor4D {\n let x4D = x as Tensor4D;\n if (x.rank === 3) {\n x4D = reshape(x, [1, x.shape[0], x.shape[1], x.shape[2]]);\n }\n let dy4D = dy as Tensor4D;\n if (dy4D.rank === 3) {\n dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]);\n }\n\n const inputs: DepthwiseConv2dNativeBackpropFilterInputs = {x: x4D, dy: dy4D};\n const attrs: DepthwiseConv2dNativeBackpropFilterAttrs =\n {strides, pad, dimRoundingMode, dilations, filterShape};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n return ENGINE.runKernel(\n DepthwiseConv2dNativeBackpropFilter,\n inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as Tensor4D;\n}\n\nexport const depthwiseConv2dNativeBackpropFilter =\n op({depthwiseConv2dNativeBackpropFilter_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {DepthwiseConv2dNativeBackpropInput, DepthwiseConv2dNativeBackpropInputAttrs, DepthwiseConv2dNativeBackpropInputInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor3D, Tensor4D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\n\nimport {ExplicitPadding} from './conv_util';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\nfunction depthwiseConv2dNativeBackpropInput_(\n xShape: [number, number, number, number], dy: T, filter: Tensor4D,\n strides: [number, number]|number,\n pad: 'valid'|'same'|number|ExplicitPadding,\n dilations: [number, number]|number = [1, 1],\n dimRoundingMode?: 'floor'|'round'|'ceil'): T {\n let dy4D = dy as Tensor4D;\n let reshapedTo4D = false;\n if (dy.rank === 3) {\n reshapedTo4D = true;\n dy4D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]);\n }\n\n const inputs: DepthwiseConv2dNativeBackpropInputInputs = {dy: dy4D, filter};\n const attrs: DepthwiseConv2dNativeBackpropInputAttrs =\n {strides, pad, dimRoundingMode, dilations, inputShape: xShape};\n\n const res =\n // tslint:disable-next-line: no-unnecessary-type-assertion\n ENGINE.runKernel(\n DepthwiseConv2dNativeBackpropInput,\n inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as T;\n\n if (reshapedTo4D) {\n return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) as T;\n }\n return res;\n}\n\nexport const depthwiseConv2dNativeBackpropInput =\n op({depthwiseConv2dNativeBackpropInput_});\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {customGrad} from '../../gradients';\nimport {FusedDepthwiseConv2D, FusedDepthwiseConv2DAttrs, FusedDepthwiseConv2DInputs} from '../../kernel_names';\nimport {NamedAttrMap} from '../../kernel_registry';\nimport {Tensor, Tensor3D, Tensor4D} from '../../tensor';\nimport {GradSaveFunc, NamedTensorMap} from '../../tensor_types';\nimport {makeTypesMatch} from '../../tensor_util';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport * as util from '../../util';\nimport {add} from '../add';\nimport * as broadcast_util from '../broadcast_util';\nimport * as conv_util from '../conv_util';\nimport {depthwiseConv2d as unfusedDepthwiseConv2d} from '../depthwise_conv2d';\nimport {depthwiseConv2dNativeBackpropFilter} from '../depthwise_conv2d_native_backprop_filter';\nimport {depthwiseConv2dNativeBackpropInput} from '../depthwise_conv2d_native_backprop_input';\nimport {Activation} from '../fused_types';\nimport {applyActivation, getFusedBiasGradient, getFusedDyActivation, shouldFuse} from '../fused_util';\nimport {op} from '../operation';\nimport {reshape} from '../reshape';\n\n/**\n * Computes depthwise 2D convolution, optionally fused with adding a\n * bias and applying an activation.\n *\n * Given a 4D `input` array and a `filter` array of shape\n * `[filterHeight, filterWidth, inChannels, channelMultiplier]` containing\n * `inChannels` convolutional filters of depth 1, this op applies a\n * different filter to each input channel (expanding from 1 channel to\n * `channelMultiplier` channels for each), then concatenates the results\n * together. The output has `inChannels * channelMultiplier` channels.\n *\n * See\n * [https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d](\n * https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d)\n * for more details.\n *\n * @param obj An object with the following properties:\n * @param x The input tensor, of rank 4 or rank 3, of shape\n * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is\n * assumed.\n * @param filter The filter tensor, rank 4, of shape\n * `[filterHeight, filterWidth, inChannels, channelMultiplier]`.\n * @param strides The strides of the convolution: `[strideHeight,\n * strideWidth]`. 
If strides is a single number, then `strideHeight ==\n * strideWidth`.\n * @param pad The type of padding algorithm.\n * - `same` and stride 1: output will be of same size as input,\n * regardless of filter size.\n * - `valid`: output will be smaller than input if filter is larger\n * than 1x1.\n * - For more info, see this guide:\n * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](\n * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)\n * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`\n * in which we sample input values across the height and width dimensions\n * in atrous convolution. Defaults to `[1, 1]`. If `rate` is a single\n * number, then `dilationHeight == dilationWidth`. If it is greater than\n * 1, then all values of `strides` must be 1.\n * @param dataFormat: An optional string from: \"NHWC\", \"NCHW\". Defaults to\n * \"NHWC\". Specify the data format of the input and output data. With the\n * default format \"NHWC\", the data is stored in the order of: [batch,\n * height, width, channels]. Only \"NHWC\" is currently supported.\n * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is\n * provided, it will default to truncate.\n * @param bias Tensor to be added to the result.\n * @param activation Name of activation kernel (defaults to `linear`).\n * @param preluActivationWeights Tensor of prelu weights to be applied as part\n * of a `prelu` activation, typically the same shape as `x`.\n * @param leakyreluAlpha Optional. Alpha to be applied as part of a `leakyrelu`\n * activation.\n */\nfunction fusedDepthwiseConv2d_({\n x,\n filter,\n strides,\n pad,\n dataFormat = 'NHWC',\n dilations = [1, 1],\n dimRoundingMode,\n bias,\n activation = 'linear',\n preluActivationWeights,\n leakyreluAlpha\n}: {\n x: T|TensorLike,\n filter: Tensor4D|TensorLike,\n strides: [number, number]|number,\n pad: 'valid'|'same'|number,\n dataFormat?: 'NHWC'|'NCHW',\n dilations?: [number, number]|number,\n dimRoundingMode?: 'floor'|'round'|'ceil',\n bias?: Tensor|TensorLike,\n activation?: Activation,\n preluActivationWeights?: Tensor,\n leakyreluAlpha?: number\n}): T {\n if (shouldFuse(ENGINE.state.gradientDepth, activation) === false) {\n let result = unfusedDepthwiseConv2d(\n x, filter, strides, pad, dataFormat, dilations, dimRoundingMode);\n if (bias != null) {\n result = add(result, bias);\n }\n\n return applyActivation(\n result, activation, preluActivationWeights, leakyreluAlpha) as T;\n }\n\n const $x = convertToTensor(x, 'x', 'depthwiseConv2d', 'float32');\n const $filter =\n convertToTensor(filter, 'filter', 'depthwiseConv2d', 'float32');\n\n let x4D = $x as Tensor4D;\n let reshapedTo4D = false;\n if ($x.rank === 3) {\n reshapedTo4D = true;\n x4D = reshape($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);\n }\n util.assert(\n x4D.rank === 4,\n () => `Error in fused depthwiseConv2d: input must be rank 4, but got ` +\n `rank ${x4D.rank}.`);\n util.assert(\n $filter.rank === 4,\n () => `Error in fused depthwiseConv2d: filter must be rank 4, ` +\n `but got rank ${$filter.rank}.`);\n util.assert(\n x4D.shape[3] === $filter.shape[2],\n () => `Error in fused depthwiseConv2d: number of input channels ` +\n `(${x4D.shape[3]}) must match the inChannels dimension in ` +\n `filter ${$filter.shape[2]}.`);\n if (dilations == null) {\n dilations = [1, 1];\n }\n util.assert(\n conv_util.eitherStridesOrDilationsAreOne(strides, dilations),\n () =>\n 'Error in fused depthwiseConv2d: Either strides or dilations must ' +\n `be 1. 
Got strides ${strides} and dilations '${dilations}'`);\n conv_util.checkPadOnDimRoundingMode(\n 'fused depthwiseConv2d', pad, dimRoundingMode);\n const convInfo = conv_util.computeConv2DInfo(\n x4D.shape, $filter.shape, strides, dilations, pad, dimRoundingMode,\n true /* depthwise */);\n\n let $bias: Tensor;\n if (bias != null) {\n $bias = convertToTensor(bias, 'bias', 'fused conv2d');\n [$bias] = makeTypesMatch($bias, $x);\n\n broadcast_util.assertAndGetBroadcastShape(convInfo.outShape, $bias.shape);\n }\n\n let $preluActivationWeights: Tensor;\n if (preluActivationWeights != null) {\n $preluActivationWeights = convertToTensor(\n preluActivationWeights, 'prelu weights', 'fused depthwiseConv2d');\n }\n\n const grad = (dy: Tensor4D, saved: Tensor[]) => {\n util.assert(\n conv_util.tupleValuesAreOne(dilations),\n () => 'Error in gradient of fused depthwiseConv2d: dilation rates ' +\n `greater than 1 are not yet supported. Got dilations ` +\n `'${dilations}'`);\n const [$filter, x4D, y, bias] = saved;\n\n const dyActivation = getFusedDyActivation(dy, y, activation) as Tensor4D;\n\n const xDer = depthwiseConv2dNativeBackpropInput(\n (x4D as Tensor4D).shape, dyActivation, $filter as Tensor4D, strides,\n pad, dilations, dimRoundingMode);\n const filterDer = depthwiseConv2dNativeBackpropFilter(\n x4D as Tensor4D, dyActivation, ($filter as Tensor4D).shape, strides,\n pad, dilations, dimRoundingMode);\n\n if (bias != null) {\n const biasDer = getFusedBiasGradient($bias, dyActivation);\n return [xDer, filterDer, biasDer];\n }\n return [xDer, filterDer];\n };\n\n const inputs: FusedDepthwiseConv2DInputs = {\n x: x4D,\n filter: $filter,\n bias: $bias,\n preluActivationWeights: $preluActivationWeights\n };\n const attrs: FusedDepthwiseConv2DAttrs = {\n strides,\n pad,\n dataFormat,\n dilations,\n dimRoundingMode,\n activation,\n leakyreluAlpha\n };\n\n // Depending on the the params passed in we will have different number of\n // inputs and thus a a different number of elements in the gradient.\n if (bias == null) {\n const customOp =\n customGrad((x4D: Tensor4D, filter: Tensor4D, save: GradSaveFunc) => {\n // tslint:disable-next-line: no-unnecessary-type-assertion\n let res: Tensor4D|Tensor3D = ENGINE.runKernel(\n FusedDepthwiseConv2D, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n\n save([filter, x4D, res]);\n\n if (reshapedTo4D) {\n // tslint:disable-next-line: no-unnecessary-type-assertion\n res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) as\n Tensor3D;\n }\n\n return {value: res, gradFunc: grad};\n });\n return customOp(x4D, $filter) as T;\n } else {\n const customOpWithBias = customGrad(\n (x4D: Tensor4D, filter: Tensor4D, bias: Tensor, save: GradSaveFunc) => {\n // tslint:disable-next-line: no-unnecessary-type-assertion\n let res: Tensor4D|Tensor3D = ENGINE.runKernel(\n FusedDepthwiseConv2D, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n\n save([filter, x4D, res, bias]);\n\n if (reshapedTo4D) {\n // tslint:disable-next-line: no-unnecessary-type-assertion\n res = reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) as\n Tensor3D;\n }\n\n return {value: res, gradFunc: grad};\n });\n\n return customOpWithBias(x4D, $filter, $bias) as T;\n }\n}\nexport const depthwiseConv2d = /* @__PURE__ */ op({fusedDepthwiseConv2d_});\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {customGrad} from '../../gradients';\nimport {_FusedMatMul, _FusedMatMulAttrs, _FusedMatMulInputs} from '../../kernel_names';\nimport {NamedAttrMap} from '../../kernel_registry';\nimport {Tensor, Tensor3D} from '../../tensor';\nimport {GradSaveFunc, NamedTensorMap} from '../../tensor_types';\nimport {makeTypesMatch} from '../../tensor_util';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport * as util from '../../util';\n\nimport {add} from '../add';\nimport * as broadcast_util from '../broadcast_util';\nimport {Activation} from '../fused_types';\nimport {applyActivation, getFusedBiasGradient, getFusedDyActivation, shouldFuse} from '../fused_util';\nimport {matMul as unfusedMatMul} from '../mat_mul';\nimport {op} from '../operation';\nimport {reshape} from '../reshape';\n\n/**\n * Computes the dot product of two matrices with optional activation and bias.\n *\n * ```js\n * const a = tf.tensor2d([-1, -2], [1, 2]);\n * const b = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n * const bias = tf.tensor2d([1, 2], [1, 2]);\n *\n * tf.fused.matMul({a, b, bias, activation: 'relu'}).print();\n * ```\n *\n * @param obj An object with the following properties:\n * - `a` First matrix in dot product operation.\n * - `b` Second matrix in dot product operation.\n * - `transposeA` If true, `a` is transposed before multiplication.\n * - `transposeB` If true, `b` is transposed before multiplication.\n * - `bias` Matrix to be added to the result.\n * - `activation` Name of activation kernel (defaults to `linear`).\n * - `preluActivationWeights` Tensor of prelu weights.\n * - `leakyreluAlpha` Alpha of leakyrelu.\n */\nfunction fusedMatMul_({\n a,\n b,\n transposeA = false,\n transposeB = false,\n bias,\n activation = 'linear',\n preluActivationWeights,\n leakyreluAlpha = 0.2,\n}: {\n a: Tensor|TensorLike,\n b: Tensor|TensorLike,\n transposeA?: boolean,\n transposeB?: boolean,\n bias?: Tensor|TensorLike,\n activation?: Activation,\n preluActivationWeights?: Tensor\n leakyreluAlpha?: number\n}): Tensor {\n if (shouldFuse(ENGINE.state.gradientDepth, activation) === false) {\n let result = unfusedMatMul(a, b, transposeA, transposeB);\n if (bias != null) {\n result = add(result, bias);\n }\n\n return applyActivation(\n result, activation, preluActivationWeights, leakyreluAlpha);\n }\n\n let $a = convertToTensor(a, 'a', 'fused matMul');\n let $b = convertToTensor(b, 'b', 'fused matMul');\n [$a, $b] = makeTypesMatch($a, $b);\n\n const innerShapeA =\n transposeA ? $a.shape[$a.rank - 2] : $a.shape[$a.rank - 1];\n const innerShapeB =\n transposeB ? $b.shape[$b.rank - 1] : $b.shape[$b.rank - 2];\n\n const outerShapeA =\n transposeA ? $a.shape[$a.rank - 1] : $a.shape[$a.rank - 2];\n const outerShapeB =\n transposeB ? 
$b.shape[$b.rank - 2] : $b.shape[$b.rank - 1];\n\n const outerDimsA = $a.shape.slice(0, -2);\n const outerDimsB = $b.shape.slice(0, -2);\n const batchDimA = util.sizeFromShape(outerDimsA);\n const batchDimB = util.sizeFromShape(outerDimsB);\n\n util.assert(\n innerShapeA === innerShapeB,\n () => `Error in fused matMul: inner shapes (${innerShapeA}) and (` +\n `${innerShapeB}) of Tensors with shapes ${$a.shape} and ` +\n `${$b.shape} and transposeA=${transposeA}` +\n ` and transposeB=${transposeB} must match.`);\n\n const outShapeOuterDims = broadcast_util.assertAndGetBroadcastShape(\n $a.shape.slice(0, -2), $b.shape.slice(0, -2));\n const outShape = outShapeOuterDims.concat([outerShapeA, outerShapeB]);\n\n const a3D: Tensor3D = transposeA ?\n reshape($a, [batchDimA, innerShapeA, outerShapeA]) :\n reshape($a, [batchDimA, outerShapeA, innerShapeA]);\n const b3D: Tensor3D = transposeB ?\n reshape($b, [batchDimB, outerShapeB, innerShapeB]) :\n reshape($b, [batchDimB, innerShapeB, outerShapeB]);\n\n let $bias: Tensor;\n if (bias != null) {\n $bias = convertToTensor(bias, 'bias', 'fused matMul');\n [$bias] = makeTypesMatch($bias, $a);\n\n broadcast_util.assertAndGetBroadcastShape(outShape, $bias.shape);\n }\n\n let $preluActivationWeights: Tensor;\n if (preluActivationWeights != null) {\n $preluActivationWeights = convertToTensor(\n preluActivationWeights, 'prelu weights', 'fused matMul');\n }\n\n const grad = (dy: Tensor3D, saved: Tensor[]) => {\n const [a3D, b3D, y, $bias] = saved;\n // we reshape dy because the result of the forward is not\n // necessarily going to be a 3d tensor due to a reshape done at the end of\n // the customOp.\n const dyActivation =\n getFusedDyActivation(reshape(dy, y.shape), y, activation);\n let aDer: Tensor;\n let bDer: Tensor;\n\n if (!transposeA && !transposeB) {\n aDer = unfusedMatMul(dyActivation, b3D, false, true);\n bDer = unfusedMatMul(a3D, dyActivation, true, false);\n } else if (!transposeA && transposeB) {\n aDer = unfusedMatMul(dyActivation, b3D, false, false);\n bDer = unfusedMatMul(dyActivation, a3D, true, false);\n } else if (transposeA && !transposeB) {\n aDer = unfusedMatMul(b3D, dyActivation, false, true);\n bDer = unfusedMatMul(a3D, dyActivation, false, false);\n } else {\n aDer = unfusedMatMul(b3D, dyActivation, true, true);\n bDer = unfusedMatMul(dyActivation, a3D, true, true);\n }\n\n if (bias != null) {\n const biasDer = getFusedBiasGradient($bias, dyActivation);\n return [aDer, bDer, biasDer];\n } else {\n return [aDer, bDer];\n }\n };\n\n const inputs: _FusedMatMulInputs = {\n a: a3D,\n b: b3D,\n bias: $bias,\n preluActivationWeights: $preluActivationWeights\n };\n const attrs: _FusedMatMulAttrs =\n {transposeA, transposeB, activation, leakyreluAlpha};\n\n // Depending on the the params passed in we will have different number of\n // inputs and thus a a different number of elements in the gradient.\n if (bias == null) {\n const customOp =\n customGrad((a3D: Tensor3D, b3D: Tensor3D, save: GradSaveFunc) => {\n const res =\n // tslint:disable-next-line: no-unnecessary-type-assertion\n ENGINE.runKernel(\n _FusedMatMul, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as Tensor;\n\n save([a3D, b3D, res]);\n\n return {value: reshape(res, outShape), gradFunc: grad};\n });\n return customOp(a3D, b3D);\n } else {\n const customOpWithBias = customGrad(\n (a3D: Tensor3D, b3D: Tensor3D, $bias: Tensor, save: GradSaveFunc) => {\n const res =\n // tslint:disable-next-line: no-unnecessary-type-assertion\n ENGINE.runKernel(\n 
_FusedMatMul, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as Tensor;\n\n save([a3D, b3D, res, $bias]);\n\n return {value: reshape(res, outShape), gradFunc: grad};\n });\n\n return customOpWithBias(a3D, b3D, $bias);\n }\n }\n\n export const matMul = /* @__PURE__ */ op({fusedMatMul_});\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor1D} from '../../tensor';\nimport {op} from '../operation';\nimport {cosineWindow} from '../signal_ops_util';\n\n/**\n * Generate a hamming window.\n *\n * See: https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows\n *\n * ```js\n * tf.signal.hammingWindow(10).print();\n * ```\n * @param The length of window\n *\n * @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'}\n */\nfunction hammingWindow_(windowLength: number): Tensor1D {\n return cosineWindow(windowLength, 0.54, 0.46);\n}\nexport const hammingWindow = /* @__PURE__ */ op({hammingWindow_});\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor1D} from '../../tensor';\nimport {op} from '../operation';\nimport {cosineWindow} from '../signal_ops_util';\n\n/**\n * Generate a Hann window.\n *\n * See: https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows\n *\n * ```js\n * tf.signal.hannWindow(10).print();\n * ```\n * @param The length of window\n *\n * @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'}\n */\nfunction hannWindow_(windowLength: number): Tensor1D {\n return cosineWindow(windowLength, 0.5, 0.5);\n}\n\nexport const hannWindow = /* @__PURE__ */ op({hannWindow_});\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor, Tensor1D} from '../../tensor';\nimport {concat} from '../concat';\nimport {fill} from '../fill';\nimport {op} from '../operation';\nimport {reshape} from '../reshape';\nimport {slice} from '../slice';\nimport {tensor2d} from '../tensor2d';\n\n/**\n * Expands input into frames of frameLength.\n * Slides a window size with frameStep.\n *\n * ```js\n * tf.signal.frame([1, 2, 3], 2, 1).print();\n * ```\n * @param signal The input tensor to be expanded\n * @param frameLength Length of each frame\n * @param frameStep The frame hop size in samples.\n * @param padEnd Whether to pad the end of signal with padValue.\n * @param padValue A number to use where the input signal does\n * not exist when padEnd is True.\n *\n * @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'}\n */\nfunction frame_(\n signal: Tensor1D, frameLength: number, frameStep: number, padEnd = false,\n padValue = 0): Tensor {\n let start = 0;\n const output: Tensor[] = [];\n while (start + frameLength <= signal.size) {\n output.push(slice(signal, start, frameLength));\n start += frameStep;\n }\n\n if (padEnd) {\n while (start < signal.size) {\n const padLen = (start + frameLength) - signal.size;\n const pad = concat([\n slice(signal, start, frameLength - padLen), fill([padLen], padValue)\n ]);\n output.push(pad);\n start += frameStep;\n }\n }\n\n if (output.length === 0) {\n return tensor2d([], [0, frameLength]);\n }\n\n return reshape(concat(output), [output.length, frameLength]);\n}\nexport const frame = /* @__PURE__ */ op({frame_});\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor, Tensor1D} from '../../tensor';\nimport {mul} from '../mul';\nimport {op} from '../operation';\nimport {enclosingPowerOfTwo} from '../signal_ops_util';\nimport {rfft} from '../spectral/rfft';\n\nimport {frame} from './frame';\nimport {hannWindow} from './hann_window';\n\n/**\n * Computes the Short-time Fourier Transform of signals\n * See: https://en.wikipedia.org/wiki/Short-time_Fourier_transform\n *\n * ```js\n * const input = tf.tensor1d([1, 1, 1, 1, 1])\n * tf.signal.stft(input, 3, 1).print();\n * ```\n * @param signal 1-dimensional real value tensor.\n * @param frameLength The window length of samples.\n * @param frameStep The number of samples to step.\n * @param fftLength The size of the FFT to apply.\n * @param windowFn A callable that takes a window length and returns 1-d tensor.\n *\n * @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'}\n */\nfunction stft_(\n signal: Tensor1D, frameLength: number, frameStep: number,\n fftLength?: number,\n windowFn: (length: number) => Tensor1D = hannWindow): Tensor {\n if (fftLength == null) {\n fftLength = enclosingPowerOfTwo(frameLength);\n }\n const framedSignal = frame(signal, frameLength, frameStep);\n const windowedSignal = mul(framedSignal, windowFn(frameLength));\n return rfft(windowedSignal, fftLength);\n}\nexport const stft = /* @__PURE__ */ op({stft_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {CropAndResize, CropAndResizeAttrs, CropAndResizeInputs} from '../../kernel_names';\nimport {NamedAttrMap} from '../../kernel_registry';\nimport {Tensor1D, Tensor2D, Tensor4D} from '../../tensor';\nimport {NamedTensorMap} from '../../tensor_types';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport * as util from '../../util';\n\nimport {op} from '../operation';\n\n/**\n * Extracts crops from the input image tensor and resizes them using bilinear\n * sampling or nearest neighbor sampling (possibly with aspect ratio change)\n * to a common output size specified by cropSize.\n *\n * @param image 4d tensor of shape `[batch,imageHeight,imageWidth, depth]`,\n * where imageHeight and imageWidth must be positive, specifying the\n * batch of images from which to take crops\n * @param boxes 2d float32 tensor of shape `[numBoxes, 4]`. Each entry is\n * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the normalized\n * coordinates of the box in the `boxInd[i]`th image in the batch\n * @param boxInd 1d int32 tensor of shape `[numBoxes]` with values in range\n * `[0, batch)` that specifies the image that the `i`-th box refers to.\n * @param cropSize 1d int32 tensor of 2 elements `[cropHeigh, cropWidth]`\n * specifying the size to which all crops are resized to.\n * @param method Optional string from `'bilinear' | 'nearest'`,\n * defaults to bilinear, which specifies the sampling method for resizing\n * @param extrapolationValue A threshold for deciding when to remove boxes based\n * on score. 
Defaults to 0.\n * @return A 4D tensor of the shape `[numBoxes,cropHeight,cropWidth,depth]`\n *\n * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}\n */\nfunction cropAndResize_(\n image: Tensor4D|TensorLike,\n boxes: Tensor2D|TensorLike,\n boxInd: Tensor1D|TensorLike,\n cropSize: [number, number],\n method: 'bilinear'|'nearest' = 'bilinear',\n extrapolationValue = 0,\n ): Tensor4D {\n const $image = convertToTensor(image, 'image', 'cropAndResize');\n const $boxes = convertToTensor(boxes, 'boxes', 'cropAndResize', 'float32');\n const $boxInd = convertToTensor(boxInd, 'boxInd', 'cropAndResize', 'int32');\n\n const numBoxes = $boxes.shape[0];\n\n util.assert(\n $image.rank === 4,\n () => 'Error in cropAndResize: image must be rank 4,' +\n `but got rank ${$image.rank}.`);\n util.assert(\n $boxes.rank === 2 && $boxes.shape[1] === 4,\n () => `Error in cropAndResize: boxes must be have size [${numBoxes},4] ` +\n `but had shape ${$boxes.shape}.`);\n util.assert(\n $boxInd.rank === 1 && $boxInd.shape[0] === numBoxes,\n () => `Error in cropAndResize: boxInd must be have size [${numBoxes}] ` +\n `but had shape ${$boxes.shape}.`);\n util.assert(\n cropSize.length === 2,\n () => `Error in cropAndResize: cropSize must be of length 2, but got ` +\n `length ${cropSize.length}.`);\n util.assert(\n cropSize[0] >= 1 && cropSize[1] >= 1,\n () => `cropSize must be atleast [1,1], but was ${cropSize}`);\n util.assert(\n method === 'bilinear' || method === 'nearest',\n () => `method must be bilinear or nearest, but was ${method}`);\n\n const inputs:\n CropAndResizeInputs = {image: $image, boxes: $boxes, boxInd: $boxInd};\n const attrs: CropAndResizeAttrs = {method, extrapolationValue, cropSize};\n const res = ENGINE.runKernel(\n CropAndResize, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n return res as Tensor4D;\n}\n\nexport const cropAndResize = /* @__PURE__ */ op({cropAndResize_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {FlipLeftRight, FlipLeftRightInputs} from '../../kernel_names';\nimport {Tensor4D} from '../../tensor';\nimport {NamedTensorMap} from '../../tensor_types';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport * as util from '../../util';\nimport {op} from '../operation';\n\n/**\n * Flips the image left to right. 
Currently available in the CPU, WebGL, and\n * WASM backends.\n *\n * @param image 4d tensor of shape `[batch, imageHeight, imageWidth, depth]`.\n */\n/** @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'} */\nfunction flipLeftRight_(image: Tensor4D|TensorLike): Tensor4D {\n const $image = convertToTensor(image, 'image', 'flipLeftRight', 'float32');\n\n util.assert(\n $image.rank === 4,\n () => 'Error in flipLeftRight: image must be rank 4,' +\n `but got rank ${$image.rank}.`);\n\n const inputs: FlipLeftRightInputs = {image: $image};\n const res =\n ENGINE.runKernel(FlipLeftRight, inputs as unknown as NamedTensorMap, {});\n return res as Tensor4D;\n}\n\nexport const flipLeftRight = /* @__PURE__ */ op({flipLeftRight_});\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor2D, Tensor3D, Tensor4D, Tensor5D, Tensor6D} from '../../tensor';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport * as util from '../../util';\n\nimport {op} from '../operation';\nimport {tile} from '../tile';\n\n/**\n * Converts images from grayscale to RGB format.\n *\n * @param image A grayscale tensor to convert. The `image`'s last dimension must\n * be size 1 with at least a two-dimensional shape.\n *\n * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}\n */\nfunction grayscaleToRGB_(image: T|TensorLike): T {\n const $image = convertToTensor(image, 'image', 'grayscaleToRGB');\n\n const lastDimsIdx = $image.rank - 1;\n const lastDims = $image.shape[lastDimsIdx];\n\n util.assert(\n $image.rank >= 2,\n () => 'Error in grayscaleToRGB: images must be at least rank 2, ' +\n `but got rank ${$image.rank}.`);\n\n util.assert(\n lastDims === 1,\n () => 'Error in grayscaleToRGB: last dimension of a grayscale image ' +\n `should be size 1, but got size ${lastDims}.`);\n\n const reps = new Array($image.rank);\n\n reps.fill(1, 0, lastDimsIdx);\n reps[lastDimsIdx] = 3;\n\n return tile($image, reps);\n}\n\nexport const grayscaleToRGB = /* @__PURE__ */ op({grayscaleToRGB_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {RotateWithOffset, RotateWithOffsetAttrs, RotateWithOffsetInputs} from '../../kernel_names';\nimport {NamedAttrMap} from '../../kernel_registry';\nimport {Tensor4D} from '../../tensor';\nimport {NamedTensorMap} from '../../tensor_types';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport * as util from '../../util';\n\nimport {op} from '../operation';\n\n/**\n * Rotates the input image tensor counter-clockwise with an optional offset\n * center of rotation. Currently available in the CPU, WebGL, and WASM backends.\n *\n * @param image 4d tensor of shape `[batch, imageHeight, imageWidth, depth]`.\n * @param radians The amount of rotation.\n * @param fillValue The value to fill in the empty space leftover\n * after rotation. Can be either a single grayscale value (0-255), or an\n * array of three numbers `[red, green, blue]` specifying the red, green,\n * and blue channels. Defaults to `0` (black).\n * @param center The center of rotation. Can be either a single value (0-1), or\n * an array of two numbers `[centerX, centerY]`. Defaults to `0.5` (rotates\n * the image around its center).\n *\n * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}\n */\nfunction rotateWithOffset_(\n image: Tensor4D|TensorLike, radians: number,\n fillValue: number|[number, number, number] = 0,\n center: number|[number, number] = 0.5): Tensor4D {\n const $image = convertToTensor(image, 'image', 'rotateWithOffset', 'float32');\n\n util.assert(\n $image.rank === 4,\n () => 'Error in rotateWithOffset: image must be rank 4,' +\n `but got rank ${$image.rank}.`);\n\n const inputs: RotateWithOffsetInputs = {image: $image};\n const attrs: RotateWithOffsetAttrs = {radians, fillValue, center};\n const res = ENGINE.runKernel(\n RotateWithOffset, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n return res as Tensor4D;\n}\n\nexport const rotateWithOffset = /* @__PURE__ */ op({rotateWithOffset_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor1D, Tensor2D} from '../tensor';\nimport * as util from '../util';\n\nfunction nonMaxSuppSanityCheck(\n boxes: Tensor2D, scores: Tensor1D, maxOutputSize: number,\n iouThreshold: number, scoreThreshold: number, softNmsSigma?: number): {\n maxOutputSize: number,\n iouThreshold: number,\n scoreThreshold: number,\n softNmsSigma: number\n} {\n if (iouThreshold == null) {\n iouThreshold = 0.5;\n }\n if (scoreThreshold == null) {\n scoreThreshold = Number.NEGATIVE_INFINITY;\n }\n if (softNmsSigma == null) {\n softNmsSigma = 0.0;\n }\n\n const numBoxes = boxes.shape[0];\n maxOutputSize = Math.min(maxOutputSize, numBoxes);\n\n util.assert(\n 0 <= iouThreshold && iouThreshold <= 1,\n () => `iouThreshold must be in [0, 1], but was '${iouThreshold}'`);\n util.assert(\n boxes.rank === 2,\n () => `boxes must be a 2D tensor, but was of rank '${boxes.rank}'`);\n util.assert(\n boxes.shape[1] === 4,\n () =>\n `boxes must have 4 columns, but 2nd dimension was ${boxes.shape[1]}`);\n util.assert(scores.rank === 1, () => 'scores must be a 1D tensor');\n util.assert(\n scores.shape[0] === numBoxes,\n () => `scores has incompatible shape with boxes. Expected ${numBoxes}, ` +\n `but was ${scores.shape[0]}`);\n util.assert(\n 0 <= softNmsSigma && softNmsSigma <= 1,\n () => `softNmsSigma must be in [0, 1], but was '${softNmsSigma}'`);\n return {maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma};\n}\n\nexport {nonMaxSuppSanityCheck};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {NonMaxSuppressionV3} from '../../kernel_names';\nimport {Tensor1D, Tensor2D} from '../../tensor';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport {nonMaxSuppSanityCheck} from '../nonmax_util';\nimport {op} from '../operation';\n\n/**\n * Performs non maximum suppression of bounding boxes based on\n * iou (intersection over union).\n *\n * @param boxes a 2d tensor of shape `[numBoxes, 4]`. 
Each entry is\n * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of\n * the bounding box.\n * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.\n * @param maxOutputSize The maximum number of boxes to be selected.\n * @param iouThreshold A float representing the threshold for deciding whether\n * boxes overlap too much with respect to IOU. Must be between [0, 1].\n * Defaults to 0.5 (50% box overlap).\n * @param scoreThreshold A threshold for deciding when to remove boxes based\n * on score. Defaults to -inf, which means any score is accepted.\n * @return A 1D tensor with the selected box indices.\n *\n * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}\n */\nfunction nonMaxSuppression_(\n boxes: Tensor2D|TensorLike, scores: Tensor1D|TensorLike,\n maxOutputSize: number, iouThreshold = 0.5,\n scoreThreshold = Number.NEGATIVE_INFINITY): Tensor1D {\n const $boxes =\n convertToTensor(boxes, 'boxes', 'nonMaxSuppression', 'float32');\n const $scores =\n convertToTensor(scores, 'scores', 'nonMaxSuppression', 'float32');\n\n const inputs = nonMaxSuppSanityCheck(\n $boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold);\n maxOutputSize = inputs.maxOutputSize;\n iouThreshold = inputs.iouThreshold;\n scoreThreshold = inputs.scoreThreshold;\n\n const attrs = {maxOutputSize, iouThreshold, scoreThreshold};\n return ENGINE.runKernel(\n NonMaxSuppressionV3, {boxes: $boxes, scores: $scores}, attrs);\n}\n\nexport const nonMaxSuppression = /* @__PURE__ */ op({nonMaxSuppression_});\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * Inserts a value into a sorted array. This method allows duplicate, meaning it\n * allows inserting duplicate value, in which case, the element will be inserted\n * at the lowest index of the value.\n * @param arr The array to modify.\n * @param element The element to insert.\n * @param comparator Optional. If no comparator is specified, elements are\n * compared using array_util.defaultComparator, which is suitable for Strings\n * and Numbers in ascending arrays. If the array contains multiple instances of\n * the target value, the left-most instance will be returned. To provide a\n * comparator, it should take 2 arguments to compare and return a negative,\n * zero, or a positive number.\n */\nexport function binaryInsert(\n arr: T[], element: T, comparator?: (a: T, b: T) => number) {\n const index = binarySearch(arr, element, comparator);\n const insertionPoint = index < 0 ? -(index + 1) : index;\n arr.splice(insertionPoint, 0, element);\n}\n\n/**\n * Searches the array for the target using binary search, returns the index\n * of the found element, or position to insert if element not found. 
If no\n * comparator is specified, elements are compared using array_\n * util.defaultComparator, which is suitable for Strings and Numbers in\n * ascending arrays. If the array contains multiple instances of the target\n * value, the left-most instance will be returned.\n * @param arr The array to be searched in.\n * @param target The target to be searched for.\n * @param comparator Should take 2 arguments to compare and return a negative,\n * zero, or a positive number.\n * @return Lowest index of the target value if found, otherwise the insertion\n * point where the target should be inserted, in the form of\n * (-insertionPoint - 1).\n */\nexport function binarySearch(\n arr: T[], target: T, comparator?: (a: T, b: T) => number) {\n return binarySearch_(arr, target, comparator || defaultComparator);\n}\n\n/**\n * Compares its two arguments for order.\n * @param a The first element to be compared.\n * @param b The second element to be compared.\n * @return A negative number, zero, or a positive number as the first\n * argument is less than, equal to, or greater than the second.\n */\nfunction defaultComparator(a: T, b: T): number {\n return a > b ? 1 : a < b ? -1 : 0;\n}\n\nfunction binarySearch_(\n arr: T[], target: T, comparator: (a: T, b: T) => number) {\n let left = 0;\n let right = arr.length;\n let middle = 0;\n let found = false;\n while (left < right) {\n middle = left + ((right - left) >>> 1);\n const compareResult = comparator(target, arr[middle]);\n if (compareResult > 0) {\n left = middle + 1;\n } else {\n right = middle;\n // If compareResult is 0, the value is found. We record it is found,\n // and then keep looking because there may be duplicate.\n found = !compareResult;\n }\n }\n\n return found ? left : -left - 1;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {TypedArray} from '../types';\nimport {binaryInsert} from './non_max_suppression_util';\n\n/**\n * Implementation of the NonMaxSuppression kernel shared between webgl and cpu.\n */\ninterface Candidate {\n score: number;\n boxIndex: number;\n suppressBeginIndex: number;\n}\n\ninterface NonMaxSuppressionResult {\n selectedIndices: number[];\n selectedScores?: number[];\n validOutputs?: number;\n}\n\nexport function nonMaxSuppressionV3Impl(\n boxes: TypedArray, scores: TypedArray, maxOutputSize: number,\n iouThreshold: number, scoreThreshold: number): NonMaxSuppressionResult {\n return nonMaxSuppressionImpl_(\n boxes, scores, maxOutputSize, iouThreshold, scoreThreshold,\n 0 /* softNmsSigma */);\n}\n\nexport function nonMaxSuppressionV4Impl(\n boxes: TypedArray, scores: TypedArray, maxOutputSize: number,\n iouThreshold: number, scoreThreshold: number,\n padToMaxOutputSize: boolean): NonMaxSuppressionResult {\n return nonMaxSuppressionImpl_(\n boxes, scores, maxOutputSize, iouThreshold, scoreThreshold,\n 0 /* softNmsSigma */, false /* returnScoresTensor */,\n padToMaxOutputSize /* padToMaxOutputSize */, true\n /* returnValidOutputs */);\n}\n\nexport function nonMaxSuppressionV5Impl(\n boxes: TypedArray, scores: TypedArray, maxOutputSize: number,\n iouThreshold: number, scoreThreshold: number,\n softNmsSigma: number): NonMaxSuppressionResult {\n return nonMaxSuppressionImpl_(\n boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma,\n true /* returnScoresTensor */);\n}\n\nfunction nonMaxSuppressionImpl_(\n boxes: TypedArray, scores: TypedArray, maxOutputSize: number,\n iouThreshold: number, scoreThreshold: number, softNmsSigma: number,\n returnScoresTensor = false, padToMaxOutputSize = false,\n returnValidOutputs = false): NonMaxSuppressionResult {\n // The list is sorted in ascending order, so that we can always pop the\n // candidate with the largest score in O(1) time.\n const candidates = [];\n\n for (let i = 0; i < scores.length; i++) {\n if (scores[i] > scoreThreshold) {\n candidates.push({score: scores[i], boxIndex: i, suppressBeginIndex: 0});\n }\n }\n\n candidates.sort(ascendingComparator);\n\n // If softNmsSigma is 0, the outcome of this algorithm is exactly same as\n // before.\n const scale = softNmsSigma > 0 ? (-0.5 / softNmsSigma) : 0.0;\n\n const selectedIndices: number[] = [];\n const selectedScores: number[] = [];\n\n while (selectedIndices.length < maxOutputSize && candidates.length > 0) {\n const candidate = candidates.pop();\n const {score: originalScore, boxIndex, suppressBeginIndex} = candidate;\n\n if (originalScore < scoreThreshold) {\n break;\n }\n\n // Overlapping boxes are likely to have similar scores, therefore we\n // iterate through the previously selected boxes backwards in order to\n // see if candidate's score should be suppressed. 
We use\n // suppressBeginIndex to track and ensure a candidate can be suppressed\n // by a selected box no more than once. Also, if the overlap exceeds\n // iouThreshold, we simply ignore the candidate.\n let ignoreCandidate = false;\n for (let j = selectedIndices.length - 1; j >= suppressBeginIndex; --j) {\n const iou = intersectionOverUnion(boxes, boxIndex, selectedIndices[j]);\n\n if (iou >= iouThreshold) {\n ignoreCandidate = true;\n break;\n }\n\n candidate.score =\n candidate.score * suppressWeight(iouThreshold, scale, iou);\n\n if (candidate.score <= scoreThreshold) {\n break;\n }\n }\n\n // At this point, if `candidate.score` has not dropped below\n // `scoreThreshold`, then we know that we went through all of the\n // previous selections and can safely update `suppressBeginIndex` to the\n // end of the selected array. Then we can re-insert the candidate with\n // the updated score and suppressBeginIndex back in the candidate list.\n // If on the other hand, `candidate.score` has dropped below the score\n // threshold, we will not add it back to the candidates list.\n candidate.suppressBeginIndex = selectedIndices.length;\n\n if (!ignoreCandidate) {\n // Candidate has passed all the tests, and is not suppressed, so\n // select the candidate.\n if (candidate.score === originalScore) {\n selectedIndices.push(boxIndex);\n selectedScores.push(candidate.score);\n } else if (candidate.score > scoreThreshold) {\n // Candidate's score is suppressed but is still high enough to be\n // considered, so add back to the candidates list.\n binaryInsert(candidates, candidate, ascendingComparator);\n }\n }\n }\n\n // NonMaxSuppressionV4 feature: padding output to maxOutputSize.\n const validOutputs = selectedIndices.length;\n const elemsToPad = maxOutputSize - validOutputs;\n\n if (padToMaxOutputSize && elemsToPad > 0) {\n selectedIndices.push(...new Array(elemsToPad).fill(0));\n selectedScores.push(...new Array(elemsToPad).fill(0.0));\n }\n\n const result: NonMaxSuppressionResult = {selectedIndices};\n\n if (returnScoresTensor) {\n result['selectedScores'] = selectedScores;\n }\n\n if (returnValidOutputs) {\n result['validOutputs'] = validOutputs;\n }\n\n return result;\n}\n\nfunction intersectionOverUnion(boxes: TypedArray, i: number, j: number) {\n const iCoord = boxes.subarray(i * 4, i * 4 + 4);\n const jCoord = boxes.subarray(j * 4, j * 4 + 4);\n const yminI = Math.min(iCoord[0], iCoord[2]);\n const xminI = Math.min(iCoord[1], iCoord[3]);\n const ymaxI = Math.max(iCoord[0], iCoord[2]);\n const xmaxI = Math.max(iCoord[1], iCoord[3]);\n const yminJ = Math.min(jCoord[0], jCoord[2]);\n const xminJ = Math.min(jCoord[1], jCoord[3]);\n const ymaxJ = Math.max(jCoord[0], jCoord[2]);\n const xmaxJ = Math.max(jCoord[1], jCoord[3]);\n const areaI = (ymaxI - yminI) * (xmaxI - xminI);\n const areaJ = (ymaxJ - yminJ) * (xmaxJ - xminJ);\n if (areaI <= 0 || areaJ <= 0) {\n return 0.0;\n }\n const intersectionYmin = Math.max(yminI, yminJ);\n const intersectionXmin = Math.max(xminI, xminJ);\n const intersectionYmax = Math.min(ymaxI, ymaxJ);\n const intersectionXmax = Math.min(xmaxI, xmaxJ);\n const intersectionArea = Math.max(intersectionYmax - intersectionYmin, 0.0) *\n Math.max(intersectionXmax - intersectionXmin, 0.0);\n return intersectionArea / (areaI + areaJ - intersectionArea);\n}\n\n// A Gaussian penalty function, this method always returns values in [0, 1].\n// The weight is a function of similarity, the more overlap two boxes are, the\n// smaller the weight is, meaning highly overlapping boxe will be 
significantly\n// penalized. On the other hand, a non-overlapping box will not be penalized.\nfunction suppressWeight(iouThreshold: number, scale: number, iou: number) {\n const weight = Math.exp(scale * iou * iou);\n return iou <= iouThreshold ? weight : 0.0;\n}\n\nfunction ascendingComparator(c1: Candidate, c2: Candidate) {\n // For objects with same scores, we make the object with the larger index go\n // first. In an array that pops from the end, this means that the object with\n // the smaller index will be popped first. This ensures the same output as\n // the TensorFlow python version.\n return (c1.score - c2.score) ||\n ((c1.score === c2.score) && (c2.boxIndex - c1.boxIndex));\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {nonMaxSuppressionV3Impl} from '../../backends/non_max_suppression_impl';\nimport {Tensor1D, Tensor2D} from '../../tensor';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport {nonMaxSuppSanityCheck} from '../nonmax_util';\nimport {tensor1d} from '../tensor1d';\n\n/**\n * Performs non maximum suppression of bounding boxes based on\n * iou (intersection over union).\n *\n * This is the async version of `nonMaxSuppression`\n *\n * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is\n * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of\n * the bounding box.\n * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.\n * @param maxOutputSize The maximum number of boxes to be selected.\n * @param iouThreshold A float representing the threshold for deciding whether\n * boxes overlap too much with respect to IOU. Must be between [0, 1].\n * Defaults to 0.5 (50% box overlap).\n * @param scoreThreshold A threshold for deciding when to remove boxes based\n * on score. 
Defaults to -inf, which means any score is accepted.\n * @return A 1D tensor with the selected box indices.\n *\n * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}\n */\nasync function nonMaxSuppressionAsync_(\n boxes: Tensor2D|TensorLike, scores: Tensor1D|TensorLike,\n maxOutputSize: number, iouThreshold = 0.5,\n scoreThreshold = Number.NEGATIVE_INFINITY): Promise {\n const $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppressionAsync');\n const $scores = convertToTensor(scores, 'scores', 'nonMaxSuppressionAsync');\n\n const inputs = nonMaxSuppSanityCheck(\n $boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold);\n maxOutputSize = inputs.maxOutputSize;\n iouThreshold = inputs.iouThreshold;\n scoreThreshold = inputs.scoreThreshold;\n\n const boxesAndScores = await Promise.all([$boxes.data(), $scores.data()]);\n const boxesVals = boxesAndScores[0];\n const scoresVals = boxesAndScores[1];\n\n // We call a cpu based impl directly with the typedarray data here rather\n // than a kernel because all kernels are synchronous (and thus cannot await\n // .data()).\n const {selectedIndices} = nonMaxSuppressionV3Impl(\n boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold);\n if ($boxes !== boxes) {\n $boxes.dispose();\n }\n if ($scores !== scores) {\n $scores.dispose();\n }\n\n return tensor1d(selectedIndices, 'int32');\n}\n\nexport const nonMaxSuppressionAsync = nonMaxSuppressionAsync_;\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {NonMaxSuppressionV5, NonMaxSuppressionV5Attrs, NonMaxSuppressionV5Inputs} from '../../kernel_names';\nimport {NamedAttrMap} from '../../kernel_registry';\nimport {Tensor, Tensor1D, Tensor2D} from '../../tensor';\nimport {NamedTensorMap} from '../../tensor_types';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\n\nimport {nonMaxSuppSanityCheck} from '../nonmax_util';\nimport {op} from '../operation';\n\n/**\n * Performs non maximum suppression of bounding boxes based on\n * iou (intersection over union).\n *\n * This op also supports a Soft-NMS mode (cf.\n * Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score\n * of other overlapping boxes, therefore favoring different regions of the image\n * with high scores. To enable this Soft-NMS mode, set the `softNmsSigma`\n * parameter to be larger than 0.\n *\n * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is\n * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of\n * the bounding box.\n * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.\n * @param maxOutputSize The maximum number of boxes to be selected.\n * @param iouThreshold A float representing the threshold for deciding whether\n * boxes overlap too much with respect to IOU. 
Must be between [0, 1].\n * Defaults to 0.5 (50% box overlap).\n * @param scoreThreshold A threshold for deciding when to remove boxes based\n * on score. Defaults to -inf, which means any score is accepted.\n * @param softNmsSigma A float representing the sigma parameter for Soft NMS.\n * When sigma is 0, it falls back to nonMaxSuppression.\n * @return A map with the following properties:\n * - selectedIndices: A 1D tensor with the selected box indices.\n * - selectedScores: A 1D tensor with the corresponding scores for each\n * selected box.\n *\n * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}\n */\nfunction nonMaxSuppressionWithScore_(\n boxes: Tensor2D|TensorLike, scores: Tensor1D|TensorLike,\n maxOutputSize: number, iouThreshold = 0.5,\n scoreThreshold = Number.NEGATIVE_INFINITY,\n softNmsSigma = 0.0): NamedTensorMap {\n const $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppression');\n const $scores = convertToTensor(scores, 'scores', 'nonMaxSuppression');\n\n const params = nonMaxSuppSanityCheck(\n $boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold,\n softNmsSigma);\n maxOutputSize = params.maxOutputSize;\n iouThreshold = params.iouThreshold;\n scoreThreshold = params.scoreThreshold;\n softNmsSigma = params.softNmsSigma;\n\n const inputs: NonMaxSuppressionV5Inputs = {boxes: $boxes, scores: $scores};\n const attrs: NonMaxSuppressionV5Attrs =\n {maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const result = ENGINE.runKernel(\n NonMaxSuppressionV5, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as Tensor[];\n\n return {selectedIndices: result[0], selectedScores: result[1]};\n}\n\nexport const nonMaxSuppressionWithScore = /* @__PURE__ */ op({nonMaxSuppressionWithScore_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {nonMaxSuppressionV5Impl} from '../../backends/non_max_suppression_impl';\nimport {Tensor1D, Tensor2D} from '../../tensor';\nimport {NamedTensorMap} from '../../tensor_types';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport {nonMaxSuppSanityCheck} from '../nonmax_util';\nimport {tensor1d} from '../tensor1d';\n\n/**\n * Asynchronously performs non maximum suppression of bounding boxes based on\n * iou (intersection over union).\n *\n * This op also supports a Soft-NMS mode (cf.\n * Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score\n * of other overlapping boxes, therefore favoring different regions of the image\n * with high scores. To enable this Soft-NMS mode, set the `softNmsSigma`\n * parameter to be larger than 0.\n *\n * @param boxes a 2d tensor of shape `[numBoxes, 4]`. 
Each entry is\n * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of\n * the bounding box.\n * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.\n * @param maxOutputSize The maximum number of boxes to be selected.\n * @param iouThreshold A float representing the threshold for deciding whether\n * boxes overlap too much with respect to IOU. Must be between [0, 1].\n * Defaults to 0.5 (50% box overlap).\n * @param scoreThreshold A threshold for deciding when to remove boxes based\n * on score. Defaults to -inf, which means any score is accepted.\n * @param softNmsSigma A float representing the sigma parameter for Soft NMS.\n * When sigma is 0, it falls back to nonMaxSuppression.\n * @return A map with the following properties:\n * - selectedIndices: A 1D tensor with the selected box indices.\n * - selectedScores: A 1D tensor with the corresponding scores for each\n * selected box.\n *\n * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}\n */\nasync function nonMaxSuppressionWithScoreAsync_(\n boxes: Tensor2D|TensorLike, scores: Tensor1D|TensorLike,\n maxOutputSize: number, iouThreshold = 0.5,\n scoreThreshold = Number.NEGATIVE_INFINITY,\n softNmsSigma = 0.0): Promise {\n const $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppressionAsync');\n const $scores = convertToTensor(scores, 'scores', 'nonMaxSuppressionAsync');\n\n const params = nonMaxSuppSanityCheck(\n $boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold,\n softNmsSigma);\n maxOutputSize = params.maxOutputSize;\n iouThreshold = params.iouThreshold;\n scoreThreshold = params.scoreThreshold;\n softNmsSigma = params.softNmsSigma;\n\n const boxesAndScores = await Promise.all([$boxes.data(), $scores.data()]);\n const boxesVals = boxesAndScores[0];\n const scoresVals = boxesAndScores[1];\n\n // We call a cpu based impl directly with the typedarray data here rather\n // than a kernel because all kernels are synchronous (and thus cannot await\n // .data()).\n const {selectedIndices, selectedScores} = nonMaxSuppressionV5Impl(\n boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold,\n softNmsSigma);\n\n if ($boxes !== boxes) {\n $boxes.dispose();\n }\n if ($scores !== scores) {\n $scores.dispose();\n }\n\n return {\n selectedIndices: tensor1d(selectedIndices, 'int32'),\n selectedScores: tensor1d(selectedScores)\n };\n}\n\nexport const nonMaxSuppressionWithScoreAsync = nonMaxSuppressionWithScoreAsync_;\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {NonMaxSuppressionV4, NonMaxSuppressionV4Attrs, NonMaxSuppressionV4Inputs} from '../../kernel_names';\nimport {NamedAttrMap} from '../../kernel_registry';\nimport {Tensor, Tensor1D, Tensor2D} from '../../tensor';\nimport {NamedTensorMap} from '../../tensor_types';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\n\nimport {nonMaxSuppSanityCheck} from '../nonmax_util';\nimport {op} from '../operation';\n\n/**\n * Asynchronously performs non maximum suppression of bounding boxes based on\n * iou (intersection over union), with an option to pad results.\n *\n * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is\n * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of\n * the bounding box.\n * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.\n * @param maxOutputSize The maximum number of boxes to be selected.\n * @param iouThreshold A float representing the threshold for deciding whether\n * boxes overlap too much with respect to IOU. Must be between [0, 1].\n * Defaults to 0.5 (50% box overlap).\n * @param scoreThreshold A threshold for deciding when to remove boxes based\n * on score. Defaults to -inf, which means any score is accepted.\n * @param padToMaxOutputSize Defaults to false. If true, size of output\n * `selectedIndices` is padded to maxOutputSize.\n * @return A map with the following properties:\n * - selectedIndices: A 1D tensor with the selected box indices.\n * - validOutputs: A scalar denoting how many elements in `selectedIndices`\n * are valid. 
Valid elements occur first, then padding.\n *\n * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}\n */\nfunction nonMaxSuppressionPadded_(\n boxes: Tensor2D|TensorLike, scores: Tensor1D|TensorLike,\n maxOutputSize: number, iouThreshold = 0.5,\n scoreThreshold = Number.NEGATIVE_INFINITY,\n padToMaxOutputSize = false): NamedTensorMap {\n const $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppression');\n const $scores = convertToTensor(scores, 'scores', 'nonMaxSuppression');\n\n const params = nonMaxSuppSanityCheck(\n $boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold,\n null /* softNmsSigma */);\n const $maxOutputSize = params.maxOutputSize;\n const $iouThreshold = params.iouThreshold;\n const $scoreThreshold = params.scoreThreshold;\n\n const inputs: NonMaxSuppressionV4Inputs = {boxes: $boxes, scores: $scores};\n const attrs: NonMaxSuppressionV4Attrs = {\n maxOutputSize: $maxOutputSize,\n iouThreshold: $iouThreshold,\n scoreThreshold: $scoreThreshold,\n padToMaxOutputSize\n };\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const result = ENGINE.runKernel(\n NonMaxSuppressionV4, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as Tensor[];\n\n return {selectedIndices: result[0], validOutputs: result[1]};\n}\n\nexport const nonMaxSuppressionPadded = /* @__PURE__ */ op({nonMaxSuppressionPadded_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {nonMaxSuppressionV4Impl} from '../../backends/non_max_suppression_impl';\nimport {Tensor1D, Tensor2D} from '../../tensor';\nimport {NamedTensorMap} from '../../tensor_types';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport {nonMaxSuppSanityCheck} from '../nonmax_util';\nimport {scalar} from '../scalar';\nimport {tensor1d} from '../tensor1d';\n\n/**\n * Asynchronously performs non maximum suppression of bounding boxes based on\n * iou (intersection over union), with an option to pad results.\n *\n * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is\n * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of\n * the bounding box.\n * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.\n * @param maxOutputSize The maximum number of boxes to be selected.\n * @param iouThreshold A float representing the threshold for deciding whether\n * boxes overlap too much with respect to IOU. Must be between [0, 1].\n * Defaults to 0.5 (50% box overlap).\n * @param scoreThreshold A threshold for deciding when to remove boxes based\n * on score. Defaults to -inf, which means any score is accepted.\n * @param padToMaxOutputSize Defaults to false. 
If true, size of output\n * `selectedIndices` is padded to maxOutputSize.\n * @return A map with the following properties:\n * - selectedIndices: A 1D tensor with the selected box indices.\n * - validOutputs: A scalar denoting how many elements in `selectedIndices`\n * are valid. Valid elements occur first, then padding.\n *\n * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}\n */\nasync function nonMaxSuppressionPaddedAsync_(\n boxes: Tensor2D|TensorLike, scores: Tensor1D|TensorLike,\n maxOutputSize: number, iouThreshold = 0.5,\n scoreThreshold = Number.NEGATIVE_INFINITY,\n padToMaxOutputSize = false): Promise {\n const $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppressionAsync');\n const $scores = convertToTensor(scores, 'scores', 'nonMaxSuppressionAsync');\n\n const params = nonMaxSuppSanityCheck(\n $boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold,\n null /* softNmsSigma */);\n const $maxOutputSize = params.maxOutputSize;\n const $iouThreshold = params.iouThreshold;\n const $scoreThreshold = params.scoreThreshold;\n\n const [boxesVals, scoresVals] =\n await Promise.all([$boxes.data(), $scores.data()]);\n\n // We call a cpu based impl directly with the typedarray data here rather\n // than a kernel because all kernels are synchronous (and thus cannot await\n // .data()).\n const {selectedIndices, validOutputs} = nonMaxSuppressionV4Impl(\n boxesVals, scoresVals, $maxOutputSize, $iouThreshold, $scoreThreshold,\n padToMaxOutputSize);\n\n if ($boxes !== boxes) {\n $boxes.dispose();\n }\n if ($scores !== scores) {\n $scores.dispose();\n }\n\n return {\n selectedIndices: tensor1d(selectedIndices, 'int32'),\n validOutputs: scalar(validOutputs, 'int32')\n };\n}\n\nexport const nonMaxSuppressionPaddedAsync = nonMaxSuppressionPaddedAsync_;\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {ResizeBilinear, ResizeBilinearAttrs, ResizeBilinearInputs} from '../../kernel_names';\nimport {NamedAttrMap} from '../../kernel_registry';\nimport {Tensor3D, Tensor4D} from '../../tensor';\nimport {NamedTensorMap} from '../../tensor_types';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport * as util from '../../util';\n\nimport {op} from '../operation';\nimport {reshape} from '../reshape';\n\n/**\n * Bilinear resize a single 3D image or a batch of 3D images to a new shape.\n *\n * @param images The images, of rank 4 or rank 3, of shape\n * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.\n * @param size The new shape `[newHeight, newWidth]` to resize the\n * images to. Each channel is resized individually.\n * @param alignCorners Defaults to `false`. If true, rescale\n * input by `(new_height - 1) / (height - 1)`, which exactly aligns the 4\n * corners of images and resized images. 
If false, rescale by\n * `new_height / height`. Treat similarly the width dimension.\n * @param halfPixelCenters Defaults to `false`. Whether to assume pixel centers\n * are at 0.5, which would make the floating point coordinates of the top\n * left pixel 0.5, 0.5.\n *\n * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}\n */\nfunction resizeBilinear_(\n images: T|TensorLike, size: [number, number], alignCorners = false,\n halfPixelCenters = false): T {\n const $images = convertToTensor(images, 'images', 'resizeBilinear');\n\n util.assert(\n $images.rank === 3 || $images.rank === 4,\n () => `Error in resizeBilinear: x must be rank 3 or 4, but got ` +\n `rank ${$images.rank}.`);\n util.assert(\n size.length === 2,\n () => `Error in resizeBilinear: new shape must 2D, but got shape ` +\n `${size}.`);\n util.assert(\n halfPixelCenters === false || alignCorners === false,\n () => `Error in resizeBilinear: If halfPixelCenters is true, ` +\n `alignCorners must be false.`);\n\n let batchImages = $images as Tensor4D;\n let reshapedTo4D = false;\n if ($images.rank === 3) {\n reshapedTo4D = true;\n batchImages = reshape(\n $images, [1, $images.shape[0], $images.shape[1], $images.shape[2]]);\n }\n\n const [] = size;\n\n const inputs: ResizeBilinearInputs = {images: batchImages};\n const attrs: ResizeBilinearAttrs = {alignCorners, halfPixelCenters, size};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const res = ENGINE.runKernel(\n ResizeBilinear, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as T;\n\n if (reshapedTo4D) {\n return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) as T;\n }\n return res;\n}\n\nexport const resizeBilinear = /* @__PURE__ */ op({resizeBilinear_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {ResizeNearestNeighbor, ResizeNearestNeighborAttrs, ResizeNearestNeighborInputs} from '../../kernel_names';\nimport {NamedAttrMap} from '../../kernel_registry';\nimport {Tensor3D, Tensor4D} from '../../tensor';\nimport {NamedTensorMap} from '../../tensor_types';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport * as util from '../../util';\n\nimport {op} from '../operation';\nimport {reshape} from '../reshape';\n\n/**\n * NearestNeighbor resize a batch of 3D images to a new shape.\n *\n * @param images The images, of rank 4 or rank 3, of shape\n * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed.\n * @param size The new shape `[newHeight, newWidth]` to resize the\n * images to. Each channel is resized individually.\n * @param alignCorners Defaults to False. If true, rescale\n * input by `(new_height - 1) / (height - 1)`, which exactly aligns the 4\n * corners of images and resized images. 
If false, rescale by\n * `new_height / height`. Treat similarly the width dimension.\n * @param halfPixelCenters Defaults to `false`. Whether to assume pixels are of\n * half the actual dimensions, and yield more accurate resizes. This flag\n * would also make the floating point coordinates of the top left pixel\n * 0.5, 0.5.\n *\n * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}\n */\nfunction resizeNearestNeighbor_(\n images: T|TensorLike, size: [number, number], alignCorners = false,\n halfPixelCenters = false): T {\n const $images = convertToTensor(images, 'images', 'resizeNearestNeighbor');\n\n util.assert(\n $images.rank === 3 || $images.rank === 4,\n () => `Error in resizeNearestNeighbor: x must be rank 3 or 4, but got ` +\n `rank ${$images.rank}.`);\n util.assert(\n size.length === 2,\n () =>\n `Error in resizeNearestNeighbor: new shape must 2D, but got shape ` +\n `${size}.`);\n util.assert(\n $images.dtype === 'float32' || $images.dtype === 'int32',\n () => '`images` must have `int32` or `float32` as dtype');\n util.assert(\n halfPixelCenters === false || alignCorners === false,\n () => `Error in resizeNearestNeighbor: If halfPixelCenters is true, ` +\n `alignCorners must be false.`);\n let batchImages = $images as Tensor4D;\n let reshapedTo4D = false;\n if ($images.rank === 3) {\n reshapedTo4D = true;\n batchImages = reshape(\n $images, [1, $images.shape[0], $images.shape[1], $images.shape[2]]);\n }\n const [] = size;\n\n const inputs: ResizeNearestNeighborInputs = {images: batchImages};\n const attrs:\n ResizeNearestNeighborAttrs = {alignCorners, halfPixelCenters, size};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const res = ENGINE.runKernel(\n ResizeNearestNeighbor, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as T;\n\n if (reshapedTo4D) {\n return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) as T;\n }\n return res;\n}\n\nexport const resizeNearestNeighbor = /* @__PURE__ */ op({resizeNearestNeighbor_});\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * https://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport { Tensor1D, Tensor3D } from '../../tensor';\nimport { tensor1d } from '../tensor1d';\nimport { TensorLike } from '../../types';\nimport { op } from '../operation';\nimport { cast } from '../cast';\nimport { split } from '../split';\nimport { bincount } from '../bincount';\nimport { lessEqual } from '../less_equal';\nimport { greater } from '../greater';\nimport { sum } from '../sum';\nimport { add } from '../add';\nimport { mul } from '../mul';\nimport { div } from '../div';\nimport { sub } from '../sub';\nimport { round } from '../round';\nimport { where } from '../where';\nimport { fill } from '../fill';\nimport {slice} from '../slice';\nimport { range } from '../range';\nimport { tensor } from '../tensor';\nimport * as util from '../../util';\nimport { convertToTensor } from '../../tensor_util_env';\n\n/**\n * Performs image binarization with corresponding threshold\n * (depends on the method)value, which creates a binary image from a grayscale.\n * @param image 3d tensor of shape [imageHeight,imageWidth, depth],\n * where imageHeight and imageWidth must be positive.The image color\n * range should be [0, 255].\n * @param method Optional string from `'binary' | 'otsu'`\n * which specifies the method for thresholding. Defaults to 'binary'.\n * @param inverted Optional boolean whichspecifies\n * if colours should be inverted. 
Defaults to false.\n * @param threshValue Optional number which defines threshold value from 0 to 1.\n * Defaults to 0.5.\n * @return A 3d tensor of shape [imageHeight,imageWidth, depth], which\n * contains binarized image.\n */\n\nfunction threshold_(\n image: Tensor3D | TensorLike,\n method = 'binary',\n inverted = false,\n threshValue = 0.5\n): Tensor3D {\n const $image = convertToTensor(image, 'image', 'threshold');\n\n /* 0.2989, 0.5870, 0.1140 are represent luma coefficients in CCIR601.\n\tReference for converting between RGB and grayscale: https://en.wikipedia.org/wiki/Luma_%28video%29 */\n\n const RED_INTENCITY_COEF = 0.2989;\n const GREEN_INTENCITY_COEF = 0.5870;\n const BLUE_INTENCITY_COEF = 0.1140;\n const totalPixelsInImage = $image.shape[0] * $image.shape[1];\n\n let $threshold = mul(tensor1d([threshValue]), 255);\n let r, g, b, grayscale;\n\n util.assert(\n $image.rank === 3,\n () => 'Error in threshold: image must be rank 3,' +\n `but got rank ${$image.rank}.`);\n\n util.assert(\n $image.shape[2] === 3 || $image.shape[2]=== 1,\n () => 'Error in threshold: ' +\n 'image color channel must be equal to 3 or 1' +\n `but got ${$image.shape[2]}.`);\n\n util.assert(\n $image.dtype === 'int32' || $image.dtype === 'float32',\n () => 'Error in dtype: image dtype must be int32 or float32,' +\n `but got dtype ${$image.dtype}.`);\n\n util.assert(\n method === 'otsu' || method === 'binary',\n () => `Method must be binary or otsu, but was ${method}`);\n\n if ($image.shape[2] === 3) {\n [r, g, b] = split($image, [1, 1, 1], -1);\n const $r = mul(r,RED_INTENCITY_COEF);\n const $g = mul(g,GREEN_INTENCITY_COEF);\n const $b = mul(b,BLUE_INTENCITY_COEF);\n grayscale = add(add($r, $g), $b);\n } else {\n grayscale = image;\n }\n\n if (method === 'otsu') {\n const $histogram = bincount(cast(round(grayscale), 'int32') as Tensor1D,\n tensor([]),\n 256);\n $threshold = otsu($histogram, totalPixelsInImage);\n }\n\n const invCondition = inverted ?\n lessEqual(grayscale, $threshold) : greater(grayscale, $threshold);\n\n const result = cast(mul(invCondition,255), 'int32');\n\n return result as Tensor3D;\n}\n\nfunction otsu(histogram: Tensor1D, total: number):Tensor1D {\n\n let bestThresh = tensor1d([-1]);\n let bestInBetVar = tensor1d([0]);\n let cInBetVar = tensor1d([0]);\n let classFirst, classSecond, meanFirst,\n meanSec, weightForeground, weightBack;\n\n for (let index = 0; index < histogram.size-1; index++) {\n\n classFirst = slice(histogram, 0, index + 1);\n\n classSecond = slice(histogram,index + 1);\n\n weightForeground = div(sum(classFirst),total);\n\n weightBack = div(sum(classSecond),total);\n\n const meanFirstDivA = sum(mul(classFirst, range(0, classFirst.size)));\n\n meanFirst = div(meanFirstDivA, sum(classFirst) );\n\n const meanSecFill = fill(classSecond.shape, classFirst.size);\n const meanSecAdd = add(range(0,classSecond.size),meanSecFill);\n const meanSecMul = mul(classSecond, (meanSecAdd));\n meanSec = div(sum(meanSecMul), sum(classSecond));\n\n const cInBetVarSubA = sub(meanFirst, meanSec);\n const cInBetVarSubB = sub(meanFirst, meanSec);\n const cInBetVarMul = mul(weightForeground, weightBack);\n cInBetVar = mul(mul(cInBetVarMul,cInBetVarSubA), cInBetVarSubB);\n\n const condition = greater(cInBetVar, bestInBetVar);\n\n bestInBetVar = where(condition, cInBetVar, bestInBetVar);\n\n bestThresh = where(condition, tensor1d([index]), bestThresh);\n\n }\n return bestThresh;\n}\n\nexport const threshold = /* @__PURE__ */ op({ threshold_ });\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../../engine';\nimport {Transform, TransformAttrs, TransformInputs} from '../../kernel_names';\nimport {NamedAttrMap} from '../../kernel_registry';\nimport {Tensor2D, Tensor4D} from '../../tensor';\nimport {NamedTensorMap} from '../../tensor_types';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport * as util from '../../util';\n\nimport {op} from '../operation';\n\n/**\n * Applies the given transform(s) to the image(s).\n *\n * @param image 4d tensor of shape `[batch, imageHeight, imageWidth, depth]`.\n * @param transforms Projective transform matrix/matrices. A tensor1d of length\n * 8 or tensor of size N x 8. If one row of transforms is [a0, a1, a2, b0,\n * b1, b2, c0, c1], then it maps the output point (x, y) to a transformed\n * input point (x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k),\n * where k = c0 x + c1 y + 1. The transforms are inverted compared to the\n * transform mapping input points to output points.\n * @param interpolation Interpolation mode.\n * Supported values: 'nearest', 'bilinear'. Default to 'nearest'.\n * @param fillMode Points outside the boundaries of the input are filled\n * according to the given mode, one of 'constant', 'reflect', 'wrap',\n * 'nearest'. Default to 'constant'.\n * 'reflect': (d c b a | a b c d | d c b a ) The input is extended by\n * reflecting about the edge of the last pixel.\n * 'constant': (k k k k | a b c d | k k k k) The input is extended by\n * filling all values beyond the edge with the same constant value k.\n * 'wrap': (a b c d | a b c d | a b c d) The input is extended by\n * wrapping around to the opposite edge.\n * 'nearest': (a a a a | a b c d | d d d d) The input is extended by\n * the nearest pixel.\n * @param fillValue A float represents the value to be filled outside the\n * boundaries when fillMode is 'constant'.\n * @param Output dimension after the transform, [height, width]. 
If undefined,\n * output is the same size as input image.\n *\n * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}\n */\nfunction transform_(\n image: Tensor4D|TensorLike, transforms: Tensor2D|TensorLike,\n interpolation: 'nearest'|'bilinear' = 'nearest',\n fillMode: 'constant'|'reflect'|'wrap'|'nearest' = 'constant', fillValue = 0,\n outputShape?: [number, number]): Tensor4D {\n const $image = convertToTensor(image, 'image', 'transform', 'float32');\n const $transforms =\n convertToTensor(transforms, 'transforms', 'transform', 'float32');\n\n util.assert(\n $image.rank === 4,\n () => 'Error in transform: image must be rank 4,' +\n `but got rank ${$image.rank}.`);\n\n util.assert(\n $transforms.rank === 2 &&\n ($transforms.shape[0] === $image.shape[0] ||\n $transforms.shape[0] === 1) &&\n $transforms.shape[1] === 8,\n () => `Error in transform: Input transform should be batch x 8 or 1 x 8`);\n\n util.assert(\n outputShape == null || outputShape.length === 2,\n () =>\n 'Error in transform: outputShape must be [height, width] or null, ' +\n `but got ${outputShape}.`);\n\n const inputs: TransformInputs = {image: $image, transforms: $transforms};\n const attrs:\n TransformAttrs = {interpolation, fillMode, fillValue, outputShape};\n\n return ENGINE.runKernel(\n Transform, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const transform = /* @__PURE__ */ op({transform_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../../tensor';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport {assert} from '../../util';\n\nimport {greaterEqual} from '../greater_equal';\nimport {lessEqual} from '../less_equal';\nimport {logicalAnd} from '../logical_and';\nimport {op} from '../operation';\nimport {range} from '../range';\nimport {reshape} from '../reshape';\nimport {scalar} from '../scalar';\nimport {stack} from '../stack';\nimport {sub} from '../sub';\nimport {unstack} from '../unstack';\nimport {where} from '../where';\nimport {zeros} from '../zeros';\n\n/**\n * Copy a tensor setting everything outside a central band in each innermost\n * matrix to zero.\n *\n * The band part is computed as follows: Assume input has `k` dimensions\n * `[I, J, K, ..., M, N]`, then the output is a tensor with the same shape where\n * `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.\n * The indicator function\n * `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)`\n * `&& (num_upper < 0 || (n-m) <= num_upper)`\n *\n * ```js\n * const x = tf.tensor2d([[ 0, 1, 2, 3],\n * [-1, 0, 1, 2],\n * [-2, -1, 0, 1],\n * [-3, -2, -1, 0]]);\n * let y = tf.linalg.bandPart(x, 1, -1);\n * y.print(); // [[ 0, 1, 2, 3],\n * // [-1, 0, 1, 2],\n * // [ 0, -1, 0, 1],\n * // [ 0, 0 , -1, 0]]\n * let z = 
tf.linalg.bandPart(x, 2, 1);\n * z.print(); // [[ 0, 1, 0, 0],\n * // [-1, 0, 1, 0],\n * // [-2, -1, 0, 1],\n * // [ 0, -2, -1, 0]]\n * ```\n *\n * @param x Rank `k` tensor\n * @param numLower Number of subdiagonals to keep.\n * If negative, keep entire lower triangle.\n * @param numUpper Number of subdiagonals to keep.\n * If negative, keep entire upper triangle.\n * @returns Rank `k` tensor of the same shape as input.\n * The extracted banded tensor.\n *\n * @doc {heading:'Operations', subheading:'Linear Algebra', namespace:'linalg'}\n */\nfunction bandPart_(\n a: T|TensorLike, numLower: number, numUpper: number): T {\n assert(\n numLower % 1 === 0,\n () => `bandPart(): numLower must be an integer, got ${numLower}.`);\n assert(\n numUpper % 1 === 0,\n () => `bandPart(): numUpper must be an integer, got ${numUpper}.`);\n\n const $a = convertToTensor(a, 'a', 'bandPart');\n\n assert(\n $a.rank >= 2,\n () => `bandPart(): Rank must be at least 2, got ${$a.rank}.`);\n\n const shape = $a.shape;\n const [M, N] = $a.shape.slice(-2);\n\n if (!(numLower <= M)) {\n throw new Error(\n `bandPart(): numLower (${numLower})` +\n ` must not be greater than the number of rows (${M}).`);\n }\n if (!(numUpper <= N)) {\n throw new Error(\n `bandPart(): numUpper (${numUpper})` +\n ` must not be greater than the number of columns (${N}).`);\n }\n\n if (numLower < 0) {\n numLower = M;\n }\n if (numUpper < 0) {\n numUpper = N;\n }\n\n const i = reshape(range(0, M, 1, 'int32'), [-1, 1]);\n const j = range(0, N, 1, 'int32');\n const ij = sub(i, j);\n\n const inBand = logicalAnd(\n lessEqual(ij, scalar(+numLower, 'int32')),\n greaterEqual(ij, scalar(-numUpper, 'int32')));\n\n const zero = zeros([M, N], $a.dtype);\n\n return reshape(\n stack(unstack(reshape($a, [-1, M, N]))\n .map(mat => where(inBand, mat, zero))),\n shape) as T;\n}\n\nexport const bandPart = /* @__PURE__ */ op({bandPart_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {Tensor1D, Tensor2D} from '../../tensor';\nimport {assert} from '../../util';\n\nimport {div} from '../div';\nimport {mul} from '../mul';\nimport {norm} from '../norm';\nimport {op} from '../operation';\nimport {split} from '../split';\nimport {squeeze} from '../squeeze';\nimport {stack} from '../stack';\nimport {sub} from '../sub';\nimport {sum} from '../sum';\n\n/**\n * Gram-Schmidt orthogonalization.\n *\n * ```js\n * const x = tf.tensor2d([[1, 2], [3, 4]]);\n * let y = tf.linalg.gramSchmidt(x);\n * y.print();\n * console.log('Orthogonalized:');\n * y.dot(y.transpose()).print(); // should be nearly the identity matrix.\n * console.log('First row direction maintained:');\n * const data = await y.array();\n * console.log(data[0][1] / data[0][0]); // should be nearly 2.\n * ```\n *\n * @param xs The vectors to be orthogonalized, in one of the two following\n * formats:\n * - An Array of `tf.Tensor1D`.\n * - A `tf.Tensor2D`, i.e., a matrix, in which case the vectors are the rows\n * of `xs`.\n * In each case, all the vectors must have the same length and the length\n * must be greater than or equal to the number of vectors.\n * @returns The orthogonalized and normalized vectors or matrix.\n * Orthogonalization means that the vectors or the rows of the matrix\n * are orthogonal (zero inner products). Normalization means that each\n * vector or each row of the matrix has an L2 norm that equals `1`.\n *\n * @doc {heading:'Operations', subheading:'Linear Algebra', namespace:'linalg'}\n */\nfunction gramSchmidt_(xs: Tensor1D[]|Tensor2D): Tensor1D[]|Tensor2D {\n let inputIsTensor2D: boolean;\n if (Array.isArray(xs)) {\n inputIsTensor2D = false;\n assert(\n xs != null && xs.length > 0,\n () => 'Gram-Schmidt process: input must not be null, undefined, or ' +\n 'empty');\n const dim = xs[0].shape[0];\n for (let i = 1; i < xs.length; ++i) {\n assert(\n xs[i].shape[0] === dim,\n () =>\n 'Gram-Schmidt: Non-unique lengths found in the input vectors: ' +\n `(${(xs as Tensor1D[])[i].shape[0]} vs. 
${dim})`);\n }\n } else {\n inputIsTensor2D = true;\n xs = split(xs, xs.shape[0], 0).map(x => squeeze(x, [0]));\n }\n\n assert(\n xs.length <= xs[0].shape[0],\n () => `Gram-Schmidt: Number of vectors (${\n (xs as Tensor1D[]).length}) exceeds ` +\n `number of dimensions (${(xs as Tensor1D[])[0].shape[0]}).`);\n\n const ys: Tensor1D[] = [];\n const xs1d = xs;\n for (let i = 0; i < xs.length; ++i) {\n ys.push(ENGINE.tidy(() => {\n let x = xs1d[i];\n if (i > 0) {\n for (let j = 0; j < i; ++j) {\n const proj = mul(sum(mul(ys[j], x)), ys[j]);\n x = sub(x, proj);\n }\n }\n return div(x, norm(x, 'euclidean'));\n }));\n }\n\n if (inputIsTensor2D) {\n return stack(ys, 0) as Tensor2D;\n } else {\n return ys;\n }\n}\n\nexport const gramSchmidt = /* @__PURE__ */ op({gramSchmidt_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../../engine';\nimport {dispose} from '../../globals';\nimport {Tensor, Tensor2D} from '../../tensor';\nimport {assert} from '../../util';\n\nimport {clone} from '../clone';\nimport {concat} from '../concat';\nimport {div} from '../div';\nimport {eye} from '../eye';\nimport {greater} from '../greater';\nimport {matMul} from '../mat_mul';\nimport {mul} from '../mul';\nimport {neg} from '../neg';\nimport {norm} from '../norm';\nimport {op} from '../operation';\nimport {reshape} from '../reshape';\nimport {slice} from '../slice';\nimport {stack} from '../stack';\nimport {sub} from '../sub';\nimport {tensor2d} from '../tensor2d';\nimport {transpose} from '../transpose';\nimport {unstack} from '../unstack';\nimport {where} from '../where';\n\n/**\n * Compute QR decomposition of m-by-n matrix using Householder transformation.\n *\n * Implementation based on\n * [http://www.cs.cornell.edu/~bindel/class/cs6210-f09/lec18.pdf]\n * (http://www.cs.cornell.edu/~bindel/class/cs6210-f09/lec18.pdf)\n *\n * ```js\n * const a = tf.tensor2d([[1, 2], [3, 4]]);\n * let [q, r] = tf.linalg.qr(a);\n * console.log('Q');\n * q.print();\n * console.log('R');\n * r.print();\n * console.log('Orthogonalized');\n * q.dot(q.transpose()).print() // should be nearly the identity matrix.\n * console.log('Reconstructed');\n * q.dot(r).print(); // should be nearly [[1, 2], [3, 4]];\n * ```\n *\n * @param x The `tf.Tensor` to be QR-decomposed. Must have rank >= 2. Suppose\n * it has the shape `[..., M, N]`.\n * @param fullMatrices An optional boolean parameter. Defaults to `false`.\n * If `true`, compute full-sized `Q`. If `false` (the default),\n * compute only the leading N columns of `Q` and `R`.\n * @returns An `Array` of two `tf.Tensor`s: `[Q, R]`. 
`Q` is a unitary matrix,\n * i.e., its columns all have unit norm and are mutually orthogonal.\n * If `M >= N`,\n * If `fullMatrices` is `false` (default),\n * - `Q` has a shape of `[..., M, N]`,\n * - `R` has a shape of `[..., N, N]`.\n * If `fullMatrices` is `true` (default),\n * - `Q` has a shape of `[..., M, M]`,\n * - `R` has a shape of `[..., M, N]`.\n * If `M < N`,\n * - `Q` has a shape of `[..., M, M]`,\n * - `R` has a shape of `[..., M, N]`.\n * @throws If the rank of `x` is less than 2.\n *\n * @doc {heading:'Operations',\n * subheading:'Linear Algebra',\n * namespace:'linalg'}\n */\nfunction qr_(x: Tensor, fullMatrices = false): [Tensor, Tensor] {\n assert(\n x.rank >= 2,\n () => `qr() requires input tensor to have a rank >= 2, but got rank ${\n x.rank}`);\n\n if (x.rank === 2) {\n return qr2d(x as Tensor2D, fullMatrices);\n } else {\n // Rank > 2.\n // TODO(cais): Below we split the input into individual 2D tensors,\n // perform QR decomposition on them and then stack the results back\n // together. We should explore whether this can be parallelized.\n const outerDimsProd = x.shape.slice(0, x.shape.length - 2)\n .reduce((value, prev) => value * prev);\n const x2ds = unstack(\n reshape(\n x,\n [\n outerDimsProd, x.shape[x.shape.length - 2],\n x.shape[x.shape.length - 1]\n ]),\n 0);\n const q2ds: Tensor2D[] = [];\n const r2ds: Tensor2D[] = [];\n x2ds.forEach(x2d => {\n const [q2d, r2d] = qr2d(x2d as Tensor2D, fullMatrices);\n q2ds.push(q2d);\n r2ds.push(r2d);\n });\n const q = reshape(stack(q2ds, 0), x.shape);\n const r = reshape(stack(r2ds, 0), x.shape);\n return [q, r];\n }\n}\n\nfunction qr2d(x: Tensor2D, fullMatrices = false): [Tensor2D, Tensor2D] {\n return ENGINE.tidy(() => {\n assert(\n x.shape.length === 2,\n () => `qr2d() requires a 2D Tensor, but got a ${\n x.shape.length}D Tensor.`);\n\n const m = x.shape[0];\n const n = x.shape[1];\n\n let q = eye(m); // Orthogonal transform so far.\n let r = clone(x); // Transformed matrix so far.\n\n const one2D = tensor2d([[1]], [1, 1]);\n let w: Tensor2D = clone(one2D);\n\n const iters = m >= n ? 
n : m;\n for (let j = 0; j < iters; ++j) {\n // This tidy within the for-loop ensures we clean up temporary\n // tensors as soon as they are no longer needed.\n const rTemp = r;\n const wTemp = w;\n const qTemp = q;\n [w, r, q] = ENGINE.tidy((): [Tensor2D, Tensor2D, Tensor2D] => {\n // Find H = I - tau * w * w', to put zeros below R(j, j).\n const rjEnd1 = slice(r, [j, j], [m - j, 1]);\n const normX = norm(rjEnd1);\n const rjj = slice(r, [j, j], [1, 1]);\n\n // The sign() function returns 0 on 0, which causes division by zero.\n const s = where(greater(rjj, 0), tensor2d([[-1]]), tensor2d([[1]]));\n\n const u1 = sub(rjj, mul(s, normX));\n const wPre = div(rjEnd1, u1);\n if (wPre.shape[0] === 1) {\n w = clone(one2D);\n } else {\n w = concat(\n [\n one2D,\n slice(wPre, [1, 0], [wPre.shape[0] - 1, wPre.shape[1]]) as\n Tensor2D\n ],\n 0);\n }\n const tau = neg(div(matMul(s, u1), normX)) as Tensor2D;\n\n // -- R := HR, Q := QH.\n const rjEndAll = slice(r, [j, 0], [m - j, n]);\n const tauTimesW: Tensor2D = mul(tau, w);\n const wT: Tensor2D = transpose(w);\n if (j === 0) {\n r = sub(rjEndAll, matMul(tauTimesW, matMul(wT, rjEndAll)));\n } else {\n const rTimesTau: Tensor2D =\n sub(rjEndAll, matMul(tauTimesW, matMul(wT, rjEndAll)));\n r = concat([slice(r, [0, 0], [j, n]), rTimesTau], 0);\n }\n const tawTimesWT: Tensor2D = transpose(tauTimesW);\n const qAllJEnd = slice(q, [0, j], [m, q.shape[1] - j]);\n if (j === 0) {\n q = sub(qAllJEnd, matMul(matMul(qAllJEnd, w), tawTimesWT));\n } else {\n const qTimesTau: Tensor2D =\n sub(qAllJEnd, matMul(matMul(qAllJEnd, w), tawTimesWT));\n q = concat([slice(q, [0, 0], [m, j]), qTimesTau], 1);\n }\n return [w, r, q];\n });\n dispose([rTemp, wTemp, qTemp]);\n }\n\n if (!fullMatrices && m > n) {\n q = slice(q, [0, 0], [m, n]);\n r = slice(r, [0, 0], [n, n]);\n }\n\n return [q, r];\n }) as [Tensor2D, Tensor2D];\n}\n\nexport const qr = /* @__PURE__ */ op({qr_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nexport enum Reduction {\n NONE,\n MEAN,\n SUM,\n SUM_BY_NONZERO_WEIGHTS\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor} from '../../tensor';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\n\nimport {cast} from '../cast';\nimport {div} from '../div';\nimport {Reduction} from '../loss_ops_utils';\nimport {mean} from '../mean';\nimport {mul} from '../mul';\nimport {notEqual} from '../not_equal';\nimport {ones} from '../ones';\nimport {op} from '../operation';\nimport {scalar} from '../scalar';\nimport {sum} from '../sum';\n\n/**\n * Computes the weighted loss between two tensors.\n *\n * @param losses Tensor of shape `[batch_size, d1, ..., dN]`.\n * @param weights Tensor whose rank is either 0, or the same rank as\n * `losses`, and must be broadcastable to `losses` (i.e., all\n * dimensions must be either `1`, or the same as the corresponding\n * `losses` dimension).\n *\n * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}\n */\nfunction computeWeightedLoss_(\n losses: T|TensorLike, weights?: Tensor|TensorLike,\n reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O {\n const $losses = convertToTensor(losses, 'losses', 'computeWeightedLoss');\n let $weights: Tensor = null;\n if (weights != null) {\n $weights = convertToTensor(weights, 'weights', 'computeWeightedLoss');\n }\n\n const weightedLoss = ($weights == null) ? $losses : mul($losses, $weights);\n\n if (reduction === Reduction.NONE) {\n return weightedLoss as O;\n }\n if (reduction === Reduction.SUM) {\n return sum(weightedLoss);\n }\n if (reduction === Reduction.MEAN) {\n if ($weights == null) {\n return mean(weightedLoss);\n } else {\n const broadcastFactor = $losses.size / $weights.size;\n const result = div(sum(weightedLoss), sum($weights));\n return broadcastFactor > 1 ? div(result, scalar(broadcastFactor)) :\n result as O;\n }\n }\n if (reduction === Reduction.SUM_BY_NONZERO_WEIGHTS) {\n if ($weights == null) {\n return div(sum(weightedLoss), scalar($losses.size));\n } else {\n const broadcastedWeights = mul($weights, ones($losses.shape));\n\n const numNonZeros =\n cast(sum(notEqual(broadcastedWeights, scalar(0))), 'float32');\n return div(sum(weightedLoss), numNonZeros);\n }\n }\n\n throw Error(`Unknown reduction: ${reduction}`);\n}\nexport const computeWeightedLoss = /* @__PURE__ */ op({computeWeightedLoss_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../../tensor';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport {assertShapesMatch} from '../../util';\nimport {abs} from '../abs';\nimport {Reduction} from '../loss_ops_utils';\nimport {op} from '../operation';\nimport {sub} from '../sub';\n\nimport {computeWeightedLoss} from './compute_weighted_loss';\n\n/**\n * Computes the absolute difference loss between two tensors.\n *\n * @param labels The ground truth output tensor, same dimensions as\n * 'predictions'.\n * @param predictions The predicted outputs.\n * @param weights Tensor whose rank is either 0, or the same rank as\n * `labels`, and must be broadcastable to `labels` (i.e., all dimensions\n * must be either `1`, or the same as the corresponding `losses`\n * dimension).\n * @param reduction Type of reduction to apply to loss. Should be of type\n * `Reduction`\n *\n * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}\n */\nfunction absoluteDifference_(\n labels: T|TensorLike, predictions: T|TensorLike,\n weights?: Tensor|TensorLike,\n reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O {\n const $labels = convertToTensor(labels, 'labels', 'absoluteDifference');\n const $predictions =\n convertToTensor(predictions, 'predictions', 'absoluteDifference');\n let $weights: Tensor = null;\n if (weights != null) {\n $weights = convertToTensor(weights, 'weights', 'absoluteDifference');\n }\n assertShapesMatch(\n $labels.shape, $predictions.shape, 'Error in absoluteDifference: ');\n\n const losses = abs(sub($labels, $predictions));\n return computeWeightedLoss(losses, $weights, reduction);\n}\n\nexport const absoluteDifference = /* @__PURE__ */ op({absoluteDifference_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor} from '../../tensor';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport {assertShapesMatch} from '../../util';\nimport {Reduction} from '../loss_ops_utils';\nimport {mul} from '../mul';\nimport {op} from '../operation';\nimport {scalar} from '../scalar';\nimport {sub} from '../sub';\nimport {sum} from '../sum';\n\nimport {computeWeightedLoss} from './compute_weighted_loss';\n\n/**\n * Computes the cosine distance loss between two tensors.\n *\n * @param labels The ground truth output tensor, same dimensions as\n * 'predictions'.\n * @param predictions The predicted outputs.\n * @param axis The dimension along which the cosine distance is computed.\n * @param weights Tensor whose rank is either 0, or the same rank as\n * `labels`, and must be broadcastable to `labels` (i.e., all dimensions\n * must be either `1`, or the same as the corresponding `losses`\n * dimension).\n * @param reduction Type of reduction to apply to loss. Should be of type\n * `Reduction`\n *\n * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}\n */\nfunction cosineDistance_(\n labels: T|TensorLike, predictions: T|TensorLike, axis: number,\n weights?: Tensor|TensorLike,\n reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O {\n const $labels = convertToTensor(labels, 'labels', 'cosineDistance');\n const $predictions =\n convertToTensor(predictions, 'predictions', 'cosineDistance');\n let $weights: Tensor = null;\n if (weights != null) {\n $weights = convertToTensor(weights, 'weights', 'cosineDistance');\n }\n assertShapesMatch(\n $labels.shape, $predictions.shape, 'Error in cosineDistance: ');\n\n const one = scalar(1);\n const losses = sub(one, sum(mul($labels, $predictions), axis, true));\n return computeWeightedLoss(losses, $weights, reduction);\n}\nexport const cosineDistance = /* @__PURE__ */ op({cosineDistance_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor} from '../../tensor';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport {assertShapesMatch} from '../../util';\nimport {Reduction} from '../loss_ops_utils';\nimport {mul} from '../mul';\nimport {op} from '../operation';\nimport {relu} from '../relu';\nimport {scalar} from '../scalar';\nimport {sub} from '../sub';\n\nimport {computeWeightedLoss} from './compute_weighted_loss';\n\n/**\n * Computes the Hinge loss between two tensors.\n *\n * @param labels The ground truth output tensor, same dimensions as\n * 'predictions'.\n * @param predictions The predicted outputs.\n * @param weights Tensor whose rank is either 0, or the same rank as\n * `labels`, and must be broadcastable to `labels` (i.e., all dimensions\n * must be either `1`, or the same as the corresponding `losses`\n * dimension).\n * @param reduction Type of reduction to apply to loss. Should be of type\n * `Reduction`\n *\n * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}\n */\nfunction hingeLoss_(\n labels: T|TensorLike, predictions: T|TensorLike,\n weights?: Tensor|TensorLike,\n reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O {\n let $labels = convertToTensor(labels, 'labels', 'hingeLoss');\n const $predictions = convertToTensor(predictions, 'predictions', 'hingeLoss');\n let $weights: Tensor = null;\n if (weights != null) {\n $weights = convertToTensor(weights, 'weights', 'hingeLoss');\n }\n assertShapesMatch($labels.shape, $predictions.shape, 'Error in hingeLoss: ');\n\n const one = scalar(1);\n // Convert binary labels to (-1, 1)\n $labels = sub(mul(scalar(2), $labels), one);\n const losses = relu(sub(one, mul($labels, $predictions)));\n return computeWeightedLoss(losses, $weights, reduction);\n}\nexport const hingeLoss = /* @__PURE__ */ op({hingeLoss_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../../tensor';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport {assertShapesMatch} from '../../util';\nimport {abs} from '../abs';\nimport {add} from '../add';\nimport {Reduction} from '../loss_ops_utils';\nimport {minimum} from '../minimum';\nimport {mul} from '../mul';\nimport {op} from '../operation';\nimport {scalar} from '../scalar';\nimport {square} from '../square';\nimport {sub} from '../sub';\n\nimport {computeWeightedLoss} from './compute_weighted_loss';\n\n/**\n * Computes the Huber loss between two tensors.\n *\n * @param labels The ground truth output tensor, same dimensions as\n * 'predictions'.\n * @param predictions The predicted outputs.\n * @param weights Tensor whose rank is either 0, or the same rank as\n * `labels`, and must be broadcastable to `labels` (i.e., all dimensions\n * must be either `1`, or the same as the corresponding `losses`\n * dimension).\n * @param delta Point where Huber loss changes from quadratic to linear.\n * @param reduction Type of reduction to apply to loss. Should be of type\n * `Reduction`.\n *\n * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}\n */\nfunction huberLoss_(\n labels: T|TensorLike, predictions: T|TensorLike,\n weights?: Tensor|TensorLike, delta = 1.0,\n reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O {\n const $labels = convertToTensor(labels, 'labels', 'huberLoss');\n const $predictions = convertToTensor(predictions, 'predictions', 'huberLoss');\n let $weights: Tensor = null;\n if (weights != null) {\n $weights = convertToTensor(weights, 'weights', 'huberLoss');\n }\n assertShapesMatch($labels.shape, $predictions.shape, 'Error in huberLoss: ');\n\n const deltaScalar = scalar(delta);\n const error = abs(sub($predictions, $labels));\n const quadratic = minimum(error, deltaScalar);\n const linear = sub(error, quadratic);\n\n const losses =\n add(mul(scalar(0.5), square(quadratic)), mul(deltaScalar, linear));\n return computeWeightedLoss(losses, $weights, reduction);\n}\nexport const huberLoss = /* @__PURE__ */ op({huberLoss_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../../tensor';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport {assertShapesMatch} from '../../util';\nimport {add} from '../add';\nimport {log} from '../log';\nimport {Reduction} from '../loss_ops_utils';\nimport {mul} from '../mul';\nimport {neg} from '../neg';\nimport {op} from '../operation';\nimport {scalar} from '../scalar';\nimport {sub} from '../sub';\n\nimport {computeWeightedLoss} from './compute_weighted_loss';\n\n/**\n * Computes the log loss between two tensors.\n *\n * @param labels The ground truth output tensor, same dimensions as\n * 'predictions'.\n * @param predictions The predicted outputs.\n * @param weights Tensor whose rank is either 0, or the same rank as\n * `labels`, and must be broadcastable to `labels` (i.e., all dimensions\n * must be either `1`, or the same as the corresponding `losses`\n * dimension).\n * @param epsilon A small increment to avoid taking log of zero\n * @param reduction Type of reduction to apply to loss. Should be of type\n * `Reduction`\n *\n * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}\n */\nfunction logLoss_(\n labels: T|TensorLike, predictions: T|TensorLike,\n weights?: Tensor|TensorLike, epsilon = 1e-7,\n reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O {\n const $labels = convertToTensor(labels, 'labels', 'logLoss');\n const $predictions = convertToTensor(predictions, 'predictions', 'logLoss');\n let $weights: Tensor = null;\n if (weights != null) {\n $weights = convertToTensor(weights, 'weights', 'logLoss');\n }\n assertShapesMatch($labels.shape, $predictions.shape, 'Error in logLoss: ');\n\n const one = scalar(1);\n const epsilonScalar = scalar(epsilon);\n\n const l1 = neg(mul($labels, log(add($predictions, epsilonScalar))));\n const l2 =\n mul(sub(one, $labels), log(add(sub(one, $predictions), epsilonScalar)));\n const losses = sub(l1, l2);\n return computeWeightedLoss(losses, $weights, reduction);\n}\nexport const logLoss = /* @__PURE__ */ op({logLoss_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../../tensor';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport {assertShapesMatch} from '../../util';\nimport {Reduction} from '../loss_ops_utils';\nimport {op} from '../operation';\nimport {squaredDifference} from '../squared_difference';\n\nimport {computeWeightedLoss} from './compute_weighted_loss';\n\n/**\n * Computes the mean squared error between two tensors.\n *\n * @param labels The ground truth output tensor, same dimensions as\n * 'predictions'.\n * @param predictions The predicted outputs.\n * @param weights Tensor whose rank is either 0, or the same rank as\n * `labels`, and must be broadcastable to `labels` (i.e., all dimensions\n * must be either `1`, or the same as the corresponding `losses`\n * dimension).\n * @param reduction Type of reduction to apply to loss. Should be of type\n * `Reduction`\n *\n * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}\n */\nfunction meanSquaredError_(\n labels: T|TensorLike, predictions: T|TensorLike,\n weights?: Tensor|TensorLike,\n reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O {\n const $labels = convertToTensor(labels, 'labels', 'meanSquaredError');\n const $predictions =\n convertToTensor(predictions, 'predictions', 'meanSquaredError');\n let $weights: Tensor = null;\n if (weights != null) {\n $weights = convertToTensor(weights, 'weights', 'meanSquaredError');\n }\n assertShapesMatch(\n $labels.shape, $predictions.shape, 'Error in meanSquaredError: ');\n\n const losses = squaredDifference($labels, $predictions);\n return computeWeightedLoss(losses, $weights, reduction);\n}\nexport const meanSquaredError = /* @__PURE__ */ op({meanSquaredError_});\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '../../tensor';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport {assertShapesMatch} from '../../util';\nimport {abs} from '../abs';\nimport {add} from '../add';\nimport {exp} from '../exp';\nimport {log1p} from '../log1p';\nimport {Reduction} from '../loss_ops_utils';\nimport {mul} from '../mul';\nimport {neg} from '../neg';\nimport {op} from '../operation';\nimport {relu} from '../relu';\nimport {scalar} from '../scalar';\nimport {sub} from '../sub';\n\nimport {computeWeightedLoss} from './compute_weighted_loss';\n\nfunction sigmoidCrossEntropyWithLogits_(\n labels: T|TensorLike, logits: T|TensorLike): O {\n const $labels =\n convertToTensor(labels, 'labels', 'sigmoidCrossEntropyWithLogits');\n const $logits =\n convertToTensor(logits, 'logits', 'sigmoidCrossEntropyWithLogits');\n assertShapesMatch(\n $labels.shape, $logits.shape, 'Error in sigmoidCrossEntropyWithLogits: ');\n\n /**\n * Implementation Details:\n *\n * For brevity, let `x = logits`, `z = labels`. The logistic loss is\n * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n * = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))\n * = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))\n * = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))\n * = (1 - z) * x + log(1 + exp(-x))\n * = x - x * z + log(1 + exp(-x))\n *\n * For x < 0, to avoid overflow in exp(-x), we reformulate the above\n * x - x * z + log(1 + exp(-x))\n * = log(exp(x)) - x * z + log(1 + exp(-x))\n * = - x * z + log(1 + exp(x))\n *\n * Hence, to ensure stability and avoid overflow, the implementation uses\n * this equivalent formulation:\n * max(x, 0) - x * z + log(1 + exp(-abs(x)))\n */\n const maxOutput = relu($logits);\n const outputXTarget = mul($logits, $labels);\n const sigmoidOutput = log1p(exp(neg(abs($logits))));\n\n return add(sub(maxOutput, outputXTarget), sigmoidOutput);\n}\n\n/**\n * Computes the sigmoid cross entropy loss between two tensors.\n *\n * If labelSmoothing is nonzero, smooth the labels towards 1/2:\n *\n * newMulticlassLabels = multiclassLabels * (1 - labelSmoothing)\n * + 0.5 * labelSmoothing\n *\n * @param multiClassLabels The ground truth output tensor of shape\n * [batch_size, num_classes], same dimensions as 'predictions'.\n * @param logits The predicted outputs.\n * @param weights Tensor whose rank is either 0, or the same rank as\n * `labels`, and must be broadcastable to `labels` (i.e., all dimensions\n * must be either `1`, or the same as the corresponding `losses`\n * dimension).\n * @param labelSmoothing If greater than 0, then smooth the labels.\n * @param reduction Type of reduction to apply to loss. 
Should be of type\n * `Reduction`\n *\n * @doc { heading: 'Training', subheading: 'Losses', namespace: 'losses' }\n */\nfunction sigmoidCrossEntropy_(\n multiClassLabels: T|TensorLike, logits: T|TensorLike,\n weights?: Tensor|TensorLike, labelSmoothing = 0,\n reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O {\n let $multiClassLabels = convertToTensor(\n multiClassLabels, 'multiClassLabels', 'sigmoidCrossEntropy');\n const $logits = convertToTensor(logits, 'logits', 'sigmoidCrossEntropy');\n let $weights: Tensor = null;\n if (weights != null) {\n $weights = convertToTensor(weights, 'weights', 'sigmoidCrossEntropy');\n }\n assertShapesMatch(\n $multiClassLabels.shape, $logits.shape, 'Error in sigmoidCrossEntropy: ');\n\n if (labelSmoothing > 0) {\n const labelSmoothingScalar = scalar(labelSmoothing);\n const one = scalar(1);\n const half = scalar(0.5);\n\n $multiClassLabels =\n add(mul($multiClassLabels, sub(one, labelSmoothingScalar)),\n mul(half, labelSmoothingScalar));\n }\n const losses = sigmoidCrossEntropyWithLogits_($multiClassLabels, $logits);\n\n return computeWeightedLoss(losses, $weights, reduction);\n}\n\nexport const sigmoidCrossEntropy = /* @__PURE__ */ op({sigmoidCrossEntropy_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {customGrad} from '../../gradients';\nimport {Tensor} from '../../tensor';\nimport {GradSaveFunc} from '../../tensor_types';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport {assertShapesMatch} from '../../util';\nimport {add} from '../add';\nimport {expandShapeToKeepDim} from '../axis_util';\nimport {cast} from '../cast';\nimport {div} from '../div';\nimport {exp} from '../exp';\nimport {logSumExp} from '../log_sum_exp';\nimport {Reduction} from '../loss_ops_utils';\nimport {mul} from '../mul';\nimport {neg} from '../neg';\nimport {op} from '../operation';\nimport {reshape} from '../reshape';\nimport {scalar} from '../scalar';\nimport {sub} from '../sub';\nimport {sum} from '../sum';\n\nimport {computeWeightedLoss} from './compute_weighted_loss';\n\n/**\n * Computes softmax cross entropy between logits and labels.\n *\n * Measures the probability error in discrete classification tasks in which\n * the classes are mutually exclusive (each entry is in exactly one class).\n * For example, each CIFAR-10 image is labeled with one and only one label: an\n * image can be a dog or a truck, but not both.\n *\n * `NOTE`: While the classes are mutually exclusive, their probabilities need\n * not be. All that is required is that each row of labels is a valid\n * probability distribution. If they are not, the computation of the gradient\n * will be incorrect.\n *\n * `WARNING`: This op expects unscaled logits, since it performs a softmax on\n * logits internally for efficiency. 
Do not call this op with the output of\n * softmax, as it will produce incorrect results.\n *\n * logits and labels must have the same shape, e.g. [batch_size, num_classes]\n * and the same dtype.\n * @param labels The labels array.\n * @param logits The logits array.\n * @param dim The dimension softmax would be performed on. Defaults to `-1`\n * which indicates the last dimension.\n */\nfunction softmaxCrossEntropyWithLogits_(\n labels: T, logits: T, dim = -1): O {\n if (dim === -1) {\n dim = logits.rank - 1;\n }\n\n if (dim !== logits.rank - 1) {\n throw Error(\n `Softmax cross entropy along a non-last dimension is not yet ` +\n `supported. Labels / logits was rank ${logits.rank} ` +\n `and dim was ${dim}`);\n }\n // Use a custom gradient for numerical stability.\n const customOp =\n customGrad((labels: Tensor, logits: Tensor, save: GradSaveFunc) => {\n // Reference:\n // 1. http://cs231n.github.io/linear-classify/#softmax\n // 2. https://blog.feedly.com/tricks-of-the-trade-logsumexp/\n const keepDims = true;\n const lse = logSumExp(logits, [dim], keepDims);\n const logResult = sub(cast(logits, 'float32'), lse);\n save([labels, logResult]);\n\n const costVector = neg(mul(logResult, labels));\n const value: O = sum(costVector, [dim]);\n\n const gradFunc = (dy: O, saved: Tensor[]) => {\n const [labels, logResult] = saved;\n const dyShape = expandShapeToKeepDim(dy.shape, [dim]);\n return [\n mul(reshape(dy, dyShape),\n sub(cast(labels, 'float32'), exp(logResult))),\n mul(reshape(dy, dyShape),\n sub(exp(logResult), cast(labels, 'float32'))),\n ];\n };\n return {value, gradFunc};\n });\n\n return customOp(labels, logits);\n}\n\n/**\n * Computes the softmax cross entropy loss between two tensors.\n *\n * If labelSmoothing is nonzero, smooth the labels towards 1/2:\n *\n * newOnehotLabels = onehotLabels * (1 - labelSmoothing)\n * + labelSmoothing / numClasses\n *\n * @param onehotLabels One hot encoded labels\n * [batch_size, num_classes], same dimensions as 'predictions'.\n * @param logits The predicted outputs.\n * @param weights Tensor whose rank is either 0, or 1, and must be\n * broadcastable to `loss` of shape [batch_size]\n * @param labelSmoothing If greater than 0, then smooth the labels.\n * @param reduction Type of reduction to apply to loss. 
Should be of type\n * `Reduction`\n *\n * @doc { heading: 'Training', subheading: 'Losses', namespace: 'losses' }\n */\nfunction softmaxCrossEntropy_(\n onehotLabels: T|TensorLike, logits: T|TensorLike,\n weights?: Tensor|TensorLike, labelSmoothing = 0,\n reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): O {\n let $onehotLabels =\n convertToTensor(onehotLabels, 'onehotLabels', 'softmaxCrossEntropy');\n const $logits = convertToTensor(logits, 'logits', 'softmaxCrossEntropy');\n let $weights: Tensor = null;\n\n if (weights != null) {\n $weights = convertToTensor(weights, 'weights', 'softmaxCrossEntropy');\n }\n\n assertShapesMatch(\n $onehotLabels.shape, $logits.shape, 'Error in softmaxCrossEntropy: ');\n\n if (labelSmoothing > 0) {\n const labelSmoothingScalar = scalar(labelSmoothing);\n const one = scalar(1);\n const numClasses = scalar($onehotLabels.shape[1]);\n\n $onehotLabels =\n add(mul($onehotLabels, sub(one, labelSmoothingScalar)),\n div(labelSmoothingScalar, numClasses));\n }\n\n const losses = softmaxCrossEntropyWithLogits_($onehotLabels, $logits);\n\n return computeWeightedLoss(losses, $weights, reduction);\n}\n\nexport const softmaxCrossEntropy = /* @__PURE__ */ op({softmaxCrossEntropy_});\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {SparseFillEmptyRows, SparseFillEmptyRowsInputs} from '../../kernel_names';\nimport {Scalar, Tensor, Tensor1D, Tensor2D} from '../../tensor';\nimport {NamedTensorMap} from '../../tensor_types';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {ScalarLike, TensorLike} from '../../types';\nimport {op} from '../operation';\n\n/**\n * The input SparseTensor is represented via the map of inputs {`indices`,\n * `values`, `denseShape`}. The output SparseTensor has the same `denseShape`\n * but with indices `outputIndices` and values `outputValues`. This op inserts a\n * single entry for every row that doesn't have any values. The index is created\n * as `[row, 0, ..., 0]` and the inserted value is `defaultValue`.\n *\n * For example, suppose `spInput` has shape [5, 6] and non-empty values:\n * [0, 1]: a\n * [0, 3]: b\n * [2, 0]: c\n * [3, 1]: d\n *\n * Rows 1 and 4 are empty, so the output will be of shape [5, 6] with values:\n * [0, 1]: a\n * [0, 3]: b\n * [1, 0]: `defaultValue`\n * [2, 0]: c\n * [3, 1]: d\n * [4, 0]: `defaultValue`\n *\n * The output SparseTensor will be in row-major order and will have the same\n * shape as the input.\n *\n * This op also returns an indicator vector shaped [dense_shape[0]] such that\n * emptyRowIndicator[i] = True iff row i was an empty row.\n *\n * And a reverse index map vector shaped [indices.shape[0]] that is used during\n * backpropagation, reverseIndexMap[i] = outi s.t. 
indices[i, j] ==\n * outputIndices[outi, j] for all j\n *\n * ```js\n * const result = tf.sparse.sparseFillEmptyRows(\n * [[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]],\n * [0, 10, 13, 14, 32, 33], [5, 6], -1);\n * console.log(result);\n * result['outputIndices'].print(); // [[0, 0], [1, 0], [1, 3], [1, 4],\n * // [2, 0], [3, 2], [3, 3], [4, 0]]\n * result['outputValues'].print(); // [0, 10, 13, 14,-1, 32, 33, -1]\n * result['emptyRowIndicator'].print(); // [false, false, true, false, true]\n * result['reverseIndexMap'].print(); // [0, 1, 2, 3, 5, 6]\n * ```\n * @param indices: 2-D. The indices of the sparse tensor.\n * @param values: 1-D. The values of the sparse tensor.\n * @param denseShape: 1-D. The shape of the sparse tensor.\n * @param defaultValue: 0-D. Default value to insert into location [row, 0, ...,\n * 0] for rows missing from the input sparse tensor.\n * @return A map with the following properties:\n * - outputIndices\n * - outputValues: 1-D. The values of the filled sparse tensor.\n * - emptyRowIndicator: 1-D. Whether the dense row was missing in the input\n * sparse tensor.\n * - reverseIndexMap: 1-D. A map from the input indices to the output\n * indices.\n * @doc {heading: 'Operations', subheading: 'Sparse'}\n */\nfunction sparseFillEmptyRows_(\n indices: Tensor2D|TensorLike, values: Tensor1D|TensorLike,\n denseShape: Tensor1D|TensorLike,\n defaultValue: Scalar|ScalarLike): NamedTensorMap {\n const $indices =\n convertToTensor(indices, 'indices', 'sparseFillEmptyRows', 'int32');\n const $values = convertToTensor(values, 'values', 'sparseFillEmptyRows');\n const $denseShape =\n convertToTensor(denseShape, 'denseShape', 'sparseFillEmptyRows', 'int32');\n const $defaultValue = convertToTensor(\n defaultValue, 'defaultValue', 'sparseFillEmptyRows', $values.dtype);\n\n if ($indices.rank !== 2) {\n throw new Error(`Indices should be Tensor2D but received shape\n ${$indices.shape}`);\n }\n if ($values.rank !== 1) {\n throw new Error(\n `Values should be Tensor1D but received shape ${$values.shape}`);\n }\n if ($denseShape.rank !== 1) {\n throw new Error(`Dense shape should be Tensor1D but received shape ${\n $denseShape.shape}`);\n }\n if ($defaultValue.rank !== 0) {\n throw new Error(`Default value should be a scalar but received shape ${\n $defaultValue.shape}`);\n }\n\n const inputs: SparseFillEmptyRowsInputs = {\n indices: $indices,\n values: $values,\n denseShape: $denseShape,\n defaultValue: $defaultValue\n };\n\n const result: Tensor[] = ENGINE.runKernel(SparseFillEmptyRows, inputs as {});\n return {\n outputIndices: result[0],\n outputValues: result[1],\n emptyRowIndicator: result[2],\n reverseIndexMap: result[3]\n };\n}\n\nexport const sparseFillEmptyRows = /* @__PURE__ */ op({sparseFillEmptyRows_});\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {SparseReshape, SparseReshapeInputs} from '../../kernel_names';\nimport {Tensor, Tensor1D, Tensor2D} from '../../tensor';\nimport {NamedTensorMap} from '../../tensor_types';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport {op} from '../operation';\n\n/**\n * This operation has the same semantics as reshape on the represented dense\n * tensor. The `inputIndices` are recomputed based on the requested `newShape`.\n * If one component of `newShape` is the special value -1, the size of that\n * dimension is computed so that the total dense size remains constant. At most\n * one component of `newShape` can be -1. The number of dense elements implied\n * by `newShape` must be the same as the number of dense elements originally\n * implied by `inputShape`. Reshaping does not affect the order of values in the\n * SparseTensor. If the input tensor has rank R_in and N non-empty values, and\n * `newShape` has length R_out, then `inputIndices` has shape [N, R_in],\n * `inputShape` has length R_in, `outputIndices` has shape [N, R_out], and\n * `outputShape` has length R_out.\n *\n * ```js\n * const result = tf.sparse.sparseReshape(\n * [[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]],\n * [2, 3, 6], [9, -1]);\n * console.log(result);\n * result['outputIndices'].print(); //[[0, 0], [0, 1], [1, 2], [4, 2], [8, 1]]\n * result['outputShape'].print(); // [9, 4]\n * ```\n * @param inputIndices: 2-D. N x R_in matrix with the indices of non-empty\n * values in a SparseTensor.\n * @param inputShape: 1-D. R_in Tensor1D with the input SparseTensor's dense\n * shape.\n * @param newShape: 1-D. R_out Tensor1D with the requested new dense shape.\n * @return A map with the following properties:\n * - outputIndices: 2-D. N x R_out matrix with the updated indices of\n * non-empty values in the output SparseTensor.\n * - outputShape: 1-D. R_out vector with the full dense shape of the output\n * SparseTensor. 
This is the same as newShape but with any -1 dimensions\n * filled in.\n * @doc {heading: 'Operations', subheading: 'Sparse'}\n */\nfunction sparseReshape_(\n inputIndices: Tensor2D|TensorLike, inputShape: Tensor1D|TensorLike,\n newShape: Tensor1D|TensorLike): NamedTensorMap {\n const $inputIndices =\n convertToTensor(inputIndices, 'inputIndices', 'sparseReshape', 'int32');\n const $inputShape =\n convertToTensor(inputShape, 'inputShape', 'sparseReshape', 'int32');\n const $newShape =\n convertToTensor(newShape, 'newShape', 'sparseReshape', 'int32');\n\n if ($inputIndices.rank !== 2) {\n throw new Error(`Input indices should be Tensor2D but received shape\n ${$inputIndices.shape}`);\n }\n if ($inputShape.rank !== 1) {\n throw new Error(`Input shape should be Tensor1D but received shape ${\n $inputShape.shape}`);\n }\n if ($newShape.rank !== 1) {\n throw new Error(\n `New shape should be Tensor1D but received shape ${$newShape.shape}`);\n }\n\n const inputs: SparseReshapeInputs = {\n inputIndices: $inputIndices,\n inputShape: $inputShape,\n newShape: $newShape\n };\n const result: Tensor[] = ENGINE.runKernel(SparseReshape, inputs as {});\n return {outputIndices: result[0], outputShape: result[1]};\n}\n\nexport const sparseReshape = /* @__PURE__ */ op({sparseReshape_});\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {SparseSegmentMean, SparseSegmentMeanInputs} from '../../kernel_names';\nimport {Tensor, Tensor1D} from '../../tensor';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport {op} from '../operation';\n\n/**\n * Computes the mean along sparse segments of a tensor.\n *\n * ```js\n * const c = tf.tensor2d([[1,2,3,4], [-1,-2,-3,-4], [6,7,8,9]]);\n * // Select two rows, one segment.\n * const result1 = tf.sparse.sparseSegmentMean(c,\n * tf.tensor1d([0, 1], 'int32'),\n * tf.tensor1d([0, 0], 'int32'));\n * result1.print(); // [[0, 0, 0, 0]]\n *\n * // Select two rows, two segments.\n * const result2 = tf.sparse.sparseSegmentMean(c,\n * tf.tensor1d([0, 1], 'int32'),\n * tf.tensor1d([0, 1], 'int32'));\n * result2.print(); // [[1, 2, 3, 4], [-1, -2, -3, -4]]\n *\n * // Select all rows, two segments.\n * const result3 = tf.sparse.sparseSegmentMean(c,\n * tf.tensor1d([0, 1, 2], 'int32'),\n * tf.tensor1d([0, 1, 1], 'int32'));\n * result3.print(); // [[1.0, 2.0, 3.0, 4.0], [2.5, 2.5, 2.5, 2.5]]\n * ```\n * @param data: A Tensor of at least one dimension with data that will be\n * assembled in the output.\n * @param indices: A 1-D Tensor with indices into data. Has same rank as\n * segmentIds.\n * @param segmentIds: A 1-D Tensor with indices into the output Tensor. 
Values\n * should be sorted and can be repeated.\n * @return Has same shape as data, except for dimension 0 which has equal to\n * the number of segments.\n *\n * @doc {heading: 'Operations', subheading: 'Sparse'}\n */\nfunction sparseSegmentMean_(\n data: Tensor|TensorLike, indices: Tensor1D|TensorLike,\n segmentIds: Tensor1D|TensorLike): Tensor {\n const $data = convertToTensor(data, 'data', 'sparseSegmentMean');\n const $indices =\n convertToTensor(indices, 'indices', 'sparseSegmentMean', 'int32');\n const $segmentIds =\n convertToTensor(segmentIds, 'segmentIds', 'sparseSegmentMean', 'int32');\n\n if ($data.rank < 1) {\n throw new Error(\n `Data should be at least 1 dimensional but received scalar`);\n }\n if ($indices.rank !== 1) {\n throw new Error(`Indices should be Tensor1D but received shape\n ${$indices.shape}`);\n }\n if ($segmentIds.rank !== 1) {\n throw new Error(`Segment ids should be Tensor1D but received shape\n ${$segmentIds.shape}`);\n }\n\n const inputs: SparseSegmentMeanInputs = {\n data: $data,\n indices: $indices,\n segmentIds: $segmentIds\n };\n\n return ENGINE.runKernel(SparseSegmentMean, inputs as {});\n}\n\nexport const sparseSegmentMean = /* @__PURE__ */ op({sparseSegmentMean_});\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {SparseSegmentSum, SparseSegmentSumInputs} from '../../kernel_names';\nimport {Tensor, Tensor1D} from '../../tensor';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport {op} from '../operation';\n\n/**\n * Computes the sum along sparse segments of a tensor.\n *\n * ```js\n * const c = tf.tensor2d([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]);\n * // Select two rows, one segment.\n * const result1 = tf.sparse.sparseSegmentSum(c,\n * tf.tensor1d([0, 1], 'int32'),\n * tf.tensor1d([0, 0], 'int32'));\n * result1.print(); // [[0, 0, 0, 0]]\n *\n * // Select two rows, two segments.\n * const result2 = tf.sparse.sparseSegmentSum(c,\n * tf.tensor1d([0, 1], 'int32'),\n * tf.tensor1d([0, 1], 'int32'));\n * result2.print(); // [[1, 2, 3, 4], [-1, -2, -3, -4]]\n *\n * // Select all rows, two segments.\n * const result3 = tf.sparse.sparseSegmentSum(c,\n * tf.tensor1d([0, 1, 2], 'int32'),\n * tf.tensor1d([0, 0, 1], 'int32'));\n * result3.print(); // [[0, 0, 0, 0], [5, 6, 7, 8]]\n * ```\n * @param data: A Tensor of at least one dimension with data that will be\n * assembled in the output.\n * @param indices: A 1-D Tensor with indices into data. Has same rank as\n * segmentIds.\n * @param segmentIds: A 1-D Tensor with indices into the output Tensor. 
Values\n * should be sorted and can be repeated.\n * @return Has same shape as data, except for dimension 0 which has equal to\n * the number of segments.\n *\n * @doc {heading: 'Operations', subheading: 'Sparse'}\n */\nfunction sparseSegmentSum_(\n data: Tensor|TensorLike, indices: Tensor1D|TensorLike,\n segmentIds: Tensor1D|TensorLike): Tensor {\n const $data = convertToTensor(data, 'data', 'sparseSegmentSum');\n const $indices =\n convertToTensor(indices, 'indices', 'sparseSegmentSum', 'int32');\n const $segmentIds =\n convertToTensor(segmentIds, 'segmentIds', 'sparseSegmentSum', 'int32');\n\n if ($data.rank < 1) {\n throw new Error(\n `Data should be at least 1 dimensional but received scalar`);\n }\n if ($indices.rank !== 1) {\n throw new Error(`Indices should be Tensor1D but received shape\n ${$indices.shape}`);\n }\n if ($segmentIds.rank !== 1) {\n throw new Error(`Segment ids should be Tensor1D but received shape\n ${$segmentIds.shape}`);\n }\n\n const inputs: SparseSegmentSumInputs = {\n data: $data,\n indices: $indices,\n segmentIds: $segmentIds\n };\n\n return ENGINE.runKernel(SparseSegmentSum, inputs as {});\n}\n\nexport const sparseSegmentSum = /* @__PURE__ */ op({sparseSegmentSum_});\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {StringNGrams, StringNGramsAttrs, StringNGramsInputs} from '../../kernel_names';\nimport {Tensor, Tensor1D} from '../../tensor';\nimport {NamedTensorMap} from '../../tensor_types';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport {op} from '../operation';\n\n/**\n * Creates ngrams from ragged string data.\n *\n * This op accepts a ragged tensor with 1 ragged dimension containing only\n * strings and outputs a ragged tensor with 1 ragged dimension containing ngrams\n * of that string, joined along the innermost axis.\n *\n * ```js\n * const result = tf.string.stringNGrams(\n * ['a', 'b', 'c', 'd'], tf.tensor1d([0, 2, 4], 'int32'),\n * '|', [1, 2], 'LP', 'RP', -1, false);\n * result['nGrams'].print(); // ['a', 'b', 'LP|a', 'a|b', 'b|RP',\n * // 'c', 'd', 'LP|c', 'c|d', 'd|RP']\n * result['nGramsSplits'].print(); // [0, 5, 10]\n * ```\n * @param data: The values tensor of the ragged string tensor to make ngrams out\n * of. Must be a 1D string tensor.\n * @param dataSplits: The splits tensor of the ragged string tensor to make\n * ngrams out of.\n * @param separator: The string to append between elements of the token. Use \"\"\n * for no separator.\n * @param nGramWidths: The sizes of the ngrams to create.\n * @param leftPad: The string to use to pad the left side of the ngram sequence.\n * Only used if pad_width !== 0.\n * @param rightPad: The string to use to pad the right side of the ngram\n * sequence. 
Only used if pad_width !== 0.\n * @param padWidth: The number of padding elements to add to each side of each\n * sequence. Note that padding will never be greater than `nGramWidths`-1\n * regardless of this value. If `padWidth`=-1, then add max(`nGramWidths`)-1\n * elements.\n * @param preserveShortSequences: If true, then ensure that at least one ngram\n * is generated for each input sequence. In particular, if an input sequence\n * is shorter than min(ngramWidth) + 2*padWidth, then generate a single\n * ngram containing the entire sequence. If false, then no ngrams are\n * generated for these short input sequences.\n * @return A map with the following properties:\n * - nGrams: The values tensor of the output ngrams ragged tensor.\n * - nGramsSplits: The splits tensor of the output ngrams ragged tensor.\n *\n * @doc {heading: 'Operations', subheading: 'String'}\n */\nfunction stringNGrams_(\n data: Tensor1D|TensorLike, dataSplits: Tensor|TensorLike, separator: string,\n nGramWidths: number[], leftPad: string, rightPad: string, padWidth: number,\n preserveShortSequences: boolean): NamedTensorMap {\n const $data = convertToTensor(data, 'data', 'stringNGrams', 'string');\n if ($data.dtype !== 'string') {\n throw new Error('Data must be of datatype string');\n }\n if ($data.shape.length !== 1) {\n throw new Error(`Data must be a vector, saw: ${$data.shape}`);\n }\n\n const $dataSplits = convertToTensor(dataSplits, 'dataSplits', 'stringNGrams');\n if ($dataSplits.dtype !== 'int32') {\n throw new Error('Data splits must be of datatype int32');\n }\n\n const attrs: StringNGramsAttrs = {\n separator,\n nGramWidths,\n leftPad,\n rightPad,\n padWidth,\n preserveShortSequences\n };\n\n const inputs: StringNGramsInputs = {data: $data, dataSplits: $dataSplits};\n const result: Tensor[] =\n ENGINE.runKernel(StringNGrams, inputs as {}, attrs as {});\n return {nGrams: result[0], nGramsSplits: result[1]};\n}\n\nexport const stringNGrams = /* @__PURE__ */ op({stringNGrams_});\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {StringSplit, StringSplitAttrs, StringSplitInputs} from '../../kernel_names';\nimport {Scalar, Tensor, Tensor1D} from '../../tensor';\nimport {NamedTensorMap} from '../../tensor_types';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {ScalarLike, TensorLike} from '../../types';\nimport {op} from '../operation';\n\n/**\n * Split elements of `input` based on `delimiter` into a SparseTensor .\n *\n * Let N be the size of source (typically N will be the batch size). Split each\n * element of `input` based on `delimiter` and return a SparseTensor containing\n * the splitted tokens. Empty tokens are ignored if `skipEmpty` is set to True.\n *\n * `delimiter` can be empty, or a string of split characters. 
If `delimiter` is\n * an empty string, each element of `input` is split into individual\n * character strings. Otherwise every character of `delimiter` is a potential\n * split point.\n *\n * ```js\n * const result = tf.string.stringSplit(['hello world', 'a b c'], ' ');\n * result['indices'].print(); // [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]]\n * result['values'].print(); // ['hello', 'world', 'a', 'b', 'c']\n * result['shape'].print(); // [2, 3]\n * ```\n * @param input: 1-D. Strings to split.\n * @param delimiter: 0-D. Delimiter characters, or empty string.\n * @param skipEmpty: Optional. If true, skip the empty strings from the result.\n * Defaults to true.\n * @return A map with the following properties:\n * - indices: A dense matrix of int32 representing the indices of the sparse\n * tensor.\n * - values: A vector of strings corresponding to the splited values.\n * - shape: a length-2 vector of int32 representing the shape of the sparse\n * tensor, where the first value is N and the second value is the maximum number\n * of tokens in a single input entry.\n *\n * @doc {heading: 'Operations', subheading: 'String'}\n */\nfunction stringSplit_(\n input: Tensor1D|TensorLike, delimiter: Scalar|ScalarLike,\n skipEmpty = true): NamedTensorMap {\n const $input = convertToTensor(input, 'input', 'stringSplit', 'string');\n const $delimiter =\n convertToTensor(delimiter, 'delimiter', 'stringSplit', 'string');\n\n if ($input.rank !== 1) {\n throw new Error(\n `Input should be Tensor1D but received shape ${$input.shape}`);\n }\n if ($delimiter.rank !== 0) {\n throw new Error(\n `Delimiter should be a scalar but received shape ${$delimiter.shape}`);\n }\n\n const attrs: StringSplitAttrs = {skipEmpty};\n const inputs: StringSplitInputs = {input: $input, delimiter: $delimiter};\n const result: Tensor[] =\n ENGINE.runKernel(StringSplit, inputs as {}, attrs as {});\n return {indices: result[0], values: result[1], shape: result[2]};\n}\n\nexport const stringSplit = /* @__PURE__ */ op({stringSplit_});\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../../engine';\nimport {StringToHashBucketFast, StringToHashBucketFastAttrs, StringToHashBucketFastInputs} from '../../kernel_names';\nimport {Tensor} from '../../tensor';\nimport {convertToTensor} from '../../tensor_util_env';\nimport {TensorLike} from '../../types';\nimport {op} from '../operation';\n\n/**\n * Converts each string in the input Tensor to its hash mod by a number of\n * buckets.\n *\n * The hash function is deterministic on the content of the string within the\n * process and will never change. However, it is not suitable for cryptography.\n * This function may be used when CPU time is scarce and inputs are trusted or\n * unimportant. 
There is a risk of adversaries constructing inputs that all hash\n * to the same bucket.\n *\n * ```js\n * const result = tf.string.stringToHashBucketFast(\n * ['Hello', 'TensorFlow', '2.x'], 3);\n * result.print(); // [0, 2, 2]\n * ```\n * @param input: The strings to assign a hash bucket.\n * @param numBuckets: The number of buckets.\n * @return A Tensor of the same shape as the input tensor.\n *\n * @doc {heading: 'Operations', subheading: 'String'}\n */\nfunction stringToHashBucketFast_(\n input: Tensor|TensorLike, numBuckets: number): Tensor {\n const $input =\n convertToTensor(input, 'input', 'stringToHashBucketFast', 'string');\n const attrs: StringToHashBucketFastAttrs = {numBuckets};\n\n if (numBuckets <= 0) {\n throw new Error(`Number of buckets must be at least 1`);\n }\n\n const inputs: StringToHashBucketFastInputs = {input: $input};\n return ENGINE.runKernel(StringToHashBucketFast, inputs as {}, attrs as {});\n}\n\nexport const stringToHashBucketFast = /* @__PURE__ */ op({stringToHashBucketFast_});\n","/**\n * @license\n * Copyright 2022 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nexport enum RowPartitionType {\n FIRST_DIM_SIZE,\n VALUE_ROWIDS,\n ROW_LENGTHS,\n ROW_SPLITS,\n ROW_LIMITS,\n ROW_STARTS\n}\n\nexport function combineRaggedTensorToTensorShapes(\n raggedRank: number, shape: number[], valueShape: number[]) {\n // Test for consistency of valueShape and shape specified.\n // If shape is unspecified and valueShape is specified, then copy\n // over the size from the valueShape dimension.\n\n let outputShape: number[] = new Array();\n if (valueShape == null && shape == null) {\n return outputShape;\n }\n\n if (shape == null) {\n // Here, value_shape must be of known size.\n while (outputShape.length < raggedRank + valueShape.length) {\n outputShape.push(-1);\n }\n } else {\n outputShape = shape.slice();\n }\n if (valueShape == null) {\n return outputShape;\n }\n // At this point, valueShape and output_shape have known ranks.\n if (raggedRank + valueShape.length !== outputShape.length) {\n throw new Error(\n `rt input.shape and shape=${shape} are incompatible: rt input.rank = ${\n raggedRank +\n valueShape.length}, but shape.rank = ${outputShape.length}`);\n }\n\n for (let i = 1; i < valueShape.length; ++i) {\n const valueDim = valueShape[i];\n const outputShapeDimIndex =\n outputShape[outputShape.length - valueShape.length + i];\n const outputShapeDim = outputShape[outputShapeDimIndex];\n\n if (valueDim >= 0) {\n if (outputShapeDim >= 0) {\n if (outputShapeDim !== valueDim) {\n throw new Error(`rt input.shape and shape=${\n shape} are incompatible: rt input.shape[${i + raggedRank}] = ${\n valueDim} but shape[${i + raggedRank}] = ${outputShapeDim}`);\n }\n } else {\n outputShape[outputShapeDimIndex] = valueDim;\n }\n }\n }\n return outputShape;\n}\n\nexport function getRowPartitionTypesHelper(rowPartitionTypeStrings: string[]) {\n const 
stringToType = {\n 'FIRST_DIM_SIZE': RowPartitionType.FIRST_DIM_SIZE,\n 'VALUE_ROWIDS': RowPartitionType.VALUE_ROWIDS,\n 'ROW_LENGTHS': RowPartitionType.ROW_LENGTHS,\n 'ROW_SPLITS': RowPartitionType.ROW_SPLITS,\n 'ROW_LIMITS': RowPartitionType.ROW_LIMITS,\n 'ROW_STARTS': RowPartitionType.ROW_STARTS\n };\n\n const result: RowPartitionType[] = [];\n for (const typeStr of rowPartitionTypeStrings) {\n if (typeStr in stringToType) {\n result.push(stringToType[typeStr as keyof typeof stringToType]);\n } else {\n break;\n }\n }\n\n return result;\n}\n\nexport function getRaggedRank(rowPartitionTypes: RowPartitionType[]) {\n if (rowPartitionTypes.length === 0) {\n return 0;\n }\n if (rowPartitionTypes[0] === RowPartitionType.FIRST_DIM_SIZE) {\n return rowPartitionTypes.length - 1;\n }\n return rowPartitionTypes.length;\n}\n\nexport function validateDefaultValueShape(\n defaultValueShape: number[], valueShape: number[]) {\n if (defaultValueShape == null || valueShape == null) {\n return;\n }\n\n const defaultNDims = defaultValueShape.length;\n const valuesNDims = valueShape.length;\n if (defaultNDims >= valuesNDims) {\n throw new Error(`defaultValue.shape=${\n defaultValueShape} and ragged tensor flatValues.shape=${\n valueShape}, are incompatible: defaultValue.rank = ${\n defaultNDims} must be less than ragged tensor input flatValues.rank = ${\n valuesNDims})`);\n }\n for (let i = 0; i < Math.min(defaultNDims, valuesNDims - 1); ++i) {\n const defaultDim = defaultValueShape[i];\n const valueDim = valueShape[i + 1];\n if (defaultDim >= 0 && valueDim >= 0 && defaultDim !== 1 &&\n defaultDim !== valueDim) {\n throw new Error(`defaultValue.shape=${\n defaultValueShape}, and ragged tensor input flatValues.shape=${\n valueShape} are incompatible: defaultValue.shape[${\n i - defaultValueShape.length}] = ${\n defaultDim} but ragged tensor input.flatValues.shape[${\n i - defaultValueShape.length}] = ${valueDim}`);\n }\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// Modularized ops.\nexport {abs} from './abs';\nexport {acos} from './acos';\nexport {acosh} from './acosh';\nexport {add} from './add';\nexport {addN} from './add_n';\nexport {all} from './all';\nexport {any} from './any';\nexport {argMax} from './arg_max';\nexport {argMin} from './arg_min';\nexport {asin} from './asin';\nexport {asinh} from './asinh';\nexport {atan} from './atan';\nexport {atan2} from './atan2';\nexport {atanh} from './atanh';\nexport {avgPool} from './avg_pool';\nexport {avgPool3d} from './avg_pool_3d';\nexport {basicLSTMCell} from './basic_lstm_cell';\nexport {batchToSpaceND} from './batch_to_space_nd';\nexport {batchNorm} from './batchnorm';\nexport {batchNorm2d} from './batchnorm2d';\nexport {batchNorm3d} from './batchnorm3d';\nexport {batchNorm4d} from './batchnorm4d';\nexport {bincount} from './bincount';\nexport {broadcastArgs} from './broadcast_args';\nexport {broadcastTo} from './broadcast_to';\nexport {buffer} from './buffer';\nexport {cast} from './cast';\nexport {ceil} from './ceil';\nexport {clipByValue} from './clip_by_value';\nexport {clone} from './clone';\nexport {complex} from './complex';\nexport {concat} from './concat';\nexport {concat1d} from './concat_1d';\nexport {concat2d} from './concat_2d';\nexport {concat3d} from './concat_3d';\nexport {concat4d} from './concat_4d';\nexport {conv1d} from './conv1d';\nexport {conv2d} from './conv2d';\nexport {conv2dTranspose} from './conv2d_transpose';\nexport {conv3d} from './conv3d';\nexport {conv3dTranspose} from './conv3d_transpose';\nexport {cos} from './cos';\nexport {cosh} from './cosh';\nexport {cumprod} from './cumprod';\nexport {cumsum} from './cumsum';\nexport {denseBincount} from './dense_bincount';\nexport {depthToSpace} from './depth_to_space';\nexport {depthwiseConv2d} from './depthwise_conv2d';\nexport {diag} from './diag';\nexport {dilation2d} from './dilation2d';\nexport {div} from './div';\nexport {divNoNan} from './div_no_nan';\nexport {dot} from './dot';\nexport {einsum} from './einsum';\nexport {elu} from './elu';\nexport {equal} from './equal';\nexport {erf} from './erf';\nexport {euclideanNorm} from './euclidean_norm';\nexport {exp} from './exp';\nexport {expandDims} from './expand_dims';\nexport {expm1} from './expm1';\nexport {eye} from './eye';\nexport {fill} from './fill';\nexport {floor} from './floor';\nexport {floorDiv} from './floorDiv';\nexport {gather} from './gather';\nexport {greater} from './greater';\nexport {greaterEqual} from './greater_equal';\nexport {imag} from './imag';\nexport {isFinite} from './is_finite';\nexport {isInf} from './is_inf';\nexport {isNaN} from './is_nan';\nexport {leakyRelu} from './leaky_relu';\nexport {less} from './less';\nexport {lessEqual} from './less_equal';\nexport {linspace} from './linspace';\nexport {localResponseNormalization} from './local_response_normalization';\nexport 
{log} from './log';\nexport {log1p} from './log1p';\nexport {logSigmoid} from './log_sigmoid';\nexport {logSoftmax} from './log_softmax';\nexport {logSumExp} from './log_sum_exp';\nexport {logicalAnd} from './logical_and';\nexport {logicalNot} from './logical_not';\nexport {logicalOr} from './logical_or';\nexport {logicalXor} from './logical_xor';\nexport {lowerBound} from './lower_bound';\nexport {matMul} from './mat_mul';\nexport {max} from './max';\nexport {maxPool} from './max_pool';\nexport {maxPool3d} from './max_pool_3d';\nexport {maxPoolWithArgmax} from './max_pool_with_argmax';\nexport {maximum} from './maximum';\nexport {mean} from './mean';\nexport {meshgrid} from './meshgrid';\nexport {min} from './min';\nexport {minimum} from './minimum';\nexport {mirrorPad} from './mirror_pad';\nexport {mod} from './mod';\nexport {moments} from './moments';\nexport {mul} from './mul';\nexport {LSTMCellFunc, multiRNNCell} from './multi_rnn_cell';\nexport {multinomial} from './multinomial';\nexport {neg} from './neg';\nexport {notEqual} from './not_equal';\nexport {oneHot} from './one_hot';\nexport {ones} from './ones';\nexport {onesLike} from './ones_like';\nexport {outerProduct} from './outer_product';\nexport {pad} from './pad';\nexport {pad1d} from './pad1d';\nexport {pad2d} from './pad2d';\nexport {pad3d} from './pad3d';\nexport {pad4d} from './pad4d';\nexport {pool} from './pool';\nexport {pow} from './pow';\nexport {prelu} from './prelu';\nexport {print} from './print';\nexport {prod} from './prod';\nexport {raggedGather} from './ragged_gather';\nexport {raggedRange} from './ragged_range';\nexport {raggedTensorToTensor} from './ragged_tensor_to_tensor';\nexport {rand} from './rand';\nexport {randomGamma} from './random_gamma';\nexport {randomNormal} from './random_normal';\nexport {randomStandardNormal} from './random_standard_normal';\nexport {randomUniform} from './random_uniform';\nexport {range} from './range';\nexport {real} from './real';\nexport {reciprocal} from './reciprocal';\nexport {relu} from './relu';\nexport {relu6} from './relu6';\nexport {reshape} from './reshape';\nexport {reverse} from './reverse';\nexport {reverse1d} from './reverse_1d';\nexport {reverse2d} from './reverse_2d';\nexport {reverse3d} from './reverse_3d';\nexport {reverse4d} from './reverse_4d';\nexport {round} from './round';\nexport {rsqrt} from './rsqrt';\nexport {scalar} from './scalar';\nexport {selu} from './selu';\nexport {separableConv2d} from './separable_conv2d';\nexport {setdiff1dAsync} from './setdiff1d_async';\nexport {sigmoid} from './sigmoid';\nexport {sign} from './sign';\nexport {sin} from './sin';\nexport {sinh} from './sinh';\nexport {slice} from './slice';\nexport {slice1d} from './slice1d';\nexport {slice2d} from './slice2d';\nexport {slice3d} from './slice3d';\nexport {slice4d} from './slice4d';\nexport {softmax} from './softmax';\nexport {softplus} from './softplus';\nexport {spaceToBatchND} from './space_to_batch_nd';\nexport {fft} from './spectral/fft';\nexport {ifft} from './spectral/ifft';\nexport {irfft} from './spectral/irfft';\nexport {rfft} from './spectral/rfft';\nexport {split} from './split';\nexport {sqrt} from './sqrt';\nexport {square} from './square';\nexport {squaredDifference} from './squared_difference';\nexport {squeeze} from './squeeze';\nexport {stack} from './stack';\nexport {step} from './step';\nexport {stridedSlice} from './strided_slice';\nexport {sub} from './sub';\nexport {sum} from './sum';\nexport {tan} from './tan';\nexport {tanh} from 
'./tanh';\nexport {tensor} from './tensor';\nexport {tensor1d} from './tensor1d';\nexport {tensor2d} from './tensor2d';\nexport {tensor3d} from './tensor3d';\nexport {tensor4d} from './tensor4d';\nexport {tensor5d} from './tensor5d';\nexport {tensor6d} from './tensor6d';\nexport {tile} from './tile';\nexport {topk} from './topk';\nexport {truncatedNormal} from './truncated_normal';\nexport {unique} from './unique';\nexport {unsortedSegmentSum} from './unsorted_segment_sum';\nexport {unstack} from './unstack';\nexport {upperBound} from './upper_bound';\nexport {variable} from './variable';\nexport {where} from './where';\nexport {whereAsync} from './where_async';\nexport {zeros} from './zeros';\nexport {zerosLike} from './zeros_like';\n\nexport * from './boolean_mask';\nexport * from './transpose';\nexport * from './norm';\nexport * from './moving_average';\nexport * from './scatter_nd';\nexport * from './search_sorted';\nexport * from './sparse_to_dense';\nexport * from './gather_nd';\nexport * from './dropout';\nexport * from './signal_ops_util';\nexport * from './in_top_k';\n\nexport {op, OP_SCOPE_SUFFIX} from './operation';\n\nimport {rfft} from './spectral/rfft';\nimport {fft} from './spectral/fft';\nimport {ifft} from './spectral/ifft';\nimport {irfft} from './spectral/irfft';\nconst spectral = {\n fft,\n ifft,\n rfft,\n irfft\n};\n\nimport * as fused from './fused_ops';\n\nimport {hammingWindow} from './signal/hamming_window';\nimport {hannWindow} from './signal/hann_window';\nimport {frame} from './signal/frame';\nimport {stft} from './signal/stft';\nconst signal = {\n hammingWindow,\n hannWindow,\n frame,\n stft,\n};\n\n// Image Ops namespace\nimport {cropAndResize} from './image/crop_and_resize';\nimport {flipLeftRight} from './image/flip_left_right';\nimport {grayscaleToRGB} from './image/grayscale_to_rgb';\nimport {rotateWithOffset} from './image/rotate_with_offset';\nimport {nonMaxSuppression} from './image/non_max_suppression';\nimport {nonMaxSuppressionAsync} from './image/non_max_suppression_async';\nimport {nonMaxSuppressionWithScore} from './image/non_max_suppression_with_score';\nimport {nonMaxSuppressionWithScoreAsync} from './image/non_max_suppression_with_score_async';\nimport {nonMaxSuppressionPadded} from './image/non_max_suppression_padded';\nimport {nonMaxSuppressionPaddedAsync} from './image/non_max_suppression_padded_async';\nimport {resizeBilinear} from './image/resize_bilinear';\nimport {resizeNearestNeighbor} from './image/resize_nearest_neighbor';\nimport {threshold} from './image/threshold';\nimport {transform} from './image/transform';\nconst image = {\n flipLeftRight,\n grayscaleToRGB,\n resizeNearestNeighbor,\n resizeBilinear,\n rotateWithOffset,\n cropAndResize,\n nonMaxSuppression,\n nonMaxSuppressionAsync,\n nonMaxSuppressionWithScore,\n nonMaxSuppressionWithScoreAsync,\n nonMaxSuppressionPadded,\n nonMaxSuppressionPaddedAsync,\n threshold,\n transform\n};\n\n// linalg namespace\nimport {bandPart} from './linalg/band_part';\nimport {gramSchmidt} from './linalg/gram_schmidt';\nimport {qr} from './linalg/qr';\nconst linalg = {\n bandPart,\n gramSchmidt,\n qr\n};\n\n// losses namespace;\nimport {absoluteDifference} from './losses/absolute_difference';\nimport {computeWeightedLoss} from './losses/compute_weighted_loss';\nimport {cosineDistance} from './losses/cosine_distance';\nimport {hingeLoss} from './losses/hinge_loss';\nimport {huberLoss} from './losses/huber_loss';\nimport {logLoss} from './losses/log_loss';\nimport {meanSquaredError} from 
'./losses/mean_squared_error';\nimport {sigmoidCrossEntropy} from './losses/sigmoid_cross_entropy';\nimport {softmaxCrossEntropy} from './losses/softmax_cross_entropy';\nconst losses = {\n absoluteDifference,\n computeWeightedLoss,\n cosineDistance,\n hingeLoss,\n huberLoss,\n logLoss,\n meanSquaredError,\n sigmoidCrossEntropy,\n softmaxCrossEntropy\n};\n\nimport {sparseFillEmptyRows} from './sparse/sparse_fill_empty_rows';\nimport {sparseReshape} from './sparse/sparse_reshape';\nimport {sparseSegmentMean} from './sparse/sparse_segment_mean';\nimport {sparseSegmentSum} from './sparse/sparse_segment_sum';\nconst sparse = {\n sparseFillEmptyRows,\n sparseReshape,\n sparseSegmentMean,\n sparseSegmentSum\n};\n\nimport {stringNGrams} from './string/string_n_grams';\nimport {stringSplit} from './string/string_split';\nimport {stringToHashBucketFast} from './string/string_to_hash_bucket_fast';\n// tslint:disable-next-line:variable-name\nconst string = {\n stringNGrams,\n stringSplit,\n stringToHashBucketFast\n};\n\n// Second level exports.\nexport {image, linalg, losses, spectral, fused, signal, sparse, string};\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {AdadeltaOptimizer} from './adadelta_optimizer';\nimport {AdagradOptimizer} from './adagrad_optimizer';\nimport {AdamOptimizer} from './adam_optimizer';\nimport {AdamaxOptimizer} from './adamax_optimizer';\nimport {MomentumOptimizer} from './momentum_optimizer';\nimport {RMSPropOptimizer} from './rmsprop_optimizer';\nimport {SGDOptimizer} from './sgd_optimizer';\n\nexport class OptimizerConstructors {\n /**\n * Constructs a `tf.SGDOptimizer` that uses stochastic gradient descent.\n *\n * ```js\n * // Fit a quadratic function by learning the coefficients a, b, c.\n * const xs = tf.tensor1d([0, 1, 2, 3]);\n * const ys = tf.tensor1d([1.1, 5.9, 16.8, 33.9]);\n *\n * const a = tf.scalar(Math.random()).variable();\n * const b = tf.scalar(Math.random()).variable();\n * const c = tf.scalar(Math.random()).variable();\n *\n * // y = a * x^2 + b * x + c.\n * const f = x => a.mul(x.square()).add(b.mul(x)).add(c);\n * const loss = (pred, label) => pred.sub(label).square().mean();\n *\n * const learningRate = 0.01;\n * const optimizer = tf.train.sgd(learningRate);\n *\n * // Train the model.\n * for (let i = 0; i < 10; i++) {\n * optimizer.minimize(() => loss(f(xs), ys));\n * }\n *\n * // Make predictions.\n * console.log(\n * `a: ${a.dataSync()}, b: ${b.dataSync()}, c: ${c.dataSync()}`);\n * const preds = f(xs).dataSync();\n * preds.forEach((pred, i) => {\n * console.log(`x: ${i}, pred: ${pred}`);\n * });\n * ```\n *\n * @param learningRate The learning rate to use for the SGD algorithm.\n *\n * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}\n */\n static sgd(learningRate: number): SGDOptimizer {\n return new SGDOptimizer(learningRate);\n }\n\n /**\n * 
Constructs a `tf.MomentumOptimizer` that uses momentum gradient\n * descent.\n *\n * See\n * [http://proceedings.mlr.press/v28/sutskever13.pdf](\n * http://proceedings.mlr.press/v28/sutskever13.pdf)\n *\n * @param learningRate The learning rate to use for the Momentum gradient\n * descent algorithm.\n * @param momentum The momentum to use for the momentum gradient descent\n * algorithm.\n *\n * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}\n */\n static momentum(learningRate: number, momentum: number, useNesterov = false):\n MomentumOptimizer {\n return new MomentumOptimizer(learningRate, momentum, useNesterov);\n }\n\n /**\n * Constructs a `tf.RMSPropOptimizer` that uses RMSProp gradient\n * descent. This implementation uses plain momentum and is not centered\n * version of RMSProp.\n *\n * See\n * [http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf](\n * http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)\n *\n * @param learningRate The learning rate to use for the RMSProp gradient\n * descent algorithm.\n * @param decay The discounting factor for the history/coming gradient.\n * @param momentum The momentum to use for the RMSProp gradient descent\n * algorithm.\n * @param epsilon Small value to avoid zero denominator.\n * @param centered If true, gradients are normalized by the estimated\n * variance of the gradient.\n *\n * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}\n */\n static rmsprop(\n learningRate: number, decay = .9, momentum = 0.0, epsilon: number = null,\n centered = false): RMSPropOptimizer {\n return new RMSPropOptimizer(\n learningRate, decay, momentum, epsilon, centered);\n }\n\n /**\n * Constructs a `tf.AdamOptimizer` that uses the Adam algorithm.\n * See [https://arxiv.org/abs/1412.6980](https://arxiv.org/abs/1412.6980)\n *\n * @param learningRate The learning rate to use for the Adam gradient\n * descent algorithm.\n * @param beta1 The exponential decay rate for the 1st moment estimates.\n * @param beta2 The exponential decay rate for the 2nd moment estimates.\n * @param epsilon A small constant for numerical stability.\n *\n * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}\n */\n static adam(\n learningRate = 0.001, beta1 = 0.9, beta2 = 0.999,\n epsilon: number = null): AdamOptimizer {\n return new AdamOptimizer(learningRate, beta1, beta2, epsilon);\n }\n\n /**\n * Constructs a `tf.AdadeltaOptimizer` that uses the Adadelta algorithm.\n * See [https://arxiv.org/abs/1212.5701](https://arxiv.org/abs/1212.5701)\n *\n * @param learningRate The learning rate to use for the Adadelta gradient\n * descent algorithm.\n * @param rho The learning rate decay over each update.\n * @param epsilon A constant epsilon used to better condition the grad\n * update.\n *\n * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}\n */\n static adadelta(learningRate = .001, rho = .95, epsilon: number = null):\n AdadeltaOptimizer {\n return new AdadeltaOptimizer(learningRate, rho, epsilon);\n }\n\n /**\n * Constructs a `tf.AdamaxOptimizer` that uses the Adamax algorithm.\n * See [https://arxiv.org/abs/1412.6980](https://arxiv.org/abs/1412.6980)\n *\n * @param learningRate The learning rate to use for the Adamax gradient\n * descent algorithm.\n * @param beta1 The exponential decay rate for the 1st moment estimates.\n * @param beta2 The exponential decay rate for the 2nd moment estimates.\n * @param epsilon A small constant for numerical stability.\n * 
@param decay The learning rate decay over each update.\n *\n * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}\n */\n static adamax(\n learningRate = 0.002, beta1 = 0.9, beta2 = 0.999, epsilon: number = null,\n decay = 0.0): AdamaxOptimizer {\n return new AdamaxOptimizer(learningRate, beta1, beta2, epsilon, decay);\n }\n\n /**\n * Constructs a `tf.AdagradOptimizer` that uses the Adagrad algorithm.\n * See\n * [http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf](\n * http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)\n * or\n * [http://ruder.io/optimizing-gradient-descent/index.html#adagrad](\n * http://ruder.io/optimizing-gradient-descent/index.html#adagrad)\n *\n * @param learningRate The learning rate to use for the Adagrad gradient\n * descent algorithm.\n * @param initialAccumulatorValue Starting value for the accumulators, must be\n * positive.\n *\n * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}\n */\n static adagrad(learningRate: number, initialAccumulatorValue = 0.1):\n AdagradOptimizer {\n return new AdagradOptimizer(learningRate, initialAccumulatorValue);\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OptimizerConstructors} from './optimizers/optimizer_constructors';\n\nexport const train = OptimizerConstructors;\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nconst delayCallback: Function = (() => {\n if (typeof requestAnimationFrame !== 'undefined') {\n return requestAnimationFrame;\n } else if (typeof setImmediate !== 'undefined') {\n return setImmediate;\n }\n return (f: Function) => f(); // no delays\n})();\n\n/**\n * Returns a promise that resolves when a requestAnimationFrame has completed.\n *\n * On Node.js this uses setImmediate instead of requestAnimationFrame.\n *\n * This is simply a sugar method so that users can do the following:\n * `await tf.nextFrame();`\n *\n * @doc {heading: 'Performance', subheading: 'Timing'}\n */\nfunction nextFrame(): Promise {\n return new Promise(resolve => delayCallback(() => resolve()));\n}\n\nexport {nextFrame};\n","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport * as util from '../util';\n\nexport function assertParamsConsistent(shapes: number[][], axis: number) {\n const rank = shapes[0].length;\n shapes.forEach((shape, i) => {\n util.assert(\n shape.length === rank,\n () =>\n `Error in concat${rank}D: rank of tensors[${i}] must be the same ` +\n `as the rank of the rest (${rank})`);\n });\n\n util.assert(\n axis >= 0 && axis < rank,\n () => `Error in concat${rank}D: axis must be between 0 and ${rank - 1}.`);\n\n const firstShape = shapes[0];\n shapes.forEach((shape, i) => {\n for (let r = 0; r < rank; r++) {\n util.assert(\n (r === axis) || (shape[r] === firstShape[r]),\n () => `Error in concat${rank}D: Shape of tensors[${i}] (${shape}) ` +\n `does not match the shape of the rest (${firstShape}) ` +\n `along the non-concatenated axis ${i}.`);\n }\n });\n}\n\nexport function computeOutShape(shapes: number[][], axis: number): number[] {\n const outputShape = shapes[0].slice();\n for (let i = 1; i < shapes.length; i++) {\n outputShape[axis] += shapes[i][axis];\n }\n return outputShape;\n}\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * Inputs of size above this threshold will be parallelized by calling multiple\n * shader programs.\n */\nimport {nearestDivisor} from '../util';\n\nexport const PARALLELIZE_THRESHOLD = 30;\n\nexport interface ReduceInfo {\n windowSize: number;\n batchSize: number;\n inSize: number;\n outSize: number;\n}\n\nexport function computeOptimalWindowSize(inSize: number): number {\n if (inSize <= PARALLELIZE_THRESHOLD) {\n return inSize;\n }\n return nearestDivisor(inSize, Math.floor(Math.sqrt(inSize)));\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// Returns the image center in pixels.\nexport function getImageCenter(\n center: number|[number, number], imageHeight: number,\n imageWidth: number): [number, number] {\n const centerX =\n imageWidth * (typeof center === 'number' ? center : center[0]);\n const centerY =\n imageHeight * (typeof center === 'number' ? center : center[1]);\n return [centerX, centerY];\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * Gets the new shape of the input Tensor after it's been reshaped\n * to:\n * [blockShape[0], ..., blockShape[M-1], batch / prod(blockShape),\n * inputShape[1], ..., inputShape[N-1]]\n *\n * See step 1: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd\n */\nexport function getReshaped(\n inputShape: number[], blockShape: number[], prod: number,\n batchToSpace = true): number[] {\n let reshaped: number[] = [];\n if (batchToSpace) {\n reshaped = reshaped.concat(blockShape.slice(0));\n reshaped.push(inputShape[0] / prod);\n reshaped = reshaped.concat(inputShape.slice(1));\n } else {\n reshaped = reshaped.concat(inputShape[0]);\n const spatialLength = blockShape.length;\n for (let i = 0; i < spatialLength; ++i) {\n reshaped =\n reshaped.concat([inputShape[i + 1] / blockShape[i], blockShape[i]]);\n }\n reshaped = reshaped.concat(inputShape.slice(spatialLength + 1));\n }\n return reshaped;\n}\n\n/**\n * Gets the permutation that will transpose the dimensions of the\n * reshaped tensor to shape:\n *\n * [batch / prod(block_shape),inputShape[1], blockShape[0], ...,\n * inputShape[M], blockShape[M-1],inputShape[M+1], ..., inputShape[N-1]]\n *\n * see step 2: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd\n */\nexport function getPermuted(\n reshapedRank: number, blockShapeRank: number,\n batchToSpace = true): number[] {\n const permuted = [];\n if (batchToSpace) {\n permuted.push(blockShapeRank);\n for (let i = blockShapeRank + 1; i < reshapedRank; ++i) {\n if (i <= 2 * blockShapeRank) {\n permuted.push(i);\n permuted.push(i - (blockShapeRank + 1));\n } else {\n permuted.push(i);\n }\n }\n } else {\n const permutedBeforeBatch = [];\n const permutedAfterBatch = [];\n for (let i = 1; i < reshapedRank; ++i) {\n if (i >= 
blockShapeRank * 2 + 1 || i % 2 === 1) {\n permutedAfterBatch.push(i);\n } else {\n permutedBeforeBatch.push(i);\n }\n }\n permuted.push(...permutedBeforeBatch);\n permuted.push(0);\n permuted.push(...permutedAfterBatch);\n }\n return permuted;\n}\n\n/**\n * Gets the shape of the reshaped and permuted input Tensor before any cropping\n * is applied. The new shape will be:\n *\n * [batch / prod(blockShape),inputShape[1] * blockShape[0], ...,\n * inputShape[M] * blockShape[M-1],inputShape[M+1], ..., inputShape[N-1]]\n *\n * See step 3: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd\n */\nexport function getReshapedPermuted(\n inputShape: number[], blockShape: number[], prod: number,\n batchToSpace = true): number[] {\n const reshapedPermuted = [];\n\n if (batchToSpace) {\n reshapedPermuted.push(inputShape[0] / prod);\n } else {\n reshapedPermuted.push(inputShape[0] * prod);\n }\n\n for (let i = 1; i < inputShape.length; ++i) {\n if (i <= blockShape.length) {\n if (batchToSpace) {\n reshapedPermuted.push(blockShape[i - 1] * inputShape[i]);\n } else {\n reshapedPermuted.push(inputShape[i] / blockShape[i - 1]);\n }\n } else {\n reshapedPermuted.push(inputShape[i]);\n }\n }\n\n return reshapedPermuted;\n}\n\n/**\n * Converts the crops argument into the beginning coordinates of a slice\n * operation.\n */\nexport function getSliceBeginCoords(\n crops: number[][], blockShape: number): number[] {\n const sliceBeginCoords = [0];\n for (let i = 0; i < blockShape; ++i) {\n sliceBeginCoords.push(crops[i][0]);\n }\n return sliceBeginCoords;\n}\n\n/**\n * Converts the crops argument into the size of a slice operation. When\n * combined with getSliceBeginCoords this function allows the reshaped and\n * permuted Tensor to be cropped to its final output shape of:\n *\n * inputShape[1] * blockShape[0] - crops[0,0] - crops[0,1], ...,\n * inputShape[M] * blockShape[M-1] -crops[M-1,0] -\n * crops[M-1,1],inputShape[M+1], ..., inputShape[N-1]]\n *\n * See step 4: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd\n */\nexport function getSliceSize(\n uncroppedShape: number[], crops: number[][], blockShape: number): number[] {\n const sliceSize = uncroppedShape.slice(0, 1);\n for (let i = 0; i < blockShape; ++i) {\n sliceSize.push(uncroppedShape[i + 1] - crops[i][0] - crops[i][1]);\n }\n\n return sliceSize;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { TensorInfo } from '../tensor_info';\nimport {computeStrides, sizeFromShape} from '../util';\n\n/**\n * Validate gather nd inputs.\n *\n * @param tensor The tensor contains the source values.\n * @param indices The tensor contains the indices to slice the source.\n *\n * @returns [resultShape, numUpdates, sliceSize, strides]\n */\nexport function prepareAndValidate(tensor: TensorInfo, indices: TensorInfo):\n [number[], number, number, number[]] {\n const tensorRank = tensor.shape.length;\n const indicesRank = indices.shape.length;\n if (tensorRank < 1) {\n throw new Error(\n 'tf.gatherND() expects the input to be rank 1 or higher,' +\n ` but the rank was ${tensorRank}.`);\n }\n if (indicesRank < 1) {\n throw new Error(\n 'tf.gatherND() expects the indices to be rank 1 or higher,' +\n ` but the rank was ${indicesRank}.`);\n }\n if (indices.dtype !== 'int32') {\n throw new Error(\n 'tf.gatherND() expects the indices to be int32 type,' +\n ` but the dtype was ${indices.dtype}.`);\n }\n if (indices.shape[indicesRank - 1] > tensorRank) {\n throw new Error(\n 'index innermost dimension length must be <= tensor rank; saw: ' +\n `${indices.shape[indicesRank - 1]} vs. ${tensorRank}`);\n }\n\n if (sizeFromShape(tensor.shape) === 0) {\n throw new Error(\n 'Requested more than 0 entries, but input is empty.' +\n ` Input shape: ${tensor.shape}.`);\n }\n\n const indicesShape = indices.shape;\n const sliceRank = indicesShape[indicesShape.length - 1];\n\n // The result shape is\n // indices.shape[:-1] + params.shape[indices.shape[-1]:]\n let nResult = 1;\n for (let i = 0; i < indicesShape.length - 1; ++i) {\n nResult *= indicesShape[i];\n }\n\n const inputShape = tensor.shape;\n\n const resultShape = indicesShape.slice();\n resultShape.pop();\n\n let sliceSize = 1;\n for (let i = sliceRank; i < tensorRank; ++i) {\n sliceSize *= inputShape[i];\n resultShape.push(inputShape[i]);\n }\n\n const strides =\n [...computeStrides(tensor.shape).map(stride => stride / sliceSize),\n 1].slice(0, sliceRank);\n\n return [resultShape, nResult, sliceSize, strides];\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nexport const SELU_SCALEALPHA = 1.7580993408473768599402175208123;\nexport const SELU_SCALE = 1.0507009873554804934193349852946;\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nexport const ERF_P = 0.3275911;\nexport const ERF_A1 = 0.254829592;\nexport const ERF_A2 = -0.284496736;\nexport const ERF_A3 = 1.421413741;\nexport const ERF_A4 = -1.453152027;\nexport const ERF_A5 = 1.061405429;\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {TypedArray} from '../types';\n/**\n * Merges real and imaginary Float32Arrays into a single complex Float32Array.\n *\n * The memory layout is interleaved as follows:\n * real: [r0, r1, r2]\n * imag: [i0, i1, i2]\n * complex: [r0, i0, r1, i1, r2, i2]\n *\n * This is the inverse of splitRealAndImagArrays.\n *\n * @param real The real values of the complex tensor values.\n * @param imag The imag values of the complex tensor values.\n * @returns A complex tensor as a Float32Array with merged values.\n */\nexport function mergeRealAndImagArrays(\n real: Float32Array, imag: Float32Array): Float32Array {\n if (real.length !== imag.length) {\n throw new Error(\n `Cannot merge real and imag arrays of different lengths. 
real:` +\n `${real.length}, imag: ${imag.length}.`);\n }\n const result = new Float32Array(real.length * 2);\n for (let i = 0; i < result.length; i += 2) {\n result[i] = real[i / 2];\n result[i + 1] = imag[i / 2];\n }\n return result;\n}\n\n/**\n * Splits a complex Float32Array into real and imag parts.\n *\n * The memory layout is interleaved as follows:\n * complex: [r0, i0, r1, i1, r2, i2]\n * real: [r0, r1, r2]\n * imag: [i0, i1, i2]\n *\n * This is the inverse of mergeRealAndImagArrays.\n *\n * @param complex The complex tensor values.\n * @returns An object with real and imag Float32Array components of the complex\n * tensor.\n */\nexport function splitRealAndImagArrays(complex: Float32Array):\n {real: Float32Array, imag: Float32Array} {\n const real = new Float32Array(complex.length / 2);\n const imag = new Float32Array(complex.length / 2);\n for (let i = 0; i < complex.length; i += 2) {\n real[i / 2] = complex[i];\n imag[i / 2] = complex[i + 1];\n }\n return {real, imag};\n}\n\n/**\n * Extracts even indexed complex values in the given array.\n * @param complex The complex tensor values\n */\nexport function complexWithEvenIndex(complex: Float32Array):\n {real: Float32Array, imag: Float32Array} {\n const len = Math.ceil(complex.length / 4);\n const real = new Float32Array(len);\n const imag = new Float32Array(len);\n for (let i = 0; i < complex.length; i += 4) {\n real[Math.floor(i / 4)] = complex[i];\n imag[Math.floor(i / 4)] = complex[i + 1];\n }\n return {real, imag};\n}\n\n/**\n * Extracts odd indexed comple values in the given array.\n * @param complex The complex tensor values\n */\nexport function complexWithOddIndex(complex: Float32Array):\n {real: Float32Array, imag: Float32Array} {\n const len = Math.floor(complex.length / 4);\n const real = new Float32Array(len);\n const imag = new Float32Array(len);\n for (let i = 2; i < complex.length; i += 4) {\n real[Math.floor(i / 4)] = complex[i];\n imag[Math.floor(i / 4)] = complex[i + 1];\n }\n return {real, imag};\n}\n\n/**\n * Get the map representing a complex value in the given array.\n * @param complex The complex tensor values.\n * @param index An index of the target complex value.\n */\nexport function getComplexWithIndex(\n complex: Float32Array, index: number): {real: number, imag: number} {\n const real = complex[index * 2];\n const imag = complex[index * 2 + 1];\n return {real, imag};\n}\n\n/**\n * Insert a given complex value into the TypedArray.\n * @param data The array in which the complex value is inserted.\n * @param c The complex value to be inserted.\n * @param index An index of the target complex value.\n */\nexport function assignToTypedArray(\n data: TypedArray, real: number, imag: number, index: number) {\n data[index * 2] = real;\n data[index * 2 + 1] = imag;\n}\n\n/**\n * Make the list of exponent terms used by FFT.\n */\nexport function exponents(\n n: number, inverse: boolean): {real: Float32Array, imag: Float32Array} {\n const real = new Float32Array(n / 2);\n const imag = new Float32Array(n / 2);\n for (let i = 0; i < Math.ceil(n / 2); i++) {\n const x = (inverse ? 2 : -2) * Math.PI * (i / n);\n real[i] = Math.cos(x);\n imag[i] = Math.sin(x);\n }\n return {real, imag};\n}\n\n/**\n * Make the exponent term used by FFT.\n */\nexport function exponent(\n k: number, n: number, inverse: boolean): {real: number, imag: number} {\n const x = (inverse ? 
2 : -2) * Math.PI * (k / n);\n const real = Math.cos(x);\n const imag = Math.sin(x);\n return {real, imag};\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * Utility functions for computing einsum (tensor contraction and summation\n * based on Einstein summation.)\n */\n\nimport {Tensor} from '../tensor';\nimport {assert} from '../util_base';\n\nconst ARROW = '->';\nconst ARROW_REGEX = /->/g;\nconst COMMA = ',';\nconst ELLIPSIS = '...';\n\n/**\n * Parse an equation for einsum.\n *\n * @param equation The einsum equation (e.g., \"ij,jk->ik\").\n * @param numTensors Number of tensors provided along with `equation`. Used to\n * check matching number of input tensors.\n * @returns An object consisting of the following fields:\n * - allDims: all dimension names as strings.\n * - summedDims: a list of all dimensions being summed over, as indices to\n * the elements of `allDims`.\n * - idDims: indices of the dimensions in each input tensor, as indices to\n * the elements of `allDims.\n */\nexport function decodeEinsumEquation(equation: string, numTensors: number): {\n allDims: string[],\n summedDims: number[],\n idDims: number[][],\n} {\n equation = equation.replace(/\\s/g, ''); // Remove witespace in equation.\n const numArrows =\n (equation.length - equation.replace(ARROW_REGEX, '').length) /\n ARROW.length;\n if (numArrows < 1) {\n throw new Error('Equations without an arrow are not supported.');\n } else if (numArrows > 1) {\n throw new Error(`Equation must contain exactly one arrow (\"${ARROW}\").`);\n }\n const [inputString, outputString] = equation.split(ARROW);\n assert(\n inputString.indexOf(ELLIPSIS) === -1,\n () => `The ellipsis notation (\"${ELLIPSIS}\") is not supported yet.`);\n const inputTerms = inputString.split(COMMA);\n const numInputs = inputTerms.length;\n if (numTensors !== numInputs) {\n throw new Error(\n `Expected ${numInputs} input tensors, received ${numTensors}`);\n }\n if (numInputs > 2) {\n throw new Error(\n 'Support for more than 2 input tensors is not implemented yet.');\n }\n\n const allDims: string[] = [];\n for (let i = 0; i < outputString.length; ++i) {\n const dimName = outputString[i];\n if (!inputTerms.some(inputTerm => inputTerm.indexOf(dimName) !== -1)) {\n throw new Error(\n `Output subscripts contain the label ${dimName} ` +\n `not present in the input subscripts.`);\n }\n if (allDims.indexOf(dimName) === -1) {\n allDims.push(dimName);\n }\n }\n for (let i = 0; i < inputString.length; ++i) {\n const dimName = inputString[i];\n if (allDims.indexOf(dimName) === -1 && dimName !== COMMA) {\n allDims.push(dimName);\n }\n }\n\n const idDims: number[][] = new Array(inputTerms.length);\n for (let i = 0; i < numInputs; ++i) {\n if (new Set(inputTerms[i].split('')).size !== inputTerms[i].length) {\n throw new Error(\n `Found duplicate axes in input component ${inputTerms[i]}. 
` +\n `Support for duplicate axes in input is not implemented yet.`);\n }\n idDims[i] = [];\n for (let j = 0; j < inputTerms[i].length; ++j) {\n idDims[i].push(allDims.indexOf(inputTerms[i][j]));\n }\n }\n\n const numDims = allDims.length; // Number of unique dimensions.\n const numOutDims = outputString.length; // Number of output dimensions.\n const summedDims: number[] = []; // Dimensions being summed over.\n for (let i = numOutDims; i < numDims; ++i) {\n summedDims.push(i);\n }\n return {allDims, summedDims, idDims};\n}\n\n/**\n * Get the permutation for a given input tensor.\n *\n * @param nDims Total number of dimension of all tensors involved in the einsum\n * operation.\n * @param idDims Dimension indices involve in the tensor in question.\n * @returns An object consisting of the following fields:\n * - permutationIndices: Indices to permute the axes of the tensor with.\n * - expandDims: Indices to the dimension that need to be expanded from the\n * tensor after permutation.\n */\nexport function getEinsumPermutation(nDims: number, idDims: number[]):\n {permutationIndices: number[], expandDims: number[]} {\n let permutationIndices: number[] = new Array(nDims);\n permutationIndices.fill(-1);\n for (let i = 0; i < idDims.length; ++i) {\n permutationIndices[idDims[i]] = i;\n }\n const expandDims: number[] = [];\n for (let i = 0; i < nDims; ++i) {\n if (permutationIndices[i] === -1) {\n expandDims.push(i);\n }\n }\n permutationIndices = permutationIndices.filter(d => d !== -1);\n return {permutationIndices, expandDims};\n}\n\n/**\n * Checks that the dimension sizes from different input tensors match the\n * equation.\n */\nexport function checkEinsumDimSizes(\n nDims: number, idDims: number[][], tensors: Tensor[]) {\n const dimSizes: number[] = new Array(nDims);\n for (let i = 0; i < tensors.length; ++i) {\n const shape: number[] = tensors[i].shape;\n for (let j = 0; j < idDims[i].length; ++j) {\n if (dimSizes[idDims[i][j]] === undefined) {\n dimSizes[idDims[i][j]] = shape[j];\n } else {\n assert(\n dimSizes[idDims[i][j]] === shape[j],\n () => `Expected dimension ${dimSizes[idDims[i][j]]} at axis ${j} ` +\n `of input shaped ${JSON.stringify(shape)}, ` +\n `but got dimension ${shape[j]}`);\n }\n }\n }\n}\n\n/**\n * Gets path of computation for einsum.\n *\n * @param summedDims indices to the dimensions being summed over.\n * @param idDims A look up table for the dimensions present in each input\n * tensor. Each consituent array contains indices for the dimensions in the\n * corresponding input tensor.\n *\n * @return A map with two fields:\n * - path: The path of computation, with each element indicating the dimension\n * being summed over after the element-wise multiplication in that step.\n * - steps: With the same length as `path`. 
Each element contains the indices\n * to the input tensors being used for element-wise multiplication in the\n * corresponding step.\n */\nexport function getEinsumComputePath(summedDims: number[], idDims: number[][]):\n {path: number[], steps: number[][]} {\n const path: number[] = summedDims;\n const steps: number[][] = [];\n let nSteps = 0;\n if (summedDims.length === 0) {\n // Einsum that involes no summing: e.g., transpose and outer product.\n path.push(-1);\n }\n nSteps = summedDims.length + 1;\n for (let i = 0; i < nSteps; ++i) {\n steps.push([]);\n }\n const computedTermIndices: number[] = [];\n for (let i = 0; i < path.length; ++i) {\n const summedDim = path[i];\n const termIndices = findTermsWithDim(idDims, summedDim);\n for (const termIndex of termIndices) {\n if (computedTermIndices.indexOf(termIndex) === -1) {\n steps[i].push(termIndex);\n computedTermIndices.push(termIndex);\n }\n }\n }\n return {path, steps};\n}\n\n/** Determines if an axes permutation is the identity permutation. */\nexport function isIdentityPermutation(perm: number[]): boolean {\n return perm.every((dim: number, index: number) => dim === index);\n}\n\nfunction findTermsWithDim(idDims: number[][], dim: number): number[] {\n const termIndices: number[] = [];\n for (let i = 0; i < idDims.length; ++i) {\n if (idDims[i].length === 0 || idDims[i].indexOf(dim) !== -1 || dim === -1) {\n termIndices.push(i);\n }\n }\n return termIndices;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { TensorInfo } from '../tensor_info';\nimport {Tensor} from '../tensor';\nimport {assert} from '../util';\n\n/**\n * Prepare the split size array. When the input is a number, the axis is evenly\n * divided among the split size. When the input contains the negative value, the\n * rest of the axis is allocated toward that.\n */\nexport function prepareSplitSize(\n x: Tensor|TensorInfo, numOrSizeSplits: number[]|number,\n axis = 0): number[] {\n let splitSizes = [];\n if (typeof (numOrSizeSplits) === 'number') {\n assert(\n x.shape[axis] % numOrSizeSplits === 0,\n () => 'Number of splits must evenly divide the axis.');\n splitSizes =\n new Array(numOrSizeSplits).fill(x.shape[axis] / numOrSizeSplits);\n } else {\n const numOfNegs = numOrSizeSplits.reduce((count, value) => {\n if (value === -1) {\n count += 1;\n }\n return count;\n }, 0);\n assert(\n numOfNegs <= 1,\n () => 'There should be only one negative value in split array.');\n const negIndex = numOrSizeSplits.indexOf(-1);\n // Allow the number of split array to be -1, which indicates the rest\n // of dimension is allocated to that split.\n if (negIndex !== -1) {\n const total = numOrSizeSplits.reduce((a, b) => b > 0 ? 
a + b : a);\n numOrSizeSplits[negIndex] = x.shape[axis] - total;\n }\n assert(\n x.shape[axis] === numOrSizeSplits.reduce((a, b) => a + b),\n () => 'The sum of sizes must match the size of the axis dimension.');\n splitSizes = numOrSizeSplits;\n }\n\n return splitSizes;\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * Generates sparse fill empty rows indices, dense shape mismatch error message.\n *\n * @param indicesLength The first dimension of indices.\n */\nexport function getSparseFillEmptyRowsIndicesDenseShapeMismatch(\n indicesLength: number) {\n return `Received SparseTensor with denseShape[0] = 0 but\n indices.shape[0] = ${indicesLength}`;\n}\n\n/**\n * Generates sparse fill empty rows negative index error message.\n *\n * @param index The index with a negative value.\n * @param value The negative value.\n */\nexport function getSparseFillEmptyRowsNegativeIndexErrorMessage(\n index: number, value: number) {\n return `indices(${index}, 0) is invalid: ${value} < 0`;\n}\n\n/**\n * Generates sparse fill empty rows out of range index error message.\n *\n * @param index The index with an out of range value.\n * @param value The out of range value.\n * @param limit The upper limit for indices.\n */\nexport function getSparseFillEmptyRowsOutOfRangeIndexErrorMessage(\n index: number, value: number, limit: number) {\n return `indices(${index}, 0) is invalid: ${value} >= ${limit}`;\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {sizeFromShape} from '../../util';\n\n/**\n * Generates sparse reshape multiple negative 1 output dimension error message.\n *\n * @param dim1 The first dimension with a negative 1 value.\n * @param dim2 The second dimension with a negative 1 value.\n */\nexport function getSparseReshapeMultipleNegativeOneOutputDimErrorMessage(\n dim1: number, dim2: number) {\n return `only one output dimension may be -1, not both ${dim1} and ${dim2}`;\n}\n\n/**\n * Generates sparse reshape negative output dimension error message.\n *\n * @param dim The dimension with a negative value.\n * @param value The negative value.\n */\nexport function getSparseReshapeNegativeOutputDimErrorMessage(\n dim: number, value: number) {\n return `size ${dim} must be non-negative, not ${value}`;\n}\n\n/**\n * Generates sparse reshape empty tensor zero output dimension error message.\n *\n */\nexport function getSparseReshapeEmptyTensorZeroOutputDimErrorMessage() {\n return 'reshape cannot infer the missing input size for an empty tensor ' +\n 'unless all specified input sizes are non-zero';\n}\n\n/**\n * Generates sparse reshape input output multiple mismatch error message.\n *\n * @param inputShape the input shape.\n * @param outputShape the requested output shape.\n */\nexport function getSparseReshapeInputOutputMultipleErrorMessage(\n inputShape: number[], outputShape: number[]) {\n const inputSize = sizeFromShape(inputShape);\n const outputSize = sizeFromShape(outputShape);\n return `Input to reshape is a SparseTensor with ${inputSize}\n dense values, but the requested shape requires a multiple of ${\n outputSize}. inputShape=${inputShape} outputShape= ${outputShape}`;\n}\n\n/**\n * Generates sparse reshape input output inequality error message.\n *\n * @param inputShape the input shape.\n * @param outputShape the requested output shape.\n */\nexport function getSparseReshapeInputOutputMismatchErrorMessage(\n inputShape: number[], outputShape: number[]) {\n const inputSize = sizeFromShape(inputShape);\n const outputSize = sizeFromShape(outputShape);\n return `Input to reshape is a tensor with ${\n inputSize} dense values, but the requested shape has ${\n outputSize}. inputShape=${inputShape} outputShape=${outputShape}`;\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/**\n * Generates sparse segment reduction negative segment ids error message.\n *\n */\nexport function getSparseSegmentReductionNegativeSegmentIdsErrorMessage() {\n return `segment ids must be >= 0`;\n}\n\n/**\n * Generates sparse segment reduction non increasing segment ids error message.\n *\n */\nexport function getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage() {\n return `segment ids are not increasing`;\n}\n\n/**\n * Generates sparse segment reduction segment id out of range error message.\n *\n * @param segmentId The segment id index that is out of range.\n * @param outputRows Upper bound of valid segment id values.\n */\nexport function getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage(\n segmentId: number, outputRows: number) {\n return `Segment id ${segmentId} out of range [0, ${\n outputRows}), possibly because segmentIds input is not sorted.`;\n}\n\n/**\n * Generates sparse segment reduction input indice out of range error message.\n *\n * @param index The index that holds the out of range value.\n * @param indexValue The value that is out of range.\n * @param inputRows Upper bound of valid index values.\n */\nexport function getSparseSegmentReductionIndicesOutOfRangeErrorMessage(\n index: number, indexValue: number, inputRows: number) {\n return `Bad: indices[${index}] == ${indexValue} out of range [0, ${\n inputRows})`;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport { TensorInfo } from '../tensor_info';\nimport {nearestDivisor} from '../util';\n\nimport {PARALLELIZE_THRESHOLD} from './reduce_util';\n\nexport interface SegOpInfo {\n windowSize: number;\n batchSize: number;\n inSize: number;\n numSegments: number;\n}\n\nexport function segOpComputeOptimalWindowSize(\n inSize: number, numSegments: number): number {\n let done = false;\n let res;\n\n if (inSize <= PARALLELIZE_THRESHOLD) {\n res = inSize;\n done = true;\n } else {\n res = nearestDivisor(inSize, Math.floor(Math.sqrt(inSize)));\n }\n\n while (!done) {\n if (res > numSegments || res === inSize) {\n done = true;\n } else {\n res = nearestDivisor(inSize, res + 1);\n }\n }\n return res;\n}\n\nexport function computeOutShape(\n aShape: number[], axis: number, numSegments: number): number[] {\n const outShape = [];\n const rank = aShape.length;\n for (let dim = 0; dim < rank; dim++) {\n if (dim !== axis) {\n outShape.push(aShape[dim]);\n } else {\n outShape.push(numSegments);\n }\n }\n return outShape;\n}\n\nexport interface GatherOpShapeInfo {\n batchSize: number;\n sliceSize: number;\n outerSize: number;\n dimSize: number;\n outputShape: number[];\n}\n\nexport function collectGatherOpShapeInfo(\n x: TensorInfo, indices: TensorInfo, axis: number,\n batchDims: number): GatherOpShapeInfo {\n const indicesRank = indices.shape.length;\n const xRank = x.shape.length;\n\n if (batchDims !== 0) {\n if (batchDims < -indicesRank || batchDims > indicesRank) {\n throw new Error(`Expect batchDims in the range of [-${indicesRank}, ${\n indicesRank}], but got ${batchDims}`);\n }\n }\n\n if (batchDims < 0) {\n batchDims += indicesRank;\n }\n\n if (batchDims > xRank) {\n throw new Error(`batchDims (${batchDims}) must be less than rank(x) (\n ${xRank}).`);\n }\n\n if (axis < batchDims) {\n throw new Error(`batchDims (${\n batchDims}) must be less than or equal to axis (${axis}).`);\n }\n\n for (let i = 0; i < batchDims; ++i) {\n if (x.shape[i] !== indices.shape[i]) {\n throw new Error(\n `x.shape[${i}]: ${x.shape[i]} should be equal to indices.shape[${\n i}]: ${indices.shape[i]}.`);\n }\n }\n const dimSize = x.shape[axis];\n\n const outputShape: number[] = [];\n let batchSize = 1;\n let outerSize = 1;\n let sliceSize = 1;\n\n for (let i = 0; i < batchDims; ++i) {\n outputShape.push(x.shape[i]);\n batchSize *= x.shape[i];\n }\n\n for (let i = batchDims; i < axis; i++) {\n outputShape.push(x.shape[i]);\n outerSize *= x.shape[i];\n }\n\n for (let i = batchDims; i < indicesRank; i++) {\n outputShape.push(indices.shape[i]);\n }\n\n for (let i = axis + 1; i < xRank; i++) {\n outputShape.push(x.shape[i]);\n sliceSize *= x.shape[i];\n }\n\n return {batchSize, sliceSize, outerSize, dimSize, outputShape};\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {decodeString, encodeString} from '../util';\n\n// Utilities needed by backend consumers of tf-core.\nexport * from '../ops/axis_util';\nexport * from '../ops/broadcast_util';\nexport * from '../ops/concat_util';\nexport * from '../ops/conv_util';\nexport * from '../ops/fused_util';\nexport * from '../ops/fused_types';\nexport * from '../ops/ragged_to_dense_util';\nexport * from '../ops/reduce_util';\n\nimport * as slice_util from '../ops/slice_util';\nexport {slice_util};\n\nexport {BackendValues, TypedArray, upcastType, PixelData} from '../types';\nexport {MemoryInfo, TimingInfo} from '../engine';\nexport * from '../ops/rotate_util';\nexport * from '../ops/array_ops_util';\nexport * from '../ops/gather_nd_util';\nexport * from '../ops/scatter_nd_util';\nexport * from '../ops/selu_util';\nexport * from '../ops/fused_util';\nexport * from '../ops/erf_util';\nexport * from '../log';\nexport * from '../backends/complex_util';\nexport * from '../backends/einsum_util';\nexport * from '../ops/split_util';\nexport * from '../ops/sparse/sparse_fill_empty_rows_util';\nexport * from '../ops/sparse/sparse_reshape_util';\nexport * from '../ops/sparse/sparse_segment_reduction_util';\n\nimport * as segment_util from '../ops/segment_util';\nexport {segment_util};\n\nexport function fromUint8ToStringArray(vals: Uint8Array[]) {\n try {\n // Decode the bytes into string.\n return vals.map(val => decodeString(val));\n } catch (err) {\n throw new Error(\n `Failed to decode encoded string bytes into utf-8, error: ${err}`);\n }\n}\n\nexport function fromStringArrayToUint8(strings: string[]) {\n return strings.map(s => encodeString(s));\n}\n","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// Required side effectful code.\nimport './base_side_effects';\n\n// TODO(mattSoulanille): Move this to base_side_effects.ts\n// It is here for now because custom bundles need to avoid calling it, and they\n// only replace the index.js file, not the base_side_effects file.\nimport {registerOptimizers} from './optimizers/register_optimizers';\nregisterOptimizers();\n\n// All exports from this package should be in base.\nexport * from './base';\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Abs} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cast} from '../ops/cast';\nimport {mul} from '../ops/mul';\nimport {step} from '../ops/step';\nimport {Tensor} from '../tensor';\n\nexport const absGradConfig: GradConfig = {\n kernelName: Abs,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n return {x: () => mul(dy, step(cast(x, 'float32'), -1))};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Acos} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cast} from '../ops/cast';\nimport {div} from '../ops/div';\nimport {neg} from '../ops/neg';\nimport {scalar} from '../ops/scalar';\nimport {sqrt} from '../ops/sqrt';\nimport {square} from '../ops/square';\nimport {sub} from '../ops/sub';\nimport {Tensor} from '../tensor';\n\nexport const acosGradConfig: GradConfig = {\n kernelName: Acos,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n\n return {\n x: () => {\n const a = square(cast(x, 'float32'));\n const b = sqrt(sub(scalar(1), a));\n return neg(div(dy, b));\n }\n\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Acosh} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cast} from '../ops/cast';\nimport {div} from '../ops/div';\nimport {sqrt} from '../ops/sqrt';\nimport {square} from '../ops/square';\nimport {sub} from '../ops/sub';\nimport {Tensor} from '../tensor';\n\nexport const acoshGradConfig: GradConfig = {\n kernelName: Acosh,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n\n return {\n x: () => {\n const a = sqrt(sub(square(cast(x, 'float32')), 1));\n return div(dy, a);\n }\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Add} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport * as broadcast_util from '../ops/broadcast_util';\nimport {reshape} from '../ops/reshape';\nimport {sum} from '../ops/sum';\nimport {Tensor} from '../tensor';\n\nexport const addGradConfig: GradConfig = {\n kernelName: Add,\n inputsToSave: ['a', 'b'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [a, b] = saved;\n const outShape =\n broadcast_util.assertAndGetBroadcastShape(a.shape, b.shape);\n\n const derA = () => {\n let res = dy;\n const reduceAxes = broadcast_util.getReductionAxes(a.shape, outShape);\n if (reduceAxes.length > 0) {\n res = sum(res, reduceAxes);\n }\n return reshape(res, a.shape);\n };\n const derB = () => {\n let res = dy;\n const reduceAxes = broadcast_util.getReductionAxes(b.shape, outShape);\n if (reduceAxes.length > 0) {\n res = sum(res, reduceAxes);\n }\n return reshape(res, b.shape);\n };\n\n return {a: derA, b: derB};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {AddN} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {Tensor} from '../tensor';\n\nexport const addNGradConfig: GradConfig = {\n kernelName: AddN,\n saveAllInputs: true,\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const ders: {[key: string]: () => Tensor} = {};\n saved.forEach((_, i) => {\n ders[i] = () => dy.clone();\n });\n return ders;\n }\n};\n","/**\n * @license\n * Copyright 2020 Google Inc. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ArgMax} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {zerosLike} from '../ops/zeros_like';\nimport {Tensor} from '../tensor';\n\nexport const argMaxGradConfig: GradConfig = {\n kernelName: ArgMax,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n return {x: () => zerosLike(x)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ArgMin} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {zerosLike} from '../ops/zeros_like';\nimport {Tensor} from '../tensor';\n\nexport const argMinGradConfig: GradConfig = {\n kernelName: ArgMin,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n return {x: () => zerosLike(x)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Asin} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cast} from '../ops/cast';\nimport {div} from '../ops/div';\nimport {scalar} from '../ops/scalar';\nimport {sqrt} from '../ops/sqrt';\nimport {square} from '../ops/square';\nimport {sub} from '../ops/sub';\nimport {Tensor} from '../tensor';\n\nexport const asinGradConfig: GradConfig = {\n kernelName: Asin,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n return {x: () => div(dy, sqrt(sub(scalar(1), square(cast(x, 'float32')))))};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Asinh} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {add} from '../ops/add';\nimport {cast} from '../ops/cast';\nimport {div} from '../ops/div';\nimport {scalar} from '../ops/scalar';\nimport {sqrt} from '../ops/sqrt';\nimport {square} from '../ops/square';\nimport {Tensor} from '../tensor';\n\nexport const asinhGradConfig: GradConfig = {\n kernelName: Asinh,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n\n return {\n x: () => {\n const a = sqrt(add(scalar(1), square(cast(x, 'float32'))));\n return div(dy, a);\n }\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Atan2} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {add} from '../ops/add';\nimport {assertAndGetBroadcastShape, getReductionAxes} from '../ops/broadcast_util';\nimport {div} from '../ops/div';\nimport {mul} from '../ops/mul';\nimport {neg} from '../ops/neg';\nimport {reshape} from '../ops/reshape';\nimport {square} from '../ops/square';\nimport {sum} from '../ops/sum';\nimport {Tensor} from '../tensor';\n\nexport const atan2GradConfig: GradConfig = {\n kernelName: Atan2,\n inputsToSave: ['a', 'b'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [a, b] = saved;\n const outShape = assertAndGetBroadcastShape(a.shape, b.shape);\n\n const derA = () => {\n const d = add(square(a), square(b));\n let res = mul(dy, div(b, d));\n const reduceAxes = getReductionAxes(a.shape, outShape);\n if (reduceAxes.length > 0) {\n res = sum(res, reduceAxes);\n }\n return reshape(res, a.shape);\n };\n const derB = () => {\n const d = add(square(a), square(b));\n let res = neg(mul(dy, div(a, d)));\n const reduceAxes = getReductionAxes(b.shape, outShape);\n if (reduceAxes.length > 0) {\n res = sum(res, reduceAxes);\n }\n return reshape(res, b.shape);\n };\n return {a: derA, b: derB};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Atan} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {add} from '../ops/add';\nimport {cast} from '../ops/cast';\nimport {div} from '../ops/div';\nimport {square} from '../ops/square';\nimport {Tensor} from '../tensor';\n\nexport const atanGradConfig: GradConfig = {\n kernelName: Atan,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n\n return {x: () => div(dy, add(square(cast(x, 'float32')), 1))};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Atanh} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cast} from '../ops/cast';\nimport {div} from '../ops/div';\nimport {square} from '../ops/square';\nimport {sub} from '../ops/sub';\nimport {scalar} from '../ops/scalar';\nimport {Tensor} from '../tensor';\n\nexport const atanhGradConfig: GradConfig = {\n kernelName: Atanh,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n\n return {x: () => div(dy, sub(scalar(1), square(cast(x, 'float32'))))};\n }\n};\n","\n/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {AvgPool3DGrad, AvgPool3DGradAttrs, AvgPool3DGradInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor4D, Tensor5D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {checkPadOnDimRoundingMode} from './conv_util';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Computes the backprop of a 3d avg pool.\n *\n * @param dy The dy error, of rank 5 of shape\n * [batchSize, depth, height, width, channels].\n * assumed.\n * @param input The original input image, of rank 5 or rank4 of shape\n * [batchSize, depth, height, width, channels].\n * @param filterSize The filter size:\n * `[filterDepth, filterHeight, filterWidth]`.\n * `filterSize` is a single number,\n * then `filterDepth == filterHeight == filterWidth`.\n * @param strides The strides of the pooling:\n * `[strideDepth, strideHeight, strideWidth]`. If\n * `strides` is a single number, then `strideHeight == strideWidth`.\n * @param pad A string from: 'same', 'valid'. The type of padding algorithm\n * used in the forward prop of the op.\n * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. 
If none is\n * provided, it will default to truncate.\n */\nfunction avgPool3dGrad_(\n dy: T|TensorLike, input: T|TensorLike,\n filterSize: [number, number, number]|number,\n strides: [number, number, number]|number, pad: 'valid'|'same'|number,\n dimRoundingMode?: 'floor'|'round'|'ceil'): T {\n const $dy = convertToTensor(dy, 'dy', 'avgPool3dGrad');\n const $input = convertToTensor(input, 'input', 'avgPool3dGrad');\n\n let dy5D = $dy as Tensor5D;\n let input5D = $input as Tensor5D;\n let reshapedTo5D = false;\n\n if ($input.rank === 4) {\n reshapedTo5D = true;\n dy5D = reshape(\n $dy, [1, $dy.shape[0], $dy.shape[1], $dy.shape[2], $dy.shape[3]]);\n input5D = reshape($input, [\n 1, $input.shape[0], $input.shape[1], $input.shape[2], $input.shape[3]\n ]);\n }\n\n util.assert(\n dy5D.rank === 5,\n () => `Error in avgPool3dGrad: dy must be rank 5 but got rank ` +\n `${dy5D.rank}.`);\n util.assert(\n input5D.rank === 5,\n () => `Error in avgPool3dGrad: input must be rank 5 but got rank ` +\n `${input5D.rank}.`);\n checkPadOnDimRoundingMode('avgPool3dGrad', pad, dimRoundingMode);\n const inputs: AvgPool3DGradInputs = {dy: dy5D, input: input5D};\n const attrs: AvgPool3DGradAttrs = {filterSize, strides, pad, dimRoundingMode};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const res = ENGINE.runKernel(\n AvgPool3DGrad, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as T;\n\n if (reshapedTo5D) {\n return reshape(\n res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]) as\n T;\n }\n\n return res;\n}\n\nexport const avgPool3dGrad = /* @__PURE__ */ op({avgPool3dGrad_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {AvgPool3D, AvgPool3DAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {avgPool3dGrad} from '../ops/avg_pool_3d_grad';\nimport {Tensor, Tensor5D} from '../tensor';\n\nexport const avgPool3DGradConfig: GradConfig = {\n kernelName: AvgPool3D,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const [x] = saved as [Tensor5D];\n const {filterSize, strides, pad, dimRoundingMode} =\n attrs as unknown as AvgPool3DAttrs;\n\n return {\n x: () => avgPool3dGrad(\n dy as Tensor5D, x, filterSize, strides, pad, dimRoundingMode)\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {AvgPoolGrad, AvgPoolGradAttrs, AvgPoolGradInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor3D, Tensor4D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {ExplicitPadding} from './conv_util';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Computes the backprop of an 2D avg pool.\n *\n * @param dy The dy error, of rank 4 or rank 3 of shape\n * [batchSize, height, width, channels]. If rank 3, batch of 1 is\n * assumed.\n * @param input The input image, of rank 4 or rank 3 of shape\n * [batchSize, height, width, channels]. If rank 3, batch of 1 is\n * assumed.\n * @param filterSize The filter size: `[filterHeight, filterWidth]`. If\n * `filterSize` is a single number, then `filterHeight == filterWidth`.\n * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If\n * `strides` is a single number, then `strideHeight == strideWidth`.\n * @param pad The type of padding algorithm used in the forward prop of the op.\n * 'same', 'valid', for more info, see this guide:\n * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](\n * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)\n */\nfunction avgPoolGrad_(\n dy: T|TensorLike, input: T|TensorLike, filterSize: [number, number]|number,\n strides: [number, number]|number,\n pad: 'valid'|'same'|number|ExplicitPadding): T {\n const $dy = convertToTensor(dy, 'dy', 'avgPoolGrad');\n const $input = convertToTensor(input, 'input', 'avgPoolGrad');\n\n util.assert(\n $input.rank === $dy.rank,\n () => `Rank of input (${$input.rank}) does not match rank of dy (${\n $dy.rank})`);\n\n let input4D = $input as Tensor4D;\n let dy4D = $dy as Tensor4D;\n let reshapedTo4D = false;\n\n if ($input.rank === 3) {\n reshapedTo4D = true;\n input4D =\n reshape($input, [1, $input.shape[0], $input.shape[1], $input.shape[2]]);\n dy4D = reshape($dy, [1, $dy.shape[0], $dy.shape[1], $dy.shape[2]]);\n }\n\n util.assert(\n dy4D.rank === 4,\n () => `Error in avgPoolGrad: dy must be rank 4 but got rank ` +\n `${dy4D.rank}.`);\n util.assert(\n input4D.rank === 4,\n () => `Error in avgPoolGrad: input must be rank 4 but got rank ` +\n `${input4D.rank}.`);\n\n const inputs: AvgPoolGradInputs = {dy: dy4D, input: input4D};\n\n const attrs: AvgPoolGradAttrs = {filterSize, strides, pad};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const res = ENGINE.runKernel(\n AvgPoolGrad, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as T;\n\n if (reshapedTo4D) {\n return reshape(res, [res.shape[1], res.shape[2], res.shape[3]]) as T;\n }\n return res;\n}\n\nexport const avgPoolGrad = 
/* @__PURE__ */ op({avgPoolGrad_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {AvgPool, AvgPoolAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {avgPoolGrad} from '../ops/avg_pool_grad';\nimport {Tensor, Tensor4D} from '../tensor';\n\nexport const avgPoolGradConfig: GradConfig = {\n kernelName: AvgPool,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const [x] = saved as [Tensor4D];\n const {filterSize, strides, pad} = attrs as unknown as AvgPoolAttrs;\n return {x: () => avgPoolGrad(dy as Tensor4D, x, filterSize, strides, pad)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {BatchMatMul, BatchMatMulAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {matMul} from '../ops/mat_mul';\nimport {Tensor} from '../tensor';\n\nexport const batchMatMulGradConfig: GradConfig = {\n kernelName: BatchMatMul,\n inputsToSave: ['a', 'b'],\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const [a, b] = saved;\n\n const {transposeA, transposeB} = attrs as unknown as BatchMatMulAttrs;\n\n if (!transposeA && !transposeB) {\n return {\n a: () => matMul(dy, b, false, true),\n b: () => matMul(a, dy, true, false)\n };\n } else if (!transposeA && transposeB) {\n return {\n a: () => matMul(dy, b, false, false),\n b: () => matMul(dy, a, true, false)\n };\n } else if (transposeA && !transposeB) {\n return {\n a: () => matMul(b, dy, false, true),\n b: () => matMul(a, dy, false, false)\n };\n } else {\n return {\n a: () => matMul(b, dy, true, true),\n b: () => matMul(dy, a, true, true)\n };\n }\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {BatchToSpaceND, BatchToSpaceNDAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {spaceToBatchND} from '../ops/space_to_batch_nd';\nimport {Tensor} from '../tensor';\n\nexport const batchToSpaceNDGradConfig: GradConfig = {\n kernelName: BatchToSpaceND,\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const {blockShape, crops} = attrs as unknown as BatchToSpaceNDAttrs;\n return {x: () => spaceToBatchND(dy, blockShape, crops)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {BroadcastTo, BroadCastToAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {sum} from '../ops/sum';\nimport {Tensor} from '../tensor';\n\nexport const broadcastToGradConfig: GradConfig = {\n kernelName: BroadcastTo,\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const broadCastToAttrs: BroadCastToAttrs =\n attrs as unknown as BroadCastToAttrs;\n\n const inputShape = broadCastToAttrs.inputShape;\n const outputShape = broadCastToAttrs.shape;\n\n const reps: number[] = Array.from(outputShape);\n for (let i = inputShape.length - 1; i >= 0; i--) {\n if (inputShape[i] === outputShape[i]) {\n reps[i] = 1;\n } else if (inputShape[i] !== 1) {\n throw new Error(`broadcastTo(): [${\n inputShape}] cannot be broadcast to [${outputShape}].`);\n }\n }\n const axes: number[] = [];\n for (let i = 0; i < reps.length; i++) {\n if (reps[i] > 1) {\n axes.push(i);\n }\n }\n\n return {x: () => sum(dy, axes, true /* keepDims */)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Cast} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {Tensor} from '../tensor';\n\nexport const castGradConfig: GradConfig = {\n kernelName: Cast,\n gradFunc: (dy: Tensor) => {\n return {x: () => dy.clone()};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Ceil} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {zerosLike} from '../ops/zeros_like';\nimport {Tensor} from '../tensor';\n\nexport const ceilGradConfig: GradConfig = {\n kernelName: Ceil,\n gradFunc: (dy: Tensor) => {\n // TODO(manrajgrover): Return null for gradients when backprop supports it.\n return {x: () => zerosLike(dy)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ClipByValue, ClipByValueAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {greaterEqual} from '../ops/greater_equal';\nimport {lessEqual} from '../ops/less_equal';\nimport {logicalAnd} from '../ops/logical_and';\nimport {where} from '../ops/where';\nimport {zerosLike} from '../ops/zeros_like';\nimport {Tensor} from '../tensor';\n\nexport const clipByValueGradConfig: GradConfig = {\n kernelName: ClipByValue,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const [x] = saved;\n const {clipValueMin, clipValueMax} = attrs as unknown as ClipByValueAttrs;\n return {\n x: () => where(\n logicalAnd(greaterEqual(x, clipValueMin), lessEqual(x, clipValueMax)),\n dy, zerosLike(dy)),\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ComplexAbs} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {absGradConfig} from './Abs_grad';\n\nexport const complexAbsGradConfig: GradConfig = {\n kernelName: ComplexAbs,\n inputsToSave: ['x'],\n gradFunc: absGradConfig.gradFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Concat, ConcatAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {split} from '../ops/split';\nimport {Tensor} from '../tensor';\nimport {parseAxisParam} from '../util';\n\nexport const concatGradConfig: GradConfig = {\n kernelName: Concat,\n saveAllInputs: true,\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const shapes = saved.map(t => t.shape);\n const {axis} = attrs as unknown as ConcatAttrs;\n const $axis = parseAxisParam(axis, saved[0].shape)[0];\n const sizeSplits = shapes.map(s => s[$axis]);\n const derTensors = split(dy, sizeSplits, $axis);\n return derTensors.map(t => () => t) as {};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Conv2D, Conv2DAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {conv2DBackpropFilter} from '../ops/conv2d_backprop_filter';\nimport {conv2DBackpropInput} from '../ops/conv2d_backprop_input';\nimport * as conv_util from '../ops/conv_util';\nimport {Tensor, Tensor4D} from '../tensor';\nimport * as util from '../util';\n\nexport const conv2DGradConfig: GradConfig = {\n kernelName: Conv2D,\n inputsToSave: ['x', 'filter'],\n gradFunc: (dy: Tensor4D, saved: Tensor[], attrs: NamedAttrMap) => {\n const [x4D, $filter] = saved as [Tensor4D, Tensor4D];\n const {dilations, strides, pad, dataFormat} =\n attrs as unknown as Conv2DAttrs;\n\n util.assert(\n conv_util.tupleValuesAreOne(dilations),\n () => 'Error in gradient of conv2D: dilation rates greater than 1 ' +\n `are not yet supported in gradients. Got dilations '${dilations}'`);\n\n return {\n x: () =>\n conv2DBackpropInput(x4D.shape, dy, $filter, strides, pad, dataFormat),\n filter: () =>\n conv2DBackpropFilter(x4D, dy, $filter.shape, strides, pad, dataFormat)\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Conv2DBackpropInput, Conv2DBackpropInputAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {conv2d} from '../ops/conv2d';\nimport {conv2DBackpropFilter} from '../ops/conv2d_backprop_filter';\nimport {Tensor, Tensor4D} from '../tensor';\n\nexport const conv2DBackpropInputGradConfig: GradConfig = {\n kernelName: Conv2DBackpropInput,\n inputsToSave: ['dy', 'filter'],\n gradFunc: (ddx: Tensor4D, saved: Tensor[], attrs: NamedAttrMap) => {\n const [dy, filter] = saved as [Tensor4D, Tensor4D];\n\n const {strides, pad, dataFormat, dimRoundingMode} =\n attrs as unknown as Conv2DBackpropInputAttrs;\n\n return {\n dy: () => conv2d(\n ddx, filter, strides, pad, dataFormat, 1 /* dilations */,\n dimRoundingMode),\n filter: () => conv2DBackpropFilter(\n ddx, dy, filter.shape, strides, pad, dataFormat, dimRoundingMode)\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Conv3DBackpropFilterV2, Conv3DBackpropFilterV2Attrs, Conv3DBackpropFilterV2Inputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor4D, Tensor5D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport * as util from '../util';\n\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Computes the derivative of the filter of a 3D convolution.\n *\n * @param x The input tensor, of rank 5 or rank 4 of shape\n * [batch, depth, height, width, inChannels]. If rank 4, batch of 1 is\n * assumed.\n * @param dy The dy image, of rank 5 or rank 4, of shape\n * [batch, depth, height, width, outDepth]. If rank 4, batch of 1 is\n * assumed.\n * @param filterShape The shape of the filter, length 5,\n * [filterDepth, filterHeight, filterWidth, inDepth, outDepth].\n * @param strides The strides of the convolution: [strideDepth, strideHeight,\n * strideWidth].\n * @param pad A string from: 'same', 'valid'. 
The type of padding algorithm\n * used in the forward prop of the op.\n */\nfunction conv3DBackpropFilter_(\n x: T, dy: T, filterShape: [number, number, number, number, number],\n strides: [number, number, number]|number, pad: 'valid'|'same'): Tensor5D {\n let x5D = x as Tensor5D;\n if (x.rank === 4) {\n x5D = reshape(x, [1, x.shape[0], x.shape[1], x.shape[2], x.shape[3]]);\n }\n let dy5D = dy as Tensor5D;\n if (dy5D.rank === 4) {\n dy5D = reshape(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2], dy.shape[3]]);\n }\n util.assert(\n x5D.rank === 5,\n () => `Error in conv3dDerFilter: input must be rank 5, but got shape ` +\n `${x5D.shape}.`);\n util.assert(\n dy5D.rank === 5,\n () => `Error in conv3dDerFilter: dy must be rank 5, but got shape ` +\n `${dy5D.shape}.`);\n util.assert(\n filterShape.length === 5,\n () => `Error in conv3dDerFilter: filterShape must be length 5, but got ` +\n `${filterShape}.`);\n util.assert(\n x5D.shape[4] === filterShape[3],\n () => `Error in conv3dDerFilter: depth of input ${x5D.shape[4]}) must ` +\n `match input depth in filter (${filterShape[3]}.`);\n util.assert(\n dy5D.shape[4] === filterShape[4],\n () => `Error in conv3dDerFilter: depth of dy (${dy5D.shape[4]}) must ` +\n `match output depth for filter (${filterShape[4]}).`);\n\n const inputs: Conv3DBackpropFilterV2Inputs = {x: x5D, dy: dy5D};\n\n const attrs: Conv3DBackpropFilterV2Attrs = {strides, pad, filterShape};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n return ENGINE.runKernel(\n Conv3DBackpropFilterV2, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as Tensor5D;\n}\n\nexport const conv3DBackpropFilter = /* @__PURE__ */ op({conv3DBackpropFilter_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Conv3D, Conv3DAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {conv3DBackpropFilter} from '../ops/conv3d_backprop_filter';\nimport {conv3DBackpropInput} from '../ops/conv3d_backprop_input';\nimport {tupleValuesAreOne} from '../ops/conv_util';\nimport {Tensor, Tensor5D} from '../tensor';\nimport * as util from '../util';\n\nexport const conv3DGradConfig: GradConfig = {\n kernelName: Conv3D,\n inputsToSave: ['x', 'filter'],\n gradFunc: (dy: Tensor5D, saved: Tensor[], attrs: NamedAttrMap) => {\n const {dilations, strides, pad} = attrs as unknown as Conv3DAttrs;\n util.assert(\n tupleValuesAreOne(dilations),\n () =>\n 'Error in gradient of conv3D: dilation rates greater than 1 are ' +\n `not yet supported in gradients. 
Got dilations '${dilations}'`);\n\n const [x5D, $filter] = saved;\n\n return {\n x: () => conv3DBackpropInput(\n (x5D as Tensor5D).shape, dy, $filter as Tensor5D, strides, pad),\n filter: () => conv3DBackpropFilter(\n x5D as Tensor5D, dy, ($filter as Tensor5D).shape, strides, pad)\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Cos} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cast} from '../ops/cast';\nimport {mul} from '../ops/mul';\nimport {neg} from '../ops/neg';\nimport {sin} from '../ops/sin';\nimport {Tensor} from '../tensor';\n\nexport const cosGradConfig: GradConfig = {\n kernelName: Cos,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n\n return {x: () => mul(neg(sin(cast(x, 'float32'))), dy)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Cosh} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cast} from '../ops/cast';\nimport {mul} from '../ops/mul';\nimport {sinh} from '../ops/sinh';\nimport {Tensor} from '../tensor';\n\nexport const coshGradConfig: GradConfig = {\n kernelName: Cosh,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n\n return {x: () => mul(sinh(cast(x, 'float32')), dy)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Cumsum, CumsumAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {getAxesPermutation} from '../ops/axis_util';\nimport {cumsum} from '../ops/cumsum';\nimport {transpose} from '../ops/transpose';\nimport {Tensor} from '../tensor';\n\nexport const cumsumGradConfig: GradConfig = {\n kernelName: Cumsum,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const [x] = saved;\n const {axis, exclusive, reverse}: CumsumAttrs =\n attrs as unknown as CumsumAttrs;\n\n return {\n x: () => {\n const permutation = getAxesPermutation([axis], x.rank);\n\n let out = cumsum(dy, axis, exclusive, !reverse);\n\n if (permutation != null) {\n out = transpose(out, permutation);\n }\n\n return out;\n }\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {DepthwiseConv2dNative, DepthwiseConv2dNativeAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport * as conv_util from '../ops/conv_util';\nimport {depthwiseConv2dNativeBackpropFilter} from '../ops/depthwise_conv2d_native_backprop_filter';\nimport {depthwiseConv2dNativeBackpropInput} from '../ops/depthwise_conv2d_native_backprop_input';\nimport {Tensor, Tensor4D} from '../tensor';\nimport * as util from '../util';\n\nexport const depthwiseConv2dNativeGradConfig: GradConfig = {\n kernelName: DepthwiseConv2dNative,\n inputsToSave: ['x', 'filter'],\n gradFunc: (dy: Tensor4D, saved: Tensor[], attrs: NamedAttrMap) => {\n const {dilations, strides, pad, dimRoundingMode} =\n attrs as unknown as DepthwiseConv2dNativeAttrs;\n const $dilations = dilations == null ? [1, 1] as[number,number] : dilations;\n\n util.assert(\n conv_util.tupleValuesAreOne($dilations),\n () => 'Error in gradient of depthwiseConv2dNative: dilation rates ' +\n `greater than 1 are not yet supported. 
Got dilations ` +\n `'${$dilations}'`);\n\n const [x, filter] = saved as [Tensor4D, Tensor4D];\n\n util.assert(\n x.rank === 4,\n () => `Error in gradient of depthwiseConv2dNative: input must be ` +\n `rank 4, but got rank ${x.rank}.`);\n util.assert(\n filter.rank === 4,\n () => `Error in gradient of depthwiseConv2dNative: filter must be ` +\n `rank 4, but got rank ${filter.rank}.`);\n util.assert(\n x.shape[3] === filter.shape[2],\n () => `Error in gradient of depthwiseConv2d: number of input ` +\n `channels (${x.shape[3]}) must match the inChannels dimension ` +\n `in filter ${filter.shape[2]}.`);\n\n util.assert(\n conv_util.eitherStridesOrDilationsAreOne(strides, $dilations),\n () => 'Error in gradient of depthwiseConv2d: Either strides or ' +\n `dilations must be 1. Got strides ${strides} and dilations ` +\n `'${$dilations}'.`);\n\n conv_util.checkPadOnDimRoundingMode(\n 'depthwiseConv2d', pad, dimRoundingMode);\n\n return {\n x: () => depthwiseConv2dNativeBackpropInput(\n x.shape, dy, filter, strides, pad, $dilations, dimRoundingMode),\n filter: () => depthwiseConv2dNativeBackpropFilter(\n x, dy, filter.shape, strides, pad, $dilations, dimRoundingMode),\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Dilation2D, Dilation2DBackpropFilter, Dilation2DBackpropFilterInputs, Dilation2DBackpropInput, Dilation2DBackpropInputInputs} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor, Tensor3D, Tensor4D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\n\nexport const dilation2dGradConfig: GradConfig = {\n kernelName: Dilation2D,\n inputsToSave: ['x', 'filter'],\n gradFunc: (dy: Tensor4D, saved: Tensor[], attrs: NamedAttrMap) => {\n const [x, filter] = saved as [Tensor4D, Tensor3D];\n\n const inputInputs: Dilation2DBackpropInputInputs = {x, filter, dy};\n const filterInputs: Dilation2DBackpropFilterInputs = {x, filter, dy};\n\n return {\n x: () => ENGINE.runKernel(\n Dilation2DBackpropInput,\n inputInputs as unknown as NamedTensorMap, attrs),\n filter: () => ENGINE.runKernel(\n Dilation2DBackpropFilter,\n filterInputs as unknown as NamedTensorMap, attrs)\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {Elu, EluGrad, EluGradInputs} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\n\nexport const eluGradConfig: GradConfig = {\n kernelName: Elu,\n outputsToSave: [true],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [y] = saved;\n\n const inputs: EluGradInputs = {dy, y};\n\n return {x: () => ENGINE.runKernel(EluGrad,\n inputs as unknown as NamedTensorMap)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Erf} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {exp} from '../ops/exp';\nimport {mul} from '../ops/mul';\nimport {neg} from '../ops/neg';\nimport {square} from '../ops/square';\nimport {Tensor} from '../tensor';\n\nexport const erfGradConfig: GradConfig = {\n kernelName: Erf,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n const a = mul(exp(neg(square(x))), 2 / Math.sqrt(Math.PI));\n return {x: () => mul(dy, a)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Exp} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {mul} from '../ops/mul';\nimport {Tensor} from '../tensor';\n\nexport const expGradConfig: GradConfig = {\n kernelName: Exp,\n outputsToSave: [true],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [y] = saved;\n return {x: () => mul(dy, y)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ExpandDims} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {reshape} from '../ops/reshape';\nimport {Tensor} from '../tensor';\n\nexport const expandDimsGradConfig: GradConfig = {\n kernelName: ExpandDims,\n inputsToSave: ['input'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [input] = saved;\n return {input: () => reshape(dy, input.shape)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Expm1} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {exp} from '../ops/exp';\nimport {mul} from '../ops/mul';\nimport {Tensor} from '../tensor';\n\nexport const expm1GradConfig: GradConfig = {\n kernelName: Expm1,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n return {x: () => mul(dy, exp(x))};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Floor} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {zerosLike} from '../ops/zeros_like';\nimport {Tensor} from '../tensor';\n\nexport const floorGradConfig: GradConfig = {\n kernelName: Floor,\n gradFunc: (dy: Tensor) => {\n return {x: () => zerosLike(dy)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {FloorDiv} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {assertAndGetBroadcastShape, getReductionAxes} from '../ops/broadcast_util';\nimport {cast} from '../ops/cast';\nimport {div} from '../ops/div';\nimport {mul} from '../ops/mul';\nimport {neg} from '../ops/neg';\nimport {reshape} from '../ops/reshape';\nimport {square} from '../ops/square';\nimport {sum} from '../ops/sum';\nimport {Tensor} from '../tensor';\n\nexport const floorDivGradConfig: GradConfig = {\n kernelName: FloorDiv,\n inputsToSave: ['a', 'b'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [a, b] = saved;\n const outShape = assertAndGetBroadcastShape(a.shape, b.shape);\n\n const derA = () => {\n const res = div(dy, cast(b, 'float32'));\n const reduceAxes = getReductionAxes(a.shape, outShape);\n if (reduceAxes.length > 0) {\n return reshape(sum(res, reduceAxes), a.shape);\n }\n return res;\n };\n const derB = () => {\n let res = mul(dy, cast(a, 'float32'));\n const reduceAxes = getReductionAxes(b.shape, outShape);\n if (reduceAxes.length > 0) {\n res = reshape(sum(res, reduceAxes), b.shape);\n }\n const tmp = square(b);\n return neg(div(res, cast(tmp, 'float32')));\n };\n return {a: derA, b: derB};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {FusedBatchNorm, FusedBatchNormAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {add} from '../ops/add';\nimport {getReductionAxes} from '../ops/broadcast_util';\nimport {mul} from '../ops/mul';\nimport {reshape} from '../ops/reshape';\nimport {rsqrt} from '../ops/rsqrt';\nimport {scalar} from '../ops/scalar';\nimport {sub} from '../ops/sub';\nimport {sum} from '../ops/sum';\nimport {tile} from '../ops/tile';\nimport {Tensor} from '../tensor';\nimport {Rank, ShapeMap} from '../types';\n\nexport const fusedBatchNormGradConfig: GradConfig = {\n kernelName: FusedBatchNorm,\n inputsToSave: ['x', 'mean', 'variance', 'scale'],\n gradFunc: (\n dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const {varianceEpsilon} = attrs as unknown as FusedBatchNormAttrs;\n const [x, mean, variance, scale] = saved;\n\n const scaleValue = scale == null ? scalar(1) : scale;\n const reductionAxes = getReductionAxes(mean.shape, x.shape);\n const tileShape: number[] = [];\n if (mean.rank === 1) {\n for (let i = 0; i < x.shape.length - 1; ++i) {\n tileShape.push(x.shape[i]);\n }\n tileShape.push(1);\n }\n\n const xMinusMean = sub(x, mean);\n const dyTimesScaleValue = mul(dy, scaleValue);\n const oneOverSqrtVariance = rsqrt(add(variance, scalar(varianceEpsilon)));\n const minusHalfRCube = mul(\n mul(mul(oneOverSqrtVariance, oneOverSqrtVariance), oneOverSqrtVariance),\n scalar(-0.5));\n\n const derX = () => {\n if (mean.rank === 1) {\n return reshape(\n mul(mul(dy,\n tile(\n reshape(oneOverSqrtVariance, [1, 1, 1, mean.shape[0]]),\n tileShape)),\n scaleValue),\n x.shape);\n } else {\n return reshape(mul(mul(dy, oneOverSqrtVariance), scaleValue), x.shape);\n }\n };\n const derMean = () => {\n let meanDer =\n mul(mul(oneOverSqrtVariance, scalar(-1)), dyTimesScaleValue);\n if (mean.rank === 1) {\n meanDer = sum(meanDer, reductionAxes);\n }\n return reshape(meanDer, mean.shape as ShapeMap[R]);\n };\n const derVariance = () => {\n let varianceDer = mul(mul(minusHalfRCube, xMinusMean), dyTimesScaleValue);\n\n if (mean.rank === 1) {\n varianceDer = sum(varianceDer, reductionAxes);\n }\n return reshape(varianceDer, mean.shape as ShapeMap[R]);\n };\n const derScale = () => {\n const xMinusMean2TimesRsqrt = mul(xMinusMean, oneOverSqrtVariance);\n\n let scaleDer = mul(dy, xMinusMean2TimesRsqrt);\n if (mean.rank === 1) {\n scaleDer = sum(scaleDer, reductionAxes);\n }\n return reshape(scaleDer, mean.shape as ShapeMap[R]);\n };\n const derOffset = () => {\n let offsetDer = dy;\n if (mean.rank === 1) {\n offsetDer = sum(offsetDer, reductionAxes);\n }\n return reshape(offsetDer, mean.shape as ShapeMap[R]);\n };\n\n return {\n x: derX,\n mean: derMean,\n variance: derVariance,\n scale: derScale,\n offset: derOffset\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GatherV2, GatherV2Attrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {getUndoAxesPermutation} from '../ops/axis_util';\nimport {reshape} from '../ops/reshape';\nimport {transpose} from '../ops/transpose';\nimport {unsortedSegmentSum} from '../ops/unsorted_segment_sum';\nimport {Tensor, Tensor1D} from '../tensor';\nimport {parseAxisParam} from '../util';\n\nexport const gatherGradConfig: GradConfig = {\n kernelName: GatherV2,\n inputsToSave: ['x', 'indices'],\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const [x, indices] = saved;\n const {axis} = attrs as unknown as GatherV2Attrs;\n\n const parsedAxis = parseAxisParam(axis, x.shape)[0];\n\n const derX = () => {\n const paramsShape = x.shape;\n const indicesSize = indices.size;\n\n const outerShape = paramsShape.slice(0, parsedAxis);\n const outerDims = outerShape.length;\n const innerShape = paramsShape.slice(axis, paramsShape.length).slice(1);\n const innerDims = innerShape.length;\n\n const outerAxesIndices = arrayRange(0, outerDims);\n const innerAxesIndices =\n arrayRange(outerDims + 1, outerDims + 1 + innerDims);\n\n const valuesShape = arrayConcat([outerShape, [indicesSize], innerShape]);\n\n const values = reshape(dy, valuesShape);\n const reshapedIndices = reshape(indices, [indicesSize]);\n\n const transposeDims =\n arrayConcat([[outerDims], outerAxesIndices, innerAxesIndices]);\n const valuesTranspose = transpose(values, transposeDims);\n let paramsGrad = unsortedSegmentSum(\n valuesTranspose, reshapedIndices as Tensor1D, x.shape[parsedAxis]);\n\n const invertTransposeDims = getUndoAxesPermutation(transposeDims);\n paramsGrad = transpose(paramsGrad, invertTransposeDims);\n\n return paramsGrad;\n };\n return {x: derX, indices: () => indices};\n }\n};\n\nfunction arrayRange(start: number, stop: number): number[] {\n const result = [];\n for (let i = start; i < stop; ++i) {\n result.push(i);\n }\n return result;\n}\n\nfunction arrayConcat(arrays: number[][]): number[] {\n const result = [];\n for (let i = 0; i < arrays.length; ++i) {\n for (let j = 0; j < arrays[i].length; ++j) {\n result.push(arrays[i][j]);\n }\n }\n return result;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {GreaterEqual} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {zerosLike} from '../ops/zeros_like';\nimport {Tensor} from '../tensor';\n\nexport const greaterEqualGradConfig: GradConfig = {\n kernelName: GreaterEqual,\n inputsToSave: ['a', 'b'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [a, b] = saved;\n return {a: () => zerosLike(a), b: () => zerosLike(b)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Identity} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cast} from '../ops/cast';\nimport {Tensor} from '../tensor';\n\nexport const identityGradConfig: GradConfig = {\n kernelName: Identity,\n gradFunc: (dy: Tensor) => {\n return {x: () => cast(dy, 'float32')};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {IsFinite} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {zerosLike} from '../ops/zeros_like';\nimport {Tensor} from '../tensor';\n\nexport const isFiniteGradConfig: GradConfig = {\n kernelName: IsFinite,\n gradFunc: (dy: Tensor) => {\n // TODO(nsthorat): Let gradients be null for cases where we want to stop\n // backpropgation.\n return {x: () => zerosLike(dy)};\n }\n};\n","\n/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {IsInf} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {zerosLike} from '../ops/zeros_like';\nimport {Tensor} from '../tensor';\n\nexport const isInfGradConfig: GradConfig = {\n kernelName: IsInf,\n gradFunc: (dy: Tensor) => {\n // TODO(nsthorat): Let gradients be null for cases where we want to stop\n // backpropgation.\n return {x: () => zerosLike(dy)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {IsNan} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {zerosLike} from '../ops/zeros_like';\nimport {Tensor} from '../tensor';\n\nexport const isNanGradConfig: GradConfig = {\n kernelName: IsNan,\n gradFunc: (dy: Tensor) => {\n // TODO(nsthorat): Let gradients be null for cases where we want to stop\n // backpropgation.\n return {x: () => zerosLike(dy)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {LeakyRelu, LeakyReluAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {greater} from '../ops/greater';\nimport {mul} from '../ops/mul';\nimport {where} from '../ops/where';\nimport {Tensor} from '../tensor';\n\nexport const leakyReluGradConfig: GradConfig = {\n kernelName: LeakyRelu,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const [x] = saved;\n const {alpha} = attrs as unknown as LeakyReluAttrs;\n const mask = greater(x, 0);\n\n // Returns `gradients * (features > 0) + alpha * gradients * (features <=\n // 0)`.\n return {x: () => where(mask, dy, mul(dy, alpha))};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Log1p} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {add} from '../ops/add';\nimport {div} from '../ops/div';\nimport {Tensor} from '../tensor';\n\nexport const log1pGradConfig: GradConfig = {\n kernelName: Log1p,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n return {x: () => div(dy, add(x, 1))};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Log} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cast} from '../ops/cast';\nimport {div} from '../ops/div';\nimport {Tensor} from '../tensor';\n\nexport const logGradConfig: GradConfig = {\n kernelName: Log,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n return {x: () => div(dy, cast(x, 'float32'))};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {LogSoftmax, LogSoftmaxAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {exp} from '../ops/exp';\nimport {mul} from '../ops/mul';\nimport {sub} from '../ops/sub';\nimport {sum} from '../ops/sum';\nimport {Tensor} from '../tensor';\n\nexport const logSoftmaxGradConfig: GradConfig = {\n kernelName: LogSoftmax,\n inputsToSave: [],\n outputsToSave: [true],\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const [value] = saved;\n const {axis} = attrs as unknown as LogSoftmaxAttrs;\n return {\n logits: () => {\n const keepDims = true;\n const softmax = exp(value);\n return sub(dy, mul(sum(dy, axis, keepDims), softmax));\n }\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {LRNGrad, LRNGradAttrs, LRNGradInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor4D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\n\nimport {op} from './operation';\n\nfunction localResponseNormalizationBackprop_(\n x: T, y: T, dy: T, depthRadius = 5, bias = 1, alpha = 1, beta = 0.5): T {\n const inputs: LRNGradInputs = {x, y, dy};\n\n const attrs: LRNGradAttrs = {depthRadius, bias, alpha, beta};\n\n return ENGINE.runKernel(\n LRNGrad, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap);\n}\n\nexport const localResponseNormalizationBackprop =\n op({localResponseNormalizationBackprop_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {LRN, LRNAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {localResponseNormalizationBackprop} from '../ops/local_response_normalization_backprop';\nimport {Tensor, Tensor4D} from '../tensor';\n\nexport const lrnGradConfig: GradConfig = {\n kernelName: LRN,\n inputsToSave: ['x'],\n outputsToSave: [true],\n gradFunc: (dy: Tensor4D, saved: Tensor[], attrs: NamedAttrMap) => {\n const [x, y] = saved as [Tensor4D, Tensor4D];\n const {depthRadius, bias, alpha, beta} = attrs as unknown as LRNAttrs;\n\n return {\n x: () => localResponseNormalizationBackprop(\n x, y, dy, depthRadius, bias, alpha, beta)\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport * as axis_util from '../ops/axis_util';\nimport {cast} from '../ops/cast';\nimport {equal} from '../ops/equal';\nimport {mul} from '../ops/mul';\nimport {reshape} from '../ops/reshape';\nimport {Tensor} from '../tensor';\n\n/**\n * Gradient helper function for the min and max operations.\n */\nexport function gradForMinAndMax(\n dy: T, y: T, xOrig: Tensor, origAxes: number[]) {\n if (y.rank < xOrig.rank) {\n y = reshape(y, axis_util.expandShapeToKeepDim(y.shape, origAxes)) as T;\n }\n if (dy.rank < xOrig.rank) {\n dy = reshape(dy, axis_util.expandShapeToKeepDim(dy.shape, origAxes)) as T;\n }\n return {\n x: () => {\n const dx = mul(dy, cast(equal(xOrig, y), dy.dtype));\n return dx;\n }\n };\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Max, MaxAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport * as util from '../util';\n\nimport {gradForMinAndMax} from './min_max_grad_util';\n\nexport const maxGradConfig: GradConfig = {\n kernelName: Max,\n inputsToSave: ['x'],\n outputsToSave: [true],\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const maxAttrs: MaxAttrs = attrs as unknown as MaxAttrs;\n const {reductionIndices} = maxAttrs;\n const x = saved[0];\n const y = saved[1];\n const origAxes = util.parseAxisParam(reductionIndices, x.shape);\n const maxGrad = gradForMinAndMax(dy, y, x, origAxes);\n return {\n x: () => {\n return maxGrad['x']();\n }\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
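gradForMinAndMax above routes the incoming gradient to the positions that attained the extremum via an equality mask (ties all receive the gradient). A small illustrative sketch (not part of the bundled sources), assuming the public '@tensorflow/tfjs' API:

// Illustrative only: the gradient of a full max reduction flows to the max positions.
import * as tf from '@tensorflow/tfjs';

const dmax = tf.grad((x: tf.Tensor) => tf.max(x));
dmax(tf.tensor1d([1, 3, 3, 2])).print();  // expected: [0, 1, 1, 0]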
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Maximum} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cast} from '../ops/cast';\nimport {greaterEqual} from '../ops/greater_equal';\nimport {less} from '../ops/less';\nimport {mul} from '../ops/mul';\nimport {Tensor} from '../tensor';\n\nexport const maximumGradConfig: GradConfig = {\n kernelName: Maximum,\n inputsToSave: ['a', 'b'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [a, b] = saved;\n const derA = () => mul(dy, cast(greaterEqual(a, b), 'float32'));\n const derB = () => mul(dy, cast(less(a, b), 'float32'));\n return {a: derA, b: derB};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {MaxPool3DGrad, MaxPool3DGradAttrs, MaxPool3DGradInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor4D, Tensor5D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport {checkPadOnDimRoundingMode} from './conv_util';\nimport {op} from './operation';\nimport {reshape} from './reshape';\n\n/**\n * Computes the backprop of a 3d max pool.\n *\n * @param dy The dy error, of rank 5 of shape\n * [batchSize, depth, height, width, channels].\n * assumed.\n * @param input The original input image, of rank 5 or rank 4 of shape\n * [batchSize, depth, height, width, channels].\n * @param output The original output image, of rank 5 of shape\n * [batchSize, outDepth, outHeight, outWidth, channels].\n * @param filterSize The filter size:\n * `[filterDepth, filterHeight, filterWidth]`.\n * `filterSize` is a single number,\n * then `filterDepth == filterHeight == filterWidth`.\n * @param strides The strides of the pooling:\n * `[strideDepth, strideHeight, strideWidth]`. If\n * `strides` is a single number, then `strideHeight == strideWidth`.\n * @param pad A string from: 'same', 'valid'. The type of padding algorithm\n * used in the forward prop of the op.\n * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. 
If none is\n * provided, it will default to truncate.\n */\nfunction maxPool3dGrad_(\n dy: T|TensorLike, input: T|TensorLike, output: T|TensorLike,\n filterSize: [number, number, number]|number,\n strides: [number, number, number]|number, pad: 'valid'|'same'|number,\n dimRoundingMode?: 'floor'|'round'|'ceil'): T {\n const $dy = convertToTensor(dy, 'dy', 'maxPool3dGrad');\n const $input = convertToTensor(input, 'input', 'maxPool3dGrad');\n const $output = convertToTensor(output, 'output', 'maxPool3dGrad');\n\n let dy5D = $dy as Tensor5D;\n let input5D = $input as Tensor5D;\n let output5D = $output as Tensor5D;\n let reshapedTo5D = false;\n\n if ($input.rank === 4) {\n reshapedTo5D = true;\n dy5D = reshape(\n $dy, [1, $dy.shape[0], $dy.shape[1], $dy.shape[2], $dy.shape[3]]);\n input5D = reshape($input, [\n 1, $input.shape[0], $input.shape[1], $input.shape[2], $input.shape[3]\n ]);\n output5D = reshape($output, [\n 1, $output.shape[0], $output.shape[1], $output.shape[2], $output.shape[3]\n ]);\n }\n\n util.assert(\n dy5D.rank === 5,\n () => `Error in maxPool3dGrad: dy must be rank 5 but got rank ` +\n `${dy5D.rank}.`);\n util.assert(\n input5D.rank === 5,\n () => `Error in maxPool3dGrad: input must be rank 5 but got rank ` +\n `${input5D.rank}.`);\n util.assert(\n output5D.rank === 5,\n () => `Error in maxPool3dGrad: output must be rank 5 but got rank ` +\n `${output5D.rank}.`);\n checkPadOnDimRoundingMode('maxPool3dGrad', pad, dimRoundingMode);\n const inputs:\n MaxPool3DGradInputs = {dy: dy5D, input: input5D, output: output5D};\n const attrs: MaxPool3DGradAttrs = {filterSize, strides, pad, dimRoundingMode};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const res = ENGINE.runKernel(\n MaxPool3DGrad, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as T;\n\n if (reshapedTo5D) {\n return reshape(\n res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]) as\n T;\n }\n\n return res;\n}\n\nexport const maxPool3dGrad = /* @__PURE__ */ op({maxPool3dGrad_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {MaxPool3D, MaxPool3DAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {maxPool3dGrad} from '../ops/max_pool_3d_grad';\nimport {Tensor, Tensor5D} from '../tensor';\n\nexport const maxPool3DGradConfig: GradConfig = {\n kernelName: MaxPool3D,\n inputsToSave: ['x'],\n outputsToSave: [true],\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const [x, y] = saved as [Tensor5D, Tensor5D];\n const {filterSize, strides, pad, dimRoundingMode} =\n attrs as unknown as MaxPool3DAttrs;\n\n return {\n x: () => maxPool3dGrad(\n dy as Tensor5D, x, y, filterSize, strides, pad, dimRoundingMode)\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ENGINE} from '../engine';\nimport {MaxPoolGrad, MaxPoolGradAttrs, MaxPoolGradInputs} from '../kernel_names';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor4D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\nimport {convertToTensor} from '../tensor_util_env';\nimport {TensorLike} from '../types';\nimport * as util from '../util';\n\nimport * as conv_util from './conv_util';\nimport {op} from './operation';\n\n/**\n * Computes the backprop of a 2D max pool.\n *\n * @param dy The dy error, of rank 4 or rank 3 of shape\n * [batchSize, height, width, channels]. If rank 3, batch of 1 is\n * assumed.\n * @param input The original input image, of rank 4, of shape\n * [batchSize, height, width, channels].\n * @param output The original output image, of rank 4, of shape\n * [batchSize, outHeight, outWidth, channels].\n * @param filterSize The filter size: `[filterHeight, filterWidth]`. If\n * `filterSize` is a single number, then `filterHeight == filterWidth`.\n * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If\n * `strides` is a single number, then `strideHeight == strideWidth`.\n * @param pad The type of padding algorithm used in the forward prop of the op.\n * 'same', 'valid', for more info, see this guide:\n * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](\n * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)\n * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. 
If none is\n * provided, it will default to truncate.\n */\nfunction maxPoolGrad_(\n dy: Tensor4D|TensorLike, input: Tensor4D|TensorLike,\n output: Tensor4D|TensorLike, filterSize: [number, number]|number,\n strides: [number, number]|number,\n pad: 'valid'|'same'|number|conv_util.ExplicitPadding,\n dimRoundingMode?: 'floor'|'round'|'ceil'): Tensor4D {\n const $dy = convertToTensor(dy, 'dy', 'maxPoolGrad');\n const $input = convertToTensor(input, 'input', 'maxPoolGrad');\n const $output = convertToTensor(output, 'output', 'maxPoolGrad');\n\n util.assert(\n $input.rank === $dy.rank,\n () => `Rank of input (${$input.rank}) does not match rank of dy ` +\n `(${$dy.rank})`);\n\n util.assert(\n $dy.rank === 4,\n () => `Error in maxPoolGrad: dy must be rank 4 but got rank ` +\n `${$dy.rank}.`);\n util.assert(\n $input.rank === 4,\n () => `Error in maxPoolGrad: input must be rank 4 but got rank ` +\n `${$input.rank}.`);\n conv_util.checkPadOnDimRoundingMode('maxPoolGrad', pad, dimRoundingMode);\n const inputs: MaxPoolGradInputs = {dy: $dy, input: $input, output: $output};\n const attrs: MaxPoolGradAttrs = {filterSize, strides, pad, dimRoundingMode};\n\n // tslint:disable-next-line: no-unnecessary-type-assertion\n return ENGINE.runKernel(\n MaxPoolGrad, inputs as unknown as NamedTensorMap,\n attrs as unknown as NamedAttrMap) as Tensor4D;\n}\n\nexport const maxPoolGrad = /* @__PURE__ */ op({maxPoolGrad_});\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {MaxPool, MaxPoolAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {maxPoolGrad} from '../ops/max_pool_grad';\nimport {Tensor, Tensor4D} from '../tensor';\n\nexport const maxPoolGradConfig: GradConfig = {\n kernelName: MaxPool,\n inputsToSave: ['x'],\n outputsToSave: [true],\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const [x, y] = saved as [Tensor4D, Tensor4D];\n const {filterSize, strides, pad} = attrs as unknown as MaxPoolAttrs;\n\n return {\n x: () => maxPoolGrad(dy as Tensor4D, x, y, filterSize, strides, pad)\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {PadV2, PadV2Attrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {slice} from '../ops/slice';\nimport {Tensor} from '../tensor';\n\nexport const padV2GradConfig: GradConfig = {\n kernelName: PadV2,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n // Pad introduces values around the original tensor, so the gradient\n // slices the original shape out of the gradient.\n const x = saved[0];\n const {paddings} = attrs as unknown as PadV2Attrs;\n const begin = paddings.map(p => p[0]);\n return {x: () => slice(dy, begin, x.shape)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Pow} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport * as broadcast_util from '../ops/broadcast_util';\nimport {cast} from '../ops/cast';\nimport {greater} from '../ops/greater';\nimport {log} from '../ops/log';\nimport {mul} from '../ops/mul';\nimport {pow} from '../ops/pow';\nimport {reshape} from '../ops/reshape';\nimport {scalar} from '../ops/scalar';\nimport {sub} from '../ops/sub';\nimport {sum} from '../ops/sum';\nimport {where} from '../ops/where';\nimport {zerosLike} from '../ops/zeros_like';\nimport {Tensor} from '../tensor';\n\nexport const powGradConfig: GradConfig = {\n kernelName: Pow,\n inputsToSave: ['a', 'b'],\n outputsToSave: [true],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [a, b, y] = saved;\n const base = a;\n const exp = b;\n const outShape =\n broadcast_util.assertAndGetBroadcastShape(base.shape, exp.shape);\n\n const derBase = () => {\n const expFloat = cast(exp, 'float32');\n let res = mul(dy, mul(expFloat, pow(base, sub(expFloat, scalar(1)))));\n const reduceAxes = broadcast_util.getReductionAxes(base.shape, outShape);\n if (reduceAxes.length > 0) {\n res = sum(res, reduceAxes);\n }\n return reshape(res, base.shape);\n };\n const derExp = () => {\n const condition = greater(base, 0);\n const logBase = where(condition, log(base), zerosLike(base));\n let res = mul(dy, mul(y, logBase));\n const reduceAxes = broadcast_util.getReductionAxes(exp.shape, outShape);\n if (reduceAxes.length > 0) {\n res = sum(res, 
reduceAxes);\n }\n return reshape(res, exp.shape);\n };\n return {a: derBase, b: derExp};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Prelu} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {getReductionAxes} from '../ops/broadcast_util';\nimport {greater} from '../ops/greater';\nimport {mul} from '../ops/mul';\nimport {reshape} from '../ops/reshape';\nimport {sum} from '../ops/sum';\nimport {where} from '../ops/where';\nimport {zerosLike} from '../ops/zeros_like';\nimport {Tensor} from '../tensor';\n\nexport const preluGradConfig: GradConfig = {\n kernelName: Prelu,\n inputsToSave: ['x', 'alpha'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x, alpha] = saved;\n const mask = greater(x, 0);\n\n return {\n x: () => where(mask, dy, mul(dy, alpha)),\n alpha: () => {\n let res = where(mask, zerosLike(dy), mul(dy, x));\n const reduceAxes = getReductionAxes(alpha.shape, dy.shape);\n if (reduceAxes.length > 0) {\n res = sum(res, reduceAxes);\n }\n return reshape(res, alpha.shape);\n }\n };\n }\n};\n","/**\n * @license\n * Copyright 2022 Google Inc. 
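The powGradConfig above implements da = dy * b * a^(b-1) and db = dy * y * ln(a) (with ln(a) replaced by 0 where a <= 0), reducing over broadcast axes. An illustrative sketch (not part of the bundled sources), assuming the public '@tensorflow/tfjs' API:

// Illustrative only: gradients of a ** b with respect to both inputs.
import * as tf from '@tensorflow/tfjs';

const [da, db] = tf.grads((a: tf.Tensor, b: tf.Tensor) => tf.pow(a, b))(
    [tf.scalar(2), tf.scalar(3)]);
da.print();  // expected: b * a^(b-1) = 3 * 4 = 12
db.print();  // expected: a^b * ln(a) = 8 * ln(2) ≈ 5.545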
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '../base';\nimport {Prod, ProdAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {cumprod} from '../ops/cumprod';\nimport {mul} from '../ops/mul';\nimport {reshape} from '../ops/reshape';\nimport {transpose} from '../ops/transpose';\nimport {Tensor} from '../tensor';\n\n// Gradient for product operation on a single axis.\nfunction prodGradFn_(x: Tensor, dy: Tensor, axis: number): Tensor {\n // The gradient tensor (dy) has a set of axes removed, so we create re-shaped\n // versions (of size 1) for the removed axis; this supports broadcasting over\n // those dimensions.\n const expandedYShape = x.shape.slice();\n expandedYShape[axis] = 1;\n\n // The actual gradient computation.\n const expandedDy = reshape(dy, expandedYShape);\n const xCumProd = cumprod(x, axis, true, false);\n const xCumRevProd = cumprod(x, axis, true, true);\n const dx = mul(xCumProd, xCumRevProd);\n return mul(expandedDy, dx);\n}\n\n// Support gradients when the product is done on many axes at once.\n// This done py pushing all the axes on which the product is applied into a\n// single axis.\nfunction prodsGradFn_(x: Tensor, dy: Tensor, axis: number[]): Tensor {\n // Move all axes for doing prod over to the end of the tensor.\n const xRank = x.shape.length;\n const finalProdAxis = xRank - axis.length;\n const xPermutation = backend_util.getAxesPermutation(axis, xRank);\n let permutedX = x;\n if (xPermutation != null) {\n permutedX = transpose(x, xPermutation);\n }\n\n // Reshape all the prod dimensions into a single one, and do compute prod\n // gradients on that.\n const newShape = permutedX.shape.slice();\n const removedShape = newShape.splice(xRank - axis.length, axis.length);\n const endPartShape = removedShape.reduce((p, c) => p * c, 1);\n newShape.push(endPartShape);\n const reshapedPermutedX = permutedX.reshape(newShape);\n let prodGrad = prodGradFn_(reshapedPermutedX, dy, finalProdAxis);\n\n // Undo the re-shaping now we have the dx vector, and permute back to\n // original axes order.\n prodGrad = prodGrad.reshape(permutedX.shape);\n if (xPermutation != null) {\n const undoPermutation = backend_util.getUndoAxesPermutation(xPermutation);\n prodGrad = transpose(prodGrad, undoPermutation);\n }\n return prodGrad;\n}\n\n// Running example:\n// [\n// [\n// [3.0, 4.0],\n// [5.0, 6.0],\n// [7.0, 8.0]\n// ],\n// [\n// [3.0, 5.0],\n// [0.0, 6.0],\n// [5.0, 6.0]\n// ]\n// ]\n//\nexport const prodGradConfig: GradConfig = {\n kernelName: Prod,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor|Tensor[], saved: Tensor[], attrs: NamedAttrMap) => {\n const [x] = saved;\n const {axis} = (attrs as {}) as ProdAttrs;\n let axisArr = [] as number[];\n if (axis === undefined || axis === null) {\n axisArr = x.shape.map((_, i) => i);\n } else if (typeof axis === 'number') {\n axisArr = [axis];\n } 
else {\n axisArr = axis;\n }\n return {x: () => prodsGradFn_(x, dy as Tensor, axisArr)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Relu6} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cast} from '../ops/cast';\nimport {lessEqual} from '../ops/less_equal';\nimport {mul} from '../ops/mul';\nimport {step} from '../ops/step';\nimport {Tensor} from '../tensor';\n\nexport const relu6GradConfig: GradConfig = {\n kernelName: Relu6,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n const mask = mul(lessEqual(x, 6), step(x));\n\n return {x: () => mul(dy, cast(mask, 'float32'))};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Selu} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cast} from '../ops/cast';\nimport {exp} from '../ops/exp';\nimport {greater} from '../ops/greater';\nimport {mul} from '../ops/mul';\nimport {scalar} from '../ops/scalar';\nimport {SELU_SCALE, SELU_SCALEALPHA} from '../ops/selu_util';\nimport {where} from '../ops/where';\nimport {Tensor} from '../tensor';\n\nexport const seluGradConfig: GradConfig = {\n kernelName: Selu,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n return {\n x: () => {\n const mask = greater(x, scalar(0));\n\n const scaleAlpha = scalar(SELU_SCALEALPHA);\n const scale = scalar(SELU_SCALE);\n\n const greaterThanZeroDer = mul(dy, scale);\n const lessEqualZeroDer =\n mul(mul(dy, scaleAlpha), exp(cast(x, 'float32')));\n\n return where(mask, greaterThanZeroDer, lessEqualZeroDer);\n }\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Sigmoid} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {mul} from '../ops/mul';\nimport {scalar} from '../ops/scalar';\nimport {sub} from '../ops/sub';\nimport {Tensor} from '../tensor';\n\nexport const sigmoidGradConfig: GradConfig = {\n kernelName: Sigmoid,\n outputsToSave: [true],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [y] = saved;\n\n return {x: () => mul(dy, mul(y, sub(scalar(1), y)))};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Softmax, SoftmaxAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {mul} from '../ops/mul';\nimport {sub} from '../ops/sub';\nimport {sum} from '../ops/sum';\nimport {Tensor} from '../tensor';\n\nexport const softmaxGradConfig: GradConfig = {\n kernelName: Softmax,\n outputsToSave: [true],\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const [y] = saved;\n const {dim} = attrs as unknown as SoftmaxAttrs;\n const keepDims = true;\n\n const dyTimesY = mul(dy, y);\n return {\n logits: () => sub(dyTimesY, mul(sum(dyTimesY, [dim], keepDims), y))\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
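The sigmoidGradConfig above reuses the saved output y, since d/dx sigmoid(x) = y * (1 - y). An illustrative sketch (not part of the bundled sources), assuming the public '@tensorflow/tfjs' API:

// Illustrative only: sigmoid'(0) = 0.5 * (1 - 0.5) = 0.25.
import * as tf from '@tensorflow/tfjs';

const dsigmoid = tf.grad((x: tf.Tensor) => tf.sigmoid(x));
dsigmoid(tf.scalar(0)).print();  // expected: 0.25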
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {SpaceToBatchND, SpaceToBatchNDAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {batchToSpaceND} from '../ops/batch_to_space_nd';\nimport {Tensor} from '../tensor';\n\nexport const spaceToBatchNDGradConfig: GradConfig = {\n kernelName: SpaceToBatchND,\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const {blockShape, paddings} = attrs as unknown as SpaceToBatchNDAttrs;\n return {x: () => batchToSpaceND(dy, blockShape, paddings)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {SplitV, SplitVAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {concat} from '../ops/concat';\nimport {Tensor} from '../tensor';\n\nexport const splitVGradConfig: GradConfig = {\n kernelName: SplitV,\n gradFunc: (dy: Tensor[], saved: Tensor[], attrs: NamedAttrMap) => {\n const {axis} = attrs as unknown as SplitVAttrs;\n\n return {x: () => concat(dy, axis)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tanh} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {mul} from '../ops/mul';\nimport {scalar} from '../ops/scalar';\nimport {square} from '../ops/square';\nimport {sub} from '../ops/sub';\nimport {Tensor} from '../tensor';\n\nexport const tanhGradConfig: GradConfig = {\n kernelName: Tanh,\n outputsToSave: [true],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [y] = saved;\n\n return {x: () => mul(sub(scalar(1), square(y)), dy)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tile, TileAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {add} from '../ops/add';\nimport {slice} from '../ops/slice';\nimport {zerosLike} from '../ops/zeros_like';\nimport {Tensor} from '../tensor';\n\nexport const tileGradConfig: GradConfig = {\n kernelName: Tile,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const [x] = saved;\n const {reps} = attrs as unknown as TileAttrs;\n\n const derX = () => {\n let xGrad = zerosLike(x);\n // TODO(cais): Maybe reduce memory footprint by avoiding repeated\n // slicing.\n if (x.rank === 1) {\n for (let i = 0; i < reps[0]; ++i) {\n xGrad = add(xGrad, slice(dy, [i * x.shape[0]], [x.shape[0]]));\n }\n } else if (x.rank === 2) {\n for (let i = 0; i < reps[0]; ++i) {\n for (let j = 0; j < reps[1]; ++j) {\n xGrad = add(xGrad, slice(dy, [i * x.shape[0], j * x.shape[1]], [\n x.shape[0], x.shape[1]\n ]));\n }\n }\n } else if (x.rank === 3) {\n for (let i = 0; i < reps[0]; ++i) {\n for (let j = 0; j < reps[1]; ++j) {\n for (let k = 0; k < reps[2]; ++k) {\n xGrad =\n add(xGrad,\n slice(\n dy, [i * x.shape[0], j * x.shape[1], k * x.shape[2]],\n [x.shape[0], x.shape[1], x.shape[2]]));\n }\n }\n }\n } else if (x.rank === 4) {\n for (let i = 0; i < reps[0]; ++i) {\n for (let j = 0; j < reps[1]; ++j) {\n for (let k = 0; k < reps[2]; ++k) {\n for (let l = 0; l < reps[3]; ++l) {\n xGrad =\n add(xGrad,\n slice(\n dy,\n [\n i * x.shape[0], j * x.shape[1], k * x.shape[2],\n l * x.shape[3]\n ],\n [x.shape[0], x.shape[1], x.shape[2], x.shape[3]]));\n }\n }\n }\n }\n } else {\n throw new Error(\n `Gradient 
for tile operation is not implemented for rank-` +\n `${x.rank} tensors yet.`);\n }\n return xGrad;\n };\n return {x: derX};\n },\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {absGradConfig} from './gradients/Abs_grad';\nimport {acosGradConfig} from './gradients/Acos_grad';\nimport {acoshGradConfig} from './gradients/Acosh_grad';\nimport {addGradConfig} from './gradients/Add_grad';\nimport {addNGradConfig} from './gradients/AddN_grad';\nimport {argMaxGradConfig} from './gradients/ArgMax_grad';\nimport {argMinGradConfig} from './gradients/ArgMin_grad';\nimport {asinGradConfig} from './gradients/Asin_grad';\nimport {asinhGradConfig} from './gradients/Asinh_grad';\nimport {atan2GradConfig} from './gradients/Atan2_grad';\nimport {atanGradConfig} from './gradients/Atan_grad';\nimport {atanhGradConfig} from './gradients/Atanh_grad';\nimport {avgPool3DGradConfig} from './gradients/AvgPool3D_grad';\nimport {avgPoolGradConfig} from './gradients/AvgPool_grad';\nimport {batchMatMulGradConfig} from './gradients/BatchMatMul_grad';\nimport {batchToSpaceNDGradConfig} from './gradients/BatchToSpaceND_grad';\nimport {broadcastToGradConfig} from './gradients/BroadcastTo_grad';\nimport {castGradConfig} from './gradients/Cast_grad';\nimport {ceilGradConfig} from './gradients/Ceil_grad';\nimport {clipByValueGradConfig} from './gradients/ClipByValue_grad';\nimport {complexAbsGradConfig} from './gradients/ComplexAbs_grad';\nimport {concatGradConfig} from './gradients/Concat_grad';\nimport {conv2DGradConfig} from './gradients/Conv2D_grad';\nimport {conv2DBackpropInputGradConfig} from './gradients/Conv2DBackpropInput_grad';\nimport {conv3DGradConfig} from './gradients/Conv3D_grad';\nimport {cosGradConfig} from './gradients/Cos_grad';\nimport {coshGradConfig} from './gradients/Cosh_grad';\nimport {cumsumGradConfig} from './gradients/Cumsum_grad';\nimport {depthwiseConv2dNativeGradConfig} from './gradients/DepthwiseConv2dNative_grad';\nimport {dilation2dGradConfig} from './gradients/Dilation2D_grad';\nimport {eluGradConfig} from './gradients/Elu_grad';\nimport {erfGradConfig} from './gradients/Erf_grad';\nimport {expGradConfig} from './gradients/Exp_grad';\nimport {expandDimsGradConfig} from './gradients/ExpandDims_grad';\nimport {expm1GradConfig} from './gradients/Expm1_grad';\nimport {floorGradConfig} from './gradients/Floor_grad';\nimport {floorDivGradConfig} from './gradients/FloorDiv_grad';\nimport {fusedBatchNormGradConfig} from './gradients/FusedBatchNorm_grad';\nimport {gatherGradConfig} from './gradients/GatherV2_grad';\nimport {greaterEqualGradConfig} from './gradients/GreaterEqual_grad';\nimport {identityGradConfig} from './gradients/Identity_grad';\nimport {isFiniteGradConfig} from './gradients/IsFinite_grad';\nimport {isInfGradConfig} from './gradients/IsInf_grad';\nimport {isNanGradConfig} from 
'./gradients/IsNan_grad';\nimport {leakyReluGradConfig} from './gradients/LeakyRelu_grad';\nimport {log1pGradConfig} from './gradients/Log1p_grad';\nimport {logGradConfig} from './gradients/Log_grad';\nimport {logSoftmaxGradConfig} from './gradients/LogSoftmax_grad';\nimport {lrnGradConfig} from './gradients/LRN_grad';\nimport {maxGradConfig} from './gradients/Max_grad';\nimport {maximumGradConfig} from './gradients/Maximum_grad';\nimport {maxPool3DGradConfig} from './gradients/MaxPool3D_grad';\nimport {maxPoolGradConfig} from './gradients/MaxPool_grad';\nimport {meanGradConfig} from './gradients/Mean_grad';\nimport {minGradConfig} from './gradients/Min_grad';\nimport {minimumGradConfig} from './gradients/Minimum_grad';\nimport {mirrorPadGradConfig} from './gradients/MirrorPad_grad';\nimport {modGradConfig} from './gradients/Mod_grad';\nimport {multiplyGradConfig} from './gradients/Multiply_grad';\nimport {negGradConfig} from './gradients/Neg_grad';\nimport {oneHotGradConfig} from './gradients/OneHot_grad';\nimport {onesLikeGradConfig} from './gradients/OnesLike_grad';\nimport {packGradConfig} from './gradients/Pack_grad';\nimport {padV2GradConfig} from './gradients/PadV2_grad';\nimport {powGradConfig} from './gradients/Pow_grad';\nimport {preluGradConfig} from './gradients/Prelu_grad';\nimport {prodGradConfig} from './gradients/Prod_grad';\nimport {divGradConfig} from './gradients/RealDiv_grad';\nimport {reciprocalGradConfig} from './gradients/Reciprocal_grad';\nimport {relu6GradConfig} from './gradients/Relu6_grad';\nimport {reluGradConfig} from './gradients/Relu_grad';\nimport {reshapeGradConfig} from './gradients/Reshape_grad';\nimport {resizeBilinearGradConfig} from './gradients/ResizeBilinear_grad';\nimport {resizeNearestNeighborGradConfig} from './gradients/ResizeNearestNeighbor_grad';\nimport {reverseGradConfig} from './gradients/Reverse_grad';\nimport {roundGradConfig} from './gradients/Round_grad';\nimport {rsqrtGradConfig} from './gradients/Rsqrt_grad';\nimport {selectGradConfig} from './gradients/Select_grad';\nimport {seluGradConfig} from './gradients/Selu_grad';\nimport {sigmoidGradConfig} from './gradients/Sigmoid_grad';\nimport {signGradConfig} from './gradients/Sign_grad';\nimport {sinGradConfig} from './gradients/Sin_grad';\nimport {sinhGradConfig} from './gradients/Sinh_grad';\nimport {sliceGradConfig} from './gradients/Slice_grad';\nimport {softmaxGradConfig} from './gradients/Softmax_grad';\nimport {softplusGradConfig} from './gradients/Softplus_grad';\nimport {spaceToBatchNDGradConfig} from './gradients/SpaceToBatchND_grad';\nimport {splitVGradConfig} from './gradients/SplitV_grad';\nimport {sqrtGradConfig} from './gradients/Sqrt_grad';\nimport {squareGradConfig} from './gradients/Square_grad';\nimport {squaredDifferenceGradConfig} from './gradients/SquaredDifference_grad';\nimport {stepGradConfig} from './gradients/Step_grad';\nimport {subGradConfig} from './gradients/Sub_grad';\nimport {sumGradConfig} from './gradients/Sum_grad';\nimport {tanGradConfig} from './gradients/Tan_grad';\nimport {tanhGradConfig} from './gradients/Tanh_grad';\nimport {tileGradConfig} from './gradients/Tile_grad';\nimport {transposeGradConfig} from './gradients/Transpose_grad';\nimport {unpackGradConfig} from './gradients/Unpack_grad';\nimport {unsortedSegmentSumGradConfig} from './gradients/UnsortedSegmentSum_grad';\nimport {zerosLikeGradConfig} from './gradients/ZerosLike_grad';\nimport {GradConfig} from './kernel_registry';\nimport {registerGradient} from './kernel_registry';\n\n// 
Export all kernel configs here so that the package can auto register them\nconst gradConfigs: GradConfig[] = [\n absGradConfig,\n acosGradConfig,\n acoshGradConfig,\n addGradConfig,\n addNGradConfig,\n argMaxGradConfig,\n argMinGradConfig,\n asinGradConfig,\n asinhGradConfig,\n atan2GradConfig,\n atanGradConfig,\n atanhGradConfig,\n avgPool3DGradConfig,\n avgPoolGradConfig,\n batchMatMulGradConfig,\n batchToSpaceNDGradConfig,\n broadcastToGradConfig,\n castGradConfig,\n ceilGradConfig,\n clipByValueGradConfig,\n complexAbsGradConfig,\n concatGradConfig,\n conv2DBackpropInputGradConfig,\n conv2DGradConfig,\n conv3DGradConfig,\n cosGradConfig,\n coshGradConfig,\n cumsumGradConfig,\n depthwiseConv2dNativeGradConfig,\n dilation2dGradConfig,\n divGradConfig,\n eluGradConfig,\n erfGradConfig,\n expGradConfig,\n expandDimsGradConfig,\n expm1GradConfig,\n floorDivGradConfig,\n floorGradConfig,\n fusedBatchNormGradConfig,\n gatherGradConfig,\n greaterEqualGradConfig,\n identityGradConfig,\n isFiniteGradConfig,\n isInfGradConfig,\n isNanGradConfig,\n leakyReluGradConfig,\n log1pGradConfig,\n logGradConfig,\n logSoftmaxGradConfig,\n lrnGradConfig,\n maxGradConfig,\n maxGradConfig,\n maximumGradConfig,\n maxPool3DGradConfig,\n maxPoolGradConfig,\n meanGradConfig,\n minGradConfig,\n minimumGradConfig,\n mirrorPadGradConfig,\n modGradConfig,\n multiplyGradConfig,\n negGradConfig,\n oneHotGradConfig,\n onesLikeGradConfig,\n packGradConfig,\n padV2GradConfig,\n padV2GradConfig,\n powGradConfig,\n preluGradConfig,\n prodGradConfig,\n reciprocalGradConfig,\n relu6GradConfig,\n reluGradConfig,\n reshapeGradConfig,\n resizeBilinearGradConfig,\n resizeNearestNeighborGradConfig,\n reverseGradConfig,\n roundGradConfig,\n rsqrtGradConfig,\n selectGradConfig,\n seluGradConfig,\n sigmoidGradConfig,\n signGradConfig,\n sinGradConfig,\n sinhGradConfig,\n sliceGradConfig,\n softmaxGradConfig,\n softplusGradConfig,\n spaceToBatchNDGradConfig,\n spaceToBatchNDGradConfig,\n splitVGradConfig,\n splitVGradConfig,\n sqrtGradConfig,\n squaredDifferenceGradConfig,\n squareGradConfig,\n stepGradConfig,\n subGradConfig,\n sumGradConfig,\n tanGradConfig,\n tanhGradConfig,\n tileGradConfig,\n transposeGradConfig,\n unpackGradConfig,\n unsortedSegmentSumGradConfig,\n zerosLikeGradConfig\n];\n\nfor (const gradientConfig of gradConfigs) {\n registerGradient(gradientConfig);\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ZerosLike} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {zerosLike} from '../ops/zeros_like';\nimport {Tensor} from '../tensor';\n\nexport const zerosLikeGradConfig: GradConfig = {\n kernelName: ZerosLike,\n gradFunc: (dy: Tensor) => {\n return {x: () => zerosLike(dy)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
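The loop above registers every built-in GradConfig against its kernel name. A hedged sketch of the same mechanism for a custom kernel (not part of the bundled sources); it assumes registerGradient and mul are exported from '@tensorflow/tfjs-core', and 'MyCustomKernel' is a hypothetical kernel name:

// Illustrative only: attaching a gradient function to a (hypothetical) kernel name.
import {mul, registerGradient, Tensor} from '@tensorflow/tfjs-core';

registerGradient({
  kernelName: 'MyCustomKernel',  // hypothetical; a real config uses a registered kernel name
  inputsToSave: ['x'],
  gradFunc: (dy, saved) => {
    const [x] = saved;
    return {x: () => mul(dy as Tensor, x)};  // placeholder gradient rule for illustration
  },
});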
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {RealDiv} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport * as broadcast_util from '../ops/broadcast_util';\nimport {cast} from '../ops/cast';\nimport {div} from '../ops/div';\nimport {mul} from '../ops/mul';\nimport {neg} from '../ops/neg';\nimport {reshape} from '../ops/reshape';\nimport {square} from '../ops/square';\nimport {sum} from '../ops/sum';\nimport {Tensor} from '../tensor';\n\nexport const divGradConfig: GradConfig = {\n kernelName: RealDiv,\n inputsToSave: ['a', 'b'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [a, b] = saved;\n const outShape =\n broadcast_util.assertAndGetBroadcastShape(a.shape, b.shape);\n const derA = () => {\n const res = div(dy, cast(b, 'float32'));\n const reduceAxes = broadcast_util.getReductionAxes(a.shape, outShape);\n if (reduceAxes.length > 0) {\n return reshape(sum(res, reduceAxes), a.shape);\n }\n return res;\n };\n const derB = () => {\n let res = mul(dy, cast(a, 'float32'));\n const reduceAxes = broadcast_util.getReductionAxes(b.shape, outShape);\n if (reduceAxes.length > 0) {\n res = reshape(sum(res, reduceAxes), b.shape);\n }\n const tmp = square(b);\n return neg(div(res, cast(tmp, 'float32')));\n };\n return {a: derA, b: derB};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Mean, MeanAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {computeOutAndReduceShapes} from '../ops/axis_util';\nimport {div} from '../ops/div';\nimport {mul} from '../ops/mul';\nimport {ones} from '../ops/ones';\nimport {reshape} from '../ops/reshape';\nimport {Tensor} from '../tensor';\nimport * as util from '../util';\n\nexport const meanGradConfig: GradConfig = {\n kernelName: Mean,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const [x] = saved;\n const {axis} = attrs as unknown as MeanAttrs;\n const axes = util.parseAxisParam(axis, x.shape);\n const shapes = computeOutAndReduceShapes(x.shape, axes);\n const reduceShape = shapes[1];\n const reduceSize = util.sizeFromShape(reduceShape);\n\n const derX = () => {\n const expandedDyShape = x.shape.slice();\n axes.forEach(axis => {\n expandedDyShape[axis] = 1;\n });\n const expandedDy = reshape(dy, expandedDyShape);\n const res = div(mul(expandedDy, ones(x.shape, 'float32')), reduceSize);\n return res;\n };\n\n return {x: derX};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Min, MinAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {Tensor} from '../tensor';\nimport * as util from '../util';\n\nimport {gradForMinAndMax} from './min_max_grad_util';\n\nexport const minGradConfig: GradConfig = {\n kernelName: Min,\n inputsToSave: ['x'],\n outputsToSave: [true],\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const minAttrs: MinAttrs = attrs as unknown as MinAttrs;\n const {axis} = minAttrs;\n const [x, y] = saved;\n const origAxes = util.parseAxisParam(axis, x.shape);\n const minGrad = gradForMinAndMax(dy, y, x, origAxes);\n return {\n x: () => {\n return minGrad['x']();\n }\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
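The meanGradConfig above spreads the incoming gradient evenly over the reduced elements (dy / reduceSize). An illustrative sketch (not part of the bundled sources), assuming the public '@tensorflow/tfjs' API:

// Illustrative only: each element of a 4-element mean receives 1/4 of the gradient.
import * as tf from '@tensorflow/tfjs';

const dmean = tf.grad((x: tf.Tensor) => tf.mean(x));
dmean(tf.tensor1d([10, 20, 30, 40])).print();  // expected: [0.25, 0.25, 0.25, 0.25]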
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Minimum} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cast} from '../ops/cast';\nimport {greater} from '../ops/greater';\nimport {lessEqual} from '../ops/less_equal';\nimport {mul} from '../ops/mul';\nimport {Tensor} from '../tensor';\n\nexport const minimumGradConfig: GradConfig = {\n kernelName: Minimum,\n inputsToSave: ['a', 'b'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [a, b] = saved;\n const derA = () => mul(dy, cast(lessEqual(a, b), 'float32'));\n const derB = () => mul(dy, cast(greater(a, b), 'float32'));\n return {a: derA, b: derB};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {MirrorPad, MirrorPadAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {slice} from '../ops/slice';\nimport {Tensor} from '../tensor';\n\nexport const mirrorPadGradConfig: GradConfig = {\n kernelName: MirrorPad,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n // Pad introduces values around the original tensor, so the gradient\n // slices the original shape out of the gradient.\n const x = saved[0];\n const {paddings} = attrs as unknown as MirrorPadAttrs;\n const begin = paddings.map(p => p[0]);\n return {x: () => slice(dy, begin, x.shape)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Mod} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {assertAndGetBroadcastShape, getReductionAxes} from '../ops/broadcast_util';\nimport {div} from '../ops/div';\nimport {floor} from '../ops/floor';\nimport {mul} from '../ops/mul';\nimport {neg} from '../ops/neg';\nimport {reshape} from '../ops/reshape';\nimport {sum} from '../ops/sum';\nimport {Tensor} from '../tensor';\n\nexport const modGradConfig: GradConfig = {\n kernelName: Mod,\n inputsToSave: ['a', 'b'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [a, b] = saved;\n const outShape = assertAndGetBroadcastShape(a.shape, b.shape);\n\n const derA = () => {\n const reduceAxes = getReductionAxes(a.shape, outShape);\n if (reduceAxes.length > 0) {\n return reshape(sum(dy, reduceAxes), a.shape);\n }\n return dy;\n };\n const derB = () => {\n const res = mul(dy, neg(floor(div(a, b))));\n const reduceAxes = getReductionAxes(b.shape, outShape);\n if (reduceAxes.length > 0) {\n return reshape(sum(res, reduceAxes), b.shape);\n }\n return res;\n };\n return {a: derA, b: derB};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Multiply} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {assertAndGetBroadcastShape, getReductionAxes} from '../ops/broadcast_util';\nimport {cast} from '../ops/cast';\nimport {mul} from '../ops/mul';\nimport {reshape} from '../ops/reshape';\nimport {sum} from '../ops/sum';\nimport {Tensor} from '../tensor';\n\nexport const multiplyGradConfig: GradConfig = {\n kernelName: Multiply,\n inputsToSave: ['a', 'b'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [a, b] = saved;\n const outShape = assertAndGetBroadcastShape(a.shape, b.shape);\n\n const derA = () => {\n const res = mul(dy, cast(b, 'float32'));\n const reduceAxes = getReductionAxes(a.shape, outShape);\n if (reduceAxes.length > 0) {\n return reshape(sum(res, reduceAxes), a.shape);\n }\n return res;\n };\n const derB = () => {\n const res = mul(dy, cast(a, 'float32'));\n const reduceAxes = getReductionAxes(b.shape, outShape);\n if (reduceAxes.length > 0) {\n return reshape(sum(res, reduceAxes), b.shape);\n }\n return res;\n };\n return {a: derA, b: derB};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Neg} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {neg} from '../ops/neg';\nimport {Tensor} from '../tensor';\n\nexport const negGradConfig: GradConfig = {\n kernelName: Neg,\n gradFunc: (dy: Tensor) => {\n return {x: () => neg(dy)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
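The multiplyGradConfig above computes dy times the other input and then sums over broadcast axes so each gradient matches its input's shape. An illustrative sketch (not part of the bundled sources), assuming the public '@tensorflow/tfjs' API:

// Illustrative only: gradients of sum(a * b) where b is broadcast against a.
import * as tf from '@tensorflow/tfjs';

const [da, db] = tf.grads(
    (a: tf.Tensor, b: tf.Tensor) => tf.sum(tf.mul(a, b)))(
    [tf.tensor2d([[1, 2], [3, 4]]), tf.scalar(10)]);
da.print();  // expected: [[10, 10], [10, 10]]
db.print();  // expected: 1 + 2 + 3 + 4 = 10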
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OneHot} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {zeros} from '../ops/zeros';\nimport {Tensor} from '../tensor';\n\nexport const oneHotGradConfig: GradConfig = {\n kernelName: OneHot,\n inputsToSave: ['indices'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const indices = saved[0];\n return {indices: () => zeros(indices.shape, 'float32')};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OnesLike} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {zerosLike} from '../ops/zeros_like';\nimport {Tensor} from '../tensor';\n\nexport const onesLikeGradConfig: GradConfig = {\n kernelName: OnesLike,\n gradFunc: (dy: Tensor) => {\n return {x: () => zerosLike(dy)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Pack, PackAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {unstack} from '../ops/unstack';\nimport {Tensor} from '../tensor';\n\nexport const packGradConfig: GradConfig = {\n kernelName: Pack,\n saveAllInputs: true,\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const {axis} = attrs as unknown as PackAttrs;\n const derTensors = unstack(dy, axis);\n return derTensors.map(t => () => t) as {};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Reciprocal} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {div} from '../ops/div';\nimport {neg} from '../ops/neg';\nimport {square} from '../ops/square';\nimport {Tensor} from '../tensor';\n\nexport const reciprocalGradConfig: GradConfig = {\n kernelName: Reciprocal,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n return {x: () => div(dy, neg(square(x)))};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Relu} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cast} from '../ops/cast';\nimport {mul} from '../ops/mul';\nimport {step} from '../ops/step';\nimport {Tensor} from '../tensor';\n\nexport const reluGradConfig: GradConfig = {\n kernelName: Relu,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n return {x: () => mul(dy, cast(step(x), 'float32'))};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Reshape} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {reshape} from '../ops/reshape';\nimport {Tensor} from '../tensor';\n\nexport const reshapeGradConfig: GradConfig = {\n kernelName: Reshape,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n return {x: () => reshape(dy, x.shape)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {ResizeBilinear, ResizeBilinearGrad, ResizeBilinearGradInputs} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor, Tensor4D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\n\nexport const resizeBilinearGradConfig: GradConfig = {\n kernelName: ResizeBilinear,\n inputsToSave: ['images'],\n gradFunc: (dy: Tensor4D, saved: Tensor[], attrs: NamedAttrMap) => {\n const [images] = saved;\n\n const inputs: ResizeBilinearGradInputs = {dy, images};\n const imagesDer = () =>\n // tslint:disable-next-line: no-unnecessary-type-assertion\n ENGINE.runKernel(\n ResizeBilinearGrad, inputs as unknown as NamedTensorMap, attrs) as\n Tensor4D;\n\n return {images: imagesDer};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ENGINE} from '../engine';\nimport {ResizeNearestNeighbor, ResizeNearestNeighborGrad, ResizeNearestNeighborGradInputs} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {NamedAttrMap} from '../kernel_registry';\nimport {Tensor, Tensor4D} from '../tensor';\nimport {NamedTensorMap} from '../tensor_types';\n\nexport const resizeNearestNeighborGradConfig: GradConfig = {\n kernelName: ResizeNearestNeighbor,\n inputsToSave: ['images'],\n gradFunc: (dy: Tensor4D, saved: Tensor[], attrs: NamedAttrMap) => {\n const [images] = saved;\n\n const inputs: ResizeNearestNeighborGradInputs = {dy, images};\n const imagesDer = () =>\n // tslint:disable-next-line: no-unnecessary-type-assertion\n ENGINE.runKernel(\n ResizeNearestNeighborGrad, inputs as unknown as NamedTensorMap,\n attrs) as Tensor4D;\n\n return {images: imagesDer};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Reverse, ReverseAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {reverse} from '../ops/reverse';\nimport {Tensor} from '../tensor';\nimport {parseAxisParam} from '../util';\n\nexport const reverseGradConfig: GradConfig = {\n kernelName: Reverse,\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const {dims} = attrs as unknown as ReverseAttrs;\n const axes = parseAxisParam(dims, dy.shape);\n return {x: () => reverse(dy, axes)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Round} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {zerosLike} from '../ops/zeros_like';\nimport {Tensor} from '../tensor';\n\nexport const roundGradConfig: GradConfig = {\n kernelName: Round,\n gradFunc: (dy: Tensor) => {\n // TODO(nsthorat): Let gradients be null for cases where we want to stop\n // backpropgation.\n return {x: () => zerosLike(dy)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Rsqrt} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {div} from '../ops/div';\nimport {mul} from '../ops/mul';\nimport {neg} from '../ops/neg';\nimport {pow} from '../ops/pow';\nimport {Tensor} from '../tensor';\n\nexport const rsqrtGradConfig: GradConfig = {\n kernelName: Rsqrt,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n return {x: () => neg(div(dy, mul(pow(x, 1.5), 2)))};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Select} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cast} from '../ops/cast';\nimport {logicalNot} from '../ops/logical_not';\nimport {mul} from '../ops/mul';\nimport {zerosLike} from '../ops/zeros_like';\nimport {Tensor} from '../tensor';\n\nexport const selectGradConfig: GradConfig = {\n kernelName: Select,\n inputsToSave: ['condition'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [condition] = saved;\n return {\n // TODO(julianoks): Return null for condition gradient\n // when backprop supports it.\n condition: () => cast(zerosLike(condition), 'float32'),\n t: () => mul(dy, cast(condition, dy.dtype)),\n e: () => mul(dy, cast(logicalNot(condition), dy.dtype))\n };\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Sign} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {zerosLike} from '../ops/zeros_like';\nimport {Tensor} from '../tensor';\n\nexport const signGradConfig: GradConfig = {\n kernelName: Sign,\n gradFunc: (dy: Tensor) => {\n return {x: () => zerosLike(dy)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Sin} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cast} from '../ops/cast';\nimport {cos} from '../ops/cos';\nimport {mul} from '../ops/mul';\nimport {Tensor} from '../tensor';\n\nexport const sinGradConfig: GradConfig = {\n kernelName: Sin,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n return {x: () => mul(cos(cast(x, 'float32')), dy)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Sinh} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cast} from '../ops/cast';\nimport {cosh} from '../ops/cosh';\nimport {mul} from '../ops/mul';\nimport {Tensor} from '../tensor';\n\nexport const sinhGradConfig: GradConfig = {\n kernelName: Sinh,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n\n return {x: () => mul(cosh(cast(x, 'float32')), dy)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Slice, SliceAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {pad} from '../ops/pad';\nimport {parseSliceParams} from '../ops/slice_util';\nimport {Tensor} from '../tensor';\n\nexport const sliceGradConfig: GradConfig = {\n kernelName: Slice,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const [x] = saved;\n const {begin, size} = attrs as unknown as SliceAttrs;\n\n const inputShape = x.shape;\n const [begin_, size_] = parseSliceParams(x, begin, size);\n\n // Create an Nx2 padding where the first column represents how many\n // zeros are prepended (at start) for each dimension, and the second\n // column indicates how many zeros are appended (at end).\n\n // The number of zeros to append is the shape of the input\n // elementwise-subtracted by both the begin vector and sizes vector.\n const paddings: Array<[number, number]> = [];\n for (let i = 0; i < dy.rank; i++) {\n paddings.push([begin_[i], inputShape[i] - begin_[i] - size_[i]]);\n }\n return {x: () => pad(dy, paddings)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Softplus} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {mul} from '../ops/mul';\nimport {sigmoid} from '../ops/sigmoid';\nimport {Tensor} from '../tensor';\n\nexport const softplusGradConfig: GradConfig = {\n kernelName: Softplus,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n\n return {x: () => mul(dy, sigmoid(x))};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Sqrt} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cast} from '../ops/cast';\nimport {div} from '../ops/div';\nimport {mul} from '../ops/mul';\nimport {sqrt} from '../ops/sqrt';\nimport {Tensor} from '../tensor';\n\nexport const sqrtGradConfig: GradConfig = {\n kernelName: Sqrt,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n\n return {x: () => div(dy, mul(sqrt(cast(x, 'float32')), 2))};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {SquaredDifference} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {mul} from '../ops/mul';\nimport {scalar} from '../ops/scalar';\nimport {sub} from '../ops/sub';\nimport {Tensor} from '../tensor';\n\nexport const squaredDifferenceGradConfig: GradConfig = {\n kernelName: SquaredDifference,\n inputsToSave: ['a', 'b'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [a, b] = saved;\n const two = scalar(2);\n const derA = () => mul(dy, mul(two, sub(a, b)));\n const derB = () => mul(dy, mul(two, sub(b, a)));\n return {a: derA, b: derB};\n }\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Square} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cast} from '../ops/cast';\nimport {mul} from '../ops/mul';\nimport {Tensor} from '../tensor';\n\nexport const squareGradConfig: GradConfig = {\n kernelName: Square,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n return {x: () => mul(dy, mul(cast(x, 'float32'), 2))};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Step} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {zerosLike} from '../ops/zeros_like';\nimport {Tensor} from '../tensor';\n\nexport const stepGradConfig: GradConfig = {\n kernelName: Step,\n gradFunc: (dy: Tensor) => {\n // TODO(manrajgrover): Return null for gradients when backprop supports\n // it.\n return {x: () => zerosLike(dy)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Sub} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport * as broadcast_util from '../ops/broadcast_util';\nimport {neg} from '../ops/neg';\nimport {reshape} from '../ops/reshape';\nimport {sum} from '../ops/sum';\nimport {Tensor} from '../tensor';\n\nexport const subGradConfig: GradConfig = {\n kernelName: Sub,\n inputsToSave: ['a', 'b'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [a, b] = saved;\n const outShape =\n broadcast_util.assertAndGetBroadcastShape(a.shape, b.shape);\n\n const derA = () => {\n let res = dy;\n const reduceAxes = broadcast_util.getReductionAxes(a.shape, outShape);\n if (reduceAxes.length > 0) {\n res = sum(res, reduceAxes);\n }\n return reshape(res, a.shape);\n };\n const derB = () => {\n let res = dy;\n const reduceAxes = broadcast_util.getReductionAxes(b.shape, outShape);\n if (reduceAxes.length > 0) {\n res = sum(res, reduceAxes);\n }\n return reshape(neg(res), b.shape);\n };\n\n return {a: derA, b: derB};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google Inc. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Sum, SumAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {mul} from '../ops/mul';\nimport {ones} from '../ops/ones';\nimport {reshape} from '../ops/reshape';\nimport {Tensor} from '../tensor';\nimport {parseAxisParam} from '../util';\n\nexport const sumGradConfig: GradConfig = {\n kernelName: Sum,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const [x] = saved;\n const expandedDyShape = x.shape.slice();\n const {axis} = attrs as unknown as SumAttrs;\n\n const axes = parseAxisParam(axis, x.shape);\n axes.forEach(axis => {\n expandedDyShape[axis] = 1;\n });\n const expandedDy = reshape(dy, expandedDyShape);\n const derX = mul(expandedDy, ones(x.shape, 'float32'));\n\n return {x: () => derX};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tan} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {cos} from '../ops/cos';\nimport {div} from '../ops/div';\nimport {square} from '../ops/square';\nimport {Tensor} from '../tensor';\n\nexport const tanGradConfig: GradConfig = {\n kernelName: Tan,\n inputsToSave: ['x'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [x] = saved;\n\n return {x: () => div(dy, square(cos(x)))};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Transpose, TransposeAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport * as axis_util from '../ops/axis_util';\nimport {transpose} from '../ops/transpose';\nimport {Tensor} from '../tensor';\n\nexport const transposeGradConfig: GradConfig = {\n kernelName: Transpose,\n gradFunc: (dy: Tensor, saved: Tensor[], attrs: NamedAttrMap) => {\n const transposeAttrs: TransposeAttrs = attrs as unknown as TransposeAttrs;\n const {perm} = transposeAttrs;\n const undoPerm = axis_util.getUndoAxesPermutation(perm);\n return {x: () => transpose(dy, undoPerm)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google Inc. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Unpack, UnpackAttrs} from '../kernel_names';\nimport {GradConfig, NamedAttrMap} from '../kernel_registry';\nimport {stack} from '../ops/stack';\nimport {Tensor} from '../tensor';\n\nexport const unpackGradConfig: GradConfig = {\n kernelName: Unpack,\n gradFunc: (dy: Tensor[], saved: Tensor[], attrs: NamedAttrMap) => {\n const unpackAttrs: UnpackAttrs = attrs as unknown as UnpackAttrs;\n const {axis} = unpackAttrs;\n return {value: () => stack(dy, axis)};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {UnsortedSegmentSum} from '../kernel_names';\nimport {GradConfig} from '../kernel_registry';\nimport {expandDims} from '../ops/expand_dims';\nimport {gather} from '../ops/gather';\nimport {greaterEqual} from '../ops/greater_equal';\nimport {logicalAnd} from '../ops/logical_and';\nimport {maximum} from '../ops/maximum';\nimport {ones} from '../ops/ones';\nimport {scalar} from '../ops/scalar';\nimport {where} from '../ops/where';\nimport {zerosLike} from '../ops/zeros_like';\nimport {Tensor, Tensor1D} from '../tensor';\n\nexport const unsortedSegmentSumGradConfig: GradConfig = {\n kernelName: UnsortedSegmentSum,\n inputsToSave: ['segmentIds'],\n gradFunc: (dy: Tensor, saved: Tensor[]) => {\n const [segmentIds] = saved;\n\n const derX = () => {\n return gatherDropNegatives(dy, segmentIds as Tensor1D);\n };\n return {x: derX};\n }\n};\n\nfunction gatherDropNegatives(x: T, indices: Tensor1D) {\n // Helper function for unsorted segment ops. Gathers params for\n // positive segment ids and gathers 0 for inputs with negative segment id.\n // Mirrors _GatherDropNegatives from tensorflow/python/ops/math_grad.py\n const zeroClippedIndices = maximum(indices, zerosLike(indices));\n const gathered = gather(x, zeroClippedIndices as Tensor1D);\n let isPositive = greaterEqual(indices, scalar(0, 'int32'));\n const numIters = gathered.rank - isPositive.rank;\n for (let i = 0; i < numIters; ++i) {\n isPositive = expandDims(isPositive, i + 1);\n }\n isPositive = logicalAnd(isPositive, ones(gathered.shape, 'bool'));\n const zeroSlice = zerosLike(gathered);\n return where(isPositive, gathered, zeroSlice);\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {abs} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n abs(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.abs = function(this: T) {\n this.throwIfDisposed();\n return abs(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {acos} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n acos(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.acos = function(this: T) {\n this.throwIfDisposed();\n return acos(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {acosh} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n acosh(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.acosh = function(this: T) {\n this.throwIfDisposed();\n return acosh(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {add} from '../../ops/add';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n add(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.add = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return add(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {all} from '../../ops/all';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n all(this: T, axis?: number|number[], keepDims?: boolean):\n T;\n }\n}\n\ngetGlobalTensorClass().prototype.all = function(\n this: T, axis?: number|number[], keepDims?: boolean): T {\n this.throwIfDisposed();\n return all(this, axis, keepDims);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {any} from '../../ops/any';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n any(this: T, axis?: number|number[], keepDims?: boolean):\n T;\n }\n}\n\ngetGlobalTensorClass().prototype.any = function(\n this: T, axis?: number|number[], keepDims?: boolean): T {\n this.throwIfDisposed();\n return any(this, axis, keepDims);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {argMax} from '../../ops/arg_max';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n argMax(axis?: number): T;\n }\n}\n\ngetGlobalTensorClass().prototype.argMax = function(\n axis?: number): T {\n this.throwIfDisposed();\n return argMax(this, axis);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {argMin} from '../../ops/arg_min';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n argMin(axis?: number): T;\n }\n}\n\ngetGlobalTensorClass().prototype.argMin = function(\n axis: number): T {\n this.throwIfDisposed();\n return argMin(this, axis);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {reshape} from '../../ops/reshape';\nimport {getGlobalTensorClass, Scalar, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\nimport {assert} from '../../util';\n\ndeclare module '../../tensor' {\n interface Tensor {\n asScalar(): Scalar;\n }\n}\n\n/**\n * Converts a size-1 `tf.Tensor` to a `tf.Scalar`.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\ngetGlobalTensorClass().prototype.asScalar = function(this: T):\n Scalar {\n this.throwIfDisposed();\n assert(this.size === 1, () => 'The array must have only 1 element.');\n return reshape(this, []);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {cast} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {DataType, Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n asType(this: T, dtype: DataType): T;\n }\n}\n\n/**\n * Casts a `tf.Tensor` to a specified dtype.\n *\n * @param dtype Data-type to cast the tensor to.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\ngetGlobalTensorClass().prototype.asType = function(\n this: T, dtype: DataType): T {\n this.throwIfDisposed();\n return cast(this, dtype);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {reshape} from '../../ops/reshape';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n as1D(): Tensor1D;\n }\n}\n\n/**\n * Converts a `tf.Tensor` to a `tf.Tensor1D`.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\ngetGlobalTensorClass().prototype.as1D = function(): T {\n this.throwIfDisposed();\n return reshape(this, [this.size]) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {reshape} from '../../ops/reshape';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n as2D(rows: number, columns: number): Tensor2D;\n }\n}\n\n/**\n * Converts a `tf.Tensor` to a `tf.Tensor2D`.\n *\n * @param rows Number of rows in `tf.Tensor2D`.\n * @param columns Number of columns in `tf.Tensor2D`.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\ngetGlobalTensorClass().prototype.as2D = function(\n rows: number, columns: number): T {\n this.throwIfDisposed();\n return reshape(this, [rows, columns]) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {reshape} from '../../ops/reshape';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n as3D(rows: number, columns: number, depth: number):\n Tensor3D;\n }\n}\n\n/**\n * Converts a `tf.Tensor` to a `tf.Tensor3D`.\n *\n * @param rows Number of rows in `tf.Tensor3D`.\n * @param columns Number of columns in `tf.Tensor3D`.\n * @param depth Depth of `tf.Tensor3D`.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\ngetGlobalTensorClass().prototype.as3D = function(\n rows: number, columns: number, depth: number): T {\n this.throwIfDisposed();\n return reshape(this, [rows, columns, depth]) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {reshape} from '../../ops/reshape';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n as4D(\n rows: number, columns: number, depth: number, depth2: number): Tensor4D;\n }\n}\n\n/**\n * Converts a `tf.Tensor` to a `tf.Tensor4D`.\n *\n * @param rows Number of rows in `tf.Tensor4D`.\n * @param columns Number of columns in `tf.Tensor4D`.\n * @param depth Depth of `tf.Tensor4D`.\n * @param depth2 4th dimension of `tf.Tensor4D`.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\ngetGlobalTensorClass().prototype.as4D = function(\n rows: number, columns: number, depth: number, depth2: number): T {\n this.throwIfDisposed();\n return reshape(this, [rows, columns, depth, depth2]) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {reshape} from '../../ops/reshape';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n as5D(\n rows: number, columns: number, depth: number, depth2: number,\n depth3: number): Tensor5D;\n }\n}\n\n/**\n * Converts a `tf.Tensor` to a `tf.Tensor5D`.\n *\n * @param rows Number of rows in `tf.Tensor5D`.\n * @param columns Number of columns in `tf.Tensor5D`.\n * @param depth Depth of `tf.Tensor5D`.\n * @param depth2 4th dimension of `tf.Tensor5D`.\n * @param depth3 5th dimension of 'tf.Tensor5D'\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\ngetGlobalTensorClass().prototype.as5D = function(\n rows: number, columns: number, depth: number, depth2: number,\n depth3: number): T {\n this.throwIfDisposed();\n return reshape(this, [rows, columns, depth, depth2, depth3]) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {asin} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n asin(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.asin = function(this: T): T {\n this.throwIfDisposed();\n return asin(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {asinh} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n asinh(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.asinh = function(this: T):\n T {\n this.throwIfDisposed();\n return asinh(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {atan} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n atan(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.atan = function(this: T): T {\n this.throwIfDisposed();\n return atan(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {atan2} from '../../ops/atan2';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n atan2(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.atan2 = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return atan2(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {atanh} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n atanh(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.atanh = function(this: T):\n T {\n this.throwIfDisposed();\n return atanh(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ExplicitPadding} from '../../ops/conv_util';\nimport {avgPool} from '../../ops/avg_pool';\nimport {getGlobalTensorClass, Tensor3D, Tensor4D} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n avgPool(\n filterSize: [number, number]|number, strides: [number, number]|number,\n pad: 'valid'|'same'|number|ExplicitPadding,\n dimRoundingMode?: 'floor'|'round'|'ceil'): T;\n }\n}\n\ngetGlobalTensorClass().prototype.avgPool =\n function(\n this: T, filterSize: [number, number]|number,\n strides: [number, number]|number,\n pad: 'valid'|'same'|number|ExplicitPadding,\n dimRoundingMode?: 'floor'|'round'|'ceil'): T {\n this.throwIfDisposed();\n return avgPool(this, filterSize, strides, pad, dimRoundingMode);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {batchToSpaceND} from '../../ops/batch_to_space_nd';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n batchToSpaceND(blockShape: number[], crops: number[][]):\n Tensor;\n }\n}\n\ngetGlobalTensorClass().prototype.batchToSpaceND = function(\n blockShape: number[], crops: number[][]): Tensor {\n this.throwIfDisposed();\n return batchToSpaceND(this, blockShape, crops);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {batchNorm} from '../../ops/batchnorm';\nimport {getGlobalTensorClass, Tensor, Tensor1D} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n batchNorm(\n mean: Tensor|Tensor1D|TensorLike,\n variance: Tensor|Tensor1D|TensorLike,\n offset?: Tensor|Tensor1D|TensorLike,\n scale?: Tensor|Tensor1D|TensorLike,\n varianceEpsilon?: number): Tensor;\n }\n}\n\ngetGlobalTensorClass().prototype.batchNorm = function(\n mean: Tensor|Tensor1D|TensorLike,\n variance: Tensor|Tensor1D|TensorLike,\n offset?: Tensor|Tensor1D|TensorLike,\n scale?: Tensor|Tensor1D|TensorLike,\n varianceEpsilon?: number): Tensor {\n this.throwIfDisposed();\n return batchNorm(this, mean, variance, offset, scale, varianceEpsilon);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {broadcastTo} from '../../ops/broadcast_to';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, ShapeMap} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n broadcastTo(shape: ShapeMap[R]): Tensor;\n }\n}\n\ngetGlobalTensorClass().prototype.broadcastTo = function(\n shape: ShapeMap[R]): Tensor {\n this.throwIfDisposed();\n return broadcastTo(this, shape);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {cast} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {DataType, Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n cast(dtype: DataType): T;\n }\n}\n\ngetGlobalTensorClass().prototype.cast = function(\n dtype: DataType): T {\n this.throwIfDisposed();\n return cast(this, dtype) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {ceil} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n ceil(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.ceil = function(this: T): T {\n this.throwIfDisposed();\n return ceil(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {clipByValue} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n clipByValue(min: number, max: number): Tensor;\n }\n}\n\ngetGlobalTensorClass().prototype.clipByValue = function(\n min: number, max: number): T {\n this.throwIfDisposed();\n return clipByValue(this, min, max) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {concat} from '../../ops/concat';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n concat(tensors: T|Array, axis?: number): T;\n }\n}\n\ngetGlobalTensorClass().prototype.concat = function(\n x: T|Array, axis?: number): T {\n this.throwIfDisposed();\n if (x instanceof Tensor) {\n x = [x];\n }\n return concat([this, ...x], axis) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {conv1d} from '../../ops/conv1d';\nimport {ExplicitPadding} from '../../ops/conv_util';\nimport {getGlobalTensorClass, Tensor2D, Tensor3D} from '../../tensor';\nimport {Rank, TensorLike3D} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n conv1d(\n filter: Tensor3D|TensorLike3D, stride: number,\n pad: 'valid'|'same'|number|ExplicitPadding, dataFormat?: 'NWC'|'NCW',\n dilation?: number, dimRoundingMode?: 'floor'|'round'|'ceil'): T;\n }\n}\n\ngetGlobalTensorClass().prototype.conv1d = function(\n filter: Tensor3D|TensorLike3D, stride: number,\n pad: 'valid'|'same'|number|ExplicitPadding, dataFormat?: 'NWC'|'NCW',\n dilation?: number, dimRoundingMode?: 'floor'|'round'|'ceil'): T {\n this.throwIfDisposed();\n return conv1d(\n this, filter, stride, pad, dataFormat, dilation,\n dimRoundingMode) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {conv2dTranspose} from '../../ops/conv2d_transpose';\nimport {getGlobalTensorClass, Tensor3D, Tensor4D} from '../../tensor';\nimport {Rank, TensorLike4D} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n conv2dTranspose(\n filter: Tensor4D|TensorLike4D,\n outputShape: [number, number, number, number]|[number, number, number],\n strides: [number, number]|number, pad: 'valid'|'same'|number,\n dimRoundingMode?: 'floor'|'round'|'ceil'): T;\n }\n}\n\ngetGlobalTensorClass().prototype.conv2dTranspose =\n function(\n filter: Tensor4D|TensorLike4D,\n outputShape: [number, number, number, number]|[number, number, number],\n strides: [number, number]|number, pad: 'valid'|'same'|number,\n dimRoundingMode?: 'floor'|'round'|'ceil'): T {\n this.throwIfDisposed();\n return conv2dTranspose(\n this, filter, outputShape, strides, pad, dimRoundingMode) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {conv2d} from '../../ops/conv2d';\nimport {getGlobalTensorClass, Tensor3D, Tensor4D} from '../../tensor';\nimport {Rank, TensorLike4D} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n conv2d(\n filter: Tensor4D|TensorLike4D, strides: [number, number]|number,\n pad: 'valid'|'same'|number, dataFormat?: 'NHWC'|'NCHW',\n dilations?: [number, number]|number,\n dimRoundingMode?: 'floor'|'round'|'ceil'): T;\n }\n}\n\ngetGlobalTensorClass().prototype.conv2d = function(\n filter: Tensor4D|TensorLike4D, strides: [number, number]|number,\n pad: 'valid'|'same'|number, dataFormat?: 'NHWC'|'NCHW',\n dilations?: [number, number]|number,\n dimRoundingMode?: 'floor'|'round'|'ceil'): T {\n this.throwIfDisposed();\n return conv2d(\n this, filter, strides, pad, dataFormat, dilations,\n dimRoundingMode) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {cos} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n cos(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.cos = function(this: T): T {\n this.throwIfDisposed();\n return cos(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {cosh} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n cosh(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.cosh = function(this: T): T {\n this.throwIfDisposed();\n return cosh(this);\n};\n","/**\n * @license\n * Copyright 2022 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the 'License');\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an 'AS IS' BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport { cumprod } from '../../ops/cumprod';\nimport { getGlobalTensorClass, Tensor } from '../../tensor';\nimport { Rank } from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n cumprod(\n axis?: number,\n exclusive?: boolean,\n reverse?: boolean\n ): Tensor;\n }\n}\n\ngetGlobalTensorClass().prototype.cumprod = function (\n axis?: number,\n exclusive?: boolean,\n reverse?: boolean\n): Tensor {\n this.throwIfDisposed();\n return cumprod(this, axis, exclusive, reverse);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {cumsum} from '../../ops/cumsum';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n cumsum(\n axis?: number, exclusive?: boolean, reverse?: boolean): Tensor;\n }\n}\n\ngetGlobalTensorClass().prototype.cumsum = function(\n axis?: number, exclusive?: boolean, reverse?: boolean): Tensor {\n this.throwIfDisposed();\n return cumsum(this, axis, exclusive, reverse);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {depthToSpace} from '../../ops/depth_to_space';\nimport {getGlobalTensorClass, Tensor4D} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n depthToSpace(\n blockSize: number, dataFormat: 'NHWC'|'NCHW'): T;\n }\n}\n\ngetGlobalTensorClass().prototype.depthToSpace = function(\n blockSize: number, dataFormat: 'NHWC'|'NCHW'): T {\n this.throwIfDisposed();\n return depthToSpace(this, blockSize, dataFormat) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {depthwiseConv2d} from '../../ops/depthwise_conv2d';\nimport {getGlobalTensorClass, Tensor3D, Tensor4D} from '../../tensor';\nimport {Rank, TensorLike4D} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n depthwiseConv2d(\n filter: Tensor4D|TensorLike4D, strides: [number, number]|number,\n pad: 'valid'|'same'|number, dataFormat?: 'NHWC'|'NCHW',\n dilations?: [number, number]|number,\n dimRoundingMode?: 'floor'|'round'|'ceil'): T;\n }\n}\n\ngetGlobalTensorClass().prototype.depthwiseConv2d =\n function(\n filter: Tensor4D|TensorLike4D, strides: [number, number]|number,\n pad: 'valid'|'same'|number, dataFormat?: 'NHWC'|'NCHW',\n dilations?: [number, number]|number,\n dimRoundingMode?: 'floor'|'round'|'ceil'): T {\n this.throwIfDisposed();\n return depthwiseConv2d(\n this, filter, strides, pad, dataFormat, dilations,\n dimRoundingMode) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {dilation2d} from '../../ops/dilation2d';\nimport {getGlobalTensorClass, Tensor3D, Tensor4D} from '../../tensor';\nimport {Rank, TensorLike3D} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n dilation2d(\n filter: Tensor3D|TensorLike3D, strides: [number, number]|number,\n pad: 'valid'|'same', dilations?: [number, number]|number,\n dataFormat?: 'NHWC'): T;\n }\n}\n\ngetGlobalTensorClass().prototype.dilation2d =\n function(\n filter: Tensor3D|TensorLike3D, strides: [number, number]|number,\n pad: 'valid'|'same', dilations?: [number, number]|number,\n dataFormat?: 'NHWC'): T {\n this.throwIfDisposed();\n return dilation2d(this, filter, strides, pad, dilations, dataFormat) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {divNoNan} from '../../ops/div_no_nan';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n divNoNan(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.divNoNan = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return divNoNan(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {div} from '../../ops/div';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n div(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.div = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return div(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {dot} from '../../ops/dot';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n dot(b: Tensor|TensorLike): Tensor;\n }\n}\n\ngetGlobalTensorClass().prototype.dot = function(\n b: T|TensorLike): Tensor {\n this.throwIfDisposed();\n return dot(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {elu} from '../../ops/elu';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n elu(): T;\n }\n}\n\ngetGlobalTensorClass().prototype.elu = function(this: T): T {\n this.throwIfDisposed();\n return elu(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {equal} from '../../ops/equal';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n equal(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.equal = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return equal(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {erf} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n erf(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.erf = function(this: T): T {\n this.throwIfDisposed();\n return erf(this);\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {euclideanNorm} from '../../ops/euclidean_norm';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n euclideanNorm(\n this: T, axis?: number|number[], keepDims?: boolean): T;\n }\n}\n\ngetGlobalTensorClass().prototype.euclideanNorm = function(\n this: T, axis?: number|number[], keepDims?: boolean): T {\n this.throwIfDisposed();\n return euclideanNorm(this, axis, keepDims) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {exp} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n exp(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.exp = function(this: T): T {\n this.throwIfDisposed();\n return exp(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {expandDims} from '../../ops/expand_dims';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n expandDims(axis?: number): T;\n }\n}\n\ngetGlobalTensorClass().prototype.expandDims = function(\n axis?: number): T {\n this.throwIfDisposed();\n return expandDims(this, axis);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {expm1} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n expm1(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.expm1 = function(this: T):\n T {\n this.throwIfDisposed();\n return expm1(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {fft} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n fft(this: Tensor): Tensor;\n }\n}\n\ngetGlobalTensorClass().prototype.fft = function(this: Tensor):\n T {\n this.throwIfDisposed();\n return fft(this) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {reshape} from '../../ops/reshape';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n flatten(): Tensor1D;\n }\n}\n\n/**\n * Flatten a Tensor to a 1D array.\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\ngetGlobalTensorClass().prototype.flatten = function(): T {\n this.throwIfDisposed();\n return reshape(this, [this.size]) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {floor} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n floor(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.floor = function(this: T):\n T {\n this.throwIfDisposed();\n return floor(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {floorDiv} from '../../ops/floorDiv';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n floorDiv(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.floorDiv = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return floorDiv(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {gather} from '../../ops/gather';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n gather(\n this: T, indices: Tensor|TensorLike, axis?: number): T;\n }\n}\n\ngetGlobalTensorClass().prototype.gather = function(\n this: T, indices: Tensor|TensorLike, axis?: number): T {\n this.throwIfDisposed();\n return gather(this, indices, axis);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {greaterEqual} from '../../ops/greater_equal';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n greaterEqual(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.greaterEqual = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return greaterEqual(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {greater} from '../../ops/greater';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n greater(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.greater = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return greater(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {ifft} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n ifft(this: Tensor): Tensor;\n }\n}\n\ngetGlobalTensorClass().prototype.ifft = function(\n this: Tensor): T {\n this.throwIfDisposed();\n return ifft(this) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {irfft} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n irfft(this: Tensor): Tensor;\n }\n}\n\ngetGlobalTensorClass().prototype.irfft = function(\n this: Tensor): T {\n this.throwIfDisposed();\n return irfft(this) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {isFinite} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n isFinite(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.isFinite = function(this: T):\n T {\n this.throwIfDisposed();\n return isFinite(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {isInf} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n isInf(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.isInf = function(this: T):\n T {\n this.throwIfDisposed();\n return isInf(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {isNaN} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n isNaN(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.isNaN = function(this: T):\n T {\n this.throwIfDisposed();\n return isNaN(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {leakyRelu} from '../../ops/leaky_relu';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n leakyRelu(alpha: number): T;\n }\n}\n\ngetGlobalTensorClass().prototype.leakyRelu = function(\n this: T, alpha: number): T {\n this.throwIfDisposed();\n return leakyRelu(this, alpha);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {lessEqual} from '../../ops/less_equal';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n lessEqual(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.lessEqual = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return lessEqual(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {less} from '../../ops/less';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n less(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.less = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return less(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {localResponseNormalization} from '../../ops/local_response_normalization';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n localResponseNormalization(\n depthRadius?: number, bias?: number, alpha?: number, beta?: number): T;\n }\n}\n\ngetGlobalTensorClass().prototype.localResponseNormalization =\n function(\n depthRadius?: number, bias?: number, alpha?: number, beta?: number): T {\n this.throwIfDisposed();\n return localResponseNormalization(this, depthRadius, bias, alpha, beta) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {logSigmoid} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n logSigmoid(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.logSigmoid = function(\n this: T): T {\n this.throwIfDisposed();\n return logSigmoid(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {logSoftmax} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n logSoftmax(this: T, axis?: number): T;\n }\n}\n\ngetGlobalTensorClass().prototype.logSoftmax = function(\n this: T, axis?: number): T {\n this.throwIfDisposed();\n return logSoftmax(this, axis);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {logSumExp} from '../../ops/log_sum_exp';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n logSumExp(\n this: T, axis?: number|number[], keepDims?: boolean): T;\n }\n}\n\ngetGlobalTensorClass().prototype.logSumExp = function(\n this: T, axis?: number|number[], keepDims?: boolean): T {\n this.throwIfDisposed();\n return logSumExp(this, axis, keepDims);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {log} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n log(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.log = function(this: T): T {\n this.throwIfDisposed();\n return log(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {log1p} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n log1p(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.log1p = function(this: T):\n T {\n this.throwIfDisposed();\n return log1p(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {logicalAnd} from '../../ops/logical_and';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n logicalAnd(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.logicalAnd = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return logicalAnd(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {logicalNot} from '../../ops/logical_not';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n logicalNot(): T;\n }\n}\n\ngetGlobalTensorClass().prototype.logicalNot = function(): T {\n this.throwIfDisposed();\n return logicalNot(this) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {logicalOr} from '../../ops/logical_or';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n logicalOr(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.logicalOr = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return logicalOr(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {logicalXor} from '../../ops/logical_xor';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n logicalXor(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.logicalXor = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return logicalXor(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {matMul} from '../../ops/mat_mul';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n matMul(\n b: Tensor|TensorLike, transposeA?: boolean,\n transposeB?: boolean): Tensor;\n }\n}\n\ngetGlobalTensorClass().prototype.matMul = function(\n this: T, b: Tensor|TensorLike, transposeA?: boolean,\n transposeB?: boolean): Tensor {\n this.throwIfDisposed();\n return matMul(this, b, transposeA, transposeB);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ExplicitPadding} from '../../ops/conv_util';\nimport {maxPool} from '../../ops/max_pool';\nimport {getGlobalTensorClass, Tensor3D, Tensor4D} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n maxPool(\n filterSize: [number, number]|number, strides: [number, number]|number,\n pad: 'valid'|'same'|number|ExplicitPadding,\n dimRoundingMode?: 'floor'|'round'|'ceil'): T;\n }\n}\n\ngetGlobalTensorClass().prototype.maxPool =\n function(\n this: T, filterSize: [number, number]|number,\n strides: [number, number]|number,\n pad: 'valid'|'same'|number|ExplicitPadding,\n dimRoundingMode?: 'floor'|'round'|'ceil'): T {\n this.throwIfDisposed();\n return maxPool(this, filterSize, strides, pad, dimRoundingMode);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {max} from '../../ops/max';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n max(axis?: number|number[], keepDims?: boolean): T;\n }\n}\n\ngetGlobalTensorClass().prototype.max = function(\n axis?: number|number[], keepDims?: boolean): T {\n this.throwIfDisposed();\n return max(this, axis, keepDims);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {maximum} from '../../ops/maximum';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n maximum(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.maximum = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return maximum(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {mean} from '../../ops/mean';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n mean(axis?: number|number[], keepDims?: boolean): T;\n }\n}\n\ngetGlobalTensorClass().prototype.mean = function(\n axis?: number|number[], keepDims?: boolean): T {\n this.throwIfDisposed();\n return mean(this, axis, keepDims);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {min} from '../../ops/min';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n min(axis?: number|number[], keepDims?: boolean): T;\n }\n}\n\ngetGlobalTensorClass().prototype.min = function(\n axis?: number|number[], keepDims?: boolean): T {\n this.throwIfDisposed();\n return min(this, axis, keepDims);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {minimum} from '../../ops/minimum';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n minimum(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.minimum = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return minimum(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {mirrorPad} from '../../ops/mirror_pad';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n mirrorPad(\n paddings: Array<[number, number]>, mode: 'reflect'|'symmetric'): T;\n }\n}\n\ngetGlobalTensorClass().prototype.mirrorPad = function(\n this: T, paddings: Array<[number, number]>,\n mode: 'reflect'|'symmetric'): T {\n this.throwIfDisposed();\n return mirrorPad(this, paddings, mode);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {mod} from '../../ops/mod';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n mod(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.mod = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return mod(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {mul} from '../../ops/mul';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n mul(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.mul = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return mul(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {neg} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n neg(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.neg = function(this: T): T {\n this.throwIfDisposed();\n return neg(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {norm} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n norm(\n ord?: number|'euclidean'|'fro', axis?: number|number[],\n keepDims?: boolean): Tensor;\n }\n}\n\ngetGlobalTensorClass().prototype.norm = function(\n ord?: number|'euclidean'|'fro', axis?: number|number[],\n keepDims?: boolean) {\n this.throwIfDisposed();\n return norm(this, ord, axis, keepDims) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {notEqual} from '../../ops/not_equal';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n notEqual(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.notEqual = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return notEqual(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {oneHot} from '../../ops/one_hot';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n oneHot(depth: number, onValue: number, offValue: number): Tensor;\n }\n}\n\ngetGlobalTensorClass().prototype.oneHot = function(\n depth: number, onValue = 1, offValue = 0): Tensor {\n this.throwIfDisposed();\n return oneHot(this, depth, onValue, offValue);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {onesLike} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n onesLike(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.onesLike = function(this: T):\n T {\n this.throwIfDisposed();\n return onesLike(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {pad} from '../../ops/pad';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n pad(\n paddings: Array<[number, number]>, constantValue?: number): T;\n }\n}\n\ngetGlobalTensorClass().prototype.pad = function(\n this: T, paddings: Array<[number, number]>, constantValue: number): T {\n this.throwIfDisposed();\n return pad(this, paddings, constantValue);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {ExplicitPadding} from '../../ops/conv_util';\nimport {pool} from '../../ops/pool';\nimport {getGlobalTensorClass, Tensor3D, Tensor4D} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n pool(\n windowShape: [number, number]|number, poolingType: 'avg'|'max',\n padding: 'valid'|'same'|number|ExplicitPadding,\n diationRate?: [number, number]|number,\n strides?: [number, number]|number,\n dimRoundingMode?: 'floor'|'round'|'ceil'): T;\n }\n}\n\ngetGlobalTensorClass().prototype.pool = function(\n this: T, windowShape: [number, number]|number, poolingType: 'max'|'avg',\n padding: 'valid'|'same'|number|ExplicitPadding,\n dilationRate?: [number, number]|number,\n strides?: [number, number]|number,\n dimRoundingMode?: 'floor'|'round'|'ceil'): T {\n this.throwIfDisposed();\n return pool(this, windowShape, poolingType, padding, dilationRate, strides,\n dimRoundingMode);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {pow} from '../../ops/pow';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n pow(exp: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.pow = function(\n exp: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return pow(this, exp);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {prelu} from '../../ops/prelu';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n prelu(alpha: T|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.prelu = function(\n this: T, alpha: T|TensorLike): T {\n this.throwIfDisposed();\n return prelu(this, alpha);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {prod} from '../../ops/prod';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n prod(this: T, axis?: number|number[], keepDims?: boolean):\n T;\n }\n}\n\ngetGlobalTensorClass().prototype.prod = function(\n this: T, axis?: number|number[], keepDims?: boolean): T {\n this.throwIfDisposed();\n return prod(this, axis, keepDims);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {reciprocal} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n reciprocal(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.reciprocal = function(\n this: T): T {\n this.throwIfDisposed();\n return reciprocal(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {relu} from '../../ops/relu';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n relu(): T;\n }\n}\n\ngetGlobalTensorClass().prototype.relu = function(this: T): T {\n this.throwIfDisposed();\n return relu(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {relu6} from '../../ops/relu6';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n relu6(): T;\n }\n}\n\ngetGlobalTensorClass().prototype.relu6 = function(this: T):\n T {\n this.throwIfDisposed();\n return relu6(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {reshape} from '../../ops/reshape';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n reshapeAs(x: T): T;\n }\n}\n\n/**\n * Reshapes the tensor into the shape of the provided tensor.\n *\n * @param x The tensor of required shape.\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\ngetGlobalTensorClass().prototype.reshapeAs = function(x: T):\n T {\n this.throwIfDisposed();\n return reshape(this, x.shape) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {reshape} from '../../ops/reshape';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n reshape(shape: number[]): T;\n }\n}\n\ngetGlobalTensorClass().prototype.reshape = function(\n shape: number[]): T {\n this.throwIfDisposed();\n return reshape(this, shape) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {resizeBilinear} from '../../ops/image/resize_bilinear';\nimport {getGlobalTensorClass, Tensor3D, Tensor4D} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n resizeBilinear(\n newShape2D: [number, number], alignCorners?: boolean,\n halfPixelCenters?: boolean): T;\n }\n}\n\ngetGlobalTensorClass().prototype.resizeBilinear =\n function(\n this: T, newShape2D: [number, number], alignCorners?: boolean,\n halfPixelCenters?: boolean): T {\n this.throwIfDisposed();\n return resizeBilinear(this, newShape2D, alignCorners, halfPixelCenters);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {resizeNearestNeighbor} from '../../ops/image/resize_nearest_neighbor';\nimport {getGlobalTensorClass, Tensor3D, Tensor4D} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n resizeNearestNeighbor(\n newShape2D: [number, number], alignCorners?: boolean,\n halfFloatCenters?: boolean): T;\n }\n}\n\ngetGlobalTensorClass().prototype.resizeNearestNeighbor =\n function(\n this: T, newShape2D: [number, number], alignCorners?: boolean,\n halfFloatCenters?: boolean): T {\n this.throwIfDisposed();\n return resizeNearestNeighbor(\n this, newShape2D, alignCorners, halfFloatCenters);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {reverse} from '../../ops/reverse';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n reverse(this: T, axis?: number|number[]): T;\n }\n}\n\ngetGlobalTensorClass().prototype.reverse = function(\n this: T, axis?: number|number[]): T {\n this.throwIfDisposed();\n return reverse(this, axis);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {rfft} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n rfft(this: Tensor): Tensor;\n }\n}\n\ngetGlobalTensorClass().prototype.rfft = function(\n this: Tensor): T {\n this.throwIfDisposed();\n return rfft(this) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {round} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n round(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.round = function(this: T):\n T {\n this.throwIfDisposed();\n return round(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {rsqrt} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n rsqrt(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.rsqrt = function(this: T):\n T {\n this.throwIfDisposed();\n return rsqrt(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {selu} from '../../ops/selu';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n selu(): T;\n }\n}\n\ngetGlobalTensorClass().prototype.selu = function(this: T): T {\n this.throwIfDisposed();\n return selu(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {separableConv2d} from '../../ops/separable_conv2d';\nimport {getGlobalTensorClass, Tensor3D, Tensor4D} from '../../tensor';\nimport {Rank, TensorLike, TensorLike4D} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n separableConv2d(\n depthwiseFilter: Tensor4D|TensorLike4D,\n pointwiseFilter: Tensor4D|TensorLike, strides: [number, number]|number,\n pad: 'valid'|'same', dilation?: [number, number]|number,\n dataFormat?: 'NHWC'|'NCHW'): T;\n }\n}\n\ngetGlobalTensorClass().prototype.separableConv2d =\n function(\n depthwiseFilter: Tensor4D|TensorLike4D,\n pointwiseFilter: Tensor4D|TensorLike, strides: [number, number]|number,\n pad: 'valid'|'same', dilation?: [number, number]|number,\n dataFormat?: 'NHWC'|'NCHW'): T {\n this.throwIfDisposed();\n return separableConv2d(\n this, depthwiseFilter, pointwiseFilter, strides, pad, dilation,\n dataFormat) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {sigmoid} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n sigmoid(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.sigmoid = function(this: T):\n T {\n this.throwIfDisposed();\n return sigmoid(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {sign} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n sign(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.sign = function(this: T): T {\n this.throwIfDisposed();\n return sign(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {sin} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n sin(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.sin = function(this: T): T {\n this.throwIfDisposed();\n return sin(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {sinh} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n sinh(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.sinh = function(this: T): T {\n this.throwIfDisposed();\n return sinh(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {slice} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n slice(\n this: T, begin: number|number[], size?: number|number[]): T;\n }\n}\n\ngetGlobalTensorClass().prototype.slice = function(\n this: T, begin: number|number[], size?: number|number[]): T {\n this.throwIfDisposed();\n return slice(this, begin, size);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {softmax} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n softmax(this: T, dim?: number): T;\n }\n}\n\ngetGlobalTensorClass().prototype.softmax = function(\n this: T, dim: number): T {\n this.throwIfDisposed();\n return softmax(this, dim);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {softplus} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n softplus(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.softplus = function(this: T):\n T {\n this.throwIfDisposed();\n return softplus(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {spaceToBatchND} from '../../ops/space_to_batch_nd';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n spaceToBatchND(blockShape: number[], paddings: number[][]):\n Tensor;\n }\n}\n\ngetGlobalTensorClass().prototype.spaceToBatchND = function(\n blockShape: number[], paddings: number[][]): Tensor {\n this.throwIfDisposed();\n return spaceToBatchND(this, blockShape, paddings);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {split} from '../../ops/split';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n split(numOrSizeSplits: number[]|number, axis?: number):\n T[];\n }\n}\n\ngetGlobalTensorClass().prototype.split = function(\n numOrSizeSplits: number[]|number, axis?: number): T[] {\n this.throwIfDisposed();\n return split(this, numOrSizeSplits, axis);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {sqrt} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n sqrt(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.sqrt = function(this: T): T {\n this.throwIfDisposed();\n return sqrt(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {square} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n square(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.square = function(this: T):\n T {\n this.throwIfDisposed();\n return square(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {squaredDifference} from '../../ops/squared_difference';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n squaredDifference(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.squaredDifference = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return squaredDifference(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {squeeze} from '../../ops/squeeze';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n squeeze(axis?: number[]): T;\n }\n}\n\ngetGlobalTensorClass().prototype.squeeze = function(\n axis?: number[]): T {\n this.throwIfDisposed();\n return squeeze(this, axis);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {stack} from '../../ops/stack';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n stack(x: Tensor|Tensor[], axis?: number): T;\n }\n}\n\ngetGlobalTensorClass().prototype.stack = function(\n x: Tensor|Tensor[], axis?: number): T {\n this.throwIfDisposed();\n const tensorsToBeStacked = x instanceof Tensor ? [this, x] : [this, ...x];\n return stack(tensorsToBeStacked, axis) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {step} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n step(this: T, alpha?: number): T;\n }\n}\n\ngetGlobalTensorClass().prototype.step = function(\n this: T, alpha?: number) {\n this.throwIfDisposed();\n return step(this, alpha);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {stridedSlice} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n stridedSlice(\n this: Tensor, begin: number[], end: number[], strides: number[],\n beginMask?: number, endMask?: number, ellipsisMask?: number,\n newAxisMask?: number, shrinkAxisMask?: number): Tensor;\n }\n}\n\ngetGlobalTensorClass().prototype.stridedSlice = function(\n this: Tensor, begin: number[], end: number[], strides: number[],\n beginMask?: number, endMask?: number, ellipsisMask?: number,\n newAxisMask?: number, shrinkAxisMask?: number): T {\n this.throwIfDisposed();\n return stridedSlice(\n this, begin, end, strides, beginMask, endMask, ellipsisMask,\n newAxisMask, shrinkAxisMask) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {sub} from '../../ops/sub';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n sub(b: Tensor|TensorLike): T;\n }\n}\n\ngetGlobalTensorClass().prototype.sub = function(\n b: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return sub(this, b);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {sum} from '../../ops/sum';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n sum(axis?: number|number[], keepDims?: boolean): T;\n }\n}\n\ngetGlobalTensorClass().prototype.sum = function(\n axis?: number|number[], keepDims?: boolean): T {\n this.throwIfDisposed();\n return sum(this, axis, keepDims);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {tan} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n tan(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.tan = function(this: T): T {\n this.throwIfDisposed();\n return tan(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {tanh} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n tanh(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.tanh = function(this: T): T {\n this.throwIfDisposed();\n return tanh(this);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {tile} from '../../ops/tile';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n tile(b: number[]): T;\n }\n}\n\ngetGlobalTensorClass().prototype.tile = function(\n reps: number[]): T {\n this.throwIfDisposed();\n return tile(this, reps) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {cast} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n toBool(this: T): T;\n }\n}\n\n/**\n * Casts the array to type `bool`\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\ngetGlobalTensorClass().prototype.toBool = function(this: T):\n T {\n this.throwIfDisposed();\n return cast(this, 'bool');\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {cast} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n toFloat(this: T): T;\n }\n}\n\n/**\n * Casts the array to type `float32`\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\ngetGlobalTensorClass().prototype.toFloat = function(this: T):\n T {\n this.throwIfDisposed();\n return cast(this, 'float32');\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {cast} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n toInt(this: T): T;\n }\n}\n\n/**\n * Casts the array to type `int32`\n *\n * @doc {heading: 'Tensors', subheading: 'Classes'}\n */\ngetGlobalTensorClass().prototype.toInt = function(this: T):\n T {\n this.throwIfDisposed();\n return cast(this, 'int32');\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {topk} from '../../ops/topk';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n topk(this: T, k?: number, sorted?: boolean):\n {values: T, indices: T};\n }\n}\n\ngetGlobalTensorClass().prototype.topk = function(\n this: T, k?: number, sorted?: boolean): {values: T, indices: T} {\n this.throwIfDisposed();\n return topk(this, k, sorted) as {values: T, indices: T};\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {transpose} from '../../ops/transpose';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n transpose(perm?: number[]): T;\n }\n}\n\ngetGlobalTensorClass().prototype.transpose = function(\n this: T, perm?: number[]): T {\n this.throwIfDisposed();\n return transpose(this, perm);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {unique} from '../../ops/unique';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n unique(this: T, axis?: number): {values: T, indices: T};\n }\n}\n\ngetGlobalTensorClass().prototype.unique = function(\n this: T, axis?: number): {values: T, indices: T} {\n this.throwIfDisposed();\n return unique(this, axis) as {values: T, indices: T};\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {unsortedSegmentSum} from '../../ops/unsorted_segment_sum';\nimport {getGlobalTensorClass, Tensor, Tensor1D} from '../../tensor';\nimport {Rank, TensorLike1D} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n unsortedSegmentSum(\n this: T, segmentIds: Tensor1D|TensorLike1D, numSegments: number): T;\n }\n}\n\ngetGlobalTensorClass().prototype.unsortedSegmentSum =\n function(\n this: T, segmentIds: Tensor1D|TensorLike1D, numSegments: number): T {\n this.throwIfDisposed();\n return unsortedSegmentSum(this, segmentIds, numSegments);\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {unstack} from '../../ops/unstack';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n unstack(axis?: number): T[];\n }\n}\n\ngetGlobalTensorClass().prototype.unstack = function(\n axis?: number): T[] {\n this.throwIfDisposed();\n return unstack(this, axis) as T[];\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {where} from '../../ops/where';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank, TensorLike} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n where(condition: Tensor|TensorLike, x: Tensor|TensorLike):\n T;\n }\n}\n\ngetGlobalTensorClass().prototype.where = function(\n condition: Tensor|TensorLike, x: Tensor|TensorLike): T {\n this.throwIfDisposed();\n return where(condition, this, x) as T;\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// TODO update import path once op is modularized.\nimport {zerosLike} from '../../ops/ops';\nimport {getGlobalTensorClass, Tensor} from '../../tensor';\nimport {Rank} from '../../types';\n\ndeclare module '../../tensor' {\n interface Tensor {\n zerosLike(this: T): T;\n }\n}\n\ngetGlobalTensorClass().prototype.zerosLike = function(\n this: T): T {\n this.throwIfDisposed();\n return zerosLike(this);\n};\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * Explicit error types.\n *\n * See the following link for more information about why the code includes\n * calls to setPrototypeOf:\n *\n * https://github.com/Microsoft/TypeScript-wiki/blob/master/Breaking-Changes.md#extending-built-ins-like-error-array-and-map-may-no-longer-work\n */\n// tslint:enable\n\n/**\n * Equivalent of Python's AttributeError.\n */\nexport class AttributeError extends Error {\n constructor(message?: string) {\n super(message);\n // Set the prototype explicitly.\n Object.setPrototypeOf(this, AttributeError.prototype);\n }\n}\n\n/**\n * Equivalent of Python's RuntimeError.\n */\nexport class RuntimeError extends Error {\n constructor(message?: string) {\n super(message);\n // Set the prototype explicitly.\n Object.setPrototypeOf(this, RuntimeError.prototype);\n }\n}\n\n/**\n * Equivalent of Python's ValueError.\n */\nexport class ValueError extends Error {\n constructor(message?: string) {\n super(message);\n // Set the prototype 
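// The op files embedded above all repeat one pattern from the bundled TensorFlow.js
// sources: augment the Tensor interface via `declare module`, then attach the method to
// getGlobalTensorClass().prototype so it can be called in a chained style. The sketch
// below is illustrative only: the `negate` op, its import path and the generic
// parameters are assumptions (the bundle strings above appear to have angle-bracketed
// generics stripped), not part of this commit.
import {neg} from '../../ops/ops';
import {getGlobalTensorClass, Tensor} from '../../tensor';
import {Rank} from '../../types';

declare module '../../tensor' {
  interface Tensor<R extends Rank = Rank> {
    negate<T extends Tensor>(this: T): T;   // hypothetical chained method
  }
}

getGlobalTensorClass().prototype.negate = function<T extends Tensor>(this: T): T {
  this.throwIfDisposed();                   // same disposal guard used by every op above
  return neg(this);                         // delegate to the functional op
};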
explicitly.\n Object.setPrototypeOf(this, ValueError.prototype);\n }\n}\n\n/**\n * Equivalent of Python's NotImplementedError.\n */\nexport class NotImplementedError extends Error {\n constructor(message?: string) {\n super(message);\n // Set the prototype explicitly.\n Object.setPrototypeOf(this, NotImplementedError.prototype);\n }\n}\n\n/**\n * Equivalent of Python's AssertionError.\n */\nexport class AssertionError extends Error {\n constructor(message?: string) {\n super(message);\n // Set the prototype explicitly.\n Object.setPrototypeOf(this, AssertionError.prototype);\n }\n}\n\n/**\n * Equivalent of Python's IndexError.\n */\nexport class IndexError extends Error {\n constructor(message?: string) {\n super(message);\n // Set the prototype explicitly.\n Object.setPrototypeOf(this, IndexError.prototype);\n }\n}\n","/**\n * @license\n * Copyright 2022 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n/**\n * LruCache: A mapping from the String to T. If the number of the entries is\n * exceeding the `maxEntries`, the LruCache will delete the least recently\n * used entry.\n */\n\nexport class LruCache {\n private cache: Map;\n private maxEntries: number;\n\n constructor(maxEntries?: number) {\n this.maxEntries = maxEntries || 100;\n this.cache = new Map();\n }\n\n /**\n * Get the entry for the key and mark it as used recently.\n */\n public get(key: string): T {\n let entry: T;\n if (this.cache.has(key)) {\n entry = this.cache.get(key);\n this.cache.delete(key);\n this.cache.set(key, entry);\n }\n return entry;\n }\n\n /**\n * Put the entry into the cache. If the key already existed, mark the key as\n * used recently.\n */\n public put(key: string, value: T): void {\n if (this.cache.has(key)) {\n this.cache.delete(key);\n } else if (this.cache.size >= this.maxEntries) {\n const keyToDelete = this.cache.keys().next().value;\n this.cache.delete(keyToDelete);\n }\n this.cache.set(key, value);\n }\n\n /**\n * Get the MaxEntries of the cache.\n */\n public getMaxEntries(): number {\n return this.maxEntries;\n }\n\n /**\n * Set the MaxEntries of the cache. 
If the maxEntries is decreased, reduce\n * entries in the cache.\n */\n public setMaxEntries(maxEntries: number): void {\n if (maxEntries < 0) {\n throw new Error(\n `The maxEntries of LRU caches must be at least 0, but got ${\n maxEntries}.`);\n }\n\n if (this.maxEntries > maxEntries) {\n for (let i = 0; i < this.maxEntries - maxEntries; i++) {\n const keyToDelete = this.cache.keys().next().value;\n this.cache.delete(keyToDelete);\n }\n }\n\n this.maxEntries = maxEntries;\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/* Original source: utils/generic_utils.py */\n\nimport {DataType, fused, serialization, util} from '@tensorflow/tfjs-core';\n\nimport {AssertionError, ValueError} from '../errors';\n\n// tslint:enable\n\n/**\n * If `value` is an Array, equivalent to Python's `value * numValues`.\n * If `value` is not an Array, equivalent to Python's `[value] * numValues`\n */\n// tslint:disable-next-line:no-any\nexport function pyListRepeat(value: any, numValues: number): any[] {\n if (Array.isArray(value)) {\n // tslint:disable-next-line:no-any\n let newArray: any[] = [];\n for (let i = 0; i < numValues; i++) {\n newArray = newArray.concat(value);\n }\n return newArray;\n } else {\n const newArray = new Array(numValues);\n newArray.fill(value);\n return newArray;\n }\n}\n\nexport function assert(val: boolean, message?: string): void {\n if (!val) {\n throw new AssertionError(message);\n }\n}\n\n/**\n * Count the number of elements of the `array` that are equal to `reference`.\n */\nexport function count(array: T[], refernce: T) {\n let counter = 0;\n for (const item of array) {\n if (item === refernce) {\n counter++;\n }\n }\n return counter;\n}\n\n/**\n * If an array is of length 1, just return the first element. Otherwise, return\n * the full array.\n * @param tensors\n */\nexport function singletonOrArray(xs: T[]): T|T[] {\n if (xs.length === 1) {\n return xs[0];\n }\n return xs;\n}\n\n/**\n * Normalizes a list/tensor into a list.\n *\n * If a tensor is passed, we return\n * a list of size 1 containing the tensor.\n *\n * @param x target object to be normalized.\n */\n// tslint:disable-next-line:no-any\nexport function toList(x: any): any[] {\n if (Array.isArray(x)) {\n return x;\n }\n return [x];\n}\n\n/**\n * Generate a UID for a list\n */\n// tslint:disable-next-line:no-any\nexport function objectListUid(objs: any|any[]): string {\n const objectList = toList(objs);\n let retVal = '';\n for (const obj of objectList) {\n if (obj.id == null) {\n throw new ValueError(\n `Object ${obj} passed to objectListUid without an id`);\n }\n if (retVal !== '') {\n retVal = retVal + ', ';\n }\n retVal = `${retVal}${Math.abs(obj.id)}`;\n }\n return retVal;\n}\n/**\n * Converts string to snake-case.\n * @param name\n */\nexport function toSnakeCase(name: string): string {\n const intermediate = name.replace(/(.)([A-Z][a-z0-9]+)/g, '$1_$2');\n const insecure =\n intermediate.replace(/([a-z])([A-Z])/g, '$1_$2').toLowerCase();\n /*\n If the class is private the name starts with \"_\" which is not secure\n for creating scopes. 
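// The LruCache embedded above keys its eviction off Map insertion order: get() re-inserts
// a hit so it becomes the newest entry, and put() drops the first (oldest) key once
// maxEntries is reached. A short usage sketch, assuming the class above is importable:
const cache = new LruCache<string>(2);  // capacity of two entries
cache.put('a', 'A');
cache.put('b', 'B');
cache.get('a');                         // marks 'a' as recently used
cache.put('c', 'C');                    // evicts 'b', the least recently used key
console.log(cache.get('b'));            // undefined
console.log(cache.get('a'));            // 'A'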
We prefix the name with \"private\" in this case.\n */\n if (insecure[0] !== '_') {\n return insecure;\n }\n return 'private' + insecure;\n}\n\nexport function toCamelCase(identifier: string): string {\n // quick return for empty string or single character strings\n if (identifier.length <= 1) {\n return identifier;\n }\n // Check for the underscore indicating snake_case\n if (identifier.indexOf('_') === -1) {\n return identifier;\n }\n return identifier.replace(/[_]+(\\w|$)/g, (m, p1) => p1.toUpperCase());\n}\n\n// tslint:disable-next-line:no-any\nlet _GLOBAL_CUSTOM_OBJECTS = {} as {[objName: string]: any};\n\nexport function serializeKerasObject(instance: serialization.Serializable):\n serialization.ConfigDictValue {\n if (instance === null || instance === undefined) {\n return null;\n }\n const dict: serialization.ConfigDictValue = {};\n dict['className'] = instance.getClassName();\n dict['config'] = instance.getConfig();\n return dict;\n}\n\n/**\n * Replace ndarray-style scalar objects in serialization objects with numbers.\n *\n * Background: In some versions of tf.keras, certain scalar values in the HDF5\n * model save file can be serialized as: `{'type': 'ndarray', 'value': num}`,\n * where in `num` is a plain number. This method converts such serialization\n * to a `number`.\n *\n * @param config The keras-format serialization object to be processed\n * (in place).\n */\nfunction convertNDArrayScalarsInConfig(config: serialization.ConfigDictValue):\n void {\n if (config == null || typeof config !== 'object') {\n return;\n } else if (Array.isArray(config)) {\n config.forEach(configItem => convertNDArrayScalarsInConfig(configItem));\n } else {\n const fields = Object.keys(config);\n for (const field of fields) {\n const value = config[field];\n if (value != null && typeof value === 'object') {\n if (!Array.isArray(value) && value['type'] === 'ndarray' &&\n typeof value['value'] === 'number') {\n config[field] = value['value'];\n } else {\n convertNDArrayScalarsInConfig(value as serialization.ConfigDict);\n }\n }\n }\n }\n}\n\n/**\n * Deserialize a saved Keras Object\n * @param identifier either a string ID or a saved Keras dictionary\n * @param moduleObjects a list of Python class names to object constructors\n * @param customObjects a list of Python class names to object constructors\n * @param printableModuleName debug text for the object being reconstituted\n * @param fastWeightInit Optional flag to use fast weight initialization\n * during deserialization. This is applicable to cases in which\n * the initialization will be immediately overwritten by loaded weight\n * values. Default: `false`.\n * @returns a TensorFlow.js Layers object\n */\n// tslint:disable:no-any\nexport function deserializeKerasObject(\n identifier: string|serialization.ConfigDict,\n moduleObjects = {} as {[objName: string]: any},\n customObjects = {} as {[objName: string]: any},\n printableModuleName = 'object', fastWeightInit = false): any {\n // tslint:enable\n if (typeof identifier === 'string') {\n const functionName = identifier;\n let fn;\n if (functionName in customObjects) {\n fn = customObjects[functionName];\n } else if (functionName in _GLOBAL_CUSTOM_OBJECTS) {\n fn = _GLOBAL_CUSTOM_OBJECTS[functionName];\n } else {\n fn = moduleObjects[functionName];\n if (fn == null) {\n throw new ValueError(\n `Unknown ${printableModuleName}: ${identifier}. ` +\n `This may be due to one of the following reasons:\\n` +\n `1. 
The ${printableModuleName} is defined in Python, in which ` +\n `case it needs to be ported to TensorFlow.js or your JavaScript ` +\n `code.\\n` +\n `2. The custom ${printableModuleName} is defined in JavaScript, ` +\n `but is not registered properly with ` +\n `tf.serialization.registerClass().`);\n // TODO(cais): Add link to tutorial page on custom layers.\n }\n }\n return fn;\n } else {\n // In this case we are dealing with a Keras config dictionary.\n const config = identifier;\n if (config['className'] == null || config['config'] == null) {\n throw new ValueError(\n `${printableModuleName}: Improper config format: ` +\n `${JSON.stringify(config)}.\\n` +\n `'className' and 'config' must set.`);\n }\n const className = config['className'] as string;\n let cls, fromConfig;\n if (className in customObjects) {\n [cls, fromConfig] = customObjects[className];\n } else if (className in _GLOBAL_CUSTOM_OBJECTS) {\n [cls, fromConfig] = _GLOBAL_CUSTOM_OBJECTS['className'];\n } else if (className in moduleObjects) {\n [cls, fromConfig] = moduleObjects[className];\n }\n if (cls == null) {\n throw new ValueError(\n `Unknown ${printableModuleName}: ${className}. ` +\n `This may be due to one of the following reasons:\\n` +\n `1. The ${printableModuleName} is defined in Python, in which ` +\n `case it needs to be ported to TensorFlow.js or your JavaScript ` +\n `code.\\n` +\n `2. The custom ${printableModuleName} is defined in JavaScript, ` +\n `but is not registered properly with ` +\n `tf.serialization.registerClass().`);\n // TODO(cais): Add link to tutorial page on custom layers.\n }\n if (fromConfig != null) {\n // Porting notes: Instead of checking to see whether fromConfig accepts\n // customObjects, we create a customObjects dictionary and tack it on to\n // config['config'] as config['config'].customObjects. Objects can use it,\n // if they want.\n\n // tslint:disable-next-line:no-any\n const customObjectsCombined = {} as {[objName: string]: any};\n for (const key of Object.keys(_GLOBAL_CUSTOM_OBJECTS)) {\n customObjectsCombined[key] = _GLOBAL_CUSTOM_OBJECTS[key];\n }\n for (const key of Object.keys(customObjects)) {\n customObjectsCombined[key] = customObjects[key];\n }\n // Add the customObjects to config\n const nestedConfig = config['config'] as serialization.ConfigDict;\n nestedConfig['customObjects'] = customObjectsCombined;\n\n const backupCustomObjects = {..._GLOBAL_CUSTOM_OBJECTS};\n for (const key of Object.keys(customObjects)) {\n _GLOBAL_CUSTOM_OBJECTS[key] = customObjects[key];\n }\n convertNDArrayScalarsInConfig(config['config']);\n const returnObj =\n fromConfig(cls, config['config'], customObjects, fastWeightInit);\n _GLOBAL_CUSTOM_OBJECTS = {...backupCustomObjects};\n\n return returnObj;\n } else {\n // Then `cls` may be a function returning a class.\n // In this case by convention `config` holds\n // the kwargs of the function.\n const backupCustomObjects = {..._GLOBAL_CUSTOM_OBJECTS};\n for (const key of Object.keys(customObjects)) {\n _GLOBAL_CUSTOM_OBJECTS[key] = customObjects[key];\n }\n // In python this is **config['config'], for tfjs-layers we require\n // classes that use this fall-through construction method to take\n // a config interface that mimics the expansion of named parameters.\n const returnObj = new cls(config['config']);\n _GLOBAL_CUSTOM_OBJECTS = {...backupCustomObjects};\n return returnObj;\n }\n }\n}\n\n/**\n * Compares two numbers for sorting.\n * @param a\n * @param b\n */\nexport function numberCompare(a: number, b: number) {\n return (a < b) ? 
-1 : ((a > b) ? 1 : 0);\n}\n\n/**\n * Comparison of two numbers for reverse sorting.\n * @param a\n * @param b\n */\nexport function reverseNumberCompare(a: number, b: number) {\n return -1 * numberCompare(a, b);\n}\n\n/**\n * Convert a string into the corresponding DType.\n * @param dtype\n * @returns An instance of DType.\n */\nexport function stringToDType(dtype: string): DataType {\n switch (dtype) {\n case 'float32':\n return 'float32';\n default:\n throw new ValueError(`Invalid dtype: ${dtype}`);\n }\n}\n\n/**\n * Test the element-by-element equality of two Arrays of strings.\n * @param xs First array of strings.\n * @param ys Second array of strings.\n * @returns Wether the two arrays are all equal, element by element.\n */\nexport function stringsEqual(xs: string[], ys: string[]): boolean {\n if (xs == null || ys == null) {\n return xs === ys;\n }\n if (xs.length !== ys.length) {\n return false;\n }\n for (let i = 0; i < xs.length; ++i) {\n if (xs[i] !== ys[i]) {\n return false;\n }\n }\n return true;\n}\n\n/**\n * Get the unique elements of an array.\n * @param xs Array.\n * @returns An Array consisting of the unique elements in `xs`.\n */\nexport function unique(xs: T[]): T[] {\n if (xs == null) {\n return xs;\n }\n const out: T[] = [];\n // TODO(cais): Maybe improve performance by sorting.\n for (const x of xs) {\n if (out.indexOf(x) === -1) {\n out.push(x);\n }\n }\n return out;\n}\n\n/**\n * Determine if an Object is empty (i.e., does not have own properties).\n * @param obj Object\n * @returns Whether the Object is empty.\n * @throws ValueError: If object is `null` or `undefined`.\n */\nexport function isObjectEmpty(obj: {}): boolean {\n if (obj == null) {\n throw new ValueError(`Invalid value in obj: ${JSON.stringify(obj)}`);\n }\n for (const key in obj) {\n if (obj.hasOwnProperty(key)) {\n return false;\n }\n }\n return true;\n}\n\n/**\n * Helper function used to build type union/enum run-time checkers.\n * @param values The list of allowed values.\n * @param label A string name for the type\n * @param value The value to test.\n * @throws ValueError: If the value is not in values nor `undefined`/`null`.\n */\nexport function checkStringTypeUnionValue(\n values: string[], label: string, value: string): void {\n if (value == null) {\n return;\n }\n if (values.indexOf(value) < 0) {\n throw new ValueError(`${value} is not a valid ${label}. Valid values are ${\n values} or null/undefined.`);\n }\n}\n\n/**\n * Helper function for verifying the types of inputs.\n *\n * Ensures that the elements of `x` are all of type `expectedType`.\n * Also verifies that the length of `x` is within bounds.\n *\n * @param x Object to test.\n * @param expectedType The string expected type of all of the elements in the\n * Array.\n * @param minLength Return false if x.length is less than this.\n * @param maxLength Return false if x.length is greater than this.\n * @returns true if and only if `x` is an `Array` with\n * length >= `minLength` and <= `maxLength`.\n */\n// tslint:disable:no-any\nexport function checkArrayTypeAndLength(\n x: any, expectedType: string, minLength = 0,\n maxLength = Infinity): boolean {\n assert(minLength >= 0);\n assert(maxLength >= minLength);\n return (\n Array.isArray(x) && x.length >= minLength && x.length <= maxLength &&\n x.every(e => typeof e === expectedType));\n}\n// tslint:enable:no-any\n\n/**\n * Assert that a value or an array of value are positive integer.\n *\n * @param value The value being asserted on. 
May be a single number or an array\n * of numbers.\n * @param name Name of the value, used to make the error message.\n */\nexport function assertPositiveInteger(value: number|number[], name: string) {\n if (Array.isArray(value)) {\n util.assert(\n value.length > 0, () => `${name} is unexpectedly an empty array.`);\n value.forEach(\n (v, i) => assertPositiveInteger(v, `element ${i + 1} of ${name}`));\n } else {\n util.assert(\n Number.isInteger(value) && value > 0,\n () => `Expected ${name} to be a positive integer, but got ` +\n `${formatAsFriendlyString(value)}.`);\n }\n}\n\n/**\n * Format a value into a display-friendly, human-readable fashion.\n *\n * - `null` is formatted as `'null'`\n * - Strings are formated with flanking pair of quotes.\n * - Arrays are formatted with flanking pair of square brackets.\n *\n * @param value The value to display.\n * @return Formatted string.\n */\n// tslint:disable-next-line:no-any\nexport function formatAsFriendlyString(value: any): string {\n if (value === null) {\n return 'null';\n } else if (Array.isArray(value)) {\n return '[' + value.map(v => formatAsFriendlyString(v)).join(',') + ']';\n } else if (typeof value === 'string') {\n return `\"${value}\"`;\n } else {\n return `${value}`;\n }\n}\n\n/**\n * Returns a function `f2` (decorator) which wraps the original function\n * `f`. `f2` guarantees that `f` can be called at most once\n * every `waitMs` ms. If `f2` is called more often, it will return\n * the last returned result of `f`.\n *\n * @param f The original function `f` to wrap.\n * @param waitMs The time between two consecutive calls to `f` in ms.\n */\nexport function debounce(\n f: (...args: Array<{}>) => T, waitMs: number,\n nowFunc?: Function): (...args: Array<{}>) => T {\n let lastTime = nowFunc != null ? nowFunc() : util.now();\n let lastResult: T;\n const f2 = (...args: Array<{}>) => {\n const now = nowFunc != null ? 
nowFunc() : util.now();\n if (now - lastTime < waitMs) {\n return lastResult;\n }\n lastTime = now;\n lastResult = f(...args);\n return lastResult;\n };\n return f2;\n}\n\n/**\n * Returns the fusable activation given a layers identifier.\n *\n * @param activationName The layers identifier string.\n * @return The name of the fusable activation.\n */\nexport function mapActivationToFusedKernel(activationName: string):\n fused.Activation {\n if (activationName === 'relu') {\n return 'relu';\n }\n if (activationName === 'linear') {\n return 'linear';\n }\n if (activationName === 'elu') {\n return 'elu';\n }\n return null;\n}\n\ntype PossibleValues = Array>;\n\n/**\n * Returns the cartesian product of sets of values.\n * This works the same as itertools.product in Python.\n *\n * Example:\n *\n * filters = [128, 256, 512]\n * paddings = ['same', 'valid']\n *\n * product = [ [128, 'same'], [128, 'valid'], [256, 'same'], [256, 'valid'],\n * [512, 'same'], [512, 'valid']]\n *\n * @param arrayOfValues List/array of values.\n * @return The cartesian product.\n */\nexport function getCartesianProductOfValues(...arrayOfValues: PossibleValues):\n PossibleValues {\n assert(arrayOfValues.length > 0, 'arrayOfValues is empty');\n\n for (const values of arrayOfValues) {\n assert(Array.isArray(values), 'one of the values is not an array');\n assert(values.length > 0, 'one of the values is empty');\n }\n\n return arrayOfValues.reduce((products, values) => {\n if (products.length === 0) {\n return values.map(value => [value]);\n }\n\n return values\n .map(value => {\n return products.map((prevValue) => [...prevValue, value]);\n })\n .reduce((flattenedProduct, unflattenedProduct) => {\n return flattenedProduct.concat(unflattenedProduct);\n }, []);\n }, [] as PossibleValues);\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * Utilities related to persistent state in the backend.\n */\n\n/**\n * An ID to track `tf.SymbolicTensor`s and derived classes.\n * Required in different places in engine/topology.ts to identify unique\n * tensors.\n */\nlet _nextUniqueTensorId = 0;\n\nexport function getNextUniqueTensorId(): number {\n return _nextUniqueTensorId++;\n}\n\nconst _uidPrefixes: {[prefix: string]: number} = {};\n\n/**\n * Provides a unique UID given a string prefix.\n *\n * @param prefix\n */\nexport function getUid(prefix = ''): string {\n if (!(prefix in _uidPrefixes)) {\n _uidPrefixes[prefix] = 0;\n }\n _uidPrefixes[prefix] += 1;\n return prefix + _uidPrefixes[prefix].toString();\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n// TODO(huan): add layer-specific input shape types (see: https://github.com/tensorflow/tfjs-layers/pull/492)\n/** @docalias (null | number)[] */\nexport type Shape = Array;\n\n// The tfjs-core version of DataType must stay synced with this.\nexport type DataType = 'float32'|'int32'|'bool'|'complex64'|'string';\n\n// TODO(soergel): Move the CamelCase versions back out of keras_format\n// e.g. to src/common.ts. 
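// debounce() above only re-invokes the wrapped function once `waitMs` has elapsed since
// the previous invocation (or since construction); earlier calls return the cached last
// result. A sketch using the optional `nowFunc` parameter as a fake clock, assuming the
// helper above is importable:
let fakeTime = 0;
const clock = () => fakeTime;                     // injected in place of util.now()
let evaluations = 0;
const limited = debounce(() => ++evaluations, 1000, clock);

fakeTime = 1000;
console.log(limited());   // 1  -> full wait elapsed since construction, wrapped fn runs
fakeTime = 1500;
console.log(limited());   // 1  -> only 500 ms later, cached result returned
fakeTime = 2500;
console.log(limited());   // 2  -> another full second later, runs again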
Maybe even duplicate *all* of these to be pedantic?\n/** @docinline */\nexport type DataFormat = 'channelsFirst'|'channelsLast';\nexport const VALID_DATA_FORMAT_VALUES = ['channelsFirst', 'channelsLast'];\n\nexport type InterpolationFormat = 'nearest'|'bilinear';\nexport const VALID_INTERPOLATION_FORMAT_VALUES = ['nearest', 'bilinear'];\n// These constants have a snake vs. camel distinction.\nexport type DataFormatSerialization = 'channels_first'|'channels_last';\n\n/** @docinline */\nexport type PaddingMode = 'valid'|'same'|'causal';\nexport const VALID_PADDING_MODE_VALUES = ['valid', 'same', 'causal'];\n\n/** @docinline */\nexport type PoolMode = 'max'|'avg';\nexport const VALID_POOL_MODE_VALUES = ['max', 'avg'];\n\n/** @docinline */\nexport type BidirectionalMergeMode = 'sum'|'mul'|'concat'|'ave';\nexport const VALID_BIDIRECTIONAL_MERGE_MODES = ['sum', 'mul', 'concat', 'ave'];\n\n/** @docinline */\nexport type SampleWeightMode = 'temporal';\nexport const VALID_SAMPLE_WEIGHT_MODES = ['temporal'];\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * Common functions for TensorFlow.js Layers.\n */\nimport {VALID_DATA_FORMAT_VALUES, VALID_INTERPOLATION_FORMAT_VALUES, VALID_PADDING_MODE_VALUES, VALID_POOL_MODE_VALUES} from './keras_format/common';\nimport {checkStringTypeUnionValue} from './utils/generic_utils';\n\n// A map from the requested scoped name of a Tensor to the number of Tensors\n// wanting that name so far. This allows enforcing name uniqueness by appending\n// an incrementing index, e.g. scope/name, scope/name_1, scope/name_2, etc.\nconst nameMap: Map = new Map();\n\nexport function checkDataFormat(value?: string): void {\n checkStringTypeUnionValue(VALID_DATA_FORMAT_VALUES, 'DataFormat', value);\n}\n\nexport function checkInterpolationFormat(value?: string): void {\n checkStringTypeUnionValue(\n VALID_INTERPOLATION_FORMAT_VALUES, 'InterpolationFormat', value);\n}\n\nexport function checkPaddingMode(value?: string): void {\n checkStringTypeUnionValue(VALID_PADDING_MODE_VALUES, 'PaddingMode', value);\n}\n\nexport function checkPoolMode(value?: string): void {\n checkStringTypeUnionValue(VALID_POOL_MODE_VALUES, 'PoolMode', value);\n}\n\nconst _nameScopeStack: string[] = [];\nconst _nameScopeDivider = '/';\n\n/**\n * Enter namescope, which can be nested.\n */\nexport function nameScope(name: string, fn: () => T): T {\n _nameScopeStack.push(name);\n try {\n const val: T = fn();\n _nameScopeStack.pop();\n return val;\n } catch (e) {\n _nameScopeStack.pop();\n throw e;\n }\n}\n\n/**\n * Get the current namescope as a flat, concatenated string.\n */\nfunction currentNameScopePrefix(): string {\n if (_nameScopeStack.length === 0) {\n return '';\n } else {\n return _nameScopeStack.join(_nameScopeDivider) + _nameScopeDivider;\n }\n}\n\n/**\n * Get the name a Tensor (or Variable) would have if not uniqueified.\n * @param tensorName\n * @return Scoped name string.\n */\nexport function getScopedTensorName(tensorName: string): string {\n if (!isValidTensorName(tensorName)) {\n throw new Error('Not a valid tensor name: \\'' + tensorName + '\\'');\n }\n return currentNameScopePrefix() + tensorName;\n}\n\n/**\n * Get unique names for Tensors and Variables.\n * @param scopedName The fully-qualified name of the Tensor, i.e. 
as produced by\n * `getScopedTensorName()`.\n * @return A unique version of the given fully scoped name.\n * If this is the first time that the scoped name is seen in this session,\n * then the given `scopedName` is returned unaltered. If the same name is\n * seen again (producing a collision), an incrementing suffix is added to the\n * end of the name, so it takes the form 'scope/name_1', 'scope/name_2', etc.\n */\nexport function getUniqueTensorName(scopedName: string): string {\n if (!isValidTensorName(scopedName)) {\n throw new Error('Not a valid tensor name: \\'' + scopedName + '\\'');\n }\n if (!nameMap.has(scopedName)) {\n nameMap.set(scopedName, 0);\n }\n const index = nameMap.get(scopedName);\n nameMap.set(scopedName, nameMap.get(scopedName) + 1);\n\n if (index > 0) {\n const result = `${scopedName}_${index}`;\n // Mark the composed name as used in case someone wants\n // to call getUniqueTensorName(\"name_1\").\n nameMap.set(result, 1);\n return result;\n } else {\n return scopedName;\n }\n}\n\nconst tensorNameRegex = new RegExp(/^[A-Za-z0-9][-A-Za-z0-9\\._\\/]*$/);\n\n/**\n * Determine whether a string is a valid tensor name.\n * @param name\n * @returns A Boolean indicating whether `name` is a valid tensor name.\n */\nexport function isValidTensorName(name: string): boolean {\n return !!name.match(tensorNameRegex);\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\nimport {backend} from '@tensorflow/tfjs-core';\nimport {DataFormat} from '../keras_format/common';\n\nlet _epsilon: number;\n\n/**\n * Returns the value of the fuzz factor used in numeric expressions.\n */\nexport function epsilon() {\n if (_epsilon == null) {\n _epsilon = backend().epsilon();\n }\n return _epsilon;\n}\n\n/**\n * Sets the value of the fuzz factor used in numeric expressions.\n * @param e New value of epsilon.\n */\nexport function setEpsilon(e: number) {\n _epsilon = e;\n}\n\n/**\n * Returns the default image data format convention.\n */\nexport function imageDataFormat(): DataFormat {\n return 'channelsLast';\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * Math utility functions.\n *\n * This file contains some frequently used math function that operates on\n * number[] or Float32Array and return a number. Many of these functions are\n * not-so-thick wrappers around TF.js Core functions. 
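// getUniqueTensorName() above leaves the first sighting of a scoped name untouched and
// appends an incrementing suffix on collisions, pre-marking the composed name as used.
// Behaviour sketch, assuming the functions above are in scope:
nameScope('dense', () => {
  const scoped = getScopedTensorName('kernel');   // 'dense/kernel'
  console.log(getUniqueTensorName(scoped));       // 'dense/kernel'
  console.log(getUniqueTensorName(scoped));       // 'dense/kernel_1'
  console.log(getUniqueTensorName(scoped));       // 'dense/kernel_2'
});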
But they offer the\n * convenience of\n * 1) not having to convert the inputs into Tensors,\n * 2) not having to convert the returned Tensors to numbers.\n */\n\nimport {ValueError} from '../errors';\n\nexport type ArrayTypes = Uint8Array|Int32Array|Float32Array;\n\n/**\n * Determine if a number is an integer.\n */\nexport function isInteger(x: number): boolean {\n return x === parseInt(x.toString(), 10);\n}\n\n/**\n * Calculate the product of an array of numbers.\n * @param array The array to calculate the product over.\n * @param begin Beginning index, inclusive.\n * @param end Ending index, exclusive.\n * @return The product.\n */\nexport function arrayProd(\n array: number[]|ArrayTypes, begin?: number, end?: number): number {\n if (begin == null) {\n begin = 0;\n }\n if (end == null) {\n end = array.length;\n }\n\n let prod = 1;\n for (let i = begin; i < end; ++i) {\n prod *= array[i];\n }\n return prod;\n}\n\n/**\n * Compute minimum value.\n * @param array\n * @return minimum value.\n */\nexport function min(array: number[]|Float32Array): number {\n // same behavior as tf.min()\n if (array.length === 0) {\n return Number.NaN;\n }\n let min = Number.POSITIVE_INFINITY;\n for (let i = 0; i < array.length; i++) {\n const value = array[i];\n if (value < min) {\n min = value;\n }\n }\n return min;\n}\n\n/**\n * Compute maximum value.\n * @param array\n * @return maximum value\n */\nexport function max(array: number[]|Float32Array): number {\n // same behavior as tf.max()\n if (array.length === 0) {\n return Number.NaN;\n }\n let max = Number.NEGATIVE_INFINITY;\n for (let i = 0; i < array.length; i++) {\n const value = array[i];\n if (value > max) {\n max = value;\n }\n }\n return max;\n}\n\n/**\n * Compute sum of array.\n * @param array\n * @return The sum.\n */\nexport function sum(array: number[]|Float32Array): number {\n let sum = 0;\n for (let i = 0; i < array.length; i++) {\n const value = array[i];\n sum += value;\n }\n return sum;\n}\n\n/**\n * Compute mean of array.\n * @param array\n * @return The mean.\n */\nexport function mean(array: number[]|Float32Array): number {\n return sum(array) / array.length;\n}\n\n/**\n * Compute variance of array.\n * @param array\n * @return The variance.\n */\nexport function variance(array: number[]|Float32Array): number {\n const meanValue = mean(array);\n const demeaned = array.map((value: number) => value - meanValue);\n let sumSquare = 0;\n for (let i = 0; i < demeaned.length; i++) {\n const value = demeaned[i];\n sumSquare += value * value;\n }\n return sumSquare / array.length;\n}\n\n/**\n * Compute median of array.\n * @param array\n * @return The median value.\n */\nexport function median(array: number[]|Float32Array): number {\n const arraySorted = array.slice().sort((a, b) => a - b);\n const lowIdx = Math.floor((arraySorted.length - 1) / 2);\n const highIdx = Math.ceil((arraySorted.length - 1) / 2);\n if (lowIdx === highIdx) {\n return arraySorted[lowIdx];\n }\n return (arraySorted[lowIdx] + arraySorted[highIdx]) / 2;\n}\n\n/**\n * Generate an array of integers in [begin, end).\n * @param begin Beginning integer, inclusive.\n * @param end Ending integer, exclusive.\n * @returns Range array.\n * @throws ValueError, iff `end` < `begin`.\n */\nexport function range(begin: number, end: number): number[] {\n if (end < begin) {\n throw new ValueError(`end (${end}) < begin (${begin}) is forbidden.`);\n }\n const out: number[] = [];\n for (let i = begin; i < end; ++i) {\n out.push(i);\n }\n return out;\n}\n","/**\n * @license\n * Copyright 2018 
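// The helpers above operate on plain number arrays rather than Tensors. Worked values
// for a small input, assuming the functions above are in scope:
const xs = [1, 3, 2, 4];
console.log(arrayProd(xs));   // 24
console.log(mean(xs));        // 2.5
console.log(variance(xs));    // 1.25  (population variance: divided by length)
console.log(median(xs));      // 2.5   (even length: average of the two middle values)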
Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * deeplearn.js backend.\n */\n\nimport * as tfc from '@tensorflow/tfjs-core';\nimport {onesLike as coreOnesLike, scalar, Tensor, Tensor1D, tensor1d, Tensor2D, Tensor3D, Tensor4D, Tensor5D, tidy, where, zerosLike as coreZerosLike} from '@tensorflow/tfjs-core';\nimport {checkDataFormat} from '../common';\nimport {NotImplementedError, ValueError} from '../errors';\nimport {DataFormat, Shape} from '../keras_format/common';\nimport {HasShape} from '../types';\nimport * as math_utils from '../utils/math_utils';\n\nimport {imageDataFormat} from './common';\n\n// tslint:enable\n\n/* Setting and getting backend from deeplearn.js. */\n\n// Default deeplearn.js backend is WebGL (GPU).\nlet backend: 'cpu'|'webgl' = 'webgl';\n\nexport function setBackend(requestedBackend: 'cpu'|'webgl') {\n tfc.setBackend(requestedBackend);\n backend = requestedBackend;\n}\n\nexport function getBackend(): 'cpu'|'webgl' {\n return backend;\n}\n\n/**\n * Indicates whether the backend is operating symbolically.\n *\n * This function will be used to determine how to interpret user code. If\n * it returns true, calls to the backend construct a symbolic graph; if\n * it returns false, calls to the backend execute immediately.\n */\nexport function isBackendSymbolic(): boolean {\n return false;\n}\n\n/**\n * Get the number of elements in a Tensor.\n * @param x The Tensor.\n * @return Number of elements in `x`.\n */\nexport function countParams(x: HasShape): number {\n const shape = x.shape;\n if (shape.length > 0) {\n return shape.reduce((a: number, b: number) => a * b);\n } else {\n // Scalar.\n return 1;\n }\n}\n\n/**\n * Casts a tensor to a different dtype and returns it.\n * @param x Input tensor.\n * @param dtype String: 'float32'|'int32'|'bool'.\n * @returns Tensor of the specified `dtype`.\n */\nexport function cast(x: Tensor, dtype: tfc.DataType): Tensor {\n return tfc.cast(x, dtype);\n}\n\n/**\n * Adds a 1-sized dimension at index \"axis\".\n * @param x Input tensor.\n * @param axis Position where to add the new axis.\n * @returns Result of the dimension expansion.\n */\nexport function expandDims(x: Tensor, axis = -1): Tensor {\n const outShape = x.shape.slice();\n if (axis < 0) {\n axis = outShape.length + axis + 1;\n }\n outShape.splice(axis, 0, 1);\n return tfc.reshape(x, outShape);\n}\n\n/**\n * Repeats a 2D tensor.\n *\n * If `x` has shape `[samples, dim]` and `n` is 2, for example, the output\n * will have shape `[samples, 2, dim]`.\n *\n * @param x Input tensor.\n * @param n Integer, number of times to repeat.\n * @returns The result of the repeat operation.\n * @throws ValueError: If input tensor is not 2D.\n */\nexport function repeat(x: Tensor, n: number): Tensor {\n return tidy(() => {\n if (x.shape.length !== 2) {\n throw new ValueError(\n `repeat() expects a rank-2 tensor, but received a ` +\n `rank-${x.shape.length} tensor.`);\n }\n const y = expandDims(x, 1);\n return tile(y, [1, n, 1]);\n });\n}\n\n/**\n * Flatten a Tensor into 1D.\n * @param x Input tensor.\n * @return The result of the flattening `x`.\n */\nexport function flatten(x: Tensor): Tensor {\n const newShape = [math_utils.arrayProd(x.shape)];\n return tfc.reshape(x, newShape);\n}\n\n/**\n * Turn a nD tensor into a 2D tensor with same 0th dimension.\n * In other words, it flattens 
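// expandDims() above normalizes a negative axis to shape.length + axis + 1 and splices a
// size-1 dimension into a copy of the shape before reshaping. A small check with the
// public tfjs-core API (tensor values are illustrative only):
import * as tfc from '@tensorflow/tfjs-core';

const x = tfc.zeros([2, 3]);
const outShape = x.shape.slice();       // [2, 3]
let axis = -1;                          // request a trailing size-1 dimension
if (axis < 0) {
  axis = outShape.length + axis + 1;    // -1 maps to 2, i.e. append at the end
}
outShape.splice(axis, 0, 1);            // [2, 3, 1]
console.log(tfc.reshape(x, outShape).shape);   // [2, 3, 1]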
each data samples of a batch.\n *\n * @param x The tensor to flatten. The rank of this tensor is required to be 2\n * or higher.\n * @return The result of the flattening.\n */\nexport function batchFlatten(x: Tensor): Tensor {\n if (x.rank <= 1) {\n throw new ValueError(\n `batchFlatten requires a minimum rank of 2. Got rank: ${x.rank}.`);\n }\n const newShape = [x.shape[0], math_utils.arrayProd(x.shape, 1)];\n return tfc.reshape(x, newShape);\n}\n\n/**\n * Do slicing along the first axis.\n * @param array input `tf.Tensor`.\n * @param start starting index, inclusive.\n * @param size size of the slice along the first axis.\n * @returns result of the slicing.\n * @throws ValueError: If `array` is of an unsupported subtype of `tf.Tensor`.\n */\nexport function sliceAlongFirstAxis(\n array: Tensor, start: number, size: number): Tensor {\n return tidy(() => {\n switch (array.rank) {\n case 1:\n return tfc.slice1d(array as Tensor1D, start, size);\n case 2:\n return tfc.slice2d(\n array as Tensor2D, [start, 0], [size, array.shape[1]]);\n case 3:\n return tfc.slice3d(\n array as Tensor3D, [start, 0, 0],\n [size, array.shape[1], array.shape[2]]);\n case 4:\n return tfc.slice4d(\n array as Tensor4D, [start, 0, 0, 0],\n [size, array.shape[1], array.shape[2], array.shape[3]]);\n case 5:\n return tfc.slice(array as Tensor5D, [start, 0, 0, 0, 0], [\n size, array.shape[1], array.shape[2], array.shape[3], array.shape[4]\n ]);\n case 6:\n return tfc.slice(array, [start, 0, 0, 0, 0, 0], [\n size, array.shape[1], array.shape[2], array.shape[3], array.shape[4],\n array.shape[5]\n ]);\n default:\n throw new ValueError(\n `sliceAlongFirstAxis() received an unsupported tensor rank: ` +\n `${array.rank}`);\n }\n });\n}\n\n/**\n * Do slicing along the last axis.\n * @param array input `tf.Tensor`.\n * @param start starting index, inclusive.\n * @param size size of the slice along the last axis.\n * @returns result of the slicing.\n * @throws ValueError: If `array` is of an unsupported subtype of `tf.Tensor`.\n */\nexport function sliceAlongLastAxis(\n array: Tensor, start: number, size: number): Tensor {\n return tidy(() => {\n switch (array.rank) {\n case 1:\n return tfc.slice1d(array as Tensor1D, start, size);\n case 2:\n return tfc.slice2d(\n array as Tensor2D, [0, start], [array.shape[0], size]);\n case 3:\n return tfc.slice3d(\n array as Tensor3D, [0, 0, start],\n [array.shape[0], array.shape[1], size]);\n case 4:\n return tfc.slice4d(\n array as Tensor4D, [0, 0, 0, start],\n [array.shape[0], array.shape[1], array.shape[2], size]);\n default:\n throw new ValueError(\n `sliceAlongLastAxis() received an unsupported tensor rank: ` +\n `${array.rank}`);\n }\n });\n}\n\n/**\n * Do slicing along the sepcified axis.\n * @param array input `tf.Tensor`.\n * @param start starting index, inclusive.\n * @param size of the slice along the chosen axis.\n * @param choose an axis.\n * @returns result of the slicing.\n * @throws ValueError: If `array` is of an unsupported subtype of `tf.Tensor`.\n */\nexport function sliceAlongAxis(\n array: Tensor, start: number, size: number, axis: number): Tensor {\n return tidy(() => {\n switch (array.rank) {\n case 1:\n return tfc.slice1d(array as Tensor1D, start, size);\n case 2:\n switch (axis) {\n case 1:\n return sliceAlongFirstAxis(array, start, size);\n case 2:\n return sliceAlongLastAxis(array, start, size);\n default:\n throw new ValueError(\n `The axis is not within the rank of the tensor ` +\n `${axis}`);\n }\n case 3:\n switch (axis) {\n case 1:\n return 
sliceAlongFirstAxis(array, start, size);\n case 2:\n return tfc.slice3d(\n array as Tensor3D, [0, start, 0],\n [array.shape[0], size, array.shape[2]]);\n case 3:\n return sliceAlongLastAxis(array, start, size);\n default:\n throw new ValueError(\n `The axis is not within the rank of the tensor ` +\n `${axis}`);\n }\n case 4:\n switch (axis) {\n case 1:\n return sliceAlongFirstAxis(array, start, size);\n case 2:\n return tfc.slice4d(\n array as Tensor4D, [0, start, 0, 0],\n [array.shape[0], size, array.shape[2], array.shape[3]]);\n case 3:\n return tfc.slice4d(\n array as Tensor4D, [0, 0, start, 0],\n [array.shape[0], array.shape[1], size, array.shape[3]]);\n case 4:\n return sliceAlongLastAxis(array, start, size);\n default:\n throw new ValueError(\n `The axis is not within the rank of the tensor ` +\n `${axis}`);\n }\n default:\n throw new ValueError(\n `sliceAlongLastAxis() received an unsupported tensor rank: ` +\n `${array.rank}`);\n }\n });\n}\n\n/**\n * Concatenates a list of tensors alongside the specified axis.\n * @param tensors `Array` of tensors to concatenate.\n * @param axis Concatenation axis.\n * @returns The result of the concatenation.\n */\nexport function concatenate(tensors: Tensor[], axis = -1): Tensor {\n let rank: number;\n if (axis < 0) {\n rank = tensors[0].rank;\n if (rank !== 0) {\n axis = rank;\n } else {\n axis = 0;\n }\n }\n if (axis === tensors[0].rank) {\n // Porting Note: This is necessary because tfc.concat() requires axis to be\n // in the interval [-rank, rank).\n axis = -1;\n }\n // Porting Note: Sparse concat is not supported yet.\n return tfc.concat(tensors, axis);\n}\n\n/**\n * Concatenate two arrays along the first dimension.\n * @param a The 1st `tf.Tensor` to concatenate.\n * @param b The 2nd `tf.Tensor` to concatenate.\n * @returns Result of the concatenation.\n * @throws ValueError: If `a` is of an unsupported subtype of `tf.Tensor`.\n */\nexport function concatAlongFirstAxis(a: Tensor, b: Tensor): Tensor {\n switch (a.rank) {\n case 1:\n return tfc.concat1d([a as Tensor1D, b as Tensor1D]);\n case 2:\n return tfc.concat2d([a as Tensor2D, b as Tensor2D], 0);\n case 3:\n return tfc.concat3d([a as Tensor3D, b as Tensor3D], 0);\n case 4:\n return tfc.concat4d([a as Tensor4D, b as Tensor4D], 0);\n default:\n throw new ValueError(\n `concatAlongFirstAxis() received an unsupported ` +\n `tensor rank: ${a.rank}`);\n }\n}\n\n/**\n * Creates a tensor by tiling `x` by `n`.\n * @param x A tensor.\n * @param n An Array of integers or a single integer. If an Array, the length\n * must be the same as the number of dimensions in `x`. If a single integer,\n * it will be treated as an Array of length 1.\n */\nexport function tile(x: Tensor, n: number|number[]): Tensor {\n if (!Array.isArray(n)) {\n n = [n];\n }\n if (x.rank !== n.length) {\n throw new ValueError(\n `The length of input n (${n.length}) does not match ` +\n `the number of dimensions in input x (${x.rank})`);\n }\n return tfc.tile(x, n);\n}\n\n/* Creation of random tensors. 
*/\n\n/**\n * Get a tensor with normal distribution of values.\n *\n * @param shape Shape of the tensor.\n * @param mean mean value of the normal distribution.\n * @param stddev standard deviation of the normal distribution.\n * @param dtype\n * @param seed\n * @return The normal tensor.\n */\nexport function randomNormal(\n shape: Shape, mean = 0.0, stddev = 1.0, dtype?: 'float32'|'int32',\n seed?: number): Tensor {\n return tfc.randomNormal(shape, mean, stddev, dtype, seed);\n}\n\n/* Linear Algebra */\n\n/**\n * Multiply two tensors and returns the result as a tensor.\n *\n * For 2D tensors, this is equivalent to matrix multiplication (matMul).\n * For tensors of higher ranks, it follows the Theano behavior,\n * (e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`). From the Theano documentation:\n *\n * For N dimensions it is a sum product over the last axis of x and the\n * second-to-last of y:\n *\n * @param a A tensor of at least rank 2.\n * @param b A tensor of at least rank 2.\n * @param activation (optional) A string identifying the activation\n * function.\n * @return Result of the dot operation.\n */\nexport function dot(\n a: Tensor, b: Tensor, activation?: tfc.fused.Activation,\n bias?: Tensor): Tensor {\n if ((a.rank < 2) || (b.rank < 2)) {\n throw new NotImplementedError(\n `dot requires both inputs to be rank >= 2` +\n ` but got x shape = ${a.shape} and y shape = ${b.shape}`);\n }\n if (b.rank >= 3) {\n const xLastDim = a.shape.slice(-1)[0];\n const ySecondLastDim = b.shape.slice(-2)[0];\n if (xLastDim !== ySecondLastDim) {\n throw new NotImplementedError(\n `If rank y >= 3, then the second last dim` +\n ` of y must equal the last dim of x but got x shape = ${\n a.shape} and ` +\n ` y shape = ${b.shape}`);\n }\n }\n // Handle basic 2D x 2D case.\n if ((a.rank === 2) && (b.rank === 2)) {\n const transposeA = false;\n const transposeB = false;\n // tfc.fused.matMul only fuses certain activation functions. Unsupported\n // activation functions are treated as 'linear' activations, which is\n // equivalent to a no-op.\n return tfc.fused.matMul({\n a,\n b: b as Tensor2D,\n transposeA,\n transposeB,\n bias: bias ? reshapeBias(a.rank, bias, imageDataFormat()) : null,\n activation\n });\n } else {\n // Reshape x into the analogous 2D Tensor.\n const aFirstDims = a.shape.slice(); // Holds all but the last dim of x.\n const aLastDim = aFirstDims.pop();\n a = tfc.reshape(a, [-1, aLastDim]);\n\n // Reshape y into the analogous 2D Tensor, and keep track of the\n // required dimensions to reproduce the output shape.\n const bShape = b.shape.slice();\n const bLastDim = bShape.pop();\n const ySecondLastDim = bShape.pop();\n const yOtherDims = [...bShape, bLastDim];\n // permutation should be like [r-2, 0, 1, 2, ... r-4, r-3, r-1]\n // where r is the rank of y.\n const perm = Array.from({length: b.rank}, (_, i) => {\n if (i === 0) {\n return b.rank - 2;\n } else if (i <= b.rank - 2) {\n return i - 1;\n }\n return i;\n });\n b = tfc.reshape(tfc.transpose(b, perm), [ySecondLastDim, -1]);\n\n // Multiply x and y as 2D Tensors, and then reshape back to original.\n const outputShape = [...aFirstDims, ...yOtherDims];\n const transposeA = false;\n const transposeB = false;\n return tfc.reshape(\n tfc.fused.matMul({\n a,\n b,\n transposeA,\n transposeB,\n bias: bias ? 
reshapeBias(a.rank, bias, imageDataFormat()) : null,\n activation\n }),\n outputShape);\n }\n}\n\n/**\n * Compute the sign Tensor of an input Tensor.\n *\n * Elements of the input `tf.Tensor` that are === 0 are mapped to 0.\n * Elements of the input `tf.Tensor` that are > 0 are mapped to 1.\n * Elements of the input `tf.Tensor` that are < 0 are mapped to -1.\n *\n * @param x Input `tf.Tensor`.\n * @return The sign `tf.Tensor`.\n */\nexport function sign(x: Tensor): Tensor {\n // TODO(cais): Move to the core.\n return tidy(() => {\n const zerosLikeX = coreZerosLike(x);\n const onesLikeX = coreOnesLike(x);\n return where(\n tfc.equal(x, zerosLikeX), zerosLikeX,\n where(\n tfc.greater(x, coreZerosLike(x)), onesLikeX,\n tfc.mul(-1, onesLikeX)));\n });\n}\n\n/**\n * Computes the one-hot representation of an integer tensor.\n * @param indices nD integer tensor of shape\n * `(batch_size, dim1, dim2, ... dim(n-1))`\n * @param numClasses Integer, number of classes to consider.\n * @returns (n + 1)D one hot representation of the input\n * with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`\n */\nexport function oneHot(indices: Tensor, numClasses: number): Tensor {\n return tidy(() => {\n if (indices.rank !== 1) {\n throw new Error(\n 'Only 1D one-hot tensors are supported in the ' +\n 'deeplearn backend, at present.');\n }\n indices = tfc.cast(indices, 'int32');\n return tfc.cast(tfc.oneHot(indices as Tensor1D, numClasses), 'float32');\n });\n}\n\n/* Elementary math functions. */\n\n/**\n * Retrieves the elements of indices `indices` in the tensor `reference`.\n * @param reference A tensor.\n * @param indices An integer tensor of indices or an `Array` of integers.\n * @param axis Axis along which to perform the gather operation.\n * @returns The result of the gathering as a tensor.\n */\nexport function gather(\n reference: Tensor, indices: number[]|Tensor1D, axis?: number): Tensor {\n return tidy(() => {\n if (Array.isArray(indices)) {\n indices = tensor1d(indices, 'int32');\n } else {\n indices = tfc.cast(indices, 'int32');\n }\n return tfc.gather(reference, indices, axis);\n });\n}\n\n/**\n * Element-wise square.\n * @param x Input tensor.\n * @return element-wise x^2\n */\nexport function square(x: Tensor): Tensor {\n return tfc.mul(x, x);\n}\n\n/**\n * Element-wise exponentiation.\n *\n * Porting Note: In PyKeras, `a` (the exponent) is a Python integer, which\n * takes advatnage of the backend's (e.g., TensorFlow's) automatic\n * conversion to tensor. Here we allow `a` to be either a number or a tensor.\n *\n * @param x The base tensor.\n * @param a The exponent, tensor or number. 
If a number, it is rounded to the\n * nearest integer and converted to a tensor.\n * @returns A tensor of the same shape as `x`.\n */\nexport function pow(x: Tensor, a: Tensor|number): Tensor {\n return tidy(() => {\n if (typeof (a) === 'number') {\n a = scalar(Math.round(a), 'int32');\n }\n if (a.dtype !== 'int32') {\n throw new NotImplementedError(\n `Non-int32 dtype (${a.dtype}) is not supported by pow() yet`);\n }\n return tfc.pow(x, a);\n });\n}\n\n/**\n * Reshapes bias tensor according to rank of x.\n */\nfunction reshapeBias(xRank: number, bias: Tensor, dataFormat: string) {\n const biasShape = bias.shape;\n\n if (bias.rank !== 1 && bias.rank !== xRank) {\n throw new ValueError(\n `Unexpected bias dimensions: ${bias.rank}` +\n `; expected it to be 1 or ${xRank}`);\n }\n\n if (xRank === 5) {\n if (dataFormat === 'channelsFirst') {\n if (biasShape.length === 1) {\n return tfc.reshape(bias, [1, biasShape[0], 1, 1, 1]);\n } else {\n return tfc.reshape(\n bias, [1, biasShape[3], biasShape[0], biasShape[1], biasShape[2]]);\n }\n } else if (dataFormat === 'channelsLast') {\n if (biasShape.length === 1) {\n return tfc.reshape(bias, [1, 1, 1, 1, biasShape[0]]);\n } else {\n return tfc.reshape(bias, [1].concat(biasShape));\n }\n }\n } else if (xRank === 4) {\n if (dataFormat === 'channelsFirst') {\n if (biasShape.length === 1) {\n return tfc.reshape(bias, [1, biasShape[0], 1, 1]);\n } else {\n return tfc.reshape(bias, [1, biasShape[2], biasShape[0], biasShape[1]]);\n }\n } else if (dataFormat === 'channelsLast') {\n if (biasShape.length === 1) {\n return tfc.reshape(bias, [1, 1, 1, biasShape[0]]);\n } else {\n return tfc.reshape(bias, [1].concat(biasShape));\n }\n }\n } else if (xRank === 3) {\n if (dataFormat === 'channelsFirst') {\n if (biasShape.length === 1) {\n return tfc.reshape(bias, [1, biasShape[0], 1]);\n } else {\n return tfc.reshape(bias, [1, biasShape[1], biasShape[0]]);\n }\n } else if (dataFormat === 'channelsLast') {\n if (biasShape.length === 1) {\n return tfc.reshape(bias, [1, 1, biasShape[0]]);\n } else {\n return tfc.reshape(bias, [1].concat(biasShape));\n }\n }\n } else if (xRank < 3) {\n return bias;\n }\n throw new ValueError(`Unsupported input rank by biasAdd: ${bias.rank}`);\n}\n\n/* Neural-network operations. */\n\n/**\n * Add a bias to a tensor.\n *\n * @param x The tensor to add the bias to.\n * @param bias The bias to add to `x`. 
Must be 1D or the same rank as `x`.\n * @return Result of the bias adding.\n * @throws ValueError: If the rank of `bias` is incorrect.\n */\nexport function biasAdd(\n x: Tensor, bias: Tensor, dataFormat?: DataFormat): Tensor {\n return tidy(() => {\n if (dataFormat == null) {\n dataFormat = imageDataFormat();\n }\n checkDataFormat(dataFormat);\n\n return tfc.add(x, reshapeBias(x.rank, bias, dataFormat));\n });\n}\n\n/**\n * Exponential linear unit (ELU).\n * @param x A tensor or variable to compute the activation function for.\n * @param alpha: A scalar, a scaling factor for the negative section.\n * @return Output of the ELU operation.\n */\nexport function elu(x: Tensor, alpha = 1): Tensor {\n // TODO(cais): Add support for alpha values other than 1.\n if (alpha !== 1) {\n throw new NotImplementedError(\n `Support for alpha values other than 1 (${alpha}) is not implemented ` +\n `yet.`);\n }\n return tfc.elu(x);\n}\n\n/**\n * Softsign of a tensor.\n *\n * Defined as x / (abs(x) + 1), element-wise.\n *\n * @param x: Input.\n * @returns Output.\n */\nexport function softsign(x: Tensor): Tensor {\n return tidy(() => tfc.div(x, tfc.add(tfc.abs(x), 1)));\n}\n\n/**\n * Sets entries in `x` to zero at random, while scaling the entire tensor.\n *\n * @param x input tensor.\n * @param level fraction of the entries in the tensor that will be set to 0.\n * @param noiseShape shape of randomly generated keep/drop flags, must be\n * broadcastable to the shape of `x`. Optional.\n * @param seed random seed to ensure determinism. Optional.\n * @returns Result of the dropout operation.\n */\nexport function dropout(\n x: Tensor, level: number, noiseShape?: number[], seed?: number): Tensor {\n return tidy(() => tfc.dropout(x, level, noiseShape, seed));\n}\n\n/**\n * Element-wise, segment-wise linear approximation of sigmoid.\n *\n * Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.\n * In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.\n *\n * @param x Input tensor.\n * @returns Output tensor.\n */\nexport function hardSigmoid(x: Tensor): Tensor {\n return tidy(() => {\n const y = tfc.add(.5, tfc.mul(.2, x));\n return tfc.clipByValue(y, 0, 1);\n });\n}\n\n/**\n * Invoke `x` in the training phase, and `alt` otherwise.\n *\n * Porting Note: We do not create placeholder tensors for the `training`\n * boolean flag here, because there is no such thing in the TF.js imperative\n * backend.\n *\n * @param x The function to invoke iff `training` is `true`.\n * @param alt The function to invoke iff `training` is `false`.\n * @param training Boolean flag for whether training phase is active.\n * @returns The return value of `x()` if `training` is `true`, or the return\n * value of `alt()` if `training` is `false`.\n */\nexport function inTrainPhase(x: () => T, alt: () => T, training = false): T {\n return training ? x() : alt();\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\nimport {BaseSerialization} from './types';\n\n// TODO(soergel): Move the CamelCase versions back out of keras_format\n// e.g. to src/common.ts. Maybe even duplicate *all* of these to be pedantic?\n/** @docinline */\nexport type FanMode = 'fanIn'|'fanOut'|'fanAvg';\nexport const VALID_FAN_MODE_VALUES = ['fanIn', 'fanOut', 'fanAvg'];\n\n// These constants have a snake vs. 
camel distinction.\nexport type FanModeSerialization = 'fan_in'|'fan_out'|'fan_avg';\n\n/** @docinline */\nexport type Distribution = 'normal'|'uniform'|'truncatedNormal';\nexport const VALID_DISTRIBUTION_VALUES =\n ['normal', 'uniform', 'truncatedNormal'];\n// These constants have a snake vs. camel distinction.\nexport type DistributionSerialization = 'normal'|'uniform'|'truncated_normal';\n\nexport type ZerosSerialization = BaseSerialization<'Zeros', {}>;\n\nexport type OnesSerialization = BaseSerialization<'Ones', {}>;\n\nexport type ConstantConfig = {\n value: number;\n};\n\nexport type ConstantSerialization =\n BaseSerialization<'Constant', ConstantConfig>;\n\nexport type RandomNormalConfig = {\n mean?: number;\n stddev?: number;\n seed?: number;\n};\n\nexport type RandomNormalSerialization =\n BaseSerialization<'RandomNormal', RandomNormalConfig>;\n\nexport type RandomUniformConfig = {\n minval?: number;\n maxval?: number;\n seed?: number;\n};\n\nexport type RandomUniformSerialization =\n BaseSerialization<'RandomUniform', RandomUniformConfig>;\n\nexport type TruncatedNormalConfig = {\n mean?: number;\n stddev?: number;\n seed?: number;\n};\n\nexport type TruncatedNormalSerialization =\n BaseSerialization<'TruncatedNormal', TruncatedNormalConfig>;\n\nexport type VarianceScalingConfig = {\n scale?: number;\n\n mode?: FanModeSerialization;\n distribution?: DistributionSerialization;\n seed?: number;\n};\n\nexport type VarianceScalingSerialization =\n BaseSerialization<'VarianceScaling', VarianceScalingConfig>;\n\nexport type OrthogonalConfig = {\n seed?: number;\n gain?: number;\n};\n\nexport type OrthogonalSerialization =\n BaseSerialization<'Orthogonal', OrthogonalConfig>;\n\nexport type IdentityConfig = {\n gain?: number;\n};\n\nexport type IdentitySerialization =\n BaseSerialization<'Identity', IdentityConfig>;\n\n// Update initializerClassNames below in concert with this.\nexport type InitializerSerialization = ZerosSerialization|OnesSerialization|\n ConstantSerialization|RandomUniformSerialization|RandomNormalSerialization|\n TruncatedNormalSerialization|IdentitySerialization|\n VarianceScalingSerialization|OrthogonalSerialization;\n\nexport type InitializerClassName = InitializerSerialization['class_name'];\n\n// We can't easily extract a string[] from the string union type, but we can\n// recapitulate the list, enforcing at compile time that the values are valid\n// and that we have the right number of them.\n\n/**\n * A string array of valid Initializer class names.\n *\n * This is guaranteed to match the `InitializerClassName` union type.\n */\nexport const initializerClassNames: InitializerClassName[] = [\n 'Zeros', 'Ones', 'Constant', 'RandomNormal', 'RandomUniform',\n 'TruncatedNormal', 'VarianceScaling', 'Orthogonal', 'Identity'\n];\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\nimport {DataType, eye, linalg, mul, ones, randomUniform, scalar, serialization, Tensor, Tensor2D, tidy, transpose, truncatedNormal, zeros} from '@tensorflow/tfjs-core';\n\nimport * as K from './backend/tfjs_backend';\nimport {checkDataFormat} from './common';\nimport {NotImplementedError, ValueError} from './errors';\nimport {DataFormat, Shape} from './keras_format/common';\nimport {Distribution, FanMode, VALID_DISTRIBUTION_VALUES, 
VALID_FAN_MODE_VALUES} from './keras_format/initializer_config';\nimport {checkStringTypeUnionValue, deserializeKerasObject, serializeKerasObject} from './utils/generic_utils';\nimport {arrayProd} from './utils/math_utils';\n\nexport function checkFanMode(value?: string): void {\n checkStringTypeUnionValue(VALID_FAN_MODE_VALUES, 'FanMode', value);\n}\n\nexport function checkDistribution(value?: string): void {\n checkStringTypeUnionValue(VALID_DISTRIBUTION_VALUES, 'Distribution', value);\n}\n\n/**\n * Initializer base class.\n *\n * @doc {\n * heading: 'Initializers', subheading: 'Classes', namespace: 'initializers'}\n */\nexport abstract class Initializer extends serialization.Serializable {\n public fromConfigUsesCustomObjects(): boolean {\n return false;\n }\n /**\n * Generate an initial value.\n * @param shape\n * @param dtype\n * @return The init value.\n */\n abstract apply(shape: Shape, dtype?: DataType): Tensor;\n\n getConfig(): serialization.ConfigDict {\n return {};\n }\n}\n\nexport class Zeros extends Initializer {\n /** @nocollapse */\n static className = 'Zeros';\n\n apply(shape: Shape, dtype?: DataType): Tensor {\n return zeros(shape, dtype);\n }\n}\nserialization.registerClass(Zeros);\n\nexport class Ones extends Initializer {\n /** @nocollapse */\n static className = 'Ones';\n\n apply(shape: Shape, dtype?: DataType): Tensor {\n return ones(shape, dtype);\n }\n}\nserialization.registerClass(Ones);\n\nexport interface ConstantArgs {\n /** The value for each element in the variable. */\n value: number;\n}\n\nexport class Constant extends Initializer {\n /** @nocollapse */\n static className = 'Constant';\n private value: number;\n constructor(args: ConstantArgs) {\n super();\n if (typeof args !== 'object') {\n throw new ValueError(\n `Expected argument of type ConstantConfig but got ${args}`);\n }\n if (args.value === undefined) {\n throw new ValueError(`config must have value set but got ${args}`);\n }\n this.value = args.value;\n }\n\n apply(shape: Shape, dtype?: DataType): Tensor {\n return tidy(() => mul(scalar(this.value), ones(shape, dtype)));\n }\n\n override getConfig(): serialization.ConfigDict {\n return {\n value: this.value,\n };\n }\n}\nserialization.registerClass(Constant);\n\nexport interface RandomUniformArgs {\n /** Lower bound of the range of random values to generate. */\n minval?: number;\n /** Upper bound of the range of random values to generate. */\n maxval?: number;\n /** Used to seed the random generator. */\n seed?: number;\n}\n\nexport class RandomUniform extends Initializer {\n /** @nocollapse */\n static className = 'RandomUniform';\n readonly DEFAULT_MINVAL = -0.05;\n readonly DEFAULT_MAXVAL = 0.05;\n private minval: number;\n private maxval: number;\n private seed: number;\n\n constructor(args: RandomUniformArgs) {\n super();\n this.minval = args.minval || this.DEFAULT_MINVAL;\n this.maxval = args.maxval || this.DEFAULT_MAXVAL;\n this.seed = args.seed;\n }\n\n apply(shape: Shape, dtype?: DataType): Tensor {\n return randomUniform(shape, this.minval, this.maxval, dtype, this.seed);\n }\n\n override getConfig(): serialization.ConfigDict {\n return {minval: this.minval, maxval: this.maxval, seed: this.seed};\n }\n}\nserialization.registerClass(RandomUniform);\n\nexport interface RandomNormalArgs {\n /** Mean of the random values to generate. */\n mean?: number;\n /** Standard deviation of the random values to generate. */\n stddev?: number;\n /** Used to seed the random generator. 
*/\n seed?: number;\n}\n\nexport class RandomNormal extends Initializer {\n /** @nocollapse */\n static className = 'RandomNormal';\n readonly DEFAULT_MEAN = 0.;\n readonly DEFAULT_STDDEV = 0.05;\n private mean: number;\n private stddev: number;\n private seed: number;\n\n constructor(args: RandomNormalArgs) {\n super();\n this.mean = args.mean || this.DEFAULT_MEAN;\n this.stddev = args.stddev || this.DEFAULT_STDDEV;\n this.seed = args.seed;\n }\n\n apply(shape: Shape, dtype?: DataType): Tensor {\n dtype = dtype || 'float32';\n if (dtype !== 'float32' && dtype !== 'int32') {\n throw new NotImplementedError(\n `randomNormal does not support dType ${dtype}.`);\n }\n\n return K.randomNormal(shape, this.mean, this.stddev, dtype, this.seed);\n }\n\n override getConfig(): serialization.ConfigDict {\n return {mean: this.mean, stddev: this.stddev, seed: this.seed};\n }\n}\nserialization.registerClass(RandomNormal);\n\nexport interface TruncatedNormalArgs {\n /** Mean of the random values to generate. */\n mean?: number;\n /** Standard deviation of the random values to generate. */\n stddev?: number;\n /** Used to seed the random generator. */\n seed?: number;\n}\n\nexport class TruncatedNormal extends Initializer {\n /** @nocollapse */\n static className = 'TruncatedNormal';\n\n readonly DEFAULT_MEAN = 0.;\n readonly DEFAULT_STDDEV = 0.05;\n private mean: number;\n private stddev: number;\n private seed: number;\n\n constructor(args: TruncatedNormalArgs) {\n super();\n this.mean = args.mean || this.DEFAULT_MEAN;\n this.stddev = args.stddev || this.DEFAULT_STDDEV;\n this.seed = args.seed;\n }\n\n apply(shape: Shape, dtype?: DataType): Tensor {\n dtype = dtype || 'float32';\n if (dtype !== 'float32' && dtype !== 'int32') {\n throw new NotImplementedError(\n `truncatedNormal does not support dType ${dtype}.`);\n }\n return truncatedNormal(shape, this.mean, this.stddev, dtype, this.seed);\n }\n\n override getConfig(): serialization.ConfigDict {\n return {mean: this.mean, stddev: this.stddev, seed: this.seed};\n }\n}\nserialization.registerClass(TruncatedNormal);\n\nexport interface IdentityArgs {\n /**\n * Multiplicative factor to apply to the identity matrix.\n */\n gain?: number;\n}\n\nexport class Identity extends Initializer {\n /** @nocollapse */\n static className = 'Identity';\n private gain: number;\n constructor(args: IdentityArgs) {\n super();\n this.gain = args.gain != null ? 
args.gain : 1.0;\n }\n\n apply(shape: Shape, dtype?: DataType): Tensor {\n return tidy(() => {\n if (shape.length !== 2 || shape[0] !== shape[1]) {\n throw new ValueError(\n 'Identity matrix initializer can only be used for' +\n ' 2D square matrices.');\n } else {\n return mul(this.gain, eye(shape[0]));\n }\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n return {gain: this.gain};\n }\n}\nserialization.registerClass(Identity);\n\n/**\n * Computes the number of input and output units for a weight shape.\n * @param shape Shape of weight.\n * @param dataFormat data format to use for convolution kernels.\n * Note that all kernels in Keras are standardized on the\n * CHANNEL_LAST ordering (even when inputs are set to CHANNEL_FIRST).\n * @return An length-2 array: fanIn, fanOut.\n */\nfunction computeFans(\n shape: Shape, dataFormat: DataFormat = 'channelsLast'): number[] {\n let fanIn: number;\n let fanOut: number;\n checkDataFormat(dataFormat);\n if (shape.length === 2) {\n fanIn = shape[0];\n fanOut = shape[1];\n } else if ([3, 4, 5].indexOf(shape.length) !== -1) {\n if (dataFormat === 'channelsFirst') {\n const receptiveFieldSize = arrayProd(shape, 2);\n fanIn = shape[1] * receptiveFieldSize;\n fanOut = shape[0] * receptiveFieldSize;\n } else if (dataFormat === 'channelsLast') {\n const receptiveFieldSize = arrayProd(shape, 0, shape.length - 2);\n fanIn = shape[shape.length - 2] * receptiveFieldSize;\n fanOut = shape[shape.length - 1] * receptiveFieldSize;\n }\n } else {\n const shapeProd = arrayProd(shape);\n fanIn = Math.sqrt(shapeProd);\n fanOut = Math.sqrt(shapeProd);\n }\n\n return [fanIn, fanOut];\n}\n\nexport interface VarianceScalingArgs {\n /** Scaling factor (positive float). */\n scale?: number;\n\n /** Fanning mode for inputs and outputs. */\n mode?: FanMode;\n\n /** Probabilistic distribution of the values. */\n distribution?: Distribution;\n\n /** Random number generator seed. */\n seed?: number;\n}\n\nexport class VarianceScaling extends Initializer {\n /** @nocollapse */\n static className = 'VarianceScaling';\n private scale: number;\n private mode: FanMode;\n private distribution: Distribution;\n private seed: number;\n\n /**\n * Constructor of VarianceScaling.\n * @throws ValueError for invalid value in scale.\n */\n constructor(args: VarianceScalingArgs) {\n super();\n if (args.scale < 0.0) {\n throw new ValueError(\n `scale must be a positive float. Got: ${args.scale}`);\n }\n this.scale = args.scale == null ? 1.0 : args.scale;\n this.mode = args.mode == null ? 'fanIn' : args.mode;\n checkFanMode(this.mode);\n this.distribution =\n args.distribution == null ? 
'normal' : args.distribution;\n checkDistribution(this.distribution);\n this.seed = args.seed;\n }\n\n apply(shape: Shape, dtype?: DataType): Tensor {\n const fans = computeFans(shape);\n const fanIn = fans[0];\n const fanOut = fans[1];\n let scale = this.scale;\n if (this.mode === 'fanIn') {\n scale /= Math.max(1, fanIn);\n } else if (this.mode === 'fanOut') {\n scale /= Math.max(1, fanOut);\n } else {\n scale /= Math.max(1, (fanIn + fanOut) / 2);\n }\n\n if (this.distribution === 'normal') {\n const stddev = Math.sqrt(scale);\n dtype = dtype || 'float32';\n if (dtype !== 'float32' && dtype !== 'int32') {\n throw new NotImplementedError(\n `${this.getClassName()} does not support dType ${dtype}.`);\n }\n return truncatedNormal(shape, 0, stddev, dtype, this.seed);\n } else {\n const limit = Math.sqrt(3 * scale);\n return randomUniform(shape, -limit, limit, dtype, this.seed);\n }\n }\n\n override getConfig(): serialization.ConfigDict {\n return {\n scale: this.scale,\n mode: this.mode,\n distribution: this.distribution,\n seed: this.seed\n };\n }\n}\nserialization.registerClass(VarianceScaling);\n\nexport interface SeedOnlyInitializerArgs {\n /** Random number generator seed. */\n seed?: number;\n}\n\nexport class GlorotUniform extends VarianceScaling {\n /** @nocollapse */\n static override className = 'GlorotUniform';\n\n /**\n * Constructor of GlorotUniform\n * @param scale\n * @param mode\n * @param distribution\n * @param seed\n */\n constructor(args?: SeedOnlyInitializerArgs) {\n super({\n scale: 1.0,\n mode: 'fanAvg',\n distribution: 'uniform',\n seed: args == null ? null : args.seed\n });\n }\n\n override getClassName(): string {\n // In Python Keras, GlorotUniform is not a class, but a helper method\n // that creates a VarianceScaling object. Use 'VarianceScaling' as\n // class name to be compatible with that.\n return VarianceScaling.className;\n }\n}\nserialization.registerClass(GlorotUniform);\n\nexport class GlorotNormal extends VarianceScaling {\n /** @nocollapse */\n static override className = 'GlorotNormal';\n\n /**\n * Constructor of GlorotNormal.\n * @param scale\n * @param mode\n * @param distribution\n * @param seed\n */\n constructor(args?: SeedOnlyInitializerArgs) {\n super({\n scale: 1.0,\n mode: 'fanAvg',\n distribution: 'normal',\n seed: args == null ? null : args.seed\n });\n }\n\n override getClassName(): string {\n // In Python Keras, GlorotNormal is not a class, but a helper method\n // that creates a VarianceScaling object. Use 'VarianceScaling' as\n // class name to be compatible with that.\n return VarianceScaling.className;\n }\n}\nserialization.registerClass(GlorotNormal);\n\nexport class HeNormal extends VarianceScaling {\n /** @nocollapse */\n static override className = 'HeNormal';\n\n constructor(args?: SeedOnlyInitializerArgs) {\n super({\n scale: 2.0,\n mode: 'fanIn',\n distribution: 'normal',\n seed: args == null ? null : args.seed\n });\n }\n\n override getClassName(): string {\n // In Python Keras, HeNormal is not a class, but a helper method\n // that creates a VarianceScaling object. Use 'VarianceScaling' as\n // class name to be compatible with that.\n return VarianceScaling.className;\n }\n}\nserialization.registerClass(HeNormal);\n\nexport class HeUniform extends VarianceScaling {\n /** @nocollapse */\n static override className = 'HeUniform';\n\n constructor(args?: SeedOnlyInitializerArgs) {\n super({\n scale: 2.0,\n mode: 'fanIn',\n distribution: 'uniform',\n seed: args == null ? 
null : args.seed\n });\n }\n\n override getClassName(): string {\n // In Python Keras, HeUniform is not a class, but a helper method\n // that creates a VarianceScaling object. Use 'VarianceScaling' as\n // class name to be compatible with that.\n return VarianceScaling.className;\n }\n}\nserialization.registerClass(HeUniform);\n\nexport class LeCunNormal extends VarianceScaling {\n /** @nocollapse */\n static override className = 'LeCunNormal';\n\n constructor(args?: SeedOnlyInitializerArgs) {\n super({\n scale: 1.0,\n mode: 'fanIn',\n distribution: 'normal',\n seed: args == null ? null : args.seed\n });\n }\n\n override getClassName(): string {\n // In Python Keras, LeCunNormal is not a class, but a helper method\n // that creates a VarianceScaling object. Use 'VarianceScaling' as\n // class name to be compatible with that.\n return VarianceScaling.className;\n }\n}\nserialization.registerClass(LeCunNormal);\n\nexport class LeCunUniform extends VarianceScaling {\n /** @nocollapse */\n static override className = 'LeCunUniform';\n\n constructor(args?: SeedOnlyInitializerArgs) {\n super({\n scale: 1.0,\n mode: 'fanIn',\n distribution: 'uniform',\n seed: args == null ? null : args.seed\n });\n }\n\n override getClassName(): string {\n // In Python Keras, LeCunUniform is not a class, but a helper method\n // that creates a VarianceScaling object. Use 'VarianceScaling' as\n // class name to be compatible with that.\n return VarianceScaling.className;\n }\n}\nserialization.registerClass(LeCunUniform);\n\nexport interface OrthogonalArgs extends SeedOnlyInitializerArgs {\n /**\n * Multiplicative factor to apply to the orthogonal matrix. Defaults to 1.\n */\n gain?: number;\n}\n\nexport class Orthogonal extends Initializer {\n /** @nocollapse */\n static className = 'Orthogonal';\n readonly DEFAULT_GAIN = 1;\n protected readonly gain: number;\n protected readonly seed: number;\n\n constructor(args?: OrthogonalArgs) {\n super();\n this.gain = args.gain == null ? this.DEFAULT_GAIN : args.gain;\n this.seed = args.seed;\n\n if (this.seed != null) {\n throw new NotImplementedError(\n 'Random seed is not implemented for Orthogonal Initializer yet.');\n }\n }\n\n apply(shape: Shape, dtype?: DataType): Tensor {\n return tidy(() => {\n if (shape.length < 2) {\n throw new NotImplementedError('Shape must be at least 2D.');\n }\n if (shape[0] * shape[1] > 2000) {\n console.warn(\n `Orthogonal initializer is being called on a matrix with more ` +\n `than 2000 (${shape[0] * shape[1]}) elements: ` +\n `Slowness may result.`);\n }\n\n // TODO(cais): Add seed support.\n const normalizedShape =\n shape[0] > shape[1] ? 
[shape[1], shape[0]] : shape;\n const a = K.randomNormal(normalizedShape, 0, 1, 'float32') as Tensor2D;\n let q = linalg.gramSchmidt(a) as Tensor2D;\n if (shape[0] > shape[1]) {\n q = transpose(q);\n }\n return mul(this.gain, q);\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n return {\n gain: this.gain,\n seed: this.seed,\n };\n }\n}\nserialization.registerClass(Orthogonal);\n\n/** @docinline */\nexport type InitializerIdentifier =\n 'constant'|'glorotNormal'|'glorotUniform'|'heNormal'|'heUniform'|'identity'|\n 'leCunNormal'|'leCunUniform'|'ones'|'orthogonal'|'randomNormal'|\n 'randomUniform'|'truncatedNormal'|'varianceScaling'|'zeros'|string;\n\n// Maps the JavaScript-like identifier keys to the corresponding registry\n// symbols.\nexport const INITIALIZER_IDENTIFIER_REGISTRY_SYMBOL_MAP:\n {[identifier in InitializerIdentifier]: string} = {\n 'constant': 'Constant',\n 'glorotNormal': 'GlorotNormal',\n 'glorotUniform': 'GlorotUniform',\n 'heNormal': 'HeNormal',\n 'heUniform': 'HeUniform',\n 'identity': 'Identity',\n 'leCunNormal': 'LeCunNormal',\n 'leCunUniform': 'LeCunUniform',\n 'ones': 'Ones',\n 'orthogonal': 'Orthogonal',\n 'randomNormal': 'RandomNormal',\n 'randomUniform': 'RandomUniform',\n 'truncatedNormal': 'TruncatedNormal',\n 'varianceScaling': 'VarianceScaling',\n 'zeros': 'Zeros'\n };\n\nfunction deserializeInitializer(\n config: serialization.ConfigDict,\n customObjects: serialization.ConfigDict = {}): Initializer {\n return deserializeKerasObject(\n config, serialization.SerializationMap.getMap().classNameMap,\n customObjects, 'initializer');\n}\n\nexport function serializeInitializer(initializer: Initializer):\n serialization.ConfigDictValue {\n return serializeKerasObject(initializer);\n}\n\nexport function getInitializer(identifier: InitializerIdentifier|Initializer|\n serialization.ConfigDict): Initializer {\n if (typeof identifier === 'string') {\n const className = identifier in INITIALIZER_IDENTIFIER_REGISTRY_SYMBOL_MAP ?\n INITIALIZER_IDENTIFIER_REGISTRY_SYMBOL_MAP[identifier] :\n identifier;\n /* We have four 'helper' classes for common initializers that\n all get serialized as 'VarianceScaling' and shouldn't go through\n the deserializeInitializer pathway. 
*/\n if (className === 'GlorotNormal') {\n return new GlorotNormal();\n } else if (className === 'GlorotUniform') {\n return new GlorotUniform();\n } else if (className === 'HeNormal') {\n return new HeNormal();\n } else if (className === 'HeUniform') {\n return new HeUniform();\n } else if (className === 'LeCunNormal') {\n return new LeCunNormal();\n } else if (className === 'LeCunUniform') {\n return new LeCunUniform();\n } else {\n const config: serialization.ConfigDict = {};\n config['className'] = className;\n config['config'] = {};\n return deserializeInitializer(config);\n }\n } else if (identifier instanceof Initializer) {\n return identifier;\n } else {\n return deserializeInitializer(identifier);\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/* Original source: utils/generic_utils.py */\n\nimport {Tensor} from '@tensorflow/tfjs-core';\nimport {ValueError} from '../errors';\nimport {Shape} from '../keras_format/common';\n// tslint:enable\n\n/**\n * Determine whether the input is an Array of Shapes.\n */\nexport function isArrayOfShapes(x: Shape|Shape[]): boolean {\n return Array.isArray(x) && Array.isArray(x[0]);\n}\n\n/**\n * Special case of normalizing shapes to lists.\n *\n * @param x A shape or list of shapes to normalize into a list of Shapes.\n * @return A list of Shapes.\n */\nexport function normalizeShapeList(x: Shape|Shape[]): Shape[] {\n if (x.length === 0) {\n return [];\n }\n if (!Array.isArray(x[0])) {\n return [x] as Shape[];\n }\n return x as Shape[];\n}\n\n/**\n * Helper function to obtain exactly one Tensor.\n * @param xs: A single `tf.Tensor` or an `Array` of `tf.Tensor`s.\n * @return A single `tf.Tensor`. If `xs` is an `Array`, return the first one.\n * @throws ValueError: If `xs` is an `Array` and its length is not 1.\n */\nexport function getExactlyOneTensor(xs: Tensor|Tensor[]): Tensor {\n let x: Tensor;\n if (Array.isArray(xs)) {\n if (xs.length !== 1) {\n throw new ValueError(`Expected Tensor length to be 1; got ${xs.length}`);\n }\n x = xs[0];\n } else {\n x = xs;\n }\n return x;\n}\n\n/**\n * Helper function to obtain exactly on instance of Shape.\n *\n * @param shapes Input single `Shape` or Array of `Shape`s.\n * @returns If input is a single `Shape`, return it unchanged. 
If the input is\n * an `Array` containing exactly one instance of `Shape`, return the instance.\n * Otherwise, throw a `ValueError`.\n * @throws ValueError: If input is an `Array` of `Shape`s, and its length is not\n * 1.\n */\nexport function getExactlyOneShape(shapes: Shape|Shape[]): Shape {\n if (Array.isArray(shapes) && Array.isArray(shapes[0])) {\n if (shapes.length === 1) {\n shapes = shapes as Shape[];\n return shapes[0];\n } else {\n throw new ValueError(`Expected exactly 1 Shape; got ${shapes.length}`);\n }\n } else {\n return shapes as Shape;\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\nimport {LayerVariable} from '../variables';\n\n/**\n * Count the elements in an Array of LayerVariables.\n *\n * @param weights: The LayerVariables of which the constituent numbers are to\n * be counted.\n * @returns A count of the elements in all the LayerVariables\n */\nexport function countParamsInWeights(weights: LayerVariable[]): number {\n let count = 0;\n for (const weight of weights) {\n if (weight.shape.length === 0) {\n count += 1;\n } else {\n count += weight.shape.reduce((a, b) => a * b);\n }\n }\n return count;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\nimport * as tfc from '@tensorflow/tfjs-core';\nimport {DataType, Tensor, variableGrads} from '@tensorflow/tfjs-core';\n\nimport {getNextUniqueTensorId} from './backend/state';\nimport {getScopedTensorName, getUniqueTensorName} from './common';\nimport {Constraint} from './constraints';\nimport {NotImplementedError} from './errors';\nimport {Shape} from './keras_format/common';\nimport {HasShape} from './types';\n\nconst DEFAULT_VARIABLE_NAME_PREFIX = 'Variable';\n\n/**\n * A `tf.layers.LayerVariable` is similar to a `tf.Tensor` in that it has a\n * dtype and shape, but its value is mutable. The value is itself represented\n * as a`tf.Tensor`, and can be read with the `read()` method and updated with\n * the `write()` method.\n */\nexport class LayerVariable {\n readonly dtype: DataType;\n readonly shape: Shape;\n\n readonly id: number;\n // The fully scoped name of this Variable, including a unique suffix if needed\n readonly name: string;\n // The originally requested fully scoped name of this Variable, not including\n // any unique suffix. This may be needed when restoring weights because this\n // original name is used as a key.\n readonly originalName: string;\n private trainable_: boolean;\n\n protected readonly val: tfc.Variable;\n readonly constraint: Constraint;\n /**\n * Construct Variable from a `tf.Tensor`.\n *\n * If not explicitly named, the Variable will be given a name with the\n * prefix 'Variable'. Variable names are unique. In the case of name\n * collision, suffixies '_' will be added to the name.\n *\n * @param val Initial value of the Variable.\n * @param name Name of the variable. 
If `null` or `undefined` is provided, it\n * will default a name with the prefix 'Variable'.\n * @param constraint Optional, projection function to be applied to the\n * variable after optimize updates\n * @throws ValueError if `name` is `null` or `undefined`.\n */\n constructor(\n val: Tensor, dtype: DataType = 'float32',\n name = DEFAULT_VARIABLE_NAME_PREFIX, trainable = true,\n constraint: Constraint = null) {\n this.dtype = dtype == null ? 'float32' : dtype;\n this.shape = val.shape;\n this.id = getNextUniqueTensorId();\n\n name = name == null ? DEFAULT_VARIABLE_NAME_PREFIX : name;\n this.originalName = getScopedTensorName(name);\n this.name = getUniqueTensorName(this.originalName);\n\n this.trainable_ = trainable;\n this.constraint = constraint;\n\n this.val = tfc.variable(val, this.trainable_, this.name, this.dtype);\n }\n\n /**\n * Get a snapshot of the Variable's value.\n *\n * The returned value is a snapshot of the Variable's value at the time of\n * the invocation. Future mutations in the value of the tensor will only\n * be reflected by future calls to this method.\n */\n read(): Tensor {\n this.assertNotDisposed();\n return this.val;\n }\n\n /**\n * Update the value of the Variable.\n *\n * @param newVal: The new value to update to. Must be consistent with the\n * dtype and shape of the Variable.\n * @return This Variable.\n */\n write(newVal: Tensor) {\n // TODO(cais): Once TF.js Core supports Tensor.dtype, check dtype match.\n this.assertNotDisposed();\n checkShapesMatch(this.val, newVal);\n // Skip updating if this is the exact same tensor.\n if (this.val.id !== newVal.id) {\n this.val.assign(newVal);\n if (this.constraint != null) {\n this.val.assign(this.constraint.apply(this.val));\n }\n }\n return this;\n }\n\n /**\n * Dispose this LayersVariable instance from memory.\n */\n dispose(): void {\n this.assertNotDisposed();\n this.val.dispose();\n }\n\n protected assertNotDisposed(): void {\n if (this.val.isDisposed) {\n throw new Error(`LayersVariable ${this.name} is already disposed.`);\n }\n }\n\n get trainable(): boolean {\n return this.trainable_;\n }\n\n set trainable(trainable: boolean) {\n this.trainable_ = trainable;\n this.val.trainable = trainable;\n }\n}\n\nfunction checkShapesMatch(x: HasShape, y: HasShape): void {\n if (x.shape.toString() !== y.shape.toString()) {\n throw new Error(\n 'Shape mismatch: ' + JSON.stringify(x.shape) + ' vs. 
' +\n JSON.stringify(y.shape));\n }\n}\n\n/**\n * Create a Variable.\n * @param x The initial value of the `Variable`.\n * @param dtype optional, the type of the variable.\n * @param name optional, the name of the variable, default provided by\n * Variable.\n * @param constraint optional, a constraint to be applied after every update.\n * @return The newly instantiated `Variable`.\n */\nexport function variable(\n x: Tensor, dtype?: DataType, name?: string,\n constraint?: Constraint): LayerVariable {\n return new LayerVariable(x, dtype, name, true, constraint);\n}\n\n/**\n * Instantiates an all-zeros Variable and returns it.\n *\n * @param shape Shape of the tensor.\n * @param dtype DType of the tensor.\n * @param name Name of the tensor.\n * @return An all-zero Variable.\n */\nexport function zerosVariable(\n shape: Shape, dtype?: DataType, name?: string): LayerVariable {\n // TODO(cais): Implement logic for dtype.\n return new LayerVariable(tfc.zeros(shape), dtype, name);\n}\n\n/**\n * Instantiates an all-zeros tensor of the same shape as another tensor.\n *\n * @param x The other tensor.\n * @param dtype DType of the tensor.\n * @param name Name of the tensor.\n * @return A newly instantiated Variable.\n */\nexport function zerosLike(\n x: Tensor, dtype?: DataType, name?: string): LayerVariable {\n return new LayerVariable(tfc.zerosLike(x), dtype, name);\n}\n\n/**\n * Instantiates an all-ones tensor and returns it.\n *\n * @param shape Shape of the tensor.\n * @param dtype DType of the tensor.\n * @param name Name of the tensor.\n * @return An all-ones Variable.\n */\nexport function onesVariable(\n shape: Shape, dtype?: DataType, name?: string): LayerVariable {\n // TODO(cais): Implement logic for dtype.\n const allocated = tfc.ones(shape);\n return new LayerVariable(allocated, dtype, name);\n}\n\n/**\n * Instantiates an all-ones tensor of the same shape as another tensor.\n *\n * @param x The other tensor.\n * @param dtype DType of the tensor.\n * @param name Name of the tensor.\n * @return A newly instantiated Variable.\n */\nexport function onesLike(\n x: Tensor, dtype?: DataType, name?: string): LayerVariable {\n const allocated = tfc.onesLike(x);\n return new LayerVariable(allocated, dtype, name);\n}\n\n/**\n * Instantiate an identity matrix and returns it, as a Variable\n *\n * @param size Number of rows/columns.\n * @param dtype Data type of returned Variable.\n * @param name Name of returned Variable.\n * @return A Variable, an identity matrix.\n */\nexport function eyeVariable(\n size: number, dtype?: DataType, name?: string): LayerVariable {\n return new LayerVariable(tfc.eye(size), dtype, name);\n}\n\n/**\n * Get a Variable with uniform distribution of values.\n * @param shape Shape of the tensor.\n * @param minval Lower bound of the uniform distribution.\n * @param maxval Upper bound of the uniform distribution.\n * @param dtype\n * @param seed\n * @param name Optional name.\n * @return The uniform-random Variable.\n */\nexport function randomUniformVariable(\n shape: Shape, minval: number, maxval: number, dtype?: DataType,\n seed?: number, name = 'randomUniform'): LayerVariable {\n return new LayerVariable(\n tfc.randomUniform(shape, minval, maxval, dtype), dtype, name);\n}\n\n/**\n * Get a Variable with truncated-normal distribution of values.\n * @param shape Shape of the tensor.\n * @param mean mean value of the normal distribution.\n * @param stddev standard deviation of the normal distribution.\n * @param dtype\n * @param seed\n * @param name Optional name.\n * 
@return The truncated-normal-random Variable.\n */\nexport function truncatedNormalVariable(\n shape: Shape, mean = 0.0, stddev = 1.0, dtype?: DataType, seed?: number,\n name = 'truncatedNormal'): LayerVariable {\n // TODO(cais): Implement logic for dtype and seed once they are supported\n // by deeplearn.js.\n dtype = dtype || 'float32';\n if (dtype !== 'float32' && dtype !== 'int32') {\n throw new NotImplementedError(\n `randomNormal does not support dType ${dtype}.`);\n }\n return new LayerVariable(\n tfc.truncatedNormal(shape, mean, stddev, dtype, seed), dtype, name);\n}\n/**\n * Get a Variable with normal distribution of values.\n * @param shape Shape of the tensor.\n * @param mean mean value of the normal distribution.\n * @param stddev standard deviation of the normal distribution.\n * @param dtype\n * @param seed\n * @param name Optional name.\n * @return The truncated-normal-random Variable.\n */\nexport function randomNormalVariable(\n shape: Shape, mean = 0.0, stddev = 1.0, dtype?: DataType, seed?: number,\n name = 'randomNormal'): LayerVariable {\n dtype = dtype || 'float32';\n if (dtype !== 'float32' && dtype !== 'int32') {\n throw new NotImplementedError(\n `randomNormalVariable does not support dType ${dtype}.`);\n }\n return new LayerVariable(\n tfc.randomNormal(shape, mean, stddev, dtype, seed), dtype, name);\n}\n\n/**\n * Update the value of a Variable.\n * @param x The Variable to be updated.\n * @param xNew The new value to update to.\n * @return The Variable updated.\n */\nexport function update(x: LayerVariable, xNew: Tensor): LayerVariable {\n return x.write(xNew);\n}\n\n/**\n * Update the value of a Variable by adding an increment.\n * @param x The Variable to be updated.\n * @param increment The incrment to add to `x`.\n * @return The Variable updated.\n */\nexport function updateAdd(x: LayerVariable, increment: Tensor): LayerVariable {\n return x.write(tfc.add(x.read(), increment));\n}\n\n/**\n * Update the value of a Variable by subtracting a decrement.\n * @param x The Variable to be updated.\n * @param decrement The decrement to subtract from `x`.\n * @return The Variable updated.\n */\nexport function updateSub(x: LayerVariable, decrement: Tensor): LayerVariable {\n return x.write(tfc.sub(x.read(), decrement));\n}\n\n/**\n * Get the values of an array of Variables.\n *\n * @param tensors An `Array` of `Variable`s to get the values of.\n * @return The values of the inputs, as an `Array` of`tf.Tensor`s.\n */\nexport function batchGetValue(xs: LayerVariable[]): Tensor[] {\n return xs.map(x => x.read());\n}\n\n/**\n * Update the value of multiple Variables at once.\n *\n * @param variablesAndValues An `Array`, each element is of type\n * [Variable, Tensor]. The first item is the\n * `Variable` of which the value is to be updated. The second item\n * carries the new value.\n */\nexport function batchSetValue(\n variablesAndValues: Array<[LayerVariable, Tensor]>): void {\n variablesAndValues.forEach(variableAndValue => {\n const variable: LayerVariable = variableAndValue[0];\n variable.write(variableAndValue[1]);\n });\n}\n\n/**\n * Returns the gradients of `variables` w.r.t. 
the return value of `lossFn`.\n * @param lossFn A function which returns a Scalar to be used as the function\n * value (i.e., numerator) for differentiation.\n * @param variables List of variables to be used as the independent variables\n * (i.e., denominator) for differentiation.\n * @returns An Array of gradients tensors.\n */\nexport function gradients(\n lossFn: () => tfc.Scalar, variables: LayerVariable[]): Tensor[] {\n // TODO(cais): The return type signature can be simplified if deeplearn makes\n // the corresponding type public.\n const variableList =\n variables.map(variable => variable.read() as tfc.Variable);\n const valudAndGrads = variableGrads(lossFn, variableList);\n return variables.map(variable => valudAndGrads.grads[variable.name]);\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/* Original source: keras/engine/topology.py */\n\nimport {DataType, Scalar, serialization, Tensor, tidy, util} from '@tensorflow/tfjs-core';\n\nimport {getNextUniqueTensorId, getUid} from '../backend/state';\nimport {getScopedTensorName, getUniqueTensorName, nameScope} from '../common';\nimport {Constraint} from '../constraints';\nimport {AttributeError, NotImplementedError, RuntimeError, ValueError} from '../errors';\nimport {getInitializer, Initializer} from '../initializers';\nimport {Shape} from '../keras_format/common';\nimport {Regularizer} from '../regularizers';\nimport {Kwargs, RegularizerFn} from '../types';\nimport * as generic_utils from '../utils/generic_utils';\nimport * as types_utils from '../utils/types_utils';\nimport * as variable_utils from '../utils/variable_utils';\nimport {batchGetValue, batchSetValue, LayerVariable} from '../variables';\n\n// TODO(michaelterry): This is a stub until it's defined.\nexport type Op = (x: LayerVariable) => LayerVariable;\n\n/**\n * Constructor arguments for InputSpec.\n */\nexport interface InputSpecArgs {\n /** Expected datatype of the input. */\n dtype?: DataType;\n /** Expected shape of the input (may include null for unchecked axes). */\n shape?: Shape;\n /** Expected rank of the input. */\n ndim?: number;\n /** Maximum rank of the input. */\n maxNDim?: number;\n /** Minimum rank of the input. */\n minNDim?: number;\n /** Dictionary mapping integer axes to a specific dimension value. */\n axes?: {[axis: number]: number};\n}\n\n/**\n * Specifies the ndim, dtype and shape of every input to a layer.\n *\n * Every layer should expose (if appropriate) an `inputSpec` attribute:\n * a list of instances of InputSpec (one per input tensor).\n *\n * A null entry in a shape is compatible with any dimension,\n * a null shape is compatible with any shape.\n */\nexport class InputSpec {\n /** Expected datatype of the input. */\n dtype?: DataType;\n /** Expected shape of the input (may include null for unchecked axes). */\n shape?: Shape;\n /** Expected rank of the input. */\n ndim?: number;\n /** Maximum rank of the input. */\n maxNDim?: number;\n /** Minimum rank of the input. */\n minNDim?: number;\n /** Dictionary mapping integer axes to a specific dimension value. 
*/\n axes?: {[axis: number]: number};\n\n constructor(args: InputSpecArgs) {\n this.dtype = args.dtype;\n this.shape = args.shape;\n /*\n TODO(michaelterry): Could throw error if ndim and shape are both defined\n (then backport).\n */\n if (args.shape != null) {\n this.ndim = args.shape.length;\n } else {\n this.ndim = args.ndim;\n }\n this.maxNDim = args.maxNDim;\n this.minNDim = args.minNDim;\n this.axes = args.axes || {};\n }\n}\n\n/**\n * `tf.SymbolicTensor` is a placeholder for a Tensor without any concrete value.\n *\n * They are most often encountered when building a graph of `Layer`s for a\n * `tf.LayersModel` and the input data's shape, but not values are known.\n *\n * @doc {heading: 'Models', 'subheading': 'Classes'}\n */\nexport class SymbolicTensor {\n /* A unique ID for the tensor to be able to differentiate tensors. */\n readonly id: number;\n // The fully scoped name of this Variable, including a unique suffix if needed\n readonly name: string;\n // The originally requested fully scoped name of this Variable, not including\n // any unique suffix. This may be needed when restoring weights because this\n // original name is used as a key.\n readonly originalName?: string;\n /**\n * Rank/dimensionality of the tensor.\n */\n readonly rank: number;\n /**\n * Replacement for _keras_history.\n */\n nodeIndex: number;\n /**\n * Replacement for _keras_history.\n */\n tensorIndex: number;\n\n /**\n *\n * @param dtype\n * @param shape\n * @param sourceLayer The Layer that produced this symbolic tensor.\n * @param inputs The inputs passed to sourceLayer's __call__() method.\n * @param nodeIndex\n * @param tensorIndex\n * @param callArgs The keyword arguments passed to the __call__() method.\n * @param name\n * @param outputTensorIndex The index of this tensor in the list of outputs\n * returned by apply().\n */\n constructor(\n readonly dtype: DataType, readonly shape: Shape,\n public sourceLayer: Layer, readonly inputs: SymbolicTensor[],\n readonly callArgs: Kwargs, name?: string,\n readonly outputTensorIndex?: number) {\n this.id = getNextUniqueTensorId();\n if (name != null) {\n this.originalName = getScopedTensorName(name);\n this.name = getUniqueTensorName(this.originalName);\n }\n this.rank = shape.length;\n }\n}\n\n/**\n * Constructor arguments for Node.\n */\nexport interface NodeArgs {\n /**\n * The layer that takes `inputTensors` and turns them into `outputTensors`.\n * (the node gets created when the `call` method of the layer is called).\n */\n outboundLayer: Layer;\n /**\n * A list of layers, the same length as `inputTensors`, the layers from where\n * `inputTensors` originate.\n */\n inboundLayers: Layer[];\n /**\n * A list of integers, the same length as `inboundLayers`. `nodeIndices[i]` is\n * the origin node of `inputTensors[i]` (necessary since each inbound layer\n * might have several nodes, e.g. if the layer is being shared with a\n * different data stream).\n */\n nodeIndices: number[];\n /**\n * A list of integers, the same length as `inboundLayers`. `tensorIndices[i]`\n * is the index of `inputTensors[i]` within the output of the inbound layer\n * (necessary since each inbound layer might have multiple tensor outputs,\n * with each one being independently manipulable).\n */\n tensorIndices: number[];\n /** List of input tensors. */\n inputTensors: SymbolicTensor[];\n /** List of output tensors. */\n outputTensors: SymbolicTensor[];\n /** List of input masks (a mask can be a tensor, or null). 
*/\n inputMasks: Tensor[];\n /** List of output masks (a mask can be a tensor, or null). */\n outputMasks: Tensor[];\n /** List of input shape tuples. */\n inputShapes: Shape|Shape[];\n /** List of output shape tuples. */\n outputShapes: Shape|Shape[];\n}\n\n/**\n * The type of the return value of Layer.dispose() and Container.dispose().\n */\nexport interface DisposeResult {\n /**\n * Reference count after the dispose call.\n */\n refCountAfterDispose: number;\n\n /**\n * Number of variables dispose in this dispose call.\n */\n numDisposedVariables: number;\n}\n\nlet _nextNodeID = 0;\n\n/**\n * A `Node` describes the connectivity between two layers.\n *\n * Each time a layer is connected to some new input,\n * a node is added to `layer.inboundNodes`.\n *\n * Each time the output of a layer is used by another layer,\n * a node is added to `layer.outboundNodes`.\n *\n * `nodeIndices` and `tensorIndices` are basically fine-grained coordinates\n * describing the origin of the `inputTensors`, verifying the following:\n *\n * `inputTensors[i] ==\n * inboundLayers[i].inboundNodes[nodeIndices[i]].outputTensors[\n * tensorIndices[i]]`\n *\n * A node from layer A to layer B is added to:\n * A.outboundNodes\n * B.inboundNodes\n */\nexport class Node {\n /**\n * The layer that takes `inputTensors` and turns them into `outputTensors`\n * (the node gets created when the `call` method of the layer is called).\n */\n outboundLayer: Layer;\n /**\n * A list of layers, the same length as `inputTensors`, the layers from where\n * `inputTensors` originate.\n */\n inboundLayers: Layer[];\n /**\n * A list of integers, the same length as `inboundLayers`. `nodeIndices[i]` is\n * the origin node of `inputTensors[i]` (necessary since each inbound layer\n * might have several nodes, e.g. if the layer is being shared with a\n * different data stream).\n */\n nodeIndices: number[];\n /**\n * A list of integers, the same length as `inboundLayers`. `tensorIndices[i]`\n * is the index of `inputTensors[i]` within the output of the inbound layer\n * (necessary since each inbound layer might have multiple tensor outputs,\n * with each one being independently manipulable).\n */\n tensorIndices: number[];\n /** List of input tensors. */\n inputTensors: SymbolicTensor[];\n /** List of output tensors. */\n outputTensors: SymbolicTensor[];\n /** List of input masks (a mask can be a tensor, or null). */\n inputMasks: Tensor[];\n /** List of output masks (a mask can be a tensor, or null). */\n outputMasks: Tensor[];\n /** List of input shape tuples. */\n inputShapes: Shape|Shape[];\n /** List of output shape tuples. 
*/\n outputShapes: Shape|Shape[];\n\n readonly id: number;\n\n constructor(\n args: NodeArgs,\n // TODO(michaelterry): Define actual type for this.\n public callArgs?: Kwargs) {\n this.id = _nextNodeID++;\n /*\n Layer instance (NOT a list).\n this is the layer that takes a list of input tensors\n and turns them into a list of output tensors.\n the current node will be added to\n the inboundNodes of outboundLayer.\n */\n this.outboundLayer = args.outboundLayer;\n\n /*\n The following 3 properties describe where\n the input tensors come from: which layers,\n and for each layer, which node and which\n tensor output of each node.\n */\n\n // List of layer instances.\n this.inboundLayers = args.inboundLayers;\n // List of integers, 1:1 mapping with inboundLayers.\n this.nodeIndices = args.nodeIndices;\n // List of integers, 1:1 mapping with inboundLayers.\n this.tensorIndices = args.tensorIndices;\n\n /*\n Following 2 properties:\n tensor inputs and outputs of outboundLayer.\n */\n\n // List of tensors. 1:1 mapping with inboundLayers.\n this.inputTensors = args.inputTensors;\n // List of tensors, created by outboundLayer.call().\n this.outputTensors = args.outputTensors;\n\n /*\n Following 2 properties: input and output masks.\n List of tensors, 1:1 mapping with inputTensor.\n */\n this.inputMasks = args.inputMasks;\n // List of tensors, created by outboundLayer.computeMask().\n this.outputMasks = args.outputMasks;\n\n // Following 2 properties: input and output shapes.\n\n // List of shape tuples, shapes of inputTensors.\n this.inputShapes = args.inputShapes;\n // List of shape tuples, shapes of outputTensors.\n this.outputShapes = args.outputShapes;\n\n // Add nodes to all layers involved.\n for (const layer of args.inboundLayers) {\n if (layer != null) {\n layer.outboundNodes.push(this);\n }\n }\n args.outboundLayer.inboundNodes.push(this);\n }\n\n getConfig(): serialization.ConfigDict {\n const inboundNames: string[] = [];\n for (const layer of this.inboundLayers) {\n if (layer != null) {\n inboundNames.push(layer.name);\n } else {\n inboundNames.push(null);\n }\n }\n return {\n outboundLayer: this.outboundLayer ? this.outboundLayer.name : null,\n inboundLayers: inboundNames,\n nodeIndices: this.nodeIndices,\n tensorIndices: this.tensorIndices\n };\n }\n}\n\n/** Constructor arguments for Layer. */\nexport declare interface LayerArgs {\n /**\n * If defined, will be used to create an input layer to insert before this\n * layer. If both `inputShape` and `batchInputShape` are defined,\n * `batchInputShape` will be used. This argument is only applicable to input\n * layers (the first layer of a model).\n */\n inputShape?: Shape;\n /**\n * If defined, will be used to create an input layer to insert before this\n * layer. If both `inputShape` and `batchInputShape` are defined,\n * `batchInputShape` will be used. This argument is only applicable to input\n * layers (the first layer of a model).\n */\n batchInputShape?: Shape;\n /**\n * If `inputShape` is specified and `batchInputShape` is *not* specified,\n * `batchSize` is used to construct the `batchInputShape`: `[batchSize,\n * ...inputShape]`\n */\n batchSize?: number;\n /**\n * The data-type for this layer. Defaults to 'float32'.\n * This argument is only applicable to input layers (the first layer of a\n * model).\n */\n dtype?: DataType;\n /** Name for this layer. 
*/\n name?: string;\n /**\n * Whether the weights of this layer are updatable by `fit`.\n * Defaults to true.\n */\n trainable?: boolean;\n /**\n * Initial weight values of the layer.\n */\n weights?: Tensor[];\n /** Legacy support. Do not use for new code. */\n inputDType?: DataType;\n}\n\n// If necessary, add `output` arguments to the CallHook function.\n// This is currently used for testing only, but may be used for debugger-related\n// purposes in the future.\nexport type CallHook = (inputs: Tensor|Tensor[], kwargs: Kwargs) => void;\n\nlet _nextLayerID = 0;\n\n/**\n * A layer is a grouping of operations and weights that can be composed to\n * create a `tf.LayersModel`.\n *\n * Layers are constructed by using the functions under the\n * [tf.layers](#Layers-Basic) namespace.\n *\n * @doc {heading: 'Layers', subheading: 'Classes', namespace: 'layers'}\n */\nexport abstract class Layer extends serialization.Serializable {\n /** Name for this layer. Must be unique within a model. */\n name: string;\n /**\n * List of InputSpec class instances.\n *\n * Each entry describes one required input:\n * - ndim\n * - dtype\n * A layer with `n` input tensors must have an `inputSpec` of length `n`.\n */\n inputSpec: InputSpec[];\n supportsMasking: boolean;\n /** Whether the layer weights will be updated during training. */\n protected trainable_: boolean;\n batchInputShape: Shape;\n dtype: DataType;\n initialWeights: Tensor[];\n\n inboundNodes: Node[];\n outboundNodes: Node[];\n\n activityRegularizer: Regularizer;\n\n protected _trainableWeights: LayerVariable[];\n private _nonTrainableWeights: LayerVariable[];\n private _losses: RegularizerFn[];\n // TODO(cais): _updates is currently unused.\n private _updates: Tensor[];\n private _built: boolean;\n private _callHook: CallHook = null;\n\n private _addedWeightNames: string[] = [];\n\n readonly id: number;\n\n // Porting Notes: PyKeras does not have this property in this base Layer\n // class. Instead lets Layer subclass set it dynamically and checks the\n // value with `hasattr`. In tfjs-layers, we let this be a member of this\n // base class.\n protected _stateful = false;\n\n protected _refCount: number|null;\n\n // A flag for whether fast (i.e., all-zero) weight initialization is to\n // be used during `build()` call. This speeds up weight initialization\n // by saving unnecessary calls to expensive initializers in cases where\n // the initialized values will be overwritten by loaded weight values\n // during model loading.\n private fastWeightInitDuringBuild: boolean;\n\n constructor(args: LayerArgs = {}) {\n super();\n this.id = _nextLayerID++;\n\n this.activityRegularizer = null;\n\n this.inputSpec = null;\n this.supportsMasking = false;\n\n // These properties will be set upon call of this.build()\n this._trainableWeights = [];\n this._nonTrainableWeights = [];\n this._losses = [];\n this._updates = [];\n this._built = false;\n\n /*\n These lists will be filled via successive calls\n to this.addInboundNode().\n */\n this.inboundNodes = [];\n this.outboundNodes = [];\n\n let name = args.name;\n if (!name) {\n const prefix = this.getClassName();\n name = generic_utils.toSnakeCase(prefix) + '_' + getUid(prefix);\n }\n this.name = name;\n\n this.trainable_ = args.trainable == null ? 
true : args.trainable;\n\n if (args.inputShape != null || args.batchInputShape != null) {\n /*\n In this case we will later create an input layer\n to insert before the current layer\n */\n let batchInputShape: Shape;\n if (args.batchInputShape != null) {\n batchInputShape = args.batchInputShape;\n } else if (args.inputShape != null) {\n let batchSize: number = null;\n if (args.batchSize != null) {\n batchSize = args.batchSize;\n }\n batchInputShape = [batchSize].concat(args.inputShape);\n }\n this.batchInputShape = batchInputShape;\n\n // Set dtype.\n let dtype = args.dtype;\n if (dtype == null) {\n dtype = args.inputDType;\n }\n if (dtype == null) {\n dtype = 'float32';\n }\n this.dtype = dtype;\n }\n\n if (args.weights != null) {\n this.initialWeights = args.weights;\n } else {\n this.initialWeights = null;\n }\n\n // The value of `_refCount` is initialized to null. When the layer is used\n // in a symbolic way for the first time, it will be set to 1.\n this._refCount = null;\n\n this.fastWeightInitDuringBuild = false;\n }\n\n /**\n * Converts a layer and its index to a unique (immutable type) name.\n * This function is used internally with `this.containerNodes`.\n * @param layer The layer.\n * @param nodeIndex The layer's position (e.g. via enumerate) in a list of\n * nodes.\n *\n * @returns The unique name.\n */\n protected static nodeKey(layer: Layer, nodeIndex: number) {\n return layer.name + '_ib-' + nodeIndex.toString();\n }\n\n /**\n * Returns this.inboundNode at index nodeIndex.\n *\n * Porting note: This is a replacement for _get_node_attribute_at_index()\n * @param nodeIndex\n * @param attrName The name of the attribute related to request for this node.\n */\n private getNodeAtIndex(nodeIndex: number, attrName: string): Node {\n if (this.inboundNodes.length === 0) {\n throw new RuntimeError(\n 'The layer has never been called ' +\n `and thus has no defined ${attrName}.`);\n }\n if (this.inboundNodes.length <= nodeIndex) {\n throw new ValueError(\n `Asked to get ${attrName} at node ${nodeIndex}, ` +\n `but the layer has only ${this.inboundNodes.length} inbound nodes.`);\n }\n return this.inboundNodes[nodeIndex];\n }\n\n /**\n * Retrieves the input tensor(s) of a layer at a given node.\n *\n * @param nodeIndex Integer, index of the node from which to retrieve the\n * attribute. E.g. `nodeIndex=0` will correspond to the first time the layer\n * was called.\n *\n * @return A tensor (or list of tensors if the layer has multiple inputs).\n */\n getInputAt(nodeIndex: number): SymbolicTensor|SymbolicTensor[] {\n return generic_utils.singletonOrArray(\n this.getNodeAtIndex(nodeIndex, 'input').inputTensors);\n }\n\n /**\n * Retrieves the output tensor(s) of a layer at a given node.\n *\n * @param nodeIndex Integer, index of the node from which to retrieve the\n * attribute. E.g. `nodeIndex=0` will correspond to the first time the layer\n * was called.\n *\n * @return A tensor (or list of tensors if the layer has multiple outputs).\n */\n getOutputAt(nodeIndex: number): SymbolicTensor|SymbolicTensor[] {\n return generic_utils.singletonOrArray(\n this.getNodeAtIndex(nodeIndex, 'output').outputTensors);\n }\n\n // Properties\n\n /**\n * Retrieves the input tensor(s) of a layer.\n *\n * Only applicable if the layer has exactly one inbound node,\n * i.e. 
if it is connected to one incoming layer.\n *\n * @return Input tensor or list of input tensors.\n *\n * @exception AttributeError if the layer is connected to more than one\n * incoming layers.\n */\n get input(): SymbolicTensor|SymbolicTensor[] {\n if (this.inboundNodes.length > 1) {\n throw new AttributeError(\n `Layer ${this.name}` +\n ' has multiple inbound nodes, ' +\n 'hence the notion of \"layer input\" ' +\n 'is ill-defined. ' +\n 'Use `getInputAt(nodeIndex)` instead.');\n } else if (this.inboundNodes.length === 0) {\n throw new AttributeError(\n `Layer ${this.name}` +\n ' is not connected, no input to return.');\n }\n return generic_utils.singletonOrArray(\n this.getNodeAtIndex(0, 'input').inputTensors);\n }\n\n /**\n * Retrieves the output tensor(s) of a layer.\n *\n * Only applicable if the layer has exactly one inbound node,\n * i.e. if it is connected to one incoming layer.\n *\n * @return Output tensor or list of output tensors.\n *\n * @exception AttributeError if the layer is connected to more than one\n * incoming layers.\n */\n get output(): SymbolicTensor|SymbolicTensor[] {\n if (this.inboundNodes.length === 0) {\n throw new AttributeError(\n `Layer ${this.name}` +\n ' has no inbound nodes.');\n }\n if (this.inboundNodes.length > 1) {\n throw new AttributeError(\n `Layer ${this.name}` +\n ' has multiple inbound nodes, ' +\n 'hence the notion of \"layer output\" ' +\n 'is ill-defined. ' +\n 'Use `getOutputAt(nodeIndex)` instead.');\n }\n return generic_utils.singletonOrArray(\n this.getNodeAtIndex(0, 'output').outputTensors);\n }\n\n get losses(): RegularizerFn[] {\n return this._losses;\n }\n\n /**\n * Retrieves the Layer's current loss values.\n *\n * Used for regularizers during training.\n */\n calculateLosses(): Scalar[] {\n // Porting Node: This is an augmentation to Layer.loss in PyKeras.\n // In PyKeras, Layer.loss returns symbolic tensors. Here a concrete\n // Tensor (specifically Scalar) values are returned. 
This is due to the\n // imperative backend.\n return this.losses.map(lossFn => lossFn());\n }\n\n get updates(): Tensor[] {\n return this._updates;\n }\n\n get built(): boolean {\n return this._built;\n }\n\n set built(built: boolean) {\n this._built = built;\n }\n\n get trainable(): boolean {\n return this.trainable_;\n }\n\n set trainable(trainable: boolean) {\n this._trainableWeights.forEach(w => w.trainable = trainable);\n this.trainable_ = trainable;\n }\n\n get trainableWeights(): LayerVariable[] {\n if (this.trainable_) {\n return this._trainableWeights.filter(w => w.trainable);\n } else {\n return [];\n }\n }\n\n set trainableWeights(weights: LayerVariable[]) {\n this._trainableWeights = weights;\n }\n\n get nonTrainableWeights(): LayerVariable[] {\n if (this.trainable) {\n return this._trainableWeights.filter(w => !w.trainable)\n .concat(this._nonTrainableWeights);\n } else {\n return this._trainableWeights.concat(this._nonTrainableWeights);\n }\n }\n\n set nonTrainableWeights(weights: LayerVariable[]) {\n this._nonTrainableWeights = weights;\n }\n\n /**\n * The concatenation of the lists trainableWeights and nonTrainableWeights\n * (in this order).\n */\n get weights(): LayerVariable[] {\n return this.trainableWeights.concat(this.nonTrainableWeights);\n }\n\n get stateful(): boolean {\n return this._stateful;\n }\n\n /**\n * Reset the states of the layer.\n *\n * This method of the base Layer class is essentially a no-op.\n * Subclasses that are stateful (e.g., stateful RNNs) should override this\n * method.\n */\n resetStates(): void {\n if (!this.stateful) {\n throw new Error(\n 'Cannot call the resetStates() method of a non-stateful Layer ' +\n 'object.');\n }\n }\n\n /**\n * Checks compatibility between the layer and provided inputs.\n *\n * This checks that the tensor(s) `input`\n * verify the input assumptions of the layer\n * (if any). If not, exceptions are raised.\n *\n * @param inputs Input tensor or list of input tensors.\n *\n * @exception ValueError in case of mismatch between\n * the provided inputs and the expectations of the layer.\n */\n protected assertInputCompatibility(inputs: Tensor|Tensor[]|SymbolicTensor|\n SymbolicTensor[]): void {\n inputs = generic_utils.toList(inputs);\n if (this.inputSpec == null || this.inputSpec.length === 0) {\n return;\n }\n const inputSpec = generic_utils.toList(this.inputSpec);\n if (inputs.length !== inputSpec.length) {\n throw new ValueError(\n `Layer ${this.name} expects ${inputSpec.length} inputs, ` +\n `but it received ${inputs.length} input tensors. 
` +\n `Input received: ${inputs}`);\n }\n for (let inputIndex = 0; inputIndex < inputs.length; inputIndex++) {\n const x = inputs[inputIndex];\n const spec: InputSpec = inputSpec[inputIndex];\n if (spec == null) {\n continue;\n }\n\n // Check ndim.\n const ndim = x.rank;\n if (spec.ndim != null) {\n if (ndim !== spec.ndim) {\n throw new ValueError(\n `Input ${inputIndex} is incompatible with layer ${this.name}: ` +\n `expected ndim=${spec.ndim}, found ndim=${ndim}`);\n }\n }\n if (spec.maxNDim != null) {\n if (ndim > spec.maxNDim) {\n throw new ValueError(\n `Input ${inputIndex} is incompatible with layer ${this.name}` +\n `: expected max_ndim=${spec.maxNDim}, found ndim=${ndim}`);\n }\n }\n if (spec.minNDim != null) {\n if (ndim < spec.minNDim) {\n throw new ValueError(\n `Input ${inputIndex} is incompatible with layer ${this.name}` +\n `: expected min_ndim=${spec.minNDim}, found ndim=${ndim}.`);\n }\n }\n\n // Check dtype.\n if (spec.dtype != null) {\n if (x.dtype !== spec.dtype) {\n throw new ValueError(\n `Input ${inputIndex} is incompatible with layer ${this.name} ` +\n `: expected dtype=${spec.dtype}, found dtype=${x.dtype}.`);\n }\n }\n\n // Check specific shape axes.\n if (spec.axes) {\n const xShape = x.shape;\n for (const key in spec.axes) {\n const axis = Number(key);\n const value = spec.axes[key];\n // Perform Python-style slicing in case axis < 0;\n // TODO(cais): Use https://github.com/alvivi/typescript-underscore to\n // ensure type safety through Underscore calls.\n const xShapeAtAxis =\n axis >= 0 ? xShape[axis] : xShape[xShape.length + axis];\n if (value != null && [value, null].indexOf(xShapeAtAxis) === -1) {\n throw new ValueError(\n `Input ${inputIndex} is incompatible with layer ` +\n `${this.name}: expected axis ${axis} of input shape to ` +\n `have value ${value} but got shape ${xShape}.`);\n }\n }\n }\n\n // Check shape.\n if (spec.shape != null) {\n for (let i = 0; i < spec.shape.length; ++i) {\n const specDim = spec.shape[i];\n const dim = x.shape[i];\n if (specDim != null && dim != null) {\n if (specDim !== dim) {\n throw new ValueError(\n `Input ${inputIndex} is incompatible with layer ` +\n `${this.name}: expected shape=${spec.shape}, ` +\n `found shape=${x.shape}.`);\n }\n }\n }\n }\n }\n }\n\n /**\n * This is where the layer's logic lives.\n *\n * @param inputs Input tensor, or list/tuple of input tensors.\n * @param kwargs Additional keyword arguments.\n *\n * @return A tensor or list/tuple of tensors.\n */\n call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return inputs;\n }\n\n protected invokeCallHook(inputs: Tensor|Tensor[], kwargs: Kwargs) {\n if (this._callHook != null) {\n this._callHook(inputs, kwargs);\n }\n }\n\n /**\n * Set call hook.\n * This is currently used for testing only.\n * @param callHook\n */\n setCallHook(callHook: CallHook) {\n this._callHook = callHook;\n }\n\n /**\n * Clear call hook.\n * This is currently used for testing only.\n */\n clearCallHook() {\n this._callHook = null;\n }\n\n /**\n * Builds or executes a `Layer`'s logic.\n *\n * When called with `tf.Tensor`(s), execute the `Layer`'s computation and\n * return Tensor(s). 
For example:\n *\n * ```js\n * const denseLayer = tf.layers.dense({\n * units: 1,\n * kernelInitializer: 'zeros',\n * useBias: false\n * });\n *\n * // Invoke the layer's apply() method with a `tf.Tensor` (with concrete\n * // numeric values).\n * const input = tf.ones([2, 2]);\n * const output = denseLayer.apply(input);\n *\n * // The output's value is expected to be [[0], [0]], due to the fact that\n * // the dense layer has a kernel initialized to all-zeros and does not have\n * // a bias.\n * output.print();\n * ```\n *\n * When called with `tf.SymbolicTensor`(s), this will prepare the layer for\n * future execution. This entails internal book-keeping on shapes of\n * expected Tensors, wiring layers together, and initializing weights.\n *\n * Calling `apply` with `tf.SymbolicTensor`s are typically used during the\n * building of non-`tf.Sequential` models. For example:\n *\n * ```js\n * const flattenLayer = tf.layers.flatten();\n * const denseLayer = tf.layers.dense({units: 1});\n *\n * // Use tf.layers.input() to obtain a SymbolicTensor as input to apply().\n * const input = tf.input({shape: [2, 2]});\n * const output1 = flattenLayer.apply(input);\n *\n * // output1.shape is [null, 4]. The first dimension is the undetermined\n * // batch size. The second dimension comes from flattening the [2, 2]\n * // shape.\n * console.log(JSON.stringify(output1.shape));\n *\n * // The output SymbolicTensor of the flatten layer can be used to call\n * // the apply() of the dense layer:\n * const output2 = denseLayer.apply(output1);\n *\n * // output2.shape is [null, 1]. The first dimension is the undetermined\n * // batch size. The second dimension matches the number of units of the\n * // dense layer.\n * console.log(JSON.stringify(output2.shape));\n *\n * // The input and output can be used to construct a model that consists\n * // of the flatten and dense layers.\n * const model = tf.model({inputs: input, outputs: output2});\n * ```\n *\n * @param inputs a `tf.Tensor` or `tf.SymbolicTensor` or an Array of them.\n * @param kwargs Additional keyword arguments to be passed to `call()`.\n *\n * @return Output of the layer's `call` method.\n *\n * @exception ValueError error in case the layer is missing shape information\n * for its `build` call.\n *\n * @doc {heading: 'Models', 'subheading': 'Classes'}\n */\n // Porting Note: This is a replacement for __call__() in Python.\n apply(\n inputs: Tensor|Tensor[]|SymbolicTensor|SymbolicTensor[],\n kwargs?: Kwargs): Tensor|Tensor[]|SymbolicTensor|SymbolicTensor[] {\n kwargs = kwargs || {};\n\n this.assertNotDisposed();\n\n // Ensure inputs are all the same type.\n const inputsList = generic_utils.toList(inputs);\n\n let allAreSymbolic = true;\n for (const input of inputsList) {\n if (!(input instanceof SymbolicTensor)) {\n allAreSymbolic = false;\n break;\n }\n }\n let noneAreSymbolic = true;\n for (const input of inputsList) {\n if (input instanceof SymbolicTensor) {\n noneAreSymbolic = false;\n break;\n }\n }\n\n if (allAreSymbolic === noneAreSymbolic) {\n throw new ValueError(\n 'Arguments to apply() must be all ' +\n 'SymbolicTensors or all Tensors');\n }\n\n // TODO(michaelterry): nameScope() may not be necessary.\n return nameScope(this.name, () => {\n // Handle laying building (weight creating, input spec locking).\n if (!this.built) {\n /*\n Throw exceptions in case the input is not compatible\n with the inputSpec specified in the layer constructor.\n */\n this.assertInputCompatibility(inputs);\n\n // Collect input shapes to build layer.\n const 
inputShapes: Shape[] = [];\n for (const xElem of generic_utils.toList(inputs)) {\n inputShapes.push(xElem.shape);\n }\n this.build(generic_utils.singletonOrArray(inputShapes));\n this.built = true;\n\n // Load weights that were specified at layer instantiation.\n if (this.initialWeights) {\n this.setWeights(this.initialWeights);\n }\n\n if (this._refCount === null && noneAreSymbolic) {\n // The first use of this layer is a non-symbolic call, set ref count\n // to 1 so the Layer can be properly disposed if its dispose() method\n // is called.\n this._refCount = 1;\n }\n }\n\n /*\n Throw exceptions in case the input is not compatible\n with the inputSpec set at build time.\n */\n this.assertInputCompatibility(inputs);\n\n // Handle mask propagation.\n // TODO(michaelterry): Mask propagation not currently implemented.\n\n // Actually call the layer, collecting output(s), mask(s), and shape(s).\n if (noneAreSymbolic) {\n let output = this.call(inputs as Tensor | Tensor[], kwargs);\n // TODO(michaelterry): Compute the outputMask\n\n // If the layer returns tensors from its inputs, unmodified,\n // we copy them to avoid loss of tensor metadata.\n const outputList: Tensor[] = generic_utils.toList(output);\n const outputListCopy: Tensor[] = [];\n // TODO(michaelterry): This copying may not be necessary given our eager\n // backend.\n for (let x of outputList) {\n if (inputsList.indexOf(x) !== -1) {\n x = x.clone();\n }\n outputListCopy.push(x);\n }\n output = generic_utils.singletonOrArray(outputListCopy);\n\n if (this.activityRegularizer != null) {\n throw new NotImplementedError(\n 'Layer invocation in the presence of activity ' +\n 'regularizer(s) is not supported yet.');\n }\n\n // TODO(michaelterry): Call addInboundNode()?\n return output;\n } else {\n const inputShape = collectInputShape(inputs);\n const outputShape = this.computeOutputShape(inputShape);\n let output: SymbolicTensor|SymbolicTensor[];\n const outputDType = guessOutputDType(inputs);\n this.warnOnIncompatibleInputShape(\n Array.isArray(inputs) ? inputShape[0] as Shape :\n inputShape as Shape);\n\n if (outputShape != null && outputShape.length > 0 &&\n Array.isArray(outputShape[0])) {\n // We have multiple output shapes. 
Create multiple output tensors.\n output = (outputShape as Shape[])\n .map(\n (shape, index) => new SymbolicTensor(\n outputDType, shape, this,\n generic_utils.toList(inputs), kwargs, this.name,\n index));\n } else {\n output = new SymbolicTensor(\n outputDType, outputShape as Shape, this,\n generic_utils.toList(inputs), kwargs, this.name);\n }\n\n /*\n Add an inbound node to the layer, so that it keeps track\n of the call and of all new variables created during the call.\n This also updates the layer history of the output tensor(s).\n If the input tensor(s) had no previous history,\n this does nothing.\n */\n this.addInboundNode(\n inputs as SymbolicTensor | SymbolicTensor[], output, null, null,\n inputShape, outputShape, kwargs);\n this._refCount++;\n\n if (this.activityRegularizer != null) {\n throw new NotImplementedError(\n 'Layer invocation in the presence of activity ' +\n 'regularizer(s) is not supported yet.');\n }\n\n return output;\n }\n });\n }\n\n /**\n * Check compatibility between input shape and this layer's batchInputShape.\n *\n * Print warning if any incompatibility is found.\n *\n * @param inputShape Input shape to be checked.\n */\n protected warnOnIncompatibleInputShape(inputShape: Shape) {\n if (this.batchInputShape == null) {\n return;\n } else if (inputShape.length !== this.batchInputShape.length) {\n console.warn(\n `The rank of the input tensor provided (shape: ` +\n `${JSON.stringify(inputShape)}) does not match that of the ` +\n `batchInputShape (${JSON.stringify(this.batchInputShape)}) ` +\n `of the layer ${this.name}`);\n } else {\n let dimMismatch = false;\n this.batchInputShape.forEach((dimension, i) => {\n if (dimension != null && inputShape[i] != null &&\n inputShape[i] !== dimension) {\n dimMismatch = true;\n }\n });\n if (dimMismatch) {\n console.warn(\n `The shape of the input tensor ` +\n `(${JSON.stringify(inputShape)}) does not ` +\n `match the expectation of layer ${this.name}: ` +\n `${JSON.stringify(this.batchInputShape)}`);\n }\n }\n }\n\n /**\n * Retrieves the output shape(s) of a layer.\n *\n * Only applicable if the layer has only one inbound node, or if all inbound\n * nodes have the same output shape.\n *\n * @returns Output shape or shapes.\n * @throws AttributeError: if the layer is connected to more than one incoming\n * nodes.\n *\n * @doc {heading: 'Models', 'subheading': 'Classes'}\n */\n get outputShape(): Shape|Shape[] {\n if (this.inboundNodes == null || this.inboundNodes.length === 0) {\n throw new AttributeError(\n `The layer ${this.name} has never been called and thus has no ` +\n `defined output shape.`);\n }\n const allOutputShapes: string[] = [];\n for (const node of this.inboundNodes) {\n const shapeString = JSON.stringify(node.outputShapes);\n if (allOutputShapes.indexOf(shapeString) === -1) {\n allOutputShapes.push(shapeString);\n }\n }\n if (allOutputShapes.length === 1) {\n const outputShapes = this.inboundNodes[0].outputShapes;\n if (Array.isArray(outputShapes) && Array.isArray(outputShapes[0]) &&\n outputShapes.length === 1) {\n return (outputShapes as Shape[])[0];\n } else {\n return outputShapes;\n }\n\n } else {\n throw new AttributeError(\n `The layer ${this.name} has multiple inbound nodes with different ` +\n `output shapes. 
Hence the notion of \"output shape\" is ill-defined ` +\n `for the layer.`);\n // TODO(cais): Implement getOutputShapeAt().\n }\n }\n\n /**\n * Counts the total number of numbers (e.g., float32, int32) in the\n * weights.\n *\n * @returns An integer count.\n * @throws RuntimeError: If the layer is not built yet (in which case its\n * weights are not defined yet.)\n *\n * @doc {heading: 'Models', 'subheading': 'Classes'}\n */\n countParams(): number {\n if (!this.built) {\n throw new RuntimeError(\n `You tried to call countParams() on ${this.name}, ` +\n `but the layer is not built yet. Build it first by calling ` +\n `build(batchInputShape).`);\n }\n return variable_utils.countParamsInWeights(this.weights);\n }\n\n /**\n * Creates the layer weights.\n *\n * Must be implemented on all layers that have weights.\n *\n * Called when apply() is called to construct the weights.\n *\n * @param inputShape A `Shape` or array of `Shape` (unused).\n *\n * @doc {heading: 'Models', 'subheading': 'Classes'}\n */\n build(inputShape: Shape|Shape[]) {\n this.built = true;\n }\n\n /**\n * Returns the current values of the weights of the layer.\n *\n * @param trainableOnly Whether to get the values of only trainable weights.\n * @returns Weight values as an `Array` of `tf.Tensor`s.\n *\n * @doc {heading: 'Models', 'subheading': 'Classes'}\n */\n getWeights(trainableOnly = false): Tensor[] {\n return batchGetValue(trainableOnly ? this.trainableWeights : this.weights);\n }\n\n /**\n * Sets the weights of the layer, from Tensors.\n *\n * @param weights a list of Tensors. The number of arrays and their shape\n * must match number of the dimensions of the weights of the layer (i.e.\n * it should match the output of `getWeights`).\n *\n * @exception ValueError If the provided weights list does not match the\n * layer's specifications.\n *\n * @doc {heading: 'Models', 'subheading': 'Classes'}\n */\n setWeights(weights: Tensor[]): void {\n tidy(() => {\n const params = this.weights;\n if (params.length !== weights.length) {\n // TODO(cais): Restore the following and use `providedWeights`, instead\n // of `weights` in the error message, once the deeplearn.js bug is\n // fixed: https://github.com/PAIR-code/deeplearnjs/issues/498 const\n // providedWeights = JSON.stringify(weights).slice(0, 50);\n throw new ValueError(\n `You called setWeights(weights) on layer \"${this.name}\" ` +\n `with a weight list of length ${weights.length}, ` +\n `but the layer was expecting ${params.length} weights. 
` +\n `Provided weights: ${weights}...`);\n }\n if (params.length === 0) {\n return;\n }\n const weightValueTuples: Array<[LayerVariable, Tensor]> = [];\n const paramValues = batchGetValue(params);\n for (let i = 0; i < paramValues.length; ++i) {\n const pv = paramValues[i];\n const p = params[i];\n const w = weights[i];\n if (!util.arraysEqual(pv.shape, w.shape)) {\n throw new ValueError(\n `Layer weight shape ${pv.shape} ` +\n `not compatible with provided weight shape ${w.shape}`);\n }\n weightValueTuples.push([p, w]);\n }\n batchSetValue(weightValueTuples);\n });\n }\n\n /**\n * Adds a weight variable to the layer.\n *\n * @param name Name of the new weight variable.\n * @param shape The shape of the weight.\n * @param dtype The dtype of the weight.\n * @param initializer An initializer instance.\n * @param regularizer A regularizer instance.\n * @param trainable Whether the weight should be trained via backprop or not\n * (assuming that the layer itself is also trainable).\n * @param constraint An optional trainable.\n * @return The created weight variable.\n *\n * @doc {heading: 'Models', 'subheading': 'Classes'}\n */\n protected addWeight(\n name: string, shape: Shape, dtype?: DataType, initializer?: Initializer,\n regularizer?: Regularizer, trainable?: boolean, constraint?: Constraint,\n getInitializerFunc?: Function): LayerVariable {\n // Reject duplicate weight names.\n if (this._addedWeightNames.indexOf(name) !== -1) {\n throw new ValueError(\n `Duplicate weight name ${name} for layer ${this.name}`);\n }\n this._addedWeightNames.push(name);\n\n if (dtype == null) {\n dtype = 'float32';\n }\n\n if (this.fastWeightInitDuringBuild) {\n initializer = getInitializerFunc != null ? getInitializerFunc() :\n getInitializer('zeros');\n }\n const initValue = initializer.apply(shape, dtype);\n const weight =\n new LayerVariable(initValue, dtype, name, trainable, constraint);\n initValue.dispose();\n // Request backend not to dispose the weights of the model on scope() exit.\n if (regularizer != null) {\n this.addLoss(() => regularizer.apply(weight.read()));\n }\n if (trainable == null) {\n trainable = true;\n }\n if (trainable) {\n this._trainableWeights.push(weight);\n } else {\n this._nonTrainableWeights.push(weight);\n }\n return weight;\n }\n\n /**\n * Set the fast-weight-initialization flag.\n *\n * In cases where the initialized weight values will be immediately\n * overwritten by loaded weight values during model loading, setting\n * the flag to `true` saves unnecessary calls to potentially expensive\n * initializers and speeds up the loading process.\n *\n * @param value Target value of the flag.\n */\n setFastWeightInitDuringBuild(value: boolean) {\n this.fastWeightInitDuringBuild = value;\n }\n\n /**\n * Add losses to the layer.\n *\n * The loss may potentially be conditional on some inputs tensors,\n * for instance activity losses are conditional on the layer's inputs.\n *\n * @doc {heading: 'Models', 'subheading': 'Classes'}\n */\n addLoss(losses: RegularizerFn|RegularizerFn[]): void {\n if (losses == null || Array.isArray(losses) && losses.length === 0) {\n return;\n }\n // Update this.losses\n losses = generic_utils.toList(losses);\n if (this._losses !== undefined && this._losses !== null) {\n this.losses.push(...losses);\n }\n }\n\n /**\n * Computes the output shape of the layer.\n *\n * Assumes that the layer will be built to match that input shape provided.\n *\n * @param inputShape A shape (tuple of integers) or a list of shape tuples\n * (one per output tensor of the 
layer). Shape tuples can include null for\n * free dimensions, instead of an integer.\n *\n * @doc {heading: 'Models', 'subheading': 'Classes'}\n */\n computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n return inputShape;\n }\n\n /**\n * Computes an output mask tensor.\n *\n * @param inputs Tensor or list of tensors.\n * @param mask Tensor or list of tensors.\n *\n * @return null or a tensor (or list of tensors, one per output tensor of the\n * layer).\n */\n computeMask(inputs: Tensor|Tensor[], mask?: Tensor|Tensor[]): Tensor\n |Tensor[] {\n if (!this.supportsMasking) {\n if (mask != null) {\n if (Array.isArray(mask)) {\n mask.forEach(maskElement => {\n if (maskElement != null) {\n throw new TypeError(\n `Layer ${this.name} does not support masking, ` +\n 'but was passed an inputMask.');\n }\n });\n } else {\n throw new TypeError(\n `Layer ${this.name} does not support masking, ` +\n 'but was passed an inputMask.');\n }\n }\n // masking not explicitly supported: return null as mask\n return null;\n }\n // if masking is explictly supported, by default\n // carry over the input mask\n return mask;\n }\n\n /**\n * Internal method to create an inbound node for the layer.\n *\n * @param inputTensors List of input tensors.\n * @param outputTensors List of output tensors.\n * @param inputMasks List of input masks (a mask can be a tensor, or null).\n * @param outputMasks List of output masks (a mask can be a tensor, or null).\n * @param inputShapes List of input shape tuples.\n * @param outputShapes List of output shape tuples.\n * @param kwargs Dictionary of keyword arguments that were passed to the\n * `call` method of the layer at the call that created the node.\n */\n private addInboundNode(\n inputTensors: SymbolicTensor|SymbolicTensor[],\n outputTensors: SymbolicTensor|SymbolicTensor[],\n inputMasks: Tensor|Tensor[], outputMasks: Tensor|Tensor[],\n inputShapes: Shape|Shape[], outputShapes: Shape|Shape[],\n kwargs: {} = null): void {\n const inputTensorList: SymbolicTensor[] =\n generic_utils.toList(inputTensors);\n outputTensors = generic_utils.toList(outputTensors);\n inputMasks = generic_utils.toList(inputMasks);\n outputMasks = generic_utils.toList(outputMasks);\n inputShapes = types_utils.normalizeShapeList(inputShapes);\n outputShapes = types_utils.normalizeShapeList(outputShapes);\n\n // Collect input tensor(s) coordinates.\n const inboundLayers: Layer[] = [];\n const nodeIndices: number[] = [];\n const tensorIndices: number[] = [];\n for (const x of inputTensorList) {\n /*\n * TODO(michaelterry): Keras adds this value to tensors; it's not\n * clear whether we'll use this or not.\n */\n inboundLayers.push(x.sourceLayer);\n nodeIndices.push(x.nodeIndex);\n tensorIndices.push(x.tensorIndex);\n }\n\n // Create node, add it to inbound nodes.\n // (This call has side effects.)\n // tslint:disable-next-line:no-unused-expression\n new Node(\n {\n outboundLayer: this,\n inboundLayers,\n nodeIndices,\n tensorIndices,\n inputTensors: inputTensorList,\n outputTensors,\n inputMasks,\n outputMasks,\n inputShapes,\n outputShapes\n },\n kwargs);\n\n // Update tensor history\n for (let i = 0; i < outputTensors.length; i++) {\n // TODO(michaelterry: _uses_learning_phase not tracked.\n outputTensors[i].sourceLayer = this;\n outputTensors[i].nodeIndex = this.inboundNodes.length - 1;\n outputTensors[i].tensorIndex = i;\n }\n }\n\n /**\n * Returns the config of the layer.\n *\n * A layer config is a TS dictionary (serializable)\n * containing the configuration of a layer.\n * The same layer 
can be reinstantiated later\n * (without its trained weights) from this configuration.\n *\n * The config of a layer does not include connectivity\n * information, nor the layer class name. These are handled\n * by 'Container' (one layer of abstraction above).\n *\n * Porting Note: The TS dictionary follows TS naming standards for\n * keys, and uses tfjs-layers type-safe Enums. Serialization methods\n * should use a helper function to convert to the pythonic storage\n * standard. (see serialization_utils.convertTsToPythonic)\n *\n * @returns TS dictionary of configuration.\n *\n * @doc {heading: 'Models', 'subheading': 'Classes'}\n */\n getConfig(): serialization.ConfigDict {\n const config:\n serialization.ConfigDict = {name: this.name, trainable: this.trainable};\n if (this.batchInputShape != null) {\n config['batchInputShape'] = this.batchInputShape;\n }\n if (this.dtype != null) {\n config['dtype'] = this.dtype;\n }\n return config;\n }\n\n /**\n * Dispose the weight variables that this Layer instance holds.\n *\n * @returns {number} Number of disposed variables.\n */\n protected disposeWeights(): number {\n this.weights.forEach(weight => weight.dispose());\n return this.weights.length;\n }\n\n protected assertNotDisposed() {\n if (this._refCount === 0) {\n throw new Error(`Layer '${this.name}' is already disposed.`);\n }\n }\n\n /**\n * Attempt to dispose layer's weights.\n *\n * This method decreases the reference count of the Layer object by 1.\n *\n * A Layer is reference-counted. Its reference count is incremented by 1\n * the first item its `apply()` method is called and when it becomes a part\n * of a new `Node` (through calling the `apply()` method on a\n * `tf.SymbolicTensor`).\n *\n * If the reference count of a Layer becomes 0, all the weights will be\n * disposed and the underlying memory (e.g., the textures allocated in WebGL)\n * will be freed.\n *\n * Note: If the reference count is greater than 0 after the decrement, the\n * weights of the Layer will *not* be disposed.\n *\n * After a Layer is disposed, it cannot be used in calls such as `apply()`,\n * `getWeights()` or `setWeights()` anymore.\n *\n * @returns A DisposeResult Object with the following fields:\n * - refCountAfterDispose: The reference count of the Container after this\n * `dispose()` call.\n * - numDisposedVariables: Number of `tf.Variable`s (i.e., weights) disposed\n * during this `dispose()` call.\n * @throws {Error} If the layer is not built yet, or if the layer has already\n * been disposed.\n *\n * @doc {heading: 'Models', 'subheading': 'Classes'}\n */\n dispose(): DisposeResult {\n if (!this.built) {\n throw new Error(\n `Cannot dispose Layer ${this.name} because it has not been ` +\n `built yet.`);\n }\n\n if (this._refCount === null) {\n throw new Error(\n `Cannot dispose Layer ${this.name} because it has not been used ` +\n `yet.`);\n }\n\n this.assertNotDisposed();\n\n let numDisposedVariables = 0;\n if (--this._refCount === 0) {\n numDisposedVariables = this.disposeWeights();\n }\n\n return {refCountAfterDispose: this._refCount, numDisposedVariables};\n }\n}\n\n/**\n * Collects the input shape(s) of a list of `tf.Tensor`s or\n * `tf.SymbolicTensor`s.\n *\n * TODO(michaelterry): Update PyKeras docs (backport).\n *\n * @param inputTensors List of input tensors (or single input tensor).\n *\n * @return List of shape tuples (or single tuple), one tuple per input.\n */\nfunction collectInputShape(inputTensors: SymbolicTensor|SymbolicTensor[]|Tensor|\n Tensor[]): Shape|Shape[] {\n inputTensors =\n 
generic_utils.toList(inputTensors) as SymbolicTensor[] | Tensor[];\n const shapes: Shape[] = [];\n for (const x of inputTensors) {\n shapes.push(x.shape);\n }\n return generic_utils.singletonOrArray(shapes);\n}\n\n/**\n * Guesses output dtype based on inputs.\n *\n * At present, just returns 'float32' for any input.\n *\n * @param inputTensors List of input tensors (or single input tensor).\n *\n * @return The guessed DType. At present, always returns 'float32'.\n */\nfunction guessOutputDType(inputTensors: SymbolicTensor|SymbolicTensor[]|Tensor|\n Tensor[]): DataType {\n return 'float32';\n}\n\n/**\n * Returns the list of input tensors necessary to compute `tensor`.\n *\n * Output will always be a list of tensors (potentially with 1 element).\n *\n * @param tensor The tensor to start from.\n * @param layer Origin layer of the tensor.\n * @param nodeIndex Origin node index of the tensor.\n *\n * @return Array of input tensors.\n */\nexport function getSourceInputs(\n tensor: SymbolicTensor, layer?: Layer,\n nodeIndex?: number): SymbolicTensor[] {\n if (layer == null || (nodeIndex != null && nodeIndex > 0)) {\n layer = tensor.sourceLayer;\n nodeIndex = tensor.nodeIndex;\n }\n if (layer.inboundNodes.length === 0) {\n return [tensor];\n } else {\n const node = layer.inboundNodes[nodeIndex];\n if (node.inboundLayers.length === 0) {\n return node.inputTensors;\n } else {\n const sourceTensors: SymbolicTensor[] = [];\n for (let i = 0; i < node.inboundLayers.length; i++) {\n const x = node.inputTensors[i];\n const layer = node.inboundLayers[i];\n const nodeIndex = node.nodeIndices[i];\n const previousSources = getSourceInputs(x, layer, nodeIndex);\n // Avoid input redundancy.\n for (const x of previousSources) {\n if (sourceTensors.indexOf(x) === -1) {\n sourceTensors.push(x);\n }\n }\n }\n return sourceTensors;\n }\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\nimport {DataType, serialization, Tensor} from '@tensorflow/tfjs-core';\n\nimport {getUid} from '../backend/state';\nimport {ValueError} from '../errors';\nimport {Shape} from '../keras_format/common';\nimport {Kwargs} from '../types';\n\nimport {DisposeResult, Layer, Node, SymbolicTensor} from './topology';\n\n/**\n * Constructor arguments for InputLayer.\n *\n * Note: You should provide only inputShape or batchInputShape (not both).\n * If only inputShape is provided, then the batchInputShape is determined by\n * the batchSize argument and the inputShape: [batchSize].concat(inputShape).\n */\nexport declare interface InputLayerArgs {\n /** Input shape, not including the batch axis. */\n inputShape?: Shape;\n /** Optional input batch size (integer or null). */\n batchSize?: number;\n /** Batch input shape, including the batch axis. */\n batchInputShape?: Shape;\n /** Datatype of the input. */\n dtype?: DataType;\n /**\n * Whether the placeholder created is meant to be sparse.\n */\n sparse?: boolean; // TODO(michaelterry): Not clear whether we'll need this.\n\n /** Name of the layer. */\n name?: string;\n}\n\nexport class InputLayer extends Layer {\n /** @nocollapse */\n static readonly className = 'InputLayer';\n sparse: boolean;\n constructor(args: InputLayerArgs) {\n super({\n dtype: args.dtype,\n name: args.name != null ? 
args.name : getUid('input').toString()\n });\n // Normalize config.batchSize and config.sparse\n if (args.batchSize == null) {\n args.batchSize = null;\n }\n if (args.sparse == null) {\n args.sparse = false;\n }\n\n this.trainable = false;\n this.built = true;\n this.sparse = args.sparse;\n\n if (args.inputShape != null && args.batchInputShape != null) {\n throw new ValueError(\n 'Only provide the inputShape OR ' +\n 'batchInputShape argument to inputLayer, not both at the same time.');\n }\n let batchInputShape = args.batchInputShape;\n if (batchInputShape == null) {\n if (args.inputShape == null) {\n throw new ValueError(\n 'An InputLayer should be passed either a ' +\n '`batchInputShape` or an `inputShape`.');\n } else {\n batchInputShape = [args.batchSize].concat(args.inputShape);\n }\n } else {\n // TODO(michaelterry): Backport to PyKeras\n if (args.batchSize != null) {\n throw new ValueError(\n 'Cannot specify batchSize if batchInputShape is ' +\n 'specified when creating an InputLayer.');\n }\n }\n\n const dtype = args.dtype || 'float32';\n\n this.batchInputShape = batchInputShape;\n this.dtype = dtype;\n // TODO(michaelterry): Backport this to PyKeras?\n this.inputSpec = [{shape: batchInputShape}];\n\n const inputTensor = new SymbolicTensor(\n this.dtype, this.batchInputShape, this, [], {}, this.name);\n inputTensor.nodeIndex = 0;\n inputTensor.tensorIndex = 0;\n\n // Create an input node to add to this.outboundNode.\n // (This call has side effects.)\n // tslint:disable-next-line:no-unused-expression\n new Node({\n outboundLayer: this,\n inboundLayers: [],\n nodeIndices: [],\n tensorIndices: [],\n inputTensors: [inputTensor],\n outputTensors: [inputTensor],\n inputMasks: [null],\n outputMasks: [null],\n inputShapes: [batchInputShape],\n outputShapes: [batchInputShape]\n });\n }\n\n override apply(\n inputs: Tensor|Tensor[]|SymbolicTensor|SymbolicTensor[],\n kwargs?: Kwargs): Tensor|Tensor[]|SymbolicTensor {\n throw new ValueError(\n 'Cannot pass any input to an ' +\n `InputLayer's apply() method. InputLayer name: ${this.name}`);\n }\n\n override dispose(): DisposeResult {\n // dispose() for InputLayer is overridden as no-op.\n return {refCountAfterDispose: this._refCount, numDisposedVariables: 0};\n }\n\n override getConfig(): serialization.ConfigDict {\n return {\n batchInputShape: this.batchInputShape,\n dtype: this.dtype,\n sparse: this.sparse,\n name: this.name\n };\n }\n}\nserialization.registerClass(InputLayer);\n\n/**\n * Config for the Input function.\n *\n * Note: You should provide only shape or batchShape (not both).\n * If only shape is provided, then the batchShape becomes\n * [null].concat(inputShape).\n */\nexport interface InputConfig {\n /**\n * A shape, not including the batch size. For instance, `shape=[32]`\n * indicates that the expected input will be batches of 32-dimensional\n * vectors.\n */\n shape?: Shape;\n /**\n * A shape tuple (integer), including the batch size. For instance,\n * `batchShape=[10, 32]` indicates that the expected input will be batches of\n * 10 32-dimensional vectors. `batchShape=[null, 32]` indicates batches of an\n * arbitrary number of 32-dimensional vectors.\n */\n batchShape?: Shape;\n /**\n * An optional name string for the layer. Should be unique in a model (do not\n * reuse the same name twice). 
It will be autogenerated if it isn't provided.\n */\n name?: string;\n dtype?: DataType;\n /**\n * A boolean specifying whether the placeholder to be created is sparse.\n */\n sparse?: boolean;\n}\n\nexport function Input(config: InputConfig): SymbolicTensor {\n if (config.batchShape == null && config.shape == null) {\n throw new Error(\n 'Please provide to Input either a `shape`' +\n ' or a `batchShape` argument. Note that ' +\n '`shape` does not include the batch ' +\n 'dimension.');\n }\n if (config.batchShape != null && config.shape != null) {\n // TODO(michaelterry): Backport to PyKeras.\n throw new ValueError(\n 'Please provide either a `shape` or `batchShape` ' +\n 'argument to Input, but not both.');\n }\n let batchShape = config.batchShape;\n if (config.shape != null && batchShape == null) {\n batchShape = [null].concat(config.shape);\n }\n\n let dtype = config.dtype;\n if (dtype == null) {\n dtype = 'float32';\n }\n\n const inputLayer = new InputLayer({\n batchInputShape: batchShape,\n name: config.name,\n dtype,\n sparse: config.sparse\n });\n\n const outputs = inputLayer.inboundNodes[0].outputTensors;\n return outputs[0];\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * Executor: Evaluates SymbolicTensor based on feeds.\n */\n\nimport {cast, dispose, memory, Tensor, util} from '@tensorflow/tfjs-core';\n\nimport {ValueError} from '../errors';\nimport {Kwargs} from '../types';\nimport {LruCache} from '../utils/executor_utils';\nimport {toList} from '../utils/generic_utils';\n\nimport {InputLayer} from './input_layer';\nimport {SymbolicTensor} from './topology';\n\n/**\n * Helper function to check the dtype and shape compatibility of a feed value.\n */\nfunction assertFeedCompatibility(key: SymbolicTensor, val: Tensor): Tensor {\n // Check dtype compatibility.\n if (key.dtype == null || key.dtype === val.dtype) {\n // a. If types match, return val tensor as is.\n return val;\n }\n try {\n // b. Attempt to convert to expected type.\n return cast(val, key.dtype);\n } catch (err) {\n // c. 
If conversion fails, return helpful error.\n throw new ValueError(\n `The dtype of the feed (${val.dtype}) can not be cast to the dtype ` +\n `of the key '${key.name}' (${key.dtype}).`);\n }\n}\n\n/**\n * A concrete Tensor value for a symbolic tensor as the key.\n */\nexport interface Feed {\n key: SymbolicTensor;\n value: Tensor;\n}\n\n/**\n * FeedDict: A mapping from unique SymbolicTensors to feed values for them.\n * A feed value is a concrete value represented as an `Tensor`.\n */\nexport class FeedDict {\n private id2Value: {[id: number]: Tensor} = {};\n private id2Mask: {[id: number]: Tensor} = {};\n private name2Id: {[name: string]: number} = {};\n\n /**\n * Constructor, optionally does copy-construction.\n * @param feeds An Array of `Feed`s, or another `FeedDict`, in which case\n * copy-construction will be performed.\n */\n constructor(feeds?: Feed[]|FeedDict) {\n if (feeds instanceof FeedDict) {\n for (const id in feeds.id2Value) {\n this.id2Value[id] = feeds.id2Value[id];\n if (id in feeds.id2Mask) {\n this.id2Mask[id] = feeds.id2Mask[id];\n }\n }\n } else {\n if (feeds == null) {\n return;\n }\n for (const feed of feeds) {\n this.add(feed.key, feed.value);\n }\n }\n }\n\n /**\n * Add a key-value pair to the FeedDict.\n *\n * @param key The key of the feed.\n * @param value The value of the tensor feed.\n * @param mask The value of the mask feed (optional).\n * @returns This `FeedDict`.\n * @throws ValueError: If the key `SymbolicTensor` already exists in the\n * `FeedDict`.\n */\n add(key: SymbolicTensor, value: Tensor, mask?: Tensor): FeedDict {\n if (this.id2Value[key.id] == null) {\n this.id2Value[key.id] = assertFeedCompatibility(key, value);\n this.name2Id[key.name] = key.id;\n if (mask != null) {\n this.id2Mask[key.id] = mask;\n }\n } else {\n throw new ValueError(`Duplicate key: name=${key.name}, id=${key.id}`);\n }\n return this;\n }\n\n /**\n * Add a Feed to the FeedDict.\n * @param feed The new `Feed` to add.\n * @returns This `FeedDict`.\n */\n addFeed(feed: Feed) {\n this.add(feed.key, feed.value);\n }\n\n /**\n * Probe whether a key already exists in the FeedDict.\n * @param key\n */\n hasKey(key: SymbolicTensor): boolean {\n return this.id2Value[key.id] != null;\n }\n\n /**\n * Get all the SymbolicTensor available in this FeedDict.\n */\n names(): string[] {\n return Object.keys(this.name2Id);\n }\n\n /**\n * Get the feed value for given key.\n * @param key The SymbolicTensor, or its name (as a string), of which the\n * value is sought.\n * @returns If `key` exists, the corresponding feed value.\n * @throws ValueError: If `key` does not exist in this `FeedDict`.\n */\n getValue(key: SymbolicTensor|string): Tensor {\n if (key instanceof SymbolicTensor) {\n if (this.id2Value[key.id] == null) {\n throw new ValueError(`Nonexistent key: ${key.name}`);\n } else {\n return this.id2Value[key.id];\n }\n } else {\n const id = this.name2Id[key];\n if (id == null) {\n throw new ValueError(`Feed dict has no SymbolicTensor name: ${key}`);\n }\n return this.id2Value[id];\n }\n }\n\n /**\n * Get the feed mask for given key.\n * @param key The SymbolicTensor, or its name (as a string), of which the\n * value is sought.\n * @returns If `key` exists, the corresponding feed mask.\n * @throws ValueError: If `key` does not exist in this `FeedDict`.\n */\n getMask(key: SymbolicTensor|string): Tensor {\n if (key instanceof SymbolicTensor) {\n if (this.id2Value[key.id] == null) {\n throw new ValueError(`Nonexistent key: ${key.name}`);\n } else {\n return this.id2Mask[key.id];\n }\n } else 
{\n const id = this.name2Id[key];\n if (id == null) {\n throw new ValueError(`Feed dict has no SymbolicTensor name: ${key}`);\n }\n return this.id2Mask[id];\n }\n }\n\n /** Dispose all mask Tensors held by this object. */\n disposeMasks() {\n if (this.id2Mask != null) {\n dispose(this.id2Mask);\n }\n }\n}\n\n// Cache for topologically sorted SymbolicTensors for given execution\n// targets (i.e., fetches).\nexport const cachedSorted: LruCache =\n new LruCache();\n\n// Cache for recipient count maps for given execution targets (i.e., fetches).\nexport const cachedRecipientCounts: LruCache =\n new LruCache();\n\nexport function updateCacheMaxEntries(maxEntries: number) {\n if (cachedSorted != null) {\n cachedSorted.setMaxEntries(maxEntries);\n }\n if (cachedRecipientCounts != null) {\n cachedRecipientCounts.setMaxEntries(maxEntries);\n }\n}\n\n/**\n * Interface for the optional object used for probing the memory\n * usage and other statistics during execution.\n */\nexport interface ExecutionProbe {\n /**\n * Maximum number of tensors that exist during all steps of the\n * execution. Tensor counts are measured at the beginning of every\n * step.\n */\n maxNumTensors?: number;\n\n /**\n * Minimum number of tensors that exist during all steps of the\n * execution. Tensor counts are measured at the beginning of every\n * step.\n */\n minNumTensors?: number;\n}\n\n/**\n * Execute a SymbolicTensor by using concrete feed values.\n *\n * A `SymbolicTensor` object is a node in a computation graph of TF.js\n * Layers. The object is backed by a source layer and input\n * `SymbolicTensor`s to the source layer. This method evaluates\n * the `call()` method of the source layer, using concrete values of the\n * inputs obtained from either\n * * `feedDict`, if the input key exists in `feedDict`, or else,\n * * a recursive call to `execute()` itself.\n *\n * @param x: The `SymbolicTensor` to execute.\n * @param feedDict: The feed values, as base condition of the recursion.\n * execution.\n * @param kwargs: Optional keyword arguments.\n * @param probe: A probe object (of interface `ExecutionProbe`) used for\n * testing memory footprint of `execute` calls.\n * @returns Result of the execution.\n * @throws ValueError: If any `SymbolicTensor`s from `InputLayer`s\n * encountered during the execution lacks a feed value in `feedDict`.\n */\nexport function execute(\n fetches: SymbolicTensor|SymbolicTensor[], feedDict: FeedDict,\n kwargs?: Kwargs, probe?: ExecutionProbe): Tensor|\n Tensor[]|[Tensor | Tensor[]] {\n const training: boolean = kwargs == null ? false : kwargs['training'];\n\n const arrayFetches = Array.isArray(fetches);\n const fetchArray: SymbolicTensor[] =\n arrayFetches ? fetches : [fetches];\n\n const outputNames = fetchArray.map(t => t.name);\n const finalOutputs: Tensor[] = [];\n const feedNames = feedDict.names();\n for (const outputName of outputNames) {\n if (feedNames.indexOf(outputName) !== -1) {\n finalOutputs.push(feedDict.getValue(outputName));\n } else {\n finalOutputs.push(null);\n }\n }\n\n if (probe != null) {\n // For optional probing of memory footprint during execution.\n probe.maxNumTensors = -Infinity;\n probe.minNumTensors = Infinity;\n }\n\n // Check cache.\n const fetchAndFeedKey =\n outputNames.join(',') + '|' + feedDict.names().sort().join(',');\n let sorted: SymbolicTensor[] = cachedSorted.get(fetchAndFeedKey);\n let recipientCounts: {[fetchName: string]: number};\n if (sorted == null) {\n // Cache doesn't contain the desired combination of fetches. 
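The executor source embedded above defines `Feed`, `FeedDict`, and `execute()`. A minimal sketch of how they fit together, assuming a `LayersModel` whose `inputs`/`outputs` expose the needed `SymbolicTensor`s; the deep import path is an assumption for illustration only, since these symbols are internal rather than part of the public `@tensorflow/tfjs` surface:

import * as tf from '@tensorflow/tfjs';
// Hypothetical internal import path, shown only for illustration.
import {FeedDict, execute} from '@tensorflow/tfjs-layers/dist/engine/executor';

async function run() {
  const model = tf.sequential();
  model.add(tf.layers.dense({units: 3, inputShape: [4]}));

  // Feed a concrete Tensor for the model's input SymbolicTensor...
  const feeds = new FeedDict([{key: model.inputs[0], value: tf.ones([2, 4])}]);
  // ...then execute the graph up to the output SymbolicTensor.
  const out = execute(model.outputs[0], feeds) as tf.Tensor;
  out.print();
}
run();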
Compute\n // topological sort for the combination for the first time.\n const out = getTopologicalSortAndRecipientCounts(fetchArray, feedDict);\n sorted = out.sorted;\n recipientCounts = out.recipientCounts;\n\n // Store results in cache for future use.\n cachedSorted.put(fetchAndFeedKey, sorted);\n cachedRecipientCounts.put(fetchAndFeedKey, recipientCounts);\n }\n recipientCounts = {};\n if (!training) {\n Object.assign(recipientCounts, cachedRecipientCounts.get(fetchAndFeedKey));\n }\n\n const internalFeedDict = new FeedDict(feedDict);\n\n // Start iterative execution on the topologically-sorted SymbolicTensors.\n for (let i = 0; i < sorted.length; ++i) {\n if (probe != null) {\n // For optional probing of memory usage during execution.\n const numTensors = memory().numTensors;\n if (numTensors > probe.maxNumTensors) {\n probe.maxNumTensors = numTensors;\n }\n if (numTensors < probe.minNumTensors) {\n probe.minNumTensors = numTensors;\n }\n }\n\n const symbolic = sorted[i];\n const srcLayer = symbolic.sourceLayer;\n if (srcLayer instanceof InputLayer) {\n continue;\n }\n const inputValues: Tensor[] = [];\n const inputMasks: Tensor[] = [];\n const tensorsToDispose: Tensor[] = [];\n\n let maskExists = false;\n for (const input of symbolic.inputs) {\n const value = internalFeedDict.getValue(input);\n const mask = internalFeedDict.getMask(input);\n inputValues.push(value);\n inputMasks.push(mask);\n if (mask != null) {\n maskExists = true;\n }\n if (!training) {\n recipientCounts[input.name]--;\n if (recipientCounts[input.name] === 0 && !feedDict.hasKey(input) &&\n outputNames.indexOf(input.name) === -1 && !value.isDisposed &&\n input.sourceLayer.stateful !== true) {\n tensorsToDispose.push(value);\n }\n }\n }\n\n if (maskExists) {\n kwargs = kwargs || {};\n kwargs['mask'] = inputMasks[0];\n }\n const outputTensors =\n toList(srcLayer.apply(inputValues, kwargs)) as Tensor[];\n let outputMask: Tensor|Tensor[] = null;\n if (srcLayer.supportsMasking) {\n outputMask = srcLayer.computeMask(inputValues, inputMasks);\n }\n const layerOutputs = getNodeOutputs(symbolic);\n const outputSymbolicTensors =\n Array.isArray(layerOutputs) ? layerOutputs : [layerOutputs];\n for (let i = 0; i < outputSymbolicTensors.length; ++i) {\n if (!internalFeedDict.hasKey(outputSymbolicTensors[i])) {\n internalFeedDict.add(\n outputSymbolicTensors[i], outputTensors[i],\n Array.isArray(outputMask) ? outputMask[0] : outputMask);\n }\n const index = outputNames.indexOf(outputSymbolicTensors[i].name);\n if (index !== -1) {\n finalOutputs[index] = outputTensors[i];\n }\n }\n\n if (!training) {\n // Clean up Tensors that are no longer needed.\n dispose(tensorsToDispose);\n }\n }\n // NOTE(cais): Unlike intermediate tensors, we don't discard mask\n // tensors as we go, because these tensors are sometimes passed over a\n // series of mutliple layers, i.e., not obeying the immediate input\n // relations in the graph. If this becomes a memory-usage concern,\n // we can improve this in the future.\n internalFeedDict.disposeMasks();\n\n return arrayFetches ? finalOutputs : finalOutputs[0];\n}\n\ntype RecipientCounts = {\n [fetchName: string]: number\n};\n\nexport type RecipientMap = {\n [fetchName: string]: Set;\n};\n\n/**\n * Sort the `SymbolicTensor`s topologically, for an array of fetches.\n *\n * This function calls getTopologicalSortAndRecipientCountsForOneFetch and\n * merges their results.\n *\n * @param fetch The array of fetches requested. 
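The execution loop above frees intermediate activations eagerly by tracking, for each `SymbolicTensor`, how many downstream consumers have not yet read its value. A simplified, self-contained sketch of that reference-counting idea (the names here are illustrative, not the library's):

import {Tensor} from '@tensorflow/tfjs';

// recipientCounts[name] = number of consumers that still need this value.
function consumeInput(
    name: string, value: Tensor, recipientCounts: {[name: string]: number},
    isFeedOrFetch: boolean): void {
  recipientCounts[name]--;
  // Once no consumer remains, and the value is neither a feed nor a requested
  // output, its backing memory can be released immediately.
  if (recipientCounts[name] === 0 && !isFeedOrFetch && !value.isDisposed) {
    value.dispose();
  }
}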
Must be a non-empty array.\n * @param feedDict The dictionary of fed values.\n * @returns sorted: Topologically-sorted array of SymbolicTensors.\n * recipientCounts: Recipient counts for all SymbolicTensors in `sorted`.\n */\nfunction getTopologicalSortAndRecipientCounts(\n fetches: SymbolicTensor[], feedDict: FeedDict):\n {sorted: SymbolicTensor[], recipientCounts: RecipientCounts} {\n util.assert(\n fetches != null && fetches.length > 0,\n () => `Expected at least one fetch, got none`);\n\n let finalSorted: SymbolicTensor[] = [];\n let finalRecipientMap: RecipientMap = {};\n if (fetches.length === 1) {\n // Special-casing 1 fetch for efficiency.\n const out =\n getTopologicalSortAndRecipientCountsForOneFetch(fetches[0], feedDict);\n finalSorted = out.sorted;\n finalRecipientMap = out.recipientMap;\n } else {\n const visited = new Set();\n for (const fetch of fetches) {\n const {sorted, recipientMap} =\n getTopologicalSortAndRecipientCountsForOneFetch(fetch, feedDict);\n\n // Merge sorted SymbolicTensor Arrays.\n for (const symbolicTensor of sorted) {\n if (!visited.has(symbolicTensor.name)) {\n finalSorted.push(symbolicTensor);\n visited.add(symbolicTensor.name);\n }\n }\n\n // Merge recipient maps.\n for (const name in recipientMap) {\n if (finalRecipientMap[name] == null) {\n finalRecipientMap[name] = new Set();\n }\n recipientMap[name].forEach(\n recipient => finalRecipientMap[name].add(recipient));\n }\n }\n }\n return {\n sorted: finalSorted,\n recipientCounts: recipientMap2Counts(finalRecipientMap)\n };\n}\n\nfunction recipientMap2Counts(recipientMap: RecipientMap): RecipientCounts {\n const recipientCounts: RecipientCounts = {};\n for (const name in recipientMap) {\n recipientCounts[name] = recipientMap[name].size;\n }\n return recipientCounts;\n}\n\n/**\n * Sort the `SymbolicTensor`s topologically, for a single fetch.\n *\n * This helper function processes the upstream SymbolicTensors of a single\n * fetch.\n *\n * @param fetch The single fetch requested.\n * @param feedDict The dictionary of fed values.\n * @returns sorted: Topologically-sorted array of SymbolicTensors.\n * recipientMap: Recipient names for all SymbolicTensors in `sorted`.\n */\nexport function getTopologicalSortAndRecipientCountsForOneFetch(\n fetch: SymbolicTensor, feedDict: FeedDict):\n {sorted: SymbolicTensor[], recipientMap: RecipientMap} {\n const visited = new Set();\n const sorted: SymbolicTensor[] = [];\n const recipientMap: RecipientMap = {};\n\n // Put keys of the feedDict into visited first, so they don't have to be\n // walked. This is needed in case where there are feeds for intermediate\n // SymbolicTensors of the graph.\n for (const key of feedDict.names()) {\n visited.add(key);\n }\n\n const stack: SymbolicTensor[] = [];\n const marks: number[] = [];\n\n // Initial population of stack and marks.\n stack.push(fetch);\n\n while (stack.length > 0) {\n const top = stack[stack.length - 1];\n if (visited.has(top.name)) {\n stack.pop();\n continue;\n }\n const topIsMarked = marks[marks.length - 1] === stack.length - 1;\n if (top.inputs.length === 0 || topIsMarked) {\n // Input SymbolicTensor or all children have been visited.\n stack.pop();\n sorted.push(top);\n visited.add(top.name);\n if (topIsMarked) {\n marks.pop();\n }\n } else {\n // A non-input SymbolicTensor whose upstream SymbolicTensors haven't\n // been visited yet. Push them onto the stack.\n marks.push(stack.length - 1);\n for (const input of top.inputs) {\n // Increment the recipient count. 
Note that this needs to happen\n // regardless of whether the SymbolicTensor has been visited before.\n if (recipientMap[input.name] == null) {\n recipientMap[input.name] = new Set();\n }\n recipientMap[input.name].add(top.name);\n\n if (visited.has(input.name)) {\n continue; // Avoid repeated visits to the same SymbolicTensor.\n }\n stack.push(input);\n }\n }\n }\n return {sorted, recipientMap};\n}\n\n/**\n * Get the symbolic output tensors of the node to which a given fetch belongs.\n * @param fetch The fetched symbolic tensor.\n * @returns The Array of symbolic tensors output by the node to which `fetch`\n * belongs.\n */\nfunction getNodeOutputs(fetch: SymbolicTensor): SymbolicTensor|\n SymbolicTensor[] {\n let layerOutputs: SymbolicTensor|SymbolicTensor[];\n if (fetch.sourceLayer.inboundNodes.length === 1) {\n layerOutputs = fetch.sourceLayer.output;\n } else {\n let nodeIndex: number = null;\n for (let i = 0; i < fetch.sourceLayer.inboundNodes.length; ++i) {\n for (const outputTensor of fetch.sourceLayer.inboundNodes[i]\n .outputTensors) {\n if (outputTensor.id === fetch.id) {\n nodeIndex = i;\n break;\n }\n }\n }\n layerOutputs = fetch.sourceLayer.getOutputAt(nodeIndex);\n }\n return layerOutputs;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/* Original source: keras/contraints.py */\n\nimport * as tfc from '@tensorflow/tfjs-core';\nimport {serialization, Tensor, tidy} from '@tensorflow/tfjs-core';\nimport {epsilon} from './backend/common';\nimport {deserializeKerasObject, serializeKerasObject} from './utils/generic_utils';\n\n/**\n * Helper function used by many of the Constraints to find the L2Norms.\n */\nfunction calcL2Norms(w: Tensor, axis: number): Tensor {\n return tidy(() => tfc.sqrt(tfc.sum(tfc.mul(w, w), axis, true)));\n}\n\n/**\n * Base class for functions that impose constraints on weight values\n *\n * @doc {\n * heading: 'Constraints',\n * subheading: 'Classes',\n * namespace: 'constraints'\n * }\n */\nexport abstract class Constraint extends serialization.Serializable {\n /* Porting note: was __call__, apply chosen to match other similar choices */\n abstract apply(w: Tensor): Tensor;\n getConfig(): serialization.ConfigDict {\n return {};\n }\n}\n\nexport interface MaxNormArgs {\n /**\n * Maximum norm for incoming weights\n */\n maxValue?: number;\n /**\n * Axis along which to calculate norms.\n *\n * For instance, in a `Dense` layer the weight matrix\n * has shape `[inputDim, outputDim]`,\n * set `axis` to `0` to constrain each weight vector\n * of length `[inputDim,]`.\n * In a `Conv2D` layer with `dataFormat=\"channels_last\"`,\n * the weight tensor has shape\n * `[rows, cols, inputDepth, outputDepth]`,\n * set `axis` to `[0, 1, 2]`\n * to constrain the weights of each filter tensor of size\n * `[rows, cols, inputDepth]`.\n */\n axis?: number;\n}\n\nexport class MaxNorm extends Constraint {\n /** @nocollapse */\n static readonly className = 'MaxNorm';\n private maxValue: number;\n private axis: number;\n private readonly defaultMaxValue = 2;\n private readonly defaultAxis = 0;\n\n constructor(args: MaxNormArgs) {\n super();\n this.maxValue =\n args.maxValue != null ? args.maxValue : this.defaultMaxValue;\n this.axis = args.axis != null ? 
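`getTopologicalSortAndRecipientCountsForOneFetch` above walks the graph iteratively with an explicit stack plus a `marks` array rather than recursion. A stripped-down sketch of the same traversal over a generic node shape (the `GraphNode` interface is hypothetical, for illustration only):

interface GraphNode {
  name: string;
  inputs: GraphNode[];
}

// Returns the fetch's transitive inputs in dependency order, ending with the
// fetch itself; `visited` can be pre-seeded with names that are already fed.
function topoSort(fetch: GraphNode, visited = new Set<string>()): GraphNode[] {
  const sorted: GraphNode[] = [];
  const stack: GraphNode[] = [fetch];
  const marks: number[] = [];
  while (stack.length > 0) {
    const top = stack[stack.length - 1];
    if (visited.has(top.name)) {
      stack.pop();
      continue;
    }
    const topIsMarked = marks[marks.length - 1] === stack.length - 1;
    if (top.inputs.length === 0 || topIsMarked) {
      // A leaf, or a node whose inputs have all been emitted already.
      stack.pop();
      sorted.push(top);
      visited.add(top.name);
      if (topIsMarked) {
        marks.pop();
      }
    } else {
      // Revisit this node after its unvisited inputs have been processed.
      marks.push(stack.length - 1);
      for (const input of top.inputs) {
        if (!visited.has(input.name)) {
          stack.push(input);
        }
      }
    }
  }
  return sorted;
}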
args.axis : this.defaultAxis;\n }\n\n apply(w: Tensor): Tensor {\n return tidy(() => {\n const norms = calcL2Norms(w, this.axis);\n const desired = tfc.clipByValue(norms, 0, this.maxValue);\n return tfc.mul(w, tfc.div(desired, tfc.add(epsilon(), norms)));\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n return {maxValue: this.maxValue, axis: this.axis};\n }\n}\nserialization.registerClass(MaxNorm);\n\nexport interface UnitNormArgs {\n /**\n * Axis along which to calculate norms.\n *\n * For instance, in a `Dense` layer the weight matrix\n * has shape `[inputDim, outputDim]`,\n * set `axis` to `0` to constrain each weight vector\n * of length `[inputDim,]`.\n * In a `Conv2D` layer with `dataFormat=\"channels_last\"`,\n * the weight tensor has shape\n * `[rows, cols, inputDepth, outputDepth]`,\n * set `axis` to `[0, 1, 2]`\n * to constrain the weights of each filter tensor of size\n * `[rows, cols, inputDepth]`.\n */\n axis?: number;\n}\n\nexport class UnitNorm extends Constraint {\n /** @nocollapse */\n static readonly className = 'UnitNorm';\n private axis: number;\n private readonly defaultAxis = 0;\n constructor(args: UnitNormArgs) {\n super();\n this.axis = args.axis != null ? args.axis : this.defaultAxis;\n }\n\n apply(w: Tensor): Tensor {\n return tidy(\n () => tfc.div(w, tfc.add(epsilon(), calcL2Norms(w, this.axis))));\n }\n\n override getConfig(): serialization.ConfigDict {\n return {axis: this.axis};\n }\n}\nserialization.registerClass(UnitNorm);\n\nexport class NonNeg extends Constraint {\n /** @nocollapse */\n static readonly className = 'NonNeg';\n\n apply(w: Tensor): Tensor {\n return tfc.relu(w);\n }\n}\nserialization.registerClass(NonNeg);\n\nexport interface MinMaxNormArgs {\n /**\n * Minimum norm for incoming weights\n */\n minValue?: number;\n /**\n * Maximum norm for incoming weights\n */\n maxValue?: number;\n /**\n * Axis along which to calculate norms.\n * For instance, in a `Dense` layer the weight matrix\n * has shape `[inputDim, outputDim]`,\n * set `axis` to `0` to constrain each weight vector\n * of length `[inputDim,]`.\n * In a `Conv2D` layer with `dataFormat=\"channels_last\"`,\n * the weight tensor has shape\n * `[rows, cols, inputDepth, outputDepth]`,\n * set `axis` to `[0, 1, 2]`\n * to constrain the weights of each filter tensor of size\n * `[rows, cols, inputDepth]`.\n */\n axis?: number;\n /**\n * Rate for enforcing the constraint: weights will be rescaled to yield:\n * `(1 - rate) * norm + rate * norm.clip(minValue, maxValue)`.\n * Effectively, this means that rate=1.0 stands for strict\n * enforcement of the constraint, while rate<1.0 means that\n * weights will be rescaled at each step to slowly move\n * towards a value inside the desired interval.\n */\n rate?: number;\n}\n\nexport class MinMaxNorm extends Constraint {\n /** @nocollapse */\n static readonly className = 'MinMaxNorm';\n private minValue: number;\n private maxValue: number;\n private rate: number;\n private axis: number;\n private readonly defaultMinValue = 0.0;\n private readonly defaultMaxValue = 1.0;\n private readonly defaultRate = 1.0;\n private readonly defaultAxis = 0;\n\n constructor(args: MinMaxNormArgs) {\n super();\n this.minValue =\n args.minValue != null ? args.minValue : this.defaultMinValue;\n this.maxValue =\n args.maxValue != null ? args.maxValue : this.defaultMaxValue;\n this.rate = args.rate != null ? args.rate : this.defaultRate;\n this.axis = args.axis != null ? 
args.axis : this.defaultAxis;\n }\n\n apply(w: Tensor): Tensor {\n return tidy(() => {\n const norms = calcL2Norms(w, this.axis);\n const desired = tfc.add(\n tfc.mul(\n this.rate, tfc.clipByValue(norms, this.minValue, this.maxValue)),\n tfc.mul(1.0 - this.rate, norms));\n return tfc.mul(w, tfc.div(desired, tfc.add(epsilon(), norms)));\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n return {\n minValue: this.minValue,\n maxValue: this.maxValue,\n rate: this.rate,\n axis: this.axis\n };\n }\n}\nserialization.registerClass(MinMaxNorm);\n\n/** @docinline */\nexport type ConstraintIdentifier =\n 'maxNorm'|'minMaxNorm'|'nonNeg'|'unitNorm'|string;\n\n// Maps the JavaScript-like identifier keys to the corresponding registry\n// symbols.\nexport const CONSTRAINT_IDENTIFIER_REGISTRY_SYMBOL_MAP:\n {[identifier in ConstraintIdentifier]: string} = {\n 'maxNorm': 'MaxNorm',\n 'minMaxNorm': 'MinMaxNorm',\n 'nonNeg': 'NonNeg',\n 'unitNorm': 'UnitNorm'\n };\n\nexport function serializeConstraint(constraint: Constraint):\n serialization.ConfigDictValue {\n return serializeKerasObject(constraint);\n}\n\nexport function deserializeConstraint(\n config: serialization.ConfigDict,\n customObjects: serialization.ConfigDict = {}): Constraint {\n return deserializeKerasObject(\n config, serialization.SerializationMap.getMap().classNameMap,\n customObjects, 'constraint');\n}\n\nexport function getConstraint(identifier: ConstraintIdentifier|\n serialization.ConfigDict|Constraint): Constraint {\n if (identifier == null) {\n return null;\n }\n if (typeof identifier === 'string') {\n const className = identifier in CONSTRAINT_IDENTIFIER_REGISTRY_SYMBOL_MAP ?\n CONSTRAINT_IDENTIFIER_REGISTRY_SYMBOL_MAP[identifier] :\n identifier;\n const config = {className, config: {}};\n return deserializeConstraint(config);\n } else if (identifier instanceof Constraint) {\n return identifier;\n } else {\n return deserializeConstraint(identifier);\n }\n}\n","/**\n * @license\n * Copyright 2022 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env} from '@tensorflow/tfjs-core';\n\nimport {updateCacheMaxEntries} from './engine/executor';\n\nexport const ENV = env();\n\n/** The max number of entries for the caches of layers' topological sort. 
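In application code these constraints are normally attached to layers rather than applied by hand. A short sketch, assuming the public `tf.constraints.*` factories and the `kernelConstraint` layer option of `@tensorflow/tfjs`:

import * as tf from '@tensorflow/tfjs';

// Cap each kernel's L2 norm at 2 (MaxNorm), and keep the second layer's
// kernel norms inside [0.5, 1.5] (MinMaxNorm with the default rate of 1.0,
// i.e. strict enforcement).
const model = tf.sequential();
model.add(tf.layers.dense({
  units: 16,
  inputShape: [8],
  kernelConstraint: tf.constraints.maxNorm({maxValue: 2}),
}));
model.add(tf.layers.dense({
  units: 1,
  kernelConstraint: tf.constraints.minMaxNorm({minValue: 0.5, maxValue: 1.5}),
}));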
*/\nENV.registerFlag(\n 'TOPOLOGICAL_SORT_CACHE_MAX_ENTRIES', () => 100, updateCacheMaxEntries);\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/* Original source: keras/callbacks.py */\n\nimport {add, div, keep, mul, nextFrame, Scalar, Tensor, tidy, util} from '@tensorflow/tfjs-core';\n\nimport {Container} from './engine/container';\nimport {ValueError} from './errors';\nimport {Logs, resolveScalarsInLogs, UnresolvedLogs} from './logs';\nimport * as generic_utils from './utils/generic_utils';\n\n/** Verbosity logging level when fitting a model. */\nexport enum ModelLoggingVerbosity {\n SILENT = 0,\n VERBOSE = 1\n}\n\n/** How often to yield to the main thread when training (in ms). */\nexport const DEFAULT_YIELD_EVERY_MS = 125;\n\nexport type Params = {\n [key: string]: number|string|boolean|number[]|string[]|boolean[];\n};\n\nexport type YieldEveryOptions = 'auto'|'batch'|'epoch'|'never'|number;\n\n/**\n * Abstract base class used to build new callbacks.\n *\n * The `logs` dictionary that callback methods take as argument will contain\n * keys for quantities relevant to the current batch or epoch.\n *\n * Currently, the `.fit()` method of the `Sequential` model class\n * will include the following quantities in the `logs` that\n * it passes to its callbacks:\n *\n * onEpochEnd: Logs include `acc` and `loss`, and optionally include `valLoss`\n * (if validation is enabled in `fit`), and `valAcc` (if validation and\n * accuracy monitoring are enabled).\n * onBatchBegin: Logs include `size`, the number of samples in the current\n * batch.\n * onBatchEnd: Logs include `loss`, and optionally `acc` (if accuracy monitoring\n * is enabled).\n */\nexport abstract class BaseCallback {\n // TODO(michaelterry): This type is a best guess.\n validationData: Tensor|Tensor[] = null;\n /**\n * Training parameters (eg. verbosity, batch size, number of epochs...).\n */\n params: Params;\n\n setParams(params: Params): void {\n this.params = params;\n }\n\n async onEpochBegin(epoch: number, logs?: UnresolvedLogs) {}\n\n async onEpochEnd(epoch: number, logs?: UnresolvedLogs) {}\n\n async onBatchBegin(batch: number, logs?: UnresolvedLogs) {}\n\n async onBatchEnd(batch: number, logs?: UnresolvedLogs) {}\n\n async onTrainBegin(logs?: UnresolvedLogs) {}\n\n async onTrainEnd(logs?: UnresolvedLogs) {}\n\n // LayersModel needs to call Callback.setModel(), but cannot actually depend\n // on Callback because that creates a cyclic dependency. Providing this no-op\n // method on BaseCallback breaks the cycle: this way LayersModel can depend on\n // BaseCallback but not on Callback. The argument is typed as `Container`\n // (the superclass of LayersModel) to avoid recapitulating the cycle. Callback\n // overrides this method and enforces that the argument is really a\n // LayersModel.\n setModel(model: Container): void {\n // Do nothing. 
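The `TOPOLOGICAL_SORT_CACHE_MAX_ENTRIES` flag registered above (default 100) can be tuned through the environment API; a minimal sketch, assuming the standard `tf.env().set(...)` call from `@tensorflow/tfjs-core`:

import * as tf from '@tensorflow/tfjs';

// Shrink the executor's topological-sort caches, e.g. before issuing many
// differently-shaped predict() calls; setting the flag triggers
// updateCacheMaxEntries() via the hook registered above.
tf.env().set('TOPOLOGICAL_SORT_CACHE_MAX_ENTRIES', 10);
console.log(tf.env().getNumber('TOPOLOGICAL_SORT_CACHE_MAX_ENTRIES'));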
Use Callback instead of BaseCallback to track the model.\n }\n}\n\n/**\n * Container abstracting a list of callbacks.\n */\nexport class CallbackList {\n callbacks: BaseCallback[];\n queueLength: number;\n\n // TODO(cais): When the need arises, uncomment the following lines and\n // implement the queue for time values.\n // private deltaTBatch: number;\n // private deltaTsBatchBegin: Array;\n // private deltaTsBatchEnd: Array;\n\n /**\n * Constructor of CallbackList.\n * @param callbacks Array of `Callback` instances.\n * @param queueLength Queue length for keeping running statistics over\n * callback execution time.\n */\n constructor(callbacks?: BaseCallback[], queueLength = 10) {\n // TODO(cais): Make use of queueLength when implementing the queue for time\n // values.\n if (callbacks == null) {\n callbacks = [];\n }\n this.callbacks = callbacks;\n this.queueLength = queueLength;\n }\n\n append(callback: BaseCallback): void {\n this.callbacks.push(callback);\n }\n\n setParams(params: Params): void {\n for (const callback of this.callbacks) {\n callback.setParams(params);\n }\n }\n\n setModel(model: Container): void {\n for (const callback of this.callbacks) {\n callback.setModel(model);\n }\n }\n\n /**\n * Called at the start of an epoch.\n * @param epoch Index of epoch.\n * @param logs Dictionary of logs.\n */\n async onEpochBegin(epoch: number, logs?: UnresolvedLogs) {\n if (logs == null) {\n logs = {};\n }\n for (const callback of this.callbacks) {\n await callback.onEpochBegin(epoch, logs);\n }\n }\n\n /**\n * Called at the end of an epoch.\n * @param epoch Index of epoch.\n * @param logs Dictionary of logs.\n */\n async onEpochEnd(epoch: number, logs?: UnresolvedLogs) {\n if (logs == null) {\n logs = {};\n }\n for (const callback of this.callbacks) {\n await callback.onEpochEnd(epoch, logs);\n }\n }\n\n /**\n * Called right before processing a batch.\n * @param batch Index of batch within the current epoch.\n * @param logs Dictionary of logs.\n */\n async onBatchBegin(batch: number, logs?: UnresolvedLogs) {\n if (logs == null) {\n logs = {};\n }\n for (const callback of this.callbacks) {\n await callback.onBatchBegin(batch, logs);\n }\n }\n\n /**\n * Called at the end of a batch.\n * @param batch Index of batch within the current epoch.\n * @param logs Dictionary of logs.\n */\n async onBatchEnd(batch: number, logs?: UnresolvedLogs) {\n if (logs == null) {\n logs = {};\n }\n for (const callback of this.callbacks) {\n await callback.onBatchEnd(batch, logs);\n }\n }\n\n /**\n * Called at the beginning of training.\n * @param logs Dictionary of logs.\n */\n async onTrainBegin(logs?: UnresolvedLogs) {\n if (logs == null) {\n logs = {};\n }\n for (const callback of this.callbacks) {\n await callback.onTrainBegin(logs);\n }\n }\n\n /**\n * Called at the end of training.\n * @param logs Dictionary of logs.\n */\n async onTrainEnd(logs?: UnresolvedLogs) {\n if (logs == null) {\n logs = {};\n }\n for (const callback of this.callbacks) {\n await callback.onTrainEnd(logs);\n }\n }\n}\n\n/**\n * Callback that accumulates epoch averages of metrics.\n *\n * This callback is automatically applied to every LayersModel.\n */\nexport class BaseLogger extends BaseCallback {\n private seen: number;\n private totals: UnresolvedLogs;\n\n constructor() {\n super();\n }\n\n override async onEpochBegin(epoch: number) {\n this.seen = 0;\n this.totals = {};\n }\n\n override async onBatchEnd(batch: number, logs?: UnresolvedLogs) {\n if (logs == null) {\n logs = {};\n }\n const batchSize = logs['size'] == null 
? 0 : logs['size'] as number;\n this.seen += batchSize;\n for (const key in logs) {\n const value = logs[key];\n if (typeof value === 'number') {\n if (!this.totals.hasOwnProperty(key)) {\n this.totals[key] = 0;\n }\n this.totals[key] = this.totals[key] as number + value * batchSize;\n } else {\n let oldTotalsToDispose: Scalar;\n if (key in this.totals) {\n oldTotalsToDispose = this.totals[key] as Scalar;\n } else {\n this.totals[key] = 0;\n }\n const total: Scalar =\n tidy(() => add((this.totals[key]), mul(value, batchSize)));\n this.totals[key] = total;\n if (oldTotalsToDispose != null) {\n oldTotalsToDispose.dispose();\n }\n }\n }\n }\n\n override async onEpochEnd(epoch: number, logs?: UnresolvedLogs) {\n if (logs != null) {\n for (const key of this.params['metrics'] as string[]) {\n if (this.totals[key] == null) {\n continue;\n }\n if (typeof this.totals[key] === 'number') {\n logs[key] = this.totals[key] as number / this.seen;\n } else {\n tidy(() => {\n const log: Scalar = mul(div(1, this.seen), this.totals[key]);\n logs[key] = log;\n (this.totals[key] as Tensor).dispose();\n keep(logs[key] as Scalar);\n });\n }\n }\n }\n }\n}\n\n/**\n * Callback that records events into a `History` object. This callback is\n * automatically applied to every TF.js Layers model. The `History` object\n * gets returned by the `fit` method of models.\n */\nexport class History extends BaseCallback {\n epoch: number[];\n history: {[key: string]: Array};\n\n override async onTrainBegin(logs?: UnresolvedLogs) {\n this.epoch = [];\n this.history = {};\n }\n\n override async onEpochEnd(epoch: number, logs?: UnresolvedLogs) {\n if (logs == null) {\n logs = {};\n }\n this.epoch.push(epoch);\n for (const key in logs) {\n if (this.history[key] == null) {\n this.history[key] = [];\n }\n this.history[key].push(logs[key]);\n }\n }\n\n /**\n * Await the values of all losses and metrics.\n */\n async syncData() {\n const promises: Array> = [];\n const keys: string[] = [];\n const indices: number[] = [];\n for (const key in this.history) {\n const valueArray = this.history[key];\n for (let i = 0; i < valueArray.length; ++i) {\n if (typeof valueArray[i] !== 'number') {\n const valueScalar = valueArray[i] as Tensor;\n promises.push(valueScalar.data());\n keys.push(key);\n indices.push(i);\n }\n }\n }\n const values = await Promise.all(promises);\n for (let n = 0; n < values.length; ++n) {\n const tensorToDispose = this.history[keys[n]][indices[n]] as Tensor;\n tensorToDispose.dispose();\n this.history[keys[n]][indices[n]] = values[n][0];\n }\n }\n}\n\nexport interface CustomCallbackArgs {\n onTrainBegin?: (logs?: Logs) => void | Promise;\n onTrainEnd?: (logs?: Logs) => void | Promise;\n onEpochBegin?: (epoch: number, logs?: Logs) => void | Promise;\n onEpochEnd?: (epoch: number, logs?: Logs) => void | Promise;\n onBatchBegin?: (batch: number, logs?: Logs) => void | Promise;\n onBatchEnd?: (batch: number, logs?: Logs) => void | Promise;\n onYield?: (epoch: number, batch: number, logs: Logs) => void | Promise;\n // Used for test DI mocking.\n nowFunc?: Function;\n nextFrameFunc?: Function;\n}\n\n/**\n * Custom callback for training.\n */\nexport class CustomCallback extends BaseCallback {\n protected readonly trainBegin: (logs?: Logs) => void | Promise;\n protected readonly trainEnd: (logs?: Logs) => void | Promise;\n protected readonly epochBegin:\n (epoch: number, logs?: Logs) => void | Promise;\n protected readonly epochEnd:\n (epoch: number, logs?: Logs) => void | Promise;\n protected readonly batchBegin:\n (batch: 
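The `History` callback above is what `LayersModel.fit()` resolves to; once its Scalars have been awaited, `history.history` holds plain numbers keyed by metric name. A small usage sketch with assumed toy data:

import * as tf from '@tensorflow/tfjs';

async function train() {
  const model = tf.sequential();
  model.add(tf.layers.dense({units: 1, inputShape: [1]}));
  model.compile({optimizer: 'sgd', loss: 'meanSquaredError'});

  const xs = tf.tensor2d([[0], [1], [2], [3]]);
  const ys = tf.tensor2d([[0], [2], [4], [6]]);

  // fit() resolves to the History instance populated epoch by epoch.
  const history = await model.fit(xs, ys, {epochs: 5});
  console.log(history.epoch);          // [0, 1, 2, 3, 4]
  console.log(history.history.loss);   // one numeric loss value per epoch
}
train();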
number, logs?: Logs) => void | Promise;\n protected readonly batchEnd:\n (batch: number, logs?: Logs) => void | Promise;\n protected readonly yield:\n (epoch: number, batch: number, logs: Logs) => void | Promise;\n\n private yieldEvery: YieldEveryOptions;\n private currentEpoch = 0;\n public nowFunc: Function;\n public nextFrameFunc: Function;\n\n constructor(args: CustomCallbackArgs, yieldEvery?: YieldEveryOptions) {\n super();\n this.nowFunc = args.nowFunc;\n this.nextFrameFunc = args.nextFrameFunc || nextFrame;\n this.yieldEvery = yieldEvery || 'auto';\n if (this.yieldEvery === 'auto') {\n this.yieldEvery = DEFAULT_YIELD_EVERY_MS;\n }\n if (this.yieldEvery === 'never' && args.onYield != null) {\n throw new Error(\n 'yieldEvery is `never` but you provided an `onYield` callback. ' +\n 'Either change `yieldEvery` or remove the callback');\n }\n if (util.isNumber(this.yieldEvery)) {\n // Decorate `maybeWait` so it will be called at most once every\n // `yieldEvery` ms.\n this.maybeWait = generic_utils.debounce(\n this.maybeWait.bind(this), this.yieldEvery as number, this.nowFunc);\n }\n this.trainBegin = args.onTrainBegin;\n this.trainEnd = args.onTrainEnd;\n this.epochBegin = args.onEpochBegin;\n this.epochEnd = args.onEpochEnd;\n this.batchBegin = args.onBatchBegin;\n this.batchEnd = args.onBatchEnd;\n this.yield = args.onYield;\n }\n\n async maybeWait(epoch: number, batch: number, logs: UnresolvedLogs) {\n const ps: Array> = [];\n if (this.yield != null) {\n await resolveScalarsInLogs(logs);\n ps.push(this.yield(epoch, batch, logs as Logs));\n }\n ps.push(this.nextFrameFunc());\n await Promise.all(ps);\n }\n\n override async onEpochBegin(epoch: number, logs?: UnresolvedLogs):\n Promise {\n this.currentEpoch = epoch;\n if (this.epochBegin != null) {\n await resolveScalarsInLogs(logs);\n await this.epochBegin(epoch, logs as Logs);\n }\n }\n\n override async onEpochEnd(epoch: number, logs?: UnresolvedLogs):\n Promise {\n const ps: Array> = [];\n if (this.epochEnd != null) {\n await resolveScalarsInLogs(logs);\n ps.push(this.epochEnd(epoch, logs as Logs));\n }\n if (this.yieldEvery === 'epoch') {\n ps.push(this.nextFrameFunc());\n }\n await Promise.all(ps);\n }\n\n override async onBatchBegin(batch: number, logs?: UnresolvedLogs):\n Promise {\n if (this.batchBegin != null) {\n await resolveScalarsInLogs(logs);\n await this.batchBegin(batch, logs as Logs);\n }\n }\n\n override async onBatchEnd(batch: number, logs?: UnresolvedLogs):\n Promise {\n const ps: Array> = [];\n if (this.batchEnd != null) {\n await resolveScalarsInLogs(logs);\n ps.push(this.batchEnd(batch, logs as Logs));\n }\n if (this.yieldEvery === 'batch') {\n ps.push(this.nextFrameFunc());\n } else if (util.isNumber(this.yieldEvery)) {\n ps.push(this.maybeWait(this.currentEpoch, batch, logs));\n }\n await Promise.all(ps);\n }\n\n override async onTrainBegin(logs?: UnresolvedLogs): Promise {\n if (this.trainBegin != null) {\n await resolveScalarsInLogs(logs);\n await this.trainBegin(logs as Logs);\n }\n }\n\n override async onTrainEnd(logs?: UnresolvedLogs): Promise {\n if (this.trainEnd != null) {\n await resolveScalarsInLogs(logs);\n await this.trainEnd(logs as Logs);\n }\n }\n}\n\n/**\n * Standardize callbacks or configurations of them to an Array of callbacks.\n */\nexport function standardizeCallbacks(\n callbacks: BaseCallback|BaseCallback[]|CustomCallbackArgs|\n CustomCallbackArgs[],\n yieldEvery: YieldEveryOptions): BaseCallback[] {\n if (callbacks == null) {\n callbacks = {} as BaseCallback;\n }\n if (callbacks instanceof 
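`CustomCallback` is the wrapper that user-supplied callback configs end up in. A sketch of passing such a config (the `CustomCallbackArgs` shape above) together with a numeric `yieldEvery` to `fit()`:

import * as tf from '@tensorflow/tfjs';

async function trainWithCallbacks() {
  const model = tf.sequential();
  model.add(tf.layers.dense({units: 1, inputShape: [1]}));
  model.compile({optimizer: 'sgd', loss: 'meanSquaredError'});

  const xs = tf.randomNormal([64, 1]);
  const ys = tf.randomNormal([64, 1]);

  await model.fit(xs, ys, {
    epochs: 3,
    batchSize: 8,
    // A plain-object config; fit() wraps it in a CustomCallback internally.
    callbacks: {
      onEpochEnd: async (epoch, logs) => {
        console.log(`epoch ${epoch}: loss=${logs.loss}`);
      },
    },
    // Yield to the main thread at most roughly every 50 ms while training.
    yieldEvery: 50,
  });
}
trainWithCallbacks();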
BaseCallback) {\n return [callbacks];\n }\n if (Array.isArray(callbacks) && callbacks[0] instanceof BaseCallback) {\n return callbacks as BaseCallback[];\n }\n // Convert custom callback configs to custom callback objects.\n const callbackConfigs =\n generic_utils.toList(callbacks) as CustomCallbackArgs[];\n return callbackConfigs.map(\n callbackConfig => new CustomCallback(callbackConfig, yieldEvery));\n}\n\nexport declare type BaseCallbackConstructor = {\n new (): BaseCallback\n};\n\n/**\n * A global registry for callback constructors to be used during\n * LayersModel.fit().\n */\nexport class CallbackConstructorRegistry {\n private static constructors:\n {[verbosityLevel: number]: BaseCallbackConstructor[]} = {};\n\n /**\n * Blocks public access to constructor.\n */\n private constructor() {}\n\n /**\n * Register a tf.LayersModel.fit() callback constructor.\n *\n * The registered callback constructor will be used to instantiate\n * callbacks for every tf.LayersModel.fit() call afterwards.\n *\n * @param verbosityLevel Level of verbosity at which the `callbackConstructor`\n * is to be reigstered.\n * @param callbackConstructor A no-arg constructor for `tf.Callback`.\n * @throws Error, if the same callbackConstructor has been registered before,\n * either at the same or a different `verbosityLevel`.\n */\n static registerCallbackConstructor(\n verbosityLevel: number, callbackConstructor: BaseCallbackConstructor) {\n util.assert(\n verbosityLevel >= 0 && Number.isInteger(verbosityLevel),\n () => `Verbosity level is expected to be an integer >= 0, ` +\n `but got ${verbosityLevel}`);\n CallbackConstructorRegistry.checkForDuplicate(callbackConstructor);\n if (CallbackConstructorRegistry.constructors[verbosityLevel] == null) {\n CallbackConstructorRegistry.constructors[verbosityLevel] = [];\n }\n CallbackConstructorRegistry.constructors[verbosityLevel].push(\n callbackConstructor);\n }\n\n private static checkForDuplicate(callbackConstructor:\n BaseCallbackConstructor) {\n for (const levelName in CallbackConstructorRegistry.constructors) {\n const constructors = CallbackConstructorRegistry.constructors[+levelName];\n constructors.forEach(ctor => {\n if (ctor === callbackConstructor) {\n throw new ValueError('Duplicate callback constructor.');\n }\n });\n }\n }\n\n /**\n * Clear all registered callback constructors.\n */\n protected static clear() {\n CallbackConstructorRegistry.constructors = {};\n }\n\n /**\n * Create callbacks using the registered callback constructors.\n *\n * Given `verbosityLevel`, all constructors registered at that level or above\n * will be called and the instantiated callbacks will be used.\n *\n * @param verbosityLevel: Level of verbosity.\n */\n static createCallbacks(verbosityLevel: number): BaseCallback[] {\n const constructors: BaseCallbackConstructor[] = [];\n for (const levelName in CallbackConstructorRegistry.constructors) {\n const level = +levelName;\n if (verbosityLevel >= level) {\n constructors.push(...CallbackConstructorRegistry.constructors[level]);\n }\n }\n return constructors.map(ctor => new ctor());\n }\n}\n\nexport function configureCallbacks(\n callbacks: BaseCallback[], verbose: ModelLoggingVerbosity, epochs: number,\n initialEpoch: number, numTrainSamples: number, stepsPerEpoch: number,\n batchSize: number, doValidation: boolean,\n callbackMetrics: string[]): {callbackList: CallbackList, history: History} {\n const history = new History();\n const actualCallbacks: BaseCallback[] = [\n new BaseLogger(), 
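`CallbackConstructorRegistry` is how library-level callbacks (progress bars, visualization hooks, and similar) get attached to every `fit()` call at a given verbosity level. A hypothetical sketch using the `BaseCallback` and `CallbackConstructorRegistry` symbols defined in this source; the deep import path and the `EpochCounter` class are assumptions made up for illustration:

// Hypothetical internal import path, shown only for illustration.
import {BaseCallback, CallbackConstructorRegistry} from '@tensorflow/tfjs-layers/dist/base_callbacks';

class EpochCounter extends BaseCallback {
  private count = 0;
  override async onEpochEnd(epoch: number) {
    this.count++;
    console.log(`completed epochs so far: ${this.count}`);
  }
}

// EpochCounter instances will now be created for every fit() call whose
// verbosity is at least 1 (see createCallbacks() above).
CallbackConstructorRegistry.registerCallbackConstructor(1, EpochCounter);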
...CallbackConstructorRegistry.createCallbacks(verbose)\n ];\n if (callbacks != null) {\n actualCallbacks.push(...callbacks);\n }\n actualCallbacks.push(history);\n const callbackList = new CallbackList(actualCallbacks);\n\n // TODO(cais): Figure out when this LayersModel instance can have a\n // dynamically\n // set property called 'callback_model' as in PyKeras.\n\n callbackList.setParams({\n epochs,\n initialEpoch,\n samples: numTrainSamples,\n steps: stepsPerEpoch,\n batchSize,\n verbose,\n doValidation,\n metrics: callbackMetrics,\n });\n return {callbackList, history};\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n// tslint:disable-next-line:max-line-length\nimport {Constant, ConstantArgs, GlorotNormal, GlorotUniform, HeNormal, HeUniform, Identity, IdentityArgs, Initializer, LeCunNormal, LeCunUniform, Ones, Orthogonal, OrthogonalArgs, RandomNormal, RandomNormalArgs, RandomUniform, RandomUniformArgs, SeedOnlyInitializerArgs, TruncatedNormal, TruncatedNormalArgs, VarianceScaling, VarianceScalingArgs, Zeros} from './initializers';\n\n/**\n * Initializer that generates tensors initialized to 0.\n *\n * @doc {heading: 'Initializers', namespace: 'initializers'}\n */\nexport function zeros(): Zeros {\n return new Zeros();\n}\n\n/**\n * Initializer that generates tensors initialized to 1.\n *\n * @doc {heading: 'Initializers', namespace: 'initializers'}\n */\nexport function ones(): Initializer {\n return new Ones();\n}\n\n/**\n * Initializer that generates values initialized to some constant.\n *\n * @doc {heading: 'Initializers', namespace: 'initializers'}\n */\nexport function constant(args: ConstantArgs): Initializer {\n return new Constant(args);\n}\n\n/**\n * Initializer that generates random values initialized to a uniform\n * distribution.\n *\n * Values will be distributed uniformly between the configured minval and\n * maxval.\n *\n * @doc {heading: 'Initializers', namespace: 'initializers'}\n */\nexport function randomUniform(args: RandomUniformArgs): Initializer {\n return new RandomUniform(args);\n}\n\n/**\n * Initializer that generates random values initialized to a normal\n * distribution.\n *\n * @doc {heading: 'Initializers', namespace: 'initializers'}\n */\nexport function randomNormal(args: RandomNormalArgs): Initializer {\n return new RandomNormal(args);\n}\n\n/**\n * Initializer that generates random values initialized to a truncated normal\n * distribution.\n *\n * These values are similar to values from a `RandomNormal` except that values\n * more than two standard deviations from the mean are discarded and re-drawn.\n * This is the recommended initializer for neural network weights and filters.\n *\n * @doc {heading: 'Initializers', namespace: 'initializers'}\n */\nexport function truncatedNormal(args: TruncatedNormalArgs): Initializer {\n return new TruncatedNormal(args);\n}\n\n/**\n * Initializer that generates the identity matrix.\n * Only use for square 2D matrices.\n *\n * @doc {heading: 'Initializers', namespace: 'initializers'}\n */\nexport function identity(args: IdentityArgs): Initializer {\n return new Identity(args);\n}\n\n/**\n * Initializer capable of adapting its scale to the shape of weights.\n * With distribution=NORMAL, samples are drawn from a truncated normal\n * distribution centered on zero, with 
`stddev = sqrt(scale / n)` where n is:\n * - number of input units in the weight tensor, if mode = FAN_IN.\n * - number of output units, if mode = FAN_OUT.\n * - average of the numbers of input and output units, if mode = FAN_AVG.\n * With distribution=UNIFORM,\n * samples are drawn from a uniform distribution\n * within [-limit, limit], with `limit = sqrt(3 * scale / n)`.\n *\n * @doc {heading: 'Initializers',namespace: 'initializers'}\n */\nexport function varianceScaling(config: VarianceScalingArgs): Initializer {\n return new VarianceScaling(config);\n}\n\n/**\n * Glorot uniform initializer, also called Xavier uniform initializer.\n * It draws samples from a uniform distribution within [-limit, limit]\n * where `limit` is `sqrt(6 / (fan_in + fan_out))`\n * where `fan_in` is the number of input units in the weight tensor\n * and `fan_out` is the number of output units in the weight tensor\n *\n * Reference:\n * Glorot & Bengio, AISTATS 2010\n * http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf.\n *\n * @doc {heading: 'Initializers', namespace: 'initializers'}\n */\nexport function glorotUniform(args: SeedOnlyInitializerArgs): Initializer {\n return new GlorotUniform(args);\n}\n\n/**\n * Glorot normal initializer, also called Xavier normal initializer.\n * It draws samples from a truncated normal distribution centered on 0\n * with `stddev = sqrt(2 / (fan_in + fan_out))`\n * where `fan_in` is the number of input units in the weight tensor\n * and `fan_out` is the number of output units in the weight tensor.\n *\n * Reference:\n * Glorot & Bengio, AISTATS 2010\n * http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf\n *\n * @doc {heading: 'Initializers', namespace: 'initializers'}\n */\nexport function glorotNormal(args: SeedOnlyInitializerArgs): Initializer {\n return new GlorotNormal(args);\n}\n\n/**\n * He normal initializer.\n *\n * It draws samples from a truncated normal distribution centered on 0\n * with `stddev = sqrt(2 / fanIn)`\n * where `fanIn` is the number of input units in the weight tensor.\n *\n * Reference:\n * He et al., http://arxiv.org/abs/1502.01852\n *\n * @doc {heading: 'Initializers', namespace: 'initializers'}\n */\nexport function heNormal(args: SeedOnlyInitializerArgs): Initializer {\n return new HeNormal(args);\n}\n\n/**\n * He uniform initializer.\n *\n * It draws samples from a uniform distribution within [-limit, limit]\n * where `limit` is `sqrt(6 / fan_in)`\n * where `fanIn` is the number of input units in the weight tensor.\n *\n * Reference:\n * He et al., http://arxiv.org/abs/1502.01852\n *\n * @doc {heading: 'Initializers',namespace: 'initializers'}\n */\nexport function heUniform(args: SeedOnlyInitializerArgs): Initializer {\n return new HeUniform(args);\n}\n\n/**\n * LeCun normal initializer.\n *\n * It draws samples from a truncated normal distribution centered on 0\n * with `stddev = sqrt(1 / fanIn)`\n * where `fanIn` is the number of input units in the weight tensor.\n *\n * References:\n * [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)\n * [Efficient Backprop](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)\n *\n * @doc {heading: 'Initializers', namespace: 'initializers'}\n */\nexport function leCunNormal(args: SeedOnlyInitializerArgs): Initializer {\n return new LeCunNormal(args);\n}\n\n/**\n * LeCun uniform initializer.\n *\n * It draws samples from a uniform distribution in the interval\n * `[-limit, limit]` with `limit = sqrt(3 / fanIn)`,\n * where `fanIn` is the number of input units in the 
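Like constraints, initializers are usually handed to layers either as instances or by string name. A short sketch assuming the public `tf.initializers.*` factories that mirror the functions above:

import * as tf from '@tensorflow/tfjs';

const layer = tf.layers.dense({
  units: 32,
  inputShape: [64],
  // Glorot/Xavier uniform: limit = sqrt(6 / (fanIn + fanOut)).
  kernelInitializer: tf.initializers.glorotUniform({seed: 42}),
  biasInitializer: 'zeros',
});

// Roughly equivalent to He normal, expressed via varianceScaling:
// stddev = sqrt(scale / n) with scale = 2 and n = fanIn. Per the docstring
// above, the 'normal' distribution here is in fact a truncated normal.
const heLike = tf.initializers.varianceScaling(
    {scale: 2, mode: 'fanIn', distribution: 'normal'});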
weight tensor.\n *\n * @doc {heading: 'Initializers', namespace: 'initializers'}\n */\nexport function leCunUniform(args: SeedOnlyInitializerArgs): Initializer {\n return new LeCunUniform(args);\n}\n\n/**\n * Initializer that generates a random orthogonal matrix.\n *\n * Reference:\n * [Saxe et al., http://arxiv.org/abs/1312.6120](http://arxiv.org/abs/1312.6120)\n *\n * @doc {heading: 'Initializers', namespace: 'initializers'}\n */\nexport function orthogonal(args: OrthogonalArgs): Initializer {\n return new Orthogonal(args);\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\nimport {dispose, Scalar} from '@tensorflow/tfjs-core';\n\n/**\n * Logs in which values can be either numbers or Tensors (Scalars).\n *\n * Used internally.\n */\nexport type UnresolvedLogs = {\n [key: string]: number|Scalar;\n};\n\n/**\n * Turn any Scalar values in a Logs object into actual number values.\n *\n * @param logs The `Logs` object to be resolved in place.\n */\nexport async function resolveScalarsInLogs(logs: UnresolvedLogs) {\n if (logs == null) {\n return;\n }\n const promises: Array> = [];\n const keys: string[] = [];\n const scalarsToDispose: Scalar[] = [];\n for (const key in logs) {\n const value = logs[key];\n if (typeof value !== 'number') {\n const valueScalar = value;\n promises.push(valueScalar.data());\n keys.push(key);\n scalarsToDispose.push(valueScalar);\n }\n }\n if (promises.length > 0) {\n const values = await Promise.all(promises);\n for (let i = 0; i < values.length; ++i) {\n logs[keys[i]] = values[i][0];\n }\n // Dispose the original scalar tensors.\n dispose(scalarsToDispose);\n }\n}\n\n/**\n * Dispose all Tensors in an UnresolvedLogs object.\n *\n * @param logs An `UnresolvedLogs` object potentially containing `tf.Tensor`s in\n * places where the values can be `tf.Tensor` or `number`.\n */\nexport function disposeTensorsInLogs(logs: UnresolvedLogs) {\n if (logs == null) {\n return;\n }\n for (const key in logs) {\n const value = logs[key];\n if (typeof value !== 'number') {\n value.dispose();\n }\n }\n}\n\n/**\n * Logs in which values can only be numbers.\n *\n * Used when calling client-provided custom callbacks.\n */\nexport type Logs = {\n [key: string]: number;\n};\n","import superPropBase from \"./superPropBase.js\";\nexport default function _get() {\n if (typeof Reflect !== \"undefined\" && Reflect.get) {\n _get = Reflect.get.bind();\n } else {\n _get = function _get(target, property, receiver) {\n var base = superPropBase(target, property);\n if (!base) return;\n var desc = Object.getOwnPropertyDescriptor(base, property);\n if (desc.get) {\n return desc.get.call(arguments.length < 3 ? 
target : receiver);\n }\n return desc.value;\n };\n }\n return _get.apply(this, arguments);\n}","import getPrototypeOf from \"./getPrototypeOf.js\";\nexport default function _superPropBase(object, property) {\n while (!Object.prototype.hasOwnProperty.call(object, property)) {\n object = getPrototypeOf(object);\n if (object === null) break;\n }\n return object;\n}","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/* Original Source layers/__init__.py */\nimport {serialization} from '@tensorflow/tfjs-core';\n\nimport {deserializeKerasObject} from '../utils/generic_utils';\n\n/**\n * Instantiate a layer from a config dictionary.\n * @param config dict of the form {class_name: str, config: dict}\n * @param customObjects dict mapping class names (or function names)\n * of custom (non-Keras) objects to class/functions\n * @param fastWeightInit Optional flag to use fast weight initialization\n * during deserialization. This is applicable to cases in which\n * the initialization will be immediately overwritten by loaded weight\n * values. Default: `false`.\n * @returns Layer instance (may be LayersModel, Sequential, Layer...)\n */\nexport function deserialize(\n config: serialization.ConfigDict,\n customObjects = {} as serialization.ConfigDict,\n fastWeightInit = false): serialization.Serializable {\n return deserializeKerasObject(\n config, serialization.SerializationMap.getMap().classNameMap,\n customObjects, 'layer', fastWeightInit);\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/* Original Source: losses.py */\nimport * as tfc from '@tensorflow/tfjs-core';\nimport {Tensor, Tensor1D, tidy, util} from '@tensorflow/tfjs-core';\n\nimport {epsilon} from './backend/common';\nimport * as K from './backend/tfjs_backend';\nimport {ValueError} from './errors';\nimport {LossOrMetricFn} from './types';\n\n/**\n * Normalizes a tensor wrt the L2 norm alongside the specified axis.\n * @param x\n * @param axis Axis along which to perform normalization.\n */\nexport function l2Normalize(x: Tensor, axis?: number): Tensor {\n return tidy(() => {\n if (x.dtype !== 'float32') {\n x = tfc.cast(x, 'float32');\n }\n const squareSum = tfc.sum(K.square(x), axis, true);\n const epsilonTensor = tfc.fill(squareSum.shape, epsilon());\n const norm = tfc.sqrt(tfc.maximum(squareSum, epsilonTensor));\n return tfc.div(x, norm);\n });\n}\n\nexport function meanSquaredError(yTrue: Tensor, yPred: Tensor): Tensor {\n return tidy(() => tfc.mean(K.square(tfc.sub(yPred, yTrue)), -1));\n}\n\nexport function meanAbsoluteError(yTrue: Tensor, yPred: Tensor): Tensor {\n return tidy(() => tfc.mean(tfc.abs(tfc.sub(yPred, yTrue)), -1));\n}\n\nexport function meanAbsolutePercentageError(\n yTrue: Tensor, yPred: Tensor): Tensor {\n return tidy(() => {\n const diff = tfc.sub(yTrue, yPred);\n const clippedTrue =\n tfc.clipByValue(tfc.abs(yTrue), epsilon(), Number.MAX_VALUE);\n const absResult = tfc.abs(tfc.div(diff, clippedTrue));\n return tfc.mul(100, tfc.mean(absResult, -1));\n });\n}\n\nexport function meanSquaredLogarithmicError(\n yTrue: Tensor, 
yPred: Tensor): Tensor {\n return tidy(() => {\n const clippedPred = tfc.clipByValue(yPred, epsilon(), Number.MAX_VALUE);\n const firstLog = tfc.log(tfc.add(1, clippedPred));\n\n const clippedTrue = tfc.clipByValue(yTrue, epsilon(), Number.MAX_VALUE);\n const secondLog = tfc.log(tfc.add(1, clippedTrue));\n\n return tfc.mean(K.square(tfc.sub(firstLog, secondLog)), -1);\n });\n}\n\nexport function squaredHinge(yTrue: Tensor, yPred: Tensor): Tensor {\n return tidy(() => {\n const maxResult = tfc.maximum(0, tfc.sub(1, tfc.mul(yTrue, yPred)));\n return tfc.mean(K.square(maxResult), -1);\n });\n}\n\nexport function hinge(yTrue: Tensor, yPred: Tensor): Tensor {\n return tidy(() => {\n const maxResult = tfc.maximum(0, tfc.sub(1, tfc.mul(yTrue, yPred)));\n return tfc.mean(maxResult, -1);\n });\n}\n\nexport function categoricalHinge(yTrue: Tensor, yPred: Tensor): Tensor {\n return tidy(() => {\n const pos = tfc.sum(tfc.mul(yTrue, yPred), -1);\n const neg = tfc.max(tfc.mul(tfc.sub(1, yTrue), yPred), -1);\n return tfc.maximum(0, tfc.add(1, tfc.sub(neg, pos)));\n });\n}\n\n/**\n * Logarithm of the hyperbolic cosine of the prediction error.\n *\n * `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and\n * to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly\n * like the mean squared error, but will not be so strongly affected by the\n * occasional wildly incorrect prediction.\n */\nexport function logcosh(yTrue: Tensor, yPred: Tensor): Tensor {\n return tidy(() => {\n const log2 = Math.log(2);\n const predictionDiff = tfc.sub(yPred, yTrue);\n const logcoshResult = tfc.sub(\n tfc.add(predictionDiff, tfc.softplus(tfc.mul(-2, predictionDiff))),\n log2);\n return tfc.mean(logcoshResult, -1);\n });\n}\n\nexport function categoricalCrossentropy(\n target: Tensor, output: Tensor, fromLogits = false): Tensor {\n return tidy(() => {\n if (fromLogits) {\n output = tfc.softmax(output);\n } else {\n // scale preds so that the class probabilities of each sample sum to 1.\n const outputSum = tfc.sum(output, output.shape.length - 1, true);\n output = tfc.div(output, outputSum);\n }\n output = tfc.clipByValue(output, epsilon(), 1 - epsilon());\n return tfc.neg(tfc.sum(\n tfc.mul(tfc.cast(target, 'float32'), tfc.log(output)),\n output.shape.length - 1));\n });\n}\n\n/**\n * Categorical crossentropy with integer targets.\n *\n * @param target An integer tensor.\n * @param output A tensor resulting from a softmax (unless `fromLogits` is\n * `true`, in which case `output` is expected to be the logits).\n * @param fromLogits Boolean, whether `output` is the result of a softmax, or is\n * a tensor of logits.\n */\nexport function sparseCategoricalCrossentropy(\n target: Tensor, output: Tensor, fromLogits = false): Tensor {\n return tidy(() => {\n const flatTarget =\n tfc.cast(tfc.floor(K.flatten(target)), 'int32') as Tensor1D;\n output = tfc.clipByValue(output, epsilon(), 1 - epsilon());\n const outputShape = output.shape;\n const oneHotTarget = tfc.reshape(\n tfc.oneHot(flatTarget, outputShape[outputShape.length - 1]),\n outputShape);\n return categoricalCrossentropy(oneHotTarget, output, fromLogits);\n });\n}\n\n/**\n * From TensorFlow's implementation in nn_impl.py:\n *\n * For brevity, let `x = logits`, `z = labels`. 
The logistic loss is\n * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n * = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))\n * = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))\n * = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))\n * = (1 - z) * x + log(1 + exp(-x))\n * = x - x * z + log(1 + exp(-x))\n * For x < 0, to avoid overflow in exp(-x), we reformulate the above\n * x - x * z + log(1 + exp(-x))\n * = log(exp(x)) - x * z + log(1 + exp(-x))\n * = - x * z + log(1 + exp(x))\n * Hence, to ensure stability and avoid overflow, the implementation uses this\n * equivalent formulation\n * max(x, 0) - x * z + log(1 + exp(-abs(x)))\n *\n * @param labels The labels.\n * @param logits The logits.\n */\nexport function sigmoidCrossEntropyWithLogits(\n labels: Tensor, logits: Tensor): Tensor {\n if (!util.arraysEqual(labels.shape, logits.shape)) {\n throw new ValueError(\n `logits and labels must have the same shape, but got shapes ` +\n `${JSON.stringify(labels.shape)} and ${JSON.stringify(logits.shape)}`);\n }\n return tidy(() => {\n // The logistic loss formula from above is\n // x - x * z + log(1 + exp(-x))\n // For x < 0, a more numerically stable formula is\n // -x * z + log(1 + exp(x))\n // Note that these two expressions can be combined into the following:\n // max(x, 0) - x * z + log(1 + exp(-abs(x)))\n const reluLogits = tfc.relu(logits);\n const negAbsLogits = tfc.neg(tfc.abs(logits));\n return tfc.add(\n tfc.sub(reluLogits, tfc.mul(logits, labels)),\n tfc.log1p(tfc.exp(negAbsLogits)));\n });\n}\n\nexport function binaryCrossentropy(yTrue: Tensor, yPred: Tensor): Tensor {\n return tidy(() => {\n let y: Tensor;\n y = tfc.clipByValue(yPred, epsilon(), 1 - epsilon());\n y = tfc.log(tfc.div(y, tfc.sub(1, y)));\n return tfc.mean(sigmoidCrossEntropyWithLogits(yTrue, y), -1);\n });\n}\n\nexport function kullbackLeiblerDivergence(\n yTrue: Tensor, yPred: Tensor): Tensor {\n return tidy(() => {\n const clippedTrue = tfc.clipByValue(yTrue, epsilon(), 1);\n const clippedPred = tfc.clipByValue(yPred, epsilon(), 1);\n return tfc.sum(\n tfc.mul(yTrue, tfc.log(tfc.div(clippedTrue, clippedPred))), -1);\n });\n}\n\nexport function poisson(yTrue: Tensor, yPred: Tensor): Tensor {\n return tidy(() => {\n const logPred = tfc.log(tfc.add(epsilon(), yPred));\n return tfc.mean(tfc.sub(yPred, tfc.mul(yTrue, logPred)), -1);\n });\n}\n\nexport function cosineProximity(yTrue: Tensor, yPred: Tensor): Tensor {\n return tidy(() => {\n const trueNormalized = l2Normalize(yTrue, -1);\n const predNormalized = l2Normalize(yPred, -1);\n const trueXPred = tfc.mul(trueNormalized, predNormalized);\n return tfc.neg(tfc.sum(trueXPred, -1));\n });\n}\n\nexport const mse = meanSquaredError;\nexport const MSE = meanSquaredError;\nexport const mae = meanAbsoluteError;\nexport const MAE = meanAbsoluteError;\nexport const mape = meanAbsolutePercentageError;\nexport const MAPE = meanAbsolutePercentageError;\nexport const msle = meanSquaredLogarithmicError;\nexport const MSLE = meanSquaredLogarithmicError;\nexport const kld = kullbackLeiblerDivergence;\nexport const KLD = kullbackLeiblerDivergence;\nexport const cosine = cosineProximity;\n\n// TODO(michaelterry): Add deserialize() function.\n\nexport const lossesMap: {[functionName: string]: LossOrMetricFn} = {\n meanSquaredError,\n meanAbsoluteError,\n meanAbsolutePercentageError,\n meanSquaredLogarithmicError,\n squaredHinge,\n hinge,\n categoricalHinge,\n logcosh,\n categoricalCrossentropy,\n sparseCategoricalCrossentropy,\n 
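The derivation above motivates the overflow-safe form `max(x, 0) - x*z + log(1 + exp(-|x|))`. A quick numerical sanity check of that equivalence using ordinary ops (illustrative only, not part of the library):

import * as tf from '@tensorflow/tfjs';

const x = tf.tensor1d([-3, -0.5, 0.5, 3]);  // logits
const z = tf.tensor1d([0, 1, 0, 1]);        // labels

// Stable form: max(x, 0) - x*z + log(1 + exp(-|x|)).
const stable = tf.add(
    tf.sub(tf.relu(x), tf.mul(x, z)),
    tf.log1p(tf.exp(tf.neg(tf.abs(x)))));

// Naive form: z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)).
const naive = tf.neg(tf.add(
    tf.mul(z, tf.log(tf.sigmoid(x))),
    tf.mul(tf.sub(1, z), tf.log(tf.sub(1, tf.sigmoid(x))))));

stable.print();
naive.print();  // same values up to floating-point error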
binaryCrossentropy,\n kullbackLeiblerDivergence,\n poisson,\n cosineProximity\n};\n\n// Porting note: This diverges from the PyKeras implementation and may need to\n// change based on (de)serialization requirements.\nexport function get(identifierOrFn: string|LossOrMetricFn): LossOrMetricFn {\n if (typeof identifierOrFn === 'string') {\n if (identifierOrFn in lossesMap) {\n return lossesMap[identifierOrFn];\n }\n let errMsg = `Unknown loss ${identifierOrFn}`;\n if (identifierOrFn.toLowerCase().includes('softmaxcrossentropy')) {\n errMsg = `Unknown loss ${identifierOrFn}. ` +\n 'Use \"categoricalCrossentropy\" as the string name for ' +\n 'tf.losses.softmaxCrossEntropy';\n }\n throw new ValueError(errMsg);\n } else {\n return identifierOrFn;\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * Built-in metrics.\n */\n\nimport * as tfc from '@tensorflow/tfjs-core';\nimport {Tensor, tidy} from '@tensorflow/tfjs-core';\n\nimport * as K from './backend/tfjs_backend';\nimport {NotImplementedError, ValueError} from './errors';\nimport {categoricalCrossentropy as categoricalCrossentropyLoss, cosineProximity, meanAbsoluteError, meanAbsolutePercentageError, meanSquaredError, sparseCategoricalCrossentropy as sparseCategoricalCrossentropyLoss} from './losses';\nimport {binaryCrossentropy as lossBinaryCrossentropy} from './losses';\nimport {lossesMap} from './losses';\nimport {LossOrMetricFn} from './types';\nimport * as util from './utils/generic_utils';\n\nexport function binaryAccuracy(yTrue: Tensor, yPred: Tensor): Tensor {\n return tidy(() => {\n const threshold = tfc.mul(.5, tfc.onesLike(yPred));\n const yPredThresholded = K.cast(tfc.greater(yPred, threshold), yTrue.dtype);\n return tfc.mean(tfc.equal(yTrue, yPredThresholded), -1);\n });\n}\n\nexport function categoricalAccuracy(yTrue: Tensor, yPred: Tensor): Tensor {\n return tidy(\n () => K.cast(\n tfc.equal(tfc.argMax(yTrue, -1), tfc.argMax(yPred, -1)), 'float32'));\n}\n\nfunction truePositives(yTrue: Tensor, yPred: Tensor): Tensor {\n return tidy(() => {\n return tfc.cast(\n tfc.sum(tfc.logicalAnd(tfc.equal(yTrue, 1), tfc.equal(yPred, 1))),\n 'float32');\n });\n}\n\nfunction falseNegatives(yTrue: Tensor, yPred: Tensor): Tensor {\n return tidy(() => {\n return tfc.cast(\n tfc.sum(tfc.logicalAnd(tfc.equal(yTrue, 1), tfc.equal(yPred, 0))),\n 'float32');\n });\n}\n\nfunction falsePositives(yTrue: Tensor, yPred: Tensor): Tensor {\n return tidy(() => {\n return tfc.cast(\n tfc.sum(tfc.logicalAnd(tfc.equal(yTrue, 0), tfc.equal(yPred, 1))),\n 'float32');\n });\n}\n\nexport function precision(yTrue: Tensor, yPred: Tensor): Tensor {\n return tidy(() => {\n const tp = truePositives(yTrue, yPred);\n const fp = falsePositives(yTrue, yPred);\n\n const denominator = tfc.add(tp, fp);\n\n return tfc.cast(\n tfc.where(tfc.greater(denominator, 0), tfc.div(tp, denominator), 0),\n 'float32');\n });\n}\n\nexport function recall(yTrue: Tensor, yPred: Tensor): Tensor {\n return tidy(() => {\n const tp = truePositives(yTrue, yPred);\n const fn = falseNegatives(yTrue, yPred);\n\n const denominator = tfc.add(tp, fn);\n\n return tfc.cast(\n tfc.where(tfc.greater(denominator, 0), tfc.div(tp, denominator), 0),\n 'float32');\n });\n}\n\nexport function binaryCrossentropy(yTrue: Tensor, yPred: Tensor): Tensor {\n 
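The metrics above count true positives, false positives and false negatives, and return 0 for precision or recall when the denominator is 0. A minimal standalone sketch over plain 0/1 arrays (the sample labels are made up, not taken from the bundled code):

// Precision and recall over 0/1 arrays, mirroring the TP/FP/FN counting above.
function precisionRecall(yTrue: number[], yPred: number[]): {precision: number; recall: number} {
  let tp = 0, fp = 0, fn = 0;
  for (let i = 0; i < yTrue.length; ++i) {
    if (yPred[i] === 1 && yTrue[i] === 1) tp++;
    else if (yPred[i] === 1 && yTrue[i] === 0) fp++;
    else if (yPred[i] === 0 && yTrue[i] === 1) fn++;
  }
  // As in the source, a zero denominator yields 0 rather than NaN.
  const precision = tp + fp > 0 ? tp / (tp + fp) : 0;
  const recall = tp + fn > 0 ? tp / (tp + fn) : 0;
  return {precision, recall};
}

console.log(precisionRecall([1, 0, 1, 1], [1, 1, 0, 1])); // { precision: 0.666..., recall: 0.666... }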
return lossBinaryCrossentropy(yTrue, yPred);\n}\n\nexport function sparseCategoricalAccuracy(\n yTrue: Tensor, yPred: Tensor): Tensor {\n if (yTrue.rank === yPred.rank) {\n yTrue = tfc.squeeze(yTrue, [yTrue.rank - 1]);\n }\n yPred = tfc.argMax(yPred, -1);\n if (yPred.dtype !== yTrue.dtype) {\n yPred = tfc.cast(yPred, yTrue.dtype);\n }\n return tfc.cast(tfc.equal(yTrue, yPred), 'float32');\n}\n\nexport function topKCategoricalAccuracy(yTrue: Tensor, yPred: Tensor): Tensor {\n throw new NotImplementedError();\n}\n\nexport function sparseTopKCategoricalAccuracy(\n yTrue: Tensor, yPred: Tensor): Tensor {\n throw new NotImplementedError();\n}\n\n// Aliases.\nexport const mse = meanSquaredError;\nexport const MSE = meanSquaredError;\nexport const mae = meanAbsoluteError;\nexport const MAE = meanAbsoluteError;\nexport const mape = meanAbsolutePercentageError;\nexport const MAPE = meanAbsolutePercentageError;\nexport const categoricalCrossentropy = categoricalCrossentropyLoss;\nexport const cosine = cosineProximity;\nexport const sparseCategoricalCrossentropy = sparseCategoricalCrossentropyLoss;\n\n// TODO(cais, nielsene): Add serialize().\n\nexport const metricsMap: {[functionName: string]: LossOrMetricFn} = {\n binaryAccuracy,\n categoricalAccuracy,\n precision,\n categoricalCrossentropy,\n sparseCategoricalCrossentropy,\n mse,\n MSE,\n mae,\n MAE,\n mape,\n MAPE,\n cosine\n};\n\nexport function get(identifier: string|LossOrMetricFn): LossOrMetricFn {\n if (typeof identifier === 'string' && identifier in metricsMap) {\n return metricsMap[identifier];\n } else if (typeof identifier !== 'string' && identifier != null) {\n return identifier;\n } else {\n throw new ValueError(`Unknown metric ${identifier}`);\n }\n}\n\n/**\n * Get the shortcut function name.\n *\n * If the fn name is a string,\n * directly return the string name.\n * If the function is included in metricsMap or lossesMap,\n * return key of the map.\n * - If the function relative to multiple keys,\n * return the first found key as the function name.\n * - If the function exists in both lossesMap and metricsMap,\n * search lossesMap first.\n * If the function is not included in metricsMap or lossesMap,\n * return the function name.\n *\n * @param fn loss function, metric function, or short cut name.\n * @returns Loss or Metric name in string.\n */\nexport function getLossOrMetricName(fn: string|LossOrMetricFn): string {\n util.assert(fn !== null, `Unknown LossOrMetricFn ${fn}`);\n if (typeof fn === 'string') {\n return fn;\n } else {\n let fnName;\n for (const key of Object.keys(lossesMap)) {\n if (lossesMap[key] === fn) {\n fnName = key;\n break;\n }\n }\n if (fnName !== undefined) {\n return fnName;\n }\n for (const key of Object.keys(metricsMap)) {\n if (metricsMap[key] === fn) {\n fnName = key;\n break;\n }\n }\n if (fnName !== undefined) {\n return fnName;\n }\n return (fn as Function).name;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/** Utility functions related to user-defined metadata. 
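sparseCategoricalAccuracy above takes the argmax of each prediction row and compares it against the integer label. The same idea over plain arrays, as a standalone sketch with made-up sample rows (not taken from the bundled code):

// Fraction of rows whose argmax matches the integer class label.
function sparseCategoricalAccuracyJs(yTrue: number[], yPred: number[][]): number {
  let correct = 0;
  for (let i = 0; i < yTrue.length; ++i) {
    const argMax = yPred[i].indexOf(Math.max(...yPred[i]));
    if (argMax === yTrue[i]) correct++;
  }
  return correct / yTrue.length;
}

console.log(sparseCategoricalAccuracyJs([2, 0], [[0.1, 0.2, 0.7], [0.6, 0.3, 0.1]])); // 1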
*/\n\n// Maximum recommended serialized size for user-defined metadata.\n// Beyond this limit, a warning message will be printed during model loading and\n// saving.\nexport const MAX_USER_DEFINED_METADATA_SERIALIZED_LENGTH = 1 * 1024 * 1024;\n\n/**\n * Check validity of user-defined metadata.\n *\n * @param userDefinedMetadata\n * @param modelName Name of the model that the user-defined metadata belongs to.\n * Used during construction of error messages.\n * @param checkSize Whether to check the size of the metadata is under\n * recommended limit. Default: `false`. If `true`, will try stringify the\n * JSON object and print a console warning if the serialzied size is above the\n * limit.\n * @throws Error if `userDefinedMetadata` is not a plain JSON object.\n */\nexport function checkUserDefinedMetadata(\n userDefinedMetadata: {}, modelName: string, checkSize = false): void {\n if (userDefinedMetadata == null ||\n typeof userDefinedMetadata !== 'object' ||\n Object.getPrototypeOf(userDefinedMetadata) !== Object.prototype ||\n !plainObjectCheck(userDefinedMetadata)) {\n throw new Error(\n 'User-defined metadata is expected to be a JSON object, but is not.');\n }\n\n if (checkSize) {\n const out = JSON.stringify(userDefinedMetadata);\n if (out.length > MAX_USER_DEFINED_METADATA_SERIALIZED_LENGTH) {\n console.warn(\n `User-defined metadata of model \"${modelName}\" is too large in ` +\n `size (length=${out.length} when serialized). It is not ` +\n `recommended to store such large objects in user-defined metadata. ` +\n `Please make sure its serialized length is <= ` +\n `${MAX_USER_DEFINED_METADATA_SERIALIZED_LENGTH}.`);\n }\n }\n}\n\n/**\n * Check if an input is plain JSON object or any valid subfield of it.\n *\n * @param x The input to be checked.\n * @param assertObject Whether to assert `x` is a JSON object, i.e., reject\n * cases of arrays and primitives.\n * @return Returns `true` if and only if `x` is a plain JSON object,\n * a JSON-valid primitive including string, number, boolean and null,\n * or an array of the said types.\n */\n// tslint:disable-next-line:no-any\nexport function plainObjectCheck(x: any): boolean {\n if (x === null) {\n // Note: typeof `null` is 'object', and `null` is valid in JSON.\n return true;\n } else if (typeof x === 'object') {\n if (Object.getPrototypeOf(x) === Object.prototype) {\n // `x` is a JavaScript object and its prototype is Object.\n const keys = Object.keys(x);\n for (const key of keys) {\n if (typeof key !== 'string') {\n // JSON keys must be strings.\n return false;\n }\n if (!plainObjectCheck(x[key])) { // Recursive call.\n return false;\n }\n }\n return true;\n } else {\n // `x` is a JavaScript object but its prototype is not Object.\n if (Array.isArray(x)) {\n // `x` is a JavaScript array.\n for (const item of x) {\n if (!plainObjectCheck(item)) { // Recursive call.\n return false;\n }\n }\n return true;\n } else {\n // `x` is a JavaScript object and its prototype is not Object,\n // and it's not an Array. 
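checkUserDefinedMetadata above only warns, rather than throws, when the serialized metadata exceeds MAX_USER_DEFINED_METADATA_SERIALIZED_LENGTH (1024 * 1024 characters). A minimal standalone sketch of that size check, with a made-up metadata object (not taken from the bundled code):

// Same limit as the constant above.
const MAX_LENGTH = 1 * 1024 * 1024;
// Hypothetical user-defined metadata attached to a model.
const userDefinedMetadata = {datasetVersion: '2024-05-01', classNames: ['cell', 'background']};
const serialized = JSON.stringify(userDefinedMetadata);
if (serialized.length > MAX_LENGTH) {
  // Mirrors the console.warn path above; small objects like this one stay well under the limit.
  console.warn(`user-defined metadata is ${serialized.length} characters when serialized; keep it <= ${MAX_LENGTH}`);
}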
I.e., it's a complex object such as\n // `Error` and `Date`.\n return false;\n }\n }\n } else {\n // `x` is not a JavaScript object or `null`.\n const xType = typeof x;\n return xType === 'string' || xType === 'number' || xType === 'boolean';\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\nimport {Container} from '../engine/container';\nimport {Layer, Node} from '../engine/topology';\nimport {countParamsInWeights} from './variable_utils';\n\n/**\n * Print the summary of a LayersModel object.\n *\n * @param model tf.LayersModel instance.\n * @param lineLength Total length of printed lines. Set this to adapt to the\n * display to different terminal or console sizes.\n * @param positions Relative or absolute positions of log elements in each\n * line. Each number corresponds to right-most (i.e., ending) position of a\n * column.\n * If not provided, defaults to `[0.45, 0.85, 1]` for sequential-like\n * models and `[0.33, 0.55, 0.67, 1]` for non-sequential like models.\n * @param printFn Print function to use.\n * It will be called on each line of the summary. You can provide a custom\n * function in order to capture the string summary. Defaults to `console.log`.\n */\nexport function printSummary(\n model: Container, lineLength?: number, positions?: number[],\n // tslint:disable-next-line:no-any\n printFn: (message?: any, ...optionalParams: any[]) => void =\n console.log): void {\n const sequentialLike = isModelSequentialLike(model);\n\n // Header names for different log elements.\n const toDisplay: string[] = ['Layer (type)', 'Input Shape', 'Output shape', 'Param #'];\n if (sequentialLike) {\n lineLength = lineLength || 90;\n positions = positions || [0.32, 0.61, 0.89, 1];\n } else {\n lineLength = lineLength || 115;\n positions = positions || [0.24, 0.48, 0.70, 0.80, 1];\n // Header names for different log elements.\n }\n\n if (positions[positions.length - 1] <= 1) {\n // `positions` is relative. Convert it to absolute positioning.\n positions = positions.map(p => Math.floor(lineLength * p));\n }\n\n let relevantNodes: Node[];\n if (!sequentialLike) {\n toDisplay.push('Receives inputs');\n relevantNodes = [];\n for (const depth in model.nodesByDepth) {\n relevantNodes.push(...model.nodesByDepth[depth]);\n }\n }\n\n printFn('_'.repeat(lineLength));\n printRow(toDisplay, positions, printFn);\n printFn('='.repeat(lineLength));\n\n const layers = model.layers;\n for (let i = 0; i < layers.length; ++i) {\n if (sequentialLike) {\n printLayerSummary(layers[i], positions, printFn);\n } else {\n printLayerSummaryWithConnections(\n layers[i], positions, relevantNodes, printFn);\n }\n printFn((i === layers.length - 1 ? 
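plainObjectCheck above accepts null, JSON primitives, arrays of valid values, and objects whose prototype is exactly Object.prototype, rejecting anything else such as Date or Error instances. A compact standalone sketch of the same rule (not taken from the bundled code):

// Accept only values that round-trip cleanly through JSON.
function isPlainJson(x: unknown): boolean {
  if (x === null) return true;
  const t = typeof x;
  if (t === 'string' || t === 'number' || t === 'boolean') return true;
  if (Array.isArray(x)) return x.every(isPlainJson);
  if (t === 'object' && Object.getPrototypeOf(x) === Object.prototype) {
    return Object.values(x as object).every(isPlainJson);
  }
  return false; // e.g. Date, Error, Map, class instances
}

console.log(isPlainJson({a: [1, 'two', null]})); // true
console.log(isPlainJson({when: new Date()}));    // false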
'=' : '_').repeat(lineLength));\n }\n\n // tslint:disable-next-line:no-any\n (model as any).checkTrainableWeightsConsistency();\n\n const trainableCount = countTrainableParams(model);\n const nonTrainableCount = countParamsInWeights(model.nonTrainableWeights);\n\n printFn(`Total params: ${trainableCount + nonTrainableCount}`);\n printFn(`Trainable params: ${trainableCount}`);\n printFn(`Non-trainable params: ${nonTrainableCount}`);\n printFn('_'.repeat(lineLength));\n}\n\nfunction countTrainableParams(model: Container): number {\n let trainableCount: number;\n // tslint:disable:no-any\n if ((model as any).collectedTrainableWeights != null) {\n trainableCount =\n countParamsInWeights((model as any).collectedTrainableWeights);\n } else {\n trainableCount = countParamsInWeights(model.trainableWeights);\n }\n // tslint:enable:no-any\n return trainableCount;\n}\n\nfunction isModelSequentialLike(model: Container): boolean {\n let sequentialLike = true;\n const nodesByDepth: Node[][] = [];\n const nodes: Node[] = [];\n for (const depth in model.nodesByDepth) {\n nodesByDepth.push(model.nodesByDepth[depth]);\n }\n for (const depthNodes of nodesByDepth) {\n if (depthNodes.length > 1 ||\n depthNodes.length === 1 && depthNodes[0].inboundLayers.length > 1) {\n sequentialLike = false;\n break;\n }\n nodes.push(...depthNodes);\n }\n if (sequentialLike) {\n // Search for shared layers.\n for (const layer of model.layers) {\n let flag = false;\n for (const node of layer.inboundNodes) {\n if (nodes.indexOf(node) !== -1) {\n if (flag) {\n sequentialLike = false;\n break;\n } else {\n flag = true;\n }\n }\n }\n if (!sequentialLike) {\n break;\n }\n }\n }\n return sequentialLike;\n}\n\nfunction printRow(\n fields: string[], positions: number[],\n // tslint:disable-next-line:no-any\n printFn: (message?: any, ...optionalParams: any[]) => void = console.log) {\n let line = '';\n for (let i = 0; i < fields.length; ++i) {\n if (i > 0) {\n line = line.slice(0, line.length - 1) + ' ';\n }\n line += fields[i];\n line = line.slice(0, positions[i]);\n line += ' '.repeat(positions[i] - line.length);\n }\n printFn(line);\n}\n\n/**\n * Prints a summary for a single Layer, without connectivity information.\n *\n * @param layer: Layer instance to print.\n */\nfunction printLayerSummary(\n layer: Layer, positions: number[],\n // tslint:disable-next-line:no-any\n printFn: (message?: any, ...optionalParams: any[]) => void) {\n let outputShape: string;\n let inputShape: string;\n\n try {\n inputShape = (layer.inboundNodes.map(\n x => JSON.stringify(x.inputShapes)\n )).join(',');\n } catch (err) {\n inputShape = 'multiple';\n }\n\n try {\n outputShape = JSON.stringify(layer.outputShape);\n } catch (err) {\n outputShape = 'multiple';\n }\n\n const name = layer.name;\n const className = layer.getClassName();\n const fields: string[] =\n [`${name} (${className})`, inputShape,\n outputShape, layer.countParams().toString()];\n printRow(fields, positions, printFn);\n}\n\n/**\n * Prints a summary for a single Layer, with connectivity information.\n */\nfunction printLayerSummaryWithConnections(\n layer: Layer, positions: number[], relevantNodes: Node[],\n // tslint:disable-next-line:no-any\n printFn: (message?: any, ...optionalParams: any[]) => void) {\n let outputShape: string;\n let inputShape: string;\n\n try {\n inputShape = (layer.inboundNodes.map(\n x => JSON.stringify(x.inputShapes)\n )).join(',');\n } catch (err) {\n inputShape = 'multiple';\n }\n\n try {\n outputShape = JSON.stringify(layer.outputShape);\n } catch (err) {\n 
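printRow above lays out each summary line by writing every field at a fixed right-edge column position, truncating or padding as needed. A standalone sketch with made-up field values and positions (not taken from the bundled code):

// Build one summary row: each field ends at its column's right edge.
function formatRow(fields: string[], positions: number[]): string {
  let line = '';
  for (let i = 0; i < fields.length; ++i) {
    if (i > 0) line = line.slice(0, line.length - 1) + ' ';
    line += fields[i];
    line = line.slice(0, positions[i]);          // truncate if too long
    line += ' '.repeat(positions[i] - line.length); // pad to the column edge
  }
  return line;
}

console.log(formatRow(['dense_1 (Dense)', '[null,32]', '2080'], [25, 40, 50]));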
outputShape = 'multiple';\n }\n\n const connections: string[] = [];\n for (const node of layer.inboundNodes) {\n if (relevantNodes != null && relevantNodes.length > 0 &&\n relevantNodes.indexOf(node) === -1) {\n continue;\n }\n for (let i = 0; i < node.inboundLayers.length; ++i) {\n const inboundLayer = node.inboundLayers[i].name;\n const inboundLayerIndex = node.nodeIndices[i];\n const inboundTensorIndex = node.tensorIndices[i];\n connections.push(\n `${inboundLayer}[${inboundLayerIndex}][${inboundTensorIndex}]`);\n }\n }\n const name = layer.name;\n const className = layer.getClassName();\n const firstConnection = connections.length === 0 ? '' : connections[0];\n const fields: string[] = [\n `${name} (${className})`, inputShape,\n outputShape, layer.countParams().toString(),\n firstConnection\n ];\n\n printRow(fields, positions, printFn);\n for (let i = 1; i < connections.length; ++i) {\n printRow(['', '', '', '', connections[i]], positions, printFn);\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n// Porting note: This file doesn't exist in PyKeras.\n// Its purpose here is to centralize the boundary layer between\n// tfjs-layers's internal Config TS-Centric format and PyKeras's\n// serialized Python Config format.\n\nimport {serialization} from '@tensorflow/tfjs-core';\n\nimport {PyJsonValue} from '../keras_format/types';\nimport * as generic_utils from '../utils/generic_utils';\n// tslint:enable\n\n/**\n * Test whether a value in an array is the name of a LayersModel or Layer.\n * @param key The key name that the value is found under. 
Note that the key\n * may not be at the level immediately above the value, if the value is in a\n * nested array.\n * @param index Index of the value in the Array that it is found in.\n * @param value The value object.\n * @returns A boolean indicating whether value is a name.\n */\nfunction isArrayItemInputOrOutputName(\n key: string, index: number, value: T): boolean {\n return (key === 'inboundNodes' || key === 'outputLayers' ||\n key === 'inputLayers') &&\n index === 0 && typeof value === 'string';\n}\n\n/**\n * Convert a Pythonic config object to TypeScript config object.\n * @param pythonicConfig The config object to convert.\n * @param key Optional key name of the object being converted.\n * @returns Result of the conversion.\n */\nexport function convertPythonicToTs(\n pythonicConfig: PyJsonValue, key?: string): serialization.ConfigDictValue {\n if (pythonicConfig === null) {\n return null;\n } else if (typeof pythonicConfig === 'string') {\n return generic_utils.toCamelCase(pythonicConfig);\n } else if (\n (typeof pythonicConfig === 'number') ||\n (typeof pythonicConfig === 'boolean')) {\n return pythonicConfig;\n } else if (pythonicConfig instanceof Array) {\n const tsArray = [];\n const arrayLength = pythonicConfig.length;\n for (let i = 0; i < arrayLength; ++i) {\n const item = pythonicConfig[i];\n if (isArrayItemInputOrOutputName(key, i, item)) {\n tsArray.push(item);\n } else {\n tsArray.push(convertPythonicToTs(item, key));\n }\n }\n return tsArray;\n } else {\n const tsDict: serialization.ConfigDict = {};\n for (const pythonicKey of Object.keys(pythonicConfig)) {\n const pythonicValue = pythonicConfig[pythonicKey];\n if (pythonicKey === 'name' && typeof pythonicValue === 'string') {\n // Special case the 'name' key with a string value. Name values, such as\n // the names of LayersModel and Layer instances, should not undergo the\n // camel-case conversion.\n tsDict[pythonicKey] = pythonicValue;\n } else {\n const tsKey = generic_utils.toCamelCase(pythonicKey);\n tsDict[tsKey] = convertPythonicToTs(pythonicValue, tsKey);\n }\n }\n return tsDict;\n }\n}\n\n/**\n * Convert a TypeScript config object to Python config object.\n * @param tsConfig The config object to convert.\n * @param key Optional key name of the object being converted.\n * @returns Result of the conversion.\n */\nexport function convertTsToPythonic(\n tsConfig: serialization.ConfigDictValue, key?: string): PyJsonValue {\n if (tsConfig === null || tsConfig === undefined) {\n return null;\n } else if (typeof tsConfig === 'string') {\n return generic_utils.toSnakeCase(tsConfig);\n } else if (\n (typeof tsConfig === 'number') || (typeof tsConfig === 'boolean')) {\n return tsConfig;\n } else if (tsConfig instanceof Array) {\n const pyArray = [];\n const arrayLength = tsConfig.length;\n for (let i = 0; i < arrayLength; ++i) {\n const item = tsConfig[i];\n if (isArrayItemInputOrOutputName(key, i, item)) {\n pyArray.push(item);\n } else {\n pyArray.push(convertTsToPythonic(item, key));\n }\n }\n return pyArray;\n } else {\n const pyDict: serialization.ConfigDict = {};\n for (const tsKey of Object.keys(tsConfig)) {\n const tsValue = tsConfig[tsKey];\n const pyKey = generic_utils.toSnakeCase(tsKey);\n if ((tsKey === 'name' || tsKey === 'className') &&\n typeof tsValue === 'string') {\n // Special case the 'name' key with a string value. 
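convertPythonicToTs above recursively walks a config, camel-casing keys (and most string values) while leaving 'name' values untouched. A simplified standalone sketch that converts keys only, with a small regex helper standing in for generic_utils.toCamelCase (not taken from the bundled code):

// snake_case -> camelCase for a single key.
const toCamel = (s: string) => s.replace(/_(.)/g, (_, c: string) => c.toUpperCase());

// Recursively rename keys; 'name' values stay as-is, mirroring the special case above.
function keysToCamel(value: unknown): unknown {
  if (Array.isArray(value)) return value.map(keysToCamel);
  if (value !== null && typeof value === 'object') {
    const out: {[k: string]: unknown} = {};
    for (const [k, v] of Object.entries(value)) {
      out[toCamel(k)] = k === 'name' ? v : keysToCamel(v);
    }
    return out;
  }
  return value;
}

console.log(keysToCamel({class_name: 'Dense', config: {units: 10, use_bias: true}}));
// -> { className: 'Dense', config: { units: 10, useBias: true } }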
Name values, such as\n // the names of LayersModel and Layer instances, should not undergo the\n // snake-case conversion.\n pyDict[pyKey] = tsValue;\n } else {\n pyDict[pyKey] = convertTsToPythonic(tsValue, tsKey);\n }\n }\n return pyDict;\n }\n}\n","/** @license See the LICENSE file. */\n\n// This code is auto-generated, do not modify this file!\nconst version = '4.2.0';\nexport {version};\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/* Original source: keras/engine/topology.py */\n\nimport {NamedTensorMap, Scalar, serialization, Tensor, tidy} from '@tensorflow/tfjs-core';\n\nimport {getUid} from '../backend/state';\nimport {NotImplementedError, RuntimeError, ValueError} from '../errors';\nimport {Shape} from '../keras_format/common';\nimport {TensorKeyWithArgsArray} from '../keras_format/node_config';\nimport {PyJsonDict} from '../keras_format/types';\nimport {deserialize as deserializeLayer} from '../layers/serialization';\nimport {Kwargs} from '../types';\nimport * as generic_utils from '../utils/generic_utils';\nimport {convertTsToPythonic} from '../utils/serialization_utils';\nimport * as types_utils from '../utils/types_utils';\nimport {batchSetValue, LayerVariable} from '../variables';\nimport {version as layersVersion} from '../version';\n\nimport {execute, FeedDict} from './executor';\nimport {InputLayer} from './input_layer';\nimport {DisposeResult, Layer, Node, SymbolicTensor} from './topology';\n\n/** Constructor config for Container. */\nexport interface ContainerArgs {\n inputs: SymbolicTensor|SymbolicTensor[];\n outputs: SymbolicTensor|SymbolicTensor[];\n name?: string;\n}\n\n/**\n * A Container is a directed acyclic graph of layers.\n *\n * It is the topological form of a \"model\". 
A LayersModel\n * is simply a Container with added training routines.\n *\n */\nexport abstract class Container extends Layer {\n inputs: SymbolicTensor[];\n outputs: SymbolicTensor[];\n\n inputLayers: Layer[];\n inputLayersNodeIndices: number[];\n inputLayersTensorIndices: number[];\n\n outputLayers: Layer[];\n outputLayersNodeIndices: number[];\n outputLayersTensorIndices: number[];\n\n layers: Layer[];\n layersByDepth: {[depth: string]: Layer[]};\n nodesByDepth: {[depth: string]: Node[]};\n\n internalContainerRefs: Container[];\n\n containerNodes = new Set();\n\n // TODO(michaelterry): Add cache support\n // private outputMaskCache: any;\n // private outputTensorCache: any;\n // private outputShapeCache: any;\n\n inputNames: string[];\n outputNames: string[];\n feedInputShapes: Shape[];\n\n protected internalInputShapes: Shape[];\n protected internalOutputShapes: Shape[];\n // TODO(cais): Maybe 'feed' should not in the names of these variables,\n // due to the fact that our backend is not symbolic.\n protected feedInputNames: string[];\n protected feedOutputNames: string[];\n\n constructor(args: ContainerArgs) {\n // No args passed to super's constructor.\n super({});\n this.name = args.name;\n if (this.name == null) {\n const prefix = this.getClassName().toLowerCase();\n this.name = getUid(prefix);\n }\n\n this.supportsMasking = false;\n this.trainable_ = true;\n\n // TODO(michaelterry): Initialize perInputLosses/Updates here.\n\n // Container-specific properties.\n if (Array.isArray(args.inputs)) {\n this.inputs = args.inputs.slice();\n } else {\n this.inputs = [args.inputs];\n }\n if (Array.isArray(args.outputs)) {\n this.outputs = args.outputs.slice();\n } else {\n this.outputs = [args.outputs];\n }\n\n // Check for redundancy in inputs.\n if (generic_utils.unique(this.inputs).length !== this.inputs.length) {\n throw new ValueError(\n 'The list of inputs passed to the model is ' +\n 'redundant. All inputs should only appear once. Found: ' +\n `${this.inputs.map(x => x.name)}`);\n }\n\n // Check for redundancy in outputs.\n if (generic_utils.unique(this.outputs).length !== this.outputs.length) {\n console.warn(\n 'The list of outputs passed to the model is redundant. ' +\n 'All outputs should only appear once. Found: ' +\n `${this.outputs.map(x => x.name)}`);\n }\n\n /*\n List of initial layers (1 to 1 mapping with this.inputs, hence the same\n layer might appear twice)\n */\n this.inputLayers = [];\n this.inputLayersNodeIndices = [];\n this.inputLayersTensorIndices = [];\n /*\n List of layers (1 to 1 mapping with this.outputs, hence the same layer\n might appear twice)\n */\n this.outputLayers = [];\n this.outputLayersNodeIndices = [];\n this.outputLayersTensorIndices = [];\n /*\n All layers in order of horizontal graph traversal. Entries are unique.\n Includes input and output layers.\n */\n this.layers = [];\n\n /*\n References to container layers that were constructed internally. We need\n these to properly dispose of tensors from nested containers.\n */\n this.internalContainerRefs = [];\n\n // TODO(michaelterry): Determine if caching still needed with eager\n // backend.\n /*\n This is for performance optimization when calling the Container on new\n inputs. Every time the Container is called on a set on input tensors,\n we compute the output tensors, output masks and output shapes in one pass,\n then cache them here. 
When one of these outputs is queried later,\n we retrieve it from there instead of recomputing it.\n */\n // this.outputTensorCache = {};\n // this.outputShapeCache = {};\n\n // Build this.outputLayers:\n for (const x of this.outputs) {\n const layer = x.sourceLayer;\n const nodeIndex = x.nodeIndex;\n const tensorIndex = x.tensorIndex;\n this.outputLayers.push(layer);\n this.outputLayersNodeIndices.push(nodeIndex);\n this.outputLayersTensorIndices.push(tensorIndex);\n }\n\n // TODO(michaelterry): Add output mask cache code.\n\n // Build this.inputLayers:\n for (const x of this.inputs) {\n const layer = x.sourceLayer;\n const nodeIndex = x.nodeIndex;\n const tensorIndex = x.tensorIndex;\n /*\n It's supposed to be an input layer, so only one node\n and one tensor output.\n */\n generic_utils.assert(nodeIndex === 0, 'input layer has >1 nodes');\n generic_utils.assert(tensorIndex === 0, 'input layer has >1 tensors');\n this.inputLayers.push(layer);\n this.inputLayersNodeIndices.push(nodeIndex);\n this.inputLayersTensorIndices.push(tensorIndex);\n }\n\n // Build this.inputNames and this.outputNames.\n this.inputNames = [];\n this.outputNames = [];\n this.feedInputShapes = [];\n this.feedInputNames = [];\n this.feedOutputNames = [];\n for (let i = 0; i < this.inputLayers.length; i++) {\n const layer = this.inputLayers[i];\n // Check that layer is an InputLayer.\n if (!(layer instanceof InputLayer)) {\n throw new TypeError(\n 'Input layers to a LayersModel must be InputLayer objects. ' +\n `Received inputs: ${args.inputs}. ` +\n `Input ${i} (0-based) originates ` +\n `from layer type ${layer.getClassName()}.`);\n }\n this.inputNames.push(layer.name);\n this.feedInputShapes.push(layer.batchInputShape);\n\n this.feedInputNames.push(layer.name);\n }\n for (const layer of this.outputLayers) {\n this.outputNames.push(layer.name);\n }\n\n this.internalInputShapes = this.inputs.map(x => x.shape);\n this.internalOutputShapes = this.outputs.map(x => x.shape);\n\n /*\n Container_nodes: set of nodes included in the graph (not all nodes\n included in the layers are relevant to the current graph).\n */\n // ids of all nodes relevant to the Container:\n const nodesDepths: {[nodeID: string]: number} = {};\n // To recover nodes from their ID.\n const nodeIDToNode: {[nodeID: string]: Node} = {};\n const layersDepths: {[layerID: string]: number} = {};\n // To layers from their ID.\n const layerIDToLayer: {[layerID: string]: Layer} = {};\n const layerIndices: {[layerID: string]: number} = {};\n const nodesInDecreasingDepth: Node[] = [];\n\n /**\n * Builds a map of the graph of layers.\n *\n * This recursively updates the map `layerIndices`,\n * the list `nodesInDecreasingDepth` and the set `containerNodes`.\n *\n * @param tensor Some tensor in a graph.\n * @param finishedNodes Set of nodes whose subgraphs have been traversed\n * completely. Useful to prevent duplicated work.\n * @param nodesInProgress Set of nodes that are currently active on the\n * recursion stack. Useful to detect cycles.\n * @param layer Layer from which `tensor` comes from. 
If not provided,\n * will be obtained from tensor.sourceLayer.\n * @param nodeIndex Node index from which `tensor` comes from.\n * @param tensorIndex TensorIndex from which `tensor` comes from.\n *\n * @exception RuntimeError if a cycle is detected.\n */\n const buildMapOfGraph =\n (tensor: SymbolicTensor, finishedNodes: Node[], nodesInProgress: Node[],\n layer?: Layer, nodeIndex?: number, tensorIndex?: number) => {\n if (layer == null || nodeIndex == null || tensorIndex == null) {\n layer = tensor.sourceLayer;\n nodeIndex = tensor.nodeIndex;\n tensorIndex = tensor.tensorIndex;\n }\n const node = layer.inboundNodes[nodeIndex];\n\n // Prevent cycles.\n if (nodesInProgress.indexOf(node) !== -1) {\n throw new RuntimeError(\n `The tensor ${tensor.name} at layer \"${layer.name}\" ` +\n 'is part of a cycle.');\n }\n\n // Don't repeat work for shared subgraphs\n if (finishedNodes.indexOf(node) !== -1) {\n return;\n }\n\n // Update containerNodes.\n this.containerNodes.add(Container.nodeKey(layer, nodeIndex));\n\n // Store the traversal order for layer sorting.\n if (!(layer.id in layerIndices)) {\n layerIndices[layer.id] = Object.keys(layerIndices).length;\n }\n\n if (nodesInProgress.indexOf(node) === -1) {\n nodesInProgress.push(node);\n }\n\n // Propagate to all previous tensors connected to this node.\n const numInboundLayers = node.inboundLayers.length;\n for (let i = 0; i < numInboundLayers; i++) {\n const x = node.inputTensors[i];\n const layer = node.inboundLayers[i];\n const nodeIndex = node.nodeIndices[i];\n const tensorIndex = node.tensorIndices[i];\n buildMapOfGraph(\n x, finishedNodes, nodesInProgress, layer, nodeIndex,\n tensorIndex);\n }\n finishedNodes.push(node);\n while (nodesInProgress.indexOf(node) >= 0) {\n nodesInProgress.splice(nodesInProgress.indexOf(node), 1);\n }\n nodesInDecreasingDepth.push(node);\n };\n\n const finishedNodes: Node[] = [];\n const nodesInProgress: Node[] = [];\n for (const x of this.outputs) {\n buildMapOfGraph(x, finishedNodes, nodesInProgress);\n }\n\n const reversedNodesInDecreasingDepth =\n nodesInDecreasingDepth.slice().reverse();\n for (const node of reversedNodesInDecreasingDepth) {\n nodeIDToNode[node.id] = node;\n // If the depth is not set, the node has no outbound nodes (depth 0).\n if (!(node.id in nodesDepths)) {\n nodesDepths[node.id] = 0;\n }\n let depth = nodesDepths[node.id];\n\n // Update the depth of the corresponding layer\n const previousDepth =\n (layersDepths[node.outboundLayer.id] == null ?\n 0 :\n layersDepths[node.outboundLayer.id]);\n\n /*\n If we've seen this layer before at a higher depth, we should use that\n depth instead of the node depth. This is necessary for shared layers\n that have inputs at different depth levels in the graph.\n */\n depth = Math.max(depth, previousDepth);\n layersDepths[node.outboundLayer.id] = depth;\n layerIDToLayer[node.outboundLayer.id] = node.outboundLayer;\n nodesDepths[node.id] = depth;\n\n // Update the depth of inbound nodes.\n for (let i = 0; i < node.inboundLayers.length; i++) {\n const inboundLayer = node.inboundLayers[i];\n const nodeIndex = node.nodeIndices[i];\n const inboundNode = inboundLayer.inboundNodes[nodeIndex];\n const previousDepth =\n (nodesDepths[inboundNode.id] == null ? 
0 :\n nodesDepths[inboundNode.id]);\n nodesDepths[inboundNode.id] = Math.max(depth + 1, previousDepth);\n nodeIDToNode[inboundNode.id] = inboundNode;\n }\n }\n\n // Build a dict {depth: list of nodes with this depth}\n const nodesByDepth: {[depth: string]: Node[]} = {};\n for (const nodeID in nodesDepths) {\n const depth = nodesDepths[nodeID];\n if (!(depth in nodesByDepth)) {\n nodesByDepth[depth] = [];\n }\n nodesByDepth[depth].push(nodeIDToNode[nodeID]);\n }\n\n // Build a dict {depth: list of layers with this depth}\n const layersByDepth: {[depth: string]: Layer[]} = {};\n for (const layerID in layersDepths) {\n const depth = layersDepths[layerID];\n if (!(depth in layersByDepth)) {\n layersByDepth[depth] = [];\n }\n layersByDepth[depth].push(layerIDToLayer[layerID]);\n }\n\n // Get sorted list of layer depths.\n let depthKeys = Object.keys(layersByDepth)\n .map(x => parseInt(x, 10))\n .sort(generic_utils.reverseNumberCompare);\n\n // Set this.layers and this.layersByDepth.\n this.layers = [];\n for (const depth of depthKeys) {\n const layersForDepth = layersByDepth[depth];\n // Container.layers needs to have a deterministic order:\n // here we order them by traversal order.\n layersForDepth.sort((a, b) => {\n const aIndex = layerIndices[a.id];\n const bIndex = layerIndices[b.id];\n if (aIndex < bIndex) {\n return -1;\n }\n if (aIndex > bIndex) {\n return 1;\n }\n return 0;\n });\n for (const layer of layersForDepth) {\n if (layer instanceof Container) {\n this.internalContainerRefs.push(layer);\n }\n this.layers.push(layer);\n }\n }\n this.layersByDepth = layersByDepth;\n\n // Get sorted list of node depths;\n depthKeys = Object.keys(nodesByDepth)\n .map(x => parseInt(x, 10))\n .sort(generic_utils.reverseNumberCompare);\n\n // Check that all tensors required are computable.\n // computable_tensors: all tensors in the graph\n // that can be computed from the inputs provided.\n const computableTensors = this.inputs.slice();\n\n // To provide a better error msg.\n const layersWithCompleteInput: string[] = [];\n for (const depth of depthKeys) {\n for (const node of nodesByDepth[depth]) {\n const layer = node.outboundLayer;\n if (layer != null) {\n for (const x of node.inputTensors) {\n if (computableTensors.indexOf(x) === -1) {\n throw new RuntimeError(\n `Graph disconnected: cannot obtain value for tensor ${x}` +\n ` at layer \"${layer.name}\". ` +\n 'The following previous layers were accessed without ' +\n `issue: ${layersWithCompleteInput}`);\n }\n }\n for (const x of node.outputTensors) {\n computableTensors.push(x);\n }\n layersWithCompleteInput.push(layer.name);\n }\n }\n }\n\n // Set this.containerNodes and this.nodesByDepth.\n this.nodesByDepth = nodesByDepth;\n\n // Ensure name unicity, which will be crucial for serialization\n // (since serialized nodes refer to layers by their name).\n const allNames = this.layers.map(x => x.name);\n for (const name of allNames) {\n const numOccurrences = allNames.filter(x => x === name).length;\n if (numOccurrences !== 1) {\n throw new RuntimeError(\n `The name \"${name}\" is used ${numOccurrences} times ` +\n 'in the model. All layer names should be unique. 
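The constructor above assigns each node a depth by walking the reverse traversal order and pushing every inbound node at least one level deeper than its consumer. A simplified standalone sketch (shared-layer handling omitted, node names made up, not taken from the bundled code):

interface MiniNode { id: string; inbound: string[]; }

// nodesInDecreasingDepth is the traversal order built from the outputs,
// with inputs first; the loop walks it in reverse, as the source above does.
function assignDepths(nodesInDecreasingDepth: MiniNode[]): {[id: string]: number} {
  const depths: {[id: string]: number} = {};
  for (const node of [...nodesInDecreasingDepth].reverse()) {
    const depth = depths[node.id] ?? 0;
    depths[node.id] = depth;
    for (const inboundId of node.inbound) {
      // Every producer sits at least one level deeper than its consumer.
      depths[inboundId] = Math.max(depth + 1, depths[inboundId] ?? 0);
    }
  }
  return depths;
}

// input -> dense -> output
console.log(assignDepths([
  {id: 'input', inbound: []},
  {id: 'dense', inbound: ['input']},
  {id: 'output', inbound: ['dense']},
]));
// -> { output: 0, dense: 1, input: 2 }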
Layer names: ' +\n JSON.stringify(allNames));\n }\n }\n\n // Layer parameters.\n // The new container starts with a single inbound node\n // for its inputs, and no outbound nodes.\n // Will be appended to by future calls to apply().\n this.outboundNodes = [];\n // Will be appended to below, and by future calls to apply().\n this.inboundNodes = [];\n\n // Create the node linking internal inputs to internal outputs.\n // (This call has side effects.)\n // tslint:disable-next-line:no-unused-expression\n new Node({\n outboundLayer: this,\n inboundLayers: [],\n nodeIndices: [],\n tensorIndices: [],\n inputTensors: this.inputs,\n outputTensors: this.outputs,\n inputMasks: this.inputs.map(x => null),\n outputMasks: this.outputs.map(x => null),\n inputShapes: this.inputs.map(x => x.shape),\n outputShapes: this.outputs.map(x => x.shape)\n });\n this.built = true;\n this._refCount = 1; // The ref count of a container always start at 1.\n }\n\n protected override assertNotDisposed() {\n if (this._refCount === 0) {\n throw new Error(`Container '${this.name}' is already disposed.`);\n }\n }\n\n /**\n * Attempt to dispose a LayersModel's weights.\n *\n * This method decrease the reference count of the LayersModel object by 1.\n *\n * A LayersModel is reference-counted. Its reference count is incremented by 1\n * when it is first constructed and when it is used as a Layer of another\n * LayersModel.\n *\n * If the reference count of a LayersModel becomes 0, the `dispose` method of\n * all its constituent `Layer`s will be called.\n *\n * Note: If the reference count is greater than 0 after the decrement, the\n * `dispose` method of its constituent `Layer`s will *not* be called.\n *\n * After a LayersModel is disposed, it cannot be used in calls such as\n * 'predict`, `evaluate` or `fit` anymore.\n *\n * @returns A DisposeResult Object with the following fields:\n * - refCountAfterDispose: The reference count of the LayersModel after this\n * `dispose()` call.\n * - numDisposedVariables: Number of `tf.Variable`s (i.e., weights) disposed\n * during this `dispose()` call.\n * @throws {Error} If the layer is not built yet, or if the LayersModel has\n * already been disposed.\n */\n override dispose(): DisposeResult {\n this.assertNotDisposed();\n const result:\n DisposeResult = {refCountAfterDispose: null, numDisposedVariables: 0};\n if (--this._refCount === 0) {\n for (const layer of this.layers) {\n result.numDisposedVariables += layer.dispose().numDisposedVariables;\n }\n\n // Call dispose on each internally created container layer again to ensure\n // their refCounts hit zero and their tensors are subsequently deleted.\n for (const container of this.internalContainerRefs) {\n result.numDisposedVariables += container.dispose().numDisposedVariables;\n }\n }\n result.refCountAfterDispose = this._refCount;\n return result;\n }\n\n override get trainable() {\n return this.trainable_;\n }\n\n override set trainable(trainable: boolean) {\n this.layers.forEach(layer => {\n // tslint:disable-next-line:no-any\n ((layer as any)._trainableWeights as LayerVariable[])\n .forEach(w => w.trainable = trainable);\n });\n this.trainable_ = trainable;\n }\n\n override get trainableWeights(): LayerVariable[] {\n // Porting Note: This check below is to prevent errors where the\n // _trainableWeights inherited from the parent class (Layer) gets\n // inadvertently used.\n if (this._trainableWeights.length > 0) {\n throw new ValueError(\n 'Container instance unexpectedly contains _trainableWeights.' 
+\n 'The trainable weights of a Container are a union of the ' +\n 'trainable weights of its consituent Layers. Its own ' +\n '_trainableWeights must remain an empty Array.');\n }\n\n if (!this.trainable) {\n return [];\n }\n let weights: LayerVariable[] = [];\n for (const layer of this.layers) {\n weights = weights.concat(layer.trainableWeights);\n }\n return weights;\n }\n\n override get nonTrainableWeights(): LayerVariable[] {\n const weights: LayerVariable[] = [];\n for (const layer of this.layers) {\n weights.push(...layer.nonTrainableWeights);\n }\n if (!this.trainable) {\n const trainableWeights: LayerVariable[] = [];\n for (const layer of this.layers) {\n trainableWeights.push(...layer.trainableWeights);\n }\n return trainableWeights.concat(weights);\n }\n return weights;\n }\n\n override get weights(): LayerVariable[] {\n return this.trainableWeights.concat(this.nonTrainableWeights);\n }\n\n /**\n * Loads all layer weights from a JSON object.\n *\n * Porting Note: HDF5 weight files cannot be directly loaded in JavaScript /\n * TypeScript. The utility script at `scripts/pykeras.py` offers means\n * to convert them into JSON strings compatible with this method.\n * Porting Note: TensorFlow.js Layers supports only loading by name currently.\n *\n * @param weights A JSON mapping weight names to weight values as nested\n * arrays of numbers, or a `NamedTensorMap`, i.e., a JSON mapping weight\n * names to `tf.Tensor` objects.\n * @param strict Require that the provided weights exactly match those\n * required by the container. Default: `true`. Passing `false` means that\n * extra weights and missing weights will be silently ignored.\n */\n loadWeights(weights: NamedTensorMap, strict = true) {\n const nameToWeight: {[name: string]: LayerVariable} = {};\n let totalWeightsCount = 0;\n for (const layer of this.layers) {\n for (const weight of layer.weights) {\n if (nameToWeight[weight.originalName] != null) {\n throw new ValueError(`Duplicate weight name: ${weight.originalName}`);\n }\n nameToWeight[weight.originalName] = weight;\n totalWeightsCount++;\n }\n }\n\n const weightValueTuples: Array<[LayerVariable, Tensor]> = [];\n for (const name in weights) {\n // TF 2.2.0 added cell name to the weight name in the format of\n // layer_name/cell_name/weight_name, we need to remove\n // the inner cell name.\n let validatedName = name;\n if (nameToWeight[name] == null) {\n const tokens = name.split('/');\n const shortenNameArray =\n tokens.slice(0, -2).concat([tokens[tokens.length - 1]]);\n validatedName = shortenNameArray.join('/');\n }\n if (nameToWeight[validatedName] != null) {\n weightValueTuples.push([nameToWeight[validatedName], weights[name]]);\n } else if (strict) {\n throw new ValueError(\n `Provided weight data has no target variable: ${name}`);\n }\n delete nameToWeight[validatedName];\n }\n\n if (strict) {\n // Check that all weights are set.\n const unsetNames: string[] = [];\n for (const name in nameToWeight) {\n unsetNames.push(name);\n }\n if (unsetNames.length > 0) {\n throw new ValueError(\n `${unsetNames.length} of ${\n totalWeightsCount} weights are not set: ` +\n `${unsetNames}`);\n }\n }\n\n batchSetValue(weightValueTuples);\n }\n\n /**\n * Util shared between different serialization methods.\n * @returns LayersModel config with Keras version information added.\n */\n protected updatedConfig(): serialization.ConfigDict {\n const theConfig = this.getConfig();\n const modelConfig: serialization.ConfigDict = {};\n modelConfig['className'] = this.getClassName();\n 
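loadWeights above falls back to a shortened weight name when no exact match exists, dropping the inner cell segment (layer_name/cell_name/weight_name becomes layer_name/weight_name). A small standalone sketch of that fallback with made-up names (not taken from the bundled code):

// Return the name as provided plus the cell-stripped fallback tried by loadWeights.
function candidateNames(name: string): string[] {
  const tokens = name.split('/');
  const shortened = tokens.slice(0, -2).concat(tokens[tokens.length - 1]).join('/');
  return [name, shortened];
}

console.log(candidateNames('lstm_1/lstm_cell_1/kernel'));
// -> [ 'lstm_1/lstm_cell_1/kernel', 'lstm_1/kernel' ]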
modelConfig['config'] = theConfig;\n modelConfig['kerasVersion'] = `tfjs-layers ${layersVersion}`;\n // TODO(nielsene): Replace something like K.backend() once\n // possible.\n modelConfig['backend'] = 'TensorFlow.js';\n return modelConfig;\n }\n\n /**\n * Returns a JSON string containing the network configuration.\n *\n * To load a network from a JSON save file, use\n * models.modelFromJSON(jsonString);\n * @param extraJsonArgs Unused in tfjs-layers, maintained for PyKeras\n * @param returnString Whether the return value should be stringified\n * (default: `true`).\n * @returns a JSON string if `returnString` (default), or a JSON object if\n * `!returnString`.\n */\n // tslint:disable-next-line:no-any\n toJSON(unused?: any, returnString = true): string|PyJsonDict {\n const modelConfig = convertTsToPythonic(this.updatedConfig()) as PyJsonDict;\n return returnString ? JSON.stringify(modelConfig) : modelConfig;\n }\n\n /**\n * Call the model on new inputs.\n *\n * In this case `call` just reapplies all ops in the graph to the new inputs\n * (e.g. build a new computational graph from the provided inputs).\n *\n * @param inputs A tensor or list of tensors.\n * @param mask A mask or list of masks. A mask can be either a tensor or null\n * (no mask).\n *\n * @return A tensor if there is a single output, or a list of tensors if there\n * are more than one outputs.\n */\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n inputs = generic_utils.toList(inputs);\n const feedDict = new FeedDict();\n for (let i = 0; i < this.inputs.length; ++i) {\n feedDict.add(this.inputs[i], inputs[i]);\n }\n return execute(this.outputs, feedDict, kwargs) as Tensor | Tensor[];\n });\n }\n\n /**\n * Computes an output mask tensor.\n *\n * @param inputs Tensor or list of tensors.\n * @param mask Tensor or list of tensors.\n *\n * @return null or a tensor (or list of tensors, one per output tensor of the\n * layer).\n */\n override computeMask(inputs: Tensor|Tensor[], mask?: Tensor|Tensor[]): Tensor\n |Tensor[] {\n return tidy(() => {\n inputs = generic_utils.toList(inputs);\n let masks: Tensor[];\n if (mask == null) {\n masks = generic_utils.pyListRepeat(null, inputs.length);\n } else {\n masks = generic_utils.toList(mask);\n }\n // TODO(michaelterry): Add support for mask caching.\n return this.runInternalGraph(inputs, masks)[1];\n });\n }\n\n /**\n * Computes the output shape of the layer.\n *\n * Assumes that the layer will be built to match that input shape provided.\n *\n * @param inputShape A shape (tuple of integers) or a list of shape tuples\n * (one per output tensor of the layer). 
Shape tuples can include null for\n * free dimensions, instead of an integer.\n */\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n const inputShapes = types_utils.normalizeShapeList(inputShape);\n if (inputShapes.length !== this.inputLayers.length) {\n throw new ValueError(\n `Invalid inputShape argument ${inputShape}: ` +\n `model has ${this.inputLayers.length} tensor inputs.`);\n }\n\n // TODO(michaelterry): Add caching\n const layersToOutputShapes: {[shapeKey: string]: Shape} = {};\n for (let i = 0; i < inputShapes.length; i++) {\n const layer = this.inputLayers[i];\n const inputShape = inputShapes[i];\n // It's an input layer: computeOutputShape is identity,\n // and there is only one node and one tensor output.\n const shapeKey = layer.name + '_0_0';\n layersToOutputShapes[shapeKey] = inputShape;\n }\n\n const depthKeys = Object.keys(this.nodesByDepth)\n .map(x => parseInt(x, 10))\n .sort(generic_utils.reverseNumberCompare);\n // Iterate over nodes, by depth level.\n if (depthKeys.length > 1) {\n for (const depth of depthKeys) {\n const nodes = this.nodesByDepth[depth];\n for (const node of nodes) {\n // This is always a single layer, never a list.\n const layer = node.outboundLayer;\n if (this.inputLayers.map(x => x.id).indexOf(layer.id) !== -1) {\n // We've already covered the input layers a few lines above.\n continue;\n }\n // Potentially redundant list, same size of node.inputTensors.\n const inputShapes: Shape[] = [];\n for (let j = 0; j < node.inboundLayers.length; j++) {\n const inboundLayer = node.inboundLayers[j];\n const nodeIndex = node.nodeIndices[j];\n const tensorIndex = node.tensorIndices[j];\n const shapeKey = `${inboundLayer.name}_${nodeIndex}_${tensorIndex}`;\n const inputShape = layersToOutputShapes[shapeKey];\n inputShapes.push(inputShape);\n }\n\n const outputShape = layer.computeOutputShape(\n generic_utils.singletonOrArray(inputShapes));\n\n const outputShapes = types_utils.normalizeShapeList(outputShape);\n const nodeIndex = layer.inboundNodes.indexOf(node);\n for (let j = 0; j < outputShapes.length; j++) {\n const shapeKey = `${layer.name}_${nodeIndex}_${j}`;\n layersToOutputShapes[shapeKey] = outputShapes[j];\n }\n }\n }\n }\n\n // Read final output shapes from layersToOutputShapes.\n const outputShapes: Shape[] = [];\n const outputShapeKeys: string[] = [];\n for (let i = 0; i < this.outputLayers.length; i++) {\n const layer = this.outputLayers[i];\n const nodeIndex = this.outputLayersNodeIndices[i];\n const tensorIndex = this.outputLayersTensorIndices[i];\n const shapeKey = `${layer.name}_${nodeIndex}_${tensorIndex}`;\n outputShapeKeys.push(shapeKey);\n }\n\n for (let i = 0; i < outputShapeKeys.length; i++) {\n const key = outputShapeKeys[i];\n generic_utils.assert(key in layersToOutputShapes);\n outputShapes.push(layersToOutputShapes[key]);\n }\n\n // TODO(michaelterry): Update cache\n return generic_utils.singletonOrArray(outputShapes);\n }\n\n /**\n * Computes output tensors for new inputs.\n *\n * Note:\n * - Expects `inputs` to be a list (potentially with 1 element).\n *\n * @param inputs List of tensors\n * @param masks List of masks (tensors or null).\n * @return Three lists: outputTensors, outputMasks, outputShapes\n */\n protected runInternalGraph(inputs: Tensor[], masks?: Tensor[]):\n [Tensor[], Tensor[], Shape[]] {\n if (masks == null) {\n masks = generic_utils.pyListRepeat(null, inputs.length);\n }\n\n // Dictionary mapping reference tensors to tuples\n // (computed tensor, compute mask)\n // we assume a 1:1 mapping 
from tensor to mask\n // TODO: raise exception when a `.computeMask()` call\n // does not return a list the same size as `call`\n const tensorMap: {[tensorID: string]: [Tensor, Tensor]} = {};\n for (let i = 0; i < this.inputs.length; ++i) {\n const x = this.inputs[i];\n const y = inputs[i];\n const mask = masks[i];\n tensorMap[x.id] = [y, mask];\n }\n\n const depthKeys = Object.keys(this.nodesByDepth)\n .map(x => parseInt(x, 10))\n .sort(generic_utils.reverseNumberCompare);\n for (const depth of depthKeys) {\n const nodes = this.nodesByDepth[depth];\n for (const node of nodes) {\n // This is always a single layer, never a list.\n const layer = node.outboundLayer;\n const referenceInputTensors = node.inputTensors;\n const referenceOutputTensors = node.outputTensors;\n\n // If all previous input tensors are available in tensorMap,\n // then call node.inboundLayer on them.\n // List of tuples [input, mask]:\n const computedData = new Array<[Tensor, Tensor]>();\n for (const x of referenceInputTensors) {\n if (x.id in tensorMap) {\n computedData.push(tensorMap[x.id]);\n }\n }\n if (computedData.length === referenceInputTensors.length) {\n // TODO(michaelterry): Add K.name_scope here, if we need it.\n let kwargs: Kwargs = {};\n let computedTensors: Tensor[];\n let computedMasks: Tensor[];\n let outputTensors: Tensor[];\n let outputMasks: Tensor[];\n // call layer\n if (node.callArgs != null) {\n kwargs = node.callArgs;\n }\n if (computedData.length === 1) {\n const [computedTensor, computedMask] = computedData[0];\n if (kwargs['mask'] == null) {\n kwargs['mask'] = computedMask;\n }\n outputTensors =\n generic_utils.toList(layer.call(computedTensor, kwargs));\n outputMasks = generic_utils.toList(\n layer.computeMask(computedTensor, computedMask));\n computedTensors = [computedTensor];\n computedMasks = [computedMask];\n } else {\n computedTensors = computedData.map(x => x[0]);\n computedMasks = computedData.map(x => x[1]);\n if (kwargs['mask'] == null) {\n kwargs['mask'] = computedMasks;\n }\n outputTensors =\n generic_utils.toList(layer.call(computedTensors, kwargs));\n outputMasks = generic_utils.toList(\n layer.computeMask(computedTensors, computedMasks));\n }\n\n if (layer.activityRegularizer) {\n throw new NotImplementedError(\n 'LayersModel invocation with concrete Tensor value(s) in the ' +\n 'presence of activity regularizer(s) is not supported yet.');\n }\n // TODO(michaelterry): Add model updates and losses\n\n // Update tensor map.\n for (let i = 0; i < referenceOutputTensors.length; ++i) {\n const x = referenceOutputTensors[i];\n const y = outputTensors[i];\n const mask = outputMasks[i];\n tensorMap[x.id] = [y, mask];\n }\n }\n }\n }\n\n const outputTensors: Tensor[] = [];\n const outputMasks: Tensor[] = [];\n const outputShapes: Shape[] = [];\n for (const x of this.outputs) {\n generic_utils.assert(\n x.id in tensorMap, `Could not compute output ${x.name} : ${x.id}`);\n const [tensor, mask] = tensorMap[x.id];\n outputShapes.push(tensor.shape);\n outputTensors.push(tensor);\n outputMasks.push(mask);\n }\n\n // TODO(michaelterry): Add support for caches.\n return [outputTensors, outputMasks, outputShapes];\n }\n\n /**\n * Builds a map of internal node keys to node ordering.\n * Used in serializaion a node orderings may change as unused nodes are\n * dropped. 
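runInternalGraph above evaluates the graph depth by depth, keeping a map from reference tensors to computed values and running a node only once all of its inputs are present in that map. A toy standalone sketch with numbers standing in for tensors and made-up node functions (not taken from the bundled code):

interface EvalNode { out: string; inputs: string[]; fn: (xs: number[]) => number; }

// Evaluate groups of nodes ordered from inputs toward outputs.
function evaluate(nodesByDepth: EvalNode[][], feeds: {[id: string]: number}): {[id: string]: number} {
  const values = {...feeds};
  for (const nodes of nodesByDepth) {
    for (const node of nodes) {
      const xs = node.inputs.map(id => values[id]);
      if (xs.every(x => x !== undefined)) {
        values[node.out] = node.fn(xs); // only run once all inputs are available
      }
    }
  }
  return values;
}

const graph: EvalNode[][] = [
  [{out: 'h', inputs: ['x'], fn: ([x]) => x * 2}],
  [{out: 'y', inputs: ['h'], fn: ([h]) => h + 1}],
];
console.log(evaluate(graph, {x: 3})); // { x: 3, h: 6, y: 7 }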
Porting Note: This helper method was pulled out of getConfig to\n * improve readability.\n * @param layers An array of Layers in the model.\n * @returns Map of Node Keys to index order within the layer.\n */\n private buildNodeConversionMap(layers: Layer[]): {[nodeKey: string]: number} {\n const nodeConversionMap: {[nodeKey: string]: number} = {};\n let keptNodes: number;\n for (const layer of this.layers) {\n keptNodes = layer instanceof Container ? 1 : 0;\n for (let originalNodeIndex = 0;\n originalNodeIndex < layer.inboundNodes.length; originalNodeIndex++) {\n const nodeKey = Container.nodeKey(layer, originalNodeIndex);\n if (this.containerNodes.has(nodeKey)) {\n // i.e. we mark it to be saved\n nodeConversionMap[nodeKey] = keptNodes;\n keptNodes += 1;\n }\n }\n }\n return nodeConversionMap;\n }\n\n /**\n * Retrieves a layer based on either its name (unique) or index.\n *\n * Indices are based on order of horizontal graph traversal (bottom-up).\n *\n * If both `name` and `index` are specified, `index` takes precedence.\n *\n * @param name Name of layer.\n * @param index Index of layer.\n * @returns A Layer instance.\n * @throws ValueError: In case of invalid layer name or index.\n *\n * @doc {\n * heading: 'Layers',\n * subheading: 'Classes',\n * namespace: 'layers',\n * subclasses: ['LayersModel']\n * }\n */\n getLayer(name?: string, index?: number): Layer {\n if (index != null) {\n if (this.layers.length <= index) {\n throw new ValueError(\n `Was asked to retrieve layer at index ${index}, but model only ` +\n `has ${this.layers.length} layer(s).`);\n } else {\n return this.layers[index];\n }\n } else {\n if (name == null) {\n throw new ValueError('Provide either a layer name or layer index');\n }\n }\n\n for (const layer of this.layers) {\n if (layer.name === name) {\n return layer;\n }\n }\n throw new ValueError(`No such layer: ${name}`);\n }\n\n /**\n * Retrieves the Container's current loss values.\n *\n * Used for regularizers during training.\n */\n override calculateLosses(): Scalar[] {\n // Porting Node: This is an augmentation to Container.loss in PyKeras.\n // In PyKeras, Container.loss returns symbolic tensors. Here a concrete\n // Tensor (specifically Scalar) values are returned. 
This is due to the\n // imperative backend.\n return tidy(() => {\n const losses: Scalar[] = [];\n for (const layer of this.layers) {\n for (let nodeIndex = 0; nodeIndex < layer.inboundNodes.length;\n ++nodeIndex) {\n const nodeKey = Container.nodeKey(layer, nodeIndex);\n if (this.containerNodes.has(nodeKey)) {\n losses.push(...layer.calculateLosses());\n }\n }\n }\n // TODO(cais): Add any unconditional model-level losses?\n return losses;\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {name: this.name};\n\n // Build a map from layer unique name (self._node_key)\n // to the index of the nodes that are saved in the config.\n // Only nodes in container_nodes are saved.\n const nodeConversionMap: {[nodeKey: string]: number} =\n this.buildNodeConversionMap(this.layers);\n\n // Serialize and save the layers in layerConfigs\n const layerConfigs = [];\n for (const layer of this.layers) {\n const layerClassName = layer.getClassName();\n const layerConfig = layer.getConfig();\n const filteredInboundNodes = [];\n for (let originalNodeIndex = 0;\n originalNodeIndex < layer.inboundNodes.length; originalNodeIndex++) {\n const node = layer.inboundNodes[originalNodeIndex];\n const nodeKey = Container.nodeKey(layer, originalNodeIndex);\n let kwargs = {};\n if (this.containerNodes.has(nodeKey)) {\n // The node is relevant to the model:\n // add to filteredInboundNodes.\n if (node.callArgs) {\n try {\n JSON.stringify(node.callArgs);\n kwargs = node.callArgs;\n } catch (err) {\n console.warn(\n `Layer ${layer.name} was passed ` +\n `non-serializable keyword arguments: ` +\n `${node.callArgs}. They will not be included ` +\n `in the serialized model (and thus will be ` +\n `missing at deserialization time).`);\n kwargs = {};\n }\n }\n if (node.inboundLayers.length > 0) {\n const nodeData = [];\n for (let i = 0; i < node.inboundLayers.length; i++) {\n const inboundLayer = node.inboundLayers[i];\n const nodeIndex = node.nodeIndices[i];\n const tensorIndex = node.tensorIndices[i];\n const nodeKey = Container.nodeKey(inboundLayer, nodeIndex);\n let newNodeIndex = nodeConversionMap[nodeKey];\n if (newNodeIndex == null) {\n newNodeIndex = 0;\n }\n nodeData.push(\n [inboundLayer.name, newNodeIndex, tensorIndex, kwargs]);\n }\n filteredInboundNodes.push(nodeData);\n }\n }\n }\n const dict: serialization.ConfigDict = {};\n dict['name'] = layer.name;\n dict['className'] = layerClassName;\n dict['config'] = layerConfig;\n dict['inboundNodes'] = filteredInboundNodes;\n layerConfigs.push(dict);\n }\n config['layers'] = layerConfigs;\n // Gather info about inputs and outputs\n const modelInputs = [];\n for (let i = 0; i < this.inputLayers.length; i++) {\n const layer = this.inputLayers[i];\n const nodeIndex = this.inputLayersNodeIndices[i];\n\n const nodeKey = Container.nodeKey(layer, nodeIndex);\n if (!this.containerNodes.has(nodeKey)) {\n continue;\n }\n let newNodeIndex = nodeConversionMap[nodeKey];\n if (newNodeIndex === null || newNodeIndex === undefined) {\n newNodeIndex = 0;\n }\n const tensorIndex = this.inputLayersTensorIndices[i];\n modelInputs.push([layer.name, newNodeIndex, tensorIndex]);\n }\n config['inputLayers'] = modelInputs;\n\n const modelOutputs = [];\n for (let i = 0; i < this.outputLayers.length; i++) {\n const layer = this.outputLayers[i];\n const nodeIndex = this.outputLayersNodeIndices[i];\n\n const nodeKey = Container.nodeKey(layer, nodeIndex);\n if (!this.containerNodes.has(nodeKey)) {\n continue;\n }\n let newNodeIndex = 
nodeConversionMap[nodeKey];\n if (newNodeIndex === null || newNodeIndex === undefined) {\n newNodeIndex = 0;\n }\n const tensorIndex = this.outputLayersTensorIndices[i];\n modelOutputs.push([layer.name, newNodeIndex, tensorIndex]);\n }\n config['outputLayers'] = modelOutputs;\n return config;\n }\n\n /**\n * Instantiates a LayersModel from its config (output of `get_config()`).\n * @param cls the class to create\n * @param config LayersModel config dictionary.\n * @param customObjects An optional dictionary of custom objects.\n * @param fastWeightInit Optional flag to use fast weight initialization\n * during deserialization. This is applicable to cases in which\n * the initialization will be immediately overwritten by loaded weight\n * values. Default: `false`.\n * @returns A LayersModel instance.\n * @throws ValueError: In case of improperly formatted config dict.\n */\n /** @nocollapse */\n static override fromConfig(\n cls: serialization.SerializableConstructor,\n config: serialization.ConfigDict,\n customObjects = {} as serialization.ConfigDict,\n fastWeightInit = false): T {\n // Layer instances created during\n // the graph reconstruction process\n const createdLayers: {[layerName: string]: Layer} = {};\n\n // Dictionary mapping layer instances to\n // node data that specifies a layer call.\n // It acts as a queue that maintains any unprocessed\n // layer call until it becomes possible to process it\n // (i.e. until the input tensors to the call all exist).\n const unprocessedNodes: {[layer: string]: TensorKeyWithArgsArray[][]} = {};\n function addUnprocessedNode(\n layer: Layer, nodeData: TensorKeyWithArgsArray[]) {\n if (!(layer.name in unprocessedNodes)) {\n unprocessedNodes[layer.name] = [nodeData];\n } else {\n unprocessedNodes[layer.name].push(nodeData);\n }\n }\n\n function processNode(layer: Layer, nodeData: TensorKeyWithArgsArray[]) {\n const inputTensors: SymbolicTensor[] = [];\n let kwargs;\n for (const inputData of nodeData) {\n const inboundLayerName = inputData[0];\n const inboundNodeIndex = inputData[1];\n const inboundTensorIndex = inputData[2];\n\n kwargs = inputData[3] == null ?\n {} :\n inputData[3] as serialization.ConfigDict;\n if (!(inboundLayerName in createdLayers)) {\n addUnprocessedNode(layer, nodeData);\n return;\n }\n const inboundLayer = createdLayers[inboundLayerName];\n if (inboundLayer.inboundNodes.length <= inboundNodeIndex) {\n addUnprocessedNode(layer, nodeData);\n return;\n }\n const inboundNode = inboundLayer.inboundNodes[inboundNodeIndex];\n inputTensors.push(inboundNode.outputTensors[inboundTensorIndex]);\n }\n // Call layer on its inputs, thus creating the node\n // and building the layer if needed.\n // Note: This has Eager vs Graph Implications.\n if (inputTensors.length > 0) {\n layer.apply(\n generic_utils.singletonOrArray(inputTensors),\n kwargs); // was ** kwargs\n }\n }\n\n /**\n * Deserialize a layer, then call it on appropriate inputs.\n * @param layerData: layer config dict.\n * @throws ValueError: In case of improperly formatted `layer_data`\n * dict.\n */\n function processLayer(layerData: serialization.ConfigDict|null) {\n const layerName = layerData['name'] as string;\n // Instantiate layer.\n const layer =\n deserializeLayer(\n layerData,\n config['customObjects'] != null ?\n config['customObjects'] as serialization.ConfigDict :\n {}) as Layer;\n layer.setFastWeightInitDuringBuild(fastWeightInit);\n createdLayers[layerName] = layer;\n // Gather layer inputs.\n const inboundNodesData =\n layerData['inboundNodes'] as 
TensorKeyWithArgsArray[][];\n inboundNodesData.forEach(nodeData => {\n if (!(nodeData instanceof Array)) {\n throw new ValueError(\n `Corrupted configuration, expected array for nodeData: ${\n nodeData}`);\n }\n // We don't process nodes (i.e. make layer calls)\n // on the fly because the inbound node may not yet exist,\n // in case of layer shared at different topological depths\n // (e.g.a model such as A(B(A(B(x)))))\n addUnprocessedNode(layer, nodeData);\n });\n }\n\n // First, we create all layers and enqueue nodes to be processed.\n const name = config['name'];\n const layersFromConfig = config['layers'] as serialization.ConfigDict[];\n for (const layerData of layersFromConfig) {\n processLayer(layerData);\n }\n\n // Then we process nodes in order of layer depth.\n // Nodes that cannot yet be processed(if the inbound node\n // does not yet exist) are re - enqueued, and the process\n // is repeated until all nodes are processed.\n while (!generic_utils.isObjectEmpty(unprocessedNodes)) {\n for (const layerData of layersFromConfig) {\n const layer = createdLayers[layerData['name'] as string];\n if (layer.name in unprocessedNodes) {\n const currentUnprocessedNodesForLayer = unprocessedNodes[layer.name];\n delete unprocessedNodes[layer.name];\n for (const nodeData of currentUnprocessedNodesForLayer) {\n processNode(layer, nodeData);\n }\n }\n }\n }\n\n const inputTensors: SymbolicTensor[] = [];\n const outputTensors: SymbolicTensor[] = [];\n const inputLayersFromConfig =\n config['inputLayers'] as serialization.ConfigDict[];\n for (const layerData of inputLayersFromConfig) {\n const layerName = layerData[0] as string;\n const nodeIndex = layerData[1] as number;\n const tensorIndex = layerData[2] as number;\n generic_utils.assert(layerName in createdLayers);\n const layer = createdLayers[layerName];\n const layerOutputTensors = layer.inboundNodes[nodeIndex].outputTensors;\n inputTensors.push(layerOutputTensors[tensorIndex]);\n }\n const outputLayersFromConfig =\n config['outputLayers'] as serialization.ConfigDict[];\n for (const layerData of outputLayersFromConfig) {\n const layerName = layerData[0] as string;\n const nodeIndex = layerData[1] as number;\n const tensorIndex = layerData[2] as number;\n generic_utils.assert(layerName in createdLayers);\n const layer = createdLayers[layerName];\n const layerOutputTensors = layer.inboundNodes[nodeIndex].outputTensors;\n outputTensors.push(layerOutputTensors[tensorIndex]);\n }\n return new cls({inputs: inputTensors, outputs: outputTensors, name});\n }\n\n /**\n * Determine whether the container is stateful.\n *\n * Porting Note: this is the equivalent of the stateful @property of\n * the Container class in PyKeras.\n */\n override get stateful(): boolean {\n // Porting Note: This check is to prevent inadvertent setting of the\n // _stateful property of the Container instance.\n if (this._stateful) {\n throw new ValueError(\n 'Container instance unexpectedly has _stateful = true. The ' +\n 'statefulness of a Container is determined by the Layers it ' +\n 'contains. 
Its _stateful property must remain the default false.');\n }\n for (const layer of this.layers) {\n if (layer.stateful) {\n return true;\n }\n }\n return false;\n }\n\n /**\n * Reset the state of all stateful constituent layers (if any).\n *\n * Examples of stateful layers include RNN layers whose `stateful` property\n * is set as `true`.\n */\n override resetStates() {\n tidy(() => {\n this.layers.forEach(layer => {\n // tslint:disable:no-any\n if (layer.stateful) {\n layer.resetStates();\n }\n // tslint:enable:no-any\n });\n });\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\nimport {argMax, clone, dispose, mul, reshape, Tensor, Tensor1D, tensor1d, tidy} from '@tensorflow/tfjs-core';\n\n/**\n * For multi-class classification problems, this object is designed to store a\n * mapping from class index to the \"weight\" of the class, where higher weighted\n * classes have larger impact on loss, accuracy, and other metrics.\n *\n * This is useful for cases in which you want the model to \"pay more attention\"\n * to examples from an under-represented class, e.g., in unbalanced datasets.\n */\nexport type ClassWeight = {\n [classIndex: number]: number\n};\n\n/**\n * Class weighting for a model with multiple outputs.\n *\n * This object maps each output name to a class-weighting object.\n */\nexport type ClassWeightMap = {\n [outputName: string]: ClassWeight\n};\n\nfunction standardizeSampleOrClassWeights(\n xWeight: ClassWeight|ClassWeight[]|ClassWeightMap, outputNames: string[],\n weightType: 'sampleWeight'|'classWeight'): ClassWeight[] {\n const numOutputs = outputNames.length;\n if (xWeight == null || (Array.isArray(xWeight) && xWeight.length === 0)) {\n return outputNames.map(name => null);\n }\n if (numOutputs === 1) {\n if (Array.isArray(xWeight) && xWeight.length === 1) {\n return xWeight;\n } else if (typeof xWeight === 'object' && outputNames[0] in xWeight) {\n return [(xWeight as ClassWeightMap)[outputNames[0]]];\n } else {\n return [xWeight as ClassWeight];\n }\n }\n if (Array.isArray(xWeight)) {\n if (xWeight.length !== numOutputs) {\n throw new Error(\n `Provided ${weightType} is an array of ${xWeight.length} ` +\n `element(s), but the model has ${numOutputs} outputs. ` +\n `Make sure a set of weights is provided for each model output.`);\n }\n return xWeight;\n } else if (\n typeof xWeight === 'object' && Object.keys(xWeight).length > 0 &&\n typeof (xWeight as ClassWeightMap)[Object.keys(xWeight)[0]] ===\n 'object') {\n const output: ClassWeight[] = [];\n outputNames.forEach(outputName => {\n if (outputName in xWeight) {\n output.push((xWeight as ClassWeightMap)[outputName]);\n } else {\n output.push(null);\n }\n });\n return output;\n } else {\n throw new Error(\n `The model has multiple (${numOutputs}) outputs, ` +\n `so ${weightType} must be either an array with ` +\n `${numOutputs} elements or an object with ${outputNames} keys. ` +\n `Provided ${weightType} not understood: ${JSON.stringify(xWeight)}`);\n }\n}\n\n/**\n * Standardize class weighting objects.\n *\n * This function takes a single class-weighting object, an array of them,\n * or a map from output name to class-weighting object. 
It compares it to the\n * output name(s) of the model, base on which it outputs an array of\n * class-weighting objects of which the length matches the number of outputs.\n *\n * @param classWeight Input class-weighting object(s).\n * @param outputNames All output name(s) of the model.\n * @return An array of class-weighting objects. The length of the array matches\n * the model's number of outputs.\n */\nexport function standardizeClassWeights(\n classWeight: ClassWeight|ClassWeight[]|ClassWeightMap,\n outputNames: string[]): ClassWeight[] {\n return standardizeSampleOrClassWeights(\n classWeight, outputNames, 'classWeight');\n}\n\nexport function standardizeSampleWeights(\n classWeight: ClassWeight|ClassWeight[]|ClassWeightMap,\n outputNames: string[]): ClassWeight[] {\n return standardizeSampleOrClassWeights(\n classWeight, outputNames, 'sampleWeight');\n}\n\n/**\n * Standardize by-sample and/or by-class weights for training.\n *\n * Note that this function operates on one model output at a time. For a model\n * with multiple outputs, you must call this function multiple times.\n *\n * @param y The target tensor that the by-sample and/or by-class weight is for.\n * The values of y are assumed to encode the classes, either directly\n * as an integer index, or as one-hot encoding.\n * @param sampleWeight By-sample weights.\n * @param classWeight By-class weights: an object mapping class indices\n * (integers) to a weight (float) to apply to the model's loss for the\n * samples from this class during training. This can be useful to tell the\n * model to \"pay more attention\" to samples from an under-represented class.\n * @param sampleWeightMode The mode for the sample weights.\n * @return A Promise of weight tensor, of which the size of the first dimension\n * matches that of `y`.\n */\nexport async function standardizeWeights(\n y: Tensor, sampleWeight?: Tensor, classWeight?: ClassWeight,\n sampleWeightMode?: 'temporal'): Promise {\n if (sampleWeight != null || sampleWeightMode != null) {\n // TODO(cais): Once 'temporal' mode is implemented, document it in the doc\n // string.\n throw new Error('Support sampleWeight is not implemented yet');\n }\n\n if (classWeight != null) {\n // Apply class weights per sample.\n const yClasses: Tensor1D = tidy(() => {\n if (y.shape.length === 1) {\n // Assume class indices.\n return clone(y) as Tensor1D;\n } else if (y.shape.length === 2) {\n if (y.shape[1] > 1) {\n // Assume one-hot encoding of classes.\n const axis = 1;\n return argMax(y, axis);\n } else if (y.shape[1] === 1) {\n // Class index.\n return reshape(y, [y.shape[0]]);\n } else {\n throw new Error(\n `Encountered unexpected last-dimension size (${y.shape[1]}) ` +\n `during handling of class weights. The size is expected to be ` +\n `>= 1.`);\n }\n } else {\n throw new Error(\n `Unexpected rank of target (y) tensor (${y.rank}) during ` +\n `handling of class weights. The rank is expected to be 1 or 2.`);\n }\n });\n\n const yClassIndices = Array.from(await yClasses.data());\n dispose(yClasses);\n const classSampleWeight: number[] = [];\n yClassIndices.forEach(classIndex => {\n if (classWeight[classIndex] == null) {\n throw new Error(\n `classWeight must contain all classes in the training data. 
` +\n `The class ${classIndex} exists in the data but not in ` +\n `classWeight`);\n } else {\n classSampleWeight.push(classWeight[classIndex]);\n }\n });\n\n return tensor1d(classSampleWeight, 'float32');\n } else {\n return null;\n }\n}\n\n/**\n * Apply per-sample weights on the loss values from a number of samples.\n *\n * @param losses Loss tensor of shape `[batchSize]`.\n * @param sampleWeights Per-sample weight tensor of shape `[batchSize]`.\n * @returns Tensor of the same shape as`losses`.\n */\nexport function computeWeightedLoss(losses: Tensor, sampleWeights: Tensor) {\n return mul(losses, sampleWeights);\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * Interfaces and methods for training models using TensorFlow.js datasets.\n */\n\nimport * as tfc from '@tensorflow/tfjs-core';\nimport {scalar} from '@tensorflow/tfjs-core';\nimport {BaseCallback, configureCallbacks, CustomCallbackArgs, History, ModelLoggingVerbosity, standardizeCallbacks, YieldEveryOptions} from '../base_callbacks';\nimport {NotImplementedError, ValueError} from '../errors';\nimport {disposeTensorsInLogs, UnresolvedLogs} from '../logs';\nimport {TensorOrArrayOrMap} from '../types';\nimport {singletonOrArray, toList} from '../utils/generic_utils';\n\nimport {Dataset, LazyIterator} from './dataset_stub';\nimport {ClassWeight, ClassWeightMap, standardizeClassWeights, standardizeWeights} from './training_utils';\n\n/**\n * Interface for configuring model training based on a dataset object.\n */\nexport interface ModelFitDatasetArgs {\n /**\n * (Optional) Total number of steps (batches of samples) before\n * declaring one epoch finished and starting the next epoch. It should\n * typically be equal to the number of samples of your dataset divided by\n * the batch size, so that `fitDataset`() call can utilize the entire dataset.\n * If it is not provided, use `done` return value in `iterator.next()` as\n * signal to finish an epoch.\n */\n batchesPerEpoch?: number;\n\n /**\n * Integer number of times to iterate over the training dataset.\n */\n epochs: number;\n\n /**\n * Verbosity level.\n *\n * Expected to be 0, 1, or 2. Default: 1.\n *\n * 0 - No printed message during fit() call.\n * 1 - In Node.js (tfjs-node), prints the progress bar, together with\n * real-time updates of loss and metric values and training speed.\n * In the browser: no action. This is the default.\n * 2 - Not implemented yet.\n */\n verbose?: ModelLoggingVerbosity;\n\n /**\n * List of callbacks to be called during training.\n * Can have one or more of the following callbacks:\n * - `onTrainBegin(logs)`: called when training starts.\n * - `onTrainEnd(logs)`: called when training ends.\n * - `onEpochBegin(epoch, logs)`: called at the start of every epoch.\n * - `onEpochEnd(epoch, logs)`: called at the end of every epoch.\n * - `onBatchBegin(batch, logs)`: called at the start of every batch.\n * - `onBatchEnd(batch, logs)`: called at the end of every batch.\n * - `onYield(epoch, batch, logs)`: called every `yieldEvery` milliseconds\n * with the current epoch, batch and logs. The logs are the same\n * as in `onBatchEnd()`. Note that `onYield` can skip batches or\n * epochs. 
See also docs for `yieldEvery` below.\n */\n callbacks?: BaseCallback[]|CustomCallbackArgs|CustomCallbackArgs[];\n\n /**\n * Data on which to evaluate the loss and any model\n * metrics at the end of each epoch. The model will not be trained on this\n * data. This could be any of the following:\n *\n * - An array `[xVal, yVal]`, where the two values may be `tf.Tensor`,\n * an array of Tensors, or a map of string to Tensor.\n * - Similarly, an array ` [xVal, yVal, valSampleWeights]`\n * (not implemented yet).\n * - a `Dataset` object with elements of the form `{xs: xVal, ys: yVal}`,\n * where `xs` and `ys` are the feature and label tensors, respectively.\n *\n * If `validationData` is an Array of Tensor objects, each `tf.Tensor` will be\n * sliced into batches during validation, using the parameter\n * `validationBatchSize` (which defaults to 32). The entirety of the\n * `tf.Tensor` objects will be used in the validation.\n *\n * If `validationData` is a dataset object, and the `validationBatches`\n * parameter is specified, the validation will use `validationBatches` batches\n * drawn from the dataset object. If `validationBatches` parameter is not\n * specified, the validation will stop when the dataset is exhausted.\n *\n * The model will not be trained on this data.\n */\n validationData?: [\n TensorOrArrayOrMap, TensorOrArrayOrMap\n ]|[TensorOrArrayOrMap, TensorOrArrayOrMap, TensorOrArrayOrMap]|Dataset;\n\n /**\n * Optional batch size for validation.\n *\n * Used only if `validationData` is an array of `tf.Tensor` objects, i.e., not\n * a dataset object.\n *\n * If not specified, its value defaults to 32.\n */\n validationBatchSize?: number;\n\n /**\n * (Optional) Only relevant if `validationData` is specified and is a dataset\n * object.\n *\n * Total number of batches of samples to draw from `validationData` for\n * validation purpose before stopping at the end of every epoch. If not\n * specified, `evaluateDataset` will use `iterator.next().done` as signal to\n * stop validation.\n */\n validationBatches?: number;\n\n /**\n * Configures the frequency of yielding the main thread to other tasks.\n *\n * In the browser environment, yielding the main thread can improve the\n * responsiveness of the page during training. In the Node.js environment,\n * it can ensure tasks queued in the event loop can be handled in a timely\n * manner.\n *\n * The value can be one of the following:\n * - `'auto'`: The yielding happens at a certain frame rate (currently set\n * at 125ms). This is the default.\n * - `'batch'`: yield every batch.\n * - `'epoch'`: yield every epoch.\n * - a `number`: Will yield every `number` milliseconds.\n * - `'never'`: never yield. (But yielding can still happen through `await\n * nextFrame()` calls in custom callbacks.)\n */\n yieldEvery?: YieldEveryOptions;\n\n /**\n * Epoch at which to start training (useful for resuming a previous training\n * run). When this is used, `epochs` is the index of the \"final epoch\".\n * The model is not trained for a number of iterations given by `epochs`,\n * but merely until the epoch of index `epochs` is reached.\n */\n initialEpoch?: number;\n\n /**\n * Optional object mapping class indices (integers) to\n * a weight (float) to apply to the model's loss for the samples from this\n * class during training. 
This can be useful to tell the model to \"pay more\n * attention\" to samples from an under-represented class.\n *\n * If the model has multiple outputs, a class weight can be specified for\n * each of the outputs by setting this field an array of weight object\n * or an object that maps model output names (e.g., `model.outputNames[0]`)\n * to weight objects.\n */\n classWeight?: ClassWeight|ClassWeight[]|ClassWeightMap;\n}\n\nexport interface FitDatasetElement {\n xs: TensorOrArrayOrMap;\n ys: TensorOrArrayOrMap;\n}\n\n/**\n * Interface for configuring model evaluation based on a dataset object.\n */\nexport interface ModelEvaluateDatasetArgs {\n /**\n * Number of batches to draw from the dataset object before ending the\n * evaluation.\n */\n batches?: number;\n\n /**\n * Verbosity mode.\n */\n verbose?: ModelLoggingVerbosity;\n}\n\n// Default batch size used during tensor-based validation.\nconst DEFAULT_VALIDATION_BATCH_SIZE = 32;\n\n/**\n * Standardize the output of a dataset iterator for use by\n * LayersModel.fitDataset().\n *\n * @param model: A `tf.LayersModel` object.\n * @param iteratorOut The output of a dataset iterator. It is required to be\n * an object of the form `{xs: TensorOrArrayOrMap, ys:\n * TensorOrArrayOrMap}`, where `TensorOrArrayOrMap` is a single `tf.Tensor`,\n * a `tf.Tensor[]`, or a flat map from string names to `tf.Tensor`s.\n * @returns A flat array of `tf.Tensor` objects: the input `tf.Tensor`s\n * followed by the target `tf.Tensor`s. When `tf.Tensor`s are provided\n * as a map, the order in the resulting array is taken from the `inputNames`\n * and `outputNames` of the model.\n */\nfunction standardizeDataIteratorOutput(\n // Type `model` as `any` here to avoid circular dependency w/\n // training.ts.\n // tslint:disable-next-line:no-any\n model: any, iteratorOut: {}): {xs: tfc.Tensor[], ys: tfc.Tensor[]} {\n let xs: TensorOrArrayOrMap;\n let ys: TensorOrArrayOrMap;\n\n const iteratorOutObj = iteratorOut as FitDatasetElement;\n xs = iteratorOutObj['xs'];\n ys = iteratorOutObj['ys'];\n tfc.util.assert(\n xs != null && ys != null,\n () => 'A Dataset iterator for fitDataset() is expected to generate ' +\n 'objects of the form `{xs: xVal, ys: yVal}`, where the two ' +\n 'values may be `tf.Tensor`, an array of Tensors, or a map of ' +\n 'string to Tensor. The provided Dataset instead generates ' +\n `${iteratorOut}`);\n\n const flattenedXs: tfc.Tensor[] =\n flattenTensorOrArrayOrMap('input', model.inputNames, xs);\n const flattenedYs: tfc.Tensor[] =\n flattenTensorOrArrayOrMap('output', model.outputNames, ys);\n\n const batchSize: number = flattenedXs[0].shape[0];\n\n tfc.util.assert(\n flattenedXs.length === model.inputs.length,\n () => `LayersModel has ${model.inputs.length} inputs, but the dataset ` +\n `provides ${flattenedXs.length} inputs. (Expected input keys: ` +\n `${JSON.stringify(model.inputNames)})`);\n\n tfc.util.assert(\n flattenedYs.length === model.outputs.length,\n () =>\n `LayersModel has ${model.outputs.length} outputs, but the dataset ` +\n `provides ${flattenedYs.length} outputs. 
(Expected output keys: ` +\n `${JSON.stringify(model.outputNames)})`);\n\n for (let xIndex = 0; xIndex < flattenedXs.length; xIndex++) {\n tfc.util.assert(\n flattenedXs[xIndex].shape[0] === batchSize,\n () => `Batch size mismatch: input ` +\n `${model.inputNames[xIndex]} has ${\n flattenedXs[xIndex].shape[0]}; ` +\n `expected ${batchSize} based on input ${model.inputNames[0]}.`);\n }\n\n for (let yIndex = 0; yIndex < flattenedYs.length; yIndex++) {\n tfc.util.assert(\n flattenedYs[yIndex].shape[0] === batchSize,\n () => `Batch size mismatch: output ` +\n `${model.outputNames[yIndex]} has ${\n flattenedYs[yIndex].shape[0]}; ` +\n `expected ${batchSize} based on input ${model.inputNames[0]}.`);\n }\n\n return {xs: flattenedXs, ys: flattenedYs};\n}\n\nfunction flattenTensorOrArrayOrMap(\n inputOrOutput: string, names: string[], values: TensorOrArrayOrMap) {\n if (values instanceof tfc.Tensor) {\n return [values];\n } else if (Array.isArray(values)) {\n tfc.util.assert(\n values.length === names.length,\n () => `Received an array of ${values.length} Tensors, but expected ${\n names.length} to match the ${inputOrOutput} keys ${names}.`);\n return values;\n } else {\n const result: tfc.Tensor[] = [];\n // Check that all the required keys are available.\n for (const name of names) {\n if (values[name] == null) {\n throw new ValueError(\n `The feature data generated by the dataset lacks the required ` +\n `${inputOrOutput} key '${name}'.`);\n }\n result.push(values[name]);\n }\n return result;\n }\n}\n\nfunction standardizeTensorValidationData(\n data:\n [\n tfc.Tensor|tfc.Tensor[], tfc.Tensor|tfc.Tensor[]\n ]|[tfc.Tensor | tfc.Tensor[], tfc.Tensor | tfc.Tensor[],\n tfc.Tensor | tfc.Tensor[]]):\n {xs: tfc.Tensor|tfc.Tensor[], ys: tfc.Tensor|tfc.Tensor[]} {\n if (data.length === 3) {\n throw new NotImplementedError(\n 'Validation with sample weights is not implemented yet.');\n }\n return {xs: data[0], ys: data[1]};\n}\n\nexport async function fitDataset(\n // Type `model` as `any` here to avoid circular dependency w/\n // training.ts.\n // tslint:disable-next-line:no-any\n model: any, dataset: Dataset,\n args: ModelFitDatasetArgs): Promise {\n const hasBatchesPerEpoch = args.batchesPerEpoch != null;\n tfc.util.assert(\n model.optimizer != null,\n () => 'You must compile a model before training/testing. Use ' +\n 'LayersModel.compile(modelCompileConfig).');\n\n tfc.util.assert(\n args != null,\n () => `For fitDataset(), the 2nd argument (config) is required, ` +\n `but it is not provided in this call.`);\n tfc.util.assert(\n args.epochs != null && args.epochs > 0 && Number.isInteger(args.epochs),\n () => `For fitDataset(), config.epochs is expected to be a positive ` +\n `integer, but got ${args.epochs}`);\n tfc.util.assert(\n !hasBatchesPerEpoch ||\n (args.batchesPerEpoch > 0 && Number.isInteger(args.batchesPerEpoch)),\n () => `For fitDataset(), config.batchesPerEpoch is expected to be a ` +\n `positive integer if specified, but got ${args.batchesPerEpoch}`);\n tfc.util.assert(\n // tslint:disable-next-line:no-any\n (args as any)['validationSplit'] == null,\n () => '`validationSplit` is not supported by `fitDataset()`. 
' +\n 'Use validationData instead.');\n\n if (model.isTraining) {\n throw new Error(\n 'Cannot start training because another fit() call is ongoing.');\n }\n model.isTraining = true;\n\n try {\n const doValidation = args.validationData != null;\n let valXs: tfc.Tensor|tfc.Tensor[];\n let valYs: tfc.Tensor|tfc.Tensor[];\n if (doValidation) {\n if (isDatasetObject(args.validationData)) {\n tfc.util.assert(\n args.validationBatches == null ||\n (args.validationBatches > 0 &&\n Number.isInteger(args.validationBatches)),\n () => `For fitDataset() with dataset-based validation, ` +\n `config.validationBatches is expected not to be provided, ` +\n `or to be a positive integer, ` +\n `but got ${args.validationBatches}`);\n } else {\n const validationData = standardizeTensorValidationData(\n args.validationData as\n [tfc.Tensor | tfc.Tensor[], tfc.Tensor | tfc.Tensor[]] |\n [\n tfc.Tensor | tfc.Tensor[], tfc.Tensor | tfc.Tensor[],\n tfc.Tensor | tfc.Tensor[]\n ]);\n valXs = validationData.xs;\n valYs = validationData.ys;\n }\n }\n\n const trainFunction = model.makeTrainFunction();\n const outLabels = model.getDedupedMetricsNames() as string[];\n\n let callbackMetrics: string[];\n if (doValidation) {\n callbackMetrics =\n outLabels.slice().concat(outLabels.map(n => 'val_' + n));\n } else {\n callbackMetrics = outLabels.slice();\n }\n\n const callbacks = standardizeCallbacks(args.callbacks, args.yieldEvery);\n const verbose = args.verbose == null ? 1 : args.verbose;\n const {callbackList, history} = configureCallbacks(\n callbacks, verbose, args.epochs, null, null,\n getStepsPerEpoch(dataset, args),\n null, // Batch size determined by the dataset itself.\n doValidation, callbackMetrics);\n callbackList.setModel(model);\n model.history = history;\n\n await callbackList.onTrainBegin();\n model.stopTraining_ = false;\n let epoch = args.initialEpoch == null ? 0 : args.initialEpoch;\n\n let dataIterator = await dataset.iterator();\n while (epoch < args.epochs) {\n const epochLogs: UnresolvedLogs = {};\n await callbackList.onEpochBegin(epoch);\n let stepsDone = 0;\n let batchIndex = 0;\n if (!hasBatchesPerEpoch) {\n dataIterator = await dataset.iterator();\n }\n while (hasBatchesPerEpoch ? stepsDone < args.batchesPerEpoch : true) {\n const iteratorOut = await dataIterator.next();\n\n // If `batchesPerEpoch` is specified, the dataset should not be\n // exhausted until all epoches are done.\n if (hasBatchesPerEpoch && iteratorOut.done) {\n console.warn(\n 'You provided `batchesPerEpoch` as ' +\n `${args.batchesPerEpoch}, ` +\n 'but your dataset iterator ran out of data after ' +\n `${stepsDone} batches; ` +\n 'interrupting training. Make sure that your ' +\n 'dataset can generate at least `batchesPerEpoch * epochs` ' +\n 'batches (in this case, ' +\n `${args.batchesPerEpoch * args.epochs} batches). 
` +\n 'You may need to use the repeat() function when building ' +\n 'your dataset.');\n break;\n }\n\n if (iteratorOut.value != null) {\n const {xs, ys} =\n standardizeDataIteratorOutput(model, iteratorOut.value);\n const batchLogs: UnresolvedLogs = {};\n batchLogs['batch'] = batchIndex;\n batchLogs['size'] = xs[0].shape[0];\n\n await callbackList.onBatchBegin(batchIndex, batchLogs);\n\n const sampleWeights: tfc.Tensor[] = [];\n if (args.classWeight != null) {\n const standardClassWeights =\n standardizeClassWeights(args.classWeight, model.outputNames);\n for (let i = 0; i < standardClassWeights.length; ++i) {\n sampleWeights.push(await standardizeWeights(\n ys[i], null, standardClassWeights[i]));\n }\n }\n\n // Train on batch.\n const ins = xs.concat(ys).concat(sampleWeights);\n const outs = trainFunction(ins);\n tfc.dispose(ins);\n for (let i = 0; i < outLabels.length; ++i) {\n const label = outLabels[i];\n const out = outs[i];\n batchLogs[label] = out;\n tfc.keep(out);\n }\n\n await callbackList.onBatchEnd(batchIndex, batchLogs);\n disposeTensorsInLogs(batchLogs);\n\n batchIndex++;\n stepsDone++;\n }\n\n if (hasBatchesPerEpoch ? stepsDone >= args.batchesPerEpoch :\n iteratorOut.done) {\n // Epoch finished. Perform validation.\n if (doValidation) {\n let valOuts: tfc.Scalar[];\n if (isDatasetObject(args.validationData)) {\n valOuts = toList(await model.evaluateDataset(\n args.validationData, {batches: args.validationBatches}));\n } else {\n valOuts = toList(model.evaluate(valXs, valYs, {\n batchSize: args.validationBatchSize == null ?\n DEFAULT_VALIDATION_BATCH_SIZE :\n args.validationBatchSize,\n verbose: 0\n }));\n }\n for (let i = 0; i < model.metricsNames.length; ++i) {\n epochLogs[`val_${model.metricsNames[i]}`] = valOuts[i];\n }\n }\n // Call `break` to exit one epoch lopp after validation is done. If\n // config.batchesPerEpoch is specified, an epoch while loop will\n // stop when `stepsDone >= config.batchesPerEpoch`. When\n // config.batchesPerEpoch is not provided, the following `break` is\n // required to exit the while lopp after dataset is exhausted.\n break;\n }\n\n if (model.stopTraining_) {\n break;\n }\n }\n await callbackList.onEpochEnd(epoch, epochLogs);\n epoch++;\n if (model.stopTraining_) {\n break;\n }\n }\n await callbackList.onTrainEnd();\n await model.history.syncData();\n return model.history;\n } finally {\n model.isTraining = false;\n }\n}\n\n/** Helper function that determines number of steps (batches) per epoch. 
*/\nfunction getStepsPerEpoch(\n dataset: Dataset, args: ModelFitDatasetArgs): number {\n // Attempt to determine # of batches in an epoch.\n let stepsPerEpoch: number = null;\n if (args.batchesPerEpoch != null) {\n stepsPerEpoch = args.batchesPerEpoch;\n } else if (Number.isFinite(dataset.size)) {\n stepsPerEpoch = dataset.size;\n }\n return stepsPerEpoch;\n}\n\n// Check if provided object is a Dataset object by checking its .iterator\n// element.\nfunction isDatasetObject(\n dataset:\n [\n TensorOrArrayOrMap, TensorOrArrayOrMap\n ]|[TensorOrArrayOrMap, TensorOrArrayOrMap, TensorOrArrayOrMap]|\n Dataset): boolean {\n return (typeof (dataset as Dataset).iterator === 'function');\n}\n\n// Check if provided object is a LazyIterator object by checking it's .next\n// element.\nfunction isLazyIteratorObject(iterator: Dataset|\n LazyIterator): boolean {\n return (typeof (iterator as LazyIterator).next === 'function');\n}\n\nexport async function evaluateDataset(\n // Type `model` as `any` here to avoid circular dependency w/\n // training.ts.\n // tslint:disable-next-line:no-any\n model: any, dataset: Dataset|LazyIterator,\n args: ModelEvaluateDatasetArgs): Promise {\n args = args || {};\n const hasBatches = args.batches != null;\n const f = model.testFunction;\n let outs: tfc.Scalar[] = [];\n if (args.verbose > 0) {\n throw new NotImplementedError('Verbose mode is not implemented yet.');\n }\n\n tfc.util.assert(\n !hasBatches || (args.batches > 0 && Number.isInteger(args.batches)),\n () => 'Test loop expects `batches` to be a positive integer, but ' +\n `received ${JSON.stringify(args.batches)}`);\n const dataIterator = isLazyIteratorObject(dataset) ?\n dataset as LazyIterator:\n await (dataset as Dataset).iterator();\n // Keeps track of number of examples used in this evaluation.\n let numExamples = 0;\n let batch = 0;\n\n while (hasBatches ? batch < args.batches : true) {\n const iteratorOut = await dataIterator.next();\n outs = tfc.tidy(() => {\n if (iteratorOut.value) {\n // TODO(cais): Once real dataset is available, use\n // `map(x => standardizeDataIteratorOutput(model, x).map(f)`.\n const {xs, ys} =\n standardizeDataIteratorOutput(model, iteratorOut.value);\n const xsAndYs = xs.concat(ys);\n const batchOuts = tfc.tidy(() => f(xsAndYs));\n tfc.dispose(xsAndYs);\n\n if (batch === 0) {\n for (let i = 0; i < batchOuts.length; ++i) {\n outs.push(scalar(0));\n }\n }\n\n const batchSize = xsAndYs[0].shape[0];\n for (let i = 0; i < batchOuts.length; ++i) {\n const batchOut = batchOuts[i];\n const oldScalar = outs[i];\n outs[i] =\n tfc.tidy(() => tfc.add(outs[i], tfc.mul(batchSize, batchOut)));\n if (batch > 0) {\n tfc.dispose(oldScalar);\n }\n }\n tfc.dispose(batchOuts);\n numExamples += batchSize;\n\n ++batch;\n }\n return outs;\n });\n\n if (iteratorOut.done) {\n if (hasBatches) {\n console.warn(\n 'Your dataset iterator ran out of data during evaluateDataset(). ' +\n 'Interrupting evalution. Make sure that your ' +\n 'dataset can generate at least `batches` ' +\n `batches (in this case, ${args.batches} batches). 
` +\n 'You may need to use the repeat() function when building ' +\n 'your dataset.');\n }\n break;\n }\n }\n\n for (let i = 0; i < outs.length; ++i) {\n const oldScalar = outs[i];\n outs[i] = tfc.div(outs[i], numExamples);\n tfc.dispose(oldScalar);\n }\n\n return singletonOrArray(outs);\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * Interfaces and methods for training models using tf.Tensor objects.\n */\n\nimport * as tfc from '@tensorflow/tfjs-core';\nimport {Tensor, Tensor1D} from '@tensorflow/tfjs-core';\nimport {expandDims, gather, sliceAlongFirstAxis} from '../backend/tfjs_backend';\nimport {BaseCallback, CustomCallbackArgs, ModelLoggingVerbosity, YieldEveryOptions} from '../base_callbacks';\nimport {ClassWeight, ClassWeightMap} from './training_utils';\n\n/**\n * Interface configuration model training based on data as `tf.Tensor`s.\n */\nexport interface ModelFitArgs {\n /**\n * Number of samples per gradient update. If unspecified, it\n * will default to 32.\n */\n batchSize?: number;\n\n /**\n * Integer number of times to iterate over the training data arrays.\n */\n epochs?: number;\n\n /**\n * Verbosity level.\n *\n * Expected to be 0, 1, or 2. Default: 1.\n *\n * 0 - No printed message during fit() call.\n * 1 - In Node.js (tfjs-node), prints the progress bar, together with\n * real-time updates of loss and metric values and training speed.\n * In the browser: no action. This is the default.\n * 2 - Not implemented yet.\n */\n verbose?: ModelLoggingVerbosity;\n\n /**\n * List of callbacks to be called during training.\n * Can have one or more of the following callbacks:\n * - `onTrainBegin(logs)`: called when training starts.\n * - `onTrainEnd(logs)`: called when training ends.\n * - `onEpochBegin(epoch, logs)`: called at the start of every epoch.\n * - `onEpochEnd(epoch, logs)`: called at the end of every epoch.\n * - `onBatchBegin(batch, logs)`: called at the start of every batch.\n * - `onBatchEnd(batch, logs)`: called at the end of every batch.\n * - `onYield(epoch, batch, logs)`: called every `yieldEvery` milliseconds\n * with the current epoch, batch and logs. The logs are the same\n * as in `onBatchEnd()`. Note that `onYield` can skip batches or\n * epochs. See also docs for `yieldEvery` below.\n */\n callbacks?: BaseCallback[]|CustomCallbackArgs|CustomCallbackArgs[];\n\n /**\n * Float between 0 and 1: fraction of the training data\n * to be used as validation data. The model will set apart this fraction of\n * the training data, will not train on it, and will evaluate the loss and\n * any model metrics on this data at the end of each epoch.\n * The validation data is selected from the last samples in the `x` and `y`\n * data provided, before shuffling.\n */\n validationSplit?: number;\n\n /**\n * Data on which to evaluate the loss and any model\n * metrics at the end of each epoch. The model will not be trained on this\n * data. This could be a tuple [xVal, yVal] or a tuple [xVal, yVal,\n * valSampleWeights]. The model will not be trained on this data.\n * `validationData` will override `validationSplit`.\n */\n validationData?: [\n Tensor|Tensor[], Tensor|Tensor[]\n ]|[Tensor | Tensor[], Tensor|Tensor[], Tensor|Tensor[]];\n\n /**\n * Whether to shuffle the training data before each epoch. 
Has\n * no effect when `stepsPerEpoch` is not `null`.\n */\n shuffle?: boolean;\n\n /**\n * Optional object mapping class indices (integers) to\n * a weight (float) to apply to the model's loss for the samples from this\n * class during training. This can be useful to tell the model to \"pay more\n * attention\" to samples from an under-represented class.\n *\n * If the model has multiple outputs, a class weight can be specified for\n * each of the outputs by setting this field an array of weight object\n * or an object that maps model output names (e.g., `model.outputNames[0]`)\n * to weight objects.\n */\n classWeight?: ClassWeight|ClassWeight[]|ClassWeightMap;\n\n /**\n * Optional array of the same length as x, containing\n * weights to apply to the model's loss for each sample. In the case of\n * temporal data, you can pass a 2D array with shape (samples,\n * sequenceLength), to apply a different weight to every timestep of every\n * sample. In this case you should make sure to specify\n * sampleWeightMode=\"temporal\" in compile().\n */\n sampleWeight?: Tensor;\n\n /**\n * Epoch at which to start training (useful for resuming a previous training\n * run). When this is used, `epochs` is the index of the \"final epoch\".\n * The model is not trained for a number of iterations given by `epochs`,\n * but merely until the epoch of index `epochs` is reached.\n */\n initialEpoch?: number;\n\n /**\n * Total number of steps (batches of samples) before\n * declaring one epoch finished and starting the next epoch. When training\n * with Input Tensors such as TensorFlow data tensors, the default `null` is\n * equal to the number of unique samples in your dataset divided by the\n * batch size, or 1 if that cannot be determined.\n */\n stepsPerEpoch?: number;\n\n /**\n * Only relevant if `stepsPerEpoch` is specified. Total number of steps\n * (batches of samples) to validate before stopping.\n */\n validationSteps?: number;\n\n /**\n * Configures the frequency of yielding the main thread to other tasks.\n *\n * In the browser environment, yielding the main thread can improve the\n * responsiveness of the page during training. In the Node.js environment,\n * it can ensure tasks queued in the event loop can be handled in a timely\n * manner.\n *\n * The value can be one of the following:\n * - `'auto'`: The yielding happens at a certain frame rate (currently set\n * at 125ms). This is the default.\n * - `'batch'`: yield every batch.\n * - `'epoch'`: yield every epoch.\n * - any `number`: yield every `number` milliseconds.\n * - `'never'`: never yield. (yielding can still happen through `await\n * nextFrame()` calls in custom callbacks.)\n */\n yieldEvery?: YieldEveryOptions;\n}\n\nexport function checkBatchSize(batchSize: number) {\n tfc.util.assert(\n batchSize > 0 && Number.isInteger(batchSize),\n () => `batchSize is required to be a positive integer, but got ${\n batchSize}`);\n}\n\n/**\n * Slice a Tensor or an Array of Tensors, by start and stop indices.\n *\n * Porting Note: The `_slice_arrays` function in PyKeras is covered by this\n * function and `sliceArraysByIndices()` together.\n *\n * @param arrays: the input.\n * @param start: the starting index (inclusive).\n * @param stop: the stopping index (exclusive).\n * @returns The result of the slicing. 
If `arrays` is an `Array` of\n * `tf.Tensor`s, the slicing will be applied to all elements of the `Array`\n * in the same way.\n */\nexport function sliceArrays(\n arrays: Tensor|Tensor[], start: number, stop: number): Tensor|Tensor[] {\n if (arrays == null) {\n return [null];\n } else if (Array.isArray(arrays)) {\n return arrays.map(array => sliceAlongFirstAxis(array, start, stop - start));\n } else { // Tensor.\n return sliceAlongFirstAxis(arrays, start, stop - start);\n }\n}\n\n/**\n * Slice a Tensor or an Array of Tensors, by random-order indices.\n *\n * Porting Note: The `_slice_arrays` function in PyKeras is covered by this\n * function and `sliceArrays()` together.\n *\n * @param arrays The input `tf.Tensor` or `Array` of `tf.Tensor`s to slice.\n * If an `Array` of `tf.Tensor`s, all `tf.Tensor`s will be sliced in the\n * same fashion.\n * @param indices The indices to use for slicing along the first (batch)\n * dimension.\n * @returns Result(s) of the slicing.\n */\nexport function sliceArraysByIndices(\n arrays: Tensor|Tensor[], indices: Tensor1D): Tensor|Tensor[] {\n return tfc.tidy(() => {\n if (arrays == null) {\n return null;\n } else if (Array.isArray(arrays)) {\n return arrays.map(\n array => (sliceArraysByIndices(array, indices) as Tensor));\n } else {\n // TODO(cais): indices should be a pre-constructed Tensor1D to avoid\n // tensor1d() calls.\n return gather(\n arrays,\n indices.dtype === 'int32' ? indices : tfc.cast(indices, 'int32'));\n }\n });\n}\n\n/**\n * Returns a list of batch indices (tuples of indices).\n * @param size: Integer, total size of the data to slice into batches.\n * @param batchSize: Integer, batch size.\n * @returns An Array of [batchStart, batchEnd] tuples. batchStart is\n * inclusive; batchEnd is exclusive. 
I.e., each batch consists of indices x\n * that satisfy batchStart <= x < batchEnd.\n */\nexport function makeBatches(\n size: number, batchSize: number): Array<[number, number]> {\n const output: Array<[number, number]> = [];\n let batchStart = 0;\n let batchEnd: number = null;\n while (batchStart < size) {\n batchEnd = batchStart + batchSize;\n if (batchEnd >= size) {\n batchEnd = size;\n }\n output.push([batchStart, batchEnd]);\n batchStart = batchEnd;\n }\n return output;\n}\n\n/**\n * Ensure tensors all have a rank of at least 2.\n *\n * If a tensor has a rank of 1, it is dimension-expanded to rank 2.\n * If any tensor has a rank of 0 (i.e., is a scalar), an error will be thrown.\n */\nexport function ensureTensorsRank2OrHigher(tensors: Tensor|Tensor[]): Tensor[] {\n const outs: Tensor[] = [];\n if (tensors instanceof Tensor) {\n tensors = [tensors];\n }\n\n // Make Tensors at least 2D.\n for (let i = 0; i < tensors.length; ++i) {\n const tensor = tensors[i];\n if (tensor.rank === 1) {\n outs.push(expandDims(tensor, 1));\n } else if (tensor.rank === 0) {\n throw new Error(\n 'Expected tensor to be at least 1D, but received a 0D tensor ' +\n '(scalar).');\n } else {\n outs.push(tensor);\n }\n }\n return outs;\n}\n\n/**\n * Compare a set of tensors with a reference (old) set, discard the ones\n * in the new set that are not present in the reference set.\n *\n * This method is used for memory clenaup during calls such as\n * LayersModel.fit().\n *\n * @param tensors New set which may contain Tensors not present in\n * `refTensors`.\n * @param refTensors Reference Tensor set.\n */\n// TODO(cais, kangyizhang): Deduplicate with tfjs-data.\nexport function disposeNewTensors(\n tensors: Tensor|Tensor[]|{[inputName: string]: Tensor},\n refTensors: Tensor|Tensor[]|{[inputName: string]: Tensor}): void {\n if (tensors == null) {\n return;\n }\n const oldTensorIds: number[] = [];\n if (refTensors instanceof Tensor) {\n oldTensorIds.push(refTensors.id);\n } else if (Array.isArray(refTensors)) {\n refTensors.forEach(t => oldTensorIds.push(t.id));\n } else if (refTensors != null) {\n // `oldTensors` is a map from string name to Tensor.\n for (const name in refTensors) {\n const oldTensor = refTensors[name];\n oldTensorIds.push(oldTensor.id);\n }\n }\n\n const tensorsToDispose: Tensor[] = [];\n if (tensors instanceof Tensor) {\n if (oldTensorIds.indexOf(tensors.id) === -1) {\n tensorsToDispose.push(tensors);\n }\n } else if (Array.isArray(tensors)) {\n tensors.forEach(t => {\n if (oldTensorIds.indexOf(t.id) === -1) {\n tensorsToDispose.push(t);\n }\n });\n } else if (tensors != null) {\n // `oldTensors` is a map from string name to Tensor.\n for (const name in tensors) {\n const tensor = tensors[name];\n if (oldTensorIds.indexOf(tensor.id) === -1) {\n tensorsToDispose.push(tensor);\n }\n }\n }\n\n tensorsToDispose.forEach(t => {\n if (!t.isDisposed) {\n t.dispose();\n }\n });\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/* Original Source: engine/training.py */\n\nimport * as tfc from '@tensorflow/tfjs-core';\nimport {io, ModelPredictConfig as ModelPredictArgs, NamedTensorMap, Optimizer, Scalar, scalar, serialization, Tensor, Tensor1D, tensor1d, util} from '@tensorflow/tfjs-core';\n\nimport * as K from '../backend/tfjs_backend';\nimport 
{BaseCallback, configureCallbacks, History, ModelLoggingVerbosity, standardizeCallbacks} from '../base_callbacks';\nimport {nameScope} from '../common';\nimport {NotImplementedError, RuntimeError, ValueError} from '../errors';\nimport {Shape} from '../keras_format/common';\nimport {LossIdentifier} from '../keras_format/loss_config';\nimport {OptimizerSerialization} from '../keras_format/optimizer_config';\nimport {MetricsIdentifier, TrainingConfig} from '../keras_format/training_config';\nimport {deserialize} from '../layers/serialization';\nimport { disposeTensorsInLogs, UnresolvedLogs } from '../logs';\nimport * as losses from '../losses';\nimport * as Metrics from '../metrics';\nimport * as optimizers from '../optimizers';\nimport {LossOrMetricFn, NamedTensor} from '../types';\nimport {checkUserDefinedMetadata} from '../user_defined_metadata';\nimport {count, pyListRepeat, singletonOrArray, toCamelCase, toSnakeCase, unique} from '../utils/generic_utils';\nimport {printSummary} from '../utils/layer_utils';\nimport {range} from '../utils/math_utils';\nimport {convertPythonicToTs} from '../utils/serialization_utils';\nimport {LayerVariable} from '../variables';\nimport {version} from '../version';\n\nimport {Container, ContainerArgs} from './container';\nimport {Dataset} from './dataset_stub';\nimport {execute, FeedDict} from './executor';\nimport {DisposeResult, SymbolicTensor} from './topology';\nimport {evaluateDataset, fitDataset, ModelEvaluateDatasetArgs, ModelFitDatasetArgs} from './training_dataset';\nimport {checkBatchSize, disposeNewTensors, ensureTensorsRank2OrHigher, makeBatches, ModelFitArgs, sliceArrays, sliceArraysByIndices} from './training_tensors';\nimport {ClassWeight, ClassWeightMap, computeWeightedLoss, standardizeClassWeights, standardizeWeights} from './training_utils';\n\n/**\n * Helper function for polymorphic input data: 1. singleton Tensor.\n */\nexport function isDataTensor(x: Tensor|Tensor[]|{[inputName: string]: Tensor}|\n {[inputName: string]: Tensor[]}): boolean {\n return x instanceof Tensor;\n}\n\n/**\n * Helper function for polymorphic input data: 2. Array of Tensor.\n */\nexport function isDataArray(x: Tensor|Tensor[]|\n {[inputName: string]: Tensor}): boolean {\n return Array.isArray(x);\n}\n\n/**\n * Helper function for polymorphic input data: 3. 
\"dict\" of Tensor.\n */\nexport function isDataDict(x: Tensor|Tensor[]|\n {[inputName: string]: Tensor}): boolean {\n return !isDataTensor(x) && !isDataArray(x);\n}\n\n/**\n * Normalizes inputs and targets provided by users.\n * @param data User-provided input data (polymorphic).\n * @param names An Array of expected Tensor names.\n * @param shapes Optional Array of expected Tensor shapes.\n * @param checkBatchAxis Whether to check that the batch axis of the arrays\n * match the expected value found in `shapes`.\n * @param exceptionPrefix String prefix used for exception formatting.\n * @returns List of standardized input Tensors (one Tensor per model input).\n * @throws ValueError: in case of improperly formatted user data.\n */\nexport function standardizeInputData(\n data: Tensor|Tensor[]|{[inputName: string]: Tensor}, names: string[],\n shapes?: Shape[], checkBatchAxis = true, exceptionPrefix = ''): Tensor[] {\n if (names == null || names.length === 0) {\n // Check for the case where the model expected no data, but some data got\n // sent.\n if (data != null) {\n let gotUnexpectedData = false;\n if (isDataArray(data) && (data as Tensor[]).length > 0) {\n gotUnexpectedData = true;\n } else if (isDataDict(data)) {\n for (const key in data) {\n if (data.hasOwnProperty(key)) {\n gotUnexpectedData = true;\n break;\n }\n }\n } else {\n // `data` is a singleton Tensor in this case.\n gotUnexpectedData = true;\n }\n if (gotUnexpectedData) {\n throw new ValueError(\n `Error when checking model ${exceptionPrefix} expected no data, ` +\n `but got ${data}`);\n }\n }\n return [];\n }\n if (data == null) {\n return names.map(name => null);\n }\n\n let arrays: Tensor[];\n if (isDataDict(data)) {\n data = data as {[inputName: string]: Tensor};\n arrays = [];\n for (const name of names) {\n if (data[name] == null) {\n throw new ValueError(\n `No data provided for \"${name}\". Need data for each key in: ` +\n `${names}`);\n }\n arrays.push(data[name]);\n }\n } else if (isDataArray(data)) {\n data = data as Tensor[];\n if (data.length !== names.length) {\n throw new ValueError(\n `Error when checking model ${exceptionPrefix}: the Array of ` +\n `Tensors that you are passing to your model is not the size the ` +\n `model expected. Expected to see ${names.length} Tensor(s), but ` +\n `instead got the following list of Tensor(s): ${data}`);\n }\n arrays = data;\n } else {\n data = data as Tensor;\n if (names.length > 1) {\n throw new ValueError(\n `The model ${exceptionPrefix} expects ${names.length} Tensor(s), ` +\n `but only received one Tensor. Found: Tensor with shape ${\n data.shape}`);\n }\n arrays = [data];\n }\n\n arrays = ensureTensorsRank2OrHigher(arrays);\n\n // Check shape compatibility.\n if (shapes != null) {\n for (let i = 0; i < names.length; ++i) {\n if (shapes[i] == null) {\n continue;\n }\n const array = arrays[i];\n if (array.shape.length !== shapes[i].length) {\n throw new ValueError(\n `Error when checking ${exceptionPrefix}: expected ${names[i]} ` +\n `to have ${shapes[i].length} dimension(s). 
but got array with ` +\n `shape ${array.shape}`);\n }\n for (let j = 0; j < shapes[i].length; ++j) {\n if (j === 0 && !checkBatchAxis) {\n // Skip the first (batch) axis.\n continue;\n }\n const dim = array.shape[j];\n const refDim = shapes[i][j];\n if (refDim != null && refDim >= 0 && dim !== refDim) {\n throw new ValueError(\n `${exceptionPrefix} expected a batch of elements where each ` +\n `example has shape [${shapes[i].slice(1, shapes[i].length)}] ` +\n `(i.e.,tensor shape [*,${\n shapes[i].slice(1, shapes[i].length)}])` +\n ` but the ${exceptionPrefix} received an input with ${\n array.shape[0]}` +\n ` examples, each with shape [${\n array.shape.slice(1, array.shape.length)}]` +\n ` (tensor shape [${array.shape}])`);\n }\n }\n }\n }\n return arrays;\n}\n\n/**\n * User input validation for Tensors.\n * @param inputs `Array` of `tf.Tensor`s for inputs.\n * @param targets `Array` of `tf.Tensor`s for targets.\n * @param weights Optional `Array` of `tf.Tensor`s for sample weights.\n * @throws ValueError: in case of incorrectly formatted data.\n */\nexport function checkArrayLengths(\n inputs: Tensor[], targets: Tensor[], weights?: Tensor[]) {\n const setX = unique(inputs.map(input => input.shape[0]));\n setX.sort();\n const setY = unique(targets.map(target => target.shape[0]));\n setY.sort();\n // TODO(cais): Check `weights` as well.\n if (setX.length > 1) {\n throw new ValueError(\n `All input Tensors (x) should have the same number of samples. ` +\n `Got array shapes: ` +\n `${JSON.stringify(inputs.map(input => input.shape))}`);\n }\n if (setY.length > 1) {\n throw new ValueError(\n `All target Tensors (y) should have the same number of samples. ` +\n `Got array shapes: ` +\n `${JSON.stringify(targets.map(target => target.shape))}`);\n }\n if (setX.length > 0 && setY.length > 0 && !util.arraysEqual(setX, setY)) {\n throw new ValueError(\n `Input Tensors should have the same number of samples as target ` +\n `Tensors. Found ${setX[0]} input sample(s) and ${setY[0]} target ` +\n `sample(s).`);\n }\n}\n\n/**\n * Validation on the compatibility of targes and loss functions.\n *\n * This helps prevent users from using loss functions incorrectly.\n *\n * @param targets `Array` of `tf.Tensor`s of targets.\n * @param lossFns `Array` of loss functions.\n * @param outputShapes `Array` of shapes of model outputs.\n */\nfunction checkLossAndTargetCompatibility(\n targets: Tensor[], lossFns: LossOrMetricFn[], outputShapes: Shape[]) {\n // TODO(cais): Dedicated test coverage?\n const keyLosses = [\n losses.meanSquaredError, losses.binaryCrossentropy,\n losses.categoricalCrossentropy\n ];\n for (let i = 0; i < targets.length; ++i) {\n const y = targets[i];\n const loss = lossFns[i];\n const shape = outputShapes[i];\n if (loss == null) {\n continue;\n }\n if (loss === losses.categoricalCrossentropy) {\n if (y.shape[y.shape.length - 1] === 1) {\n throw new ValueError(\n `You are passing a target array of shape ${y.shape} while using ` +\n `a loss 'categorical_crossentropy'. 
'categorical_crossentropy'` +\n `expects targets to be binary matrices (1s and 0s) of shape ` +\n `[samples, classes].`);\n // TODO(cais): Example code in error message.\n }\n }\n if (keyLosses.indexOf(loss) !== -1) {\n const slicedYShape = y.shape.slice(1);\n const slicedShape = shape.slice(1);\n for (let j = 0; j < slicedYShape.length; ++j) {\n const targetDim = slicedYShape[j];\n const outDim = slicedShape[j];\n if (outDim != null && targetDim !== outDim) {\n throw new ValueError(\n `A target Tensor with shape ${y.shape} was passed for an ` +\n `output of shape ${shape}, while using a loss function that ` +\n `expects targets to have the same shape as the output.`);\n }\n }\n }\n }\n}\n\n/**\n * Check inputs provided by the user.\n *\n * Porting Note: This corresponds to _standardize_input_data() in Python\n * Keras. Because of the strong typing in TF.js, we do not need to convert\n * the data. Specifically:\n * 1) in PyKeras, `data` can be `DataFrame` instances from pandas, for\n * example. We don't need to worry about that here because there is no\n * widely popular javascript/typesdcript equivalent of pandas (so far).\n * If one becomes available in the future, we can add support.\n * 2) in PyKeras, inputs can be Python dict. But here we are stipulating\n * that the data is either a single `tf.Tensor` or an Array of `tf.Tensor`s. We\n * may add support for `Object` data inputs in the future when the need\n * arises.\n *\n * Instead, we perform basic checks for number of parameters and shapes.\n *\n * @param data: The input data.\n * @param names: Name for the inputs, from the model.\n * @param shapes: Expected shapes for the input data, from the model.\n * @param checkBatchAxis: Whether the size along the batch axis (i.e., the\n * first dimension) will be checked for matching.\n * @param exceptionPrefix: Execption prefix message, used in generating error\n * messages.\n * @throws ValueError: on incorrect number of inputs or mismatches in shapes.\n */\nfunction checkInputData(\n data: Tensor|Tensor[], names: string[], shapes?: Shape[],\n checkBatchAxis = true, exceptionPrefix = '') {\n let arrays: Tensor[];\n if (Array.isArray(data)) {\n if (data.length !== names.length) {\n throw new ValueError(\n `Error when checking model ${exceptionPrefix}: the Array of ` +\n `Tensors that you are passing to your model is not the size the ` +\n `the model expected. Expected to see ${names.length} Tensor(s),` +\n ` but instead got ${data.length} Tensors(s).`);\n }\n arrays = data;\n } else {\n if (names.length > 1) {\n throw new ValueError(\n `The model expects ${names.length} ${exceptionPrefix} Tensors, ` +\n `but only received one Tensor. 
Found: array with shape ` +\n `${JSON.stringify(data.shape)}.`);\n }\n arrays = [data];\n }\n\n if (shapes != null) {\n for (let i = 0; i < names.length; ++i) {\n if (shapes[i] == null) {\n continue;\n }\n const array = arrays[i];\n if (array.shape.length !== shapes[i].length) {\n throw new ValueError(\n `Error when checking ${exceptionPrefix}: expected ${names[i]} ` +\n `to have ${shapes[i].length} dimension(s), but got array with ` +\n `shape ${JSON.stringify(array.shape)}`);\n }\n for (let j = 0; j < shapes[i].length; ++j) {\n if (j === 0 && !checkBatchAxis) {\n continue;\n }\n const dim = array.shape[j];\n const refDim = shapes[i][j];\n if (refDim != null) {\n if (refDim !== dim) {\n throw new ValueError(\n `Error when checking ${exceptionPrefix}: expected ` +\n `${names[i]} to have shape ${JSON.stringify(shapes[i])} but ` +\n `got array with shape ${JSON.stringify(array.shape)}.`);\n }\n }\n }\n }\n }\n}\n\n/**\n * Maps metric functions to model outputs.\n * @param metrics An shortcut strings name, metric function, `Array` or dict\n * (`Object`) of metric functions.\n * @param outputNames An `Array` of the names of model outputs.\n * @returns An `Array` (one entry per model output) of `Array` of metric\n * functions. For instance, if the model has 2 outputs, and for the first\n * output we want to compute `binaryAccuracy` and `binaryCrossentropy`,\n * and just `binaryAccuracy` for the second output, the `Array` would look\n * like:\n * `[[binaryAccuracy, binaryCrossentropy], [binaryAccuracy]]`\n * @throws TypeError: incompatible metrics format.\n */\nexport function collectMetrics(\n metrics: string|LossOrMetricFn|Array|\n {[outputName: string]: string | LossOrMetricFn},\n outputNames: string[]): Array> {\n if (metrics == null || Array.isArray(metrics) && metrics.length === 0) {\n return outputNames.map(name => []);\n }\n\n let wrappedMetrics: Array|\n {[outputName: string]: string | LossOrMetricFn};\n if (typeof metrics === 'string' || typeof metrics === 'function') {\n wrappedMetrics = [metrics];\n } else if (Array.isArray(metrics) || typeof metrics === 'object') {\n wrappedMetrics = metrics as Array|\n {[outputName: string]: string} | {[outputName: string]: LossOrMetricFn};\n } else {\n throw new TypeError(\n 'Type of metrics argument not understood. Expected an string,' +\n `function, Array, or Object, found: ${metrics}`);\n }\n\n if (Array.isArray(wrappedMetrics)) {\n // We then apply all metrics to all outputs.\n return outputNames.map(\n name => wrappedMetrics as Array);\n } else {\n // In this case, metrics is a dict.\n const nestedMetrics: Array> = [];\n for (const name of outputNames) {\n let outputMetrics: string|LossOrMetricFn|Array =\n wrappedMetrics.hasOwnProperty(name) ? wrappedMetrics[name] : [];\n if (!Array.isArray(outputMetrics)) {\n outputMetrics = [outputMetrics];\n }\n nestedMetrics.push(outputMetrics);\n }\n return nestedMetrics;\n }\n}\n\nexport interface ModelEvaluateArgs {\n /**\n * Batch size (Integer). If unspecified, it will default to 32.\n */\n batchSize?: number;\n\n /**\n * Verbosity mode.\n */\n verbose?: ModelLoggingVerbosity;\n\n /**\n * Tensor of weights to weight the contribution of different samples to the\n * loss and metrics.\n */\n sampleWeight?: Tensor;\n\n /**\n * integer: total number of steps (batches of samples)\n * before declaring the evaluation round finished. 
Ignored with the default\n * value of `undefined`.\n */\n steps?: number;\n}\n\n/**\n * Configuration for calls to `LayersModel.compile()`.\n */\nexport interface ModelCompileArgs {\n /**\n * An instance of `tf.train.Optimizer` or a string name for an Optimizer.\n */\n optimizer: string|Optimizer;\n\n /**\n * Object function(s) or name(s) of object function(s).\n * If the model has multiple outputs, you can use a different loss\n * on each output by passing a dictionary or an Array of losses.\n * The loss value that will be minimized by the model will then be the sum\n * of all individual losses.\n */\n loss: string|string[]|{[outputName: string]: string}|LossOrMetricFn|\n LossOrMetricFn[]|{[outputName: string]: LossOrMetricFn};\n\n /**\n * List of metrics to be evaluated by the model during training and testing.\n * Typically you will use `metrics=['accuracy']`.\n * To specify different metrics for different outputs of a multi-output\n * model, you could also pass a dictionary.\n */\n metrics?: string|LossOrMetricFn|Array|\n {[outputName: string]: string | LossOrMetricFn};\n\n // TODO(cais): Add lossWeights, sampleWeightMode, weightedMetrics, and\n // targetTensors.\n}\n\nconst LAYERS_MODEL_FORMAT_NAME = 'layers-model';\n\n/**\n * A `tf.LayersModel` is a directed, acyclic graph of `tf.Layer`s plus methods\n * for training, evaluation, prediction and saving.\n *\n * `tf.LayersModel` is the basic unit of training, inference and evaluation in\n * TensorFlow.js. To create a `tf.LayersModel`, use `tf.LayersModel`.\n *\n * See also:\n * `tf.Sequential`, `tf.loadLayersModel`.\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\nexport class LayersModel extends Container implements tfc.InferenceModel {\n // The class name is 'Model' rather than 'LayersModel' for backwards\n // compatibility since this class name shows up in the serialization format.\n /** @nocollapse */\n static className = 'Model';\n protected optimizer_: Optimizer;\n // Whether the model instance owns the optimizer: `true` if and only if\n // `optimizer` is created from a string parameter during `compile()` call.\n protected isOptimizerOwned: boolean;\n\n loss: string|string[]|{[outputName: string]: string}|LossOrMetricFn|\n LossOrMetricFn[]|{[outputName: string]: LossOrMetricFn};\n lossFunctions: LossOrMetricFn[];\n\n // TODO(cais): These private variables should probably not have the string\n // 'feed' in their names, because we are not dealing with a symbolic\n // backend.\n private feedOutputShapes: Shape[];\n private feedLossFns: LossOrMetricFn[];\n private collectedTrainableWeights: LayerVariable[];\n private testFunction: (data: Tensor[]) => Scalar[];\n history: History;\n\n // A public property that can be set by Callbacks to order early stopping\n // during `fit()` calls.\n protected stopTraining_: boolean;\n protected isTraining: boolean;\n\n metrics: string|LossOrMetricFn|Array|\n {[outputName: string]: string | LossOrMetricFn};\n metricsNames: string[];\n // Porting Note: `metrics_tensors` in PyKeras is a symbolic tensor. But given\n // the imperative nature of tfjs-core, `metricsTensors` is a\n // TypeScript function here.\n // Also note that due to the imperative nature of tfjs-core, `metricsTensor`\n // here needs an output index to keep track of which output of the\n // LayersModel a metric belongs to. 
This is unlike `metrics_tensors` in\n // PyKeras, which is a `list` of symbolic tensors, each of which has\n // implicit \"knowledge\" of the outputs it depends on.\n metricsTensors: Array<[LossOrMetricFn, number]>;\n\n // User defind metadata (if any).\n private userDefinedMetadata: {};\n\n constructor(args: ContainerArgs) {\n super(args);\n this.isTraining = false;\n }\n\n /**\n * Print a text summary of the model's layers.\n *\n * The summary includes\n * - Name and type of all layers that comprise the model.\n * - Output shape(s) of the layers\n * - Number of weight parameters of each layer\n * - If the model has non-sequential-like topology, the inputs each layer\n * receives\n * - The total number of trainable and non-trainable parameters of the model.\n *\n * ```js\n * const input1 = tf.input({shape: [10]});\n * const input2 = tf.input({shape: [20]});\n * const dense1 = tf.layers.dense({units: 4}).apply(input1);\n * const dense2 = tf.layers.dense({units: 8}).apply(input2);\n * const concat = tf.layers.concatenate().apply([dense1, dense2]);\n * const output =\n * tf.layers.dense({units: 3, activation: 'softmax'}).apply(concat);\n *\n * const model = tf.model({inputs: [input1, input2], outputs: output});\n * model.summary();\n * ```\n *\n * @param lineLength Custom line length, in number of characters.\n * @param positions Custom widths of each of the columns, as either\n * fractions of `lineLength` (e.g., `[0.5, 0.75, 1]`) or absolute number\n * of characters (e.g., `[30, 50, 65]`). Each number corresponds to\n * right-most (i.e., ending) position of a column.\n * @param printFn Custom print function. Can be used to replace the default\n * `console.log`. For example, you can use `x => {}` to mute the printed\n * messages in the console.\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n summary(\n lineLength?: number, positions?: number[],\n printFn:\n // tslint:disable-next-line:no-any\n (message?: any, ...optionalParams: any[]) => void = console.log) {\n if (!this.built) {\n throw new ValueError(\n `This model has never been called, thus its weights have not been ` +\n `created yet. So no summary can be displayed. Build the model ` +\n `first (e.g., by calling it on some test data).`);\n }\n printSummary(this, lineLength, positions, printFn);\n }\n\n /**\n * Configures and prepares the model for training and evaluation. Compiling\n * outfits the model with an optimizer, loss, and/or metrics. 
Calling `fit`\n * or `evaluate` on an un-compiled model will throw an error.\n *\n * @param args a `ModelCompileArgs` specifying the loss, optimizer, and\n * metrics to be used for fitting and evaluating this model.\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n compile(args: ModelCompileArgs): void {\n if (args.loss == null) {\n args.loss = [];\n }\n this.loss = args.loss;\n\n if (typeof args.optimizer === 'string') {\n this.optimizer_ = optimizers.getOptimizer(args.optimizer);\n this.isOptimizerOwned = true;\n } else {\n if (!(args.optimizer instanceof Optimizer)) {\n throw new ValueError(\n `User-defined optimizer must be an instance of tf.Optimizer.`);\n }\n this.optimizer_ = args.optimizer;\n this.isOptimizerOwned = false;\n }\n\n // TODO(cais): Add lossWeights.\n // TODO(cais): Add sampleWeightMode.\n\n // Prepare loss functions.\n let lossFunctions: LossOrMetricFn[] = [];\n if (!Array.isArray(args.loss) && typeof args.loss !== 'string' &&\n typeof args.loss !== 'function') {\n args.loss = args.loss as {[outputName: string]: string};\n for (const name in args.loss) {\n if (this.outputNames.indexOf(name) === -1) {\n throw new ValueError(\n `Unknown entry in loss dictionary: \"${name}\". ` +\n `Only expected the following keys: ${this.outputNames}`);\n }\n }\n for (const name of this.outputNames) {\n if (args.loss[name] == null) {\n console.warn(\n `Output \"${name}\" is missing from loss dictionary. We assume ` +\n `this was done on purpose, and we will not be expecting data ` +\n `to be passed to ${name} during training`);\n }\n lossFunctions.push(losses.get(args.loss[name]));\n }\n } else if (Array.isArray(args.loss)) {\n if (args.loss.length !== this.outputs.length) {\n throw new ValueError(\n `When passing an Array as loss, it should have one entry per ` +\n `model output. The model has ${this.outputs.length} output(s), ` +\n `but you passed loss=${args.loss}.`);\n }\n const theLosses = args.loss as Array;\n lossFunctions = theLosses.map(l => losses.get(l));\n } else {\n const lossFunction = losses.get(args.loss);\n this.outputs.forEach(_ => {\n lossFunctions.push(lossFunction);\n });\n }\n\n this.lossFunctions = lossFunctions;\n\n this.feedOutputNames = [];\n this.feedOutputShapes = [];\n this.feedLossFns = [];\n for (let i = 0; i < this.outputs.length; ++i) {\n // TODO(cais): Logic for skipping target(s).\n const shape = this.internalOutputShapes[i];\n const name = this.outputNames[i];\n this.feedOutputNames.push(name);\n this.feedOutputShapes.push(shape);\n this.feedLossFns.push(this.lossFunctions[i]);\n }\n\n // TODO(cais): Add logic for output masks.\n // TODO(cais): Add logic for sample weights.\n const skipTargetIndices: number[] = [];\n\n // Prepare metrics.\n this.metrics = args.metrics;\n // TODO(cais): Add weightedMetrics.\n this.metricsNames = ['loss'];\n this.metricsTensors = [];\n\n // Compute total loss.\n // Porting Note: In PyKeras, metrics_tensors are symbolic tensor objects.\n // Here, metricsTensors are TypeScript functions. 
This difference is due\n // to the difference in symbolic/imperative property of the backends.\n nameScope('loss', () => {\n for (let i = 0; i < this.outputs.length; ++i) {\n if (skipTargetIndices.indexOf(i) !== -1) {\n continue;\n }\n // TODO(cais): Add weightedLoss, sampleWeight and mask.\n // The following line should be weightedLoss\n const weightedLoss = this.lossFunctions[i];\n if (this.outputs.length > 1) {\n this.metricsTensors.push([weightedLoss, i]);\n this.metricsNames.push(this.outputNames[i] + '_loss');\n }\n }\n\n // Porting Note: Due to the imperative nature of the backend, we calculate\n // the regularizer penalties in the totalLossFunction, instead of here.\n });\n\n const nestedMetrics = collectMetrics(args.metrics, this.outputNames);\n // TODO(cais): Add nestedWeightedMetrics.\n\n /**\n * Helper function used in loop below.\n */\n const appendMetric =\n (outputIndex: number, metricName: string,\n metricTensor: LossOrMetricFn) => {\n if (this.outputNames.length > 1) {\n metricName = this.outputNames[outputIndex] + '_' + metricName;\n }\n this.metricsNames.push(metricName);\n this.metricsTensors.push([metricTensor, outputIndex]);\n };\n\n nameScope('metric', () => {\n for (let i = 0; i < this.outputs.length; ++i) {\n if (skipTargetIndices.indexOf(i) !== -1) {\n continue;\n }\n const outputMetrics = nestedMetrics[i];\n // TODO(cais): Add weights and outputWeightedMetrics.\n\n // TODO(cais): Add optional arg `weights` to the following function.\n const handleMetrics = (metrics: Array) => {\n const metricNamePrefix = '';\n let metricName: string;\n let accFn: LossOrMetricFn;\n let weightedMetricFn: LossOrMetricFn;\n // TODO(cais): Use 'weights_' for weighted metrics.\n\n for (const metric of metrics) {\n if (typeof metric === 'string' &&\n ['accuracy', 'acc', 'crossentropy', 'ce'].indexOf(metric) !==\n -1) {\n const outputShape = this.internalOutputShapes[i];\n\n if (outputShape[outputShape.length - 1] === 1 ||\n this.lossFunctions[i] === losses.binaryCrossentropy) {\n // case: binary accuracy/crossentropy.\n if (['accuracy', 'acc'].indexOf(metric) !== -1) {\n accFn = Metrics.binaryAccuracy;\n } else if (['crossentropy', 'ce'].indexOf(metric) !== -1) {\n accFn = Metrics.binaryCrossentropy;\n }\n } else if (\n this.lossFunctions[i] ===\n losses.sparseCategoricalCrossentropy) {\n // case: categorical accuracy / crossentropy with sparse\n // targets.\n if (['accuracy', 'acc'].indexOf(metric) !== -1) {\n accFn = Metrics.sparseCategoricalAccuracy;\n } else if (['crossentropy', 'ce'].indexOf(metric) !== -1) {\n accFn = Metrics.sparseCategoricalCrossentropy;\n }\n } else {\n // case: categorical accuracy / crossentropy.\n if (['accuracy', 'acc'].indexOf(metric) !== -1) {\n accFn = Metrics.categoricalAccuracy;\n } else if (['crossentropy', 'ce'].indexOf(metric) !== -1) {\n accFn = Metrics.categoricalCrossentropy;\n }\n }\n let suffix: string;\n if (['accuracy', 'acc'].indexOf(metric) !== -1) {\n suffix = 'acc';\n } else if (['crossentropy', 'ce'].indexOf(metric) !== -1) {\n suffix = 'ce';\n }\n // TODO(cais): Add weighting actually.\n weightedMetricFn = accFn;\n metricName = metricNamePrefix + suffix;\n } else {\n const metricFn = Metrics.get(metric);\n // TODO(cais): Add weighting actually.\n weightedMetricFn = metricFn;\n metricName =\n metricNamePrefix + Metrics.getLossOrMetricName(metric);\n }\n\n // TODO(cais): Add weighting and masking to metricResult.\n let metricResult: LossOrMetricFn;\n nameScope(metricName, () => {\n metricResult = weightedMetricFn;\n });\n appendMetric(i, 
metricName, metricResult);\n }\n };\n\n handleMetrics(outputMetrics);\n // TODO(cais): Call handleMetrics with weights.\n }\n });\n\n // Porting Notes: Given the imperative backend of tfjs-core,\n // there is no need for constructing the symbolic graph and placeholders.\n this.collectedTrainableWeights = this.trainableWeights;\n }\n\n /**\n * Check trainable weights count consistency.\n *\n * This will raise a warning if `this.trainableWeights` and\n * `this.collectedTrainableWeights` are inconsistent (i.e., have different\n * numbers of parameters).\n * Inconsistency will typically arise when one modifies `model.trainable`\n * without calling `model.compile()` again.\n */\n protected checkTrainableWeightsConsistency(): void {\n if (this.collectedTrainableWeights == null) {\n return;\n }\n if (this.trainableWeights.length !==\n this.collectedTrainableWeights.length) {\n console.warn(\n 'Discrepancy between trainableweights and collected trainable ' +\n 'weights. Did you set `model.trainable` without calling ' +\n '`model.compile()` afterwards?');\n }\n }\n\n /**\n * Returns the loss value & metrics values for the model in test mode.\n *\n * Loss and metrics are specified during `compile()`, which needs to happen\n * before calls to `evaluate()`.\n *\n * Computation is done in batches.\n *\n * ```js\n * const model = tf.sequential({\n * layers: [tf.layers.dense({units: 1, inputShape: [10]})]\n * });\n * model.compile({optimizer: 'sgd', loss: 'meanSquaredError'});\n * const result = model.evaluate(\n * tf.ones([8, 10]), tf.ones([8, 1]), {batchSize: 4});\n * result.print();\n * ```\n *\n * @param x `tf.Tensor` of test data, or an `Array` of `tf.Tensor`s if the\n * model has multiple inputs.\n * @param y `tf.Tensor` of target data, or an `Array` of `tf.Tensor`s if the\n * model has multiple outputs.\n * @param args A `ModelEvaluateArgs`, containing optional fields.\n *\n * @return `Scalar` test loss (if the model has a single output and no\n * metrics) or `Array` of `Scalar`s (if the model has multiple outputs\n * and/or metrics). The attribute `model.metricsNames`\n * will give you the display labels for the scalar outputs.\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n evaluate(\n x: Tensor|Tensor[], y: Tensor|Tensor[],\n args: ModelEvaluateArgs = {}): Scalar|Scalar[] {\n const batchSize = args.batchSize == null ? 32 : args.batchSize;\n checkBatchSize(batchSize);\n\n // TODO(cais): Standardize `config.sampleWeights` as well.\n // Validate user data.\n const checkBatchAxis = true;\n const standardizedOuts =\n this.standardizeUserDataXY(x, y, checkBatchAxis, batchSize);\n try {\n // TODO(cais): If uses `useLearningPhase`, set the corresponding element\n // of the input to 0.\n const ins = standardizedOuts[0].concat(standardizedOuts[1]);\n this.makeTestFunction();\n const f = this.testFunction;\n const testOuts =\n this.testLoop(f, ins, batchSize, args.verbose, args.steps);\n return singletonOrArray(testOuts);\n } finally {\n disposeNewTensors(standardizedOuts[0], x);\n disposeNewTensors(standardizedOuts[1], y);\n }\n }\n\n // TODO(cais): Add code snippet below once real dataset objects are\n // available.\n /**\n * Evaluate model using a dataset object.\n *\n * Note: Unlike `evaluate()`, this method is asynchronous (`async`).\n *\n * @param dataset A dataset object. Its `iterator()` method is expected\n * to generate a dataset iterator object, the `next()` method of which\n * is expected to produce data batches for evaluation. 
The return value\n * of the `next()` call ought to contain a boolean `done` field and a\n * `value` field. The `value` field is expected to be an array of two\n * `tf.Tensor`s or an array of two nested `tf.Tensor` structures. The former\n * case is for models with exactly one input and one output (e.g.\n * a sequential model). The latter case is for models with multiple\n * inputs and/or multiple outputs. Of the two items in the array, the\n * first is the input feature(s) and the second is the output target(s).\n * @param args A configuration object for the dataset-based evaluation.\n * @returns Loss and metric values as an Array of `Scalar` objects.\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n async evaluateDataset(dataset: Dataset<{}>, args?: ModelEvaluateDatasetArgs):\n Promise {\n this.makeTestFunction();\n return evaluateDataset(this, dataset, args);\n }\n\n /**\n * Get number of samples provided for training, evaluation or prediction.\n *\n * @param ins Input `tf.Tensor`.\n * @param batchSize Integer batch size, optional.\n * @param steps Total number of steps (batches of samples) before\n * declaring loop finished. Optional.\n * @param stepsName The public API's parameter name for `steps`.\n * @returns Number of samples provided.\n */\n private checkNumSamples(\n ins: Tensor|Tensor[], batchSize?: number, steps?: number,\n stepsName = 'steps'): number {\n let numSamples: number;\n if (steps != null) {\n numSamples = null;\n if (batchSize != null) {\n throw new ValueError(\n `If ${stepsName} is set, batchSize must be null or undefined.` +\n `Got batchSize = ${batchSize}`);\n }\n } else if (ins != null) {\n if (Array.isArray(ins)) {\n numSamples = ins[0].shape[0];\n } else {\n numSamples = ins.shape[0];\n }\n } else {\n throw new ValueError(\n `Either the input data should have a defined shape, or ` +\n `${stepsName} shoud be specified.`);\n }\n return numSamples;\n }\n\n /**\n * Execute internal tensors of the model with input data feed.\n * @param inputs Input data feed. Must match the inputs of the model.\n * @param outputs Names of the output tensors to be fetched. Must match\n * names of the SymbolicTensors that belong to the graph.\n * @returns Fetched values for `outputs`.\n */\n execute(inputs: Tensor|Tensor[]|NamedTensorMap, outputs: string|string[]):\n Tensor|Tensor[] {\n if (Array.isArray(outputs) && outputs.length === 0) {\n throw new ValueError(\n '`outputs` is an empty Array, which is not allowed.');\n }\n\n const outputsIsArray = Array.isArray(outputs);\n const outputNames =\n (outputsIsArray ? outputs : [outputs]);\n const outputSymbolicTensors = this.retrieveSymbolicTensors(outputNames);\n\n // Format the input into a FeedDict.\n const feedDict = new FeedDict();\n if (inputs instanceof Tensor) {\n inputs = [inputs];\n }\n if (Array.isArray(inputs)) {\n if (inputs.length !== this.inputs.length) {\n throw new ValueError(\n `The number of inputs provided (${inputs.length}) ` +\n `does not match the number of inputs of this model ` +\n `(${this.inputs.length}).`);\n }\n for (let i = 0; i < this.inputs.length; ++i) {\n feedDict.add(this.inputs[i], inputs[i]);\n }\n } else {\n for (const input of this.inputs) {\n const tensorValue = inputs[input.name];\n if (tensorValue == null) {\n throw new ValueError(\n `No value is provided for the model's input ${input.name}`);\n }\n feedDict.add(input, tensorValue);\n }\n }\n\n // Run execution.\n const executeOutputs = execute(outputSymbolicTensors, feedDict) as Tensor[];\n return outputsIsArray ? 
executeOutputs : executeOutputs[0];\n }\n\n /**\n * Retrieve the model's internal symbolic tensors from symbolic-tensor names.\n */\n private retrieveSymbolicTensors(symbolicTensorNames: string[]):\n SymbolicTensor[] {\n const outputSymbolicTensors: SymbolicTensor[] =\n pyListRepeat(null, symbolicTensorNames.length);\n let outputsRemaining = symbolicTensorNames.length;\n for (const layer of this.layers) {\n const layerOutputs: SymbolicTensor[] =\n Array.isArray(layer.output) ? layer.output : [layer.output];\n const layerOutputNames = layerOutputs.map(output => output.name);\n for (let i = 0; i < symbolicTensorNames.length; ++i) {\n const index = layerOutputNames.indexOf(symbolicTensorNames[i]);\n if (index !== -1) {\n outputSymbolicTensors[i] = layerOutputs[index];\n outputsRemaining--;\n }\n if (outputsRemaining === 0) {\n break;\n }\n }\n if (outputsRemaining === 0) {\n break;\n }\n }\n\n if (outputsRemaining > 0) {\n const remainingNames: string[] = [];\n outputSymbolicTensors.forEach((tensor, i) => {\n if (tensor == null) {\n remainingNames.push(symbolicTensorNames[i]);\n }\n });\n throw new ValueError(\n `Cannot find SymbolicTensors for output name(s): ` +\n `${JSON.stringify(remainingNames)}`);\n }\n return outputSymbolicTensors;\n }\n\n /**\n * Helper method to loop over some data in batches.\n *\n * Porting Note: Not using the functional approach in the Python equivalent\n * due to the imperative backend.\n * Porting Note: Does not support step mode currently.\n *\n * @param ins: input data\n * @param batchSize: integer batch size.\n * @param verbose: verbosity model\n * @returns: Predictions as `tf.Tensor` (if a single output) or an `Array` of\n * `tf.Tensor` (if multipe outputs).\n */\n private predictLoop(ins: Tensor|Tensor[], batchSize = 32, verbose = false):\n Tensor|Tensor[] {\n return tfc.tidy(() => {\n const numSamples = this.checkNumSamples(ins);\n if (verbose) {\n throw new NotImplementedError(\n 'Verbose predictLoop() is not implemented yet.');\n }\n\n // Sample-based predictions.\n // Porting Note: Tensor currently does not support sliced assignments as\n // in numpy, e.g., x[1:3] = y. 
Therefore we use concatenation while\n // iterating over the batches.\n\n const batches = makeBatches(numSamples, batchSize);\n const outsBatches: Tensor[][] = this.outputs.map(output => []);\n\n // TODO(cais): Can the scope() be pushed down inside the for loop?\n for (let batchIndex = 0; batchIndex < batches.length; ++batchIndex) {\n const batchOuts = tfc.tidy(() => {\n const batchStart = batches[batchIndex][0];\n const batchEnd = batches[batchIndex][1];\n // TODO(cais): Take care of the case of the last element is a flag for\n // training/test.\n const insBatch = sliceArrays(ins, batchStart, batchEnd);\n\n // Construct the feeds for execute();\n const feeds = [];\n if (Array.isArray(insBatch)) {\n for (let i = 0; i < insBatch.length; ++i) {\n feeds.push({key: this.inputs[i], value: insBatch[i]});\n }\n } else {\n feeds.push({key: this.inputs[0], value: insBatch});\n }\n const feedDict = new FeedDict(feeds);\n return execute(this.outputs, feedDict) as Tensor[];\n });\n batchOuts.forEach((batchOut, i) => outsBatches[i].push(batchOut));\n }\n return singletonOrArray(\n outsBatches.map(batches => tfc.concat(batches, 0)));\n });\n }\n\n /**\n * Generates output predictions for the input samples.\n *\n * Computation is done in batches.\n *\n * Note: the \"step\" mode of predict() is currently not supported.\n * This is because the TensorFlow.js core backend is imperative only.\n *\n * ```js\n * const model = tf.sequential({\n * layers: [tf.layers.dense({units: 1, inputShape: [10]})]\n * });\n * model.predict(tf.ones([8, 10]), {batchSize: 4}).print();\n * ```\n *\n * @param x The input data, as a Tensor, or an `Array` of `tf.Tensor`s if\n * the model has multiple inputs.\n * @param args A `ModelPredictArgs` object containing optional fields.\n *\n * @return Prediction results as a `tf.Tensor`(s).\n *\n * @exception ValueError In case of mismatch between the provided input data\n * and the model's expectations, or in case a stateful model receives a\n * number of samples that is not a multiple of the batch size.\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n predict(x: Tensor|Tensor[], args: ModelPredictArgs = {}): Tensor|Tensor[] {\n const xsRank2OrHigher = ensureTensorsRank2OrHigher(x);\n checkInputData(\n xsRank2OrHigher, this.inputNames, this.feedInputShapes, false);\n try {\n // TODO(cais): Take care of stateful models.\n // if (this.stateful) ...\n // TODO(cais): Take care of the learning_phase boolean flag.\n // if (this.useLearningPhase) ...\n const batchSize = args.batchSize == null ? 32 : args.batchSize;\n checkBatchSize(batchSize);\n return this.predictLoop(xsRank2OrHigher, batchSize);\n } finally {\n disposeNewTensors(xsRank2OrHigher, x);\n }\n }\n\n /**\n * Returns predictions for a single batch of samples.\n *\n * ```js\n * const model = tf.sequential({\n * layers: [tf.layers.dense({units: 1, inputShape: [10]})]\n * });\n * model.predictOnBatch(tf.ones([8, 10])).print();\n * ```\n * @param x: Input samples, as a Tensor (for models with exactly one\n * input) or an array of Tensors (for models with more than one input).\n * @return Tensor(s) of predictions\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n predictOnBatch(x: Tensor|Tensor[]): Tensor|Tensor[] {\n checkInputData(x, this.inputNames, this.feedInputShapes, true);\n // TODO(cais): Take care of the learning_phase boolean flag.\n // if (this.useLearningPhase) ...\n const batchSize = (Array.isArray(x) ? 
x[0] : x).shape[0];\n return this.predictLoop(x, batchSize);\n }\n\n protected standardizeUserDataXY(\n x: Tensor|Tensor[]|{[inputName: string]: Tensor},\n y: Tensor|Tensor[]|{[inputName: string]: Tensor}, checkBatchAxis = true,\n batchSize?: number): [Tensor[], Tensor[]] {\n // TODO(cais): Add sampleWeight, classWeight\n if (this.optimizer_ == null) {\n throw new RuntimeError(\n 'You must compile a model before training/testing. Use ' +\n 'LayersModel.compile(modelCompileArgs).');\n }\n const outputShapes: Shape[] = [];\n for (let i = 0; i < this.feedOutputShapes.length; ++i) {\n const outputShape = this.feedOutputShapes[i];\n const lossFn = this.feedLossFns[i];\n if (lossFn === losses.sparseCategoricalCrossentropy) {\n outputShapes.push(\n outputShape.slice(0, outputShape.length - 1).concat([1]));\n } else {\n // Porting Note: Because of strong typing `lossFn` must be a function.\n outputShapes.push(outputShape);\n }\n }\n x = standardizeInputData(\n x, this.feedInputNames, this.feedInputShapes, false, 'input');\n y = standardizeInputData(\n y, this.feedOutputNames, outputShapes, false, 'target');\n // TODO(cais): Standardize sampleWeights & classWeights.\n checkArrayLengths(x, y, null);\n // TODO(cais): Check sampleWeights as well.\n checkLossAndTargetCompatibility(y, this.feedLossFns, this.feedOutputShapes);\n if (this.stateful && batchSize != null && batchSize > 0) {\n if (x[0].shape[0] % batchSize !== 0) {\n throw new ValueError(\n `In a stateful network, you should only pass inputs with a ` +\n `number of samples that is divisible by the batch size ` +\n `${batchSize}. Found: ${x[0].shape[0]} sample(s).`);\n }\n }\n return [x, y];\n }\n\n protected async standardizeUserData(\n x: Tensor|Tensor[]|{[inputName: string]: Tensor},\n y: Tensor|Tensor[]|{[inputName: string]: Tensor},\n sampleWeight?: Tensor|Tensor[]|{[outputName: string]: Tensor},\n classWeight?: ClassWeight|ClassWeight[]|ClassWeightMap,\n checkBatchAxis = true,\n batchSize?: number): Promise<[Tensor[], Tensor[], Tensor[]]> {\n const [standardXs, standardYs] =\n this.standardizeUserDataXY(x, y, checkBatchAxis, batchSize);\n // TODO(cais): Handle sampleWeights.\n if (sampleWeight != null) {\n throw new Error('sample weight is not supported yet.');\n }\n\n let standardSampleWeights: Tensor[] = null;\n if (classWeight != null) {\n const classWeights =\n standardizeClassWeights(classWeight, this.outputNames);\n standardSampleWeights = [];\n for (let i = 0; i < classWeights.length; ++i) {\n standardSampleWeights.push(\n await standardizeWeights(standardYs[i], null, classWeights[i]));\n }\n }\n\n // TODO(cais): Deal with the case of model.stateful == true.\n return [standardXs, standardYs, standardSampleWeights];\n }\n\n /**\n * Loop over some test data in batches.\n * @param f A Function returning a list of tensors.\n * @param ins Array of tensors to be fed to `f`.\n * @param batchSize Integer batch size or `null` / `undefined`.\n * @param verbose verbosity mode.\n * @param steps Total number of steps (batches of samples) before\n * declaring test finished. 
Ignored with the default value of `null` /\n * `undefined`.\n * @returns Array of Scalars.\n */\n private testLoop(\n f: (data: Tensor[]) => Scalar[], ins: Tensor[], batchSize?: number,\n verbose = 0, steps?: number): Scalar[] {\n return tfc.tidy(() => {\n const numSamples = this.checkNumSamples(ins, batchSize, steps, 'steps');\n const outs: Scalar[] = [];\n if (verbose > 0) {\n throw new NotImplementedError('Verbose mode is not implemented yet.');\n }\n // TODO(cais): Use `indicesForConversionToDense' to prevent slow down.\n if (steps != null) {\n throw new NotImplementedError(\n 'steps mode in testLoop() is not implemented yet');\n } else {\n const batches = makeBatches(numSamples, batchSize);\n const indexArray = tensor1d(range(0, numSamples));\n for (let batchIndex = 0; batchIndex < batches.length; ++batchIndex) {\n const batchStart = batches[batchIndex][0];\n const batchEnd = batches[batchIndex][1];\n const batchIds =\n K.sliceAlongFirstAxis(\n indexArray, batchStart, batchEnd - batchStart) as Tensor1D;\n // TODO(cais): In ins, train flag can be a number, instead of an\n // Tensor? Do we need to handle this in tfjs-layers?\n const insBatch = sliceArraysByIndices(ins, batchIds) as Scalar[];\n const batchOuts = f(insBatch);\n if (batchIndex === 0) {\n for (let i = 0; i < batchOuts.length; ++i) {\n outs.push(scalar(0));\n }\n }\n for (let i = 0; i < batchOuts.length; ++i) {\n const batchOut = batchOuts[i];\n outs[i] =\n tfc.add(outs[i], tfc.mul(batchEnd - batchStart, batchOut));\n }\n }\n for (let i = 0; i < outs.length; ++i) {\n outs[i] = tfc.div(outs[i], numSamples);\n }\n }\n return outs;\n });\n }\n\n protected getDedupedMetricsNames(): string[] {\n const outLabels = this.metricsNames;\n // Rename duplicated metrics names (can happen with an output layer\n // shared among multiple dataflows).\n const dedupedOutLabels = [];\n for (let i = 0; i < outLabels.length; ++i) {\n const label = outLabels[i];\n let newLabel = label;\n if (count(outLabels, label) > 1) {\n const dupIndex = count(outLabels.slice(0, i), label);\n newLabel += `_${dupIndex}`;\n }\n dedupedOutLabels.push(newLabel);\n }\n return dedupedOutLabels;\n }\n\n /**\n * Creates a function that performs the following actions:\n *\n * 1. computes the losses\n * 2. sums them to get the total loss\n * 3. call the optimizer computes the gradients of the LayersModel's\n * trainable weights w.r.t. the total loss and update the variables\n * 4. calculates the metrics\n * 5. returns the values of the losses and metrics.\n */\n protected makeTrainFunction(): (data: Tensor[]) => Scalar[] {\n return (data: Tensor[]) => {\n const lossValues: Scalar[] = [];\n\n const inputs = data.slice(0, this.inputs.length);\n const targets = data.slice(\n this.inputs.length, this.inputs.length + this.outputs.length);\n const sampleWeights = data.slice(\n this.inputs.length + this.outputs.length,\n this.inputs.length + this.outputs.length * 2);\n\n const metricsValues: Scalar[] = [];\n\n // Create a function that computes the total loss based on the\n // inputs. 
This function is used for obtaining gradients through\n // backprop.\n const totalLossFunction = () => {\n const feeds = [];\n for (let i = 0; i < this.inputs.length; ++i) {\n feeds.push({key: this.inputs[i], value: inputs[i]});\n }\n const feedDict = new FeedDict(feeds);\n const outputs =\n execute(this.outputs, feedDict, {'training': true}) as Tensor[];\n // TODO(cais): Take care of the case of multiple outputs from a\n // single layer?\n\n let totalLoss: Tensor;\n for (let i = 0; i < this.lossFunctions.length; ++i) {\n const lossFunction = this.lossFunctions[i];\n let loss = lossFunction(targets[i], outputs[i]);\n if (sampleWeights[i] != null) {\n loss = computeWeightedLoss(loss, sampleWeights[i]);\n }\n\n // TODO(cais): push Scalar instead.\n const meanLoss: Scalar = tfc.mean(loss);\n // TODO(cais): Use a scope() instead, to avoid ownership.\n lossValues.push(meanLoss);\n if (i === 0) {\n totalLoss = loss;\n } else {\n totalLoss = tfc.add(totalLoss, loss);\n }\n }\n\n // Compute the metrics.\n // TODO(cais): These should probably be calculated outside\n // totalLossFunction to benefit speed?\n for (let i = 0; i < this.metricsTensors.length; ++i) {\n let weightedMetric: Scalar;\n\n if (this.outputs.length > 1 && i < this.outputs.length) {\n weightedMetric = lossValues[i];\n } else {\n const metric = this.metricsTensors[i][0];\n const outputIndex = this.metricsTensors[i][1];\n weightedMetric =\n tfc.mean(metric(targets[outputIndex], outputs[outputIndex]));\n }\n\n tfc.keep(weightedMetric);\n // TODO(cais): Use a scope() instead, to avoid ownership.\n metricsValues.push(weightedMetric);\n }\n\n totalLoss = tfc.mean(totalLoss);\n\n // Add regularizer penalties.\n this.calculateLosses().forEach(regularizerLoss => {\n totalLoss = tfc.add(totalLoss, regularizerLoss);\n });\n\n return totalLoss as Scalar;\n };\n\n const variables = this.collectedTrainableWeights.map(\n param => param.read() as tfc.Variable);\n const returnCost = true;\n const totalLossValue =\n this.optimizer_.minimize(totalLossFunction, returnCost, variables);\n\n return [totalLossValue].concat(metricsValues);\n };\n }\n\n /**\n * Create a function which, when invoked with an array of `tf.Tensor`s as a\n * batch of inputs, returns the prespecified loss and metrics of the model\n * under the batch of input data.\n */\n private makeTestFunction() {\n this.testFunction = (data: Tensor[]) => {\n return tfc.tidy(() => {\n const valOutputs: Scalar[] = [];\n let totalLoss: Scalar;\n const inputs = data.slice(0, this.inputs.length);\n const targets = data.slice(\n this.inputs.length, this.inputs.length + this.outputs.length);\n const feeds = [];\n for (let i = 0; i < this.inputs.length; ++i) {\n feeds.push({key: this.inputs[i], value: inputs[i]});\n }\n const feedDict = new FeedDict(feeds);\n const outputs = execute(this.outputs, feedDict) as Tensor[];\n // Compute total loss.\n for (let i = 0; i < this.lossFunctions.length; ++i) {\n const lossFunction = this.lossFunctions[i];\n // TODO(cais): Add sample weighting and replace the simple\n // averaging.\n const loss: Scalar = tfc.mean(lossFunction(targets[i], outputs[i]));\n if (i === 0) {\n totalLoss = loss;\n } else {\n totalLoss = tfc.add(totalLoss, loss);\n }\n valOutputs.push(totalLoss);\n }\n // Compute the metrics.\n for (let i = 0; i < this.metricsTensors.length; ++i) {\n const metric = this.metricsTensors[i][0];\n const outputIndex = this.metricsTensors[i][1];\n // TODO(cais): Replace K.mean() with a proper weighting function.\n const meanMetric =\n 
tfc.mean(metric(targets[outputIndex], outputs[outputIndex]));\n valOutputs.push(meanMetric as Scalar);\n }\n return valOutputs;\n });\n };\n }\n\n /**\n * Trains the model for a fixed number of epochs (iterations on a\n * dataset).\n *\n * ```js\n * const model = tf.sequential({\n * layers: [tf.layers.dense({units: 1, inputShape: [10]})]\n * });\n * model.compile({optimizer: 'sgd', loss: 'meanSquaredError'});\n * for (let i = 1; i < 5 ; ++i) {\n * const h = await model.fit(tf.ones([8, 10]), tf.ones([8, 1]), {\n * batchSize: 4,\n * epochs: 3\n * });\n * console.log(\"Loss after Epoch \" + i + \" : \" + h.history.loss[0]);\n * }\n * ```\n *\n * @param x `tf.Tensor` of training data, or an array of `tf.Tensor`s if the\n * model has multiple inputs. If all inputs in the model are named, you\n * can also pass a dictionary mapping input names to `tf.Tensor`s.\n * @param y `tf.Tensor` of target (label) data, or an array of `tf.Tensor`s if\n * the model has multiple outputs. If all outputs in the model are named,\n * you can also pass a dictionary mapping output names to `tf.Tensor`s.\n * @param args A `ModelFitArgs`, containing optional fields.\n *\n * @return A `History` instance. Its `history` attribute contains all\n * information collected during training.\n *\n * @exception ValueError In case of mismatch between the provided input\n * data and what the model expects.\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n async fit(\n x: Tensor|Tensor[]|{[inputName: string]: Tensor},\n y: Tensor|Tensor[]|{[inputName: string]: Tensor},\n args: ModelFitArgs = {}): Promise {\n if (this.isTraining) {\n throw new Error(\n 'Cannot start training because another fit() call is ongoing.');\n }\n this.isTraining = true;\n let inputs: Tensor[];\n let targets: Tensor[];\n let originalInputs: Tensor[];\n let originalTargets: Tensor[];\n let inputValX: Tensor|Tensor[];\n let inputValY: Tensor|Tensor[];\n let valX: Tensor|Tensor[];\n let valY: Tensor|Tensor[];\n let sampleWeights: Tensor[];\n try {\n const batchSize = args.batchSize == null ? 32 : args.batchSize;\n checkBatchSize(batchSize);\n\n // Validate user data.\n // TODO(cais): Support sampleWeight.\n const checkBatchAxis = false;\n const standardizedOuts =\n await this.standardizeUserData(\n x, y, args.sampleWeight, args.classWeight, checkBatchAxis,\n batchSize) as [Tensor[], Tensor[], Tensor[]];\n inputs = standardizedOuts[0];\n targets = standardizedOuts[1];\n sampleWeights = standardizedOuts[2];\n\n // Prepare validation data.\n let doValidation = false;\n let valIns: Tensor[];\n if (args.validationData != null && args.validationData.length > 0) {\n doValidation = true;\n if (args.validationData.length === 2) {\n // config.validationData consists of valX and valY.\n inputValX = args.validationData[0];\n inputValY = args.validationData[1];\n } else if (args.validationData.length === 3) {\n throw new NotImplementedError(\n 'validationData including sample weights is not supported yet.');\n } else {\n throw new ValueError(\n `When passing validation data, it must contain 2 (valX, valY) ` +\n `or 3 (valX, valY, valSampleWeight) items; ` +\n `${args.validationData} is invalid.`);\n }\n\n const checkBatchAxis = true;\n const valStandardized =\n await this.standardizeUserData(\n inputValX, inputValY, null, /** Unused sample weights. */\n null, /** Unused class weights. 
*/\n checkBatchAxis, batchSize) as [Tensor[], Tensor[], Tensor[]];\n valX = valStandardized[0];\n valY = valStandardized[1];\n valIns = valX.concat(valY);\n // TODO(cais): Add useLearningPhase data properly.\n } else if (\n args.validationSplit != null && args.validationSplit > 0 &&\n args.validationSplit < 1) {\n doValidation = true;\n // Porting Note: In tfjs-layers, inputs[0] is always a Tensor.\n const splitAt =\n Math.floor(inputs[0].shape[0] * (1 - args.validationSplit));\n const originalBatchSize = inputs[0].shape[0];\n valX = sliceArrays(inputs, splitAt, originalBatchSize) as Tensor[];\n originalInputs = inputs;\n inputs = sliceArrays(inputs, 0, splitAt) as Tensor[];\n valY = sliceArrays(targets, splitAt, originalBatchSize) as Tensor[];\n originalTargets = targets;\n targets = sliceArrays(targets, 0, splitAt) as Tensor[];\n // TODO(cais): Once sampleWeights becomes available, slice it to get\n // valSampleWeights.\n valIns = valX.concat(valY);\n\n // TODO(cais): Add useLearningPhase data properly.\n } else if (args.validationSteps != null) {\n doValidation = true;\n // TODO(cais): Add useLearningPhase.\n }\n\n const ins = inputs.concat(targets).concat(sampleWeights);\n\n this.checkTrainableWeightsConsistency();\n\n // TODO(cais): Handle use_learning_phase and learning_phase?\n\n // Porting Note: Here we see a key deviation of tfjs-layers from\n // Keras.\n // Due to the imperative nature of tfjs-layers' backend (tfjs-core),\n // we do not construct symbolic computation graphs to embody the\n // training process. Instead, we define a function that performs the\n // training action. In PyKeras, the data (inputs and targets) are fed\n // through graph placeholders. In tfjs-layers, the data are fed as\n // function arguments. Since the function are defined below in the\n // scope, we don't have equivalents of PyKeras's\n // `_make_train_funciton`.\n const trainFunction = this.makeTrainFunction();\n const outLabels = this.getDedupedMetricsNames();\n\n let valFunction: (data: Tensor[]) => Scalar[];\n let callbackMetrics: string[];\n if (doValidation) {\n this.makeTestFunction();\n valFunction = this.testFunction;\n callbackMetrics =\n outLabels.slice().concat(outLabels.map(n => 'val_' + n));\n } else {\n valFunction = null;\n valIns = [];\n callbackMetrics = outLabels.slice();\n }\n\n const callbacks = standardizeCallbacks(args.callbacks, args.yieldEvery);\n const out = await this.fitLoop(\n trainFunction, ins, outLabels, batchSize, args.epochs,\n args.verbose, callbacks, valFunction, valIns, args.shuffle,\n callbackMetrics, args.initialEpoch, null, null);\n return out;\n } finally {\n this.isTraining = false;\n // Memory clean up.\n disposeNewTensors(inputs, x);\n disposeNewTensors(targets, y);\n disposeNewTensors(originalInputs, x);\n disposeNewTensors(originalTargets, y);\n disposeNewTensors(valX as Tensor[], inputValX);\n disposeNewTensors(valY as Tensor[], inputValY);\n if (sampleWeights != null) {\n tfc.dispose(sampleWeights);\n }\n }\n // TODO(cais): Add value to outLabels.\n }\n\n /**\n * Abstract fit function for `f(ins)`.\n * @param f A Function returning a list of tensors. For training, this\n * function is expected to perform the updates to the variables.\n * @param ins List of tensors to be fed to `f`.\n * @param outLabels List of strings, display names of the outputs of `f`.\n * @param batchSize Integer batch size or `== null` if unknown. Default : 32.\n * @param epochs Number of times to iterate over the data. Default : 1.\n * @param verbose Verbosity mode: 0, 1, or 2. 
Default: 1.\n * @param callbacks List of callbacks to be called during training.\n * @param valF Function to call for validation.\n * @param valIns List of tensors to be fed to `valF`.\n * @param shuffle Whether to shuffle the data at the beginning of every\n * epoch. Default : true.\n * @param callbackMetrics List of strings, the display names of the metrics\n * passed to the callbacks. They should be the concatenation of the\n * display names of the outputs of `f` and the list of display names\n * of the outputs of `valF`.\n * @param initialEpoch Epoch at which to start training (useful for\n * resuming a previous training run). Default : 0.\n * @param stepsPerEpoch Total number of steps (batches on samples) before\n * declaring one epoch finished and starting the next epoch. Ignored with\n * the default value of `undefined` or `null`.\n * @param validationSteps Number of steps to run validation for (only if\n * doing validation from data tensors). Not applicable for tfjs-layers.\n * @returns A `History` object.\n */\n async fitLoop(\n f: (data: Tensor[]) => Scalar[], ins: Tensor[], outLabels?:\n string[], batchSize?: number, epochs?: number, verbose?: number,\n callbacks?: BaseCallback[], valF?: (data: Tensor[]) => Scalar[], valIns?:\n Tensor[], shuffle?: boolean|string, callbackMetrics?: string[],\n initialEpoch?: number, stepsPerEpoch?: number, validationSteps?: number):\n Promise {\n if (batchSize == null) {\n batchSize = 32;\n }\n if (epochs == null) {\n epochs = 1;\n }\n if (shuffle == null) {\n shuffle = true;\n }\n if (initialEpoch == null) {\n initialEpoch = 0;\n }\n\n // TODO(cais): Change const to let below when implementing validation.\n let doValidation = false;\n if (valF != null && valIns != null) {\n doValidation = true;\n // TODO(cais): verbose message.\n }\n if (validationSteps != null) {\n doValidation = true;\n if (stepsPerEpoch == null) {\n throw new ValueError(\n 'Can only use `validationSteps` when doing step-wise training, ' +\n 'i.e., `stepsPerEpoch` must be set.');\n }\n }\n\n const numTrainSamples =\n this.checkNumSamples(ins, batchSize, stepsPerEpoch, 'steps_per_epoch');\n let indexArray: number[];\n if (numTrainSamples != null) {\n indexArray = range(0, numTrainSamples);\n }\n\n if (verbose == null) {\n verbose = 1;\n }\n\n const {callbackList, history} = configureCallbacks(\n callbacks, verbose, epochs, initialEpoch, numTrainSamples,\n stepsPerEpoch, batchSize, doValidation, callbackMetrics);\n callbackList.setModel(this);\n this.history = history;\n await callbackList.onTrainBegin();\n this.stopTraining_ = false;\n // TODO(cais): Take care of callbacks.validation_data as in PyKeras.\n // TODO(cais): Pre-convert feeds for performance as in PyKeras.\n\n for (let epoch = initialEpoch; epoch < epochs; ++epoch) {\n await callbackList.onEpochBegin(epoch);\n const epochLogs: UnresolvedLogs = {};\n if (stepsPerEpoch != null) {\n throw new NotImplementedError(\n 'stepsPerEpoch mode is not implemented yet.');\n } else {\n if (shuffle === 'batch') {\n throw new NotImplementedError('batch shuffling is not implemneted'\n + ' yet');\n } else if (shuffle) {\n util.shuffle(indexArray);\n }\n // Convert the potentially shuffled indices to Tensor1D, to avoid the\n // cost of repeated creation of Array1Ds later on.\n const epochIndexArray1D = tensor1d(indexArray);\n\n const batches = makeBatches(numTrainSamples, batchSize);\n for (let batchIndex = 0; batchIndex < batches.length; ++batchIndex) {\n const batchLogs: UnresolvedLogs = {};\n await 
callbackList.onBatchBegin(batchIndex, batchLogs);\n\n tfc.tidy(() => {\n const batchStart = batches[batchIndex][0];\n const batchEnd = batches[batchIndex][1];\n const batchIds = K.sliceAlongFirstAxis(\n epochIndexArray1D, batchStart,\n batchEnd - batchStart) as Tensor1D;\n batchLogs['batch'] = batchIndex;\n batchLogs['size'] = batchEnd - batchStart;\n\n // TODO(cais): In ins, train flag can be a number, instead of an\n // Tensor? Do we need to handle this in tfjs-layers?\n const insBatch = sliceArraysByIndices(ins, batchIds) as Tensor[];\n const outs = f(insBatch);\n for (let i = 0; i < outLabels.length; ++i) {\n const label = outLabels[i];\n const out = outs[i];\n batchLogs[label] = out;\n tfc.keep(out);\n // TODO(cais): Use scope() to avoid ownership.\n }\n\n if (batchIndex === batches.length - 1) { // Last batch.\n if (doValidation) {\n const valOuts = this.testLoop(valF, valIns, batchSize);\n // Porting Notes: In tfjs-layers, valOuts is always an Array.\n for (let i = 0; i < outLabels.length; ++i) {\n const label = outLabels[i];\n const out = valOuts[i];\n tfc.keep(out);\n // TODO(cais): Use scope() to avoid ownership.\n epochLogs['val_' + label] = out;\n }\n }\n }\n });\n\n await callbackList.onBatchEnd(batchIndex, batchLogs);\n disposeTensorsInLogs(batchLogs);\n\n if (this.stopTraining_) {\n break;\n }\n // TODO(cais): return outs as list of Tensor.\n }\n\n epochIndexArray1D.dispose();\n }\n // TODO(cais): Run validation at the end of the epoch.\n await callbackList.onEpochEnd(epoch, epochLogs);\n if (this.stopTraining_) {\n break;\n }\n }\n await callbackList.onTrainEnd();\n\n await this.history.syncData();\n return this.history;\n }\n\n // TODO(cais): Add code snippet below when it's possible to instantiate\n // actual dataset objects.\n /**\n * Trains the model using a dataset object.\n *\n * @param dataset A dataset object. Its `iterator()` method is expected\n * to generate a dataset iterator object, the `next()` method of which\n * is expected to produce data batches for training. The return value\n * of the `next()` call ought to contain a boolean `done` field and a\n * `value` field. The `value` field is expected to be an array of two\n * `tf.Tensor`s or an array of two nested `tf.Tensor` structures. The former\n * case is for models with exactly one input and one output (e.g.\n * a sequential model). The latter case is for models with multiple\n * inputs and/or multiple outputs.\n * Of the two items in the array, the first is the input feature(s) and\n * the second is the output target(s).\n * @param args A `ModelFitDatasetArgs`, containing optional fields.\n *\n * @return A `History` instance. Its `history` attribute contains all\n * information collected during training.\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n async fitDataset(dataset: Dataset, args: ModelFitDatasetArgs):\n Promise {\n return fitDataset(this, dataset, args);\n }\n\n /**\n * Runs a single gradient update on a single batch of data.\n *\n * This method differs from `fit()` and `fitDataset()` in the following\n * regards:\n * - It operates on exactly one batch of data.\n * - It returns only the loss and metric values, instead of\n * returning the batch-by-batch loss and metric values.\n * - It doesn't support fine-grained options such as verbosity and\n * callbacks.\n *\n * @param x Input data. 
It could be one of the following:\n * - A `tf.Tensor`, or an Array of `tf.Tensor`s (in case the model has\n * multiple inputs).\n * - An Object mapping input names to corresponding `tf.Tensor` (if the\n * model has named inputs).\n * @param y Target data. It could be either a `tf.Tensor` or multiple\n * `tf.Tensor`s. It should be consistent with `x`.\n * @returns Training loss or losses (in case the model has\n * multiple outputs), along with metrics (if any), as numbers.\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n async trainOnBatch(\n x: Tensor|Tensor[]|{[inputName: string]: Tensor},\n y: Tensor|Tensor[]|\n {[inputName: string]: Tensor}): Promise {\n // TODO(cais): Support sampleWeight and classWeight.\n // TODO(cais): Support Dataset objects.\n const standardizeOut = await this.standardizeUserData(x, y);\n const inputs = standardizeOut[0];\n const targets = standardizeOut[1];\n const trainFunction = this.makeTrainFunction();\n const losses = trainFunction(inputs.concat(targets));\n const lossValues: number[] = [];\n for (const loss of losses) {\n const v = await loss.data();\n lossValues.push(v[0]);\n }\n tfc.dispose(losses);\n disposeNewTensors(standardizeOut[0], x);\n disposeNewTensors(standardizeOut[1], y);\n return singletonOrArray(lossValues);\n }\n\n /**\n * Extract weight values of the model.\n *\n * @param config: An instance of `io.SaveConfig`, which specifies\n * model-saving options such as whether only trainable weights are to be\n * saved.\n * @returns A `NamedTensorMap` mapping original weight names (i.e.,\n * non-uniqueified weight names) to their values.\n */\n protected getNamedWeights(config?: io.SaveConfig): NamedTensor[] {\n const namedWeights: NamedTensor[] = [];\n\n const trainableOnly = config != null && config.trainableOnly;\n const weights = trainableOnly ? 
this.trainableWeights : this.weights;\n const weightValues = this.getWeights(trainableOnly);\n for (let i = 0; i < weights.length; ++i) {\n if (trainableOnly && !weights[i].trainable) {\n // Optionally skip non-trainable weights.\n continue;\n }\n namedWeights.push(\n {name: weights[i].originalName, tensor: weightValues[i]});\n }\n return namedWeights;\n }\n\n /**\n * Setter used for force stopping of LayersModel.fit() (i.e., training).\n *\n * Example:\n *\n * ```js\n * const input = tf.input({shape: [10]});\n * const output = tf.layers.dense({units: 1}).apply(input);\n * const model = tf.model({inputs: [input], outputs: [output]});\n * model.compile({loss: 'meanSquaredError', optimizer: 'sgd'});\n * const xs = tf.ones([8, 10]);\n * const ys = tf.zeros([8, 1]);\n *\n * const history = await model.fit(xs, ys, {\n * epochs: 10,\n * callbacks: {\n * onEpochEnd: async (epoch, logs) => {\n * if (epoch === 2) {\n * model.stopTraining = true;\n * }\n * }\n * }\n * });\n *\n * // There should be only 3 values in the loss array, instead of 10\n * values,\n * // due to the stopping after 3 epochs.\n * console.log(history.history.loss);\n * ```\n */\n set stopTraining(stop: boolean) {\n this.stopTraining_ = stop;\n }\n\n get stopTraining(): boolean {\n return this.stopTraining_;\n }\n\n get optimizer(): Optimizer {\n return this.optimizer_;\n }\n\n set optimizer(optimizer: Optimizer) {\n if (this.optimizer_ !== optimizer) {\n this.optimizer_ = optimizer;\n this.isOptimizerOwned = false;\n }\n }\n\n override dispose(): DisposeResult {\n const result = super.dispose();\n if (result.refCountAfterDispose === 0 && this.optimizer != null &&\n this.isOptimizerOwned) {\n const numTensorsBeforeOptmizerDisposal = tfc.memory().numTensors;\n this.optimizer_.dispose();\n result.numDisposedVariables +=\n numTensorsBeforeOptmizerDisposal - tfc.memory().numTensors;\n }\n return result;\n }\n\n private getLossIdentifiers(): LossIdentifier|LossIdentifier[]|\n {[outputName: string]: LossIdentifier} {\n let lossNames: LossIdentifier|LossIdentifier[]|\n {[outputName: string]: LossIdentifier};\n if (typeof this.loss === 'string') {\n lossNames = toSnakeCase(this.loss) as LossIdentifier;\n } else if (Array.isArray(this.loss)) {\n for (const loss of this.loss) {\n if (typeof loss !== 'string') {\n throw new Error('Serialization of non-string loss is not supported.');\n }\n }\n lossNames = (this.loss as string[]).map(name => toSnakeCase(name)) as\n LossIdentifier[];\n } else {\n const outputNames = Object.keys(this.loss);\n lossNames = {} as {[outputName: string]: LossIdentifier};\n const losses =\n this.loss as {[outputName: string]: LossOrMetricFn | string};\n for (const outputName of outputNames) {\n if (typeof losses[outputName] === 'string') {\n lossNames[outputName] =\n toSnakeCase(losses[outputName] as string) as LossIdentifier;\n } else {\n throw new Error('Serialization of non-string loss is not supported.');\n }\n }\n }\n return lossNames;\n }\n\n private getMetricIdentifiers(): MetricsIdentifier[]|\n {[key: string]: MetricsIdentifier} {\n if (typeof this.metrics === 'string' ||\n typeof this.metrics === 'function') {\n return [toSnakeCase(Metrics.getLossOrMetricName(this.metrics))];\n } else if (Array.isArray(this.metrics)) {\n return this.metrics.map(\n metric => toSnakeCase(Metrics.getLossOrMetricName(metric)));\n } else {\n const metricsIdentifiers: {[key: string]: MetricsIdentifier} = {};\n for (const key in this.metrics) {\n metricsIdentifiers[key] =\n 
toSnakeCase(Metrics.getLossOrMetricName(this.metrics[key]));\n }\n return metricsIdentifiers;\n }\n }\n\n protected getTrainingConfig(): TrainingConfig {\n return {\n loss: this.getLossIdentifiers(),\n metrics: this.getMetricIdentifiers(),\n optimizer_config: {\n class_name: this.optimizer.getClassName(),\n config: this.optimizer.getConfig()\n } as OptimizerSerialization\n };\n // TODO(cais): Add weight_metrics when they are supported.\n // TODO(cais): Add sample_weight_mode when it's supported.\n // TODO(cais): Add loss_weights when it's supported.\n }\n\n loadTrainingConfig(trainingConfig: TrainingConfig) {\n if (trainingConfig.weighted_metrics != null) {\n throw new Error('Loading weight_metrics is not supported yet.');\n }\n if (trainingConfig.loss_weights != null) {\n throw new Error('Loading loss_weights is not supported yet.');\n }\n if (trainingConfig.sample_weight_mode != null) {\n throw new Error('Loading sample_weight_mode is not supported yet.');\n }\n\n const tsConfig = convertPythonicToTs(trainingConfig.optimizer_config) as\n serialization.ConfigDict;\n const optimizer = deserialize(tsConfig) as Optimizer;\n\n let loss;\n if (typeof trainingConfig.loss === 'string') {\n loss = toCamelCase(trainingConfig.loss);\n } else if (Array.isArray(trainingConfig.loss)) {\n loss = trainingConfig.loss.map(lossEntry => toCamelCase(lossEntry));\n } else if (trainingConfig.loss != null) {\n loss = {} as {[outputName: string]: LossIdentifier};\n for (const key in trainingConfig.loss) {\n loss[key] = toCamelCase(trainingConfig.loss[key]) as LossIdentifier;\n }\n }\n\n let metrics;\n if (Array.isArray(trainingConfig.metrics)) {\n metrics = trainingConfig.metrics.map(metric => toCamelCase(metric));\n } else if (trainingConfig.metrics != null) {\n metrics = {} as {[outputName: string]: MetricsIdentifier};\n for (const key in trainingConfig.metrics) {\n metrics[key] = toCamelCase(trainingConfig.metrics[key]);\n }\n }\n\n this.compile({loss, metrics, optimizer});\n }\n\n /**\n * Save the configuration and/or weights of the LayersModel.\n *\n * An `IOHandler` is an object that has a `save` method of the proper\n * signature defined. The `save` method manages the storing or\n * transmission of serialized data (\"artifacts\") that represent the\n * model's topology and weights onto or via a specific medium, such as\n * file downloads, local storage, IndexedDB in the web browser and HTTP\n * requests to a server. TensorFlow.js provides `IOHandler`\n * implementations for a number of frequently used saving mediums, such as\n * `tf.io.browserDownloads` and `tf.io.browserLocalStorage`. See `tf.io`\n * for more details.\n *\n * This method also allows you to refer to certain types of `IOHandler`s\n * as URL-like string shortcuts, such as 'localstorage://' and\n * 'indexeddb://'.\n *\n * Example 1: Save `model`'s topology and weights to browser [local\n * storage](https://developer.mozilla.org/en-US/docs/Web/API/Window/localStorage);\n * then load it back.\n *\n * ```js\n * const model = tf.sequential(\n * {layers: [tf.layers.dense({units: 1, inputShape: [3]})]});\n * console.log('Prediction from original model:');\n * model.predict(tf.ones([1, 3])).print();\n *\n * const saveResults = await model.save('localstorage://my-model-1');\n *\n * const loadedModel = await tf.loadLayersModel('localstorage://my-model-1');\n * console.log('Prediction from loaded model:');\n * loadedModel.predict(tf.ones([1, 3])).print();\n * ```\n *\n * Example 2. 
Saving `model`'s topology and weights to browser\n * [IndexedDB](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API);\n * then load it back.\n *\n * ```js\n * const model = tf.sequential(\n * {layers: [tf.layers.dense({units: 1, inputShape: [3]})]});\n * console.log('Prediction from original model:');\n * model.predict(tf.ones([1, 3])).print();\n *\n * const saveResults = await model.save('indexeddb://my-model-1');\n *\n * const loadedModel = await tf.loadLayersModel('indexeddb://my-model-1');\n * console.log('Prediction from loaded model:');\n * loadedModel.predict(tf.ones([1, 3])).print();\n * ```\n *\n * Example 3. Saving `model`'s topology and weights as two files\n * (`my-model-1.json` and `my-model-1.weights.bin`) downloaded from\n * browser.\n *\n * ```js\n * const model = tf.sequential(\n * {layers: [tf.layers.dense({units: 1, inputShape: [3]})]});\n * const saveResults = await model.save('downloads://my-model-1');\n * ```\n *\n * Example 4. Send `model`'s topology and weights to an HTTP server.\n * See the documentation of `tf.io.http` for more details\n * including specifying request parameters and implementation of the\n * server.\n *\n * ```js\n * const model = tf.sequential(\n * {layers: [tf.layers.dense({units: 1, inputShape: [3]})]});\n * const saveResults = await model.save('http://my-server/model/upload');\n * ```\n *\n * @param handlerOrURL An instance of `IOHandler` or a URL-like,\n * scheme-based string shortcut for `IOHandler`.\n * @param config Options for saving the model.\n * @returns A `Promise` of `SaveResult`, which summarizes the result of\n * the saving, such as byte sizes of the saved artifacts for the model's\n * topology and weight values.\n *\n * @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true}\n */\n async save(handlerOrURL: io.IOHandler|string, config?: io.SaveConfig):\n Promise {\n if (typeof handlerOrURL === 'string') {\n const handlers = io.getSaveHandlers(handlerOrURL);\n if (handlers.length === 0) {\n throw new ValueError(\n `Cannot find any save handlers for URL '${handlerOrURL}'`);\n } else if (handlers.length > 1) {\n throw new ValueError(\n `Found more than one (${handlers.length}) save handlers for ` +\n `URL '${handlerOrURL}'`);\n }\n handlerOrURL = handlers[0];\n }\n if (handlerOrURL.save == null) {\n throw new ValueError(\n 'LayersModel.save() cannot proceed because the IOHandler ' +\n 'provided does not have the `save` attribute defined.');\n }\n\n const weightDataAndSpecs =\n await io.encodeWeights(this.getNamedWeights(config));\n\n const returnString = false;\n const unusedArg: {} = null;\n const modelConfig = this.toJSON(unusedArg, returnString);\n const modelArtifacts: io.ModelArtifacts = {\n modelTopology: modelConfig,\n format: LAYERS_MODEL_FORMAT_NAME,\n generatedBy: `TensorFlow.js tfjs-layers v${version}`,\n convertedBy: null,\n };\n\n const includeOptimizer = config == null ? 
false : config.includeOptimizer;\n if (includeOptimizer && this.optimizer != null) {\n modelArtifacts.trainingConfig = this.getTrainingConfig();\n const weightType = 'optimizer';\n const {data: optimizerWeightData, specs: optimizerWeightSpecs} =\n await io.encodeWeights(await this.optimizer.getWeights(), weightType);\n weightDataAndSpecs.specs.push(...optimizerWeightSpecs);\n weightDataAndSpecs.data = io.concatenateArrayBuffers(\n [weightDataAndSpecs.data, optimizerWeightData]);\n }\n\n if (this.userDefinedMetadata != null) {\n // Check serialized size of user-defined metadata.\n const checkSize = true;\n checkUserDefinedMetadata(this.userDefinedMetadata, this.name, checkSize);\n modelArtifacts.userDefinedMetadata = this.userDefinedMetadata;\n }\n\n modelArtifacts.weightData = weightDataAndSpecs.data;\n modelArtifacts.weightSpecs = weightDataAndSpecs.specs;\n return handlerOrURL.save(modelArtifacts);\n }\n\n /**\n * Set user-defined metadata.\n *\n * The set metadata will be serialized together with the topology\n * and weights of the model during `save()` calls.\n *\n * @param setUserDefinedMetadata\n */\n setUserDefinedMetadata(userDefinedMetadata: {}): void {\n checkUserDefinedMetadata(userDefinedMetadata, this.name);\n this.userDefinedMetadata = userDefinedMetadata;\n }\n\n /**\n * Get user-defined metadata.\n *\n * The metadata is supplied via one of the two routes:\n * 1. By calling `setUserDefinedMetadata()`.\n * 2. Loaded during model loading (if the model is constructed\n * via `tf.loadLayersModel()`.)\n *\n * If no user-defined metadata is available from either of the\n * two routes, this function will return `undefined`.\n */\n getUserDefinedMetadata(): {} {\n return this.userDefinedMetadata;\n }\n}\nserialization.registerClass(LayersModel);\n\n/**\n * A `tf.Functional` is an alias to `tf.LayersModel`.\n *\n * See also:\n * `tf.LayersModel`, `tf.Sequential`, `tf.loadLayersModel`.\n */\n/** @doc {heading: 'Models', subheading: 'Classes'} */\nexport class Functional extends LayersModel {\n static override className = 'Functional';\n}\nserialization.registerClass(Functional);\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * Optimizers.\n */\n\nimport {Optimizer, train} from '@tensorflow/tfjs-core';\n\nimport {epsilon} from './backend/common';\n\nimport {ValueError} from './errors';\n\n// Add (de)serialize()\n\n// Porting note: This diverges from the PyKeras implementation and may need to\n// change based on (de)serialization requirements.\nexport function getOptimizer(identifier: string): Optimizer {\n const optimizerMap: {[optimizerName: string]: () => Optimizer} = {\n 'Adagrad': () => train.adagrad(0.01),\n 'Adadelta': () => train.adadelta(1, 0.95, epsilon()),\n 'Adam': () => train.adam(0.001, 0.9, 0.999, epsilon()),\n 'Adamax': () => train.adamax(0.002, 0.9, 0.999, epsilon(), 0),\n 'RMSProp': () => train.rmsprop(0.001, 0.9, 0, epsilon()),\n 'SGD': () => train.sgd(0.01)\n };\n optimizerMap['adagrad'] = optimizerMap['Adagrad'];\n optimizerMap['adadelta'] = optimizerMap['Adadelta'];\n optimizerMap['adam'] = optimizerMap['Adam'];\n optimizerMap['adamax'] = optimizerMap['Adamax'];\n optimizerMap['rmsprop'] = optimizerMap['RMSProp'];\n optimizerMap['sgd'] = optimizerMap['SGD'];\n\n if (identifier in optimizerMap) {\n return 
optimizerMap[identifier]();\n }\n throw new ValueError(`Unknown Optimizer ${identifier}`);\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/* Original source keras/models.py */\n\nimport {dispose, io, NamedTensorMap, Optimizer, Scalar, serialization, Tensor, util} from '@tensorflow/tfjs-core';\n\nimport {getUid} from './backend/state';\nimport {History} from './base_callbacks';\nimport {Dataset} from './engine/dataset_stub';\nimport {Input} from './engine/input_layer';\nimport {getSourceInputs, Layer, Node, SymbolicTensor} from './engine/topology';\nimport {LayersModel, ModelCompileArgs, ModelEvaluateArgs} from './engine/training';\nimport {ModelEvaluateDatasetArgs, ModelFitDatasetArgs} from './engine/training_dataset';\nimport {ModelFitArgs} from './engine/training_tensors';\nimport {NotImplementedError, RuntimeError, ValueError} from './errors';\nimport {Shape} from './keras_format/common';\nimport {TrainingConfig} from './keras_format/training_config';\nimport {PyJsonDict} from './keras_format/types';\nimport {deserialize} from './layers/serialization';\nimport {Kwargs, NamedTensor} from './types';\nimport * as generic_utils from './utils/generic_utils';\nimport {convertPythonicToTs} from './utils/serialization_utils';\nimport {getExactlyOneShape} from './utils/types_utils';\n\n/**\n * Parses a JSON model configuration file and returns a model instance.\n *\n * ```js\n * // This example shows how to serialize a model using `toJSON()` and\n * // deserialize it as another model using `tf.models.modelFromJSON()`.\n * // Note: this example serializes and deserializes only the topology\n * // of the model; the weights of the loaded model will be different\n * // from those of the the original model, due to random weight\n * // initialization.\n * // To load the topology and weights of a model, use `tf.loadLayersModel()`.\n * const model1 = tf.sequential();\n * model1.add(tf.layers.repeatVector({inputShape: [2], n: 4}));\n * // Serialize `model1` as a JSON object.\n * const model1JSON = model1.toJSON(null, false);\n * model1.summary();\n *\n * const model2 = await tf.models.modelFromJSON(model1JSON);\n * model2.summary();\n * ```\n *\n * @param modelAndWeightsConfig JSON object or string encoding a model and\n * weights configuration. 
It can also be only the topology JSON of the\n * model, in which case the weights will not be loaded.\n * @param custom_objects Optional dictionary mapping names\n * (strings) to custom classes or functions to be\n * considered during deserialization.\n * @returns A TensorFlow.js Layers `tf.LayersModel` instance (uncompiled).\n */\nexport async function modelFromJSON(\n modelAndWeightsConfig: ModelAndWeightsConfig|PyJsonDict,\n customObjects?: serialization.ConfigDict): Promise {\n if (!('modelTopology' in modelAndWeightsConfig)) {\n modelAndWeightsConfig = {modelTopology: modelAndWeightsConfig};\n }\n modelAndWeightsConfig = modelAndWeightsConfig as ModelAndWeightsConfig;\n\n let modelTopology = modelAndWeightsConfig.modelTopology;\n if (modelTopology['model_config'] != null) {\n // If the model-topology JSON contains a 'model_config' field, then it is\n // a full model JSON (e.g., from `keras.Model.save()`), which contains\n // not only the model's architecture in its 'model_config' field, but\n // additional information such as the model's optimizer. We use only the\n // 'model_config' field currently.\n modelTopology = modelTopology['model_config'] as PyJsonDict;\n }\n const tsConfig =\n convertPythonicToTs(modelTopology) as serialization.ConfigDict;\n const model = deserialize(tsConfig, customObjects) as LayersModel;\n\n if (modelAndWeightsConfig.weightsManifest != null) {\n // Load the weight values keyed by the original tensor names in the model\n // file that was loaded. These should match the keys of the weight\n // manifest.\n const weightValues = await io.loadWeights(\n modelAndWeightsConfig.weightsManifest, modelAndWeightsConfig.pathPrefix,\n model.weights.map(weight => weight.originalName));\n\n // Map the weights to the unique tensor names generated during model loading\n const uniqueWeightValues: NamedTensorMap = {};\n for (const weight of model.weights) {\n uniqueWeightValues[weight.originalName] =\n weightValues[weight.originalName];\n }\n\n model.loadWeights(uniqueWeightValues);\n // Dispose temporary weight values.\n dispose(weightValues);\n }\n return model;\n}\n\n/**\n * Options for loading a saved mode in TensorFlow.js format.\n */\nexport interface ModelAndWeightsConfig {\n /**\n * A JSON object or JSON string containing the model config.\n *\n * This can be either of the following two formats:\n * - A model archiecture-only config, i.e., a format consistent with the\n * return value of`keras.Model.to_json()`.\n * - A full model config, containing not only model architecture, but also\n * training options and state, i.e., a format consistent with the return\n * value of `keras.models.save_model()`.\n */\n modelTopology: PyJsonDict;\n\n /**\n * A weights manifest in TensorFlow.js format.\n */\n weightsManifest?: io.WeightsManifestConfig;\n\n /**\n * Path to prepend to the paths in `weightManifest` before fetching.\n *\n * The path may optionally end in a slash ('/').\n */\n pathPrefix?: string;\n}\n\n// TODO(nielsene): Remove after: https://github.com/tensorflow/tfjs/issues/400\nexport interface ModelPredictArgs {\n /**\n * Optional. Batch size (Integer). If unspecified, it will default to 32.\n */\n batchSize?: number;\n\n /**\n * Optional. Verbosity mode. Defaults to false.\n */\n verbose?: boolean;\n}\n\n/**\n * Load a model composed of Layer objects, including its topology and optionally\n * weights. See the Tutorial named \"How to import a Keras Model\" for usage\n * examples.\n *\n * This method is applicable to:\n *\n * 1. 
Models created with the `tf.layers.*`, `tf.sequential`, and\n * `tf.model` APIs of TensorFlow.js and later saved with the\n * `tf.LayersModel.save` method.\n * 2. Models converted from Keras or TensorFlow tf.keras using the\n * [tensorflowjs_converter](https://github.com/tensorflow/tfjs/tree/master/tfjs-converter).\n *\n * This mode is *not* applicable to TensorFlow `SavedModel`s or their converted\n * forms. For those models, use `tf.loadGraphModel`.\n *\n * Example 1. Load a model from an HTTP server.\n *\n * ```js\n * const model = await tf.loadLayersModel(\n * 'https://storage.googleapis.com/tfjs-models/tfjs/iris_v1/model.json');\n * model.summary();\n * ```\n *\n * Example 2: Save `model`'s topology and weights to browser [local\n * storage](https://developer.mozilla.org/en-US/docs/Web/API/Window/localStorage);\n * then load it back.\n *\n * ```js\n * const model = tf.sequential(\n * {layers: [tf.layers.dense({units: 1, inputShape: [3]})]});\n * console.log('Prediction from original model:');\n * model.predict(tf.ones([1, 3])).print();\n *\n * const saveResults = await model.save('localstorage://my-model-1');\n *\n * const loadedModel = await tf.loadLayersModel('localstorage://my-model-1');\n * console.log('Prediction from loaded model:');\n * loadedModel.predict(tf.ones([1, 3])).print();\n * ```\n *\n * Example 3. Saving `model`'s topology and weights to browser\n * [IndexedDB](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API);\n * then load it back.\n *\n * ```js\n * const model = tf.sequential(\n * {layers: [tf.layers.dense({units: 1, inputShape: [3]})]});\n * console.log('Prediction from original model:');\n * model.predict(tf.ones([1, 3])).print();\n *\n * const saveResults = await model.save('indexeddb://my-model-1');\n *\n * const loadedModel = await tf.loadLayersModel('indexeddb://my-model-1');\n * console.log('Prediction from loaded model:');\n * loadedModel.predict(tf.ones([1, 3])).print();\n * ```\n *\n * Example 4. Load a model from user-selected files from HTML\n * [file input\n * elements](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/file).\n *\n * ```js\n * // Note: this code snippet will not work without the HTML elements in the\n * // page\n * const jsonUpload = document.getElementById('json-upload');\n * const weightsUpload = document.getElementById('weights-upload');\n *\n * const model = await tf.loadLayersModel(\n * tf.io.browserFiles([jsonUpload.files[0], weightsUpload.files[0]]));\n * ```\n *\n * @param pathOrIOHandler Can be either of the two formats\n * 1. A string path to the `ModelAndWeightsConfig` JSON describing\n * the model in the canonical TensorFlow.js format. For file://\n * (tfjs-node-only), http:// and https:// schemas, the path can be\n * either absolute or relative. The content of the JSON file is assumed to\n * be a JSON object with the following fields and values:\n * - 'modelTopology': A JSON object that can be either of:\n * 1. a model architecture JSON consistent with the format of the return\n * value of `keras.Model.to_json()`\n * 2. a full model JSON in the format of `keras.models.save_model()`.\n * - 'weightsManifest': A TensorFlow.js weights manifest.\n * See the Python converter function `save_model()` for more details.\n * It is also assumed that model weights can be accessed from relative\n * paths described by the `paths` fields in weights manifest.\n * 2. 
A `tf.io.IOHandler` object that loads model artifacts with its `load`\n * method.\n * @param options Optional configuration arguments for the model loading,\n * including:\n * - `strict`: Require that the provided weights exactly match those required\n * by the layers. Default true. Passing false means that both extra\n * weights and missing weights will be silently ignored.\n * - `onProgress`: A progress callback of the form:\n * `(fraction: number) => void`. This callback can be used to monitor the\n * model-loading process.\n * @returns A `Promise` of `tf.LayersModel`, with the topology and weights\n * loaded.\n *\n * @doc {heading: 'Models', subheading: 'Loading'}\n */\nexport async function loadLayersModel(\n pathOrIOHandler: string|io.IOHandler,\n options?: io.LoadOptions): Promise {\n if (options == null) {\n options = {};\n }\n if (typeof pathOrIOHandler === 'string') {\n const handlers = io.getLoadHandlers(pathOrIOHandler, options);\n if (handlers.length === 0) {\n // For backward compatibility: if no load handler can be found,\n // assume it is a relative http path.\n // TODO(cais): Reformat the args into a single `LoadOptions` once the core\n // is refactored.\n handlers.push(io.browserHTTPRequest(pathOrIOHandler, options));\n } else if (handlers.length > 1) {\n throw new ValueError(\n `Found more than one (${handlers.length}) load handlers for ` +\n `URL '${pathOrIOHandler}'`);\n }\n pathOrIOHandler = handlers[0];\n }\n return loadLayersModelFromIOHandler(pathOrIOHandler, undefined, options);\n}\n\n/**\n * Load a model and optionally its weights, using an IOHandler object.\n *\n * @param handler The instance of `IOHandler` to be used during the model\n * loading.\n * @param customObjects Any optional custom objects to be used during model\n * loading.\n * @param strict Whether the weight loading will be done in strict mode.\n * Default: `true`.\n */\nexport async function loadLayersModelFromIOHandler(\n handler: io.IOHandler, customObjects?: serialization.ConfigDict,\n options?: io.LoadOptions): Promise {\n if (options == null) {\n options = {};\n }\n if (handler.load == null) {\n throw new ValueError(\n 'Cannot proceed with model loading because the IOHandler provided ' +\n 'does not have the `load` method implemented.');\n }\n const artifacts = await handler.load();\n let modelTopology = artifacts.modelTopology as PyJsonDict;\n if (modelTopology['model_config'] != null) {\n modelTopology = modelTopology['model_config'] as PyJsonDict;\n }\n\n const strict = options.strict == null ? true : options.strict;\n // If weights are provided and the weight-loading mode is strict, use\n // fast weight initialization. 
This skips costly initializers such as\n // 'orthogonal' and saves unnecessary computation in cases where\n // the initialized weight values will immediately be overwritten by\n // loaded weight values.\n const fastWeightInit =\n artifacts.weightData != null && artifacts.weightSpecs != null && strict;\n const model =\n deserialize(\n convertPythonicToTs(modelTopology) as serialization.ConfigDict,\n customObjects, fastWeightInit) as LayersModel;\n\n const trainingConfig = artifacts.trainingConfig as TrainingConfig;\n if (trainingConfig != null) {\n model.loadTrainingConfig(trainingConfig);\n }\n if (artifacts.userDefinedMetadata != null) {\n model.setUserDefinedMetadata(artifacts.userDefinedMetadata);\n }\n\n // If weightData is present, load the weights into the model.\n if (artifacts.weightData != null) {\n // Loading weights requires weightSpecs.\n if (artifacts.weightSpecs == null) {\n throw new ValueError(\n 'LayersModel artifacts contains weight data, but not weight specs. ' +\n 'Therefore loading of weights cannot proceed.');\n }\n\n const {modelWeights, optimizerWeights} = decodeModelAndOptimizerWeights(\n artifacts.weightData, artifacts.weightSpecs);\n model.loadWeights(modelWeights, strict);\n\n if (model.optimizer != null && optimizerWeights.length > 0) {\n await model.optimizer.setWeights(optimizerWeights);\n }\n\n // Dispose temporary weight values.\n dispose(modelWeights);\n dispose(optimizerWeights.map(w => w.tensor));\n }\n return model;\n}\n\nfunction decodeModelAndOptimizerWeights(\n buffer: ArrayBuffer, specs: io.WeightsManifestEntry[]):\n {modelWeights: NamedTensorMap, optimizerWeights: NamedTensor[]} {\n const name2Tensor = io.decodeWeights(buffer, specs);\n const modelWeights: NamedTensorMap = {};\n const optimizerWeights: NamedTensor[] = [];\n specs.forEach(spec => {\n if (spec.group === 'optimizer') {\n optimizerWeights.push({name: spec.name, tensor: name2Tensor[spec.name]});\n } else {\n modelWeights[spec.name] = name2Tensor[spec.name];\n }\n });\n return {modelWeights, optimizerWeights};\n}\n\n/**\n * Configuration for a Sequential model.\n */\nexport interface SequentialArgs {\n /** Stack of layers for the model. */\n layers?: Layer[];\n\n /** The name of this model. */\n name?: string;\n}\n\n/**\n * A model with a stack of layers, feeding linearly from one to the next.\n *\n * `tf.sequential` is a factory function that creates an instance of\n * `tf.Sequential`.\n *\n * ```js\n * // Define a model for linear regression.\n * const model = tf.sequential();\n * model.add(tf.layers.dense({units: 1, inputShape: [1]}));\n *\n * // Prepare the model for training: Specify the loss and the optimizer.\n * model.compile({loss: 'meanSquaredError', optimizer: 'sgd'});\n *\n * // Generate some synthetic data for training.\n * const xs = tf.tensor2d([1, 2, 3, 4], [4, 1]);\n * const ys = tf.tensor2d([1, 3, 5, 7], [4, 1]);\n *\n * // Train the model using the data then do inference on a data point the\n * // model hasn't seen:\n * await model.fit(xs, ys);\n * model.predict(tf.tensor2d([5], [1, 1])).print();\n * ```\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\nexport class Sequential extends LayersModel {\n /** @nocollapse */\n static override className = 'Sequential';\n private model: LayersModel;\n constructor(args?: SequentialArgs) {\n super({inputs: [], outputs: []});\n args = args || {};\n\n this.trainable = true;\n this.built = false;\n\n // Set model name.\n this.name = (args.name != null) ? 
args.name : getUid('sequential_');\n\n // Add to the model any layers passed to the constructor.\n if (args.layers != null) {\n for (const layer of args.layers) {\n this.add(layer);\n }\n }\n }\n\n // Helper function to Sequential.add Throws if the new output shape will be\n // invalid.\n private checkShape(layer: Layer) {\n const shape = layer.inboundNodes[0].outputTensors[0].shape;\n if (shape.some(x => x < 0)) {\n throw new ValueError(\n 'Negative dimension size caused by adding layer ' +\n `${layer.name} with input shape [` +\n `${layer.inboundNodes[0].inputTensors[0].shape}]`);\n }\n }\n\n /**\n * Adds a layer instance on top of the layer stack.\n *\n * ```js\n * const model = tf.sequential();\n * model.add(tf.layers.dense({units: 8, inputShape: [1]}));\n * model.add(tf.layers.dense({units: 4, activation: 'relu6'}));\n * model.add(tf.layers.dense({units: 1, activation: 'relu6'}));\n * // Note that the untrained model is random at this point.\n * model.predict(tf.randomNormal([10, 1])).print();\n * ```\n * @param layer Layer instance.\n *\n * @exception ValueError In case the `layer` argument does not know its\n * input shape.\n * @exception ValueError In case the `layer` argument has multiple output\n * tensors, or is already connected somewhere else (forbidden in\n * `Sequential` models).\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n add(layer: Layer): void {\n const isLayerModelInstance =\n layer instanceof Sequential || layer instanceof LayersModel;\n let modelLayer: LayersModel;\n if (isLayerModelInstance) {\n modelLayer = layer as LayersModel;\n if (modelLayer.outputs.length !== 1) {\n throw new ValueError(\n 'All layers in a Sequential model ' +\n 'should have a single output tensor. ' +\n 'For multi-output layers, ' +\n 'use the functional API.');\n }\n if (modelLayer.inputs.length !== 1) {\n throw new ValueError(\n 'All layers in a Sequential model ' +\n 'should have a single input tensor. ' +\n 'For multi-input layers, ' +\n 'use the functional API.');\n }\n }\n\n if (this.outputs.length === 0) {\n // first layer in model: check that it is an input layer\n if (layer.inboundNodes.length === 0) {\n // create an input layer\n if (layer.batchInputShape == null) {\n throw new ValueError(\n 'The first layer in a Sequential model must ' +\n 'get an `inputShape` or `batchInputShape` argument.');\n }\n // Instantiate the input layer.\n const x = Input({\n batchShape: layer.batchInputShape,\n dtype: layer.dtype,\n name: layer.name + '_input'\n });\n // This will build the current layer and create the node connecting\n // the current layer to the input layer we just created.\n layer.apply(x);\n }\n\n if (isLayerModelInstance) {\n this.outputs = modelLayer.outputs;\n this.inputs = modelLayer.inputs;\n } else {\n if (layer.inboundNodes.length !== 1) {\n throw new ValueError(\n 'A layer added to a Sequential model must not already be ' +\n `connected somewhere else. LayersModel received layer ${\n layer.name} ` +\n `which has ${layer.inboundNodes.length} pre-existing inbound ` +\n 'connections.');\n }\n\n if (layer.inboundNodes[0].outputTensors.length !== 1) {\n throw new ValueError(\n 'All layers in a Sequential model ' +\n 'should have a single output tensor. 
' +\n 'For multi-output layers, ' +\n 'use the functional API.');\n }\n this.checkShape(layer);\n this.outputs = [layer.inboundNodes[0].outputTensors[0]];\n this.inputs = getSourceInputs(this.outputs[0]);\n }\n\n this.inboundNodes = [];\n // We create an input node, which we will keep updated\n // as we add more layers.\n // (This call has side effects.)\n // tslint:disable-next-line:no-unused-expression\n new Node({\n outboundLayer: this,\n inboundLayers: [],\n nodeIndices: [],\n tensorIndices: [],\n inputTensors: this.inputs,\n outputTensors: this.outputs,\n // no model-level masking for now\n inputMasks: generic_utils.pyListRepeat(null, this.inputs.length),\n outputMasks: [null],\n inputShapes: this.inputs.map(x => x.shape),\n outputShapes: this.outputs[0].shape\n });\n } else {\n const outputTensor = layer.apply(this.outputs[0]);\n if (Array.isArray(outputTensor)) {\n throw new TypeError(\n 'All layers in a Sequential model ' +\n 'should have a single output tensor. ' +\n 'For multi-output layers, ' +\n 'use the functional API.');\n }\n this.checkShape(layer);\n this.outputs = [outputTensor as SymbolicTensor];\n // update self.inbound_nodes\n this.inboundNodes[0].outputTensors = this.outputs;\n this.inboundNodes[0].outputShapes = [this.outputs[0].shape];\n }\n\n this.layers.push(layer);\n this.built = false;\n }\n\n /**\n * Removes the last layer in the model.\n *\n * @exception TypeError if there are no layers in the model.\n */\n pop(): void {\n if (this.layers.length === 0) {\n throw new TypeError('There are no layers in the model.');\n }\n\n this.layers.pop();\n if (this.layers.length === 0) {\n this.outputs = [];\n this.inboundNodes = [];\n this.outboundNodes = [];\n } else {\n const lastLayerIndex = this.layers.length - 1;\n this.layers[lastLayerIndex].outboundNodes = [];\n this.outputs = [this.layers[lastLayerIndex].output as SymbolicTensor];\n // update self.inbound_nodes\n this.inboundNodes[0].outputTensors = this.outputs;\n this.inboundNodes[0].outputShapes = [this.outputs[0].shape];\n }\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n if (this.model == null) {\n this.build();\n }\n return this.model.call(inputs, kwargs);\n }\n\n override build(inputShape?: Shape|Shape[]) {\n // Call `getExactlyOneShape` without using its return value,\n // to verify that exactly one input shape is provided.\n getExactlyOneShape(inputShape);\n\n if (this.inputs.length === 0 || this.outputs.length === 0) {\n throw new TypeError(\n 'Sequential model cannot be built: model is empty.' 
+\n ' Add some layers first.');\n }\n // actually create the model\n this.model = new LayersModel({\n inputs: this.inputs,\n outputs: this.outputs[0],\n name: this.name + '_model'\n });\n this.model.trainable = this.trainable;\n\n // mirror model attributes\n this.supportsMasking = this.model.supportsMasking;\n // TODO(michaelterry): Add caches\n this.inputLayers = this.model.inputLayers;\n this.inputLayersNodeIndices = this.model.inputLayersNodeIndices;\n this.inputLayersTensorIndices = this.model.inputLayersTensorIndices;\n this.outputLayers = this.model.outputLayers;\n this.outputLayersNodeIndices = this.model.outputLayersNodeIndices;\n this.outputLayersTensorIndices = this.model.outputLayersTensorIndices;\n this.nodesByDepth = this.model.nodesByDepth;\n this.containerNodes = this.model.containerNodes;\n this.outputNames = this.model.outputNames;\n this.inputNames = this.model.inputNames;\n // TODO(michaelterry): Add feedInputNames, feedInputs, if needed.\n // TODO(michaelterry): Add callbackModel if needed.\n this.built = true;\n }\n\n override countParams(): number {\n if (!this.built) {\n this.build();\n }\n return super.countParams();\n }\n\n /**\n * Print a text summary of the Sequential model's layers.\n *\n * The summary includes\n * - Name and type of all layers that comprise the model.\n * - Output shape(s) of the layers\n * - Number of weight parameters of each layer\n * - The total number of trainable and non-trainable parameters of the\n * model.\n *\n * ```js\n * const model = tf.sequential();\n * model.add(\n * tf.layers.dense({units: 100, inputShape: [10], activation: 'relu'}));\n * model.add(tf.layers.dense({units: 1, activation: 'sigmoid'}));\n *\n * model.summary();\n * ```\n *\n * @param lineLength Custom line length, in number of characters.\n * @param positions Custom widths of each of the columns, as either\n * fractions of `lineLength` (e.g., `[0.5, 0.75, 1]`) or absolute number\n * of characters (e.g., `[30, 50, 65]`). Each number corresponds to\n * right-most (i.e., ending) position of a column.\n * @param printFn Custom print function. Can be used to replace the default\n * `console.log`. 
For example, you can use `x => {}` to mute the printed\n * messages in the console.\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n override summary(\n lineLength?: number, positions?: number[],\n printFn:\n // tslint:disable-next-line:no-any\n (message?: any, ...optionalParams: any[]) => void = console.log) {\n if (!this.built) {\n this.build();\n }\n super.summary(lineLength, positions, printFn);\n }\n\n /**\n * Sets the weights of the model.\n *\n * @param weights Should be a list of Tensors with shapes and types matching\n * the output of `model.getWeights()`.\n */\n override setWeights(weights: Tensor[]): void {\n if (this.model == null) {\n this.build();\n }\n this.model.setWeights(weights);\n }\n\n /**\n * Returns the loss value & metrics values for the model in test mode.\n *\n * Loss and metrics are specified during `compile()`, which needs to happen\n * before calls to `evaluate()`.\n *\n * Computation is done in batches.\n *\n * ```js\n * const model = tf.sequential({\n * layers: [tf.layers.dense({units: 1, inputShape: [10]})]\n * });\n * model.compile({optimizer: 'sgd', loss: 'meanSquaredError'});\n * const result = model.evaluate(tf.ones([8, 10]), tf.ones([8, 1]), {\n * batchSize: 4,\n * });\n * result.print();\n * ```\n *\n * @param x `tf.Tensor` of test data, or an `Array` of `tf.Tensor`s if the\n * model has multiple inputs.\n * @param y `tf.Tensor` of target data, or an `Array` of `tf.Tensor`s if the\n * model has multiple outputs.\n * @param args A `ModelEvaluateConfig`, containing optional fields.\n *\n * @return `Scalar` test loss (if the model has a single output and no\n * metrics) or `Array` of `Scalar`s (if the model has multiple outputs\n * and/or metrics). The attribute `model.metricsNames`\n * will give you the display labels for the scalar outputs.\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n override evaluate(\n x: Tensor|Tensor[], y: Tensor|Tensor[],\n args: ModelEvaluateArgs = {}): Scalar|Scalar[] {\n if (!this.built) {\n throw new RuntimeError(\n 'The model needs to be compiled before being used.');\n }\n return this.model.evaluate(x, y, args);\n }\n\n // TODO(cais): Add code snippet below once real dataset objects are\n // available.\n /**\n * Evaluate model using a dataset object.\n *\n * Note: Unlike `evaluate()`, this method is asynchronous (`async`).\n *\n * @param dataset A dataset object. Its `iterator()` method is expected\n * to generate a dataset iterator object, the `next()` method of which\n * is expected to produce data batches for evaluation. The return value\n * of the `next()` call ought to contain a boolean `done` field and a\n * `value` field. The `value` field is expected to be an array of two\n * `tf.Tensor`s or an array of two nested `tf.Tensor` structures. The former\n * case is for models with exactly one input and one output (e.g.\n * a sequential model). The latter case is for models with multiple\n * inputs and/or multiple outputs. 
Of the two items in the array, the\n * first is the input feature(s) and the second is the output target(s).\n * @param args A configuration object for the dataset-based evaluation.\n * @returns Loss and metric values as an Array of `Scalar` objects.\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n override async evaluateDataset(dataset: Dataset<{}>,\n args: ModelEvaluateDatasetArgs): Promise {\n if (!this.built) {\n throw new RuntimeError(\n 'The model needs to be compiled before being used.');\n }\n return this.model.evaluateDataset(dataset, args);\n }\n\n /**\n * Generates output predictions for the input samples.\n *\n * Computation is done in batches.\n *\n * Note: the \"step\" mode of predict() is currently not supported.\n * This is because the TensorFlow.js core backend is imperative only.\n *\n * ```js\n * const model = tf.sequential({\n * layers: [tf.layers.dense({units: 1, inputShape: [10]})]\n * });\n * model.predict(tf.ones([2, 10])).print();\n * ```\n *\n * @param x The input data, as a Tensor, or an `Array` of `tf.Tensor`s if\n * the model has multiple inputs.\n * @param conifg A `ModelPredictConfig` object containing optional fields.\n *\n * @return `tf.Tensor`(s) of predictions.\n *\n * @exception ValueError In case of mismatch between the provided input data\n * and the model's expectations, or in case a stateful model receives a\n * number of samples that is not a multiple of the batch size.\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n override predict(x: Tensor|Tensor[], args: ModelPredictArgs = {}):\n Tensor|Tensor[] {\n if (this.model == null) {\n this.build();\n }\n return this.model.predict(x, args);\n }\n\n /**\n * Returns predictions for a single batch of samples.\n *\n * @param x: Input samples, as a Tensor, or list of Tensors (if the model\n * has multiple inputs).\n * @return Tensor(s) of predictions\n */\n override predictOnBatch(x: Tensor): Tensor|Tensor[] {\n if (this.model == null) {\n this.build();\n }\n return this.model.predictOnBatch(x);\n }\n\n /**\n * See `LayersModel.compile`.\n *\n * @param args\n */\n override compile(args: ModelCompileArgs): void {\n this.build();\n this.model.compile(args);\n this.optimizer_ = this.model.optimizer;\n // tslint:disable-next-line:no-any\n this.isOptimizerOwned = (this.model as any).isOptimizerOwned;\n this.loss = this.model.loss;\n this.metrics = this.model.metrics;\n // TODO(cais): Add this.lossWeights, this.sampleWeightMode,\n // this.weightedMetrics, this.targets.\n this.metricsTensors = this.model.metricsTensors;\n this.metricsNames = this.model.metricsNames;\n // TODO(cais): Add sampleWeights.\n }\n\n override get optimizer(): Optimizer {\n return this.model == null ? undefined : this.model.optimizer;\n }\n\n override set optimizer(optimizer: Optimizer) {\n this.model.optimizer = optimizer;\n }\n\n /**\n * Trains the model for a fixed number of epochs (iterations on a dataset).\n *\n * ```js\n * const model = tf.sequential({\n * layers: [tf.layers.dense({units: 1, inputShape: [10]})]\n * });\n * model.compile({optimizer: 'sgd', loss: 'meanSquaredError'});\n * const history = await model.fit(tf.ones([8, 10]), tf.ones([8, 1]), {\n * batchSize: 4,\n * epochs: 3\n * });\n * console.log(history.history.loss[0]);\n * ```\n *\n * @param x `tf.Tensor` of training data, or an array of `tf.Tensor`s if the\n * model has multiple inputs. 
If all inputs in the model are named, you can\n * also pass a dictionary mapping input names to `tf.Tensor`s.\n * @param y `tf.Tensor` of target (label) data, or an array of `tf.Tensor`s if\n * the model has multiple outputs. If all outputs in the model are named, you\n * can also pass a dictionary mapping output names to `tf.Tensor`s.\n * @param args A `ModelFitConfig`, containing optional fields.\n *\n * @return A `History` instance. Its `history` attribute contains all\n * information collected during training.\n *\n * @exception ValueError In case of mismatch between the provided input data\n * and what the model expects.\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n override async fit(\n x: Tensor|Tensor[]|{[inputName: string]: Tensor},\n y: Tensor|Tensor[]|{[inputName: string]: Tensor},\n args: ModelFitArgs = {}): Promise {\n if (!this.built) {\n throw new RuntimeError(\n 'The model needs to be compiled before ' +\n 'being used.');\n }\n return this.model.fit(x, y, args);\n }\n\n /**\n * Trains the model using a dataset object.\n *\n * ```js\n * const xArray = [\n * [1, 1, 1, 1, 1, 1, 1, 1, 1],\n * [1, 1, 1, 1, 1, 1, 1, 1, 1],\n * [1, 1, 1, 1, 1, 1, 1, 1, 1],\n * [1, 1, 1, 1, 1, 1, 1, 1, 1],\n * ];\n * const yArray = [1, 1, 1, 1];\n * // Create a dataset from the JavaScript array.\n * const xDataset = tf.data.array(xArray);\n * const yDataset = tf.data.array(yArray);\n * // Zip combines the `x` and `y` Datasets into a single Dataset, the\n * // iterator of which will return an object containing of two tensors,\n * // corresponding to `x` and `y`. The call to `batch(4)` will bundle\n * // four such samples into a single object, with the same keys now pointing\n * // to tensors that hold 4 examples, organized along the batch dimension.\n * // The call to `shuffle(4)` causes each iteration through the dataset to\n * // happen in a different order. The size of the shuffle window is 4.\n * const xyDataset = tf.data.zip({xs: xDataset, ys: yDataset})\n * .batch(4)\n * .shuffle(4);\n * const model = tf.sequential({\n * layers: [tf.layers.dense({units: 1, inputShape: [9]})]\n * });\n * model.compile({optimizer: 'sgd', loss: 'meanSquaredError'});\n * const history = await model.fitDataset(xyDataset, {\n * epochs: 4,\n * callbacks: {onEpochEnd: (epoch, logs) => console.log(logs.loss)}\n * });\n * ```\n *\n * @param dataset A dataset object. Its `iterator()` method is expected to\n * generate a dataset iterator object, the `next()` method of which is\n * expected to produce data batches for evaluation. The return value of the\n * `next()` call ought to contain a boolean `done` field and a `value`\n * field.\n *\n * The `value` field is expected to be an object of with fields\n * `xs` and `ys`, which point to the feature tensor and the target tensor,\n * respectively. This case is for models with exactly one input and one\n * output (e.g. a sequential model). 
For example:\n * ```js\n * {value: {xs: xsTensor, ys: ysTensor}, done: false}\n * ```\n *\n * If the model has multiple inputs, the `xs` field of `value` should\n * be an object mapping input names to their respective feature tensors.\n * For example:\n * ```js\n * {\n * value: {\n * xs: {\n * input_1: xsTensor1,\n * input_2: xsTensor2\n * },\n * ys: ysTensor\n * },\n * done: false\n * }\n * ```\n * If the model has multiple outputs, the `ys` field of `value` should\n * be an object mapping output names to their respective target tensors.\n * For example:\n * ```js\n * {\n * value: {\n * xs: xsTensor,\n * ys: {\n * output_1: ysTensor1,\n * output_2: ysTensor2\n * },\n * },\n * done: false\n * }\n * ```\n * @param args A `ModelFitDatasetArgs`, containing optional fields.\n *\n * @return A `History` instance. Its `history` attribute contains all\n * information collected during training.\n *\n * @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true}\n */\n override async fitDataset(dataset: Dataset,\n args: ModelFitDatasetArgs): Promise {\n if (!this.built) {\n throw new RuntimeError(\n 'The model needs to be compiled before ' +\n 'being used.');\n }\n return this.model.fitDataset(dataset, args);\n }\n\n /**\n * Runs a single gradient update on a single batch of data.\n *\n * This method differs from `fit()` and `fitDataset()` in the following\n * regards:\n * - It operates on exactly one batch of data.\n * - It returns only the loss and metric values, instead of\n * returning the batch-by-batch loss and metric values.\n * - It doesn't support fine-grained options such as verbosity and\n * callbacks.\n *\n * @param x Input data. It could be one of the following:\n * - A `tf.Tensor`, or an Array of `tf.Tensor`s (in case the model has\n * multiple inputs).\n * - An Object mapping input names to corresponding `tf.Tensor` (if the\n * model has named inputs).\n * @param y Target data. It could be either a `tf.Tensor` or multiple\n * `tf.Tensor`s. 
It should be consistent with `x`.\n * @returns Training loss or losses (in case the model has\n * multiple outputs), along with metrics (if any), as numbers.\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n override async trainOnBatch(\n x: Tensor|Tensor[]|{[inputName: string]: Tensor},\n y: Tensor|Tensor[]|\n {[inputName: string]: Tensor}): Promise {\n return this.model.trainOnBatch(x, y);\n }\n\n /* See parent class for JsDoc */\n /** @nocollapse */\n static override fromConfig(\n cls: serialization.SerializableConstructor,\n config: serialization.ConfigDict,\n customObjects = {} as serialization.ConfigDict,\n fastWeightInit = false): T {\n let configArray: serialization.ConfigDictArray;\n let extraModelConfig: serialization.ConfigDict = {};\n if (config instanceof Array) {\n if (!(config[0].className != null) ||\n config[0]['className'] === 'Merge') {\n throw new ValueError('Legacy serialization format not supported yet.');\n }\n configArray = config;\n } else {\n util.assert(\n config['layers'] != null,\n () =>\n `When the config data for a Sequential model is not an Array, ` +\n `it must be an Object that contains the 'layers' field.`);\n configArray = config['layers'] as serialization.ConfigDictArray;\n delete config['layers'];\n extraModelConfig = config;\n }\n\n const model = new cls(extraModelConfig);\n if (!(model instanceof Sequential)) {\n throw new NotImplementedError(\n `Sequential.fromConfig called on non-Sequential input: ${model}`);\n }\n for (const conf of configArray) {\n const customObjects: serialization.ConfigDict = undefined;\n const layer = deserialize(\n conf as serialization.ConfigDict, customObjects,\n fastWeightInit) as Layer;\n if (fastWeightInit) {\n layer.setFastWeightInitDuringBuild(true);\n }\n model.add(layer);\n }\n return model;\n }\n\n /**\n * Setter used for force stopping of LayersModel.fit() (i.e., training).\n *\n * Example:\n *\n * ```js\n * const model = tf.sequential();\n * model.add(tf.layers.dense({units: 1, inputShape: [10]}));\n * model.compile({loss: 'meanSquaredError', optimizer: 'sgd'});\n * const xs = tf.ones([8, 10]);\n * const ys = tf.zeros([8, 1]);\n *\n * const history = await model.fit(xs, ys, {\n * epochs: 10,\n * callbacks: {\n * onEpochEnd: async (epoch, logs) => {\n * if (epoch === 2) {\n * model.stopTraining = true;\n * }\n * }\n * }\n * });\n *\n * // There should be only 3 values in the loss array, instead of 10 values,\n * // due to the stopping after 3 epochs.\n * console.log(history.history.loss);\n * ```\n */\n override set stopTraining(stop: boolean) {\n // TODO(cais): When refactoring to remove the composition pattern happens,\n // remove this method overriding.\n if (this.model == null) {\n throw new ValueError(\n 'Cannot set the stopTraining property of a sequential model before ' +\n 'it is compiled.');\n }\n this.model.stopTraining = stop;\n }\n\n override get stopTraining(): boolean {\n if (this.model == null) {\n throw new ValueError(\n 'Cannot get the stopTraining property of a sequential model before ' +\n 'it is compiled.');\n }\n return this.model.stopTraining;\n }\n\n // TODO(cais): Override get trainableWeights() here\n\n // tslint:disable-next-line:no-any\n override getConfig(): any {\n // NOTE(cais): We override the return type of getConfig() to `any` here,\n // because the `Sequential` class is a special case among `Container`\n // subtypes in that its getConfig() method returns an Array (not a\n // dict).\n const layers: serialization.ConfigDict[] = [];\n for (const layer of this.layers) {\n 
const dict: serialization.ConfigDict = {};\n dict['className'] = layer.getClassName();\n dict['config'] = layer.getConfig();\n layers.push(dict);\n }\n return {name: this.name, layers};\n }\n}\nserialization.registerClass(Sequential);\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * Exported functions.\n */\n\nimport {BaseCallbackConstructor, CallbackConstructorRegistry} from './base_callbacks';\nimport {ContainerArgs} from './engine/container';\nimport {Input, InputConfig,} from './engine/input_layer';\nimport {SymbolicTensor} from './engine/topology';\nimport {LayersModel} from './engine/training';\nimport {Sequential, SequentialArgs} from './models';\n\nexport {loadLayersModel} from './models';\n\n// TODO(cais): Add doc string to all the public static functions in this\n// class; include exectuable JavaScript code snippets where applicable\n// (b/74074458).\n\n// LayersModel and related factory methods.\n\n/**\n * A model is a data structure that consists of `Layers` and defines inputs\n * and outputs.\n *\n * The key difference between `tf.model` and `tf.sequential` is that\n * `tf.model` is more generic, supporting an arbitrary graph (without\n * cycles) of layers. `tf.sequential` is less generic and supports only a linear\n * stack of layers.\n *\n * When creating a `tf.LayersModel`, specify its input(s) and output(s). Layers\n * are used to wire input(s) to output(s).\n *\n * For example, the following code snippet defines a model consisting of\n * two `dense` layers, with 10 and 4 units, respectively.\n *\n * ```js\n * // Define input, which has a size of 5 (not including batch dimension).\n * const input = tf.input({shape: [5]});\n *\n * // First dense layer uses relu activation.\n * const denseLayer1 = tf.layers.dense({units: 10, activation: 'relu'});\n * // Second dense layer uses softmax activation.\n * const denseLayer2 = tf.layers.dense({units: 4, activation: 'softmax'});\n *\n * // Obtain the output symbolic tensor by applying the layers on the input.\n * const output = denseLayer2.apply(denseLayer1.apply(input));\n *\n * // Create the model based on the inputs.\n * const model = tf.model({inputs: input, outputs: output});\n *\n * // The model can be used for training, evaluation and prediction.\n * // For example, the following line runs prediction with the model on\n * // some fake data.\n * model.predict(tf.ones([2, 5])).print();\n * ```\n * See also:\n * `tf.sequential`, `tf.loadLayersModel`.\n *\n * @doc {heading: 'Models', subheading: 'Creation'}\n */\nexport function model(args: ContainerArgs): LayersModel {\n return new LayersModel(args);\n}\n\n/**\n * Creates a `tf.Sequential` model. A sequential model is any model where the\n * outputs of one layer are the inputs to the next layer, i.e. the model\n * topology is a simple 'stack' of layers, with no branching or skipping.\n *\n * This means that the first layer passed to a `tf.Sequential` model should have\n * a defined input shape. What that means is that it should have received an\n * `inputShape` or `batchInputShape` argument, or for some type of layers\n * (recurrent, Dense...) 
an `inputDim` argument.\n *\n * The key difference between `tf.model` and `tf.sequential` is that\n * `tf.sequential` is less generic, supporting only a linear stack of layers.\n * `tf.model` is more generic and supports an arbitrary graph (without\n * cycles) of layers.\n *\n * Examples:\n *\n * ```js\n * const model = tf.sequential();\n *\n * // First layer must have an input shape defined.\n * model.add(tf.layers.dense({units: 32, inputShape: [50]}));\n * // Afterwards, TF.js does automatic shape inference.\n * model.add(tf.layers.dense({units: 4}));\n *\n * // Inspect the inferred shape of the model's output, which equals\n * // `[null, 4]`. The 1st dimension is the undetermined batch dimension; the\n * // 2nd is the output size of the model's last layer.\n * console.log(JSON.stringify(model.outputs[0].shape));\n * ```\n *\n * It is also possible to specify a batch size (with potentially undetermined\n * batch dimension, denoted by \"null\") for the first layer using the\n * `batchInputShape` key. The following example is equivalent to the above:\n *\n * ```js\n * const model = tf.sequential();\n *\n * // First layer must have a defined input shape\n * model.add(tf.layers.dense({units: 32, batchInputShape: [null, 50]}));\n * // Afterwards, TF.js does automatic shape inference.\n * model.add(tf.layers.dense({units: 4}));\n *\n * // Inspect the inferred shape of the model's output.\n * console.log(JSON.stringify(model.outputs[0].shape));\n * ```\n *\n * You can also use an `Array` of already-constructed `Layer`s to create\n * a `tf.Sequential` model:\n *\n * ```js\n * const model = tf.sequential({\n * layers: [tf.layers.dense({units: 32, inputShape: [50]}),\n * tf.layers.dense({units: 4})]\n * });\n * console.log(JSON.stringify(model.outputs[0].shape));\n * ```\n *\n * @doc {heading: 'Models', subheading: 'Creation'}\n */\nexport function sequential(config?: SequentialArgs): Sequential {\n return new Sequential(config);\n}\n\n/**\n * Used to instantiate an input to a model as a `tf.SymbolicTensor`.\n *\n * Users should call the `input` factory function for\n * consistency with other generator functions.\n *\n * Example:\n *\n * ```js\n * // Defines a simple logistic regression model with 32 dimensional input\n * // and 3 dimensional output.\n * const x = tf.input({shape: [32]});\n * const y = tf.layers.dense({units: 3, activation: 'softmax'}).apply(x);\n * const model = tf.model({inputs: x, outputs: y});\n * model.predict(tf.ones([2, 32])).print();\n * ```\n *\n * Note: `input` is only necessary when using `model`. 
When using\n * `sequential`, specify `inputShape` for the first layer or use `inputLayer`\n * as the first layer.\n *\n * @doc {heading: 'Models', subheading: 'Inputs'}\n */\nexport function input(config: InputConfig): SymbolicTensor {\n return Input(config);\n}\n\nexport function registerCallbackConstructor(\n verbosityLevel: number,\n callbackConstructor: BaseCallbackConstructor): void {\n CallbackConstructorRegistry.registerCallbackConstructor(\n verbosityLevel, callbackConstructor);\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n// Layer activation functions\nimport * as tfc from '@tensorflow/tfjs-core';\nimport {serialization, Tensor, tidy} from '@tensorflow/tfjs-core';\nimport * as K from './backend/tfjs_backend';\nimport {ActivationIdentifier} from './keras_format/activation_config';\nimport {deserializeKerasObject} from './utils/generic_utils';\n\n/**\n * Base class for Activations.\n *\n * Special note: due to cross-language compatibility reasons, the\n * static readonly className field in this family of classes must be set to\n * the initialLowerCamelCase name of the activation.\n */\nexport abstract class Activation extends serialization.Serializable {\n abstract apply(tensor: Tensor, axis?: number): Tensor;\n getConfig(): serialization.ConfigDict {\n return {};\n }\n}\n\n/**\n * Exponential linear unit (ELU).\n * Reference: https://arxiv.org/abs/1511.07289\n */\nexport class Elu extends Activation {\n /** @nocollapse */\n static readonly className = 'elu';\n /**\n * Calculate the activation function.\n *\n * @param x: Input.\n * @param alpha: Scaling factor the negative section.\n * @return Output of the ELU activation.\n */\n apply(x: Tensor, alpha = 1): Tensor {\n return K.elu(x, alpha);\n }\n}\nserialization.registerClass(Elu);\n\n/**\n * Scaled Exponential Linear Unit. 
(Klambauer et al., 2017).\n * Reference: Self-Normalizing Neural Networks, https://arxiv.org/abs/1706.02515\n * Notes:\n * - To be used together with the initialization \"lecunNormal\".\n * - To be used together with the dropout variant \"AlphaDropout\".\n */\nexport class Selu extends Activation {\n /** @nocollapse */\n static readonly className = 'selu';\n apply(x: Tensor): Tensor {\n return tfc.selu(x);\n }\n}\nserialization.registerClass(Selu);\n\n/**\n * Rectified linear unit\n */\nexport class Relu extends Activation {\n /** @nocollapse */\n static readonly className = 'relu';\n apply(x: Tensor): Tensor {\n return tfc.relu(x);\n }\n}\nserialization.registerClass(Relu);\n\n/**\n * Rectified linear unit activation maxing out at 6.0.\n */\nexport class Relu6 extends Activation {\n /** @nocollapse */\n static readonly className = 'relu6';\n apply(x: Tensor): Tensor {\n return tidy(() => tfc.minimum(6.0, tfc.relu(x)));\n }\n}\nserialization.registerClass(Relu6);\n\n//* Linear activation (no-op) */\nexport class Linear extends Activation {\n /** @nocollapse */\n static readonly className = 'linear';\n apply(x: Tensor): Tensor {\n return x;\n }\n}\nserialization.registerClass(Linear);\n\n/**\n * Sigmoid activation function.\n */\nexport class Sigmoid extends Activation {\n /** @nocollapse */\n static readonly className = 'sigmoid';\n apply(x: Tensor): Tensor {\n return tfc.sigmoid(x);\n }\n}\nserialization.registerClass(Sigmoid);\n\n/**\n * Segment-wise linear approximation of sigmoid.\n */\nexport class HardSigmoid extends Activation {\n /** @nocollapse */\n static readonly className = 'hardSigmoid';\n apply(x: Tensor): Tensor {\n return K.hardSigmoid(x);\n }\n}\nserialization.registerClass(HardSigmoid);\n\n/**\n * Softplus activation function.\n */\nexport class Softplus extends Activation {\n /** @nocollapse */\n static readonly className = 'softplus';\n apply(x: Tensor): Tensor {\n return tfc.softplus(x);\n }\n}\nserialization.registerClass(Softplus);\n\n/**\n * Softsign activation function.\n */\nexport class Softsign extends Activation {\n /** @nocollapse */\n static readonly className = 'softsign';\n apply(x: Tensor): Tensor {\n return K.softsign(x);\n }\n}\nserialization.registerClass(Softsign);\n\n/**\n * Hyperbolic tangent function.\n */\nexport class Tanh extends Activation {\n /** @nocollapse */\n static readonly className = 'tanh';\n apply(x: Tensor): Tensor {\n return tfc.tanh(x);\n }\n}\nserialization.registerClass(Tanh);\n\n/**\n * Softmax activation function\n */\nexport class Softmax extends Activation {\n /** @nocollapse */\n static readonly className = 'softmax';\n /**\n * Calculate the activation function.\n *\n * @param x Tensor.\n * @param axis Integer, axis along which the softmax normalization is applied.\n * Invalid if < 2, as softmax across 1 (the batch dimension) is assumed to be\n * an error.\n *\n * @returns a Tensor of the same shape as x\n *\n * @throws ValueError: In case `dim(x) < 2`.\n */\n apply(x: Tensor, axis: number = (-1)): Tensor {\n return tfc.softmax(x, axis);\n }\n}\nserialization.registerClass(Softmax);\n\n/**\n * Log softmax activation function\n */\nexport class LogSoftmax extends Activation {\n /** @nocollapse */\n static readonly className = 'logSoftmax';\n /**\n * Calculate the activation function of log softmax:\n * log( exp(x_i) / sum(exp(x)) )\n *\n * @param x Tensor.\n * @param axis Integer, axis along which the softmax normalization is applied.\n * Invalid if < 2, as softmax across 1 (the batch dimension) is assumed to be\n * an 
error.\n *\n * @returns a Tensor of the same shape as x\n *\n * @throws ValueError: In case `dim(x) < 2`.\n */\n apply(x: Tensor, axis: number = (-1)): Tensor {\n return tfc.logSoftmax(x, axis);\n }\n}\nserialization.registerClass(LogSoftmax);\n\n/**\n * Swish activation function\n */\nexport class Swish extends Activation {\n /** @nocollapse */\n static readonly className = 'swish';\n /**\n * Calculate the activation function.\n *\n * @param x Tensor.\n * @param alpha Scaling factor for the sigmoid function.\n * @returns a Tensor of the same shape as x\n */\n apply(x: Tensor, alpha = 1): Tensor {\n return tidy(() => tfc.mul(tfc.sigmoid(tfc.mul(x, alpha)), x));\n }\n}\nserialization.registerClass(Swish);\n\n/**\n * Mish activation function\n */\nexport class Mish extends Activation {\n /** @nocollapse */\n static readonly className = 'mish';\n /**\n * Calculate the activation function.\n *\n * @param x Tensor.\n * @returns a Tensor of the same shape as x\n */\n apply(x: Tensor): Tensor {\n return tidy(() => tfc.mul(x, tfc.tanh(tfc.softplus(x))));\n }\n}\nserialization.registerClass(Mish);\n\nexport function serializeActivation(activation: Activation): string {\n return activation.getClassName();\n}\n\nexport function deserializeActivation(\n config: serialization.ConfigDict,\n customObjects: serialization.ConfigDict = {}): Activation {\n return deserializeKerasObject(\n config, serialization.SerializationMap.getMap().classNameMap,\n customObjects, 'activation');\n}\n\nexport function getActivation(identifier: ActivationIdentifier|\n serialization.ConfigDict|Activation): Activation {\n if (identifier == null) {\n const config: serialization.ConfigDict = {};\n config['className'] = 'linear';\n config['config'] = {};\n return deserializeActivation(config);\n }\n if (typeof identifier === 'string') {\n const config: serialization.ConfigDict = {};\n config['className'] = identifier;\n config['config'] = {};\n return deserializeActivation(config);\n } else if (identifier instanceof Activation) {\n return identifier;\n } else {\n return deserializeActivation(identifier);\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/* original source: keras/regularizers.py */\n\nimport * as tfc from '@tensorflow/tfjs-core';\nimport {abs, add, Scalar, serialization, sum, Tensor, tidy, zeros} from '@tensorflow/tfjs-core';\nimport * as K from './backend/tfjs_backend';\nimport {deserializeKerasObject, serializeKerasObject} from './utils/generic_utils';\n\nfunction assertObjectArgs(args: L1Args|L2Args|L1L2Args): void {\n if (args != null && typeof args !== 'object') {\n throw new Error(\n `Argument to L1L2 regularizer's constructor is expected to be an ` +\n `object, but received: ${args}`);\n }\n}\n\n/**\n * Regularizer base class.\n */\nexport abstract class Regularizer extends serialization.Serializable {\n abstract apply(x: Tensor): Scalar;\n}\n\nexport interface L1L2Args {\n /** L1 regularization rate. Defaults to 0.01. */\n l1?: number;\n /** L2 regularization rate. Defaults to 0.01. */\n l2?: number;\n}\n\nexport interface L1Args {\n /** L1 regularization rate. Defaults to 0.01. */\n l1: number;\n}\n\nexport interface L2Args {\n /** L2 regularization rate. Defaults to 0.01. 
*/\n l2: number;\n}\n\nexport class L1L2 extends Regularizer {\n /** @nocollapse */\n static className = 'L1L2';\n\n private readonly l1: number;\n private readonly l2: number;\n private readonly hasL1: boolean;\n private readonly hasL2: boolean;\n constructor(args?: L1L2Args) {\n super();\n\n assertObjectArgs(args);\n\n this.l1 = args == null || args.l1 == null ? 0.01 : args.l1;\n this.l2 = args == null || args.l2 == null ? 0.01 : args.l2;\n this.hasL1 = this.l1 !== 0;\n this.hasL2 = this.l2 !== 0;\n }\n\n /**\n * Porting note: Renamed from __call__.\n * @param x Variable of which to calculate the regularization score.\n */\n apply(x: Tensor): Scalar {\n return tidy(() => {\n let regularization: Tensor = zeros([1]);\n if (this.hasL1) {\n regularization = add(regularization, sum(tfc.mul(this.l1, abs(x))));\n }\n if (this.hasL2) {\n regularization =\n add(regularization, sum(tfc.mul(this.l2, K.square(x))));\n }\n return tfc.reshape(regularization, []);\n });\n }\n\n getConfig(): serialization.ConfigDict {\n return {'l1': this.l1, 'l2': this.l2};\n }\n\n /** @nocollapse */\n static override fromConfig(\n cls: serialization.SerializableConstructor,\n config: serialization.ConfigDict): T {\n return new cls({l1: config['l1'] as number, l2: config['l2'] as number});\n }\n}\nserialization.registerClass(L1L2);\n\nexport function l1(args?: L1Args) {\n assertObjectArgs(args);\n return new L1L2({l1: args != null ? args.l1 : null, l2: 0});\n}\n\nexport function l2(args: L2Args) {\n assertObjectArgs(args);\n return new L1L2({l2: args != null ? args.l2 : null, l1: 0});\n}\n\n/** @docinline */\nexport type RegularizerIdentifier = 'l1l2'|string;\n\n// Maps the JavaScript-like identifier keys to the corresponding keras symbols.\nexport const REGULARIZER_IDENTIFIER_REGISTRY_SYMBOL_MAP:\n {[identifier in RegularizerIdentifier]: string} = {\n 'l1l2': 'L1L2'\n };\n\nexport function serializeRegularizer(constraint: Regularizer):\n serialization.ConfigDictValue {\n return serializeKerasObject(constraint);\n}\n\nexport function deserializeRegularizer(\n config: serialization.ConfigDict,\n customObjects: serialization.ConfigDict = {}): Regularizer {\n return deserializeKerasObject(\n config, serialization.SerializationMap.getMap().classNameMap,\n customObjects, 'regularizer');\n}\n\nexport function getRegularizer(identifier: RegularizerIdentifier|\n serialization.ConfigDict|\n Regularizer): Regularizer {\n if (identifier == null) {\n return null;\n }\n if (typeof identifier === 'string') {\n const className = identifier in REGULARIZER_IDENTIFIER_REGISTRY_SYMBOL_MAP ?\n REGULARIZER_IDENTIFIER_REGISTRY_SYMBOL_MAP[identifier] :\n identifier;\n const config = {className, config: {}};\n return deserializeRegularizer(config);\n } else if (identifier instanceof Regularizer) {\n return identifier;\n } else {\n return deserializeRegularizer(identifier);\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * Advanced activation layers.\n */\n\nimport {cast, clipByValue, elu, greater, leakyRelu, mul, prelu, relu, serialization, Tensor} from '@tensorflow/tfjs-core';\n\nimport {Softmax as softmaxActivation} from '../activations';\nimport {Constraint, getConstraint, serializeConstraint} from '../constraints';\nimport {InputSpec, Layer, LayerArgs} from 
'../engine/topology';\nimport {NotImplementedError, ValueError} from '../errors';\nimport {getInitializer, Initializer, InitializerIdentifier, serializeInitializer} from '../initializers';\nimport {Shape} from '../keras_format/common';\nimport {getRegularizer, Regularizer, serializeRegularizer} from '../regularizers';\nimport {Kwargs} from '../types';\nimport {getExactlyOneShape, getExactlyOneTensor} from '../utils/types_utils';\nimport {LayerVariable} from '../variables';\n\nexport declare interface ReLULayerArgs extends LayerArgs {\n /**\n * Float, the maximum output value.\n */\n maxValue?: number;\n}\n\nexport class ReLU extends Layer {\n /** @nocollapse */\n static className = 'ReLU';\n maxValue: number;\n\n constructor(args?: ReLULayerArgs) {\n super(args == null ? {} : args);\n this.supportsMasking = true;\n if (args != null) {\n this.maxValue = args.maxValue;\n }\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n inputs = getExactlyOneTensor(inputs);\n let output = relu(inputs);\n if (this.maxValue != null) {\n output = clipByValue(output, 0, this.maxValue);\n }\n return output;\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n return inputShape;\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {maxValue: this.maxValue};\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(ReLU);\n\nexport declare interface LeakyReLULayerArgs extends LayerArgs {\n /**\n * Float `>= 0`. Negative slope coefficient. Defaults to `0.3`.\n */\n alpha?: number;\n}\n\nexport class LeakyReLU extends Layer {\n /** @nocollapse */\n static className = 'LeakyReLU';\n readonly alpha: number;\n\n readonly DEFAULT_ALPHA = 0.3;\n\n constructor(args?: LeakyReLULayerArgs) {\n super(args == null ? {} : args);\n if (args == null) {\n args = {};\n }\n this.alpha = args.alpha == null ? this.DEFAULT_ALPHA : args.alpha;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n const x = getExactlyOneTensor(inputs);\n return leakyRelu(x, this.alpha);\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n return inputShape;\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {alpha: this.alpha};\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(LeakyReLU);\n\nexport declare interface PReLULayerArgs extends LayerArgs {\n /**\n * Initializer for the learnable alpha.\n */\n alphaInitializer?: Initializer|InitializerIdentifier;\n\n /**\n * Regularizer for the learnable alpha.\n */\n alphaRegularizer?: Regularizer;\n\n /**\n * Constraint for the learnable alpha.\n */\n alphaConstraint?: Constraint;\n\n /**\n * The axes along which to share learnable parameters for the activation\n * function. 
For example, if the incoming feature maps are from a 2D\n * convolution with output shape `[numExamples, height, width, channels]`,\n * and you wish to share parameters across space (height and width) so that\n * each filter channels has only one set of parameters, set\n * `shared_axes: [1, 2]`.\n */\n sharedAxes?: number|number[];\n}\n\nexport class PReLU extends Layer {\n /** @nocollapse */\n static className = 'PReLU';\n private readonly alphaInitializer: Initializer;\n private readonly alphaRegularizer: Regularizer;\n private readonly alphaConstraint: Constraint;\n private readonly sharedAxes: number[];\n private alpha: LayerVariable;\n\n readonly DEFAULT_ALPHA_INITIALIZER: InitializerIdentifier = 'zeros';\n\n constructor(args?: PReLULayerArgs) {\n super(args == null ? {} : args);\n if (args == null) {\n args = {};\n }\n\n this.supportsMasking = true;\n this.alphaInitializer =\n getInitializer(args.alphaInitializer || this.DEFAULT_ALPHA_INITIALIZER);\n this.alphaRegularizer = getRegularizer(args.alphaRegularizer);\n this.alphaConstraint = getConstraint(args.alphaConstraint);\n if (args.sharedAxes == null) {\n this.sharedAxes = null;\n } else if (Array.isArray(args.sharedAxes)) {\n this.sharedAxes = args.sharedAxes;\n } else if (typeof args.sharedAxes === 'number') {\n this.sharedAxes = [args.sharedAxes];\n } else {\n throw new ValueError(\n `Expected sharedAxes to be a number or an array of numbers, ` +\n `but got ${args.sharedAxes}`);\n }\n }\n\n override build(inputShape: Shape|Shape[]) {\n inputShape = getExactlyOneShape(inputShape);\n const paramShape: Shape = inputShape.slice(1);\n if (this.sharedAxes != null) {\n for (const i of this.sharedAxes) {\n paramShape[i - 1] = 1;\n }\n }\n this.alpha = this.addWeight(\n 'alpha', paramShape, 'float32', this.alphaInitializer,\n this.alphaRegularizer, true, this.alphaConstraint);\n // Set input spec.\n const axes: {[axis: number]: number} = {};\n if (this.sharedAxes != null) {\n for (let i = 1; i < inputShape.length; ++i) {\n axes[i] = inputShape[i];\n }\n }\n this.inputSpec = [new InputSpec({\n ndim: inputShape.length,\n axes,\n })];\n this.built = true;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n inputs = getExactlyOneTensor(inputs);\n return prelu(inputs, this.alpha.read());\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {\n alphaInitializer: serializeInitializer(this.alphaInitializer),\n alphaRegularizer: serializeRegularizer(this.alphaRegularizer),\n alphaConstraint: serializeConstraint(this.alphaConstraint),\n sharedAxes: this.sharedAxes\n };\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(PReLU);\n\nexport declare interface ELULayerArgs extends LayerArgs {\n /**\n * Float `>= 0`. Negative slope coefficient. Defaults to `1.0`.\n */\n alpha?: number;\n}\n\nexport class ELU extends Layer {\n /** @nocollapse */\n static className = 'ELU';\n readonly alpha: number;\n\n readonly DEFAULT_ALPHA = 1.0;\n\n constructor(args?: ELULayerArgs) {\n super(args == null ? {} : args);\n if (args == null) {\n args = {};\n }\n\n if (args.alpha != null && args.alpha !== this.DEFAULT_ALPHA) {\n throw new NotImplementedError(\n `Non-default alpha value (${args.alpha}) is not supported by the ` +\n `ELU layer yet.`);\n }\n\n this.alpha = args.alpha == null ? 
this.DEFAULT_ALPHA : args.alpha;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n const x = getExactlyOneTensor(inputs);\n return elu(x);\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n return inputShape;\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {alpha: this.alpha};\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(ELU);\n\nexport declare interface ThresholdedReLULayerArgs extends LayerArgs {\n /**\n * Float >= 0. Threshold location of activation.\n */\n theta?: number;\n}\n\nexport class ThresholdedReLU extends Layer {\n /** @nocollapse */\n static className = 'ThresholdedReLU';\n readonly theta: number;\n\n readonly DEFAULT_THETA = 1.0;\n\n constructor(args?: ThresholdedReLULayerArgs) {\n super(args == null ? {} : args);\n if (args == null) {\n args = {};\n }\n\n this.theta = args.theta == null ? this.DEFAULT_THETA : args.theta;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n const x = getExactlyOneTensor(inputs);\n return mul(x, cast(greater(x, this.theta), 'float32'));\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n return inputShape;\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {theta: this.theta};\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(ThresholdedReLU);\n\nexport declare interface SoftmaxLayerArgs extends LayerArgs {\n /**\n * Integer, axis along which the softmax normalization is applied.\n * Defaults to `-1` (i.e., the last axis).\n */\n axis?: number;\n}\n\nexport class Softmax extends Layer {\n /** @nocollapse */\n static className = 'Softmax';\n readonly axis: number;\n readonly softmax: (t: Tensor, a?: number) => Tensor;\n readonly DEFAULT_AXIS = 1.0;\n\n constructor(args?: SoftmaxLayerArgs) {\n super(args == null ? {} : args);\n if (args == null) {\n args = {};\n }\n this.softmax = new softmaxActivation().apply;\n this.axis = args.axis == null ? 
this.DEFAULT_AXIS : args.axis;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n const x = getExactlyOneTensor(inputs);\n return this.softmax(x, this.axis);\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n return inputShape;\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {axis: this.axis};\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(Softmax);\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\nimport {ValueError} from '../errors';\nimport {PaddingMode} from '../keras_format/common';\n\nimport {pyListRepeat} from './generic_utils';\nimport {isInteger, max} from './math_utils';\n\n/**\n * Transforms a single number of array of numbers into an array of numbers.\n * @param value\n * @param n: The size of the tuple to be returned.\n * @param name: Name of the parameter, used for generating error messages.\n * @returns An array of numbers.\n */\nexport function normalizeArray(\n value: number|number[], n: number, name: string): number[] {\n if (typeof value === 'number') {\n return pyListRepeat(value, n);\n } else {\n if (value.length !== n) {\n throw new ValueError(\n `The ${name} argument must be an integer or tuple of ${n} integers.` +\n ` Received: ${value.length} elements.`);\n }\n for (let i = 0; i < n; ++i) {\n const singleValue = value[i];\n if (!isInteger(singleValue)) {\n throw new ValueError(\n `The ${name} argument must be an integer or tuple of ${n}` +\n ` integers. 
Received: ${JSON.stringify(value)} including a` +\n ` non-integer number ${singleValue}`);\n }\n }\n return value;\n }\n}\n\n/**\n * Determines output length of a convolution given input length.\n * @param inputLength\n * @param filterSize\n * @param padding\n * @param stride\n * @param dilation: dilation rate.\n */\nexport function convOutputLength(\n inputLength: number, filterSize: number, padding: PaddingMode,\n stride: number, dilation = 1): number {\n if (inputLength == null) {\n return inputLength;\n }\n const dilatedFilterSize = filterSize + (filterSize - 1) * (dilation - 1);\n let outputLength: number;\n if (padding === 'same') {\n outputLength = inputLength;\n } else { // VALID\n outputLength = inputLength - dilatedFilterSize + 1;\n }\n return Math.floor((outputLength + stride - 1) / stride);\n}\n\nexport function deconvLength(\n dimSize: number, strideSize: number, kernelSize: number,\n padding: PaddingMode): number {\n if (dimSize == null) {\n return null;\n }\n\n if (padding === 'valid') {\n dimSize = dimSize * strideSize + max([kernelSize - strideSize, 0]);\n } else if (padding === 'same') {\n dimSize = dimSize * strideSize;\n } else {\n throw new ValueError(`Unsupport padding mode: ${padding}.`);\n }\n return dimSize;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * TensorFlow.js Layers: Convolutional Layers\n */\n\nimport * as tfc from '@tensorflow/tfjs-core';\nimport {fused, serialization, Tensor, Tensor1D, Tensor2D, Tensor3D, Tensor4D, Tensor5D, tidy} from '@tensorflow/tfjs-core';\n\nimport {Activation, getActivation, serializeActivation} from '../activations';\nimport {imageDataFormat} from '../backend/common';\nimport * as K from '../backend/tfjs_backend';\nimport {checkDataFormat, checkInterpolationFormat, checkPaddingMode} from '../common';\nimport {Constraint, ConstraintIdentifier, getConstraint, serializeConstraint} from '../constraints';\nimport {InputSpec, Layer, LayerArgs} from '../engine/topology';\nimport {NotImplementedError, ValueError} from '../errors';\nimport {getInitializer, Initializer, InitializerIdentifier, serializeInitializer} from '../initializers';\nimport {ActivationIdentifier} from '../keras_format/activation_config';\nimport {DataFormat, InterpolationFormat, PaddingMode, Shape} from '../keras_format/common';\nimport {getRegularizer, Regularizer, RegularizerIdentifier, serializeRegularizer} from '../regularizers';\nimport {Kwargs} from '../types';\nimport {convOutputLength, deconvLength, normalizeArray} from '../utils/conv_utils';\nimport * as generic_utils from '../utils/generic_utils';\nimport {getExactlyOneShape, getExactlyOneTensor} from '../utils/types_utils';\nimport {LayerVariable} from '../variables';\n\n/**\n * Transpose and cast the input before the conv2d.\n * @param x Input image tensor.\n * @param dataFormat\n */\nexport function preprocessConv2DInput(\n x: Tensor, dataFormat: DataFormat): Tensor {\n // TODO(cais): Cast type to float32 if not.\n return tidy(() => {\n checkDataFormat(dataFormat);\n if (dataFormat === 'channelsFirst') {\n return tfc.transpose(x, [0, 2, 3, 1]); // NCHW -> NHWC.\n } else {\n return x;\n }\n });\n}\n\n/**\n * Transpose and cast the input before the conv3d.\n * @param x Input image tensor.\n * @param dataFormat\n */\nexport function 
preprocessConv3DInput(\n x: Tensor, dataFormat: DataFormat): Tensor {\n return tidy(() => {\n checkDataFormat(dataFormat);\n if (dataFormat === 'channelsFirst') {\n return tfc.transpose(x, [0, 2, 3, 4, 1]); // NCDHW -> NDHWC.\n } else {\n return x;\n }\n });\n}\n\n/**\n * 1D-convolution with bias added.\n *\n * Porting Note: This function does not exist in the Python Keras backend.\n * It is exactly the same as `conv2d`, except the added `bias`.\n *\n * @param x Input tensor, rank-3, of shape `[batchSize, width, inChannels]`.\n * @param kernel Kernel, rank-3, of shape `[filterWidth, inDepth, outDepth]`.\n * @param bias Bias, rank-3, of shape `[outDepth]`.\n * @param strides\n * @param padding Padding mode.\n * @param dataFormat Data format.\n * @param dilationRate\n * @returns The result of the 1D convolution.\n * @throws ValueError, if `x`, `kernel` or `bias` is not of the correct rank.\n */\nexport function conv1dWithBias(\n x: Tensor, kernel: Tensor, bias: Tensor, strides = 1, padding = 'valid',\n dataFormat?: DataFormat, dilationRate = 1): Tensor {\n return tidy(() => {\n if (dataFormat == null) {\n dataFormat = imageDataFormat();\n }\n checkDataFormat(dataFormat);\n // Check the ranks of x, kernel and bias.\n if (x.shape.length !== 3) {\n throw new ValueError(\n `The input of a conv1dWithBias operation should be 3, but is ` +\n `${x.shape.length} instead.`);\n }\n if (kernel.shape.length !== 3) {\n throw new ValueError(\n `The kernel for a conv1dWithBias operation should be 3, but is ` +\n `${kernel.shape.length} instead`);\n }\n if (bias != null && bias.shape.length !== 1) {\n throw new ValueError(\n `The bias for a conv1dWithBias operation should be 1, but is ` +\n `${kernel.shape.length} instead`);\n }\n // TODO(cais): Support CAUSAL padding mode.\n if (dataFormat === 'channelsFirst') {\n x = tfc.transpose(x, [0, 2, 1]); // NCW -> NWC.\n }\n if (padding === 'causal') {\n throw new NotImplementedError(\n 'The support for CAUSAL padding mode in conv1dWithBias is not ' +\n 'implemented yet.');\n }\n let y: Tensor = tfc.conv1d(\n x as Tensor2D | Tensor3D, kernel as Tensor3D, strides,\n padding === 'same' ? 'same' : 'valid', 'NWC', dilationRate);\n if (bias != null) {\n y = K.biasAdd(y, bias);\n }\n return y;\n });\n}\n\n/**\n * 1D-convolution.\n *\n * @param x Input tensor, rank-3, of shape `[batchSize, width, inChannels]`.\n * @param kernel Kernel, rank-3, of shape `[filterWidth, inDepth, outDepth]`.s\n * @param strides\n * @param padding Padding mode.\n * @param dataFormat Data format.\n * @param dilationRate\n * @returns The result of the 1D convolution.\n * @throws ValueError, if `x`, `kernel` or `bias` is not of the correct rank.\n */\nexport function conv1d(\n x: Tensor, kernel: Tensor, strides = 1, padding = 'valid',\n dataFormat?: DataFormat, dilationRate = 1): Tensor {\n return tidy(() => {\n checkDataFormat(dataFormat);\n return conv1dWithBias(\n x, kernel, null, strides, padding, dataFormat, dilationRate);\n });\n}\n\n/**\n * 2D Convolution\n * @param x\n * @param kernel kernel of the convolution.\n * @param strides strides array.\n * @param padding padding mode. Default to 'valid'.\n * @param dataFormat data format. 
Defaults to 'channelsLast'.\n * @param dilationRate dilation rate array.\n * @returns Result of the 2D pooling.\n */\nexport function conv2d(\n x: Tensor, kernel: Tensor, strides = [1, 1], padding = 'valid',\n dataFormat?: DataFormat, dilationRate?: [number, number]): Tensor {\n return tidy(() => {\n checkDataFormat(dataFormat);\n return conv2dWithBiasActivation(\n x, kernel, null, strides, padding, dataFormat, dilationRate);\n });\n}\n\n/**\n * 2D Convolution with an added bias and optional activation.\n * Note: This function does not exist in the Python Keras Backend. This function\n * is exactly the same as `conv2d`, except the added `bias`.\n */\nexport function conv2dWithBiasActivation(\n x: Tensor, kernel: Tensor, bias: Tensor, strides = [1, 1],\n padding = 'valid', dataFormat?: DataFormat, dilationRate?: [number, number],\n activation: fused.Activation = null): Tensor {\n return tidy(() => {\n if (dataFormat == null) {\n dataFormat = imageDataFormat();\n }\n checkDataFormat(dataFormat);\n if (x.rank !== 3 && x.rank !== 4) {\n throw new ValueError(\n `conv2dWithBiasActivation expects input to be of rank 3 or 4, ` +\n `but received ${x.rank}.`);\n }\n if (kernel.rank !== 3 && kernel.rank !== 4) {\n throw new ValueError(\n `conv2dWithBiasActivation expects kernel to be of rank 3 or 4, ` +\n `but received ${x.rank}.`);\n }\n let y = preprocessConv2DInput(x, dataFormat);\n if (padding === 'causal') {\n throw new NotImplementedError(\n 'The support for CAUSAL padding mode in conv1dWithBias is not ' +\n 'implemented yet.');\n }\n y = tfc.fused.conv2d({\n x: y as Tensor3D | Tensor4D,\n filter: kernel as Tensor4D,\n strides: strides as [number, number],\n pad: padding === 'same' ? 'same' : 'valid',\n dilations: dilationRate,\n dataFormat: 'NHWC',\n bias,\n activation\n });\n if (dataFormat === 'channelsFirst') {\n y = tfc.transpose(y, [0, 3, 1, 2]);\n }\n return y;\n });\n}\n\n/**\n * 3D Convolution.\n * @param x\n * @param kernel kernel of the convolution.\n * @param strides strides array.\n * @param padding padding mode. Default to 'valid'.\n * @param dataFormat data format. Defaults to 'channelsLast'.\n * @param dilationRate dilation rate array.\n * @returns Result of the 3D convolution.\n */\nexport function conv3d(\n x: Tensor, kernel: Tensor, strides = [1, 1, 1], padding = 'valid',\n dataFormat?: DataFormat, dilationRate?: [number, number, number]): Tensor {\n return tidy(() => {\n checkDataFormat(dataFormat);\n return conv3dWithBias(\n x, kernel, null, strides, padding, dataFormat, dilationRate);\n });\n}\n\n/**\n * 3D Convolution with an added bias.\n * Note: This function does not exist in the Python Keras Backend. 
This function\n * is exactly the same as `conv3d`, except the added `bias`.\n */\nexport function conv3dWithBias(\n x: Tensor, kernel: Tensor, bias: Tensor, strides = [1, 1, 1],\n padding = 'valid', dataFormat?: DataFormat,\n dilationRate?: [number, number, number]): Tensor {\n return tidy(() => {\n if (dataFormat == null) {\n dataFormat = imageDataFormat();\n }\n checkDataFormat(dataFormat);\n if (x.rank !== 4 && x.rank !== 5) {\n throw new ValueError(\n `conv3dWithBias expects input to be of rank 4 or 5, but received ` +\n `${x.rank}.`);\n }\n if (kernel.rank !== 4 && kernel.rank !== 5) {\n throw new ValueError(\n `conv3dWithBias expects kernel to be of rank 4 or 5, but received ` +\n `${x.rank}.`);\n }\n let y = preprocessConv3DInput(x, dataFormat);\n if (padding === 'causal') {\n throw new NotImplementedError(\n 'The support for CAUSAL padding mode in conv3dWithBias is not ' +\n 'implemented yet.');\n }\n y = tfc.conv3d(\n y as Tensor4D | tfc.Tensor,\n kernel as tfc.Tensor, strides as [number, number, number],\n padding === 'same' ? 'same' : 'valid', 'NDHWC', dilationRate);\n if (bias != null) {\n y = K.biasAdd(y, bias as Tensor1D);\n }\n if (dataFormat === 'channelsFirst') {\n y = tfc.transpose(y, [0, 4, 1, 2, 3]);\n }\n return y;\n });\n}\n\n/**\n * Base LayerConfig for depthwise and non-depthwise convolutional layers.\n */\nexport declare interface BaseConvLayerArgs extends LayerArgs {\n /**\n * The dimensions of the convolution window. If kernelSize is a number, the\n * convolutional window will be square.\n */\n kernelSize: number|number[];\n\n /**\n * The strides of the convolution in each dimension. If strides is a number,\n * strides in both dimensions are equal.\n *\n * Specifying any stride value != 1 is incompatible with specifying any\n * `dilationRate` value != 1.\n */\n strides?: number|number[];\n\n /**\n * Padding mode.\n */\n padding?: PaddingMode;\n\n /**\n * Format of the data, which determines the ordering of the dimensions in\n * the inputs.\n *\n * `channels_last` corresponds to inputs with shape\n * `(batch, ..., channels)`\n *\n * `channels_first` corresponds to inputs with shape `(batch, channels,\n * ...)`.\n *\n * Defaults to `channels_last`.\n */\n dataFormat?: DataFormat;\n\n /**\n * The dilation rate to use for the dilated convolution in each dimension.\n * Should be an integer or array of two or three integers.\n *\n * Currently, specifying any `dilationRate` value != 1 is incompatible with\n * specifying any `strides` value != 1.\n */\n dilationRate?: number|[number]|[number, number]|[number, number, number];\n\n /**\n * Activation function of the layer.\n *\n * If you don't specify the activation, none is applied.\n */\n activation?: ActivationIdentifier;\n\n /**\n * Whether the layer uses a bias vector. 
Defaults to `true`.\n */\n useBias?: boolean;\n\n /**\n * Initializer for the convolutional kernel weights matrix.\n */\n kernelInitializer?: InitializerIdentifier|Initializer;\n\n /**\n * Initializer for the bias vector.\n */\n biasInitializer?: InitializerIdentifier|Initializer;\n\n /**\n * Constraint for the convolutional kernel weights.\n */\n kernelConstraint?: ConstraintIdentifier|Constraint;\n\n /**\n * Constraint for the bias vector.\n */\n biasConstraint?: ConstraintIdentifier|Constraint;\n\n /**\n * Regularizer function applied to the kernel weights matrix.\n */\n kernelRegularizer?: RegularizerIdentifier|Regularizer;\n\n /**\n * Regularizer function applied to the bias vector.\n */\n biasRegularizer?: RegularizerIdentifier|Regularizer;\n\n /**\n * Regularizer function applied to the activation.\n */\n activityRegularizer?: RegularizerIdentifier|Regularizer;\n}\n\n/**\n * LayerConfig for non-depthwise convolutional layers.\n * Applies to non-depthwise convolution of all ranks (e.g, Conv1D, Conv2D,\n * Conv3D).\n */\nexport declare interface ConvLayerArgs extends BaseConvLayerArgs {\n /**\n * The dimensionality of the output space (i.e. the number of filters in the\n * convolution).\n */\n filters: number;\n}\n\n/**\n * Abstract convolution layer.\n */\nexport abstract class BaseConv extends Layer {\n protected readonly rank: number;\n protected readonly kernelSize: number[];\n protected readonly strides: number[];\n protected readonly padding: PaddingMode;\n protected readonly dataFormat: DataFormat;\n protected readonly activation: Activation;\n protected readonly useBias: boolean;\n protected readonly dilationRate: number[];\n\n // Bias-related members are here because all convolution subclasses use the\n // same configuration parmeters to control bias. Kernel-related members\n // are in subclass `Conv` because some subclasses use different parameters to\n // control kernel properties, for instance, `DepthwiseConv2D` uses\n // `depthwiseInitializer` instead of `kernelInitializer`.\n protected readonly biasInitializer?: Initializer;\n protected readonly biasConstraint?: Constraint;\n protected readonly biasRegularizer?: Regularizer;\n\n protected bias: LayerVariable = null;\n\n readonly DEFAULT_KERNEL_INITIALIZER: InitializerIdentifier = 'glorotNormal';\n readonly DEFAULT_BIAS_INITIALIZER: InitializerIdentifier = 'zeros';\n\n constructor(rank: number, args: BaseConvLayerArgs) {\n super(args as LayerArgs);\n BaseConv.verifyArgs(args);\n this.rank = rank;\n generic_utils.assertPositiveInteger(this.rank, 'rank');\n if (this.rank !== 1 && this.rank !== 2 && this.rank !== 3) {\n throw new NotImplementedError(\n `Convolution layer for rank other than 1, 2, or 3 (${\n this.rank}) is ` +\n `not implemented yet.`);\n }\n this.kernelSize = normalizeArray(args.kernelSize, rank, 'kernelSize');\n this.strides = normalizeArray(\n args.strides == null ? 1 : args.strides, rank, 'strides');\n this.padding = args.padding == null ? 'valid' : args.padding;\n checkPaddingMode(this.padding);\n this.dataFormat =\n args.dataFormat == null ? 'channelsLast' : args.dataFormat;\n checkDataFormat(this.dataFormat);\n this.activation = getActivation(args.activation);\n this.useBias = args.useBias == null ? 
true : args.useBias;\n this.biasInitializer =\n getInitializer(args.biasInitializer || this.DEFAULT_BIAS_INITIALIZER);\n this.biasConstraint = getConstraint(args.biasConstraint);\n this.biasRegularizer = getRegularizer(args.biasRegularizer);\n this.activityRegularizer = getRegularizer(args.activityRegularizer);\n this.dilationRate = normalizeArray(\n args.dilationRate == null ? 1 : args.dilationRate, rank,\n 'dilationRate');\n if (this.rank === 1 &&\n (Array.isArray(this.dilationRate) && this.dilationRate.length !== 1)) {\n throw new ValueError(\n `dilationRate must be a number or an array of a single number ` +\n `for 1D convolution, but received ` +\n `${JSON.stringify(this.dilationRate)}`);\n } else if (this.rank === 2) {\n if (typeof this.dilationRate === 'number') {\n this.dilationRate = [this.dilationRate, this.dilationRate];\n } else if (this.dilationRate.length !== 2) {\n throw new ValueError(\n `dilationRate must be a number or array of two numbers for 2D ` +\n `convolution, but received ${JSON.stringify(this.dilationRate)}`);\n }\n } else if (this.rank === 3) {\n if (typeof this.dilationRate === 'number') {\n this.dilationRate =\n [this.dilationRate, this.dilationRate, this.dilationRate];\n } else if (this.dilationRate.length !== 3) {\n throw new ValueError(\n `dilationRate must be a number or array of three numbers for 3D ` +\n `convolution, but received ${JSON.stringify(this.dilationRate)}`);\n }\n }\n }\n\n protected static verifyArgs(args: BaseConvLayerArgs) {\n // Check config.kernelSize type and shape.\n generic_utils.assert(\n 'kernelSize' in args, `required key 'kernelSize' not in config`);\n if (typeof args.kernelSize !== 'number' &&\n !generic_utils.checkArrayTypeAndLength(\n args.kernelSize, 'number', 1, 3)) {\n throw new ValueError(\n `BaseConv expects config.kernelSize to be number or number[] with ` +\n `length 1, 2, or 3, but received ${\n JSON.stringify(args.kernelSize)}.`);\n }\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {\n kernelSize: this.kernelSize,\n strides: this.strides,\n padding: this.padding,\n dataFormat: this.dataFormat,\n dilationRate: this.dilationRate,\n activation: serializeActivation(this.activation),\n useBias: this.useBias,\n biasInitializer: serializeInitializer(this.biasInitializer),\n biasRegularizer: serializeRegularizer(this.biasRegularizer),\n activityRegularizer: serializeRegularizer(this.activityRegularizer),\n biasConstraint: serializeConstraint(this.biasConstraint)\n };\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\n\n/**\n * Abstract nD convolution layer. Ancestor of convolution layers which reduce\n * across channels, i.e., Conv1D and Conv2D, but not DepthwiseConv2D.\n */\nexport abstract class Conv extends BaseConv {\n protected readonly filters: number;\n\n protected kernel: LayerVariable = null;\n\n // Bias-related properties are stored in the superclass `BaseConv` because all\n // convolution subclasses use the same configuration parameters to control\n // bias. 
Kernel-related properties are defined here rather than in the\n // superclass because some convolution subclasses use different names and\n // configuration parameters for their internal kernel state.\n protected readonly kernelInitializer?: Initializer;\n protected readonly kernelConstraint?: Constraint;\n protected readonly kernelRegularizer?: Regularizer;\n\n constructor(rank: number, args: ConvLayerArgs) {\n super(rank, args as BaseConvLayerArgs);\n Conv.verifyArgs(args);\n this.filters = args.filters;\n generic_utils.assertPositiveInteger(this.filters, 'filters');\n this.kernelInitializer = getInitializer(\n args.kernelInitializer || this.DEFAULT_KERNEL_INITIALIZER);\n this.kernelConstraint = getConstraint(args.kernelConstraint);\n this.kernelRegularizer = getRegularizer(args.kernelRegularizer);\n }\n\n override build(inputShape: Shape|Shape[]): void {\n inputShape = getExactlyOneShape(inputShape);\n const channelAxis =\n this.dataFormat === 'channelsFirst' ? 1 : inputShape.length - 1;\n if (inputShape[channelAxis] == null) {\n throw new ValueError(\n `The channel dimension of the input should be defined. ` +\n `Found ${inputShape[channelAxis]}`);\n }\n const inputDim = inputShape[channelAxis];\n\n const kernelShape = this.kernelSize.concat([inputDim, this.filters]);\n\n this.kernel = this.addWeight(\n 'kernel', kernelShape, null, this.kernelInitializer,\n this.kernelRegularizer, true, this.kernelConstraint);\n if (this.useBias) {\n this.bias = this.addWeight(\n 'bias', [this.filters], null, this.biasInitializer,\n this.biasRegularizer, true, this.biasConstraint);\n }\n\n this.inputSpec = [{ndim: this.rank + 2, axes: {[channelAxis]: inputDim}}];\n this.built = true;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n inputs = getExactlyOneTensor(inputs);\n let outputs: Tensor;\n const biasValue = this.bias == null ? 
null : this.bias.read();\n const fusedActivationName = generic_utils.mapActivationToFusedKernel(\n this.activation.getClassName());\n\n if (fusedActivationName != null && this.rank === 2) {\n outputs = conv2dWithBiasActivation(\n inputs, this.kernel.read(), biasValue, this.strides, this.padding,\n this.dataFormat, this.dilationRate as [number, number],\n fusedActivationName);\n } else {\n if (this.rank === 1) {\n outputs = conv1dWithBias(\n inputs, this.kernel.read(), biasValue, this.strides[0],\n this.padding, this.dataFormat, this.dilationRate[0]);\n } else if (this.rank === 2) {\n // TODO(cais): Move up to constructor.\n outputs = conv2dWithBiasActivation(\n inputs, this.kernel.read(), biasValue, this.strides, this.padding,\n this.dataFormat, this.dilationRate as [number, number]);\n } else if (this.rank === 3) {\n outputs = conv3dWithBias(\n inputs, this.kernel.read(), biasValue, this.strides, this.padding,\n this.dataFormat, this.dilationRate as [number, number, number]);\n } else {\n throw new NotImplementedError(\n 'convolutions greater than 3D are not implemented yet.');\n }\n\n if (this.activation != null) {\n outputs = this.activation.apply(outputs);\n }\n }\n\n return outputs;\n });\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n inputShape = getExactlyOneShape(inputShape);\n const newSpace: number[] = [];\n const space = (this.dataFormat === 'channelsLast') ?\n inputShape.slice(1, inputShape.length - 1) :\n inputShape.slice(2);\n for (let i = 0; i < space.length; ++i) {\n const newDim = convOutputLength(\n space[i], this.kernelSize[i], this.padding, this.strides[i],\n typeof this.dilationRate === 'number' ? this.dilationRate :\n this.dilationRate[i]);\n newSpace.push(newDim);\n }\n\n let outputShape = [inputShape[0]];\n if (this.dataFormat === 'channelsLast') {\n outputShape = outputShape.concat(newSpace);\n outputShape.push(this.filters);\n } else {\n outputShape.push(this.filters);\n outputShape = outputShape.concat(newSpace);\n }\n return outputShape;\n }\n\n override getConfig(): serialization.ConfigDict {\n const config = {\n filters: this.filters,\n kernelInitializer: serializeInitializer(this.kernelInitializer),\n kernelRegularizer: serializeRegularizer(this.kernelRegularizer),\n kernelConstraint: serializeConstraint(this.kernelConstraint)\n };\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n\n protected static override verifyArgs(args: ConvLayerArgs) {\n // Check config.filters type, shape, and value.\n if (!('filters' in args) || typeof args.filters !== 'number' ||\n args.filters < 1) {\n throw new ValueError(\n `Convolution layer expected config.filters to be a 'number' > 0 ` +\n `but got ${JSON.stringify(args.filters)}`);\n }\n }\n}\n\nexport class Conv2D extends Conv {\n /** @nocollapse */\n static className = 'Conv2D';\n constructor(args: ConvLayerArgs) {\n super(2, args);\n Conv2D.verifyArgs(args);\n }\n\n override getConfig(): serialization.ConfigDict {\n const config = super.getConfig();\n delete config['rank'];\n return config;\n }\n\n protected static override verifyArgs(args: ConvLayerArgs) {\n // config.kernelSize must be a number or array of numbers.\n if ((typeof args.kernelSize !== 'number') &&\n !generic_utils.checkArrayTypeAndLength(\n args.kernelSize, 'number', 1, 2)) {\n throw new ValueError(\n `Conv2D expects config.kernelSize to be number or number[] with ` +\n `length 1 or 2, but received ${JSON.stringify(args.kernelSize)}.`);\n }\n 
}\n}\nserialization.registerClass(Conv2D);\n\nexport class Conv3D extends Conv {\n /** @nocollapse */\n static className = 'Conv3D';\n constructor(args: ConvLayerArgs) {\n super(3, args);\n Conv3D.verifyArgs(args);\n }\n\n override getConfig(): serialization.ConfigDict {\n const config = super.getConfig();\n delete config['rank'];\n return config;\n }\n\n protected static override verifyArgs(args: ConvLayerArgs) {\n // config.kernelSize must be a number or array of numbers.\n if (typeof args.kernelSize !== 'number') {\n if (!(Array.isArray(args.kernelSize) &&\n (args.kernelSize.length === 1 || args.kernelSize.length === 3))) {\n throw new ValueError(\n `Conv3D expects config.kernelSize to be number or` +\n ` [number, number, number], but received ${\n JSON.stringify(args.kernelSize)}.`);\n }\n }\n }\n}\nserialization.registerClass(Conv3D);\n\nexport class Conv2DTranspose extends Conv2D {\n /** @nocollapse */\n static override className = 'Conv2DTranspose';\n\n constructor(args: ConvLayerArgs) {\n super(args);\n this.inputSpec = [new InputSpec({ndim: 4})];\n\n if (this.padding !== 'same' && this.padding !== 'valid') {\n throw new ValueError(\n `Conv2DTranspose currently supports only padding modes 'same' ` +\n `and 'valid', but received padding mode ${this.padding}`);\n }\n }\n\n override build(inputShape: Shape|Shape[]): void {\n inputShape = getExactlyOneShape(inputShape);\n\n if (inputShape.length !== 4) {\n throw new ValueError(\n 'Input should have rank 4; Received input shape: ' +\n JSON.stringify(inputShape));\n }\n\n const channelAxis =\n this.dataFormat === 'channelsFirst' ? 1 : inputShape.length - 1;\n if (inputShape[channelAxis] == null) {\n throw new ValueError(\n 'The channel dimension of the inputs should be defined. ' +\n 'Found `None`.');\n }\n const inputDim = inputShape[channelAxis];\n const kernelShape = this.kernelSize.concat([this.filters, inputDim]);\n\n this.kernel = this.addWeight(\n 'kernel', kernelShape, 'float32', this.kernelInitializer,\n this.kernelRegularizer, true, this.kernelConstraint);\n if (this.useBias) {\n this.bias = this.addWeight(\n 'bias', [this.filters], 'float32', this.biasInitializer,\n this.biasRegularizer, true, this.biasConstraint);\n }\n\n // Set input spec.\n this.inputSpec =\n [new InputSpec({ndim: 4, axes: {[channelAxis]: inputDim}})];\n this.built = true;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tfc.tidy(() => {\n let input = getExactlyOneTensor(inputs);\n if (input.shape.length !== 4) {\n throw new ValueError(\n `Conv2DTranspose.call() expects input tensor to be rank-4, but ` +\n `received a tensor of rank-${input.shape.length}`);\n }\n\n const inputShape = input.shape;\n const batchSize = inputShape[0];\n\n let hAxis: number;\n let wAxis: number;\n if (this.dataFormat === 'channelsFirst') {\n hAxis = 2;\n wAxis = 3;\n } else {\n hAxis = 1;\n wAxis = 2;\n }\n\n const height = inputShape[hAxis];\n const width = inputShape[wAxis];\n const kernelH = this.kernelSize[0];\n const kernelW = this.kernelSize[1];\n const strideH = this.strides[0];\n const strideW = this.strides[1];\n\n // Infer the dynamic output shape.\n const outHeight = deconvLength(height, strideH, kernelH, this.padding);\n const outWidth = deconvLength(width, strideW, kernelW, this.padding);\n\n // Porting Note: We don't branch based on `this.dataFormat` here,\n // because\n // the tjfs-core function `conv2dTranspose` called below always\n // assumes channelsLast.\n const outputShape: [number, number, number, number] =\n 
[batchSize, outHeight, outWidth, this.filters];\n\n if (this.dataFormat !== 'channelsLast') {\n input = tfc.transpose(input, [0, 2, 3, 1]);\n }\n let outputs = tfc.conv2dTranspose(\n input as Tensor4D, this.kernel.read() as Tensor4D, outputShape,\n this.strides as [number, number], this.padding as 'same' | 'valid');\n if (this.dataFormat !== 'channelsLast') {\n outputs = tfc.transpose(outputs, [0, 3, 1, 2]);\n }\n\n if (this.bias != null) {\n outputs =\n K.biasAdd(outputs, this.bias.read(), this.dataFormat) as Tensor4D;\n }\n if (this.activation != null) {\n outputs = this.activation.apply(outputs) as Tensor4D;\n }\n return outputs;\n });\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n inputShape = getExactlyOneShape(inputShape);\n const outputShape = inputShape.slice();\n\n let channelAxis: number;\n let heightAxis: number;\n let widthAxis: number;\n if (this.dataFormat === 'channelsFirst') {\n channelAxis = 1;\n heightAxis = 2;\n widthAxis = 3;\n } else {\n channelAxis = 3;\n heightAxis = 1;\n widthAxis = 2;\n }\n\n const kernelH = this.kernelSize[0];\n const kernelW = this.kernelSize[1];\n const strideH = this.strides[0];\n const strideW = this.strides[1];\n\n outputShape[channelAxis] = this.filters;\n outputShape[heightAxis] =\n deconvLength(outputShape[heightAxis], strideH, kernelH, this.padding);\n outputShape[widthAxis] =\n deconvLength(outputShape[widthAxis], strideW, kernelW, this.padding);\n return outputShape;\n }\n\n override getConfig(): serialization.ConfigDict {\n const config = super.getConfig();\n delete config['dilationRate'];\n return config;\n }\n}\nserialization.registerClass(Conv2DTranspose);\n\nexport class Conv3DTranspose extends Conv3D {\n /** @nocollapse */\n static override className = 'Conv3DTranspose';\n\n constructor(args: ConvLayerArgs) {\n super(args);\n this.inputSpec = [new InputSpec({ndim: 5})];\n\n if (this.padding !== 'same' && this.padding !== 'valid') {\n throw new ValueError(\n `Conv3DTranspose currently supports only padding modes 'same' ` +\n `and 'valid', but received padding mode ${this.padding}`);\n }\n }\n\n override build(inputShape: Shape|Shape[]): void {\n inputShape = getExactlyOneShape(inputShape);\n\n if (inputShape.length !== 5) {\n throw new ValueError(\n 'Input should have rank 5; Received input shape: ' +\n JSON.stringify(inputShape));\n }\n\n const channelAxis =\n this.dataFormat === 'channelsFirst' ? 1 : inputShape.length - 1;\n if (inputShape[channelAxis] == null) {\n throw new ValueError(\n 'The channel dimension of the inputs should be defined. 
' +\n 'Found `None`.');\n }\n const inputDim = inputShape[channelAxis];\n const kernelShape = this.kernelSize.concat([this.filters, inputDim]);\n\n this.kernel = this.addWeight(\n 'kernel', kernelShape, 'float32', this.kernelInitializer,\n this.kernelRegularizer, true, this.kernelConstraint);\n if (this.useBias) {\n this.bias = this.addWeight(\n 'bias', [this.filters], 'float32', this.biasInitializer,\n this.biasRegularizer, true, this.biasConstraint);\n }\n\n // Set input spec.\n this.inputSpec =\n [new InputSpec({ndim: 5, axes: {[channelAxis]: inputDim}})];\n this.built = true;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tfc.tidy(() => {\n let input = getExactlyOneTensor(inputs);\n if (input.shape.length !== 5) {\n throw new ValueError(\n `Conv3DTranspose.call() expects input tensor to be rank-4, but ` +\n `received a tensor of rank-${input.shape.length}`);\n }\n\n const inputShape = input.shape;\n const batchSize = inputShape[0];\n\n let hAxis: number;\n let wAxis: number;\n let dAxis: number;\n\n if (this.dataFormat === 'channelsFirst') {\n dAxis = 2;\n hAxis = 3;\n wAxis = 4;\n } else {\n dAxis = 1;\n hAxis = 2;\n wAxis = 3;\n }\n\n const depth = inputShape[dAxis];\n const height = inputShape[hAxis];\n const width = inputShape[wAxis];\n const kernelD = this.kernelSize[0];\n const kernelH = this.kernelSize[1];\n const kernelW = this.kernelSize[2];\n const strideD = this.strides[0];\n const strideH = this.strides[1];\n const strideW = this.strides[2];\n\n // Infer the dynamic output shape.\n const outDepth = deconvLength(depth, strideD, kernelD, this.padding);\n const outHeight = deconvLength(height, strideH, kernelH, this.padding);\n const outWidth = deconvLength(width, strideW, kernelW, this.padding);\n\n // Same as `conv2dTranspose`. 
We always assumes channelsLast.\n const outputShape: [number, number, number, number, number] =\n [batchSize, outDepth, outHeight, outWidth, this.filters];\n if (this.dataFormat !== 'channelsLast') {\n input = tfc.transpose(input, [0, 2, 3, 4, 1]);\n }\n let outputs = tfc.conv3dTranspose(\n input as Tensor5D, this.kernel.read() as Tensor5D, outputShape,\n this.strides as [number, number, number],\n this.padding as 'same' | 'valid');\n if (this.dataFormat !== 'channelsLast') {\n outputs = tfc.transpose(outputs, [0, 4, 1, 2, 3]);\n }\n\n if (this.bias !== null) {\n outputs =\n K.biasAdd(outputs, this.bias.read(), this.dataFormat) as Tensor5D;\n }\n if (this.activation !== null) {\n outputs = this.activation.apply(outputs) as Tensor5D;\n }\n return outputs;\n });\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n inputShape = getExactlyOneShape(inputShape);\n const outputShape = inputShape.slice();\n\n let channelAxis: number;\n let depthAxis: number;\n let heightAxis: number;\n let widthAxis: number;\n if (this.dataFormat === 'channelsFirst') {\n channelAxis = 1;\n depthAxis = 2;\n heightAxis = 3;\n widthAxis = 4;\n } else {\n channelAxis = 4;\n depthAxis = 1;\n heightAxis = 2;\n widthAxis = 3;\n }\n\n const kernelD = this.kernelSize[0];\n const kernelH = this.kernelSize[1];\n const kernelW = this.kernelSize[2];\n const strideD = this.strides[0];\n const strideH = this.strides[1];\n const strideW = this.strides[2];\n\n outputShape[channelAxis] = this.filters;\n outputShape[depthAxis] =\n deconvLength(outputShape[depthAxis], strideD, kernelD, this.padding);\n outputShape[heightAxis] =\n deconvLength(outputShape[heightAxis], strideH, kernelH, this.padding);\n outputShape[widthAxis] =\n deconvLength(outputShape[widthAxis], strideW, kernelW, this.padding);\n return outputShape;\n }\n\n override getConfig(): serialization.ConfigDict {\n const config = super.getConfig();\n delete config['dilationRate'];\n return config;\n }\n}\nserialization.registerClass(Conv3DTranspose);\n\nexport declare interface SeparableConvLayerArgs extends ConvLayerArgs {\n /**\n * The number of depthwise convolution output channels for each input\n * channel.\n * The total number of depthwise convolution output channels will be equal\n * to `filtersIn * depthMultiplier`. 
Default: 1.\n */\n depthMultiplier?: number;\n\n /**\n * Initializer for the depthwise kernel matrix.\n */\n depthwiseInitializer?: InitializerIdentifier|Initializer;\n\n /**\n * Initializer for the pointwise kernel matrix.\n */\n pointwiseInitializer?: InitializerIdentifier|Initializer;\n\n /**\n * Regularizer function applied to the depthwise kernel matrix.\n */\n depthwiseRegularizer?: RegularizerIdentifier|Regularizer;\n\n /**\n * Regularizer function applied to the pointwise kernel matrix.\n */\n pointwiseRegularizer?: RegularizerIdentifier|Regularizer;\n\n /**\n * Constraint function applied to the depthwise kernel matrix.\n */\n depthwiseConstraint?: ConstraintIdentifier|Constraint;\n\n /**\n * Constraint function applied to the pointwise kernel matrix.\n */\n pointwiseConstraint?: ConstraintIdentifier|Constraint;\n}\n\nexport class SeparableConv extends Conv {\n /** @nocollapse */\n static className = 'SeparableConv';\n\n readonly depthMultiplier: number;\n\n protected readonly depthwiseInitializer?: Initializer;\n protected readonly depthwiseRegularizer?: Regularizer;\n protected readonly depthwiseConstraint?: Constraint;\n protected readonly pointwiseInitializer?: Initializer;\n protected readonly pointwiseRegularizer?: Regularizer;\n protected readonly pointwiseConstraint?: Constraint;\n\n readonly DEFAULT_DEPTHWISE_INITIALIZER: InitializerIdentifier =\n 'glorotUniform';\n readonly DEFAULT_POINTWISE_INITIALIZER: InitializerIdentifier =\n 'glorotUniform';\n\n protected depthwiseKernel: LayerVariable = null;\n protected pointwiseKernel: LayerVariable = null;\n\n constructor(rank: number, config?: SeparableConvLayerArgs) {\n super(rank, config);\n\n if (config.filters == null) {\n throw new ValueError(\n 'The `filters` configuration field is required by SeparableConv, ' +\n 'but is unspecified.');\n }\n if (config.kernelInitializer != null || config.kernelRegularizer != null ||\n config.kernelConstraint != null) {\n throw new ValueError(\n 'Fields kernelInitializer, kernelRegularizer and kernelConstraint ' +\n 'are invalid for SeparableConv2D. Use depthwiseInitializer, ' +\n 'depthwiseRegularizer, depthwiseConstraint, pointwiseInitializer, ' +\n 'pointwiseRegularizer and pointwiseConstraint instead.');\n }\n if (config.padding != null && config.padding !== 'same' &&\n config.padding !== 'valid') {\n throw new ValueError(\n `SeparableConv${this.rank}D supports only padding modes: ` +\n `'same' and 'valid', but received ${JSON.stringify(config.padding)}`);\n }\n\n this.depthMultiplier =\n config.depthMultiplier == null ? 1 : config.depthMultiplier;\n this.depthwiseInitializer = getInitializer(\n config.depthwiseInitializer || this.DEFAULT_DEPTHWISE_INITIALIZER);\n this.depthwiseRegularizer = getRegularizer(config.depthwiseRegularizer);\n this.depthwiseConstraint = getConstraint(config.depthwiseConstraint);\n this.pointwiseInitializer = getInitializer(\n config.depthwiseInitializer || this.DEFAULT_POINTWISE_INITIALIZER);\n this.pointwiseRegularizer = getRegularizer(config.pointwiseRegularizer);\n this.pointwiseConstraint = getConstraint(config.pointwiseConstraint);\n }\n\n override build(inputShape: Shape|Shape[]): void {\n inputShape = getExactlyOneShape(inputShape);\n if (inputShape.length < this.rank + 2) {\n throw new ValueError(\n `Inputs to SeparableConv${this.rank}D should have rank ` +\n `${this.rank + 2}, but received input shape: ` +\n `${JSON.stringify(inputShape)}`);\n }\n const channelAxis =\n this.dataFormat === 'channelsFirst' ? 
1 : inputShape.length - 1;\n if (inputShape[channelAxis] == null || inputShape[channelAxis] < 0) {\n throw new ValueError(\n `The channel dimension of the inputs should be defined, ` +\n `but found ${JSON.stringify(inputShape[channelAxis])}`);\n }\n\n const inputDim = inputShape[channelAxis];\n const depthwiseKernelShape =\n this.kernelSize.concat([inputDim, this.depthMultiplier]);\n const pointwiseKernelShape = [];\n for (let i = 0; i < this.rank; ++i) {\n pointwiseKernelShape.push(1);\n }\n pointwiseKernelShape.push(inputDim * this.depthMultiplier, this.filters);\n\n const trainable = true;\n this.depthwiseKernel = this.addWeight(\n 'depthwise_kernel', depthwiseKernelShape, 'float32',\n this.depthwiseInitializer, this.depthwiseRegularizer, trainable,\n this.depthwiseConstraint);\n this.pointwiseKernel = this.addWeight(\n 'pointwise_kernel', pointwiseKernelShape, 'float32',\n this.pointwiseInitializer, this.pointwiseRegularizer, trainable,\n this.pointwiseConstraint);\n if (this.useBias) {\n this.bias = this.addWeight(\n 'bias', [this.filters], 'float32', this.biasInitializer,\n this.biasRegularizer, trainable, this.biasConstraint);\n } else {\n this.bias = null;\n }\n\n this.inputSpec =\n [new InputSpec({ndim: this.rank + 2, axes: {[channelAxis]: inputDim}})];\n this.built = true;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n inputs = getExactlyOneTensor(inputs);\n\n let output: Tensor;\n if (this.rank === 1) {\n throw new NotImplementedError(\n '1D separable convolution is not implemented yet.');\n } else if (this.rank === 2) {\n if (this.dataFormat === 'channelsFirst') {\n inputs = tfc.transpose(inputs, [0, 2, 3, 1]); // NCHW -> NHWC.\n }\n\n output = tfc.separableConv2d(\n inputs as Tensor4D, this.depthwiseKernel.read() as Tensor4D,\n this.pointwiseKernel.read() as Tensor4D,\n this.strides as [number, number], this.padding as 'same' | 'valid',\n this.dilationRate as [number, number], 'NHWC');\n }\n\n if (this.useBias) {\n output = K.biasAdd(output, this.bias.read(), this.dataFormat);\n }\n if (this.activation != null) {\n output = this.activation.apply(output);\n }\n\n if (this.dataFormat === 'channelsFirst') {\n output = tfc.transpose(output, [0, 3, 1, 2]); // NHWC -> NCHW.\n }\n return output;\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n const config = super.getConfig();\n delete config['rank'];\n delete config['kernelInitializer'];\n delete config['kernelRegularizer'];\n delete config['kernelConstraint'];\n config['depthwiseInitializer'] =\n serializeInitializer(this.depthwiseInitializer);\n config['pointwiseInitializer'] =\n serializeInitializer(this.pointwiseInitializer);\n config['depthwiseRegularizer'] =\n serializeRegularizer(this.depthwiseRegularizer);\n config['pointwiseRegularizer'] =\n serializeRegularizer(this.pointwiseRegularizer);\n config['depthwiseConstraint'] =\n serializeConstraint(this.depthwiseConstraint);\n config['pointwiseConstraint'] =\n serializeConstraint(this.pointwiseConstraint);\n return config;\n }\n}\n\nexport class SeparableConv2D extends SeparableConv {\n /** @nocollapse */\n static override className = 'SeparableConv2D';\n constructor(args?: SeparableConvLayerArgs) {\n super(2, args);\n }\n}\nserialization.registerClass(SeparableConv2D);\n\nexport class Conv1D extends Conv {\n /** @nocollapse */\n static className = 'Conv1D';\n constructor(args: ConvLayerArgs) {\n super(1, args);\n Conv1D.verifyArgs(args);\n this.inputSpec = [{ndim: 3}];\n }\n\n override getConfig(): 
serialization.ConfigDict {\n const config = super.getConfig();\n delete config['rank'];\n delete config['dataFormat'];\n return config;\n }\n\n protected static override verifyArgs(args: ConvLayerArgs) {\n // config.kernelSize must be a number or array of numbers.\n if (typeof args.kernelSize !== 'number' &&\n !generic_utils.checkArrayTypeAndLength(\n args.kernelSize, 'number', 1, 1)) {\n throw new ValueError(\n `Conv1D expects config.kernelSize to be number or number[] with ` +\n `length 1, but received ${JSON.stringify(args.kernelSize)}.`);\n }\n }\n}\nserialization.registerClass(Conv1D);\n\nexport declare interface Cropping2DLayerArgs extends LayerArgs {\n /**\n * Dimension of the cropping along the width and the height.\n * - If integer: the same symmetric cropping\n * is applied to width and height.\n * - If list of 2 integers:\n * interpreted as two different\n * symmetric cropping values for height and width:\n * `[symmetric_height_crop, symmetric_width_crop]`.\n * - If a list of 2 lists of 2 integers:\n * interpreted as\n * `[[top_crop, bottom_crop], [left_crop, right_crop]]`\n */\n cropping: number|[number, number]|[[number, number], [number, number]];\n\n /**\n * Format of the data, which determines the ordering of the dimensions in\n * the inputs.\n *\n * `channels_last` corresponds to inputs with shape\n * `(batch, ..., channels)`\n *\n * `channels_first` corresponds to inputs with shape\n * `(batch, channels, ...)`\n *\n * Defaults to `channels_last`.\n */\n dataFormat?: DataFormat;\n}\n\nexport class Cropping2D extends Layer {\n /** @nocollapse */\n static className = 'Cropping2D';\n protected readonly cropping: [[number, number], [number, number]];\n protected readonly dataFormat: DataFormat;\n\n constructor(args: Cropping2DLayerArgs) {\n super(args);\n if (typeof args.cropping === 'number') {\n this.cropping =\n [[args.cropping, args.cropping], [args.cropping, args.cropping]];\n } else if (typeof args.cropping[0] === 'number') {\n this.cropping = [\n [args.cropping[0], args.cropping[0]],\n [args.cropping[1] as number, args.cropping[1] as number]\n ];\n } else {\n this.cropping = args.cropping as [[number, number], [number, number]];\n }\n this.dataFormat =\n args.dataFormat === undefined ? 
'channelsLast' : args.dataFormat;\n this.inputSpec = [{ndim: 4}];\n }\n\n override computeOutputShape(inputShape: Shape): Shape {\n if (this.dataFormat === 'channelsFirst') {\n return [\n inputShape[0], inputShape[1],\n inputShape[2] - this.cropping[0][0] - this.cropping[0][1],\n inputShape[3] - this.cropping[1][0] - this.cropping[1][1]\n ];\n } else {\n return [\n inputShape[0],\n inputShape[1] - this.cropping[0][0] - this.cropping[0][1],\n inputShape[2] - this.cropping[1][0] - this.cropping[1][1], inputShape[3]\n ];\n }\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n inputs = getExactlyOneTensor(inputs);\n\n if (this.dataFormat === 'channelsLast') {\n const hSliced = K.sliceAlongAxis(\n inputs, this.cropping[0][0],\n inputs.shape[1] - this.cropping[0][0] - this.cropping[0][1], 2);\n return K.sliceAlongAxis(\n hSliced, this.cropping[1][0],\n inputs.shape[2] - this.cropping[1][1] - this.cropping[1][0], 3);\n } else {\n const hSliced = K.sliceAlongAxis(\n inputs, this.cropping[0][0],\n inputs.shape[2] - this.cropping[0][0] - this.cropping[0][1], 3);\n return K.sliceAlongAxis(\n hSliced, this.cropping[1][0],\n inputs.shape[3] - this.cropping[1][1] - this.cropping[1][0], 4);\n }\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n const config = {cropping: this.cropping, dataFormat: this.dataFormat};\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(Cropping2D);\n\nexport declare interface UpSampling2DLayerArgs extends LayerArgs {\n /**\n * The upsampling factors for rows and columns.\n *\n * Defaults to `[2, 2]`.\n */\n size?: number[];\n /**\n * Format of the data, which determines the ordering of the dimensions in\n * the inputs.\n *\n * `\"channelsLast\"` corresponds to inputs with shape\n * `[batch, ..., channels]`\n *\n * `\"channelsFirst\"` corresponds to inputs with shape `[batch, channels,\n * ...]`.\n *\n * Defaults to `\"channelsLast\"`.\n */\n dataFormat?: DataFormat;\n /**\n * The interpolation mechanism, one of `\"nearest\"` or `\"bilinear\"`, default\n * to `\"nearest\"`.\n */\n interpolation?: InterpolationFormat;\n}\n\nexport class UpSampling2D extends Layer {\n /** @nocollapse */\n static className = 'UpSampling2D';\n protected readonly DEFAULT_SIZE = [2, 2];\n protected readonly size: number[];\n protected readonly dataFormat: DataFormat;\n protected readonly interpolation: InterpolationFormat;\n\n constructor(args: UpSampling2DLayerArgs) {\n super(args);\n this.inputSpec = [{ndim: 4}];\n this.size = args.size == null ? this.DEFAULT_SIZE : args.size;\n this.dataFormat =\n args.dataFormat == null ? 'channelsLast' : args.dataFormat;\n checkDataFormat(this.dataFormat);\n this.interpolation =\n args.interpolation == null ? 'nearest' : args.interpolation;\n checkInterpolationFormat(this.interpolation);\n }\n\n override computeOutputShape(inputShape: Shape): Shape {\n if (this.dataFormat === 'channelsFirst') {\n const height =\n inputShape[2] == null ? null : this.size[0] * inputShape[2];\n const width = inputShape[3] == null ? null : this.size[1] * inputShape[3];\n return [inputShape[0], inputShape[1], height, width];\n } else {\n const height =\n inputShape[1] == null ? null : this.size[0] * inputShape[1];\n const width = inputShape[2] == null ? 
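// Illustrative sketch of Cropping2D (assumed public tf.layers API): with cropping
// [[top, bottom], [left, right]], the height shrinks by top + bottom and the width
// by left + right, matching the computeOutputShape arithmetic above (channelsLast).
import * as tf from '@tensorflow/tfjs';

const crop = tf.layers.cropping2D({cropping: [[1, 1], [2, 2]]});
const cropped = crop.apply(tf.zeros([1, 10, 10, 3])) as tf.Tensor;
console.log(cropped.shape);  // [1, 10 - 2, 10 - 4, 3] => [1, 8, 6, 3]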
null : this.size[1] * inputShape[2];\n return [inputShape[0], height, width, inputShape[3]];\n }\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tfc.tidy(() => {\n let input = getExactlyOneTensor(inputs) as Tensor4D;\n const inputShape = input.shape;\n\n if (this.dataFormat === 'channelsFirst') {\n input = tfc.transpose(input, [0, 2, 3, 1]);\n const height = this.size[0] * inputShape[2];\n const width = this.size[1] * inputShape[3];\n\n const resized = this.interpolation === 'nearest' ?\n tfc.image.resizeNearestNeighbor(input, [height, width]) :\n tfc.image.resizeBilinear(input, [height, width]);\n return tfc.transpose(resized, [0, 3, 1, 2]);\n } else {\n const height = this.size[0] * inputShape[1];\n const width = this.size[1] * inputShape[2];\n return this.interpolation === 'nearest' ?\n tfc.image.resizeNearestNeighbor(input, [height, width]) :\n tfc.image.resizeBilinear(input, [height, width]);\n }\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n const config = {\n size: this.size,\n dataFormat: this.dataFormat,\n interpolation: this.interpolation\n };\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(UpSampling2D);\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * TensorFlow.js Layers: Depthwise Convolutional Layers\n */\n\nimport * as tfc from '@tensorflow/tfjs-core';\nimport {serialization, Tensor, Tensor4D, tidy} from '@tensorflow/tfjs-core';\n\nimport {imageDataFormat} from '../backend/common';\nimport * as K from '../backend/tfjs_backend';\nimport {checkDataFormat} from '../common';\nimport {Constraint, ConstraintIdentifier, getConstraint, serializeConstraint} from '../constraints';\nimport {ValueError} from '../errors';\nimport {getInitializer, Initializer, InitializerIdentifier, serializeInitializer} from '../initializers';\nimport {DataFormat, Shape} from '../keras_format/common';\nimport {getRegularizer, Regularizer, RegularizerIdentifier, serializeRegularizer} from '../regularizers';\nimport {Kwargs} from '../types';\nimport {convOutputLength} from '../utils/conv_utils';\nimport {getExactlyOneShape, getExactlyOneTensor} from '../utils/types_utils';\nimport {LayerVariable} from '../variables';\n\nimport {BaseConv, BaseConvLayerArgs, ConvLayerArgs, preprocessConv2DInput} from './convolutional';\n\n/**\n * 2D convolution with separable filters.\n * @param x Input tensor.\n * @param depthwiseKernel Convolution kernel for depthwise convolution.\n * @param strides Strides (Array of two integers).\n * @param padding Padding model.\n * @param dataFormat Data format.\n * @param dilationRate Array of two integers, dilation rates for the separable\n * convolution.\n * @returns Output tensor.\n * @throws ValueError If depthwiseKernel is not a 4D array.\n */\nexport function depthwiseConv2d(\n x: Tensor, depthwiseKernel: Tensor, strides: [number, number] = [1, 1],\n padding = 'valid', dataFormat?: DataFormat,\n dilationRate?: [number, number]): Tensor {\n return tidy(() => {\n if (dataFormat == null) {\n dataFormat = imageDataFormat();\n }\n checkDataFormat(dataFormat);\n let y = preprocessConv2DInput(x, dataFormat);\n if (x.rank !== 4) {\n throw new ValueError(\n `Input for depthwiseConv2d is 
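// Hedged sketch of UpSampling2D as documented above: each spatial dimension is
// multiplied by the corresponding size factor; interpolation is 'nearest' unless
// 'bilinear' is requested. Values below are illustrative.
import * as tf from '@tensorflow/tfjs';

const up = tf.layers.upSampling2d({size: [2, 3], interpolation: 'nearest'});
const upsampled = up.apply(tf.zeros([1, 4, 4, 3])) as tf.Tensor;
console.log(upsampled.shape);  // [1, 4 * 2, 4 * 3, 3] => [1, 8, 12, 3]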
required to be 4-D, but is instead ` +\n `${x.rank}-D`);\n }\n if (depthwiseKernel.rank !== 4) {\n throw new ValueError(\n `depthwiseKernel is required to be 4-D, but is instead ` +\n `${depthwiseKernel.rank}-D`);\n }\n y = tfc.depthwiseConv2d(\n y as Tensor4D, depthwiseKernel as Tensor4D, strides,\n padding === 'same' ? 'same' : 'valid', 'NHWC', dilationRate);\n if (dataFormat === 'channelsFirst') {\n y = tfc.transpose(y, [0, 3, 1, 2]);\n }\n return y;\n });\n}\n\nexport declare interface DepthwiseConv2DLayerArgs extends BaseConvLayerArgs {\n /**\n * An integer or Array of 2 integers, specifying the width and height of the\n * 2D convolution window. Can be a single integer to specify the same value\n * for all spatial dimensions.\n */\n kernelSize: number|[number, number];\n\n /**\n * The number of depthwise convolution output channels for each input\n * channel.\n * The total number of depthwise convolution output channels will be equal to\n * `filtersIn * depthMultiplier`.\n * Default: 1.\n */\n depthMultiplier?: number;\n\n /**\n * Initializer for the depthwise kernel matrix.\n * Default: GlorotNormal.\n */\n depthwiseInitializer?: InitializerIdentifier|Initializer;\n\n /**\n * Constraint for the depthwise kernel matrix.\n */\n depthwiseConstraint?: ConstraintIdentifier|Constraint;\n\n /**\n * Regularizer function for the depthwise kernel matrix.\n */\n depthwiseRegularizer?: RegularizerIdentifier|Regularizer;\n}\n\nexport class DepthwiseConv2D extends BaseConv {\n /** @nocollapse */\n static className = 'DepthwiseConv2D';\n private readonly depthMultiplier: number;\n private readonly depthwiseInitializer: Initializer;\n private readonly depthwiseConstraint: Constraint;\n private readonly depthwiseRegularizer: Regularizer;\n\n private depthwiseKernel: LayerVariable = null;\n\n constructor(args: DepthwiseConv2DLayerArgs) {\n super(2, args as ConvLayerArgs);\n this.depthMultiplier =\n args.depthMultiplier == null ? 1 : args.depthMultiplier;\n this.depthwiseInitializer = getInitializer(\n args.depthwiseInitializer || this.DEFAULT_KERNEL_INITIALIZER);\n this.depthwiseConstraint = getConstraint(args.depthwiseConstraint);\n this.depthwiseRegularizer = getRegularizer(args.depthwiseRegularizer);\n }\n\n override build(inputShape: Shape|Shape[]): void {\n inputShape = getExactlyOneShape(inputShape);\n if (inputShape.length < 4) {\n throw new ValueError(\n `Inputs to DepthwiseConv2D should have rank 4. ` +\n `Received input shape: ${JSON.stringify(inputShape)}.`);\n }\n const channelAxis = this.dataFormat === 'channelsFirst' ? 
1 : 3;\n if (inputShape[channelAxis] == null || inputShape[channelAxis] < 0) {\n throw new ValueError(\n 'The channel dimension of the inputs to DepthwiseConv2D should ' +\n `be defined, but is not (${inputShape[channelAxis]}).`);\n }\n const inputDim = inputShape[channelAxis];\n const depthwiseKernelShape: Shape = [\n this.kernelSize[0], this.kernelSize[1], inputDim, this.depthMultiplier\n ];\n\n this.depthwiseKernel = this.addWeight(\n 'depthwise_kernel', depthwiseKernelShape, null,\n this.depthwiseInitializer, this.depthwiseRegularizer, true,\n this.depthwiseConstraint);\n if (this.useBias) {\n this.bias = this.addWeight(\n 'bias', [inputDim * this.depthMultiplier], null, this.biasInitializer,\n this.biasRegularizer, true, this.biasConstraint);\n } else {\n this.bias = null;\n }\n this.built = true;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n inputs = getExactlyOneTensor(inputs);\n let outputs = depthwiseConv2d(\n inputs, this.depthwiseKernel.read(), this.strides as [number, number],\n this.padding, this.dataFormat, null);\n // TODO(cais): Add support for dilation.\n if (this.useBias) {\n outputs = K.biasAdd(outputs, this.bias.read(), this.dataFormat);\n }\n if (this.activation != null) {\n outputs = this.activation.apply(outputs);\n }\n return outputs;\n });\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n inputShape = getExactlyOneShape(inputShape);\n const rows =\n this.dataFormat === 'channelsFirst' ? inputShape[2] : inputShape[1];\n const cols =\n this.dataFormat === 'channelsFirst' ? inputShape[3] : inputShape[2];\n const outFilters = this.dataFormat === 'channelsFirst' ?\n inputShape[1] * this.depthMultiplier :\n inputShape[3] * this.depthMultiplier;\n const outRows = convOutputLength(\n rows, this.kernelSize[0], this.padding, this.strides[0]);\n const outCols = convOutputLength(\n cols, this.kernelSize[1], this.padding, this.strides[1]);\n if (this.dataFormat === 'channelsFirst') {\n return [inputShape[0], outFilters, outRows, outCols];\n } else {\n // In this case, assume 'channelsLast'.\n return [inputShape[0], outRows, outCols, outFilters];\n }\n }\n\n override getConfig(): serialization.ConfigDict {\n const config = super.getConfig();\n config['depthMultiplier'] = this.depthMultiplier;\n config['depthwiseInitializer'] =\n serializeInitializer(this.depthwiseInitializer);\n config['depthwiseRegularizer'] =\n serializeRegularizer(this.depthwiseRegularizer);\n config['depthwiseConstraint'] =\n serializeConstraint(this.depthwiseRegularizer);\n return config;\n }\n}\nserialization.registerClass(DepthwiseConv2D);\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * TensorFlow.js Layers: Recurrent Neural Network Layers.\n */\n\nimport * as tfc from '@tensorflow/tfjs-core';\nimport {DataType, serialization, Tensor, tidy, util} from '@tensorflow/tfjs-core';\n\nimport {Activation, getActivation, serializeActivation} from '../activations';\nimport * as K from '../backend/tfjs_backend';\nimport {nameScope} from '../common';\nimport {Constraint, ConstraintIdentifier, getConstraint, serializeConstraint} from '../constraints';\nimport {InputSpec, SymbolicTensor} from '../engine/topology';\nimport {Layer, LayerArgs} from '../engine/topology';\nimport 
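// Rough usage sketch of DepthwiseConv2D (assumed tf.layers API): each input channel
// is convolved with its own `depthMultiplier` filters and no cross-channel mixing
// occurs, so the output has inputChannels * depthMultiplier channels.
import * as tf from '@tensorflow/tfjs';

const dw = tf.layers.depthwiseConv2d({
  kernelSize: 3,
  depthMultiplier: 2,
  padding: 'same',
  useBias: false,
});
const dwOut = dw.apply(tf.zeros([1, 8, 8, 4])) as tf.Tensor;
console.log(dwOut.shape);  // [1, 8, 8, 4 * 2] => [1, 8, 8, 8]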
{AttributeError, NotImplementedError, ValueError} from '../errors';\nimport {getInitializer, Initializer, InitializerIdentifier, Ones, serializeInitializer} from '../initializers';\nimport {ActivationIdentifier} from '../keras_format/activation_config';\nimport {Shape} from '../keras_format/common';\nimport {getRegularizer, Regularizer, RegularizerIdentifier, serializeRegularizer} from '../regularizers';\nimport {Kwargs, RnnStepFunction} from '../types';\nimport {assertPositiveInteger} from '../utils/generic_utils';\nimport * as math_utils from '../utils/math_utils';\nimport {getExactlyOneShape, getExactlyOneTensor, isArrayOfShapes} from '../utils/types_utils';\nimport {batchGetValue, batchSetValue, LayerVariable} from '../variables';\n\nimport {deserialize} from './serialization';\n\n/**\n * Standardize `apply()` args to a single list of tensor inputs.\n *\n * When running a model loaded from file, the input tensors `initialState` and\n * `constants` are passed to `RNN.apply()` as part of `inputs` instead of the\n * dedicated kwargs fields. `inputs` consists of\n * `[inputs, initialState0, initialState1, ..., constant0, constant1]` in this\n * case.\n * This method makes sure that arguments are\n * separated and that `initialState` and `constants` are `Array`s of tensors\n * (or None).\n *\n * @param inputs Tensor or `Array` of tensors.\n * @param initialState Tensor or `Array` of tensors or `null`/`undefined`.\n * @param constants Tensor or `Array` of tensors or `null`/`undefined`.\n * @returns An object consisting of\n * inputs: A tensor.\n * initialState: `Array` of tensors or `null`.\n * constants: `Array` of tensors or `null`.\n * @throws ValueError, if `inputs` is an `Array` but either `initialState` or\n * `constants` is provided.\n */\nexport function standardizeArgs(\n inputs: Tensor|Tensor[]|SymbolicTensor|SymbolicTensor[],\n initialState: Tensor|Tensor[]|SymbolicTensor|SymbolicTensor[],\n constants: Tensor|Tensor[]|SymbolicTensor|SymbolicTensor[],\n numConstants?: number): {\n inputs: Tensor|SymbolicTensor,\n initialState: Tensor[]|SymbolicTensor[],\n constants: Tensor[]|SymbolicTensor[]\n} {\n if (Array.isArray(inputs)) {\n if (initialState != null || constants != null) {\n throw new ValueError(\n 'When inputs is an array, neither initialState or constants ' +\n 'should be provided');\n }\n if (numConstants != null) {\n constants = inputs.slice(inputs.length - numConstants, inputs.length);\n inputs = inputs.slice(0, inputs.length - numConstants);\n }\n if (inputs.length > 1) {\n initialState = inputs.slice(1, inputs.length);\n }\n inputs = inputs[0];\n }\n\n function toListOrNull(x: Tensor|Tensor[]|SymbolicTensor|\n SymbolicTensor[]): Tensor[]|SymbolicTensor[] {\n if (x == null || Array.isArray(x)) {\n return x as Tensor[] | SymbolicTensor[];\n } else {\n return [x] as Tensor[] | SymbolicTensor[];\n }\n }\n\n initialState = toListOrNull(initialState);\n constants = toListOrNull(constants);\n\n return {inputs, initialState, constants};\n}\n\n/**\n * Iterates over the time dimension of a tensor.\n *\n * @param stepFunction RNN step function.\n * Parameters:\n * inputs: tensor with shape `[samples, ...]` (no time dimension),\n * representing input for the batch of samples at a certain time step.\n * states: an Array of tensors.\n * Returns:\n * outputs: tensor with shape `[samples, outputDim]` (no time dimension).\n * newStates: list of tensors, same length and shapes as `states`. 
The first\n * state in the list must be the output tensor at the previous timestep.\n * @param inputs Tensor of temporal data of shape `[samples, time, ...]` (at\n * least 3D).\n * @param initialStates Tensor with shape `[samples, outputDim]` (no time\n * dimension), containing the initial values of the states used in the step\n * function.\n * @param goBackwards If `true`, do the iteration over the time dimension in\n * reverse order and return the reversed sequence.\n * @param mask Binary tensor with shape `[sample, time, 1]`, with a zero for\n * every element that is masked.\n * @param constants An Array of constant values passed at each step.\n * @param unroll Whether to unroll the RNN or to use a symbolic loop. *Not*\n * applicable to this imperative deeplearn.js backend. Its value is ignored.\n * @param needPerStepOutputs Whether the per-step outputs are to be\n * concatenated into a single tensor and returned (as the second return\n * value). Default: `false`. This arg is included so that the relatively\n * expensive concatenation of the stepwise outputs can be omitted unless\n * the stepwise outputs need to be kept (e.g., for an LSTM layer of which\n * `returnSequence` is `true`.)\n * @returns An Array: `[lastOutput, outputs, newStates]`.\n * lastOutput: the lastest output of the RNN, of shape `[samples, ...]`.\n * outputs: tensor with shape `[samples, time, ...]` where each entry\n * `output[s, t]` is the output of the step function at time `t` for sample\n * `s`. This return value is provided if and only if the\n * `needPerStepOutputs` is set as `true`. If it is set as `false`, this\n * return value will be `undefined`.\n * newStates: Array of tensors, latest states returned by the step function,\n * of shape `(samples, ...)`.\n * @throws ValueError If input dimension is less than 3.\n *\n * TODO(nielsene): This needs to be tidy-ed.\n */\nexport function rnn(\n stepFunction: RnnStepFunction, inputs: Tensor, initialStates: Tensor[],\n goBackwards = false, mask?: Tensor, constants?: Tensor[], unroll = false,\n needPerStepOutputs = false): [Tensor, Tensor, Tensor[]] {\n return tfc.tidy(() => {\n const ndim = inputs.shape.length;\n if (ndim < 3) {\n throw new ValueError(`Input should be at least 3D, but is ${ndim}D.`);\n }\n\n // Transpose to time-major, i.e., from [batch, time, ...] to [time, batch,\n // ...].\n const axes = [1, 0].concat(math_utils.range(2, ndim));\n inputs = tfc.transpose(inputs, axes);\n\n if (constants != null) {\n throw new NotImplementedError(\n 'The rnn() functoin of the deeplearn.js backend does not support ' +\n 'constants yet.');\n }\n\n // Porting Note: the unroll option is ignored by the imperative backend.\n if (unroll) {\n console.warn(\n 'Backend rnn(): the unroll = true option is not applicable to the ' +\n 'imperative deeplearn.js backend.');\n }\n\n if (mask != null) {\n mask = tfc.cast(tfc.cast(mask, 'bool'), 'float32');\n if (mask.rank === ndim - 1) {\n mask = tfc.expandDims(mask, -1);\n }\n mask = tfc.transpose(mask, axes);\n }\n\n if (goBackwards) {\n inputs = tfc.reverse(inputs, 0);\n if (mask != null) {\n mask = tfc.reverse(mask, 0);\n }\n }\n\n // Porting Note: PyKeras with TensorFlow backend uses a symbolic loop\n // (tf.while_loop). But for the imperative deeplearn.js backend, we just\n // use the usual TypeScript control flow to iterate over the time steps in\n // the inputs.\n // Porting Note: PyKeras patches a \"_use_learning_phase\" attribute to\n // outputs.\n // This is not idiomatic in TypeScript. 
The info regarding whether we are\n // in a learning (i.e., training) phase for RNN is passed in a different\n // way.\n\n const perStepOutputs: Tensor[] = [];\n let lastOutput: Tensor;\n let states = initialStates;\n const timeSteps = inputs.shape[0];\n const perStepInputs = tfc.unstack(inputs);\n let perStepMasks: Tensor[];\n if (mask != null) {\n perStepMasks = tfc.unstack(mask);\n }\n\n for (let t = 0; t < timeSteps; ++t) {\n const currentInput = perStepInputs[t];\n const stepOutputs = tfc.tidy(() => stepFunction(currentInput, states));\n\n if (mask == null) {\n lastOutput = stepOutputs[0];\n states = stepOutputs[1];\n } else {\n const maskedOutputs = tfc.tidy(() => {\n const stepMask = perStepMasks[t];\n const negStepMask = tfc.sub(tfc.onesLike(stepMask), stepMask);\n // TODO(cais): Would tfc.where() be better for performance?\n const output = tfc.add(\n tfc.mul(stepOutputs[0], stepMask),\n tfc.mul(states[0], negStepMask));\n const newStates = states.map((state, i) => {\n return tfc.add(\n tfc.mul(stepOutputs[1][i], stepMask),\n tfc.mul(state, negStepMask));\n });\n return {output, newStates};\n });\n lastOutput = maskedOutputs.output;\n states = maskedOutputs.newStates;\n }\n\n if (needPerStepOutputs) {\n perStepOutputs.push(lastOutput);\n }\n }\n let outputs: Tensor;\n if (needPerStepOutputs) {\n const axis = 1;\n outputs = tfc.stack(perStepOutputs, axis);\n }\n return [lastOutput, outputs, states] as [Tensor, Tensor, Tensor[]];\n });\n}\n\nexport declare interface BaseRNNLayerArgs extends LayerArgs {\n /**\n * A RNN cell instance. A RNN cell is a class that has:\n * - a `call()` method, which takes `[Tensor, Tensor]` as the\n * first input argument. The first item is the input at time t, and\n * second item is the cell state at time t.\n * The `call()` method returns `[outputAtT, statesAtTPlus1]`.\n * The `call()` method of the cell can also take the argument `constants`,\n * see section \"Note on passing external constants\" below.\n * Porting Node: PyKeras overrides the `call()` signature of RNN cells,\n * which are Layer subtypes, to accept two arguments. tfjs-layers does\n * not do such overriding. Instead we preseve the `call()` signature,\n * which due to its `Tensor|Tensor[]` argument and return value is\n * flexible enough to handle the inputs and states.\n * - a `stateSize` attribute. This can be a single integer (single state)\n * in which case it is the size of the recurrent state (which should be\n * the same as the size of the cell output). This can also be an Array of\n * integers (one size per state). 
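// Conceptual sketch of the time-step iteration documented above (not the library's
// internal rnn() itself): inputs of shape [batch, time, features] are split along
// the time axis and a step function maps (input_t, states) to [output_t, newStates].
// The real implementation additionally handles masking, goBackwards and tfc.tidy().
import * as tf from '@tensorflow/tfjs';

function scanOverTime(
    step: (x: tf.Tensor, states: tf.Tensor[]) => [tf.Tensor, tf.Tensor[]],
    inputs: tf.Tensor,               // [batch, time, features]
    initialStates: tf.Tensor[]): [tf.Tensor, tf.Tensor[]] {
  const perStepInputs = tf.unstack(inputs, 1);  // one [batch, features] tensor per step
  let states = initialStates;
  let lastOutput: tf.Tensor = perStepInputs[0];
  for (const x of perStepInputs) {
    [lastOutput, states] = step(x, states);
  }
  return [lastOutput, states];
}

// Trivial "cell": the output is the input, which also becomes the single state.
const [last] = scanOverTime(
    (x: tf.Tensor, s: tf.Tensor[]): [tf.Tensor, tf.Tensor[]] => [x, [x]],
    tf.zeros([2, 5, 3]), [tf.zeros([2, 3])]);
console.log(last.shape);  // [2, 3]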
In this case, the first entry\n * (`stateSize[0]`) should be the same as the size of the cell output.\n * It is also possible for `cell` to be a list of RNN cell instances, in which\n * case the cells get stacked on after the other in the RNN, implementing an\n * efficient stacked RNN.\n */\n cell?: RNNCell|RNNCell[];\n\n /**\n * Whether to return the last output in the output sequence, or the full\n * sequence.\n */\n returnSequences?: boolean;\n\n /**\n * Whether to return the last state in addition to the output.\n */\n returnState?: boolean;\n\n /**\n * If `true`, process the input sequence backwards and return the reversed\n * sequence (default: `false`).\n */\n goBackwards?: boolean;\n\n /**\n * If `true`, the last state for each sample at index i in a batch will be\n * used as initial state of the sample of index i in the following batch\n * (default: `false`).\n *\n * You can set RNN layers to be \"stateful\", which means that the states\n * computed for the samples in one batch will be reused as initial states\n * for the samples in the next batch. This assumes a one-to-one mapping\n * between samples in different successive batches.\n *\n * To enable \"statefulness\":\n * - specify `stateful: true` in the layer constructor.\n * - specify a fixed batch size for your model, by passing\n * - if sequential model:\n * `batchInputShape: [...]` to the first layer in your model.\n * - else for functional model with 1 or more Input layers:\n * `batchShape: [...]` to all the first layers in your model.\n * This is the expected shape of your inputs\n * *including the batch size*.\n * It should be a tuple of integers, e.g., `[32, 10, 100]`.\n * - specify `shuffle: false` when calling `LayersModel.fit()`.\n *\n * To reset the state of your model, call `resetStates()` on either the\n * specific layer or on the entire model.\n */\n stateful?: boolean;\n // TODO(cais): Explore whether we can warn users when they fail to set\n // `shuffle: false` when training a model consisting of stateful RNNs\n // and any stateful Layers in general.\n\n /**\n * If `true`, the network will be unrolled, else a symbolic loop will be\n * used. Unrolling can speed up a RNN, although it tends to be more\n * memory-intensive. Unrolling is only suitable for short sequences (default:\n * `false`).\n * Porting Note: tfjs-layers has an imperative backend. RNNs are executed with\n * normal TypeScript control flow. Hence this property is inapplicable and\n * ignored in tfjs-layers.\n */\n unroll?: boolean;\n\n /**\n * Dimensionality of the input (integer).\n * This option (or alternatively, the option `inputShape`) is required when\n * this layer is used as the first layer in a model.\n */\n inputDim?: number;\n\n /**\n * Length of the input sequences, to be specified when it is constant.\n * This argument is required if you are going to connect `Flatten` then\n * `Dense` layers upstream (without it, the shape of the dense outputs cannot\n * be computed). 
Note that if the recurrent layer is not the first layer in\n * your model, you would need to specify the input length at the level of the\n * first layer (e.g., via the `inputShape` option).\n */\n inputLength?: number;\n}\n\nexport class RNN extends Layer {\n /** @nocollapse */\n static className = 'RNN';\n public readonly cell: RNNCell;\n public readonly returnSequences: boolean;\n public readonly returnState: boolean;\n public readonly goBackwards: boolean;\n public readonly unroll: boolean;\n\n public stateSpec: InputSpec[];\n protected states_: Tensor[];\n\n // NOTE(cais): For stateful RNNs, the old states cannot be disposed right\n // away when new states are set, because the old states may need to be used\n // later for backpropagation through time (BPTT) and other purposes. So we\n // keep them here for final disposal when the state is reset completely\n // (i.e., through no-arg call to `resetStates()`).\n protected keptStates: Tensor[][];\n\n private numConstants: number;\n\n constructor(args: RNNLayerArgs) {\n super(args);\n let cell: RNNCell;\n if (args.cell == null) {\n throw new ValueError(\n 'cell property is missing for the constructor of RNN.');\n } else if (Array.isArray(args.cell)) {\n cell = new StackedRNNCells({cells: args.cell});\n } else {\n cell = args.cell;\n }\n if (cell.stateSize == null) {\n throw new ValueError(\n 'The RNN cell should have an attribute `stateSize` (tuple of ' +\n 'integers, one integer per RNN state).');\n }\n this.cell = cell;\n this.returnSequences =\n args.returnSequences == null ? false : args.returnSequences;\n this.returnState = args.returnState == null ? false : args.returnState;\n this.goBackwards = args.goBackwards == null ? false : args.goBackwards;\n this._stateful = args.stateful == null ? false : args.stateful;\n this.unroll = args.unroll == null ? false : args.unroll;\n\n this.supportsMasking = true;\n this.inputSpec = [new InputSpec({ndim: 3})];\n this.stateSpec = null;\n this.states_ = null;\n // TODO(cais): Add constantsSpec and numConstants.\n this.numConstants = null;\n // TODO(cais): Look into the use of initial_state in the kwargs of the\n // constructor.\n\n this.keptStates = [];\n }\n\n // Porting Note: This is the equivalent of `RNN.states` property getter in\n // PyKeras.\n getStates(): Tensor[] {\n if (this.states_ == null) {\n const numStates =\n Array.isArray(this.cell.stateSize) ? 
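// Sketch of the "stateful" recipe described above, assuming the public tf.layers
// API: fix the batch size via batchInputShape, pass shuffle: false to fit(), and
// call resetStates() when an independent sequence starts. Shapes are illustrative.
import * as tf from '@tensorflow/tfjs';

const statefulRnn = tf.layers.simpleRNN({
  units: 8,
  stateful: true,
  batchInputShape: [32, 10, 100],  // [batchSize, timeSteps, inputDim]
});
const statefulModel = tf.sequential({layers: [statefulRnn]});
statefulModel.compile({optimizer: 'adam', loss: 'meanSquaredError'});

// Keep batches in order so batch i of one epoch continues batch i of the previous one:
// await statefulModel.fit(xs, ys, {shuffle: false, epochs: 5});

statefulRnn.resetStates();  // drop carried-over states before a new, unrelated sequence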
this.cell.stateSize.length : 1;\n return math_utils.range(0, numStates).map(x => null);\n } else {\n return this.states_;\n }\n }\n\n // Porting Note: This is the equivalent of the `RNN.states` property setter in\n // PyKeras.\n setStates(states: Tensor[]): void {\n this.states_ = states;\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n if (isArrayOfShapes(inputShape)) {\n inputShape = (inputShape as Shape[])[0];\n }\n inputShape = inputShape as Shape;\n\n // TODO(cais): Remove the casting once stacked RNN cells become supported.\n let stateSize = this.cell.stateSize;\n if (!Array.isArray(stateSize)) {\n stateSize = [stateSize];\n }\n const outputDim = stateSize[0];\n let outputShape: Shape|Shape[];\n if (this.returnSequences) {\n outputShape = [inputShape[0], inputShape[1], outputDim];\n } else {\n outputShape = [inputShape[0], outputDim];\n }\n\n if (this.returnState) {\n const stateShape: Shape[] = [];\n for (const dim of stateSize) {\n stateShape.push([inputShape[0], dim]);\n }\n return [outputShape].concat(stateShape);\n } else {\n return outputShape;\n }\n }\n\n override computeMask(inputs: Tensor|Tensor[], mask?: Tensor|Tensor[]): Tensor\n |Tensor[] {\n return tfc.tidy(() => {\n if (Array.isArray(mask)) {\n mask = mask[0];\n }\n const outputMask = this.returnSequences ? mask : null;\n\n if (this.returnState) {\n const stateMask = this.states.map(s => null);\n return [outputMask].concat(stateMask);\n } else {\n return outputMask;\n }\n });\n }\n\n /**\n * Get the current state tensors of the RNN.\n *\n * If the state hasn't been set, return an array of `null`s of the correct\n * length.\n */\n get states(): Tensor[] {\n if (this.states_ == null) {\n const numStates =\n Array.isArray(this.cell.stateSize) ? this.cell.stateSize.length : 1;\n const output: Tensor[] = [];\n for (let i = 0; i < numStates; ++i) {\n output.push(null);\n }\n return output;\n } else {\n return this.states_;\n }\n }\n\n set states(s: Tensor[]) {\n this.states_ = s;\n }\n\n public override build(inputShape: Shape|Shape[]): void {\n // Note inputShape will be an Array of Shapes of initial states and\n // constants if these are passed in apply().\n const constantShape: Shape[] = null;\n if (this.numConstants != null) {\n throw new NotImplementedError(\n 'Constants support is not implemented in RNN yet.');\n }\n\n if (isArrayOfShapes(inputShape)) {\n inputShape = (inputShape as Shape[])[0];\n }\n inputShape = inputShape as Shape;\n\n const batchSize: number = this.stateful ? inputShape[0] : null;\n const inputDim = inputShape.slice(2);\n this.inputSpec[0] = new InputSpec({shape: [batchSize, null, ...inputDim]});\n\n // Allow cell (if RNNCell Layer) to build before we set or validate\n // stateSpec.\n const stepInputShape = [inputShape[0]].concat(inputShape.slice(2));\n if (constantShape != null) {\n throw new NotImplementedError(\n 'Constants support is not implemented in RNN yet.');\n } else {\n this.cell.build(stepInputShape);\n }\n\n // Set or validate stateSpec.\n let stateSize: number[];\n if (Array.isArray(this.cell.stateSize)) {\n stateSize = this.cell.stateSize;\n } else {\n stateSize = [this.cell.stateSize];\n }\n\n if (this.stateSpec != null) {\n if (!util.arraysEqual(\n this.stateSpec.map(spec => spec.shape[spec.shape.length - 1]),\n stateSize)) {\n throw new ValueError(\n `An initialState was passed that is not compatible with ` +\n `cell.stateSize. 
Received stateSpec=${this.stateSpec}; ` +\n `However cell.stateSize is ${this.cell.stateSize}`);\n }\n } else {\n this.stateSpec =\n stateSize.map(dim => new InputSpec({shape: [null, dim]}));\n }\n if (this.stateful) {\n this.resetStates();\n }\n }\n\n /**\n * Reset the state tensors of the RNN.\n *\n * If the `states` argument is `undefined` or `null`, will set the\n * state tensor(s) of the RNN to all-zero tensors of the appropriate\n * shape(s).\n *\n * If `states` is provided, will set the state tensors of the RNN to its\n * value.\n *\n * @param states Optional externally-provided initial states.\n * @param training Whether this call is done during training. For stateful\n * RNNs, this affects whether the old states are kept or discarded. In\n * particular, if `training` is `true`, the old states will be kept so\n * that subsequent backpropgataion through time (BPTT) may work properly.\n * Else, the old states will be discarded.\n */\n override resetStates(states?: Tensor|Tensor[], training = false): void {\n tidy(() => {\n if (!this.stateful) {\n throw new AttributeError(\n 'Cannot call resetStates() on an RNN Layer that is not stateful.');\n }\n const batchSize = this.inputSpec[0].shape[0];\n if (batchSize == null) {\n throw new ValueError(\n 'If an RNN is stateful, it needs to know its batch size. Specify ' +\n 'the batch size of your input tensors: \\n' +\n '- If using a Sequential model, specify the batch size by ' +\n 'passing a `batchInputShape` option to your first layer.\\n' +\n '- If using the functional API, specify the batch size by ' +\n 'passing a `batchShape` option to your Input layer.');\n }\n // Initialize state if null.\n if (this.states_ == null) {\n if (Array.isArray(this.cell.stateSize)) {\n this.states_ =\n this.cell.stateSize.map(dim => tfc.zeros([batchSize, dim]));\n } else {\n this.states_ = [tfc.zeros([batchSize, this.cell.stateSize])];\n }\n } else if (states == null) {\n // Dispose old state tensors.\n tfc.dispose(this.states_);\n // For stateful RNNs, fully dispose kept old states.\n if (this.keptStates != null) {\n tfc.dispose(this.keptStates);\n this.keptStates = [];\n }\n\n if (Array.isArray(this.cell.stateSize)) {\n this.states_ =\n this.cell.stateSize.map(dim => tfc.zeros([batchSize, dim]));\n } else {\n this.states_[0] = tfc.zeros([batchSize, this.cell.stateSize]);\n }\n } else {\n if (!Array.isArray(states)) {\n states = [states];\n }\n if (states.length !== this.states_.length) {\n throw new ValueError(\n `Layer ${this.name} expects ${this.states_.length} state(s), ` +\n `but it received ${states.length} state value(s). Input ` +\n `received: ${states}`);\n }\n\n if (training === true) {\n // Store old state tensors for complete disposal later, i.e., during\n // the next no-arg call to this method. 
We do not dispose the old\n // states immediately because that BPTT (among other things) require\n // them.\n this.keptStates.push(this.states_.slice());\n } else {\n tfc.dispose(this.states_);\n }\n\n for (let index = 0; index < this.states_.length; ++index) {\n const value = states[index];\n const dim = Array.isArray(this.cell.stateSize) ?\n this.cell.stateSize[index] :\n this.cell.stateSize;\n const expectedShape = [batchSize, dim];\n if (!util.arraysEqual(value.shape, expectedShape)) {\n throw new ValueError(\n `State ${index} is incompatible with layer ${this.name}: ` +\n `expected shape=${expectedShape}, received shape=${\n value.shape}`);\n }\n this.states_[index] = value;\n }\n }\n this.states_ = this.states_.map(state => tfc.keep(state.clone()));\n });\n }\n\n override apply(\n inputs: Tensor|Tensor[]|SymbolicTensor|SymbolicTensor[],\n kwargs?: Kwargs): Tensor|Tensor[]|SymbolicTensor|SymbolicTensor[] {\n // TODO(cais): Figure out whether initialState is in kwargs or inputs.\n let initialState: Tensor[]|SymbolicTensor[] =\n kwargs == null ? null : kwargs['initialState'];\n let constants: Tensor[]|SymbolicTensor[] =\n kwargs == null ? null : kwargs['constants'];\n if (kwargs == null) {\n kwargs = {};\n }\n\n const standardized =\n standardizeArgs(inputs, initialState, constants, this.numConstants);\n inputs = standardized.inputs;\n initialState = standardized.initialState;\n constants = standardized.constants;\n\n // If any of `initial_state` or `constants` are specified and are\n // `tf.SymbolicTensor`s, then add them to the inputs and temporarily modify\n // the input_spec to include them.\n\n let additionalInputs: Array = [];\n let additionalSpecs: InputSpec[] = [];\n if (initialState != null) {\n kwargs['initialState'] = initialState;\n additionalInputs = additionalInputs.concat(initialState);\n this.stateSpec = [];\n for (const state of initialState) {\n this.stateSpec.push(new InputSpec({shape: state.shape}));\n }\n // TODO(cais): Use the following instead.\n // this.stateSpec = initialState.map(state => new InputSpec({shape:\n // state.shape}));\n additionalSpecs = additionalSpecs.concat(this.stateSpec);\n }\n if (constants != null) {\n kwargs['constants'] = constants;\n additionalInputs = additionalInputs.concat(constants);\n // TODO(cais): Add this.constantsSpec.\n this.numConstants = constants.length;\n }\n\n const isTensor = additionalInputs[0] instanceof SymbolicTensor;\n if (isTensor) {\n // Compute full input spec, including state and constants.\n const fullInput =\n [inputs].concat(additionalInputs) as Tensor[] | SymbolicTensor[];\n const fullInputSpec = this.inputSpec.concat(additionalSpecs);\n // Perform the call with temporarily replaced inputSpec.\n const originalInputSpec = this.inputSpec;\n this.inputSpec = fullInputSpec;\n const output = super.apply(fullInput, kwargs);\n this.inputSpec = originalInputSpec;\n return output;\n } else {\n return super.apply(inputs, kwargs);\n }\n }\n\n // tslint:disable-next-line:no-any\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n // Input shape: `[samples, time (padded with zeros), input_dim]`.\n // Note that the .build() method of subclasses **must** define\n // this.inputSpec and this.stateSpec owith complete input shapes.\n return tidy(() => {\n const mask = kwargs == null ? null : kwargs['mask'] as Tensor;\n const training = kwargs == null ? null : kwargs['training'];\n let initialState: Tensor[] =\n kwargs == null ? 
null : kwargs['initialState'];\n\n inputs = getExactlyOneTensor(inputs);\n if (initialState == null) {\n if (this.stateful) {\n initialState = this.states_;\n } else {\n initialState = this.getInitialState(inputs);\n }\n }\n\n const numStates =\n Array.isArray(this.cell.stateSize) ? this.cell.stateSize.length : 1;\n if (initialState.length !== numStates) {\n throw new ValueError(\n `RNN Layer has ${numStates} state(s) but was passed ` +\n `${initialState.length} initial state(s).`);\n }\n if (this.unroll) {\n console.warn(\n 'Ignoring unroll = true for RNN layer, due to imperative backend.');\n }\n\n const cellCallKwargs: Kwargs = {training};\n\n // TODO(cais): Add support for constants.\n const step = (inputs: Tensor, states: Tensor[]) => {\n // `inputs` and `states` are concatenated to form a single `Array` of\n // `tf.Tensor`s as the input to `cell.call()`.\n const outputs =\n this.cell.call([inputs].concat(states), cellCallKwargs) as Tensor[];\n // Marshall the return value into output and new states.\n return [outputs[0], outputs.slice(1)] as [Tensor, Tensor[]];\n };\n\n // TODO(cais): Add support for constants.\n\n const rnnOutputs =\n rnn(step, inputs, initialState, this.goBackwards, mask, null,\n this.unroll, this.returnSequences);\n const lastOutput = rnnOutputs[0];\n const outputs = rnnOutputs[1];\n const states = rnnOutputs[2];\n\n if (this.stateful) {\n this.resetStates(states, training);\n }\n\n const output = this.returnSequences ? outputs : lastOutput;\n\n // TODO(cais): Porperty set learning phase flag.\n\n if (this.returnState) {\n return [output].concat(states);\n } else {\n return output;\n }\n });\n }\n\n getInitialState(inputs: Tensor): Tensor[] {\n return tidy(() => {\n // Build an all-zero tensor of shape [samples, outputDim].\n // [Samples, timeSteps, inputDim].\n let initialState = tfc.zeros(inputs.shape);\n // [Samples].\n initialState = tfc.sum(initialState, [1, 2]);\n initialState = K.expandDims(initialState); // [Samples, 1].\n\n if (Array.isArray(this.cell.stateSize)) {\n return this.cell.stateSize.map(\n dim => dim > 1 ? 
K.tile(initialState, [1, dim]) : initialState);\n } else {\n return this.cell.stateSize > 1 ?\n [K.tile(initialState, [1, this.cell.stateSize])] :\n [initialState];\n }\n });\n }\n\n override get trainableWeights(): LayerVariable[] {\n if (!this.trainable) {\n return [];\n }\n // Porting Note: In TypeScript, `this` is always an instance of `Layer`.\n return this.cell.trainableWeights;\n }\n\n override get nonTrainableWeights(): LayerVariable[] {\n // Porting Note: In TypeScript, `this` is always an instance of `Layer`.\n if (!this.trainable) {\n return this.cell.weights;\n }\n return this.cell.nonTrainableWeights;\n }\n\n override setFastWeightInitDuringBuild(value: boolean) {\n super.setFastWeightInitDuringBuild(value);\n if (this.cell != null) {\n this.cell.setFastWeightInitDuringBuild(value);\n }\n }\n\n override getConfig(): serialization.ConfigDict {\n const baseConfig = super.getConfig();\n\n const config: serialization.ConfigDict = {\n returnSequences: this.returnSequences,\n returnState: this.returnState,\n goBackwards: this.goBackwards,\n stateful: this.stateful,\n unroll: this.unroll,\n };\n\n if (this.numConstants != null) {\n config['numConstants'] = this.numConstants;\n }\n\n const cellConfig = this.cell.getConfig();\n\n if (this.getClassName() === RNN.className) {\n config['cell'] = {\n 'className': this.cell.getClassName(),\n 'config': cellConfig,\n } as serialization.ConfigDictValue;\n }\n\n // this order is necessary, to prevent cell name from replacing layer name\n return {...cellConfig, ...baseConfig, ...config};\n }\n\n /** @nocollapse */\n static override fromConfig(\n cls: serialization.SerializableConstructor,\n config: serialization.ConfigDict,\n customObjects = {} as serialization.ConfigDict): T {\n const cellConfig = config['cell'] as serialization.ConfigDict;\n const cell = deserialize(cellConfig, customObjects) as RNNCell;\n return new cls(Object.assign(config, {cell}));\n }\n}\nserialization.registerClass(RNN);\n\n// Porting Note: This is a common parent class for RNN cells. There is no\n// equivalent of this in PyKeras. 
Having a common parent class forgoes the\n// need for `has_attr(cell, ...)` checks or its TypeScript equivalent.\n/**\n * An RNNCell layer.\n *\n * @doc {heading: 'Layers', subheading: 'Classes'}\n */\nexport abstract class RNNCell extends Layer {\n /**\n * Size(s) of the states.\n * For RNN cells with only a single state, this is a single integer.\n */\n // See\n // https://www.typescriptlang.org/docs/handbook/release-notes/typescript-4-0.html#properties-overriding-accessors-and-vice-versa-is-an-error\n public abstract stateSize: number|number[];\n public dropoutMask: Tensor|Tensor[];\n public recurrentDropoutMask: Tensor|Tensor[];\n}\n\nexport declare interface SimpleRNNCellLayerArgs extends LayerArgs {\n /**\n * units: Positive integer, dimensionality of the output space.\n */\n units: number;\n\n /**\n * Activation function to use.\n * Default: hyperbolic tangent ('tanh').\n * If you pass `null`, 'linear' activation will be applied.\n */\n activation?: ActivationIdentifier;\n\n /**\n * Whether the layer uses a bias vector.\n */\n useBias?: boolean;\n\n /**\n * Initializer for the `kernel` weights matrix, used for the linear\n * transformation of the inputs.\n */\n kernelInitializer?: InitializerIdentifier|Initializer;\n\n /**\n * Initializer for the `recurrentKernel` weights matrix, used for\n * linear transformation of the recurrent state.\n */\n recurrentInitializer?: InitializerIdentifier|Initializer;\n\n /**\n * Initializer for the bias vector.\n */\n biasInitializer?: InitializerIdentifier|Initializer;\n\n /**\n * Regularizer function applied to the `kernel` weights matrix.\n */\n kernelRegularizer?: RegularizerIdentifier|Regularizer;\n\n /**\n * Regularizer function applied to the `recurrent_kernel` weights matrix.\n */\n recurrentRegularizer?: RegularizerIdentifier|Regularizer;\n\n /**\n * Regularizer function applied to the bias vector.\n */\n biasRegularizer?: RegularizerIdentifier|Regularizer;\n\n /**\n * Constraint function applied to the `kernel` weights matrix.\n */\n kernelConstraint?: ConstraintIdentifier|Constraint;\n\n /**\n * Constraint function applied to the `recurrentKernel` weights matrix.\n */\n recurrentConstraint?: ConstraintIdentifier|Constraint;\n\n /**\n * Constraint function applied to the bias vector.\n */\n biasConstraint?: ConstraintIdentifier|Constraint;\n\n /**\n * Float number between 0 and 1. Fraction of the units to drop for the linear\n * transformation of the inputs.\n */\n dropout?: number;\n\n /**\n * Float number between 0 and 1. 
Fraction of the units to drop for the linear\n * transformation of the recurrent state.\n */\n recurrentDropout?: number;\n\n /**\n * This is added for test DI purpose.\n */\n dropoutFunc?: Function;\n}\n\nexport class SimpleRNNCell extends RNNCell {\n /** @nocollapse */\n static className = 'SimpleRNNCell';\n readonly units: number;\n readonly activation: Activation;\n readonly useBias: boolean;\n\n readonly kernelInitializer: Initializer;\n readonly recurrentInitializer: Initializer;\n readonly biasInitializer: Initializer;\n\n readonly kernelConstraint: Constraint;\n readonly recurrentConstraint: Constraint;\n readonly biasConstraint: Constraint;\n\n readonly kernelRegularizer: Regularizer;\n readonly recurrentRegularizer: Regularizer;\n readonly biasRegularizer: Regularizer;\n\n readonly dropout: number;\n readonly recurrentDropout: number;\n readonly dropoutFunc: Function;\n\n readonly stateSize: number;\n\n kernel: LayerVariable;\n recurrentKernel: LayerVariable;\n bias: LayerVariable;\n\n readonly DEFAULT_ACTIVATION = 'tanh';\n readonly DEFAULT_KERNEL_INITIALIZER = 'glorotNormal';\n readonly DEFAULT_RECURRENT_INITIALIZER = 'orthogonal';\n readonly DEFAULT_BIAS_INITIALIZER: InitializerIdentifier = 'zeros';\n\n constructor(args: SimpleRNNCellLayerArgs) {\n super(args);\n this.units = args.units;\n assertPositiveInteger(this.units, `units`);\n this.activation = getActivation(\n args.activation == null ? this.DEFAULT_ACTIVATION : args.activation);\n this.useBias = args.useBias == null ? true : args.useBias;\n\n this.kernelInitializer = getInitializer(\n args.kernelInitializer || this.DEFAULT_KERNEL_INITIALIZER);\n this.recurrentInitializer = getInitializer(\n args.recurrentInitializer || this.DEFAULT_RECURRENT_INITIALIZER);\n\n this.biasInitializer =\n getInitializer(args.biasInitializer || this.DEFAULT_BIAS_INITIALIZER);\n\n this.kernelRegularizer = getRegularizer(args.kernelRegularizer);\n this.recurrentRegularizer = getRegularizer(args.recurrentRegularizer);\n this.biasRegularizer = getRegularizer(args.biasRegularizer);\n\n this.kernelConstraint = getConstraint(args.kernelConstraint);\n this.recurrentConstraint = getConstraint(args.recurrentConstraint);\n this.biasConstraint = getConstraint(args.biasConstraint);\n\n this.dropout = math_utils.min(\n [1, math_utils.max([0, args.dropout == null ? 0 : args.dropout])]);\n this.recurrentDropout = math_utils.min([\n 1,\n math_utils.max(\n [0, args.recurrentDropout == null ? 0 : args.recurrentDropout])\n ]);\n this.dropoutFunc = args.dropoutFunc;\n this.stateSize = this.units;\n this.dropoutMask = null;\n this.recurrentDropoutMask = null;\n }\n\n override build(inputShape: Shape|Shape[]): void {\n inputShape = getExactlyOneShape(inputShape);\n // TODO(cais): Use regularizer.\n this.kernel = this.addWeight(\n 'kernel', [inputShape[inputShape.length - 1], this.units], null,\n this.kernelInitializer, this.kernelRegularizer, true,\n this.kernelConstraint);\n this.recurrentKernel = this.addWeight(\n 'recurrent_kernel', [this.units, this.units], null,\n this.recurrentInitializer, this.recurrentRegularizer, true,\n this.recurrentConstraint);\n if (this.useBias) {\n this.bias = this.addWeight(\n 'bias', [this.units], null, this.biasInitializer,\n this.biasRegularizer, true, this.biasConstraint);\n } else {\n this.bias = null;\n }\n this.built = true;\n }\n\n // Porting Note: PyKeras' equivalent of this method takes two tensor inputs:\n // `inputs` and `states`. 
Here, the two tensors are combined into an\n // `Tensor[]` Array as the first input argument.\n // Similarly, PyKeras' equivalent of this method returns two values:\n // `output` and `[output]`. Here the two are combined into one length-2\n // `Tensor[]`, consisting of `output` repeated.\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n inputs = inputs as Tensor[];\n if (inputs.length !== 2) {\n throw new ValueError(\n `SimpleRNNCell expects 2 input Tensors, got ${inputs.length}.`);\n }\n let prevOutput = inputs[1];\n inputs = inputs[0];\n const training = kwargs['training'] == null ? false : kwargs['training'];\n\n if (0 < this.dropout && this.dropout < 1 && this.dropoutMask == null) {\n this.dropoutMask = generateDropoutMask({\n ones: () => tfc.onesLike(inputs as Tensor),\n rate: this.dropout,\n training,\n dropoutFunc: this.dropoutFunc,\n }) as Tensor;\n }\n if (0 < this.recurrentDropout && this.recurrentDropout < 1 &&\n this.recurrentDropoutMask == null) {\n this.recurrentDropoutMask = generateDropoutMask({\n ones: () => tfc.onesLike(prevOutput),\n rate: this.recurrentDropout,\n training,\n dropoutFunc: this.dropoutFunc,\n }) as Tensor;\n }\n let h: Tensor;\n const dpMask: Tensor = this.dropoutMask as Tensor;\n const recDpMask: Tensor = this.recurrentDropoutMask as Tensor;\n if (dpMask != null) {\n h = K.dot(tfc.mul(inputs, dpMask), this.kernel.read());\n } else {\n h = K.dot(inputs, this.kernel.read());\n }\n if (this.bias != null) {\n h = K.biasAdd(h, this.bias.read());\n }\n if (recDpMask != null) {\n prevOutput = tfc.mul(prevOutput, recDpMask);\n }\n let output = tfc.add(h, K.dot(prevOutput, this.recurrentKernel.read()));\n if (this.activation != null) {\n output = this.activation.apply(output);\n }\n\n // TODO(cais): Properly set learning phase on output tensor?\n return [output, output];\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n const baseConfig = super.getConfig();\n\n const config: serialization.ConfigDict = {\n units: this.units,\n activation: serializeActivation(this.activation),\n useBias: this.useBias,\n kernelInitializer: serializeInitializer(this.kernelInitializer),\n recurrentInitializer: serializeInitializer(this.recurrentInitializer),\n biasInitializer: serializeInitializer(this.biasInitializer),\n kernelRegularizer: serializeRegularizer(this.kernelRegularizer),\n recurrentRegularizer: serializeRegularizer(this.recurrentRegularizer),\n biasRegularizer: serializeRegularizer(this.biasRegularizer),\n activityRegularizer: serializeRegularizer(this.activityRegularizer),\n kernelConstraint: serializeConstraint(this.kernelConstraint),\n recurrentConstraint: serializeConstraint(this.recurrentConstraint),\n biasConstraint: serializeConstraint(this.biasConstraint),\n dropout: this.dropout,\n recurrentDropout: this.recurrentDropout,\n };\n\n return {...baseConfig, ...config};\n }\n}\nserialization.registerClass(SimpleRNNCell);\n\nexport declare interface SimpleRNNLayerArgs extends BaseRNNLayerArgs {\n /**\n * Positive integer, dimensionality of the output space.\n */\n units: number;\n\n /**\n * Activation function to use.\n *\n * Defaults to hyperbolic tangent (`tanh`)\n *\n * If you pass `null`, no activation will be applied.\n */\n activation?: ActivationIdentifier;\n\n /**\n * Whether the layer uses a bias vector.\n */\n useBias?: boolean;\n\n /**\n * Initializer for the `kernel` weights matrix, used for the linear\n * transformation of the inputs.\n */\n kernelInitializer?: 
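// Sketch of the cell/wrapper split noted above (assumed tf.layers API): the cell
// computes a single time step and returns [output, newStates]; tf.layers.rnn()
// wraps it to iterate over the whole input sequence.
import * as tf from '@tensorflow/tfjs';

const cell = tf.layers.simpleRNNCell({units: 4, activation: 'tanh'});
const wrapped = tf.layers.rnn({cell, returnSequences: true});
const seqOut = wrapped.apply(tf.zeros([2, 6, 3])) as tf.Tensor;
console.log(seqOut.shape);  // [2, 6, 4]: per-step outputs because returnSequences is true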
InitializerIdentifier|Initializer;\n\n /**\n * Initializer for the `recurrentKernel` weights matrix, used for\n * linear transformation of the recurrent state.\n */\n recurrentInitializer?: InitializerIdentifier|Initializer;\n\n /**\n * Initializer for the bias vector.\n */\n biasInitializer?: InitializerIdentifier|Initializer;\n\n /**\n * Regularizer function applied to the kernel weights matrix.\n */\n kernelRegularizer?: RegularizerIdentifier|Regularizer;\n\n /**\n * Regularizer function applied to the recurrentKernel weights matrix.\n */\n recurrentRegularizer?: RegularizerIdentifier|Regularizer;\n\n /**\n * Regularizer function applied to the bias vector.\n */\n biasRegularizer?: RegularizerIdentifier|Regularizer;\n\n /**\n * Constraint function applied to the kernel weights matrix.\n */\n kernelConstraint?: ConstraintIdentifier|Constraint;\n\n /**\n * Constraint function applied to the recurrentKernel weights matrix.\n */\n recurrentConstraint?: ConstraintIdentifier|Constraint;\n\n /**\n * Constraint function applied to the bias vector.\n */\n biasConstraint?: ConstraintIdentifier|Constraint;\n\n /**\n * Number between 0 and 1. Fraction of the units to drop for the linear\n * transformation of the inputs.\n */\n dropout?: number;\n\n /**\n * Number between 0 and 1. Fraction of the units to drop for the linear\n * transformation of the recurrent state.\n */\n recurrentDropout?: number;\n\n /**\n * This is added for test DI purpose.\n */\n dropoutFunc?: Function;\n}\n\n/**\n * RNNLayerConfig is identical to BaseRNNLayerConfig, except it makes the\n * `cell` property required. This interface is to be used with constructors\n * of concrete RNN layer subtypes.\n */\nexport declare interface RNNLayerArgs extends BaseRNNLayerArgs {\n cell: RNNCell|RNNCell[];\n}\n\nexport class SimpleRNN extends RNN {\n /** @nocollapse */\n static override className = 'SimpleRNN';\n constructor(args: SimpleRNNLayerArgs) {\n args.cell = new SimpleRNNCell(args);\n super(args as RNNLayerArgs);\n // TODO(cais): Add activityRegularizer.\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n if (this.cell.dropoutMask != null) {\n tfc.dispose(this.cell.dropoutMask);\n this.cell.dropoutMask = null;\n }\n if (this.cell.recurrentDropoutMask != null) {\n tfc.dispose(this.cell.recurrentDropoutMask);\n this.cell.recurrentDropoutMask = null;\n }\n const mask = kwargs == null ? null : kwargs['mask'];\n const training = kwargs == null ? null : kwargs['training'];\n const initialState: Tensor[] =\n kwargs == null ? null : kwargs['initialState'];\n return super.call(inputs, {mask, training, initialState});\n });\n }\n\n /** @nocollapse */\n static override fromConfig(\n cls: serialization.SerializableConstructor,\n config: serialization.ConfigDict): T {\n return new cls(config);\n }\n}\nserialization.registerClass(SimpleRNN);\n\n// Porting Note: Since this is a superset of SimpleRNNLayerConfig, we extend\n// that interface instead of repeating the fields.\nexport declare interface GRUCellLayerArgs extends SimpleRNNCellLayerArgs {\n /**\n * Activation function to use for the recurrent step.\n *\n * Defaults to hard sigmoid (`hardSigmoid`).\n *\n * If `null`, no activation is applied.\n */\n recurrentActivation?: ActivationIdentifier;\n\n /**\n * Implementation mode, either 1 or 2.\n *\n * Mode 1 will structure its operations as a larger number of\n * smaller dot products and additions.\n *\n * Mode 2 will batch them into fewer, larger operations. 
These modes will\n * have different performance profiles on different hardware and\n * for different applications.\n *\n * Note: For superior performance, TensorFlow.js always uses implementation\n * 2, regardless of the actual value of this configuration field.\n */\n implementation?: number;\n\n /**\n * GRU convention (whether to apply reset gate after or before matrix\n * multiplication). false = \"before\", true = \"after\" (only false is\n * supported).\n */\n resetAfter?: boolean;\n}\n\nexport class GRUCell extends RNNCell {\n /** @nocollapse */\n static className = 'GRUCell';\n readonly units: number;\n readonly activation: Activation;\n readonly recurrentActivation: Activation;\n readonly useBias: boolean;\n\n readonly kernelInitializer: Initializer;\n readonly recurrentInitializer: Initializer;\n readonly biasInitializer: Initializer;\n\n readonly kernelRegularizer: Regularizer;\n readonly recurrentRegularizer: Regularizer;\n readonly biasRegularizer: Regularizer;\n\n readonly kernelConstraint: Constraint;\n readonly recurrentConstraint: Constraint;\n readonly biasConstraint: Constraint;\n\n readonly dropout: number;\n readonly recurrentDropout: number;\n readonly dropoutFunc: Function;\n\n readonly stateSize: number;\n readonly implementation: number;\n\n readonly DEFAULT_ACTIVATION = 'tanh';\n readonly DEFAULT_RECURRENT_ACTIVATION: ActivationIdentifier = 'hardSigmoid';\n\n readonly DEFAULT_KERNEL_INITIALIZER = 'glorotNormal';\n readonly DEFAULT_RECURRENT_INITIALIZER = 'orthogonal';\n readonly DEFAULT_BIAS_INITIALIZER: InitializerIdentifier = 'zeros';\n\n kernel: LayerVariable;\n recurrentKernel: LayerVariable;\n bias: LayerVariable;\n\n constructor(args: GRUCellLayerArgs) {\n super(args);\n if (args.resetAfter) {\n throw new ValueError(\n `GRUCell does not support reset_after parameter set to true.`);\n }\n this.units = args.units;\n assertPositiveInteger(this.units, 'units');\n this.activation = getActivation(\n args.activation === undefined ? this.DEFAULT_ACTIVATION :\n args.activation);\n this.recurrentActivation = getActivation(\n args.recurrentActivation === undefined ?\n this.DEFAULT_RECURRENT_ACTIVATION :\n args.recurrentActivation);\n this.useBias = args.useBias == null ? true : args.useBias;\n\n this.kernelInitializer = getInitializer(\n args.kernelInitializer || this.DEFAULT_KERNEL_INITIALIZER);\n this.recurrentInitializer = getInitializer(\n args.recurrentInitializer || this.DEFAULT_RECURRENT_INITIALIZER);\n\n this.biasInitializer =\n getInitializer(args.biasInitializer || this.DEFAULT_BIAS_INITIALIZER);\n\n this.kernelRegularizer = getRegularizer(args.kernelRegularizer);\n this.recurrentRegularizer = getRegularizer(args.recurrentRegularizer);\n this.biasRegularizer = getRegularizer(args.biasRegularizer);\n\n this.kernelConstraint = getConstraint(args.kernelConstraint);\n this.recurrentConstraint = getConstraint(args.recurrentConstraint);\n this.biasConstraint = getConstraint(args.biasConstraint);\n\n this.dropout = math_utils.min(\n [1, math_utils.max([0, args.dropout == null ? 0 : args.dropout])]);\n this.recurrentDropout = math_utils.min([\n 1,\n math_utils.max(\n [0, args.recurrentDropout == null ? 
0 : args.recurrentDropout])\n ]);\n this.dropoutFunc = args.dropoutFunc;\n this.implementation = args.implementation;\n this.stateSize = this.units;\n this.dropoutMask = null;\n this.recurrentDropoutMask = null;\n }\n\n public override build(inputShape: Shape|Shape[]): void {\n inputShape = getExactlyOneShape(inputShape);\n const inputDim = inputShape[inputShape.length - 1];\n this.kernel = this.addWeight(\n 'kernel', [inputDim, this.units * 3], null, this.kernelInitializer,\n this.kernelRegularizer, true, this.kernelConstraint);\n this.recurrentKernel = this.addWeight(\n 'recurrent_kernel', [this.units, this.units * 3], null,\n this.recurrentInitializer, this.recurrentRegularizer, true,\n this.recurrentConstraint);\n if (this.useBias) {\n this.bias = this.addWeight(\n 'bias', [this.units * 3], null, this.biasInitializer,\n this.biasRegularizer, true, this.biasConstraint);\n } else {\n this.bias = null;\n }\n // Porting Notes: Unlike the PyKeras implementation, we perform slicing\n // of the weights and bias in the call() method, at execution time.\n this.built = true;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n inputs = inputs as Tensor[];\n if (inputs.length !== 2) {\n throw new ValueError(\n `GRUCell expects 2 input Tensors (inputs, h, c), got ` +\n `${inputs.length}.`);\n }\n\n const training = kwargs['training'] == null ? false : kwargs['training'];\n let hTMinus1 = inputs[1]; // Previous memory state.\n inputs = inputs[0];\n\n // Note: For superior performance, TensorFlow.js always uses\n // implementation 2, regardless of the actual value of\n // config.implementation.\n if (0 < this.dropout && this.dropout < 1 && this.dropoutMask == null) {\n this.dropoutMask = generateDropoutMask({\n ones: () => tfc.onesLike(inputs as Tensor),\n rate: this.dropout,\n training,\n count: 3,\n dropoutFunc: this.dropoutFunc,\n }) as Tensor[];\n }\n if (0 < this.recurrentDropout && this.recurrentDropout < 1 &&\n this.recurrentDropoutMask == null) {\n this.recurrentDropoutMask = generateDropoutMask({\n ones: () => tfc.onesLike(hTMinus1),\n rate: this.recurrentDropout,\n training,\n count: 3,\n dropoutFunc: this.dropoutFunc,\n }) as Tensor[];\n }\n const dpMask = this.dropoutMask as [Tensor, Tensor, Tensor];\n const recDpMask = this.recurrentDropoutMask as [Tensor, Tensor, Tensor];\n let z: Tensor;\n let r: Tensor;\n let hh: Tensor;\n\n if (0 < this.dropout && this.dropout < 1) {\n inputs = tfc.mul(inputs, dpMask[0]);\n }\n let matrixX = K.dot(inputs, this.kernel.read());\n if (this.useBias) {\n matrixX = K.biasAdd(matrixX, this.bias.read());\n }\n if (0 < this.recurrentDropout && this.recurrentDropout < 1) {\n hTMinus1 = tfc.mul(hTMinus1, recDpMask[0]);\n }\n\n const recurrentKernelValue = this.recurrentKernel.read();\n const [rk1, rk2] = tfc.split(\n recurrentKernelValue, [2 * this.units, this.units],\n recurrentKernelValue.rank - 1);\n const matrixInner = K.dot(hTMinus1, rk1);\n\n const [xZ, xR, xH] = tfc.split(matrixX, 3, matrixX.rank - 1);\n const [recurrentZ, recurrentR] =\n tfc.split(matrixInner, 2, matrixInner.rank - 1);\n z = this.recurrentActivation.apply(tfc.add(xZ, recurrentZ));\n r = this.recurrentActivation.apply(tfc.add(xR, recurrentR));\n\n const recurrentH = K.dot(tfc.mul(r, hTMinus1), rk2);\n hh = this.activation.apply(tfc.add(xH, recurrentH));\n\n const h =\n tfc.add(tfc.mul(z, hTMinus1), tfc.mul(tfc.add(1, tfc.neg(z)), hh));\n // TODO(cais): Add use_learning_phase flag properly.\n return [h, h];\n });\n }\n\n override 
getConfig(): serialization.ConfigDict {\n const baseConfig = super.getConfig();\n\n const config: serialization.ConfigDict = {\n units: this.units,\n activation: serializeActivation(this.activation),\n recurrentActivation: serializeActivation(this.recurrentActivation),\n useBias: this.useBias,\n kernelInitializer: serializeInitializer(this.kernelInitializer),\n recurrentInitializer: serializeInitializer(this.recurrentInitializer),\n biasInitializer: serializeInitializer(this.biasInitializer),\n kernelRegularizer: serializeRegularizer(this.kernelRegularizer),\n recurrentRegularizer: serializeRegularizer(this.recurrentRegularizer),\n biasRegularizer: serializeRegularizer(this.biasRegularizer),\n activityRegularizer: serializeRegularizer(this.activityRegularizer),\n kernelConstraint: serializeConstraint(this.kernelConstraint),\n recurrentConstraint: serializeConstraint(this.recurrentConstraint),\n biasConstraint: serializeConstraint(this.biasConstraint),\n dropout: this.dropout,\n recurrentDropout: this.recurrentDropout,\n implementation: this.implementation,\n resetAfter: false\n };\n\n return {...baseConfig, ...config};\n }\n}\nserialization.registerClass(GRUCell);\n\n// Porting Note: Since this is a superset of SimpleRNNLayerConfig, we inherit\n// from that interface instead of repeating the fields here.\nexport declare interface GRULayerArgs extends SimpleRNNLayerArgs {\n /**\n * Activation function to use for the recurrent step.\n *\n * Defaults to hard sigmoid (`hardSigmoid`).\n *\n * If `null`, no activation is applied.\n */\n recurrentActivation?: ActivationIdentifier;\n\n /**\n * Implementation mode, either 1 or 2.\n *\n * Mode 1 will structure its operations as a larger number of\n * smaller dot products and additions.\n *\n * Mode 2 will batch them into fewer, larger operations. These modes will\n * have different performance profiles on different hardware and\n * for different applications.\n *\n * Note: For superior performance, TensorFlow.js always uses implementation\n * 2, regardless of the actual value of this configuration field.\n */\n implementation?: number;\n}\n\nexport class GRU extends RNN {\n /** @nocollapse */\n static override className = 'GRU';\n constructor(args: GRULayerArgs) {\n if (args.implementation === 0) {\n console.warn(\n '`implementation=0` has been deprecated, and now defaults to ' +\n '`implementation=1`. Please update your layer call.');\n }\n args.cell = new GRUCell(args);\n super(args as RNNLayerArgs);\n // TODO(cais): Add activityRegularizer.\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n if (this.cell.dropoutMask != null) {\n tfc.dispose(this.cell.dropoutMask);\n this.cell.dropoutMask = null;\n }\n if (this.cell.recurrentDropoutMask != null) {\n tfc.dispose(this.cell.recurrentDropoutMask);\n this.cell.recurrentDropoutMask = null;\n }\n const mask = kwargs == null ? null : kwargs['mask'];\n const training = kwargs == null ? null : kwargs['training'];\n const initialState: Tensor[] =\n kwargs == null ? 
null : kwargs['initialState'];\n return super.call(inputs, {mask, training, initialState});\n });\n }\n\n /** @nocollapse */\n static override fromConfig(\n cls: serialization.SerializableConstructor,\n config: serialization.ConfigDict): T {\n if (config['implmentation'] === 0) {\n config['implementation'] = 1;\n }\n return new cls(config);\n }\n}\nserialization.registerClass(GRU);\n\n// Porting Note: Since this is a superset of SimpleRNNLayerConfig, we extend\n// that interface instead of repeating the fields.\nexport declare interface LSTMCellLayerArgs extends SimpleRNNCellLayerArgs {\n /**\n * Activation function to use for the recurrent step.\n *\n * Defaults to hard sigmoid (`hardSigmoid`).\n *\n * If `null`, no activation is applied.\n */\n recurrentActivation?: ActivationIdentifier;\n\n /**\n * If `true`, add 1 to the bias of the forget gate at initialization.\n * Setting it to `true` will also force `biasInitializer = 'zeros'`.\n * This is recommended in\n * [Jozefowicz et\n * al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n */\n unitForgetBias?: boolean;\n\n /**\n * Implementation mode, either 1 or 2.\n *\n * Mode 1 will structure its operations as a larger number of\n * smaller dot products and additions.\n *\n * Mode 2 will batch them into fewer, larger operations. These modes will\n * have different performance profiles on different hardware and\n * for different applications.\n *\n * Note: For superior performance, TensorFlow.js always uses implementation\n * 2, regardless of the actual value of this configuration field.\n */\n implementation?: number;\n}\n\nexport class LSTMCell extends RNNCell {\n /** @nocollapse */\n static className = 'LSTMCell';\n readonly units: number;\n readonly activation: Activation;\n readonly recurrentActivation: Activation;\n readonly useBias: boolean;\n\n readonly kernelInitializer: Initializer;\n readonly recurrentInitializer: Initializer;\n readonly biasInitializer: Initializer;\n readonly unitForgetBias: boolean;\n\n readonly kernelConstraint: Constraint;\n readonly recurrentConstraint: Constraint;\n readonly biasConstraint: Constraint;\n\n readonly kernelRegularizer: Regularizer;\n readonly recurrentRegularizer: Regularizer;\n readonly biasRegularizer: Regularizer;\n\n readonly dropout: number;\n readonly recurrentDropout: number;\n readonly dropoutFunc: Function;\n\n readonly stateSize: number[];\n readonly implementation: number;\n\n readonly DEFAULT_ACTIVATION = 'tanh';\n readonly DEFAULT_RECURRENT_ACTIVATION = 'hardSigmoid';\n readonly DEFAULT_KERNEL_INITIALIZER = 'glorotNormal';\n readonly DEFAULT_RECURRENT_INITIALIZER = 'orthogonal';\n\n readonly DEFAULT_BIAS_INITIALIZER = 'zeros';\n\n kernel: LayerVariable;\n recurrentKernel: LayerVariable;\n bias: LayerVariable;\n\n constructor(args: LSTMCellLayerArgs) {\n super(args);\n\n this.units = args.units;\n assertPositiveInteger(this.units, 'units');\n this.activation = getActivation(\n args.activation === undefined ? this.DEFAULT_ACTIVATION :\n args.activation);\n this.recurrentActivation = getActivation(\n args.recurrentActivation === undefined ?\n this.DEFAULT_RECURRENT_ACTIVATION :\n args.recurrentActivation);\n this.useBias = args.useBias == null ? 
true : args.useBias;\n\n this.kernelInitializer = getInitializer(\n args.kernelInitializer || this.DEFAULT_KERNEL_INITIALIZER);\n this.recurrentInitializer = getInitializer(\n args.recurrentInitializer || this.DEFAULT_RECURRENT_INITIALIZER);\n\n this.biasInitializer =\n getInitializer(args.biasInitializer || this.DEFAULT_BIAS_INITIALIZER);\n this.unitForgetBias = args.unitForgetBias;\n\n this.kernelRegularizer = getRegularizer(args.kernelRegularizer);\n this.recurrentRegularizer = getRegularizer(args.recurrentRegularizer);\n this.biasRegularizer = getRegularizer(args.biasRegularizer);\n\n this.kernelConstraint = getConstraint(args.kernelConstraint);\n this.recurrentConstraint = getConstraint(args.recurrentConstraint);\n this.biasConstraint = getConstraint(args.biasConstraint);\n\n this.dropout = math_utils.min(\n [1, math_utils.max([0, args.dropout == null ? 0 : args.dropout])]);\n this.recurrentDropout = math_utils.min([\n 1,\n math_utils.max(\n [0, args.recurrentDropout == null ? 0 : args.recurrentDropout])\n ]);\n this.dropoutFunc = args.dropoutFunc;\n this.implementation = args.implementation;\n this.stateSize = [this.units, this.units];\n this.dropoutMask = null;\n this.recurrentDropoutMask = null;\n }\n\n public override build(inputShape: Shape|Shape[]): void {\n inputShape = getExactlyOneShape(inputShape);\n const inputDim = inputShape[inputShape.length - 1];\n this.kernel = this.addWeight(\n 'kernel', [inputDim, this.units * 4], null, this.kernelInitializer,\n this.kernelRegularizer, true, this.kernelConstraint);\n this.recurrentKernel = this.addWeight(\n 'recurrent_kernel', [this.units, this.units * 4], null,\n this.recurrentInitializer, this.recurrentRegularizer, true,\n this.recurrentConstraint);\n let biasInitializer: Initializer;\n if (this.useBias) {\n if (this.unitForgetBias) {\n const capturedBiasInit = this.biasInitializer;\n const capturedUnits = this.units;\n biasInitializer = new (class CustomInit extends Initializer {\n /** @nocollapse */\n static className = 'CustomInit';\n\n apply(shape: Shape, dtype?: DataType): Tensor {\n // TODO(cais): More informative variable names?\n const bI = capturedBiasInit.apply([capturedUnits]);\n const bF = (new Ones()).apply([capturedUnits]);\n const bCAndH = capturedBiasInit.apply([capturedUnits * 2]);\n return K.concatAlongFirstAxis(\n K.concatAlongFirstAxis(bI, bF), bCAndH);\n }\n })();\n } else {\n biasInitializer = this.biasInitializer;\n }\n this.bias = this.addWeight(\n 'bias', [this.units * 4], null, biasInitializer, this.biasRegularizer,\n true, this.biasConstraint);\n } else {\n this.bias = null;\n }\n // Porting Notes: Unlike the PyKeras implementation, we perform slicing\n // of the weights and bias in the call() method, at execution time.\n this.built = true;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n const training = kwargs['training'] == null ? 
false : kwargs['training'];\n inputs = inputs as Tensor[];\n if (inputs.length !== 3) {\n throw new ValueError(\n `LSTMCell expects 3 input Tensors (inputs, h, c), got ` +\n `${inputs.length}.`);\n }\n let hTMinus1 = inputs[1]; // Previous memory state.\n const cTMinus1 = inputs[2]; // Previous carry state.\n inputs = inputs[0];\n if (0 < this.dropout && this.dropout < 1 && this.dropoutMask == null) {\n this.dropoutMask = generateDropoutMask({\n ones: () => tfc.onesLike(inputs as Tensor),\n rate: this.dropout,\n training,\n count: 4,\n dropoutFunc: this.dropoutFunc\n }) as Tensor[];\n }\n if (0 < this.recurrentDropout && this.recurrentDropout < 1 &&\n this.recurrentDropoutMask == null) {\n this.recurrentDropoutMask = generateDropoutMask({\n ones: () => tfc.onesLike(hTMinus1),\n rate: this.recurrentDropout,\n training,\n count: 4,\n dropoutFunc: this.dropoutFunc\n }) as Tensor[];\n }\n const dpMask = this.dropoutMask as [Tensor, Tensor, Tensor, Tensor];\n const recDpMask =\n this.recurrentDropoutMask as [Tensor, Tensor, Tensor, Tensor];\n\n // Note: For superior performance, TensorFlow.js always uses\n // implementation 2 regardless of the actual value of\n // config.implementation.\n let i: Tensor;\n let f: Tensor;\n let c: Tensor;\n let o: Tensor;\n if (0 < this.dropout && this.dropout < 1) {\n inputs = tfc.mul(inputs, dpMask[0]);\n }\n let z = K.dot(inputs, this.kernel.read());\n if (0 < this.recurrentDropout && this.recurrentDropout < 1) {\n hTMinus1 = tfc.mul(hTMinus1, recDpMask[0]);\n }\n z = tfc.add(z, K.dot(hTMinus1, this.recurrentKernel.read()));\n if (this.useBias) {\n z = K.biasAdd(z, this.bias.read());\n }\n\n const [z0, z1, z2, z3] = tfc.split(z, 4, z.rank - 1);\n\n i = this.recurrentActivation.apply(z0);\n f = this.recurrentActivation.apply(z1);\n c = tfc.add(tfc.mul(f, cTMinus1), tfc.mul(i, this.activation.apply(z2)));\n o = this.recurrentActivation.apply(z3);\n\n const h = tfc.mul(o, this.activation.apply(c));\n // TODO(cais): Add use_learning_phase flag properly.\n return [h, h, c];\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n const baseConfig = super.getConfig();\n\n const config: serialization.ConfigDict = {\n units: this.units,\n activation: serializeActivation(this.activation),\n recurrentActivation: serializeActivation(this.recurrentActivation),\n useBias: this.useBias,\n kernelInitializer: serializeInitializer(this.kernelInitializer),\n recurrentInitializer: serializeInitializer(this.recurrentInitializer),\n biasInitializer: serializeInitializer(this.biasInitializer),\n unitForgetBias: this.unitForgetBias,\n kernelRegularizer: serializeRegularizer(this.kernelRegularizer),\n recurrentRegularizer: serializeRegularizer(this.recurrentRegularizer),\n biasRegularizer: serializeRegularizer(this.biasRegularizer),\n activityRegularizer: serializeRegularizer(this.activityRegularizer),\n kernelConstraint: serializeConstraint(this.kernelConstraint),\n recurrentConstraint: serializeConstraint(this.recurrentConstraint),\n biasConstraint: serializeConstraint(this.biasConstraint),\n dropout: this.dropout,\n recurrentDropout: this.recurrentDropout,\n implementation: this.implementation,\n };\n\n return {...baseConfig, ...config};\n }\n}\nserialization.registerClass(LSTMCell);\n\n// Porting Note: Since this is a superset of SimpleRNNLayerConfig, we inherit\n// from that interface instead of repeating the fields here.\nexport declare interface LSTMLayerArgs extends SimpleRNNLayerArgs {\n /**\n * Activation function to use for the recurrent step.\n *\n * Defaults to 
hard sigmoid (`hardSigmoid`).\n *\n * If `null`, no activation is applied.\n */\n recurrentActivation?: ActivationIdentifier;\n\n /**\n * If `true`, add 1 to the bias of the forget gate at initialization.\n * Setting it to `true` will also force `biasInitializer = 'zeros'`.\n * This is recommended in\n * [Jozefowicz et\n * al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n */\n unitForgetBias?: boolean;\n\n /**\n * Implementation mode, either 1 or 2.\n * Mode 1 will structure its operations as a larger number of\n * smaller dot products and additions, whereas mode 2 will\n * batch them into fewer, larger operations. These modes will\n * have different performance profiles on different hardware and\n * for different applications.\n *\n * Note: For superior performance, TensorFlow.js always uses implementation\n * 2, regardless of the actual value of this config field.\n */\n implementation?: number;\n}\n\nexport class LSTM extends RNN {\n /** @nocollapse */\n static override className = 'LSTM';\n constructor(args: LSTMLayerArgs) {\n if (args.implementation === 0) {\n console.warn(\n '`implementation=0` has been deprecated, and now defaults to ' +\n '`implementation=1`. Please update your layer call.');\n }\n args.cell = new LSTMCell(args);\n super(args as RNNLayerArgs);\n // TODO(cais): Add activityRegularizer.\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n if (this.cell.dropoutMask != null) {\n tfc.dispose(this.cell.dropoutMask);\n this.cell.dropoutMask = null;\n }\n if (this.cell.recurrentDropoutMask != null) {\n tfc.dispose(this.cell.recurrentDropoutMask);\n this.cell.recurrentDropoutMask = null;\n }\n const mask = kwargs == null ? null : kwargs['mask'];\n const training = kwargs == null ? null : kwargs['training'];\n const initialState: Tensor[] =\n kwargs == null ? null : kwargs['initialState'];\n return super.call(inputs, {mask, training, initialState});\n });\n }\n\n /** @nocollapse */\n static override fromConfig(\n cls: serialization.SerializableConstructor,\n config: serialization.ConfigDict): T {\n if (config['implmentation'] === 0) {\n config['implementation'] = 1;\n }\n return new cls(config);\n }\n}\nserialization.registerClass(LSTM);\n\nexport declare interface StackedRNNCellsArgs extends LayerArgs {\n /**\n * An `Array` of `RNNCell` instances.\n */\n cells: RNNCell[];\n}\n\nexport class StackedRNNCells extends RNNCell {\n /** @nocollapse */\n static className = 'StackedRNNCells';\n protected cells: RNNCell[];\n\n constructor(args: StackedRNNCellsArgs) {\n super(args);\n this.cells = args.cells;\n }\n\n get stateSize(): number[] {\n // States are a flat list in reverse order of the cell stack.\n // This allows perserving the requirement `stack.statesize[0] ===\n // outputDim`. 
E.g., states of a 2-layer LSTM would be `[h2, c2, h1, c1]`,\n // assuming one LSTM has states `[h, c]`.\n const stateSize: number[] = [];\n for (const cell of this.cells.slice().reverse()) {\n if (Array.isArray(cell.stateSize)) {\n stateSize.push(...cell.stateSize);\n } else {\n stateSize.push(cell.stateSize);\n }\n }\n return stateSize;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n inputs = inputs as Tensor[];\n let states = inputs.slice(1);\n\n // Recover per-cell states.\n const nestedStates: Tensor[][] = [];\n for (const cell of this.cells.slice().reverse()) {\n if (Array.isArray(cell.stateSize)) {\n nestedStates.push(states.splice(0, cell.stateSize.length));\n } else {\n nestedStates.push(states.splice(0, 1));\n }\n }\n nestedStates.reverse();\n\n // Call the cells in order and store the returned states.\n const newNestedStates: Tensor[][] = [];\n let callInputs: Tensor[];\n for (let i = 0; i < this.cells.length; ++i) {\n const cell = this.cells[i];\n states = nestedStates[i];\n // TODO(cais): Take care of constants.\n if (i === 0) {\n callInputs = [inputs[0]].concat(states);\n } else {\n callInputs = [callInputs[0]].concat(states);\n }\n callInputs = cell.call(callInputs, kwargs) as Tensor[];\n newNestedStates.push(callInputs.slice(1));\n }\n\n // Format the new states as a flat list in reverse cell order.\n states = [];\n for (const cellStates of newNestedStates.slice().reverse()) {\n states.push(...cellStates);\n }\n return [callInputs[0]].concat(states);\n });\n }\n\n public override build(inputShape: Shape|Shape[]): void {\n if (isArrayOfShapes(inputShape)) {\n // TODO(cais): Take care of input constants.\n // const constantShape = inputShape.slice(1);\n inputShape = (inputShape as Shape[])[0];\n }\n inputShape = inputShape as Shape;\n let outputDim: number;\n this.cells.forEach((cell, i) => {\n nameScope(`RNNCell_${i}`, () => {\n // TODO(cais): Take care of input constants.\n\n cell.build(inputShape);\n if (Array.isArray(cell.stateSize)) {\n outputDim = cell.stateSize[0];\n } else {\n outputDim = cell.stateSize;\n }\n inputShape = [inputShape[0], outputDim] as Shape;\n });\n });\n this.built = true;\n }\n\n override getConfig(): serialization.ConfigDict {\n const baseConfig = super.getConfig();\n\n const getCellConfig = (cell: RNNCell) => {\n return {\n 'className': cell.getClassName(),\n 'config': cell.getConfig(),\n };\n };\n\n const cellConfigs = this.cells.map(getCellConfig);\n\n const config = {'cells': cellConfigs};\n\n return {...baseConfig, ...config};\n }\n\n /** @nocollapse */\n static override fromConfig(\n cls: serialization.SerializableConstructor,\n config: serialization.ConfigDict,\n customObjects = {} as serialization.ConfigDict): T {\n const cells: RNNCell[] = [];\n for (const cellConfig of (config['cells'] as serialization.ConfigDict[])) {\n cells.push(deserialize(cellConfig, customObjects) as RNNCell);\n }\n return new cls({cells});\n }\n\n override get trainableWeights(): LayerVariable[] {\n if (!this.trainable) {\n return [];\n }\n const weights: LayerVariable[] = [];\n for (const cell of this.cells) {\n weights.push(...cell.trainableWeights);\n }\n return weights;\n }\n\n override get nonTrainableWeights(): LayerVariable[] {\n const weights: LayerVariable[] = [];\n for (const cell of this.cells) {\n weights.push(...cell.nonTrainableWeights);\n }\n if (!this.trainable) {\n const trainableWeights: LayerVariable[] = [];\n for (const cell of this.cells) {\n 
trainableWeights.push(...cell.trainableWeights);\n }\n return trainableWeights.concat(weights);\n }\n return weights;\n }\n\n /**\n * Retrieve the weights of a the model.\n *\n * @returns A flat `Array` of `tf.Tensor`s.\n */\n override getWeights(): Tensor[] {\n const weights: LayerVariable[] = [];\n for (const cell of this.cells) {\n weights.push(...cell.weights);\n }\n return batchGetValue(weights);\n }\n\n /**\n * Set the weights of the model.\n *\n * @param weights An `Array` of `tf.Tensor`s with shapes and types matching\n * the output of `getWeights()`.\n */\n override setWeights(weights: Tensor[]): void {\n const tuples: Array<[LayerVariable, Tensor]> = [];\n for (const cell of this.cells) {\n const numParams = cell.weights.length;\n const inputWeights = weights.splice(numParams);\n for (let i = 0; i < cell.weights.length; ++i) {\n tuples.push([cell.weights[i], inputWeights[i]]);\n }\n }\n batchSetValue(tuples);\n }\n\n // TODO(cais): Maybe implemnt `losses` and `getLossesFor`.\n}\nserialization.registerClass(StackedRNNCells);\n\nexport function generateDropoutMask(args: {\n ones: () => tfc.Tensor,\n rate: number,\n training?: boolean,\n count?: number,\n dropoutFunc?: Function,\n}): tfc.Tensor|tfc.Tensor[] {\n const {ones, rate, training = false, count = 1, dropoutFunc} = args;\n\n const droppedInputs = () =>\n dropoutFunc != null ? dropoutFunc(ones(), rate) : K.dropout(ones(), rate);\n\n const createMask = () => K.inTrainPhase(droppedInputs, ones, training);\n\n // just in case count is provided with null or undefined\n if (!count || count <= 1) {\n return tfc.keep(createMask().clone());\n }\n\n const masks = Array(count).fill(undefined).map(createMask);\n\n return masks.map(m => tfc.keep(m.clone()));\n}\n","/**\n * @license\n * Copyright 2020 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\nimport * as tfc from '@tensorflow/tfjs-core';\nimport {Tensor, util} from '@tensorflow/tfjs-core';\n\nimport {Activation} from '../activations';\nimport * as K from '../backend/tfjs_backend';\nimport {checkDataFormat, checkPaddingMode} from '../common';\nimport {Constraint} from '../constraints';\nimport {InputSpec} from '../engine/topology';\nimport {AttributeError, NotImplementedError, ValueError} from '../errors';\nimport {Initializer} from '../initializers';\nimport {DataFormat, DataType, PaddingMode, Shape} from '../keras_format/common';\nimport {Regularizer} from '../regularizers';\nimport {Kwargs} from '../types';\nimport {convOutputLength, normalizeArray} from '../utils/conv_utils';\nimport {assertPositiveInteger} from '../utils/generic_utils';\nimport {getExactlyOneShape} from '../utils/types_utils';\n\nimport {BaseRNNLayerArgs, generateDropoutMask, LSTMCell, LSTMCellLayerArgs, LSTMLayerArgs, RNN, RNNCell, RNNLayerArgs, SimpleRNNCellLayerArgs} from './recurrent';\n\ndeclare interface ConvRNN2DCellArgs extends\n Omit {\n /**\n * The dimensionality of the output space (i.e. the number of filters in the\n * convolution).\n */\n filters: number;\n\n /**\n * The dimensions of the convolution window. If kernelSize is a number, the\n * convolutional window will be square.\n */\n kernelSize: number|number[];\n\n /**\n * The strides of the convolution in each dimension. 
If strides is a number,\n * strides in both dimensions are equal.\n *\n * Specifying any stride value != 1 is incompatible with specifying any\n * `dilationRate` value != 1.\n */\n strides?: number|number[];\n\n /**\n * Padding mode.\n */\n padding?: PaddingMode;\n\n /**\n * Format of the data, which determines the ordering of the dimensions in\n * the inputs.\n *\n * `channels_last` corresponds to inputs with shape\n * `(batch, ..., channels)`\n *\n * `channels_first` corresponds to inputs with shape `(batch, channels,\n * ...)`.\n *\n * Defaults to `channels_last`.\n */\n dataFormat?: DataFormat;\n\n /**\n * The dilation rate to use for the dilated convolution in each dimension.\n * Should be an integer or array of two or three integers.\n *\n * Currently, specifying any `dilationRate` value != 1 is incompatible with\n * specifying any `strides` value != 1.\n */\n dilationRate?: number|[number]|[number, number];\n}\n\nabstract class ConvRNN2DCell extends RNNCell {\n readonly filters: number;\n readonly kernelSize: number[];\n readonly strides: number[];\n readonly padding: PaddingMode;\n readonly dataFormat: DataFormat;\n readonly dilationRate: number[];\n\n readonly activation: Activation;\n readonly useBias: boolean;\n\n readonly kernelInitializer: Initializer;\n readonly recurrentInitializer: Initializer;\n readonly biasInitializer: Initializer;\n\n readonly kernelConstraint: Constraint;\n readonly recurrentConstraint: Constraint;\n readonly biasConstraint: Constraint;\n\n readonly kernelRegularizer: Regularizer;\n readonly recurrentRegularizer: Regularizer;\n readonly biasRegularizer: Regularizer;\n\n readonly dropout: number;\n readonly recurrentDropout: number;\n}\n\ndeclare interface ConvRNN2DLayerArgs extends BaseRNNLayerArgs,\n ConvRNN2DCellArgs {}\n\n/**\n * Base class for convolutional-recurrent layers.\n */\nclass ConvRNN2D extends RNN {\n /** @nocollapse */\n static override className = 'ConvRNN2D';\n\n declare readonly cell: ConvRNN2DCell;\n\n constructor(args: ConvRNN2DLayerArgs) {\n if (args.unroll) {\n throw new NotImplementedError(\n 'Unrolling is not possible with convolutional RNNs.');\n }\n\n if (Array.isArray(args.cell)) {\n throw new NotImplementedError(\n 'It is not possible at the moment to stack convolutional cells.');\n }\n\n super(args as RNNLayerArgs);\n\n this.inputSpec = [new InputSpec({ndim: 5})];\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tfc.tidy(() => {\n if (this.cell.dropoutMask != null) {\n tfc.dispose(this.cell.dropoutMask);\n\n this.cell.dropoutMask = null;\n }\n\n if (this.cell.recurrentDropoutMask != null) {\n tfc.dispose(this.cell.recurrentDropoutMask);\n\n this.cell.recurrentDropoutMask = null;\n }\n\n if (kwargs && kwargs['constants']) {\n throw new ValueError('ConvRNN2D cell does not support constants');\n }\n\n const mask = kwargs == null ? null : kwargs['mask'];\n\n const training = kwargs == null ? null : kwargs['training'];\n\n const initialState: Tensor[] =\n kwargs == null ? 
null : kwargs['initialState'];\n\n return super.call(inputs, {mask, training, initialState});\n });\n }\n\n override computeOutputShape(inputShape: Shape): Shape|Shape[] {\n let outShape: Shape = this.computeSingleOutputShape(inputShape);\n\n if (!this.returnSequences) {\n outShape = [outShape[0], ...outShape.slice(2)];\n }\n\n if (this.returnState) {\n outShape =\n [outShape, ...Array(2).fill([inputShape[0], ...outShape.slice(-3)])];\n }\n\n return outShape;\n }\n\n override getInitialState(inputs: tfc.Tensor): tfc.Tensor[] {\n return tfc.tidy(() => {\n const {stateSize} = this.cell;\n\n const inputShape = inputs.shape;\n\n const outputShape = this.computeSingleOutputShape(inputShape);\n\n const stateShape = [outputShape[0], ...outputShape.slice(2)];\n\n const initialState = tfc.zeros(stateShape);\n\n if (Array.isArray(stateSize)) {\n return Array(stateSize.length).fill(initialState);\n }\n\n return [initialState];\n });\n }\n\n override resetStates(states?: Tensor|Tensor[], training = false): void {\n tfc.tidy(() => {\n if (!this.stateful) {\n throw new AttributeError(\n 'Cannot call resetStates() on an RNN Layer that is not stateful.');\n }\n\n const inputShape = this.inputSpec[0].shape;\n\n const outputShape = this.computeSingleOutputShape(inputShape);\n\n const stateShape = [outputShape[0], ...outputShape.slice(2)];\n\n const batchSize = inputShape[0];\n\n if (batchSize == null) {\n throw new ValueError(\n 'If an RNN is stateful, it needs to know its batch size. Specify ' +\n 'the batch size of your input tensors: \\n' +\n '- If using a Sequential model, specify the batch size by ' +\n 'passing a `batchInputShape` option to your first layer.\\n' +\n '- If using the functional API, specify the batch size by ' +\n 'passing a `batchShape` option to your Input layer.');\n }\n\n // Initialize state if null.\n if (this.getStates() == null) {\n if (Array.isArray(this.cell.stateSize)) {\n this.states_ = this.cell.stateSize.map(() => tfc.zeros(stateShape));\n } else {\n this.states_ = [tfc.zeros(stateShape)];\n }\n } else if (states == null) {\n // Dispose old state tensors.\n tfc.dispose(this.states_);\n\n // For stateful RNNs, fully dispose kept old states.\n if (this.keptStates != null) {\n tfc.dispose(this.keptStates);\n this.keptStates = [];\n }\n\n if (Array.isArray(this.cell.stateSize)) {\n this.states_ = this.cell.stateSize.map(() => tfc.zeros(stateShape));\n } else {\n this.states_[0] = tfc.zeros(stateShape);\n }\n } else {\n if (!Array.isArray(states)) {\n states = [states];\n }\n\n if (states.length !== this.states_.length) {\n throw new ValueError(\n `Layer ${this.name} expects ${this.states_.length} state(s), ` +\n `but it received ${states.length} state value(s). Input ` +\n `received: ${states}`);\n }\n\n if (training) {\n // Store old state tensors for complete disposal later, i.e., during\n // the next no-arg call to this method. 
We do not dispose the old\n // states immediately because that BPTT (among other things) require\n // them.\n this.keptStates.push(this.states_.slice());\n } else {\n tfc.dispose(this.states_);\n }\n\n for (let index = 0; index < this.states_.length; ++index) {\n const value = states[index];\n\n const expectedShape = stateShape;\n\n if (!util.arraysEqual(value.shape, expectedShape)) {\n throw new ValueError(\n `State ${index} is incompatible with layer ${this.name}: ` +\n `expected shape=${expectedShape}, received shape=${\n value.shape}`);\n }\n\n this.states_[index] = value;\n }\n }\n\n this.states_ = this.states_.map(state => tfc.keep(state.clone()));\n });\n }\n\n protected computeSingleOutputShape(inputShape: Shape): Shape {\n const {dataFormat, filters, kernelSize, padding, strides, dilationRate} =\n this.cell;\n\n const isChannelsFirst = dataFormat === 'channelsFirst';\n\n const h = inputShape[isChannelsFirst ? 3 : 2];\n const w = inputShape[isChannelsFirst ? 4 : 3];\n\n const hOut = convOutputLength(\n h, kernelSize[0], padding, strides[0], dilationRate[0]);\n const wOut = convOutputLength(\n w, kernelSize[1], padding, strides[1], dilationRate[1]);\n\n const outShape: Shape = [\n ...inputShape.slice(0, 2),\n ...(isChannelsFirst ? [filters, hOut, wOut] : [hOut, wOut, filters])\n ];\n\n return outShape;\n }\n}\n\nexport declare interface ConvLSTM2DCellArgs extends\n Omit, ConvRNN2DCellArgs {}\n\nexport class ConvLSTM2DCell extends LSTMCell implements ConvRNN2DCell {\n /** @nocollapse */\n static override className = 'ConvLSTM2DCell';\n\n readonly filters: number;\n readonly kernelSize: number[];\n readonly strides: number[];\n readonly padding: PaddingMode;\n readonly dataFormat: DataFormat;\n readonly dilationRate: number[];\n\n constructor(args: ConvLSTM2DCellArgs) {\n const {\n filters,\n kernelSize,\n strides,\n padding,\n dataFormat,\n dilationRate,\n } = args;\n\n super({...args, units: filters});\n\n this.filters = filters;\n assertPositiveInteger(this.filters, 'filters');\n\n this.kernelSize = normalizeArray(kernelSize, 2, 'kernelSize');\n this.kernelSize.forEach(size => assertPositiveInteger(size, 'kernelSize'));\n\n this.strides = normalizeArray(strides || 1, 2, 'strides');\n this.strides.forEach(stride => assertPositiveInteger(stride, 'strides'));\n\n this.padding = padding || 'valid';\n checkPaddingMode(this.padding);\n\n this.dataFormat = dataFormat || 'channelsLast';\n checkDataFormat(this.dataFormat);\n\n this.dilationRate = normalizeArray(dilationRate || 1, 2, 'dilationRate');\n this.dilationRate.forEach(\n rate => assertPositiveInteger(rate, 'dilationRate'));\n }\n\n public override build(inputShape: Shape|Shape[]): void {\n inputShape = getExactlyOneShape(inputShape);\n\n const channelAxis =\n this.dataFormat === 'channelsFirst' ? 1 : inputShape.length - 1;\n\n if (inputShape[channelAxis] == null) {\n throw new ValueError(\n `The channel dimension of the input should be defined. 
` +\n `Found ${inputShape[channelAxis]}`);\n }\n\n const inputDim = inputShape[channelAxis];\n\n const numOfKernels = 4;\n\n const kernelShape =\n this.kernelSize.concat([inputDim, this.filters * numOfKernels]);\n\n this.kernel = this.addWeight(\n 'kernel', kernelShape, null, this.kernelInitializer,\n this.kernelRegularizer, true, this.kernelConstraint);\n\n const recurrentKernelShape =\n this.kernelSize.concat([this.filters, this.filters * numOfKernels]);\n\n this.recurrentKernel = this.addWeight(\n 'recurrent_kernel', recurrentKernelShape, null,\n this.recurrentInitializer, this.recurrentRegularizer, true,\n this.recurrentConstraint);\n\n if (this.useBias) {\n let biasInitializer: Initializer;\n\n if (this.unitForgetBias) {\n const init = this.biasInitializer;\n\n const filters = this.filters;\n\n biasInitializer = new (class CustomInit extends Initializer {\n /** @nocollapse */\n static className = 'CustomInit';\n\n apply(shape: Shape, dtype?: DataType): tfc.Tensor {\n const biasI = init.apply([filters]);\n const biasF = tfc.ones([filters]);\n const biasCAndO = init.apply([filters * 2]);\n return K.concatenate([biasI, biasF, biasCAndO]);\n }\n })();\n } else {\n biasInitializer = this.biasInitializer;\n }\n\n this.bias = this.addWeight(\n 'bias', [this.filters * numOfKernels], null, biasInitializer,\n this.biasRegularizer, true, this.biasConstraint);\n }\n\n this.built = true;\n }\n\n override call(inputs: tfc.Tensor[], kwargs: Kwargs): tfc.Tensor[] {\n return tfc.tidy(() => {\n if (inputs.length !== 3) {\n throw new ValueError(\n `ConvLSTM2DCell expects 3 input Tensors (inputs, h, c), got ` +\n `${inputs.length}.`);\n }\n\n const training = kwargs['training'] || false;\n\n const x = inputs[0]; // Current input\n const hTMinus1 = inputs[1]; // Previous memory state.\n const cTMinus1 = inputs[2]; // Previous carry state.\n\n const numOfKernels = 4;\n\n type DropoutMasks = [tfc.Tensor, tfc.Tensor, tfc.Tensor, tfc.Tensor];\n\n if (0 < this.dropout && this.dropout < 1 && this.dropoutMask == null) {\n this.dropoutMask = generateDropoutMask({\n ones: () => tfc.onesLike(x),\n rate: this.dropout,\n training,\n count: numOfKernels,\n dropoutFunc: this.dropoutFunc\n }) as tfc.Tensor[];\n }\n\n const dropoutMask = this.dropoutMask as DropoutMasks;\n\n const applyDropout =\n (x: tfc.Tensor, mask: tfc.Tensor[], index: number) => {\n if (!mask || !mask[index]) {\n return x;\n }\n\n return tfc.mul(mask[index], x);\n };\n\n let xI = applyDropout(x, dropoutMask, 0);\n let xF = applyDropout(x, dropoutMask, 1);\n let xC = applyDropout(x, dropoutMask, 2);\n let xO = applyDropout(x, dropoutMask, 3);\n\n if (0 < this.recurrentDropout && this.recurrentDropout < 1 &&\n this.recurrentDropoutMask == null) {\n this.recurrentDropoutMask = generateDropoutMask({\n ones: () => tfc.onesLike(hTMinus1),\n rate: this.recurrentDropout,\n training,\n count: numOfKernels,\n dropoutFunc: this.dropoutFunc\n }) as tfc.Tensor[];\n }\n\n const recDropoutMask = this.recurrentDropoutMask as DropoutMasks;\n\n let hI = applyDropout(hTMinus1, recDropoutMask, 0);\n let hF = applyDropout(hTMinus1, recDropoutMask, 1);\n let hC = applyDropout(hTMinus1, recDropoutMask, 2);\n let hO = applyDropout(hTMinus1, recDropoutMask, 3);\n\n const kernelChannelAxis = 3;\n\n const [kernelI, kernelF, kernelC, kernelO]: tfc.Tensor[] =\n tfc.split(this.kernel.read(), numOfKernels, kernelChannelAxis);\n\n const [biasI, biasF, biasC, biasO]: tfc.Tensor[] = this.useBias ?\n tfc.split(this.bias.read(), numOfKernels) :\n [null, null, null, null];\n\n xI = 
this.inputConv(xI, kernelI, biasI, this.padding);\n xF = this.inputConv(xF, kernelF, biasF, this.padding);\n xC = this.inputConv(xC, kernelC, biasC, this.padding);\n xO = this.inputConv(xO, kernelO, biasO, this.padding);\n\n const [recKernelI, recKernelF, recKernelC, recKernelO]: tfc.Tensor[] =\n tfc.split(\n this.recurrentKernel.read(), numOfKernels, kernelChannelAxis);\n\n hI = this.recurrentConv(hI, recKernelI);\n hF = this.recurrentConv(hF, recKernelF);\n hC = this.recurrentConv(hC, recKernelC);\n hO = this.recurrentConv(hO, recKernelO);\n\n const i = this.recurrentActivation.apply(tfc.add(xI, hI));\n const f = this.recurrentActivation.apply(tfc.add(xF, hF));\n const c = tfc.add(\n tfc.mul(f, cTMinus1),\n tfc.mul(i, this.activation.apply(tfc.add(xC, hC))));\n const h = tfc.mul(\n this.recurrentActivation.apply(tfc.add(xO, hO)),\n this.activation.apply(c));\n\n return [h, h, c];\n });\n }\n\n override getConfig(): tfc.serialization.ConfigDict {\n const {'units': _, ...baseConfig} = super.getConfig();\n\n const config: tfc.serialization.ConfigDict = {\n filters: this.filters,\n kernelSize: this.kernelSize,\n padding: this.padding,\n dataFormat: this.dataFormat,\n dilationRate: this.dilationRate,\n strides: this.strides,\n };\n\n return {...baseConfig, ...config};\n }\n\n inputConv(x: Tensor, w: Tensor, b?: Tensor, padding?: PaddingMode) {\n const out = tfc.conv2d(\n x as tfc.Tensor3D, w as tfc.Tensor4D, this.strides as [number, number],\n (padding || 'valid') as 'same' | 'valid',\n this.dataFormat === 'channelsFirst' ? 'NCHW' : 'NHWC',\n this.dilationRate as [number, number]);\n\n if (b) {\n return K.biasAdd(out, b, this.dataFormat) as tfc.Tensor3D;\n }\n\n return out;\n }\n\n recurrentConv(x: Tensor, w: Tensor) {\n const strides = 1;\n\n return tfc.conv2d(\n x as tfc.Tensor3D, w as tfc.Tensor4D, strides, 'same',\n this.dataFormat === 'channelsFirst' ? 
'NCHW' : 'NHWC');\n }\n}\n\ntfc.serialization.registerClass(ConvLSTM2DCell);\n\nexport declare interface ConvLSTM2DArgs extends\n Omit, ConvRNN2DLayerArgs {}\n\nexport class ConvLSTM2D extends ConvRNN2D {\n /** @nocollapse */\n static override className = 'ConvLSTM2D';\n\n constructor(args: ConvLSTM2DArgs) {\n const cell = new ConvLSTM2DCell(args);\n\n super({...args, cell} as ConvRNN2DLayerArgs);\n }\n\n /** @nocollapse */\n static override fromConfig(\n cls: tfc.serialization.SerializableConstructor,\n config: tfc.serialization.ConfigDict): T {\n return new cls(config);\n }\n}\n\ntfc.serialization.registerClass(ConvLSTM2D);\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * TensorFlow.js Layers: Basic Layers.\n */\n\nimport {any, cast, mul, notEqual, reshape, serialization, Tensor, tidy, transpose, util} from '@tensorflow/tfjs-core';\n\nimport {Activation as ActivationFn, getActivation, serializeActivation} from '../activations';\nimport * as K from '../backend/tfjs_backend';\nimport {Constraint, ConstraintIdentifier, getConstraint, serializeConstraint} from '../constraints';\nimport {DisposeResult, InputSpec, Layer, LayerArgs} from '../engine/topology';\nimport {ValueError} from '../errors';\nimport {getInitializer, Initializer, InitializerIdentifier, serializeInitializer} from '../initializers';\nimport {ActivationIdentifier} from '../keras_format/activation_config';\nimport {DataFormat, Shape} from '../keras_format/common';\nimport {LayerConfig} from '../keras_format/topology_config';\nimport {getRegularizer, Regularizer, RegularizerIdentifier, serializeRegularizer} from '../regularizers';\nimport {Kwargs} from '../types';\nimport {assertPositiveInteger, mapActivationToFusedKernel} from '../utils/generic_utils';\nimport {arrayProd, range} from '../utils/math_utils';\nimport {getExactlyOneShape, getExactlyOneTensor} from '../utils/types_utils';\nimport {LayerVariable} from '../variables';\n\nexport declare interface DropoutLayerArgs extends LayerArgs {\n /** Float between 0 and 1. Fraction of the input units to drop. */\n rate: number;\n\n /**\n * Integer array representing the shape of the binary dropout mask that will\n * be multiplied with the input.\n *\n * For instance, if your inputs have shape `(batchSize, timesteps, features)`\n * and you want the dropout mask to be the same for all timesteps, you can use\n * `noise_shape=(batch_size, 1, features)`.\n */\n noiseShape?: number[];\n\n /** An integer to use as random seed. */\n seed?: number;\n}\n\nexport class Dropout extends Layer {\n /** @nocollapse */\n static className = 'Dropout';\n private readonly rate: number;\n private readonly noiseShape: number[];\n private readonly seed: number;\n\n constructor(args: DropoutLayerArgs) {\n super(args);\n this.rate = Math.max(Math.min(args.rate, 1), 0);\n // So that the scalar doesn't get tidied up between executions.\n this.noiseShape = args.noiseShape;\n this.seed = args.seed;\n this.supportsMasking = true;\n }\n\n protected getNoiseShape(input: Tensor): Shape {\n if (this.noiseShape == null) {\n return this.noiseShape;\n }\n const inputShape = input.shape;\n const noiseShape: Shape = [];\n for (let i = 0; i < this.noiseShape.length; ++i) {\n noiseShape.push(\n this.noiseShape[i] == null ? 
inputShape[i] : this.noiseShape[i]);\n }\n return noiseShape;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n this.invokeCallHook(inputs, kwargs);\n const input = getExactlyOneTensor(inputs);\n if (0 < this.rate && this.rate < 1) {\n const training =\n kwargs['training'] == null ? false : kwargs['training'];\n const noiseShape = this.getNoiseShape(input);\n const output = K.inTrainPhase(\n () => K.dropout(input, this.rate, noiseShape, this.seed),\n () => input, training);\n return output;\n }\n return inputs;\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n const config = {\n rate: this.rate,\n noiseShape: this.noiseShape,\n seed: this.seed,\n };\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n\n override dispose(): DisposeResult {\n return super.dispose();\n }\n}\nserialization.registerClass(Dropout);\n\nexport declare interface DenseLayerArgs extends LayerArgs {\n /** Positive integer, dimensionality of the output space. */\n units: number;\n /**\n * Activation function to use.\n *\n * If unspecified, no activation is applied.\n */\n activation?: ActivationIdentifier;\n /** Whether to apply a bias. */\n useBias?: boolean;\n /**\n * Initializer for the dense kernel weights matrix.\n */\n kernelInitializer?: InitializerIdentifier|Initializer;\n /**\n * Initializer for the bias vector.\n */\n biasInitializer?: InitializerIdentifier|Initializer;\n /**\n * If specified, defines inputShape as `[inputDim]`.\n */\n inputDim?: number;\n\n /**\n * Constraint for the kernel weights.\n */\n kernelConstraint?: ConstraintIdentifier|Constraint;\n\n /**\n * Constraint for the bias vector.\n */\n biasConstraint?: ConstraintIdentifier|Constraint;\n\n /**\n * Regularizer function applied to the dense kernel weights matrix.\n */\n kernelRegularizer?: RegularizerIdentifier|Regularizer;\n\n /**\n * Regularizer function applied to the bias vector.\n */\n biasRegularizer?: RegularizerIdentifier|Regularizer;\n\n /**\n * Regularizer function applied to the activation.\n */\n activityRegularizer?: RegularizerIdentifier|Regularizer;\n}\n\nexport interface SpatialDropout1DLayerConfig extends LayerConfig {\n /** Float between 0 and 1. Fraction of the input units to drop. */\n rate: number;\n\n /** An integer to use as random seed. 
*/\n seed?: number;\n}\n\nexport class SpatialDropout1D extends Dropout {\n /** @nocollapse */\n static override className = 'SpatialDropout1D';\n\n constructor(args: SpatialDropout1DLayerConfig) {\n super(args);\n this.inputSpec = [{ndim: 3}];\n }\n\n protected override getNoiseShape(input: Tensor): Shape {\n const inputShape = input.shape;\n return [inputShape[0], 1, inputShape[2]];\n }\n}\nserialization.registerClass(SpatialDropout1D);\n\nexport class Dense extends Layer {\n /** @nocollapse */\n static className = 'Dense';\n private units: number;\n // Default activation: Linear (none).\n private activation: ActivationFn = null;\n private useBias = true;\n private kernelInitializer: Initializer;\n private biasInitializer: Initializer;\n private kernel: LayerVariable = null;\n private bias: LayerVariable = null;\n\n readonly DEFAULT_KERNEL_INITIALIZER: InitializerIdentifier = 'glorotNormal';\n readonly DEFAULT_BIAS_INITIALIZER: InitializerIdentifier = 'zeros';\n private readonly kernelConstraint?: Constraint;\n private readonly biasConstraint?: Constraint;\n private readonly kernelRegularizer?: Regularizer;\n private readonly biasRegularizer?: Regularizer;\n\n constructor(args: DenseLayerArgs) {\n super(args);\n if (args.batchInputShape == null && args.inputShape == null &&\n args.inputDim != null) {\n // This logic is copied from Layer's constructor, since we can't\n // do exactly what the Python constructor does for Dense().\n let batchSize: number = null;\n if (args.batchSize != null) {\n batchSize = args.batchSize;\n }\n this.batchInputShape = [batchSize, args.inputDim];\n }\n\n this.units = args.units;\n assertPositiveInteger(this.units, 'units');\n this.activation = getActivation(args.activation);\n if (args.useBias != null) {\n this.useBias = args.useBias;\n }\n this.kernelInitializer = getInitializer(\n args.kernelInitializer || this.DEFAULT_KERNEL_INITIALIZER);\n this.biasInitializer =\n getInitializer(args.biasInitializer || this.DEFAULT_BIAS_INITIALIZER);\n this.kernelConstraint = getConstraint(args.kernelConstraint);\n this.biasConstraint = getConstraint(args.biasConstraint);\n this.kernelRegularizer = getRegularizer(args.kernelRegularizer);\n this.biasRegularizer = getRegularizer(args.biasRegularizer);\n this.activityRegularizer = getRegularizer(args.activityRegularizer);\n this.supportsMasking = true;\n\n this.inputSpec = [{minNDim: 2}];\n }\n\n public override build(inputShape: Shape|Shape[]): void {\n inputShape = getExactlyOneShape(inputShape);\n const inputLastDim = inputShape[inputShape.length - 1];\n if (this.kernel == null) {\n this.kernel = this.addWeight(\n 'kernel', [inputLastDim, this.units], null, this.kernelInitializer,\n this.kernelRegularizer, true, this.kernelConstraint);\n if (this.useBias) {\n this.bias = this.addWeight(\n 'bias', [this.units], null, this.biasInitializer,\n this.biasRegularizer, true, this.biasConstraint);\n }\n }\n\n this.inputSpec = [{minNDim: 2, axes: {[-1]: inputLastDim}}];\n this.built = true;\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n inputShape = getExactlyOneShape(inputShape);\n const outputShape = inputShape.slice();\n outputShape[outputShape.length - 1] = this.units;\n return outputShape;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n this.invokeCallHook(inputs, kwargs);\n // Dense layer accepts only a single input.\n const input = getExactlyOneTensor(inputs);\n const fusedActivationName =\n 
mapActivationToFusedKernel(this.activation.getClassName());\n let output: Tensor;\n\n if (fusedActivationName != null) {\n output = K.dot(\n input, this.kernel.read(), fusedActivationName,\n this.bias ? this.bias.read() : null);\n } else {\n output = K.dot(input, this.kernel.read());\n if (this.bias != null) {\n output = K.biasAdd(output, this.bias.read());\n }\n if (this.activation != null) {\n output = this.activation.apply(output);\n }\n }\n\n return output;\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {\n units: this.units,\n activation: serializeActivation(this.activation),\n useBias: this.useBias,\n kernelInitializer: serializeInitializer(this.kernelInitializer),\n biasInitializer: serializeInitializer(this.biasInitializer),\n kernelRegularizer: serializeRegularizer(this.kernelRegularizer),\n biasRegularizer: serializeRegularizer(this.biasRegularizer),\n activityRegularizer: serializeRegularizer(this.activityRegularizer),\n kernelConstraint: serializeConstraint(this.kernelConstraint),\n biasConstraint: serializeConstraint(this.biasConstraint)\n };\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(Dense);\n\nexport declare interface FlattenLayerArgs extends LayerArgs {\n /** Image data format: channelsLast (default) or channelsFirst. */\n dataFormat?: DataFormat;\n}\n\nexport class Flatten extends Layer {\n private dataFormat: DataFormat;\n\n /** @nocollapse */\n static className = 'Flatten';\n constructor(args?: FlattenLayerArgs) {\n args = args || {};\n super(args);\n this.inputSpec = [{minNDim: 3}];\n this.dataFormat = args.dataFormat;\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n inputShape = getExactlyOneShape(inputShape);\n for (const dim of inputShape.slice(1)) {\n if (dim == null) {\n throw new ValueError(\n `The shape of the input to \"Flatten\" is not fully defined ` +\n `(got ${inputShape.slice(1)}). 
Make sure to pass a complete ` +\n `\"input_shape\" or \"batch_input_shape\" argument to the first ` +\n `layer in your model.`);\n }\n }\n return [inputShape[0], arrayProd(inputShape, 1)];\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n this.invokeCallHook(inputs, kwargs);\n\n let input = getExactlyOneTensor(inputs);\n if (this.dataFormat === 'channelsFirst' && input.rank > 1) {\n const permutation: number[] = [0];\n for (let i = 2; i < input.rank; ++i) {\n permutation.push(i);\n }\n permutation.push(1);\n input = transpose(input, permutation);\n }\n\n return K.batchFlatten(input);\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {};\n if (this.dataFormat != null) {\n config['dataFormat'] = this.dataFormat;\n }\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(Flatten);\n\nexport declare interface ActivationLayerArgs extends LayerArgs {\n /**\n * Name of the activation function to use.\n */\n activation: ActivationIdentifier;\n}\n\nexport class Activation extends Layer {\n /** @nocollapse */\n static className = 'Activation';\n activation: ActivationFn;\n\n constructor(args: ActivationLayerArgs) {\n super(args);\n this.supportsMasking = true;\n this.activation = getActivation(args.activation);\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n this.invokeCallHook(inputs, kwargs);\n const input = getExactlyOneTensor(inputs);\n return this.activation.apply(input);\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n const config = {activation: serializeActivation(this.activation)};\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(Activation);\n\nexport declare interface ReshapeLayerArgs extends LayerArgs {\n /** The target shape. Does not include the batch axis. 
*/\n targetShape: Shape;\n}\n\nexport declare interface RepeatVectorLayerArgs extends LayerArgs {\n /**\n * The integer number of times to repeat the input.\n */\n n: number;\n}\n\nexport class RepeatVector extends Layer {\n /** @nocollapse */\n static className = 'RepeatVector';\n readonly n: number;\n\n constructor(args: RepeatVectorLayerArgs) {\n super(args);\n this.n = args.n;\n this.inputSpec = [{ndim: 2}];\n }\n\n override computeOutputShape(inputShape: Shape): Shape {\n return [inputShape[0], this.n, inputShape[1]];\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n inputs = getExactlyOneTensor(inputs);\n return K.repeat(inputs, this.n);\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n const config = {\n n: this.n,\n };\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(RepeatVector);\n\nexport class Reshape extends Layer {\n /** @nocollapse */\n static className = 'Reshape';\n private targetShape: Shape;\n\n constructor(args: ReshapeLayerArgs) {\n super(args);\n this.targetShape = args.targetShape;\n\n // Make sure that all unknown dimensions are represented as `null`.\n for (let i = 0; i < this.targetShape.length; ++i) {\n if (this.isUnknown(this.targetShape[i])) {\n this.targetShape[i] = null;\n }\n }\n }\n\n private isUnknown(dim: number): boolean {\n return dim < 0 || dim == null;\n }\n\n /**\n * Finds and replaces a missing dimension in output shape.\n *\n * This is a near direct port of the internal Numpy function\n * `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`.\n *\n * @param inputShape: Original shape of array begin reshape.\n * @param outputShape: Target shape of the array, with at most a single\n * `null` or negative number, which indicates an underdetermined dimension\n * that should be derived from `inputShape` and the known dimensions of\n * `outputShape`.\n * @returns: The output shape with `null` replaced with its computed value.\n * @throws: ValueError: If `inputShape` and `outputShape` do not match.\n */\n private fixUnknownDimension(inputShape: Shape, outputShape: Shape): Shape {\n const errorMsg = 'Total size of new array must be unchanged.';\n const finalShape = outputShape.slice();\n let known = 1;\n let unknown = null;\n for (let i = 0; i < finalShape.length; ++i) {\n const dim = finalShape[i];\n if (this.isUnknown(dim)) {\n if (unknown === null) {\n unknown = i;\n } else {\n throw new ValueError('Can only specifiy one unknown dimension.');\n }\n } else {\n known *= dim;\n }\n }\n\n const originalSize = arrayProd(inputShape);\n if (unknown !== null) {\n if (known === 0 || originalSize % known !== 0) {\n throw new ValueError(errorMsg);\n }\n finalShape[unknown] = originalSize / known;\n } else if (originalSize !== known) {\n throw new ValueError(errorMsg);\n }\n\n return finalShape;\n }\n\n override computeOutputShape(inputShape: Shape): Shape {\n let anyUnknownDims = false;\n for (let i = 0; i < inputShape.length; ++i) {\n if (this.isUnknown(inputShape[i])) {\n anyUnknownDims = true;\n break;\n }\n }\n\n if (anyUnknownDims) {\n return inputShape.slice(0, 1).concat(this.targetShape);\n } else {\n return inputShape.slice(0, 1).concat(\n this.fixUnknownDimension(inputShape.slice(1), this.targetShape));\n }\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n this.invokeCallHook(inputs, kwargs);\n const input = getExactlyOneTensor(inputs);\n 
const inputShape = input.shape;\n const outputShape = inputShape.slice(0, 1).concat(\n this.fixUnknownDimension(inputShape.slice(1), this.targetShape));\n return reshape(input, outputShape);\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n const config = {\n targetShape: this.targetShape,\n };\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(Reshape);\n\nexport declare interface PermuteLayerArgs extends LayerArgs {\n /**\n * Array of integers. Permutation pattern. Does not include the\n * sample (batch) dimension. Index starts at 1.\n * For instance, `[2, 1]` permutes the first and second dimensions\n * of the input.\n */\n dims: number[];\n}\n\nexport class Permute extends Layer {\n /** @nocollapse */\n static className = 'Permute';\n readonly dims: number[];\n private readonly dimsIncludingBatch: number[];\n\n constructor(args: PermuteLayerArgs) {\n super(args);\n if (args.dims == null) {\n throw new Error(\n 'Required configuration field `dims` is missing during Permute ' +\n 'constructor call.');\n }\n if (!Array.isArray(args.dims)) {\n throw new Error(\n 'Permute constructor requires `dims` to be an Array, but received ' +\n `${args.dims} instead.`);\n }\n\n // Check the validity of the permutation indices.\n const expectedSortedIndices = range(1, args.dims.length + 1);\n if (!util.arraysEqual(args.dims.slice().sort(), expectedSortedIndices)) {\n throw new Error(\n 'Invalid permutation `dims`: ' + JSON.stringify(args.dims) +\n ' `dims` must contain consecutive integers starting from 1.');\n }\n\n this.dims = args.dims;\n this.dimsIncludingBatch = [0].concat(this.dims);\n this.inputSpec = [new InputSpec({ndim: this.dims.length + 1})];\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n inputShape = getExactlyOneShape(inputShape);\n const outputShape = inputShape.slice();\n this.dims.forEach((dim: number, i: number) => {\n outputShape[i + 1] = (inputShape as Shape)[dim];\n });\n return outputShape;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return transpose(getExactlyOneTensor(inputs), this.dimsIncludingBatch);\n }\n\n override getConfig(): serialization.ConfigDict {\n const config = {\n dims: this.dims,\n };\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(Permute);\n\nexport declare interface MaskingArgs extends LayerArgs {\n /**\n * Masking Value. Defaults to `0.0`.\n */\n maskValue?: number;\n}\n\nexport class Masking extends Layer {\n /** @nocollapse */\n static className = 'Masking';\n maskValue: number;\n\n constructor(args?: MaskingArgs) {\n super(args == null ? {} : args);\n this.supportsMasking = true;\n if (args != null) {\n this.maskValue = args.maskValue == null ? 
0 : args.maskValue;\n } else {\n this.maskValue = 0;\n }\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n return inputShape;\n }\n\n override getConfig() {\n const baseConfig = super.getConfig();\n const config = {maskValue: this.maskValue};\n Object.assign(config, baseConfig);\n return config;\n }\n\n override computeMask(inputs: Tensor|Tensor[], mask?: Tensor|Tensor[]):\n Tensor {\n const input = getExactlyOneTensor(inputs);\n const axis = -1;\n return any(notEqual(input, this.maskValue), axis);\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n this.invokeCallHook(inputs, kwargs);\n const input = getExactlyOneTensor(inputs);\n const axis = -1;\n const keepDims = true;\n const booleanMask = any(notEqual(input, this.maskValue), axis, keepDims);\n const output = mul(input, cast(booleanMask, input.dtype));\n return output;\n });\n }\n}\nserialization.registerClass(Masking);\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * TensorFlow.js Layers: Embedding Layer.\n *\n * Original source: keras/constraints.py\n */\nimport {notEqual, reshape, serialization, Tensor, tidy, zerosLike} from '@tensorflow/tfjs-core';\n\nimport * as K from '../backend/tfjs_backend';\nimport {Constraint, ConstraintIdentifier, getConstraint, serializeConstraint} from '../constraints';\nimport {Layer, LayerArgs} from '../engine/topology';\nimport {ValueError} from '../errors';\nimport {getInitializer, Initializer, InitializerIdentifier, serializeInitializer} from '../initializers';\nimport {Shape} from '../keras_format/common';\nimport {getRegularizer, Regularizer, RegularizerIdentifier, serializeRegularizer} from '../regularizers';\nimport {Kwargs} from '../types';\nimport * as generic_utils from '../utils/generic_utils';\nimport {getExactlyOneShape, getExactlyOneTensor} from '../utils/types_utils';\nimport {LayerVariable} from '../variables';\n\nexport declare interface EmbeddingLayerArgs extends LayerArgs {\n /**\n * Integer > 0. Size of the vocabulary, i.e. maximum integer index + 1.\n */\n inputDim: number;\n /**\n * Integer >= 0. Dimension of the dense embedding.\n */\n outputDim: number;\n /**\n * Initializer for the `embeddings` matrix.\n */\n embeddingsInitializer?: InitializerIdentifier|Initializer;\n /**\n * Regularizer function applied to the `embeddings` matrix.\n */\n embeddingsRegularizer?: RegularizerIdentifier|Regularizer;\n /**\n * Regularizer function applied to the activation.\n */\n activityRegularizer?: RegularizerIdentifier|Regularizer;\n /**\n * Constraint function applied to the `embeddings` matrix.\n */\n embeddingsConstraint?: ConstraintIdentifier|Constraint;\n /**\n * Whether the input value 0 is a special \"padding\" value that should be\n * masked out. This is useful when using recurrent layers which may take\n * variable length input.\n *\n * If this is `True` then all subsequent layers in the model need to support\n * masking or an exception will be raised. 
If maskZero is set to `True`, as a\n * consequence, index 0 cannot be used in the vocabulary (inputDim should\n * equal size of vocabulary + 1).\n */\n maskZero?: boolean;\n /**\n * Length of input sequences, when it is constant.\n *\n * This argument is required if you are going to connect `flatten` then\n * `dense` layers upstream (without it, the shape of the dense outputs cannot\n * be computed).\n */\n inputLength?: number|number[];\n}\n\nexport class Embedding extends Layer {\n /** @nocollapse */\n static className = 'Embedding';\n private inputDim: number;\n private outputDim: number;\n private embeddingsInitializer: Initializer;\n private maskZero: boolean;\n private inputLength: number|number[];\n\n private embeddings: LayerVariable = null;\n\n readonly DEFAULT_EMBEDDINGS_INITIALIZER: InitializerIdentifier =\n 'randomUniform';\n private readonly embeddingsRegularizer?: Regularizer;\n private readonly embeddingsConstraint?: Constraint;\n\n constructor(args: EmbeddingLayerArgs) {\n super(args);\n if (args.batchInputShape == null && args.inputShape == null) {\n // Porting Note: This logic is copied from Layer's constructor, since we\n // can't do exactly what the Python constructor does for Embedding().\n // Specifically, the super constructor can not be called after the\n // mutation of the `config` argument.\n let batchSize: number = null;\n if (args.batchSize != null) {\n batchSize = args.batchSize;\n }\n if (args.inputLength == null) {\n // Fix super-constructor to what it would have done if\n // 'config.inputShape' were (None, )\n this.batchInputShape = [batchSize, null];\n } else {\n // Fix super-constructor to what it would have done if\n // 'config.inputShape' were (config.inputLength, )\n this.batchInputShape =\n [batchSize].concat(generic_utils.toList(args.inputLength));\n }\n }\n this.inputDim = args.inputDim;\n generic_utils.assertPositiveInteger(this.inputDim, 'inputDim');\n this.outputDim = args.outputDim;\n generic_utils.assertPositiveInteger(this.outputDim, 'outputDim');\n this.embeddingsInitializer = getInitializer(\n args.embeddingsInitializer || this.DEFAULT_EMBEDDINGS_INITIALIZER);\n this.embeddingsRegularizer = getRegularizer(args.embeddingsRegularizer);\n this.activityRegularizer = getRegularizer(args.activityRegularizer);\n this.embeddingsConstraint = getConstraint(args.embeddingsConstraint);\n this.maskZero = args.maskZero;\n this.supportsMasking = args.maskZero;\n this.inputLength = args.inputLength;\n }\n\n public override build(inputShape: Shape|Shape[]): void {\n this.embeddings = this.addWeight(\n 'embeddings', [this.inputDim, this.outputDim], this.dtype,\n this.embeddingsInitializer, this.embeddingsRegularizer, true,\n this.embeddingsConstraint);\n this.built = true;\n }\n\n // Override warnOnIncompatibleInputShape because an embedding layer allows\n // the input to have varying ranks.\n protected override warnOnIncompatibleInputShape(inputShape: Shape) {}\n\n override computeMask(inputs: Tensor|Tensor[], mask?: Tensor|Tensor[]):\n Tensor {\n return tidy(() => {\n if (!this.maskZero) {\n return null;\n } else {\n inputs = getExactlyOneTensor(inputs);\n return notEqual(inputs, zerosLike(inputs));\n }\n });\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n inputShape = getExactlyOneShape(inputShape);\n if (this.inputLength == null) {\n return [...inputShape, this.outputDim];\n }\n // inputLength can be an array if input is 3D or higher.\n const inLens: number[] = generic_utils.toList(this.inputLength);\n if (inLens.length 
!== inputShape.length - 1) {\n throw new ValueError(\n `\"inputLength\" is ${this.inputLength}, but received ` +\n `input shape has shape ${inputShape}`);\n } else {\n let i = 0;\n for (let k = 0; k < inLens.length; ++k) {\n const s1 = inLens[k];\n const s2 = inputShape[k + 1];\n if ((s1 != null) && (s2 != null) && (s1 !== s2)) {\n throw new ValueError(\n `\"inputLength\" is ${this.inputLength}, but received ` +\n `input shape has shape ${inputShape}`);\n } else if (s1 == null) {\n inLens[i] = s2;\n }\n i++;\n }\n }\n return [inputShape[0], ...inLens, this.outputDim];\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n this.invokeCallHook(inputs, kwargs);\n // Embedding layer accepts only a single input.\n let input = getExactlyOneTensor(inputs);\n if (input.dtype !== 'int32') {\n input = K.cast(input, 'int32');\n }\n const output =\n K.gather(this.embeddings.read(), reshape(input, [input.size]));\n return reshape(\n output, getExactlyOneShape(this.computeOutputShape(input.shape)));\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n const config = {\n inputDim: this.inputDim,\n outputDim: this.outputDim,\n embeddingsInitializer: serializeInitializer(this.embeddingsInitializer),\n embeddingsRegularizer: serializeRegularizer(this.embeddingsRegularizer),\n activityRegularizer: serializeRegularizer(this.activityRegularizer),\n embeddingsConstraint: serializeConstraint(this.embeddingsConstraint),\n maskZero: this.maskZero,\n inputLength: this.inputLength\n };\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(Embedding);\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * TensorFlow.js Layers: Merge Layers.\n */\n\nimport * as tfc from '@tensorflow/tfjs-core';\nimport {serialization, Tensor, tidy, util} from '@tensorflow/tfjs-core';\nimport * as K from '../backend/tfjs_backend';\nimport {Layer, LayerArgs, SymbolicTensor} from '../engine/topology';\nimport {NotImplementedError, ValueError} from '../errors';\nimport {Shape} from '../keras_format/common';\nimport {l2Normalize} from '../losses';\nimport {Kwargs} from '../types';\nimport * as generic_utils from '../utils/generic_utils';\nimport * as mathUtils from '../utils/math_utils';\nimport {getExactlyOneShape} from '../utils/types_utils';\n\n/**\n * Generic Merge layer for element-wise merge functions.\n *\n * Used to implement `Sum`, `Average`, `Concatenate`, etc.\n */\nexport abstract class Merge extends Layer {\n protected reshapeRequired: boolean;\n\n constructor(args?: LayerArgs) {\n super(args || {});\n this.supportsMasking = true;\n }\n\n /**\n * Logic for merging multiple tensors, to be overridden by subclasses.\n * @param inputs\n */\n protected mergeFunction(inputs: Tensor[]): Tensor {\n throw new NotImplementedError();\n }\n\n /**\n * Computes the shape of the result of an elementwise operation.\n *\n * @param shape1: Shape of the first tensor.\n * @param shape2: Shape of the second tensor.\n * @returns Expected output shape when an elementwise operation is carried\n * out on 2 tensors with shapes `shape1` and `shape2`.\n * @throws ValueError: If `shape1` and `shape2` are not compatible for\n * element-wise operations.\n */\n private 
computeElementwiseOpOutputShape(shape1: Shape, shape2: Shape): Shape {\n if (shape1 == null || shape2 == null) {\n return null;\n } else if (shape1.length < shape2.length) {\n return this.computeElementwiseOpOutputShape(shape2, shape1);\n } else if (shape2.length === 0) {\n return shape1;\n }\n const outputShape: Shape = shape1.slice(0, shape1.length - shape2.length);\n for (let k = 0; k < shape2.length; ++k) {\n const i = shape1[shape1.length - shape2.length + k];\n const j = shape2[k];\n if (i == null || j == null || i < 0 || j < 0) {\n outputShape.push(null);\n } else if (i === 1) {\n outputShape.push(j);\n } else if (j === 1) {\n outputShape.push(i);\n } else {\n if (i !== j) {\n throw new ValueError(\n 'Operands could not be broadcast together with shapes ' +\n JSON.stringify(shape1) + ' ' + JSON.stringify(shape2));\n }\n outputShape.push(i);\n }\n }\n return outputShape;\n }\n\n override build(inputShape: Shape|Shape[]): void {\n // Used purely for shape validation.\n if (Array.isArray(inputShape) && !Array.isArray(inputShape[0])) {\n // Make sure that inputShape is an Array of shape.\n inputShape = [getExactlyOneShape(inputShape)];\n }\n inputShape = inputShape as Shape[];\n if (inputShape.length < 2) {\n throw new ValueError(\n 'A merge layer should be called on an Array of at least 2 inputs.' +\n ` Got ${inputShape.length} input(s).`);\n }\n\n // Make sure that there is at most one unique batch size among the input\n // shapes.\n let batchSizes: number[] = [];\n for (const shape of inputShape) {\n if (shape != null && shape[0] !== null) {\n batchSizes.push(shape[0]);\n }\n }\n batchSizes = generic_utils.unique(batchSizes);\n if (batchSizes.length > 1) {\n throw new ValueError(\n `Can not merge tensors with different batch sizes. ` +\n `Got tensors with shapes: ${JSON.stringify(inputShape)}.`);\n }\n\n let outputShape: Shape =\n inputShape[0] == null ? null : inputShape[0].slice(1);\n for (let i = 1; i < inputShape.length; ++i) {\n const shape = inputShape[i] == null ? null : inputShape[i].slice(1);\n outputShape = this.computeElementwiseOpOutputShape(outputShape, shape);\n }\n // If the inputs have different ranks, we have to reshape them to make them\n // broadcastable.\n const allRanks = inputShape.map(shape => shape.length);\n if (inputShape.indexOf(null) === -1 &&\n generic_utils.unique(allRanks).length === 1) {\n this.reshapeRequired = false;\n } else {\n this.reshapeRequired = true;\n }\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n inputs = inputs as Tensor[];\n if (this.reshapeRequired) {\n const reshapedInputs: Tensor[] = [];\n const inputDims = inputs.map(input => input.rank);\n if (inputDims.indexOf(null) === -1) {\n // If ranks of all inputs are available, we simply expand each of them\n // at axis=1 until all of them have the same rank.\n const maxNDim = mathUtils.max(inputDims);\n for (let x of inputs) {\n const xNDim = x.rank;\n for (let k = 0; k < maxNDim - xNDim; ++k) {\n x = K.expandDims(x, 1);\n }\n reshapedInputs.push(x);\n }\n return this.mergeFunction(reshapedInputs);\n } else {\n // Transpose all inputs so that batch size is the last dimension.\n // [batchSize, dim1, dim2, ...] 
-> [dim1, dim2, ..., batchSize]\n let transposed = false;\n for (const x of inputs) {\n const xNDim = x.rank;\n if (xNDim == null) {\n const xShape = x.shape;\n const batchSize = xShape[0];\n const newShape = xShape.slice(1).concat([batchSize]);\n let xTransposed = tfc.reshape(\n x, [batchSize].concat(mathUtils.arrayProd(xShape.slice(1))));\n xTransposed = tfc.transpose(xTransposed, [1, 0]);\n xTransposed = tfc.reshape(xTransposed, newShape);\n reshapedInputs.push(xTransposed);\n transposed = true;\n } else if (xNDim > 1) {\n const dims = mathUtils.range(1, xNDim).concat([0]);\n reshapedInputs.push(tfc.transpose(x, dims));\n transposed = true;\n } else {\n // We don't transpose inputs if they are 1D vectors or scalars.\n reshapedInputs.push(x);\n }\n }\n let y = this.mergeFunction(reshapedInputs);\n const yNDim = y.rank;\n if (transposed) {\n // If inputs have been transposed, we have to transpose the output\n // too.\n if (yNDim == null) {\n const yShape = y.shape;\n const yNDim = yShape.length;\n const batchSize = yShape[yNDim - 1];\n const newShape =\n [batchSize].concat(yShape.slice(0, yShape.length - 1));\n y = tfc.reshape(\n tfc.transpose(tfc.reshape(y, [-1, batchSize]), [1, 0]),\n newShape);\n } else if (yNDim > 1) {\n const dims = [yNDim - 1].concat(mathUtils.range(0, yNDim - 1));\n y = tfc.transpose(y, dims);\n }\n }\n return y;\n }\n } else {\n return this.mergeFunction(inputs);\n }\n });\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n inputShape = inputShape as Shape[];\n let outputShape: Shape;\n if (inputShape[0] == null) {\n outputShape = null;\n } else {\n outputShape = inputShape[0].slice(1);\n }\n for (let i = 1; i < inputShape.length; ++i) {\n const shape = inputShape[i] == null ? null : inputShape[i].slice(1);\n outputShape = this.computeElementwiseOpOutputShape(outputShape, shape);\n }\n\n let batchSizes: number[] = [];\n for (const shape of inputShape) {\n if (shape != null && shape[0] !== null) {\n batchSizes.push(shape[0]);\n }\n }\n batchSizes = generic_utils.unique(batchSizes);\n if (batchSizes.length === 1) {\n outputShape = batchSizes.concat(outputShape);\n } else {\n outputShape = [null].concat(outputShape);\n }\n return outputShape;\n }\n\n override computeMask(inputs: Tensor|Tensor[], mask?: Tensor|Tensor[]):\n Tensor {\n return tfc.tidy(() => {\n if (mask == null) {\n return null;\n }\n if (!Array.isArray(mask)) {\n throw new ValueError('`mask` should be an Array');\n }\n if (!Array.isArray(inputs)) {\n throw new ValueError('`inputs` should be an Array');\n }\n if (mask.length !== inputs.length) {\n throw new ValueError(\n `The Array 'inputs' and 'mask' are expected to have the same ` +\n `length, but have different lengths ` +\n `(${inputs.length} vs ${mask.length})`);\n }\n if (mask.every(m => m == null)) {\n return null;\n }\n mask = mask.map(m => m == null ? 
m : tfc.expandDims(m, 0));\n let output = mask[0];\n for (let i = 1; i < mask.length - 1; ++i) {\n output = tfc.logicalAnd(output, mask[i]);\n }\n return output;\n });\n }\n}\n\nexport class Add extends Merge {\n /** @nocollapse */\n static className = 'Add';\n constructor(args?: LayerArgs) {\n super(args);\n }\n\n protected override mergeFunction(inputs: Tensor[]): Tensor {\n return tidy(() => {\n let output = inputs[0].clone();\n for (let i = 1; i < inputs.length; ++i) {\n output = tfc.add(output, inputs[i]);\n }\n return output;\n });\n }\n}\nserialization.registerClass(Add);\n\n/**\n * Calculate the element-wise sum of inputs, which all have the same shape.\n *\n * This function can be invoked in three ways.\n *\n * 1. Construct an instance of `Add` layer, by using no input argument\n * or a single configuration argument. The resultant `Add` layer can then\n * be used on `tf.SymbolicTensor`s or `tf.Tensor`s. For example:\n *\n * ```js\n * const addLayer = tf.layers.add();\n *\n * // The layer can be applied to inputs.\n * const input1 = tf.input({shape: [2, 2]});\n * const input2 = tf.input({shape: [2, 2]});\n * const output = addLayer.apply([input1, input2]);\n * console.log(output.shape);\n * // You get [null, 2, 2], with the first dimension as the undetermined batch\n * // dimension.\n * ```\n *\n * 2. Invoke directly on an `Array` of `tf.SymbolicTensor`s. This constructs\n * an `Layer` object internally and calls its `apply` method on the inputs,\n * generating a new `tf.SymbolicTensor`. For example:\n *\n * ```js\n * const input1 = tf.input({shape: [2, 2]});\n * const input2 = tf.input({shape: [2, 2]});\n * const output = tf.layers.add([input1, input2]);\n * console.log(output.shape);\n * // You get [null, 2, 2], with the first dimension as the undetermined batch\n * // dimension.\n * ```\n *\n * 3. Invoke directly on `tf.Tensor`s, i.e., concrete values. This constructs\n * an `Layer` object internally and calls its `apply` method on the inputs,\n * generating a new `tf.Tensor` as the result of the computation. For\n * example:\n *\n * ```js\n * const input1 = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n * const input2 = tf.tensor2d([10, 20, 30, 40], [2, 2]);\n * tf.layers.add([input1, input2]).print();\n * // Gives [[11, 22], [33, 44]].\n *\n */\nexport function add(config?: SymbolicTensor[]|Tensor[]|LayerArgs): Layer|\n SymbolicTensor|Tensor {\n if (Array.isArray(config)) {\n const layer = new Add({});\n return layer.apply(config) as SymbolicTensor | Tensor;\n } else {\n return new Add(config);\n }\n}\n\nexport class Multiply extends Merge {\n /** @nocollapse */\n static className = 'Multiply';\n constructor(args?: LayerArgs) {\n super(args);\n }\n\n protected override mergeFunction(inputs: Tensor[]): Tensor {\n return tidy(() => {\n let output = inputs[0].clone();\n for (let i = 1; i < inputs.length; ++i) {\n output = tfc.mul(output, inputs[i]);\n }\n return output;\n });\n }\n}\nserialization.registerClass(Multiply);\n\n/**\n * Calculate the element-wise product of inputs, which all have the same shape.\n *\n * This function can be invoked in three ways.\n *\n * 1. Construct an instance of `Multiply` layer, by using no input argument\n * or a single configuration argument. The resultant `Multiply` layer can\n * then be used on `tf.SymbolicTensor`s or `tf.Tensor`s. 
For example:\n *\n * ```js\n * const multiplyLayer = tf.layers.multiply();\n *\n * // The layer can be applied to inputs.\n * const input1 = tf.input({shape: [2, 2]});\n * const input2 = tf.input({shape: [2, 2]});\n * const output = multiplyLayer.apply([input1, input2]);\n * console.log(output.shape);\n * // You get [null, 2, 2], with the first dimension as the undetermined batch\n * // dimension.\n * ```\n *\n * 2. Invoke directly on an `Array` of `tf.SymbolicTensor`s. This constructs\n * an `Layer` object internally and calls its `apply` method on the inputs,\n * generating a new `tf.SymbolicTensor`. For example:\n *\n * ```js\n * const input1 = tf.input({shape: [2, 2]});\n * const input2 = tf.input({shape: [2, 2]});\n * const output = tf.layers.multiply([input1, input2]);\n * console.log(output.shape);\n * // You get [null, 2, 2], with the first dimension as the undetermined batch\n * // dimension.\n * ```\n *\n * 3. Invoke directly on `tf.Tensor`s, i.e., concrete values. This constructs\n * an `Layer` object internally and calls its `apply` method on the inputs,\n * generating a new `tf.Tensor` as the result of the computation. For\n * example:\n *\n * ```js\n * const input1 = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n * const input2 = tf.tensor2d([10, 20, 30, 40], [2, 2]);\n * tf.layers.multiply([input1, input2]).print();\n * // Gives [[10, 40], [90, 160]].\n *\n */\nexport function multiply(config?: SymbolicTensor[]|Tensor[]|LayerArgs): Layer|\n SymbolicTensor|Tensor {\n if (Array.isArray(config)) {\n const layer = new Multiply({});\n return layer.apply(config) as SymbolicTensor | Tensor;\n } else {\n return new Multiply(config);\n }\n}\n\nexport class Average extends Merge {\n /** @nocollapse */\n static className = 'Average';\n constructor(args?: LayerArgs) {\n super(args);\n }\n\n protected override mergeFunction(inputs: Tensor[]): Tensor {\n return tidy(() => {\n let output = inputs[0].clone();\n for (let i = 1; i < inputs.length; ++i) {\n output = tfc.add(output, inputs[i]);\n }\n return tfc.mul(1 / inputs.length, output);\n });\n }\n}\nserialization.registerClass(Average);\n\n/**\n * Calculate the element-wise arithmetic mean of inputs, which all have the same\n * shape.\n *\n * This function can be invoked in three ways.\n *\n * 1. Construct an instance of `Average` layer, by using no input argument\n * or a single configuration argument. The resultant `Average` layer can then\n * be used on `tf.SymbolicTensor`s or `tf.Tensor`s. For example:\n *\n * ```js\n * const averageLayer = tf.layers.average();\n *\n * // The layer can be applied to inputs.\n * const input1 = tf.input({shape: [2, 2]});\n * const input2 = tf.input({shape: [2, 2]});\n * const output = averageLayer.apply([input1, input2]);\n * console.log(output.shape);\n * // You get [null, 2, 2], with the first dimension as the undetermined batch\n * // dimension.\n * ```\n *\n * 2. Invoke directly on an `Array` of `tf.SymbolicTensor`s. This constructs\n * an `Layer` object internally and calls its `apply` method on the inputs,\n * generating a new `tf.SymbolicTensor`. For example:\n *\n * ```js\n * const input1 = tf.input({shape: [2, 2]});\n * const input2 = tf.input({shape: [2, 2]});\n * const output = tf.layers.average([input1, input2]);\n * console.log(output.shape);\n * // You get [null, 2, 2], with the first dimension as the undetermined batch\n * // dimension.\n * ```\n *\n * 3. Invoke directly on `tf.Tensor`s, i.e., concrete values. 
This constructs\n * an `Layer` object internally and calls its `apply` method on the inputs,\n * generating a new `tf.Tensor` as the result of the computation. For\n * example:\n *\n * ```js\n * const input1 = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n * const input2 = tf.tensor2d([10, 20, 30, 40], [2, 2]);\n * tf.layers.average([input1, input2]).print();\n * // Gives [[5.5, 11], [16.5, 22]].\n *\n */\nexport function average(config?: SymbolicTensor[]|Tensor[]|LayerArgs): Layer|\n SymbolicTensor|Tensor {\n if (Array.isArray(config)) {\n const layer = new Average({});\n return layer.apply(config) as SymbolicTensor | Tensor;\n } else {\n return new Average(config);\n }\n}\n\nexport class Maximum extends Merge {\n /** @nocollapse */\n static className = 'Maximum';\n constructor(args?: LayerArgs) {\n super(args);\n }\n\n protected override mergeFunction(inputs: Tensor[]): Tensor {\n return tidy(() => {\n let output = inputs[0];\n for (let i = 1; i < inputs.length; ++i) {\n output = tfc.maximum(output, inputs[i]);\n }\n return output;\n });\n }\n}\nserialization.registerClass(Maximum);\n\n/**\n * Calculate the element-wise maximum of inputs, which all have the same shape.\n *\n * This function can be invoked in three ways.\n *\n * 1. Construct an instance of `Maximum` layer, by using no input argument\n * or a single configuration argument. The resultant `Maximum` layer can then\n * be used on `tf.SymbolicTensor`s or `tf.Tensor`s. For example:\n *\n * ```js\n * const maximumLayer = tf.layers.maximum();\n *\n * // The layer can be applied to inputs.\n * const input1 = tf.input({shape: [2, 2]});\n * const input2 = tf.input({shape: [2, 2]});\n * const output = maximumLayer.apply([input1, input2]);\n * console.log(output.shape);\n * // You get [null, 2, 2], with the first dimension as the undetermined batch\n * // dimension.\n * ```\n *\n * 2. Invoke directly on an `Array` of `tf.SymbolicTensor`s. This constructs\n * an `Layer` object internally and calls its `apply` method on the inputs,\n * generating a new `tf.SymbolicTensor`. For example:\n *\n * ```js\n * const input1 = tf.input({shape: [2, 2]});\n * const input2 = tf.input({shape: [2, 2]});\n * const output = tf.layers.maximum([input1, input2]);\n * console.log(output.shape);\n * // You get [null, 2, 2], with the first dimension as the undetermined batch\n * // dimension.\n * ```\n *\n * 3. Invoke directly on `tf.Tensor`s, i.e., concrete values. This constructs\n * an `Layer` object internally and calls its `apply` method on the inputs,\n * generating a new `tf.Tensor` as the result of the computation. 
For\n * example:\n *\n * ```js\n * const input1 = tf.tensor2d([1, 20, 3, 40], [2, 2]);\n * const input2 = tf.tensor2d([10, 2, 30, 4], [2, 2]);\n * tf.layers.maximum([input1, input2]).print();\n * // Gives [[10, 20], [30, 40]].\n *\n */\nexport function maximum(config?: SymbolicTensor[]|Tensor[]|LayerArgs): Layer|\n SymbolicTensor|Tensor {\n if (Array.isArray(config)) {\n const layer = new Maximum({});\n return layer.apply(config) as SymbolicTensor | Tensor;\n } else {\n return new Maximum(config);\n }\n}\n\nexport class Minimum extends Merge {\n /** @nocollapse */\n static className = 'Minimum';\n constructor(args?: LayerArgs) {\n super(args);\n }\n\n protected override mergeFunction(inputs: Tensor[]): Tensor {\n return tidy(() => {\n let output = inputs[0];\n for (let i = 1; i < inputs.length; ++i) {\n output = tfc.minimum(output, inputs[i]);\n }\n return output;\n });\n }\n}\nserialization.registerClass(Minimum);\n\n/**\n * Calculate the element-wise minimum of inputs, which all have the same shape.\n *\n * This function can be invoked in three ways.\n *\n * 1. Construct an instance of `Minimum` layer, by using no input argument\n * or a single configuration argument. The resultant `Minimum` layer can then\n * be used on `tf.SymbolicTensor`s or `tf.Tensor`s. For example:\n *\n * ```js\n * const minimumLayer = tf.layers.minimum();\n *\n * // The layer can be applied to inputs.\n * const input1 = tf.input({shape: [2, 2]});\n * const input2 = tf.input({shape: [2, 2]});\n * const output = minimumLayer.apply([input1, input2]);\n * console.log(output.shape);\n * // You get [null, 2, 2], with the first dimension as the undetermined batch\n * // dimension.\n * ```\n *\n * 2. Invoke directly on an `Array` of `tf.SymbolicTensor`s. This constructs\n * an `Layer` object internally and calls its `apply` method on the inputs,\n * generating a new `tf.SymbolicTensor`. For example:\n *\n * ```js\n * const input1 = tf.input({shape: [2, 2]});\n * const input2 = tf.input({shape: [2, 2]});\n * const output = tf.layers.minimum([input1, input2]);\n * console.log(output.shape);\n * // You get [null, 2, 2], with the first dimension as the undetermined batch\n * // dimension.\n * ```\n *\n * 3. Invoke directly on `tf.Tensor`s, i.e., concrete values. This constructs\n * an `Layer` object internally and calls its `apply` method on the inputs,\n * generating a new `tf.Tensor` as the result of the computation. For\n * example:\n *\n * ```js\n * const input1 = tf.tensor2d([1, 20, 3, 40], [2, 2]);\n * const input2 = tf.tensor2d([10, 2, 30, 4], [2, 2]);\n * tf.layers.minimum([input1, input2]).print();\n * // Gives [[1, 2], [3, 4]].\n *\n */\nexport function minimum(config?: SymbolicTensor[]|Tensor[]|LayerArgs): Layer|\n SymbolicTensor|Tensor {\n if (Array.isArray(config)) {\n const layer = new Minimum({});\n return layer.apply(config) as SymbolicTensor | Tensor;\n } else {\n return new Minimum(config);\n }\n}\n\nexport declare interface ConcatenateLayerArgs extends LayerArgs {\n /**\n * Axis along which to concatenate.\n */\n axis?: number;\n}\n\nexport class Concatenate extends Merge {\n /** @nocollapse */\n static className = 'Concatenate';\n readonly DEFAULT_AXIS = -1;\n private readonly axis: number;\n\n constructor(args?: ConcatenateLayerArgs) {\n super(args);\n if (args == null) {\n args = {};\n }\n this.axis = args.axis == null ? 
this.DEFAULT_AXIS : args.axis;\n this.supportsMasking = true;\n this.reshapeRequired = false;\n }\n\n override build(inputShape: Shape|Shape[]): void {\n // Used purely for shape validation.]\n if (!(Array.isArray(inputShape) && Array.isArray(inputShape[0])) ||\n inputShape.length === 1) {\n throw new ValueError(\n 'A `Concatenate` layer should be called on a list of at least 2 ' +\n 'inputs');\n }\n inputShape = inputShape as Shape[];\n\n let allNoneShape = true;\n for (const shape of inputShape) {\n if (shape != null) {\n allNoneShape = false;\n break;\n }\n }\n if (allNoneShape) {\n return;\n }\n\n const shapeSet: Shape[] = [];\n for (let i = 0; i < inputShape.length; ++i) {\n const shapeWithoutConcatAxis = inputShape[i].slice();\n shapeWithoutConcatAxis.splice(this.axis, 1);\n let exists = false;\n for (const shape of shapeSet) {\n if (util.arraysEqual(shape, shapeWithoutConcatAxis)) {\n exists = true;\n break;\n }\n }\n if (!exists) {\n shapeSet.push(shapeWithoutConcatAxis);\n }\n }\n if (shapeSet.length > 1) {\n throw new ValueError(\n 'A `Concatenate` layer requires inputs with matching shapes ' +\n 'except for the concat axis. Got input shapes: ' +\n JSON.stringify(inputShape));\n }\n }\n\n protected override mergeFunction(inputs: Tensor[]): Tensor {\n return tidy(() => {\n return K.concatenate(inputs, this.axis);\n });\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n if (!(Array.isArray(inputShape) && Array.isArray(inputShape[0]))) {\n throw new ValueError(\n 'A `Concatenate` layer should be called on a list of inputs.');\n }\n const inputShapes = inputShape as Shape[];\n const outputShape = inputShapes[0].slice();\n const axis = this.axis < 0 ? outputShape.length + this.axis : this.axis;\n // Porting Note: the line above is because TypeScript doesn't support\n // negative indices.\n for (const shape of inputShapes.slice(1)) {\n if (outputShape[axis] == null || shape[axis] == null) {\n outputShape[axis] = null;\n break;\n }\n outputShape[axis] += shape[axis];\n }\n return outputShape;\n }\n\n override computeMask(inputs: Tensor|Tensor[], mask?: Tensor|Tensor[]):\n Tensor {\n if (mask == null) {\n return null;\n }\n if (!Array.isArray(mask)) {\n throw new ValueError('`mask` should be an array for Concatenate');\n }\n if (!Array.isArray(inputs)) {\n throw new ValueError('`inputs` should be an array for Concatenate');\n }\n if (mask.length !== inputs.length) {\n throw new ValueError(\n `Mismatch in the length of mask (${mask.length}) ` +\n `and the legnth of inputs (${inputs.length})`);\n }\n return tfc.tidy(() => {\n let allNullMasks = true;\n mask.forEach(m => {\n if (m != null) {\n allNullMasks = false;\n return;\n }\n });\n if (allNullMasks) {\n return null;\n }\n const outputMasks: Tensor[] = [];\n for (let i = 0; i < inputs.length; ++i) {\n if (mask[i] == null) {\n // Input is unmasked. 
Append all 1's to masks.\n outputMasks.push(tfc.cast(tfc.onesLike(inputs[i]), 'bool'));\n } else if (mask[i].rank < inputs[i].rank) {\n // Mask is smaller than the input, expand it.\n outputMasks.push(tfc.expandDims(mask[i], -1));\n } else {\n outputMasks.push(mask[i]);\n }\n }\n const concatenatedMasks = tfc.concat(outputMasks, this.axis);\n return tfc.all(concatenatedMasks, -1, false);\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {\n 'axis': this.axis,\n };\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(Concatenate);\n\n/**\n * Concatenate an `Array` of inputs.\n *\n * This function can be invoked in three ways.\n *\n * 1. Construct an instance of `Concatenate` layer, by using no input argument\n * or a single configuration argument. The resultant `Concatenate` layer can\n * then be used on `tf.SymbolicTensor`s or `tf.Tensor`s. For example:\n *\n * ```js\n * const concatLayer = tf.layers.concatenate();\n *\n * // The layer can be applied to inputs.\n * const input1 = tf.input({shape: [2, 3]});\n * const input2 = tf.input({shape: [2, 4]});\n * const output = concatLayer.apply([input1, input2]);\n * console.log(output.shape);\n * // You get [null, 2, 7], with the first dimension as the undetermined batch\n * // dimension and the last dimension as the result of concatenating the\n * // last dimensions of the two inputs.\n * ```\n *\n * 2. Invoke directly on an `Array` of `tf.SymbolicTensor`s. This constructs\n * an `Layer` object internally and calls its `apply` method on the inputs,\n * generating a new `tf.SymbolicTensor`. For example:\n *\n * ```js\n * const input1 = tf.input({shape: [2, 3]});\n * const input2 = tf.input({shape: [2, 4]});\n * const output = tf.layers.concatenate([input1, input2]);\n * console.log(output.shape);\n * // You get [null, 2, 2], with the first dimension as the undetermined batch\n * // dimension and the last dimension as the result of concatenating the\n * // last dimensions of the two inputs.\n * ```\n *\n * 3. Invoke directly on `tf.Tensor`s, i.e., concrete values. This constructs\n * an `Layer` object internally and calls its `apply` method on the inputs,\n * generating a new `tf.Tensor` as the result of the computation. 
For\n * example:\n *\n * ```js\n * const input1 = tf.tensor2d([[1, 2], [3, 4]], [2, 2]);\n * const input2 = tf.tensor2d([[10, 20], [30, 40]], [2, 2]);\n * tf.layers.concatenate([input1, input2]).print();\n * // Gives [[1, 2, 10, 20], [3, 4, 30, 40]].\n *\n */\nexport function concatenate(config?: SymbolicTensor[]|Tensor[]|\n ConcatenateLayerArgs): Layer|SymbolicTensor|Tensor {\n if (Array.isArray(config)) {\n const layer = new Concatenate({});\n return layer.apply(config) as SymbolicTensor | Tensor;\n } else {\n return new Concatenate(config);\n }\n}\n\nexport declare interface DotLayerArgs extends LayerArgs {\n /**\n * Axis or axes along which the dot product will be taken.\n *\n * Integer or an Array of integers.\n */\n axes: number|[number, number];\n\n /**\n * Whether to L2-normalize samples along the dot product axis\n * before taking the dot product.\n *\n * If set to `true`, the output of the dot product is the cosine\n * proximity between the two samples.\n */\n normalize?: boolean;\n}\n\n/**\n * Interpretable potentially negative axis index.\n *\n * For example, given axis = -1, and dim = 3, this function will return 2.\n *\n * @param axis The axis index, may be a positive, zero or negative integer.\n * @param dim Total number of dimensions, a positive integer.\n * @returns A non-negative axis index equivalent to the input `axis`.\n */\nfunction interpretAxis(axis: number, dim: number): number {\n while (axis < 0) {\n axis += dim;\n }\n return axis;\n}\n\nfunction batchDot(x: Tensor, y: Tensor, axes: number|[number, number]): Tensor {\n if (x.shape.length > 3 || y.shape.length > 3) {\n throw new NotImplementedError(\n 'batchDot is not implemented for tensors of 4D or higher rank yet');\n }\n tfc.util.assert(\n x.shape.length >= 2,\n () => `batchDot requires the rank of x to be >= 2, ` +\n `but got ${x.shape.length}`);\n tfc.util.assert(\n x.shape.length >= 2,\n () => `batchDot requires the rank of y to be >= 2, ` +\n `but got ${y.shape.length}`);\n\n if (typeof axes === 'number') {\n axes = [axes, axes];\n }\n\n if (x.dtype === 'complex64' || y.dtype === 'complex64') {\n throw new NotImplementedError(\n 'batchDot is not implemented for complex64-type Tensors yet.');\n }\n\n const xNDim = x.shape.length;\n const yNDim = y.shape.length;\n if (axes == null) {\n // Behave like batchMatmul by default.\n axes = [xNDim - 1, yNDim - 2];\n }\n const axesArray = axes as [number, number];\n\n return tfc.tidy(() => {\n let diff: number;\n if (xNDim > yNDim) {\n diff = xNDim - yNDim;\n const diffShape: Shape = [];\n for (let i = 0; i < diff; ++i) {\n diffShape.push(1);\n }\n y = tfc.reshape(y, y.shape.concat(diffShape));\n } else if (yNDim > xNDim) {\n diff = yNDim - xNDim;\n const diffShape: Shape = [];\n for (let i = 0; i < diff; ++i) {\n diffShape.push(1);\n }\n x = tfc.reshape(x, x.shape.concat(diffShape));\n } else {\n diff = 0;\n }\n\n let out: Tensor;\n if (x.shape.length === 2 && y.shape.length === 2) {\n if (axesArray[0] === axesArray[1]) {\n out = tfc.sum(tfc.mul(x, y), axesArray[0]);\n } else {\n out = tfc.sum(tfc.mul(tfc.transpose(x, [1, 0]), y), axesArray[1]);\n }\n } else {\n const adjX = axesArray[0] !== x.shape.length - 1;\n const adjY = axesArray[1] === y.shape.length - 1;\n out = tfc.matMul(x, y, adjX, adjY);\n }\n\n if (diff > 0) {\n let idx: number;\n if (xNDim > yNDim) {\n idx = xNDim + yNDim - 3;\n } else {\n idx = xNDim - 1;\n }\n const squeezeAxes: number[] = [];\n for (let i = idx; i < idx + diff; ++i) {\n squeezeAxes.push(i);\n }\n out = tfc.squeeze(out, 
squeezeAxes);\n }\n if (out.shape.length === 1) {\n out = tfc.expandDims(out, 1);\n }\n return out;\n });\n}\n\nexport class Dot extends Merge {\n /** @nocollapse */\n static className = 'Dot';\n\n private axes: number|[number, number];\n private normalize: boolean;\n\n constructor(args: DotLayerArgs) {\n super(args);\n this.axes = args.axes;\n this.normalize = args.normalize == null ? false : args.normalize;\n this.supportsMasking = true;\n this.reshapeRequired = false;\n }\n\n override build(inputShape: Shape|Shape[]): void {\n tfc.util.assert(\n Array.isArray(inputShape) && inputShape.length === 2 &&\n Array.isArray(inputShape[0]) && Array.isArray(inputShape[1]),\n () => 'A `Dot` layer should be called on a list of exactly 2 inputs.');\n const shape1 = inputShape[0] as Shape;\n const shape2 = inputShape[1] as Shape;\n if (shape1.length > 3 || shape2.length > 3) {\n throw new NotImplementedError(\n 'Dot layer does not support tensors of 4D or higher rank yet.');\n }\n\n const axes = this.interpretAxes(shape1, shape2);\n if (shape1[axes[0]] !== shape2[axes[1]]) {\n throw new ValueError(\n `Dimension incompatibility: ` +\n `${shape1[axes[0]]} !== ${shape2[axes[1]]}`);\n }\n }\n\n protected override mergeFunction(inputs: Tensor[]): Tensor {\n if (inputs.length !== 2) {\n throw new ValueError(\n 'A `Dot` layer must be called on exactly 2 inputs, ' +\n `but received ${inputs.length} input(s).`);\n }\n\n let x1 = inputs[0];\n let x2 = inputs[1];\n let axes: [number, number];\n if (!Array.isArray(this.axes)) {\n axes = [\n interpretAxis(this.axes, x1.shape.length),\n interpretAxis(this.axes, x2.shape.length)\n ];\n } else {\n axes = this.axes.map(\n (axis, i) => interpretAxis(\n axis, inputs[i].shape.length)) as [number, number];\n }\n if (this.normalize) {\n x1 = l2Normalize(x1, axes[0]);\n x2 = l2Normalize(x2, axes[1]);\n }\n return batchDot(x1, x2, axes);\n }\n\n private interpretAxes(shape1: Shape, shape2: Shape): number[] {\n let axes: number[];\n if (!Array.isArray(this.axes)) {\n // `this.axes` is a single integer.\n axes = [\n interpretAxis(this.axes, shape1.length),\n interpretAxis(this.axes, shape2.length)\n ];\n } else {\n // `this.axes` is an Array of integers.\n axes = this.axes;\n }\n return axes;\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n tfc.util.assert(\n Array.isArray(inputShape) && inputShape.length === 2 &&\n Array.isArray(inputShape[0]) && Array.isArray(inputShape[1]),\n () => 'A `Dot` layer should be called on a list of exactly 2 inputs.');\n const shape1 = (inputShape[0] as Shape).slice();\n const shape2 = (inputShape[1] as Shape).slice();\n if (shape1.length > 3 || shape2.length > 3) {\n throw new NotImplementedError(\n 'Dot layer does not support tensors of 4D or higher rank yet.');\n }\n\n const axes = this.interpretAxes(shape1, shape2);\n shape1.splice(axes[0], 1);\n shape2.splice(axes[1], 1);\n shape2.splice(0, 1);\n const outputShape = shape1.concat(shape2);\n if (outputShape.length === 1) {\n outputShape.push(1);\n }\n return outputShape;\n }\n\n override computeMask(inputs: Tensor|Tensor[], mask?: Tensor|Tensor[]):\n Tensor {\n return null;\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {\n 'axes': this.axes,\n 'normalize': this.normalize\n };\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(Dot);\n\n// TODO(cais): Add functional interfaces for the merge layers.\n","/**\n * @license\n * 
Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * TensorFlow.js Layers: Noise Layers.\n */\n\nimport {add, greaterEqual, mul, randomUniform, serialization, Tensor, tidy} from '@tensorflow/tfjs-core';\n\nimport * as K from '../backend/tfjs_backend';\nimport {Layer, LayerArgs} from '../engine/topology';\nimport {Shape} from '../keras_format/common';\nimport {Kwargs} from '../types';\nimport {getExactlyOneTensor} from '../utils/types_utils';\n\nexport declare interface GaussianNoiseArgs extends LayerArgs {\n /** Standard Deviation. */\n stddev: number;\n}\n\nexport class GaussianNoise extends Layer {\n /** @nocollapse */\n static className = 'GaussianNoise';\n readonly stddev: number;\n\n constructor(args: GaussianNoiseArgs) {\n super(args);\n this.supportsMasking = true;\n this.stddev = args.stddev;\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n return inputShape;\n }\n\n override getConfig() {\n const baseConfig = super.getConfig();\n const config = {stddev: this.stddev};\n Object.assign(config, baseConfig);\n return config;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n this.invokeCallHook(inputs, kwargs);\n const input = getExactlyOneTensor(inputs);\n const noised = () =>\n add(K.randomNormal(input.shape, 0, this.stddev), input);\n const output =\n K.inTrainPhase(noised, () => input, kwargs['training'] || false);\n return output;\n });\n }\n}\nserialization.registerClass(GaussianNoise);\n\nexport declare interface GaussianDropoutArgs extends LayerArgs {\n /** drop probability. */\n rate: number;\n}\n\nexport class GaussianDropout extends Layer {\n /** @nocollapse */\n static className = 'GaussianDropout';\n readonly rate: number;\n\n constructor(args: GaussianDropoutArgs) {\n super(args);\n this.supportsMasking = true;\n this.rate = args.rate;\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n return inputShape;\n }\n\n override getConfig() {\n const baseConfig = super.getConfig();\n const config = {rate: this.rate};\n Object.assign(config, baseConfig);\n return config;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n this.invokeCallHook(inputs, kwargs);\n const input = getExactlyOneTensor(inputs);\n if (this.rate > 0 && this.rate < 1) {\n const noised = () => {\n const stddev = Math.sqrt(this.rate / (1 - this.rate));\n return mul(input, K.randomNormal(input.shape, 1, stddev));\n };\n return K.inTrainPhase(noised, () => input, kwargs['training'] || false);\n }\n return input;\n });\n }\n}\nserialization.registerClass(GaussianDropout);\n\nexport declare interface AlphaDropoutArgs extends LayerArgs {\n /** drop probability. 
*/\n rate: number;\n /**\n * A 1-D `Tensor` of type `int32`, representing the\n * shape for randomly generated keep/drop flags.\n */\n noiseShape?: Shape;\n}\n\n/**\n * Applies Alpha Dropout to the input.\n *\n * As it is a regularization layer, it is only active at training time.\n *\n * Alpha Dropout is a `Dropout` that keeps mean and variance of inputs\n * to their original values, in order to ensure the self-normalizing property\n * even after this dropout.\n * Alpha Dropout fits well to Scaled Exponential Linear Units\n * by randomly setting activations to the negative saturation value.\n *\n * Arguments:\n * - `rate`: float, drop probability (as with `Dropout`).\n * The multiplicative noise will have\n * standard deviation `sqrt(rate / (1 - rate))`.\n * - `noise_shape`: A 1-D `Tensor` of type `int32`, representing the\n * shape for randomly generated keep/drop flags.\n *\n * Input shape:\n * Arbitrary. Use the keyword argument `inputShape`\n * (tuple of integers, does not include the samples axis)\n * when using this layer as the first layer in a model.\n *\n * Output shape:\n * Same shape as input.\n *\n * References:\n * - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)\n */\nexport class AlphaDropout extends Layer {\n /** @nocollapse */\n static className = 'AlphaDropout';\n readonly rate: number;\n readonly noiseShape: Shape;\n\n constructor(args: AlphaDropoutArgs) {\n super(args);\n this.supportsMasking = true;\n this.rate = args.rate;\n this.noiseShape = args.noiseShape;\n }\n\n _getNoiseShape(inputs: Tensor|Tensor[]) {\n return this.noiseShape || getExactlyOneTensor(inputs).shape;\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n return inputShape;\n }\n\n override getConfig() {\n const baseConfig = super.getConfig();\n const config = {rate: this.rate};\n Object.assign(config, baseConfig);\n return config;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n if (this.rate < 1 && this.rate > 0) {\n const noiseShape = this._getNoiseShape(inputs);\n\n const droppedInputs = () => {\n const input = getExactlyOneTensor(inputs);\n\n const alpha = 1.6732632423543772848170429916717;\n const scale = 1.0507009873554804934193349852946;\n\n const alphaP = -alpha * scale;\n\n let keptIdx = greaterEqual(randomUniform(noiseShape), this.rate);\n\n keptIdx = K.cast(keptIdx, 'float32'); // get default dtype.\n\n // Get affine transformation params.\n const a = ((1 - this.rate) * (1 + this.rate * alphaP ** 2)) ** -0.5;\n const b = -a * alphaP * this.rate;\n\n // Apply mask.\n const x = add(mul(input, keptIdx), mul(add(keptIdx, -1), alphaP));\n\n return add(mul(x, a), b);\n };\n return K.inTrainPhase(\n droppedInputs, () => getExactlyOneTensor(inputs),\n kwargs['training'] || false);\n }\n return inputs;\n });\n }\n}\nserialization.registerClass(AlphaDropout);\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * Normalization layers.\n */\n\nimport * as tfc from '@tensorflow/tfjs-core';\nimport {moments, reshape, serialization, Tensor, Tensor1D, Tensor2D, Tensor3D, Tensor4D, tidy, util} from '@tensorflow/tfjs-core';\n\nimport {Constraint, ConstraintIdentifier, getConstraint, serializeConstraint} from '../constraints';\nimport {InputSpec, Layer, 
LayerArgs} from '../engine/topology';\nimport {NotImplementedError, ValueError} from '../errors';\nimport {getInitializer, Initializer, InitializerIdentifier, serializeInitializer} from '../initializers';\nimport {Shape} from '../keras_format/common';\nimport {getRegularizer, Regularizer, RegularizerIdentifier, serializeRegularizer} from '../regularizers';\nimport {Kwargs} from '../types';\nimport * as generic_utils from '../utils/generic_utils';\nimport * as math_utils from '../utils/math_utils';\nimport {getExactlyOneShape, getExactlyOneTensor} from '../utils/types_utils';\nimport {LayerVariable} from '../variables';\n\n/**\n * Applies batch normalization on x given mean, var, beta and gamma.\n *\n * I.e. returns:\n * `output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta`\n *\n * @param x Input tensor.\n * @param mean Mean of batch.\n * @param variance Variance of batch.\n * @param beta Tensor with which to center the input.\n * @param gamma Tensor by which to scale the input.\n * @param epsilon Fuzz factor.\n * @returns The result of the batch normalization.\n */\nexport function batchNormalization(\n x: Tensor, mean: Tensor, variance: Tensor, beta?: Tensor, gamma?: Tensor,\n epsilon = 1e-3): Tensor {\n let out: Tensor;\n if (x.rank === 2) {\n out = tfc.batchNorm2d(\n x as Tensor2D, mean as Tensor2D | Tensor1D,\n variance as Tensor2D | Tensor1D, beta as Tensor2D | Tensor1D,\n gamma as Tensor2D | Tensor1D, epsilon);\n } else if (x.rank === 3) {\n // TODO(cais): Check rank; give proper error message.\n out = tfc.batchNorm3d(\n x as Tensor3D, mean as Tensor3D | Tensor1D,\n variance as Tensor3D | Tensor1D, beta as Tensor3D | Tensor1D,\n gamma as Tensor3D | Tensor1D, epsilon);\n } else if (x.rank === 4) {\n out = tfc.batchNorm4d(\n x as Tensor4D, mean as Tensor4D | Tensor1D,\n variance as Tensor4D | Tensor1D, beta as Tensor4D | Tensor1D,\n gamma as Tensor4D | Tensor1D, epsilon);\n } else {\n throw new NotImplementedError(\n `batchNormalization is not implemented for array of rank ${x.rank} ` +\n `yet`);\n }\n return out;\n}\n\n/**\n * Non-broadcasting batch normalization for use in training (not inference).\n *\n * The input is normalized to zero mean and unit variance along the\n * `reductionAxes`, followed by scaling with `gamma` and shifted by `beta`.\n * The result of that is returned as the first element\n * of the returned `Array`. 
The other two elements are the mean and variance,\n * respectively.\n *\n * @param x Input tensor to be normalized.\n * @param gamma Tensor by which to scale the input.\n * @param beta Tensor by which to center the input.\n * @param reductionAxes Axes over which to normalize.\n * @param epsilon Fuzz factor.\n * @returns An `Array` of three `Tensors`:\n * [normalized tensor, mean of input, variance of input].\n */\nfunction regularNormalizeBatchInTraining(\n x: Tensor, gamma: Tensor, beta: Tensor, reductionAxes: number[],\n epsilon = 1e-3): [Tensor, Tensor, Tensor] {\n return tidy(() => {\n const meanAndVariance = tfc.moments(x, reductionAxes);\n const mean = meanAndVariance.mean;\n const variance = meanAndVariance.variance;\n const normed =\n batchNormalization(x, mean, variance, beta, gamma, epsilon);\n return [normed, mean, variance];\n }) as [Tensor, Tensor, Tensor];\n}\n\n/**\n * Broadcasting batch normalization for use in training (not inference).\n *\n * The input is normalized to zero mean and unit variance along the\n * `reductionAxes`, followed by scaling with `gamma` and shifted by `beta`.\n * The result of that is returned as the first element\n * of the returned `Array`. The other two elements are the mean and variance,\n * respectively.\n *\n * @param x Input tensor to be normalized.\n * @param gamma Tensor by which to scale the input.\n * @param beta Tensor by which to center the input.\n * @param reductionAxes Axes over which to normalize.\n * @param epsilon Fuzz factor.\n * @returns An `Array` of three `Tensors`:\n * [normalized tensor, mean of input, variance of input].\n */\nfunction broadcastNormalizeBatchInTraining(\n x: Tensor, gamma: Tensor, beta: Tensor, reductionAxes: number[],\n epsilon = 1e-3): [Tensor, Tensor, Tensor] {\n return tidy(() => {\n const meanAndVariance = tfc.moments(x, reductionAxes);\n const mean = meanAndVariance.mean;\n const variance = meanAndVariance.variance;\n const targetShape: number[] = [];\n for (const axis of math_utils.range(0, x.rank)) {\n if (reductionAxes.indexOf(axis) !== -1) {\n targetShape.push(1);\n } else {\n targetShape.push(x.shape[axis]);\n }\n }\n const broadcastMean = reshape(mean, targetShape);\n const broadcastVariance = reshape(variance, targetShape);\n const broadcastGamma =\n gamma == null ? null : reshape(gamma, targetShape);\n const broadcastBeta =\n beta == null ? 
null : reshape(beta, targetShape);\n const normed = batchNormalization(\n x, broadcastMean, broadcastVariance, broadcastBeta,\n broadcastGamma, epsilon);\n return [normed, mean, variance];\n }) as [Tensor, Tensor, Tensor];\n}\n\n/**\n * Batch normalization for use in training (not inference).\n *\n * @param x Input tensor to be normalized.\n * @param gamma Tensor by which to scale the input.\n * @param beta Tensor by which to center the input.\n * @param reductionAxes Axes over which to normalize.\n * @param epsilon Fuzz factor.\n * @returns An `Array` of three `Tensors`:\n * [normalized tensor, mean of input, variance of input].\n */\nexport function normalizeBatchInTraining(\n x: Tensor, gamma: Tensor, beta: Tensor, reductionAxes: number[],\n epsilon = 1e-3): [Tensor, Tensor, Tensor] {\n if (util.arraysEqual(\n reductionAxes.slice().sort(), math_utils.range(0, x.rank - 1))) {\n return regularNormalizeBatchInTraining(\n x, gamma, beta, reductionAxes, epsilon);\n } else {\n return broadcastNormalizeBatchInTraining(\n x, gamma, beta, reductionAxes, epsilon);\n }\n}\n\nexport declare interface BatchNormalizationLayerArgs extends LayerArgs {\n /**\n * The integer axis that should be normalized (typically the features axis).\n * Defaults to -1.\n *\n * For instance, after a `Conv2D` layer with `data_format=\"channels_first\"`,\n * set `axis=1` in `batchNormalization`.\n */\n axis?: number;\n\n /**\n * Momentum of the moving average. Defaults to 0.99.\n */\n momentum?: number;\n\n /**\n * Small float added to the variance to avoid dividing by zero. Defaults to\n * 1e-3.\n */\n epsilon?: number;\n\n /**\n * If `true`, add offset of `beta` to normalized tensor.\n * If `false`, `beta` is ignored.\n * Defaults to `true`.\n */\n center?: boolean;\n\n /**\n * If `true`, multiply by `gamma`.\n * If `false`, `gamma` is not used.\n * When the next layer is linear (also e.g. 
`nn.relu`),\n * this can be disabled since the scaling will be done by the next layer.\n * Defaults to `true`.\n */\n scale?: boolean;\n\n /**\n * Initializer for the beta weight.\n * Defaults to 'zeros'.\n */\n betaInitializer?: InitializerIdentifier|Initializer;\n\n /**\n * Initializer for the gamma weight.\n * Defaults to `ones`.\n */\n gammaInitializer?: InitializerIdentifier|Initializer;\n\n /**\n * Initializer for the moving mean.\n * Defaults to `zeros`\n */\n movingMeanInitializer?: InitializerIdentifier|Initializer;\n\n /**\n * Initializer for the moving variance.\n * Defaults to 'Ones'.\n */\n movingVarianceInitializer?: InitializerIdentifier|Initializer;\n\n /**\n * Constraint for the beta weight.\n */\n betaConstraint?: ConstraintIdentifier|Constraint;\n\n /**\n * Constraint for gamma weight.\n */\n gammaConstraint?: ConstraintIdentifier|Constraint;\n\n /**\n * Regularizer for the beta weight.\n */\n betaRegularizer?: RegularizerIdentifier|Regularizer;\n\n /**\n * Regularizer for the gamma weight.\n */\n gammaRegularizer?: RegularizerIdentifier|Regularizer;\n}\n\nexport class BatchNormalization extends Layer {\n /** @nocollapse */\n static className = 'BatchNormalization';\n private readonly axis: number;\n private readonly momentum: number;\n private readonly epsilon: number;\n private readonly center: boolean;\n private readonly scale: boolean;\n private readonly betaInitializer: Initializer;\n private readonly gammaInitializer: Initializer;\n private readonly movingMeanInitializer: Initializer;\n private readonly movingVarianceInitializer: Initializer;\n private readonly betaConstraint: Constraint;\n private readonly gammaConstraint: Constraint;\n private readonly betaRegularizer: Regularizer;\n private readonly gammaRegularizer: Regularizer;\n private gamma: LayerVariable;\n private beta: LayerVariable;\n private movingMean: LayerVariable;\n private movingVariance: LayerVariable;\n\n constructor(args?: BatchNormalizationLayerArgs) {\n if (args == null) {\n args = {};\n }\n super(args);\n\n this.supportsMasking = true;\n this.axis = args.axis == null ? -1 : args.axis;\n this.momentum = args.momentum == null ? 0.99 : args.momentum;\n this.epsilon = args.epsilon == null ? 1e-3 : args.epsilon;\n this.center = args.center == null ? true : args.center;\n this.scale = args.scale == null ? true : args.scale;\n this.betaInitializer = getInitializer(args.betaInitializer || 'zeros');\n this.gammaInitializer = getInitializer(args.gammaInitializer || 'ones');\n this.movingMeanInitializer =\n getInitializer(args.movingMeanInitializer || 'zeros');\n this.movingVarianceInitializer =\n getInitializer(args.movingVarianceInitializer || 'ones');\n this.betaConstraint = getConstraint(args.betaConstraint);\n this.gammaConstraint = getConstraint(args.gammaConstraint);\n this.betaRegularizer = getRegularizer(args.betaRegularizer);\n this.gammaRegularizer = getRegularizer(args.gammaRegularizer);\n }\n\n public override build(inputShape: Shape|Shape[]): void {\n inputShape = getExactlyOneShape(inputShape);\n const axis = this.axis >= 0 ? 
this.axis : (this.axis + inputShape.length);\n const dim = inputShape[axis];\n if (dim == null) {\n throw new ValueError(\n `Axis ${axis} of input tensor should have a defined dimension but ` +\n `the layer received an input with shape ` +\n `${JSON.stringify(inputShape)}.`);\n }\n this.inputSpec =\n [new InputSpec({ndim: inputShape.length, axes: {[axis]: dim}})];\n const shape = [dim];\n if (this.scale) {\n this.gamma = this.addWeight(\n 'gamma', shape, null, this.gammaInitializer, this.gammaRegularizer,\n true, this.gammaConstraint);\n }\n if (this.center) {\n this.beta = this.addWeight(\n 'beta', shape, null, this.betaInitializer, this.betaRegularizer, true,\n this.betaConstraint);\n }\n this.movingMean = this.addWeight(\n 'moving_mean', shape, null, this.movingMeanInitializer, null, false);\n this.movingVariance = this.addWeight(\n 'moving_variance', shape, null, this.movingVarianceInitializer, null,\n false);\n this.built = true;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n const training = kwargs['training'] == null ? false : kwargs['training'];\n const input = getExactlyOneTensor(inputs);\n const inputShape = input.shape;\n const ndim = inputShape.length;\n const reductionAxes = math_utils.range(0, ndim);\n const axis = this.axis >= 0 ? this.axis : (this.axis + ndim);\n reductionAxes.splice(axis, 1);\n const broadcastShape = generic_utils.pyListRepeat(1, ndim);\n broadcastShape[axis] = inputShape[axis];\n\n const sortedReductionAxes = reductionAxes.slice();\n sortedReductionAxes.sort();\n const needsBroadcasting = !util.arraysEqual(\n sortedReductionAxes, math_utils.range(0, ndim).slice(0, ndim - 1));\n\n const normalizeInference: () => Tensor = () => {\n if (needsBroadcasting) {\n const broadcastMovingMean =\n reshape(this.movingMean.read(), broadcastShape);\n const broadcastMovingVariance =\n reshape(this.movingVariance.read(), broadcastShape);\n const broadcastBeta =\n this.center ? reshape(this.beta.read(), broadcastShape) : null;\n const broadcastGamma =\n this.scale ? reshape(this.gamma.read(), broadcastShape) : null;\n return batchNormalization(\n input, broadcastMovingMean, broadcastMovingVariance,\n broadcastBeta, broadcastGamma, this.epsilon);\n } else {\n return batchNormalization(\n input, this.movingMean.read(), this.movingVariance.read(),\n this.beta == null ? null : this.beta.read(),\n this.gamma == null ? null : this.gamma.read(), this.epsilon);\n }\n };\n\n if (!training) {\n return normalizeInference();\n }\n\n const [normedTraining, mean, variance] = normalizeBatchInTraining(\n input, this.gamma.read(), this.beta.read(), reductionAxes,\n this.epsilon);\n\n const doMovingAverage =\n (variable: LayerVariable, value: Tensor, momentum: number): void => {\n tfc.tidy(() => {\n const decay = 1 - momentum;\n const origValue = variable.read();\n const updateDelta = tfc.mul(tfc.sub(origValue, value), decay);\n variable.write(tfc.sub(origValue, updateDelta));\n });\n };\n\n // Perform updates to moving mean and moving variance for training.\n // Porting Note: In PyKeras, these updates to `movingMean` and\n // `movingAverage` are done as a deferred Graph, added to the `Layer`'s\n // `update`s using the `add_update()` method. 
Here we do it imperatively\n // and encapsulate the updates in a function that is invoked\n // immediately.\n const updateMovingMeanAndVariance = () => {\n doMovingAverage(this.movingMean, mean, this.momentum);\n doMovingAverage(this.movingVariance, variance, this.momentum);\n };\n updateMovingMeanAndVariance();\n\n return normedTraining;\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {\n axis: this.axis,\n momentum: this.momentum,\n epsilon: this.epsilon,\n center: this.center,\n scale: this.scale,\n betaInitializer: serializeInitializer(this.betaInitializer),\n gammaInitializer: serializeInitializer(this.gammaInitializer),\n movingMeanInitializer: serializeInitializer(this.movingMeanInitializer),\n movingVarianceInitializer:\n serializeInitializer(this.movingVarianceInitializer),\n betaRegularizer: serializeRegularizer(this.betaRegularizer),\n gammaRegularizer: serializeRegularizer(this.gammaRegularizer),\n betaConstraint: serializeConstraint(this.betaConstraint),\n gammaConstraint: serializeConstraint(this.gammaConstraint)\n };\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(BatchNormalization);\n\nexport interface LayerNormalizationLayerArgs extends LayerArgs {\n /**\n * The axis or axes that should be normalized (typically, the feature axis).\n * Defaults to -1 (the last axis).\n */\n axis?: number|number[];\n\n /**\n * A small positive float added to variance to avoid divison by zero.\n * Defaults to 1e-3.\n */\n epsilon?: number;\n\n /**\n * If `true`, add offset of `beta` to normalized tensor.\n * If `false`, `beta` is ignored.\n * Default: `true`.\n */\n center?: boolean;\n\n /**\n * If `true`, multiply output by `gamma`.\n * If `false`, `gamma` is not used.\n * When the next layer is linear, this can be disabled since scaling will\n * be done by the next layer.\n * Default: `true`.\n */\n scale?: boolean;\n\n /**\n * Initializer for the beta weight.\n * Default: `'zeros'`.\n */\n betaInitializer?: InitializerIdentifier|Initializer;\n\n /**\n * Initializer for the gamma weight.\n * Default: `'ones'`.\n */\n gammaInitializer?: InitializerIdentifier|Initializer;\n\n /** Regularizer for the beta weight. */\n betaRegularizer?: RegularizerIdentifier|Regularizer;\n\n /** Regularizer for the gamma weight. */\n gammaRegularizer?: RegularizerIdentifier|Regularizer;\n}\n\nexport class LayerNormalization extends Layer {\n /** @nocollapse */\n static className = 'LayerNormalization';\n\n private axis: number|number[];\n readonly epsilon: number;\n readonly center: boolean;\n readonly scale: boolean;\n readonly betaInitializer: Initializer;\n readonly gammaInitializer: Initializer;\n readonly betaRegularizer: Regularizer;\n readonly gammaRegularizer: Regularizer;\n\n private gamma: LayerVariable;\n private beta: LayerVariable;\n\n constructor(args?: LayerNormalizationLayerArgs) {\n if (args == null) {\n args = {};\n }\n super(args);\n\n this.axis = args.axis == null ? 
-1 : args.axis;\n if (typeof this.axis === 'number') {\n if (!Number.isInteger(this.axis)) {\n throw new Error(\n `Expected axis to be an integer, but received ${this.axis}`);\n }\n } else if (Array.isArray(this.axis)) {\n for (const axis of this.axis) {\n if (!Number.isInteger(axis)) {\n throw new Error(\n `Expected axis to be an array of integers, ` +\n `but received ${JSON.stringify(this.axis)}`);\n }\n }\n } else {\n throw new Error(\n `Expected axis to be an integer or an array of integers, ` +\n `but received ${JSON.stringify(this.axis)}`);\n }\n\n this.epsilon = args.epsilon == null ? 1e-3 : args.epsilon;\n this.center = args.center == null ? true : args.center;\n this.scale = args.scale == null ? true : args.scale;\n this.betaInitializer = getInitializer(args.betaInitializer || 'zeros');\n this.gammaInitializer = getInitializer(args.gammaInitializer || 'ones');\n this.betaRegularizer = getRegularizer(args.betaRegularizer);\n this.gammaRegularizer = getRegularizer(args.gammaRegularizer);\n\n this.supportsMasking = true;\n }\n\n public override build(inputShape: Shape|Shape[]): void {\n inputShape = getExactlyOneShape(inputShape);\n const nDims = inputShape.length;\n\n // Convert axis to array and resolve negatives.\n if (typeof this.axis === 'number') {\n this.axis = [this.axis];\n }\n for (let i = 0; i < this.axis.length; ++i) {\n if (this.axis[i] < 0) {\n this.axis[i] += nDims;\n }\n }\n\n // Further validate axes.\n for (const axis of this.axis) {\n if (axis < 0 || axis >= nDims) {\n throw new Error(`Invalid axis: ${axis}`);\n }\n }\n if (this.axis.length !== generic_utils.unique(this.axis).length) {\n throw new Error(`Found duplicate axes in: ${this.axis}`);\n }\n\n const paramShape = this.axis.map(axis => inputShape[axis]) as number[];\n\n const trainable = true;\n if (this.scale) {\n this.gamma = this.addWeight(\n 'gamma', paramShape, 'float32', this.gammaInitializer,\n this.gammaRegularizer, trainable);\n } else {\n this.gamma = null;\n }\n if (this.center) {\n this.beta = this.addWeight(\n 'beta', paramShape, 'float32', this.betaInitializer,\n this.betaRegularizer, trainable);\n } else {\n this.beta = null;\n }\n\n this.built = true;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n const input = getExactlyOneTensor(inputs);\n const inputShape = input.shape;\n const nDims = inputShape.length;\n\n return tidy(() => {\n const keepDims = true;\n let {mean, variance} = moments(input, this.axis, keepDims);\n const broadcastShape = generic_utils.pyListRepeat(1, nDims);\n for (const dim of this.axis as number[]) {\n broadcastShape[dim] = inputShape[dim];\n }\n\n const broadcast = (v: Tensor) => {\n if (v != null && v.shape.length !== nDims) {\n return tfc.reshape(v, broadcastShape);\n } else {\n return v;\n }\n };\n\n let scale = this.scale ? broadcast(this.gamma.read()) : null;\n let offset = this.center ? broadcast(this.beta.read()) : null;\n\n // TODO(https://github.com/tensorflow/tfjs/issues/2120): The tiling below\n // is a workaround for the limitation of core's batchNormalization?d don't\n // support broadcasting in their gradients. In addition, the tiling is\n // necessary to ensure correctness on the browser CPU backend regardless\n // of forward or backward computation. Remove this workaround once the\n // limitation is addressed. 
See .\n const momentsTiling: number[] = [];\n const scaleOffsetTiling: number[] = [];\n for (let i = 0; i < nDims; ++i) {\n if ((this.axis as number[]).indexOf(i) !== -1) {\n momentsTiling.push(inputShape[i]);\n scaleOffsetTiling.push(1);\n } else {\n momentsTiling.push(1);\n scaleOffsetTiling.push(inputShape[i]);\n }\n }\n mean = tfc.tile(mean, momentsTiling);\n variance = tfc.tile(variance, momentsTiling);\n if (scale != null) {\n scale = tfc.tile(scale, scaleOffsetTiling);\n }\n if (offset != null) {\n offset = tfc.tile(offset, scaleOffsetTiling);\n }\n\n return batchNormalization(\n input, mean, variance, offset, scale, this.epsilon);\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {\n axis: this.axis,\n epsilon: this.epsilon,\n center: this.center,\n scale: this.scale,\n betaInitializer: serializeInitializer(this.betaInitializer),\n gammaInitializer: serializeInitializer(this.gammaInitializer),\n betaRegularizer: serializeRegularizer(this.betaRegularizer),\n gammaRegularizer: serializeRegularizer(this.gammaRegularizer)\n };\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(LayerNormalization);\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * Padding Layers.\n */\n\n// Porting Note: In Python Keras, the padding layers are in convolutional.py,\n// but we decided to put them in a separate file (padding.ts) for clarity.\n\nimport * as tfc from '@tensorflow/tfjs-core';\nimport {serialization, Tensor, tidy} from '@tensorflow/tfjs-core';\n\nimport {imageDataFormat} from '../backend/common';\nimport {InputSpec, Layer, LayerArgs} from '../engine/topology';\nimport {ValueError} from '../errors';\nimport {DataFormat, Shape} from '../keras_format/common';\nimport {Kwargs} from '../types';\nimport {getExactlyOneShape, getExactlyOneTensor} from '../utils/types_utils';\n\n/**\n * Pads the middle dimension of a 3D tensor.\n *\n * @param x Input `tf.Tensor` to be padded.\n * @param padding `Array` of 2 integers, how many zeros to add at the start and\n * end of the middle dimension (i.e., dimension 1).\n * @return A padded 3D `tf.Tensor`.\n */\nexport function temporalPadding(x: Tensor, padding?: [number, number]): Tensor {\n return tidy(() => {\n if (x.rank !== 3) {\n throw new ValueError(\n `temporalPadding expects input tensor to be 3-D, but received a ` +\n `${x.rank}-D tensor.`);\n }\n\n if (padding == null) {\n padding = [1, 1];\n }\n if (padding.length !== 2) {\n throw new ValueError(\n `temporalPadding expects input padding pattern to be a length-2 ` +\n `array, but received a length-${padding.length} array.`);\n }\n\n const pattern: Array<[number, number]> = [[0, 0], padding, [0, 0]];\n return tfc.pad(x, pattern);\n });\n}\n\n/**\n * Pads the 2nd and 3rd dimensions of a 4D tensor.\n *\n * @param x Input `tf.Tensor` to be padded.\n * @param padding `Array` of two `Array`s, each of which is an `Array` of two\n * integers. 
The amount of padding at the beginning and end of the 2nd and 3rd\n * dimensions, respectively.\n * @param dataFormat 'channelsLast' (default) or 'channelsFirst'.\n * @return Padded 4D `tf.Tensor`.\n */\nexport function spatial2dPadding(\n x: Tensor, padding?: [[number, number], [number, number]],\n dataFormat?: DataFormat): Tensor {\n return tidy(() => {\n if (x.rank !== 4) {\n throw new ValueError(\n `temporalPadding expects input tensor to be 4-D, but received a ` +\n `${x.rank}-D tensor.`);\n }\n\n if (padding == null) {\n padding = [[1, 1], [1, 1]];\n }\n if (padding.length !== 2 || padding[0].length !== 2 ||\n padding[1].length !== 2) {\n throw new ValueError(\n 'spatial2dPadding expects `padding` to be an Array of two Arrays, ' +\n 'each of which is an Array of two integers.');\n }\n\n if (dataFormat == null) {\n dataFormat = imageDataFormat();\n }\n if (dataFormat !== 'channelsLast' && dataFormat !== 'channelsFirst') {\n throw new ValueError(\n `Unknown data format: ${dataFormat}. ` +\n `Supported data formats are 'channelsLast' and 'channelsFirst.`);\n }\n\n let pattern: Array<[number, number]>;\n if (dataFormat === 'channelsFirst') {\n pattern = [[0, 0], [0, 0], padding[0], padding[1]];\n } else {\n pattern = [[0, 0], padding[0], padding[1], [0, 0]];\n }\n\n return tfc.pad(x, pattern);\n });\n}\n\nexport declare interface ZeroPadding2DLayerArgs extends LayerArgs {\n /**\n * Integer, or `Array` of 2 integers, or `Array` of 2 `Array`s, each of\n * which is an `Array` of 2 integers.\n * - If integer, the same symmetric padding is applied to width and height.\n * - If `Array` of 2 integers, interpreted as two different symmetric values\n * for height and width:\n * `[symmetricHeightPad, symmetricWidthPad]`.\n * - If `Array` of 2 `Array`s, interpreted as:\n * `[[topPad, bottomPad], [leftPad, rightPad]]`.\n */\n padding?: number|[number, number]|[[number, number], [number, number]];\n\n /**\n * One of `'channelsLast'` (default) and `'channelsFirst'`.\n *\n * The ordering of the dimensions in the inputs.\n * `channelsLast` corresponds to inputs with shape\n * `[batch, height, width, channels]` while `channelsFirst`\n * corresponds to inputs with shape\n * `[batch, channels, height, width]`.\n */\n dataFormat?: DataFormat;\n}\n\nexport class ZeroPadding2D extends Layer {\n /** @nocollapse */\n static className = 'ZeroPadding2D';\n readonly dataFormat: DataFormat;\n readonly padding: [[number, number], [number, number]];\n\n constructor(args?: ZeroPadding2DLayerArgs) {\n if (args == null) {\n args = {};\n }\n super(args);\n\n this.dataFormat =\n args.dataFormat == null ? 
imageDataFormat() : args.dataFormat;\n // TODO(cais): Maybe refactor the following logic surrounding `padding`\n // into a helper method.\n if (args.padding == null) {\n this.padding = [[1, 1], [1, 1]];\n } else if (typeof args.padding === 'number') {\n this.padding =\n [[args.padding, args.padding], [args.padding, args.padding]];\n } else {\n args.padding = args.padding;\n if (args.padding.length !== 2) {\n throw new ValueError(\n `ZeroPadding2D expects padding to be a length-2 array, but ` +\n `received a length-${args.padding.length} array.`);\n }\n\n let heightPadding: [number, number];\n let widthPadding: [number, number];\n if (typeof args.padding[0] === 'number') {\n heightPadding = [args.padding[0], args.padding[0]];\n widthPadding = [args.padding[1] as number, args.padding[1] as number];\n } else {\n args.padding = args.padding as [[number, number], [number, number]];\n\n if (args.padding[0].length !== 2) {\n throw new ValueError(\n `ZeroPadding2D expects height padding to be a length-2 array, ` +\n `but received a length-${args.padding[0].length} array.`);\n }\n heightPadding = args.padding[0] as [number, number];\n\n if (args.padding[1].length !== 2) {\n throw new ValueError(\n `ZeroPadding2D expects width padding to be a length-2 array, ` +\n `but received a length-${args.padding[1].length} array.`);\n }\n widthPadding = args.padding[1] as [number, number];\n }\n this.padding = [heightPadding, widthPadding];\n }\n this.inputSpec = [new InputSpec({ndim: 4})];\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n inputShape = getExactlyOneShape(inputShape);\n\n let rows: number;\n let cols: number;\n if (this.dataFormat === 'channelsFirst') {\n if (inputShape[2] != null && inputShape[2] >= 0) {\n rows = inputShape[2] + this.padding[0][0] + this.padding[0][1];\n } else {\n rows = null;\n }\n if (inputShape[3] != null && inputShape[3] >= 0) {\n cols = inputShape[3] + this.padding[1][0] + this.padding[1][1];\n } else {\n cols = null;\n }\n return [inputShape[0], inputShape[1], rows, cols];\n } else {\n if (inputShape[1] != null && inputShape[1] >= 0) {\n rows = inputShape[1] + this.padding[0][0] + this.padding[0][1];\n } else {\n rows = null;\n }\n if (inputShape[2] != null && inputShape[2] >= 0) {\n cols = inputShape[2] + this.padding[1][0] + this.padding[1][1];\n } else {\n cols = null;\n }\n return [inputShape[0], rows, cols, inputShape[3]];\n }\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(\n () => spatial2dPadding(\n getExactlyOneTensor(inputs), this.padding, this.dataFormat));\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {\n padding: this.padding,\n dataFormat: this.dataFormat,\n };\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\nserialization.registerClass(ZeroPadding2D);\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * TensorFlow.js Layers: Pooling Layers.\n */\n\nimport * as tfc from '@tensorflow/tfjs-core';\nimport {serialization, Tensor, Tensor3D, Tensor4D, Tensor5D, tidy} from '@tensorflow/tfjs-core';\n\nimport {imageDataFormat} from '../backend/common';\nimport * as K from '../backend/tfjs_backend';\nimport {checkDataFormat, 
checkPaddingMode, checkPoolMode} from '../common';\nimport {InputSpec} from '../engine/topology';\nimport {Layer, LayerArgs} from '../engine/topology';\nimport {NotImplementedError, ValueError} from '../errors';\nimport {DataFormat, PaddingMode, PoolMode, Shape} from '../keras_format/common';\nimport {Kwargs} from '../types';\nimport {convOutputLength} from '../utils/conv_utils';\nimport {assertPositiveInteger} from '../utils/generic_utils';\nimport {getExactlyOneShape, getExactlyOneTensor} from '../utils/types_utils';\n\nimport {preprocessConv2DInput, preprocessConv3DInput} from './convolutional';\n\n/**\n * 2D pooling.\n * @param x\n * @param poolSize\n * @param strides strides. Defaults to [1, 1].\n * @param padding padding. Defaults to 'valid'.\n * @param dataFormat data format. Defaults to 'channelsLast'.\n * @param poolMode Mode of pooling. Defaults to 'max'.\n * @returns Result of the 2D pooling.\n */\nexport function pool2d(\n x: Tensor, poolSize: [number, number], strides?: [number, number],\n padding?: PaddingMode, dataFormat?: DataFormat,\n poolMode?: PoolMode): Tensor {\n return tidy(() => {\n checkDataFormat(dataFormat);\n checkPoolMode(poolMode);\n checkPaddingMode(padding);\n if (strides == null) {\n strides = [1, 1];\n }\n if (padding == null) {\n padding = 'valid';\n }\n if (dataFormat == null) {\n dataFormat = imageDataFormat();\n }\n if (poolMode == null) {\n poolMode = 'max';\n }\n\n // TODO(cais): Remove the preprocessing step once deeplearn.js supports\n // dataFormat as an input argument.\n x = preprocessConv2DInput(x, dataFormat); // x is NHWC after preprocessing.\n let y: Tensor;\n const paddingString = (padding === 'same') ? 'same' : 'valid';\n if (poolMode === 'max') {\n // TODO(cais): Rank check?\n y = tfc.maxPool(x as Tensor4D, poolSize, strides, paddingString);\n } else { // 'avg'\n // TODO(cais): Check the dtype and rank of x and give clear error message\n // if those are incorrect.\n y = tfc.avgPool(\n // TODO(cais): Rank check?\n x as Tensor3D | Tensor4D, poolSize, strides, paddingString);\n }\n if (dataFormat === 'channelsFirst') {\n y = tfc.transpose(y, [0, 3, 1, 2]); // NHWC -> NCHW.\n }\n return y;\n });\n}\n\n/**\n * 3D pooling.\n * @param x\n * @param poolSize. Default to [1, 1, 1].\n * @param strides strides. Defaults to [1, 1, 1].\n * @param padding padding. Defaults to 'valid'.\n * @param dataFormat data format. Defaults to 'channelsLast'.\n * @param poolMode Mode of pooling. Defaults to 'max'.\n * @returns Result of the 3D pooling.\n */\nexport function pool3d(\n x: Tensor5D, poolSize: [number, number, number],\n strides?: [number, number, number], padding?: PaddingMode,\n dataFormat?: DataFormat, poolMode?: PoolMode): Tensor {\n return tidy(() => {\n checkDataFormat(dataFormat);\n checkPoolMode(poolMode);\n checkPaddingMode(padding);\n if (strides == null) {\n strides = [1, 1, 1];\n }\n if (padding == null) {\n padding = 'valid';\n }\n if (dataFormat == null) {\n dataFormat = imageDataFormat();\n }\n if (poolMode == null) {\n poolMode = 'max';\n }\n\n // x is NDHWC after preprocessing.\n x = preprocessConv3DInput(x as Tensor, dataFormat) as Tensor5D;\n let y: Tensor;\n const paddingString = (padding === 'same') ? 
'same' : 'valid';\n if (poolMode === 'max') {\n y = tfc.maxPool3d(x, poolSize, strides, paddingString);\n } else { // 'avg'\n y = tfc.avgPool3d(x, poolSize, strides, paddingString);\n }\n if (dataFormat === 'channelsFirst') {\n y = tfc.transpose(y, [0, 4, 1, 2, 3]); // NDHWC -> NCDHW.\n }\n return y;\n });\n}\n\nexport declare interface Pooling1DLayerArgs extends LayerArgs {\n /**\n * Size of the window to pool over, should be an integer.\n */\n poolSize?: number|[number];\n /**\n * Period at which to sample the pooled values.\n *\n * If `null`, defaults to `poolSize`.\n */\n strides?: number|[number];\n /** How to fill in data that's not an integer multiple of poolSize. */\n padding?: PaddingMode;\n}\n\n/**\n * Abstract class for different pooling 1D layers.\n */\nexport abstract class Pooling1D extends Layer {\n protected readonly poolSize: [number];\n protected readonly strides: [number];\n protected readonly padding: PaddingMode;\n\n /**\n *\n * @param args Parameters for the Pooling layer.\n *\n * config.poolSize defaults to 2.\n */\n constructor(args: Pooling1DLayerArgs) {\n if (args.poolSize == null) {\n args.poolSize = 2;\n }\n super(args);\n if (typeof args.poolSize === 'number') {\n this.poolSize = [args.poolSize];\n } else if (\n Array.isArray(args.poolSize) &&\n (args.poolSize as number[]).length === 1 &&\n typeof (args.poolSize as number[])[0] === 'number') {\n this.poolSize = args.poolSize;\n } else {\n throw new ValueError(\n `poolSize for 1D convolutional layer must be a number or an ` +\n `Array of a single number, but received ` +\n `${JSON.stringify(args.poolSize)}`);\n }\n assertPositiveInteger(this.poolSize, 'poolSize');\n if (args.strides == null) {\n this.strides = this.poolSize;\n } else {\n if (typeof args.strides === 'number') {\n this.strides = [args.strides];\n } else if (\n Array.isArray(args.strides) &&\n (args.strides as number[]).length === 1 &&\n typeof (args.strides as number[])[0] === 'number') {\n this.strides = args.strides;\n } else {\n throw new ValueError(\n `strides for 1D convolutional layer must be a number or an ` +\n `Array of a single number, but received ` +\n `${JSON.stringify(args.strides)}`);\n }\n }\n assertPositiveInteger(this.strides, 'strides');\n\n this.padding = args.padding == null ? 
'valid' : args.padding;\n checkPaddingMode(this.padding);\n this.inputSpec = [new InputSpec({ndim: 3})];\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n inputShape = getExactlyOneShape(inputShape);\n const length = convOutputLength(\n inputShape[1], this.poolSize[0], this.padding, this.strides[0]);\n return [inputShape[0], length, inputShape[2]];\n }\n\n protected abstract poolingFunction(\n inputs: Tensor, poolSize: [number, number], strides: [number, number],\n padding: PaddingMode, dataFormat: DataFormat): Tensor;\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n this.invokeCallHook(inputs, kwargs);\n // Add dummy last dimension.\n inputs = K.expandDims(getExactlyOneTensor(inputs), 2);\n const output = this.poolingFunction(\n getExactlyOneTensor(inputs), [this.poolSize[0], 1],\n [this.strides[0], 1], this.padding, 'channelsLast');\n // Remove dummy last dimension.\n return tfc.squeeze(output, [2]);\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n const config = {\n poolSize: this.poolSize,\n padding: this.padding,\n strides: this.strides,\n };\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\n\nexport class MaxPooling1D extends Pooling1D {\n /** @nocollapse */\n static className = 'MaxPooling1D';\n constructor(args: Pooling1DLayerArgs) {\n super(args);\n }\n\n protected poolingFunction(\n inputs: Tensor, poolSize: [number, number], strides: [number, number],\n padding: PaddingMode, dataFormat: DataFormat): Tensor {\n checkDataFormat(dataFormat);\n checkPaddingMode(padding);\n return pool2d(inputs, poolSize, strides, padding, dataFormat, 'max');\n }\n}\nserialization.registerClass(MaxPooling1D);\n\nexport class AveragePooling1D extends Pooling1D {\n /** @nocollapse */\n static className = 'AveragePooling1D';\n constructor(args: Pooling1DLayerArgs) {\n super(args);\n }\n\n protected poolingFunction(\n inputs: Tensor, poolSize: [number, number], strides: [number, number],\n padding: PaddingMode, dataFormat: DataFormat): Tensor {\n checkDataFormat(dataFormat);\n checkPaddingMode(padding);\n return pool2d(inputs, poolSize, strides, padding, dataFormat, 'avg');\n }\n}\nserialization.registerClass(AveragePooling1D);\n\nexport declare interface Pooling2DLayerArgs extends LayerArgs {\n /**\n * Factors by which to downscale in each dimension [vertical, horizontal].\n * Expects an integer or an array of 2 integers.\n *\n * For example, `[2, 2]` will halve the input in both spatial dimensions.\n * If only one integer is specified, the same window length\n * will be used for both dimensions.\n */\n poolSize?: number|[number, number];\n\n /**\n * The size of the stride in each dimension of the pooling window. Expects\n * an integer or an array of 2 integers. Integer, tuple of 2 integers, or\n * None.\n *\n * If `null`, defaults to `poolSize`.\n */\n strides?: number|[number, number];\n\n /** The padding type to use for the pooling layer. */\n padding?: PaddingMode;\n /** The data format to use for the pooling layer. 
*/\n dataFormat?: DataFormat;\n}\n\n/**\n * Abstract class for different pooling 2D layers.\n */\nexport abstract class Pooling2D extends Layer {\n protected readonly poolSize: [number, number];\n protected readonly strides: [number, number];\n protected readonly padding: PaddingMode;\n protected readonly dataFormat: DataFormat;\n\n constructor(args: Pooling2DLayerArgs) {\n if (args.poolSize == null) {\n args.poolSize = [2, 2];\n }\n super(args);\n this.poolSize = Array.isArray(args.poolSize) ?\n args.poolSize :\n [args.poolSize, args.poolSize];\n if (args.strides == null) {\n this.strides = this.poolSize;\n } else if (Array.isArray(args.strides)) {\n if (args.strides.length !== 2) {\n throw new ValueError(\n `If the strides property of a 2D pooling layer is an Array, ` +\n `it is expected to have a length of 2, but received length ` +\n `${args.strides.length}.`);\n }\n this.strides = args.strides;\n } else {\n // `config.strides` is a number.\n this.strides = [args.strides, args.strides];\n }\n assertPositiveInteger(this.poolSize, 'poolSize');\n assertPositiveInteger(this.strides, 'strides');\n this.padding = args.padding == null ? 'valid' : args.padding;\n this.dataFormat =\n args.dataFormat == null ? 'channelsLast' : args.dataFormat;\n checkDataFormat(this.dataFormat);\n checkPaddingMode(this.padding);\n\n this.inputSpec = [new InputSpec({ndim: 4})];\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n inputShape = getExactlyOneShape(inputShape);\n let rows =\n this.dataFormat === 'channelsFirst' ? inputShape[2] : inputShape[1];\n let cols =\n this.dataFormat === 'channelsFirst' ? inputShape[3] : inputShape[2];\n rows =\n convOutputLength(rows, this.poolSize[0], this.padding, this.strides[0]);\n cols =\n convOutputLength(cols, this.poolSize[1], this.padding, this.strides[1]);\n if (this.dataFormat === 'channelsFirst') {\n return [inputShape[0], inputShape[1], rows, cols];\n } else {\n return [inputShape[0], rows, cols, inputShape[3]];\n }\n }\n\n protected abstract poolingFunction(\n inputs: Tensor, poolSize: [number, number], strides: [number, number],\n padding: PaddingMode, dataFormat: DataFormat): Tensor;\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n this.invokeCallHook(inputs, kwargs);\n return this.poolingFunction(\n getExactlyOneTensor(inputs), this.poolSize, this.strides,\n this.padding, this.dataFormat);\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n const config = {\n poolSize: this.poolSize,\n padding: this.padding,\n strides: this.strides,\n dataFormat: this.dataFormat\n };\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\n\nexport class MaxPooling2D extends Pooling2D {\n /** @nocollapse */\n static className = 'MaxPooling2D';\n constructor(args: Pooling2DLayerArgs) {\n super(args);\n }\n\n protected poolingFunction(\n inputs: Tensor, poolSize: [number, number], strides: [number, number],\n padding: PaddingMode, dataFormat: DataFormat): Tensor {\n checkDataFormat(dataFormat);\n checkPaddingMode(padding);\n return pool2d(inputs, poolSize, strides, padding, dataFormat, 'max');\n }\n}\nserialization.registerClass(MaxPooling2D);\n\nexport class AveragePooling2D extends Pooling2D {\n /** @nocollapse */\n static className = 'AveragePooling2D';\n constructor(args: Pooling2DLayerArgs) {\n super(args);\n }\n\n protected poolingFunction(\n inputs: Tensor, poolSize: [number, number], strides: [number, number],\n padding: 
PaddingMode, dataFormat: DataFormat): Tensor {\n checkDataFormat(dataFormat);\n checkPaddingMode(padding);\n return pool2d(inputs, poolSize, strides, padding, dataFormat, 'avg');\n }\n}\nserialization.registerClass(AveragePooling2D);\n\nexport declare interface Pooling3DLayerArgs extends LayerArgs {\n /**\n * Factors by which to downscale in each dimension [depth, height, width].\n * Expects an integer or an array of 3 integers.\n *\n * For example, `[2, 2, 2]` will halve the input in three dimensions.\n * If only one integer is specified, the same window length\n * will be used for all dimensions.\n */\n poolSize?: number|[number, number, number];\n\n /**\n * The size of the stride in each dimension of the pooling window. Expects\n * an integer or an array of 3 integers. Integer, tuple of 3 integers, or\n * None.\n *\n * If `null`, defaults to `poolSize`.\n */\n strides?: number|[number, number, number];\n\n /** The padding type to use for the pooling layer. */\n padding?: PaddingMode;\n /** The data format to use for the pooling layer. */\n dataFormat?: DataFormat;\n}\n\n/**\n * Abstract class for different pooling 3D layers.\n */\nexport abstract class Pooling3D extends Layer {\n protected readonly poolSize: [number, number, number];\n protected readonly strides: [number, number, number];\n protected readonly padding: PaddingMode;\n protected readonly dataFormat: DataFormat;\n\n constructor(args: Pooling3DLayerArgs) {\n if (args.poolSize == null) {\n args.poolSize = [2, 2, 2];\n }\n super(args);\n this.poolSize = Array.isArray(args.poolSize) ?\n args.poolSize :\n [args.poolSize, args.poolSize, args.poolSize];\n if (args.strides == null) {\n this.strides = this.poolSize;\n } else if (Array.isArray(args.strides)) {\n if (args.strides.length !== 3) {\n throw new ValueError(\n `If the strides property of a 3D pooling layer is an Array, ` +\n `it is expected to have a length of 3, but received length ` +\n `${args.strides.length}.`);\n }\n this.strides = args.strides;\n } else {\n // `config.strides` is a number.\n this.strides = [args.strides, args.strides, args.strides];\n }\n assertPositiveInteger(this.poolSize, 'poolSize');\n assertPositiveInteger(this.strides, 'strides');\n this.padding = args.padding == null ? 'valid' : args.padding;\n this.dataFormat =\n args.dataFormat == null ? 'channelsLast' : args.dataFormat;\n checkDataFormat(this.dataFormat);\n checkPaddingMode(this.padding);\n\n this.inputSpec = [new InputSpec({ndim: 5})];\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n inputShape = getExactlyOneShape(inputShape);\n let depths =\n this.dataFormat === 'channelsFirst' ? inputShape[2] : inputShape[1];\n let rows =\n this.dataFormat === 'channelsFirst' ? inputShape[3] : inputShape[2];\n let cols =\n this.dataFormat === 'channelsFirst' ? 
inputShape[4] : inputShape[3];\n depths = convOutputLength(\n depths, this.poolSize[0], this.padding, this.strides[0]);\n rows =\n convOutputLength(rows, this.poolSize[1], this.padding, this.strides[1]);\n cols =\n convOutputLength(cols, this.poolSize[2], this.padding, this.strides[2]);\n if (this.dataFormat === 'channelsFirst') {\n return [inputShape[0], inputShape[1], depths, rows, cols];\n } else {\n return [inputShape[0], depths, rows, cols, inputShape[4]];\n }\n }\n\n protected abstract poolingFunction(\n inputs: Tensor, poolSize: [number, number, number],\n strides: [number, number, number], padding: PaddingMode,\n dataFormat: DataFormat): Tensor;\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n this.invokeCallHook(inputs, kwargs);\n return this.poolingFunction(\n getExactlyOneTensor(inputs), this.poolSize, this.strides,\n this.padding, this.dataFormat);\n });\n }\n\n override getConfig(): serialization.ConfigDict {\n const config = {\n poolSize: this.poolSize,\n padding: this.padding,\n strides: this.strides,\n dataFormat: this.dataFormat\n };\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\n\nexport class MaxPooling3D extends Pooling3D {\n /** @nocollapse */\n static className = 'MaxPooling3D';\n constructor(args: Pooling3DLayerArgs) {\n super(args);\n }\n\n protected poolingFunction(\n inputs: Tensor, poolSize: [number, number, number],\n strides: [number, number, number], padding: PaddingMode,\n dataFormat: DataFormat): Tensor {\n checkDataFormat(dataFormat);\n checkPaddingMode(padding);\n return pool3d(\n inputs as Tensor5D, poolSize, strides, padding, dataFormat, 'max');\n }\n}\nserialization.registerClass(MaxPooling3D);\n\nexport class AveragePooling3D extends Pooling3D {\n /** @nocollapse */\n static className = 'AveragePooling3D';\n constructor(args: Pooling3DLayerArgs) {\n super(args);\n }\n\n protected poolingFunction(\n inputs: Tensor, poolSize: [number, number, number],\n strides: [number, number, number], padding: PaddingMode,\n dataFormat: DataFormat): Tensor {\n checkDataFormat(dataFormat);\n checkPaddingMode(padding);\n return pool3d(\n inputs as Tensor5D, poolSize, strides, padding, dataFormat, 'avg');\n }\n}\nserialization.registerClass(AveragePooling3D);\n\n/**\n * Abstract class for different global pooling 1D layers.\n */\nexport abstract class GlobalPooling1D extends Layer {\n constructor(args: LayerArgs) {\n super(args);\n this.inputSpec = [new InputSpec({ndim: 3})];\n }\n\n override computeOutputShape(inputShape: Shape): Shape {\n return [inputShape[0], inputShape[2]];\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n throw new NotImplementedError();\n }\n}\n\nexport class GlobalAveragePooling1D extends GlobalPooling1D {\n /** @nocollapse */\n static className = 'GlobalAveragePooling1D';\n constructor(args?: LayerArgs) {\n super(args || {});\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n const input = getExactlyOneTensor(inputs);\n return tfc.mean(input, 1);\n });\n }\n}\nserialization.registerClass(GlobalAveragePooling1D);\n\nexport class GlobalMaxPooling1D extends GlobalPooling1D {\n /** @nocollapse */\n static className = 'GlobalMaxPooling1D';\n constructor(args: LayerArgs) {\n super(args || {});\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n const input = getExactlyOneTensor(inputs);\n return 
tfc.max(input, 1);\n });\n }\n}\nserialization.registerClass(GlobalMaxPooling1D);\n\nexport declare interface GlobalPooling2DLayerArgs extends LayerArgs {\n /**\n * One of `CHANNEL_LAST` (default) or `CHANNEL_FIRST`.\n *\n * The ordering of the dimensions in the inputs. `CHANNEL_LAST` corresponds\n * to inputs with shape `[batch, height, width, channels]` while\n * `CHANNEL_FIRST` corresponds to inputs with shape\n * `[batch, channels, height, width]`.\n */\n dataFormat?: DataFormat;\n}\n\n/**\n * Abstract class for different global pooling 2D layers.\n */\nexport abstract class GlobalPooling2D extends Layer {\n protected dataFormat: DataFormat;\n constructor(args: GlobalPooling2DLayerArgs) {\n super(args);\n this.dataFormat =\n args.dataFormat == null ? 'channelsLast' : args.dataFormat;\n checkDataFormat(this.dataFormat);\n this.inputSpec = [new InputSpec({ndim: 4})];\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n inputShape = inputShape as Shape;\n if (this.dataFormat === 'channelsLast') {\n return [inputShape[0], inputShape[3]];\n } else {\n return [inputShape[0], inputShape[1]];\n }\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n throw new NotImplementedError();\n }\n\n override getConfig(): serialization.ConfigDict {\n const config = {dataFormat: this.dataFormat};\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n}\n\nexport class GlobalAveragePooling2D extends GlobalPooling2D {\n /** @nocollapse */\n static className = 'GlobalAveragePooling2D';\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n const input = getExactlyOneTensor(inputs);\n if (this.dataFormat === 'channelsLast') {\n return tfc.mean(input, [1, 2]);\n } else {\n return tfc.mean(input, [2, 3]);\n }\n });\n }\n}\nserialization.registerClass(GlobalAveragePooling2D);\n\nexport class GlobalMaxPooling2D extends GlobalPooling2D {\n /** @nocollapse */\n static className = 'GlobalMaxPooling2D';\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n const input = getExactlyOneTensor(inputs);\n if (this.dataFormat === 'channelsLast') {\n return tfc.max(input, [1, 2]);\n } else {\n return tfc.max(input, [2, 3]);\n }\n });\n }\n}\nserialization.registerClass(GlobalMaxPooling2D);\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/**\n * Layers that augment the functionality of a base layer.\n */\n\nimport * as tfc from '@tensorflow/tfjs-core';\nimport {serialization, Tensor, tidy} from '@tensorflow/tfjs-core';\nimport * as K from '../backend/tfjs_backend';\nimport {nameScope} from '../common';\nimport {InputSpec, Layer, LayerArgs, SymbolicTensor} from '../engine/topology';\nimport {NotImplementedError, ValueError} from '../errors';\nimport {BidirectionalMergeMode, Shape, VALID_BIDIRECTIONAL_MERGE_MODES} from '../keras_format/common';\nimport {Kwargs} from '../types';\nimport {RegularizerFn, RnnStepFunction} from '../types';\nimport * as generic_utils from '../utils/generic_utils';\nimport {getExactlyOneShape, getExactlyOneTensor} from '../utils/types_utils';\nimport {LayerVariable} from '../variables';\n\nimport {rnn, RNN, standardizeArgs} from './recurrent';\nimport 
{deserialize} from './serialization';\n\nexport declare interface WrapperLayerArgs extends LayerArgs {\n /**\n * The layer to be wrapped.\n */\n layer: Layer;\n}\n\n/**\n * Abstract wrapper base class.\n *\n * Wrappers take another layer and augment it in various ways.\n * Do not use this class as a layer, it is only an abstract base class.\n * Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers.\n */\nexport abstract class Wrapper extends Layer {\n readonly layer: Layer;\n\n constructor(args: WrapperLayerArgs) {\n // Porting Note: In PyKeras, `self.layer` is set prior to the calling\n // `super()`. But we can't do that here due to TypeScript's restriction.\n // See: https://github.com/Microsoft/TypeScript/issues/8277\n // As a result, we have to add checks in `get trainable()` and\n // `set trainable()` below in order to prevent using `this.layer` when\n // its value is `undefined`. The super constructor does use the getter\n // and the setter of `this.layer`.\n super(args);\n this.layer = args.layer;\n }\n\n override build(inputShape: Shape|Shape[]): void {\n this.built = true;\n }\n\n // TODO(cais): Implement activityRegularizer getter.\n\n override get trainable(): boolean {\n // Porting Note: the check of `this.layer` here is necessary due to the\n // way the `constructor` of this class is written (see Porting Note\n // above).\n if (this.layer != null) {\n return this.layer.trainable;\n } else {\n return false;\n }\n }\n\n override set trainable(value: boolean) {\n // Porting Note: the check of `this.layer` here is necessary due to the\n // way the `constructor` of this class is written (see Porting Note\n // above).\n if (this.layer != null) {\n this.layer.trainable = value;\n }\n }\n\n override get trainableWeights(): LayerVariable[] {\n return this.layer.trainableWeights;\n }\n // TODO(cais): Implement setter for trainableWeights.\n\n override get nonTrainableWeights(): LayerVariable[] {\n return this.layer.nonTrainableWeights;\n }\n // TODO(cais): Implement setter for nonTrainableWeights.\n\n override get updates(): Tensor[] {\n // tslint:disable-next-line:no-any\n return (this.layer as any)._updates;\n }\n\n // TODO(cais): Implement getUpdatesFor().\n\n override get losses(): RegularizerFn[] {\n return this.layer.losses;\n }\n\n // TODO(cais): Implement getLossesFor().\n\n override getWeights(): Tensor[] {\n return this.layer.getWeights();\n }\n\n override setWeights(weights: Tensor[]): void {\n this.layer.setWeights(weights);\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {\n 'layer': {\n 'className': this.layer.getClassName(),\n 'config': this.layer.getConfig(),\n }\n };\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n\n override setFastWeightInitDuringBuild(value: boolean) {\n super.setFastWeightInitDuringBuild(value);\n if (this.layer != null) {\n this.layer.setFastWeightInitDuringBuild(value);\n }\n }\n\n /** @nocollapse */\n static override fromConfig(\n cls: serialization.SerializableConstructor,\n config: serialization.ConfigDict,\n customObjects = {} as serialization.ConfigDict): T {\n const layerConfig = config['layer'] as serialization.ConfigDict;\n const layer = deserialize(layerConfig, customObjects) as Layer;\n delete config['layer'];\n const newConfig = {layer};\n Object.assign(newConfig, config);\n return new cls(newConfig);\n }\n}\n\nexport class TimeDistributed extends Wrapper {\n /** @nocollapse */\n static className = 
'TimeDistributed';\n constructor(args: WrapperLayerArgs) {\n super(args);\n this.supportsMasking = true;\n }\n\n override build(inputShape: Shape|Shape[]): void {\n inputShape = getExactlyOneShape(inputShape);\n if (inputShape.length < 3) {\n throw new ValueError(\n `TimeDistributed layer expects an input shape >= 3D, but received ` +\n `input shape ${JSON.stringify(inputShape)}`);\n }\n this.inputSpec = [{shape: inputShape}];\n const childInputShape = [inputShape[0]].concat(inputShape.slice(2));\n if (!this.layer.built) {\n this.layer.build(childInputShape);\n this.layer.built = true;\n }\n super.build(inputShape);\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n inputShape = getExactlyOneShape(inputShape);\n const childInputShape = [inputShape[0]].concat(inputShape.slice(2));\n const childOutputShape =\n this.layer.computeOutputShape(childInputShape) as Shape;\n const timesteps = inputShape[1];\n return [childOutputShape[0], timesteps].concat(childOutputShape.slice(1));\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n // TODO(cais): Add 'training' and 'useLearningPhase' to kwargs.\n inputs = getExactlyOneTensor(inputs);\n // Porting Note: In tfjs-layers, `inputs` are always concrete tensor\n // values. Hence the inputs can't have an undetermined first (batch)\n // dimension, which is why we always use the K.rnn approach here.\n const step: RnnStepFunction = (inputs: Tensor, states: Tensor[]) => {\n // TODO(cais): Add useLearningPhase.\n // NOTE(cais): `layer.call` may return a length-1 array of Tensor in\n // some cases (e.g., `layer` is a `Sequential` instance), which is\n // why `getExactlyOneTensor` is used below.\n const output = getExactlyOneTensor(this.layer.call(inputs, kwargs));\n return [output, []];\n };\n const rnnOutputs =\n rnn(step, inputs, [], false /* goBackwards */, null /* mask */,\n null /* constants */, false /* unroll */,\n true /* needPerStepOutputs */);\n const y = rnnOutputs[1];\n // TODO(cais): Add activity regularization.\n // TODO(cais): Add useLearningPhase.\n return y;\n });\n }\n\n // TODO(cais): Implement detailed computeMask() logic.\n}\nserialization.registerClass(TimeDistributed);\n\nexport function checkBidirectionalMergeMode(value?: string): void {\n generic_utils.checkStringTypeUnionValue(\n VALID_BIDIRECTIONAL_MERGE_MODES, 'BidirectionalMergeMode', value);\n}\n\nexport declare interface BidirectionalLayerArgs extends WrapperLayerArgs {\n /**\n * The instance of an `RNN` layer to be wrapped.\n */\n layer: RNN;\n\n /**\n * Mode by which outputs of the forward and backward RNNs are\n * combined. If `null` or `undefined`, the output will not be\n * combined, they will be returned as an `Array`.\n *\n * If `undefined` (i.e., not provided), defaults to `'concat'`.\n */\n mergeMode?: BidirectionalMergeMode;\n}\n\nconst DEFAULT_BIDIRECTIONAL_MERGE_MODE: BidirectionalMergeMode = 'concat';\n\nexport class Bidirectional extends Wrapper {\n /** @nocollapse */\n static className = 'Bidirectional';\n mergeMode: BidirectionalMergeMode;\n private forwardLayer: RNN;\n private backwardLayer: RNN;\n private returnSequences: boolean;\n private returnState: boolean;\n private numConstants?: number;\n private _trainable: boolean;\n\n constructor(args: BidirectionalLayerArgs) {\n super(args);\n\n // Note: When creating `this.forwardLayer`, the original Layer object\n // (`config.layer`) ought to be cloned. This is why we call\n // `getConfig()` followed by `deserialize()`. 
Without this cloning,\n // the layer names saved during serialization will incorrectly contain\n // the 'forward_' prefix. In Python Keras, this is done using\n // `copy.copy` (shallow copy), which does not have a simple equivalent\n // in JavaScript. JavaScript's `Object.assign()` does not copy\n // methods.\n const layerConfig = args.layer.getConfig();\n const forwDict: serialization.ConfigDict = {};\n forwDict['className'] = args.layer.getClassName();\n forwDict['config'] = layerConfig;\n this.forwardLayer = deserialize(forwDict) as RNN;\n layerConfig['goBackwards'] =\n layerConfig['goBackwards'] === true ? false : true;\n const backDict: serialization.ConfigDict = {};\n backDict['className'] = args.layer.getClassName();\n backDict['config'] = layerConfig;\n this.backwardLayer = deserialize(backDict) as RNN;\n this.forwardLayer.name = 'forward_' + this.forwardLayer.name;\n this.backwardLayer.name = 'backward_' + this.backwardLayer.name;\n\n this.mergeMode = args.mergeMode === undefined ?\n DEFAULT_BIDIRECTIONAL_MERGE_MODE :\n args.mergeMode;\n checkBidirectionalMergeMode(this.mergeMode);\n if (args.weights) {\n throw new NotImplementedError(\n 'weights support is not implemented for Bidirectional layer yet.');\n }\n this._stateful = args.layer.stateful;\n this.returnSequences = args.layer.returnSequences;\n this.returnState = args.layer.returnState;\n this.supportsMasking = true;\n this._trainable = true;\n this.inputSpec = args.layer.inputSpec;\n this.numConstants = null;\n }\n\n override get trainable(): boolean {\n return this._trainable;\n }\n\n override set trainable(value: boolean) {\n // Porting Note: the check of `this.layer` here is necessary due to the\n // way the `constructor` of this class is written (see Porting Note\n // above).\n this._trainable = value;\n if (this.forwardLayer != null) {\n this.forwardLayer.trainable = value;\n }\n if (this.backwardLayer != null) {\n this.backwardLayer.trainable = value;\n }\n }\n\n override getWeights(): Tensor[] {\n return this.forwardLayer.getWeights().concat(\n this.backwardLayer.getWeights());\n }\n\n override setWeights(weights: Tensor[]): void {\n const numWeights = weights.length;\n const numeightsOver2 = Math.floor(numWeights / 2);\n this.forwardLayer.setWeights(weights.slice(0, numeightsOver2));\n this.backwardLayer.setWeights(weights.slice(numeightsOver2));\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n let layerShapes: Shape|Shape[] =\n this.forwardLayer.computeOutputShape(inputShape);\n if (!(Array.isArray(layerShapes) && Array.isArray(layerShapes[0]))) {\n layerShapes = [layerShapes as Shape];\n }\n layerShapes = layerShapes as Shape[];\n\n let outputShape: Shape;\n let outputShapes: Shape[];\n let stateShape: Shape[];\n if (this.returnState) {\n stateShape = layerShapes.slice(1);\n outputShape = layerShapes[0];\n } else {\n outputShape = layerShapes[0];\n }\n outputShape = outputShape;\n if (this.mergeMode === 'concat') {\n outputShape[outputShape.length - 1] *= 2;\n outputShapes = [outputShape];\n } else if (this.mergeMode == null) {\n outputShapes = [outputShape, outputShape.slice()];\n } else {\n outputShapes = [outputShape];\n }\n\n if (this.returnState) {\n if (this.mergeMode == null) {\n return outputShapes.concat(stateShape).concat(stateShape.slice());\n }\n return [outputShape].concat(stateShape).concat(stateShape.slice());\n }\n return generic_utils.singletonOrArray(outputShapes);\n }\n\n override apply(\n inputs: Tensor|Tensor[]|SymbolicTensor|SymbolicTensor[],\n kwargs?: 
Kwargs): Tensor|Tensor[]|SymbolicTensor|SymbolicTensor[] {\n let initialState: Tensor[]|SymbolicTensor[] =\n kwargs == null ? null : kwargs['initialState'];\n let constants: Tensor[]|SymbolicTensor[] =\n kwargs == null ? null : kwargs['constants'];\n if (kwargs == null) {\n kwargs = {};\n }\n const standardized =\n standardizeArgs(inputs, initialState, constants, this.numConstants);\n inputs = standardized.inputs;\n initialState = standardized.initialState;\n constants = standardized.constants;\n\n if (Array.isArray(inputs)) {\n initialState = (inputs as Tensor[] | SymbolicTensor[]).slice(1);\n inputs = (inputs as Tensor[] | SymbolicTensor[])[0];\n }\n\n if ((initialState == null || initialState.length === 0) &&\n constants == null) {\n return super.apply(inputs, kwargs);\n }\n const additionalInputs: Array = [];\n const additionalSpecs: InputSpec[] = [];\n if (initialState != null) {\n const numStates = initialState.length;\n if (numStates % 2 > 0) {\n throw new ValueError(\n 'When passing `initialState` to a Bidrectional RNN, ' +\n 'the state should be an Array containing the states of ' +\n 'the underlying RNNs.');\n }\n kwargs['initialState'] = initialState;\n additionalInputs.push(...initialState);\n const stateSpecs = (initialState as Array)\n .map(state => new InputSpec({shape: state.shape}));\n this.forwardLayer.stateSpec = stateSpecs.slice(0, numStates / 2);\n this.backwardLayer.stateSpec = stateSpecs.slice(numStates / 2);\n additionalSpecs.push(...stateSpecs);\n }\n if (constants != null) {\n throw new NotImplementedError(\n 'Support for constants in Bidirectional layers is not ' +\n 'implemented yet.');\n }\n\n const isSymbolicTensor = additionalInputs[0] instanceof SymbolicTensor;\n for (const tensor of additionalInputs) {\n if (tensor instanceof SymbolicTensor !== isSymbolicTensor) {\n throw new ValueError(\n 'The initial state of a Bidirectional layer cannot be ' +\n 'specified as a mix of symbolic and non-symbolic tensors');\n }\n }\n\n if (isSymbolicTensor) {\n // Compute the full input and specs, including the states.\n const fullInput = [inputs].concat(additionalInputs);\n const fullInputSpec = this.inputSpec.concat(additionalSpecs);\n // Perform the call temporarily and replace inputSpec.\n // Note: with initial states symbolic calls and non-symbolic calls to\n // this method differ in how the initial states are passed. For\n // symbolic calls, the initial states are passed in the first arg, as\n // an Array of SymbolicTensors; for non-symbolic calls, they are\n // passed in the second arg as a part of the kwargs. 
Hence the need to\n // temporarily modify inputSpec here.\n // TODO(cais): Make refactoring so that this hacky code below is no\n // longer needed.\n const originalInputSpec = this.inputSpec;\n this.inputSpec = fullInputSpec;\n const output =\n super.apply(fullInput as Tensor[] | SymbolicTensor[], kwargs);\n this.inputSpec = originalInputSpec;\n return output;\n } else {\n return super.apply(inputs, kwargs);\n }\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor|Tensor[] {\n return tidy(() => {\n const initialState = kwargs['initialState'];\n\n let y: Tensor|Tensor[];\n let yRev: Tensor|Tensor[];\n if (initialState == null) {\n y = this.forwardLayer.call(inputs, kwargs);\n yRev = this.backwardLayer.call(inputs, kwargs);\n } else {\n const forwardState = initialState.slice(0, initialState.length / 2);\n const backwardState = initialState.slice(initialState.length / 2);\n y = this.forwardLayer.call(\n inputs, Object.assign(kwargs, {initialState: forwardState}));\n yRev = this.backwardLayer.call(\n inputs, Object.assign(kwargs, {initialState: backwardState}));\n }\n\n let states: Tensor[];\n if (this.returnState) {\n if (Array.isArray(y)) {\n states = y.slice(1).concat((yRev as Tensor[]).slice(1));\n } else {\n }\n y = (y as Tensor[])[0];\n yRev = (yRev as Tensor[])[0];\n }\n\n if (this.returnSequences) {\n yRev = tfc.reverse(yRev as Tensor, 1);\n }\n\n let output: Tensor|Tensor[];\n if (this.mergeMode === 'concat') {\n output = K.concatenate([y as Tensor, yRev as Tensor]);\n } else if (this.mergeMode === 'sum') {\n output = tfc.add(y as Tensor, yRev as Tensor);\n } else if (this.mergeMode === 'ave') {\n output = tfc.mul(.5, tfc.add(y as Tensor, yRev as Tensor));\n } else if (this.mergeMode === 'mul') {\n output = tfc.mul(y as Tensor, yRev as Tensor);\n } else if (this.mergeMode == null) {\n output = [y as Tensor, yRev as Tensor];\n }\n\n // TODO(cais): Properly set learning phase.\n if (this.returnState) {\n if (this.mergeMode == null) {\n return (output as Tensor[]).concat(states);\n }\n return [output as Tensor].concat(states);\n }\n return output;\n });\n }\n\n override resetStates(states?: Tensor|Tensor[]): void {\n this.forwardLayer.resetStates();\n this.backwardLayer.resetStates();\n }\n\n override build(inputShape: Shape|Shape[]): void {\n nameScope(this.forwardLayer.name, () => {\n this.forwardLayer.build(inputShape);\n });\n nameScope(this.backwardLayer.name, () => {\n this.backwardLayer.build(inputShape);\n });\n this.built = true;\n }\n\n override computeMask(inputs: Tensor|Tensor[], mask?: Tensor|Tensor[]): Tensor\n |Tensor[] {\n if (Array.isArray(mask)) {\n mask = mask[0];\n }\n let outputMask: Tensor|Tensor[];\n if (this.returnSequences) {\n if (this.mergeMode == null) {\n outputMask = [mask, mask];\n } else {\n outputMask = mask;\n }\n } else {\n if (this.mergeMode == null) {\n outputMask = [null, null];\n } else {\n outputMask = null;\n }\n }\n if (this.returnState) {\n const states = this.forwardLayer.states;\n const stateMask: Tensor[] = states.map(state => null);\n if (Array.isArray(outputMask)) {\n return outputMask.concat(stateMask).concat(stateMask);\n } else {\n return [outputMask].concat(stateMask).concat(stateMask);\n }\n } else {\n return outputMask;\n }\n }\n\n override get trainableWeights(): LayerVariable[] {\n return this.forwardLayer.trainableWeights.concat(\n this.backwardLayer.trainableWeights);\n }\n\n override get nonTrainableWeights(): LayerVariable[] {\n return this.forwardLayer.nonTrainableWeights.concat(\n 
this.backwardLayer.nonTrainableWeights);\n }\n\n // TODO(cais): Implement constraints().\n\n override setFastWeightInitDuringBuild(value: boolean) {\n super.setFastWeightInitDuringBuild(value);\n if (this.forwardLayer != null) {\n this.forwardLayer.setFastWeightInitDuringBuild(value);\n }\n if (this.backwardLayer != null) {\n this.backwardLayer.setFastWeightInitDuringBuild(value);\n }\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {\n 'mergeMode': this.mergeMode,\n };\n // TODO(cais): Add logic for `numConstants` once the property is added.\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n\n /** @nocollapse */\n static override fromConfig(\n cls: serialization.SerializableConstructor,\n config: serialization.ConfigDict): T {\n const rnnLayer =\n deserialize(config['layer'] as serialization.ConfigDict) as RNN;\n delete config['layer'];\n // TODO(cais): Add logic for `numConstants` once the property is added.\n if (config['numConstants'] != null) {\n throw new NotImplementedError(\n `Deserialization of a Bidirectional layer with numConstants ` +\n `present is not supported yet.`);\n }\n // tslint:disable-next-line:no-any\n const newConfig: {[key: string]: any} = config;\n newConfig['layer'] = rnnLayer;\n return new cls(newConfig);\n }\n}\nserialization.registerClass(Bidirectional);\n","/**\n * @license\n * Copyright 2022 CodeSmith LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\nimport {LayerArgs, Layer} from '../../engine/topology';\nimport { serialization, Tensor, mul, add, tidy } from '@tensorflow/tfjs-core';\nimport { getExactlyOneTensor } from '../../utils/types_utils';\nimport * as K from '../../backend/tfjs_backend';\nimport { Kwargs } from '../../types';\n\nexport declare interface RescalingArgs extends LayerArgs {\n scale: number;\n offset?: number;\n}\n\n/**\n * Preprocessing Rescaling Layer\n *\n * This rescales images by a scaling and offset factor\n */\nexport class Rescaling extends Layer {\n /** @nocollapse */\n static className = 'Rescaling';\n private readonly scale: number;\n private readonly offset: number;\n constructor(args: RescalingArgs) {\n super(args);\n\n this.scale = args.scale;\n\n if(args.offset) {\n this.offset = args.offset;\n } else {\n this.offset = 0;\n }\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {\n 'scale': this.scale,\n 'offset': this.offset\n };\n\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor[]|Tensor {\n return tidy(() => {\n inputs = getExactlyOneTensor(inputs);\n if(inputs.dtype !== 'float32') {\n inputs = K.cast(inputs, 'float32');\n }\n return add(mul(inputs, this.scale), this.offset);\n });\n }\n}\n\nserialization.registerClass(Rescaling);\n","/**\n * @license\n * Copyright 2022 CodeSmith LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\nimport {serialization,DataType,unstack,stack,tensor,Tensor,Tensor1D,Tensor2D, Tensor3D, Tensor4D, tidy, range, image} from 
'@tensorflow/tfjs-core';\nimport {getExactlyOneShape, getExactlyOneTensor} from '../../utils/types_utils';\nimport {LayerArgs, Layer} from '../../engine/topology';\nimport {Kwargs} from '../../types';\nimport {Shape} from '../../keras_format/common';\nimport * as K from '../../backend/tfjs_backend';\n\nconst {resizeBilinear, cropAndResize} = image;\n\nexport declare interface CenterCropArgs extends LayerArgs{\n height: number;\n width: number;\n}\n\nexport class CenterCrop extends Layer {\n /** @nocollapse */\n static className = 'CenterCrop';\n private readonly height: number;\n private readonly width: number;\n constructor(args: CenterCropArgs) {\n super(args);\n this.height = args.height;\n this.width = args.width;\n }\n\n centerCrop(inputs: Tensor3D | Tensor4D, hBuffer: number, wBuffer: number,\n height: number, width: number, inputHeight: number,\n inputWidth: number, dtype: DataType): Tensor | Tensor[] {\n\n return tidy(() => {\n let input: Tensor4D;\n let isRank3 = false;\n const top = hBuffer / inputHeight;\n const left = wBuffer / inputWidth;\n const bottom = ((height) + hBuffer) / inputHeight;\n const right = ((width) + wBuffer) / inputWidth;\n const bound = [top, left, bottom, right];\n const boxesArr = [];\n\n if(inputs.rank === 3) {\n isRank3 = true;\n input = stack([inputs]) as Tensor4D;\n } else {\n input = inputs as Tensor4D;\n }\n\n for (let i = 0; i < input.shape[0]; i++) {\n boxesArr.push(bound);\n }\n\n const boxes: Tensor2D = tensor(boxesArr, [boxesArr.length, 4]);\n const boxInd: Tensor1D = range(0, boxesArr.length, 1, 'int32');\n\n const cropSize: [number, number] = [height, width];\n const cropped = cropAndResize(input, boxes, boxInd, cropSize, 'nearest');\n\n if(isRank3) {\n return K.cast(getExactlyOneTensor(unstack(cropped)), dtype);\n }\n return K.cast(cropped, dtype);\n });\n\n }\n\n upsize(inputs : Tensor3D | Tensor4D, height: number,\n width: number, dtype: DataType): Tensor | Tensor[] {\n\n return tidy(() => {\n const outputs = resizeBilinear(inputs, [height, width]);\n return K.cast(outputs, dtype);\n });\n\n}\n\n override call(inputs: Tensor3D | Tensor4D , kwargs: Kwargs):\n Tensor[] | Tensor {\n return tidy(() => {\n const rankedInputs = getExactlyOneTensor(inputs) as Tensor3D | Tensor4D;\n const dtype = rankedInputs.dtype;\n const inputShape = rankedInputs.shape;\n const inputHeight = inputShape[inputShape.length - 3];\n const inputWidth = inputShape[inputShape.length - 2];\n\n let hBuffer = 0;\n if (inputHeight !== this.height) {\n hBuffer = Math.floor((inputHeight - this.height) / 2);\n }\n\n let wBuffer = 0;\n if (inputWidth !== this.width) {\n wBuffer = Math.floor((inputWidth - this.width) / 2);\n\n if (wBuffer === 0) {\n wBuffer = 1;\n }\n }\n\n if(hBuffer >= 0 && wBuffer >= 0) {\n return this.centerCrop(rankedInputs, hBuffer, wBuffer,\n this.height, this.width, inputHeight,\n inputWidth, dtype);\n } else {\n return this.upsize(inputs, this.height, this.width, dtype);\n }\n });\n\n }\n\n override getConfig(): serialization.ConfigDict{\n\n const config: serialization.ConfigDict = {\n 'height' : this.height,\n 'width' : this.width\n };\n\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n\n override computeOutputShape(inputShape: Shape | Shape[]): Shape | Shape[] {\n inputShape = getExactlyOneShape(inputShape);\n const hAxis = inputShape.length - 3;\n const wAxis = inputShape.length - 2;\n inputShape[hAxis] = this.height;\n inputShape[wAxis] = this.width;\n return inputShape;\n 
}\n}\n\nserialization.registerClass(CenterCrop);\n","/**\n * @license\n * Copyright 2022 CodeSmith LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\nimport { LayerArgs, Layer } from '../../engine/topology';\nimport { serialization, Tensor, tidy, Tensor1D, Tensor2D} from '@tensorflow/tfjs-core';\nimport { greater, greaterEqual, max, min} from '@tensorflow/tfjs-core';\nimport { Shape } from '../../keras_format/common';\nimport { getExactlyOneShape, getExactlyOneTensor } from '../../utils/types_utils';\nimport { Kwargs } from '../../types';\nimport { ValueError } from '../../errors';\nimport * as K from '../../backend/tfjs_backend';\nimport * as utils from './preprocessing_utils';\nimport { OutputMode } from './preprocessing_utils';\n\nexport declare interface CategoryEncodingArgs extends LayerArgs {\n numTokens: number;\n outputMode?: OutputMode;\n }\n\nexport class CategoryEncoding extends Layer {\n /** @nocollapse */\n static className = 'CategoryEncoding';\n private readonly numTokens: number;\n private readonly outputMode: OutputMode;\n\n constructor(args: CategoryEncodingArgs) {\n super(args);\n this.numTokens = args.numTokens;\n\n if(args.outputMode) {\n this.outputMode = args.outputMode;\n } else {\n this.outputMode = 'multiHot';\n }\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {\n 'numTokens': this.numTokens,\n 'outputMode': this.outputMode,\n };\n\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n inputShape = getExactlyOneShape(inputShape);\n\n if(inputShape == null) {\n return [this.numTokens];\n }\n\n if(this.outputMode === 'oneHot' && inputShape[inputShape.length - 1] !== 1){\n inputShape.push(this.numTokens);\n return inputShape;\n }\n\n inputShape[inputShape.length - 1] = this.numTokens;\n return inputShape;\n }\n\n override call(inputs: Tensor|Tensor[], kwargs: Kwargs): Tensor[]|Tensor {\n return tidy(() => {\n\n inputs = getExactlyOneTensor(inputs);\n if(inputs.dtype !== 'int32') {\n inputs = K.cast(inputs, 'int32');\n }\n\n let countWeights: Tensor1D | Tensor2D;\n\n if((typeof kwargs['countWeights']) !== 'undefined') {\n\n if(this.outputMode !== 'count') {\n throw new ValueError(\n `countWeights is not used when outputMode !== count.\n Received countWeights=${kwargs['countWeights']}`);\n }\n\n countWeights\n = getExactlyOneTensor(kwargs['countWeights']) as Tensor1D|Tensor2D;\n }\n\n const maxValue = max(inputs);\n const minValue = min(inputs);\n const greaterEqualMax = greater(this.numTokens, maxValue)\n .bufferSync().get(0);\n\n const greaterMin = greaterEqual(minValue, 0).bufferSync().get(0);\n\n if(!(greaterEqualMax && greaterMin)) {\n\n throw new ValueError('Input values must be between 0 < values <='\n + ` numTokens with numTokens=${this.numTokens}`);\n }\n\n return utils.encodeCategoricalInputs(inputs,\n this.outputMode, this.numTokens, countWeights);\n });\n }\n}\n\nserialization.registerClass(CategoryEncoding);\n","/**\n * @license\n * Copyright 2022 CodeSmith LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * 
=============================================================================\n */\n\nimport { Tensor, denseBincount, Tensor1D, Tensor2D, TensorLike, mul} from '@tensorflow/tfjs-core';\nimport { getExactlyOneTensor } from '../../utils/types_utils';\nimport { expandDims} from '@tensorflow/tfjs-core';\nimport { ValueError } from '../../errors';\nimport * as K from '../../backend/tfjs_backend';\n\nexport type OutputMode = 'int' | 'oneHot' | 'multiHot' | 'count' | 'tfIdf';\n\nexport function encodeCategoricalInputs(inputs: Tensor|Tensor[],\n outputMode: OutputMode,\n depth: number,\n weights?: Tensor1D|Tensor2D|TensorLike):\n Tensor|Tensor[] {\n\n let input = getExactlyOneTensor(inputs);\n\n if(input.dtype !== 'int32') {\n input = K.cast(input, 'int32');\n }\n\n if(outputMode === 'int') {\n return input;\n }\n\n const originalShape = input.shape;\n\n if(input.rank === 0) {\n input = expandDims(input, -1);\n }\n\n if(outputMode === 'oneHot') {\n if(input.shape[input.shape.length - 1] !== 1) {\n input = expandDims(input, -1);\n }\n }\n\n if(input.rank > 2) {\n throw new ValueError(`When outputMode is not int, maximum output rank is 2`\n + ` Received outputMode ${outputMode} and input shape ${originalShape}`\n + ` which would result in output rank ${input.rank}.`);\n }\n\n const binaryOutput = ['multiHot', 'oneHot'].includes(outputMode);\n\n const denseBincountInput = input as Tensor1D | Tensor2D;\n\n let binCounts: Tensor1D | Tensor2D;\n\n if ((typeof weights) !== 'undefined' && outputMode === 'count') {\n binCounts = denseBincount(denseBincountInput, weights, depth, binaryOutput);\n } else {\n binCounts = denseBincount(denseBincountInput, [], depth, binaryOutput);\n }\n\n if(outputMode !== 'tfIdf') {\n return binCounts;\n }\n\n if (weights) {\n return mul(binCounts, weights);\n } else {\n throw new ValueError(\n `When outputMode is 'tfIdf', weights must be provided.`\n );\n }\n}\n","/**\n * @license\n * Copyright 2022 CodeSmith LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\nimport {image, Rank, serialization, Tensor, tidy} from '@tensorflow/tfjs-core'; // mul, add\n\nimport {Layer, LayerArgs} from '../../engine/topology';\nimport {ValueError} from '../../errors';\nimport {Shape} from '../../keras_format/common';\nimport {Kwargs} from '../../types';\nimport {getExactlyOneShape} from '../../utils/types_utils'; //, getExactlyOneTensor\n\n// tf methods unimplemented in tfjs: 'bicubic', 'area', 'lanczos3', 'lanczos5',\n// 'gaussian', 'mitchellcubic'\nconst INTERPOLATION_KEYS = ['bilinear', 'nearest'] as const;\nconst INTERPOLATION_METHODS = new Set(INTERPOLATION_KEYS);\ntype InterpolationType = typeof INTERPOLATION_KEYS[number];\n\nexport declare interface ResizingArgs extends LayerArgs {\n height: number;\n width: number;\n interpolation?: InterpolationType; // default = 'bilinear';\n cropToAspectRatio?: boolean; // default = false;\n}\n\n/**\n * Preprocessing Resizing Layer\n *\n * This resizes images by a scaling and offset factor\n */\n\nexport class Resizing extends Layer {\n /** @nocollapse */\n static className = 'Resizing';\n private readonly height: number;\n private readonly width: number;\n // method of interpolation to be used; default = \"bilinear\";\n private readonly interpolation: InterpolationType;\n // toggle whether the aspect ratio should be preserved; default = false;\n private 
readonly cropToAspectRatio: boolean;\n\n constructor(args: ResizingArgs) {\n super(args);\n\n this.height = args.height;\n this.width = args.width;\n\n if (args.interpolation) {\n if (INTERPOLATION_METHODS.has(args.interpolation)) {\n this.interpolation = args.interpolation;\n } else {\n throw new ValueError(`Invalid interpolation parameter: ${\n args.interpolation} is not implemented`);\n }\n } else {\n this.interpolation = 'bilinear';\n }\n this.cropToAspectRatio = Boolean(args.cropToAspectRatio);\n }\n\n override computeOutputShape(inputShape: Shape|Shape[]): Shape|Shape[] {\n inputShape = getExactlyOneShape(inputShape);\n const numChannels = inputShape[2];\n return [this.height, this.width, numChannels];\n }\n\n override getConfig(): serialization.ConfigDict {\n const config: serialization.ConfigDict = {\n 'height': this.height,\n 'width': this.width,\n 'interpolation': this.interpolation,\n 'cropToAspectRatio': this.cropToAspectRatio\n };\n\n const baseConfig = super.getConfig();\n Object.assign(config, baseConfig);\n return config;\n }\n\n override call(inputs: Tensor|Tensor, kwargs: Kwargs):\n Tensor[]|Tensor {\n return tidy(() => {\n const size: [number, number] = [this.height, this.width];\n if (this.interpolation === 'bilinear') {\n return image.resizeBilinear(inputs, size, !this.cropToAspectRatio);\n } else if (this.interpolation === 'nearest') {\n return image.resizeNearestNeighbor(\n inputs, size, !this.cropToAspectRatio);\n } else {\n throw new Error(`Interpolation is ${this.interpolation} but only ${[...INTERPOLATION_METHODS]} are supported`);\n }\n });\n }\n}\n\nserialization.registerClass(Resizing);\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\nimport {InputLayer, InputLayerArgs} from './engine/input_layer';\nimport {Layer, LayerArgs} from './engine/topology';\nimport {input} from './exports';\nimport {ELU, ELULayerArgs, LeakyReLU, LeakyReLULayerArgs, PReLU, PReLULayerArgs, ReLU, ReLULayerArgs, Softmax, SoftmaxLayerArgs, ThresholdedReLU, ThresholdedReLULayerArgs} from './layers/advanced_activations';\nimport {Conv1D, Conv2D, Conv2DTranspose, Conv3D, ConvLayerArgs, Cropping2D, Cropping2DLayerArgs, SeparableConv2D, SeparableConvLayerArgs, UpSampling2D, UpSampling2DLayerArgs, Conv3DTranspose} from './layers/convolutional';\nimport {DepthwiseConv2D, DepthwiseConv2DLayerArgs} from './layers/convolutional_depthwise';\nimport {ConvLSTM2D, ConvLSTM2DArgs, ConvLSTM2DCell, ConvLSTM2DCellArgs} from './layers/convolutional_recurrent';\nimport {Activation, ActivationLayerArgs, Dense, DenseLayerArgs, Dropout, DropoutLayerArgs, Flatten, FlattenLayerArgs, Masking, MaskingArgs, Permute, PermuteLayerArgs, RepeatVector, RepeatVectorLayerArgs, Reshape, ReshapeLayerArgs, SpatialDropout1D, SpatialDropout1DLayerConfig} from './layers/core';\nimport {Embedding, EmbeddingLayerArgs} from './layers/embeddings';\nimport {Add, Average, Concatenate, ConcatenateLayerArgs, Dot, DotLayerArgs, Maximum, Minimum, Multiply} from './layers/merge';\nimport {AlphaDropout, AlphaDropoutArgs, GaussianDropout, GaussianDropoutArgs, GaussianNoise, GaussianNoiseArgs} from './layers/noise';\nimport {BatchNormalization, BatchNormalizationLayerArgs, LayerNormalization, LayerNormalizationLayerArgs} from './layers/normalization';\nimport {ZeroPadding2D, ZeroPadding2DLayerArgs} 
from './layers/padding';\nimport {AveragePooling1D, AveragePooling2D, AveragePooling3D, GlobalAveragePooling1D, GlobalAveragePooling2D, GlobalMaxPooling1D, GlobalMaxPooling2D, GlobalPooling2DLayerArgs, MaxPooling1D, MaxPooling2D, MaxPooling3D, Pooling1DLayerArgs, Pooling2DLayerArgs, Pooling3DLayerArgs} from './layers/pooling';\nimport {GRU, GRUCell, GRUCellLayerArgs, GRULayerArgs, LSTM, LSTMCell, LSTMCellLayerArgs, LSTMLayerArgs, RNN, RNNCell, RNNLayerArgs, SimpleRNN, SimpleRNNCell, SimpleRNNCellLayerArgs, SimpleRNNLayerArgs, StackedRNNCells, StackedRNNCellsArgs} from './layers/recurrent';\nimport {Bidirectional, BidirectionalLayerArgs, TimeDistributed, WrapperLayerArgs} from './layers/wrappers';\nimport {Rescaling, RescalingArgs} from './layers/preprocessing/image_preprocessing';\nimport {CenterCrop, CenterCropArgs} from './layers/preprocessing/center_crop';\nimport {CategoryEncoding, CategoryEncodingArgs} from './layers/preprocessing/category_encoding';\nimport {Resizing, ResizingArgs} from './layers/preprocessing/image_resizing';\n\n// TODO(cais): Add doc string to all the public static functions in this\n// class; include exectuable JavaScript code snippets where applicable\n// (b/74074458).\n\n// Input Layer.\n/**\n * An input layer is an entry point into a `tf.LayersModel`.\n *\n * `InputLayer` is generated automatically for `tf.Sequential` models by\n * specifying the `inputshape` or `batchInputShape` for the first layer. It\n * should not be specified explicitly. However, it can be useful sometimes,\n * e.g., when constructing a sequential model from a subset of another\n * sequential model's layers. Like the code snippet below shows.\n *\n * ```js\n * // Define a model which simply adds two inputs.\n * const model1 = tf.sequential();\n * model1.add(tf.layers.dense({inputShape: [4], units: 3, activation: 'relu'}));\n * model1.add(tf.layers.dense({units: 1, activation: 'sigmoid'}));\n * model1.summary();\n * model1.predict(tf.zeros([1, 4])).print();\n *\n * // Construct another model, reusing the second layer of `model1` while\n * // not using the first layer of `model1`. Note that you cannot add the second\n * // layer of `model` directly as the first layer of the new sequential model,\n * // because doing so will lead to an error related to the fact that the layer\n * // is not an input layer. Instead, you need to create an `inputLayer` and add\n * // it to the new sequential model before adding the reused layer.\n * const model2 = tf.sequential();\n * // Use an inputShape that matches the input shape of `model1`'s second\n * // layer.\n * model2.add(tf.layers.inputLayer({inputShape: [3]}));\n * model2.add(model1.layers[1]);\n * model2.summary();\n * model2.predict(tf.zeros([1, 3])).print();\n * ```\n *\n * @doc {heading: 'Layers', subheading: 'Inputs', namespace: 'layers'}\n */\nexport function inputLayer(args: InputLayerArgs) {\n return new InputLayer(args);\n}\n\n// Advanced Activation Layers.\n\n/**\n * Exponential Linear Unit (ELU).\n *\n * It follows:\n * `f(x) = alpha * (exp(x) - 1.) for x < 0`,\n * `f(x) = x for x >= 0`.\n *\n * Input shape:\n * Arbitrary. 
Use the configuration `inputShape` when using this layer as the\n * first layer in a model.\n *\n * Output shape:\n * Same shape as the input.\n *\n * References:\n * - [Fast and Accurate Deep Network Learning by Exponential Linear Units\n * (ELUs)](https://arxiv.org/abs/1511.07289v1)\n *\n * @doc {\n * heading: 'Layers',\n * subheading: 'Advanced Activation',\n * namespace: 'layers'\n * }\n */\nexport function elu(args?: ELULayerArgs) {\n return new ELU(args);\n}\n\n/**\n * Rectified Linear Unit activation function.\n *\n * Input shape:\n * Arbitrary. Use the config field `inputShape` (Array of integers, does\n * not include the sample axis) when using this layer as the first layer\n * in a model.\n *\n * Output shape:\n * Same shape as the input.\n *\n * @doc {\n * heading: 'Layers',\n * subheading: 'Advanced Activation',\n * namespace: 'layers'\n * }\n */\nexport function reLU(args?: ReLULayerArgs) {\n return new ReLU(args);\n}\n\n/**\n * Leaky version of a rectified linear unit.\n *\n * It allows a small gradient when the unit is not active:\n * `f(x) = alpha * x for x < 0.`\n * `f(x) = x for x >= 0.`\n *\n * Input shape:\n * Arbitrary. Use the configuration `inputShape` when using this layer as the\n * first layer in a model.\n *\n * Output shape:\n * Same shape as the input.\n *\n * @doc {\n * heading: 'Layers',\n * subheading: 'Advanced Activation',\n * namespace: 'layers'\n * }\n */\nexport function leakyReLU(args?: LeakyReLULayerArgs) {\n return new LeakyReLU(args);\n}\n\n/**\n * Parameterized version of a leaky rectified linear unit.\n *\n * It follows\n * `f(x) = alpha * x for x < 0.`\n * `f(x) = x for x >= 0.`\n * wherein `alpha` is a trainable weight.\n *\n * Input shape:\n * Arbitrary. Use the configuration `inputShape` when using this layer as the\n * first layer in a model.\n *\n * Output shape:\n * Same shape as the input.\n *\n * @doc {\n * heading: 'Layers',\n * subheading: 'Advanced Activation',\n * namespace: 'layers'\n * }\n */\nexport function prelu(args?: PReLULayerArgs) {\n return new PReLU(args);\n}\n\n/**\n * Softmax activation layer.\n *\n * Input shape:\n * Arbitrary. Use the configuration `inputShape` when using this layer as the\n * first layer in a model.\n *\n * Output shape:\n * Same shape as the input.\n *\n * @doc {\n * heading: 'Layers',\n * subheading: 'Advanced Activation',\n * namespace: 'layers'\n * }\n */\nexport function softmax(args?: SoftmaxLayerArgs) {\n return new Softmax(args);\n}\n\n/**\n * Thresholded Rectified Linear Unit.\n *\n * It follows:\n * `f(x) = x for x > theta`,\n * `f(x) = 0 otherwise`.\n *\n * Input shape:\n * Arbitrary. 
Use the configuration `inputShape` when using this layer as the\n * first layer in a model.\n *\n * Output shape:\n * Same shape as the input.\n *\n * References:\n * - [Zero-Bias Autoencoders and the Benefits of Co-Adapting\n * Features](http://arxiv.org/abs/1402.3337)\n *\n * @doc {\n * heading: 'Layers',\n * subheading: 'Advanced Activation',\n * namespace: 'layers'\n * }\n */\nexport function thresholdedReLU(args?: ThresholdedReLULayerArgs) {\n return new ThresholdedReLU(args);\n}\n\n// Convolutional Layers.\n\n/**\n * 1D convolution layer (e.g., temporal convolution).\n *\n * This layer creates a convolution kernel that is convolved\n * with the layer input over a single spatial (or temporal) dimension\n * to produce a tensor of outputs.\n *\n * If `use_bias` is True, a bias vector is created and added to the outputs.\n *\n * If `activation` is not `null`, it is applied to the outputs as well.\n *\n * When using this layer as the first layer in a model, provide an\n * `inputShape` argument `Array` or `null`.\n *\n * For example, `inputShape` would be:\n * - `[10, 128]` for sequences of 10 vectors of 128-dimensional vectors\n * - `[null, 128]` for variable-length sequences of 128-dimensional vectors.\n *\n * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}\n */\nexport function conv1d(args: ConvLayerArgs) {\n return new Conv1D(args);\n}\n\n/**\n * 2D convolution layer (e.g. spatial convolution over images).\n *\n * This layer creates a convolution kernel that is convolved\n * with the layer input to produce a tensor of outputs.\n *\n * If `useBias` is True, a bias vector is created and added to the outputs.\n *\n * If `activation` is not `null`, it is applied to the outputs as well.\n *\n * When using this layer as the first layer in a model,\n * provide the keyword argument `inputShape`\n * (Array of integers, does not include the sample axis),\n * e.g. `inputShape=[128, 128, 3]` for 128x128 RGB pictures\n * in `dataFormat='channelsLast'`.\n *\n * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}\n */\nexport function conv2d(args: ConvLayerArgs) {\n return new Conv2D(args);\n}\n\n/**\n * Transposed convolutional layer (sometimes called Deconvolution).\n *\n * The need for transposed convolutions generally arises\n * from the desire to use a transformation going in the opposite direction of\n * a normal convolution, i.e., from something that has the shape of the output\n * of some convolution to something that has the shape of its input while\n * maintaining a connectivity pattern that is compatible with said\n * convolution.\n *\n * When using this layer as the first layer in a model, provide the\n * configuration `inputShape` (`Array` of integers, does not include the\n * sample axis), e.g., `inputShape: [128, 128, 3]` for 128x128 RGB pictures in\n * `dataFormat: 'channelsLast'`.\n *\n * Input shape:\n * 4D tensor with shape:\n * `[batch, channels, rows, cols]` if `dataFormat` is `'channelsFirst'`.\n * or 4D tensor with shape\n * `[batch, rows, cols, channels]` if `dataFormat` is `'channelsLast'`.\n *\n * Output shape:\n * 4D tensor with shape:\n * `[batch, filters, newRows, newCols]` if `dataFormat` is\n * `'channelsFirst'`. 
or 4D tensor with shape:\n * `[batch, newRows, newCols, filters]` if `dataFormat` is `'channelsLast'`.\n *\n * References:\n * - [A guide to convolution arithmetic for deep\n * learning](https://arxiv.org/abs/1603.07285v1)\n * - [Deconvolutional\n * Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf)\n *\n * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}\n */\nexport function conv2dTranspose(args: ConvLayerArgs) {\n return new Conv2DTranspose(args);\n}\n\n/**\n * 3D convolution layer (e.g. spatial convolution over volumes).\n *\n * This layer creates a convolution kernel that is convolved\n * with the layer input to produce a tensor of outputs.\n *\n * If `useBias` is True, a bias vector is created and added to the outputs.\n *\n * If `activation` is not `null`, it is applied to the outputs as well.\n *\n * When using this layer as the first layer in a model,\n * provide the keyword argument `inputShape`\n * (Array of integers, does not include the sample axis),\n * e.g. `inputShape=[128, 128, 128, 1]` for 128x128x128 grayscale volumes\n * in `dataFormat='channelsLast'`.\n *\n * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}\n */\nexport function conv3d(args: ConvLayerArgs) {\n return new Conv3D(args);\n}\n\nexport function conv3dTranspose(args: ConvLayerArgs): Layer {\n return new Conv3DTranspose(args);\n}\n\n/**\n * Depthwise separable 2D convolution.\n *\n * Separable convolution consists of first performing\n * a depthwise spatial convolution\n * (which acts on each input channel separately)\n * followed by a pointwise convolution which mixes together the resulting\n * output channels. The `depthMultiplier` argument controls how many\n * output channels are generated per input channel in the depthwise step.\n *\n * Intuitively, separable convolutions can be understood as\n * a way to factorize a convolution kernel into two smaller kernels,\n * or as an extreme version of an Inception block.\n *\n * Input shape:\n * 4D tensor with shape:\n * `[batch, channels, rows, cols]` if data_format='channelsFirst'\n * or 4D tensor with shape:\n * `[batch, rows, cols, channels]` if data_format='channelsLast'.\n *\n * Output shape:\n * 4D tensor with shape:\n * `[batch, filters, newRows, newCols]` if data_format='channelsFirst'\n * or 4D tensor with shape:\n * `[batch, newRows, newCols, filters]` if data_format='channelsLast'.\n * `rows` and `cols` values might have changed due to padding.\n *\n * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}\n */\nexport function separableConv2d(args: SeparableConvLayerArgs) {\n return new SeparableConv2D(args);\n}\n\n/**\n * Cropping layer for 2D input (e.g., image).\n *\n * This layer can crop an input\n * at the top, bottom, left and right side of an image tensor.\n *\n * Input shape:\n * 4D tensor with shape:\n * - If `dataFormat` is `\"channelsLast\"`:\n * `[batch, rows, cols, channels]`\n * - If `data_format` is `\"channels_first\"`:\n * `[batch, channels, rows, cols]`.\n *\n * Output shape:\n * 4D with shape:\n * - If `dataFormat` is `\"channelsLast\"`:\n * `[batch, croppedRows, croppedCols, channels]`\n * - If `dataFormat` is `\"channelsFirst\"`:\n * `[batch, channels, croppedRows, croppedCols]`.\n *\n * Examples\n * ```js\n *\n * const model = tf.sequential();\n * model.add(tf.layers.cropping2D({cropping:[[2, 2], [2, 2]],\n * inputShape: [128, 128, 3]}));\n * //now output shape is [batch, 124, 124, 3]\n * ```\n *\n * @doc {heading: 'Layers', subheading: 
'Convolutional', namespace: 'layers'}\n */\nexport function cropping2D(args: Cropping2DLayerArgs) {\n return new Cropping2D(args);\n}\n\n/**\n * Upsampling layer for 2D inputs.\n *\n * Repeats the rows and columns of the data\n * by size[0] and size[1] respectively.\n *\n *\n * Input shape:\n * 4D tensor with shape:\n * - If `dataFormat` is `\"channelsLast\"`:\n * `[batch, rows, cols, channels]`\n * - If `dataFormat` is `\"channelsFirst\"`:\n * `[batch, channels, rows, cols]`\n *\n * Output shape:\n * 4D tensor with shape:\n * - If `dataFormat` is `\"channelsLast\"`:\n * `[batch, upsampledRows, upsampledCols, channels]`\n * - If `dataFormat` is `\"channelsFirst\"`:\n * `[batch, channels, upsampledRows, upsampledCols]`\n *\n *\n * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}\n */\nexport function upSampling2d(args: UpSampling2DLayerArgs) {\n return new UpSampling2D(args);\n}\n\n// Convolutional(depthwise) Layers.\n\n/**\n * Depthwise separable 2D convolution.\n *\n * Depthwise Separable convolutions consists in performing just the first step\n * in a depthwise spatial convolution (which acts on each input channel\n * separately). The `depthMultiplier` argument controls how many output channels\n * are generated per input channel in the depthwise step.\n *\n * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'}\n */\nexport function depthwiseConv2d(args: DepthwiseConv2DLayerArgs) {\n return new DepthwiseConv2D(args);\n}\n\n// Basic Layers.\n\n/**\n * Applies an activation function to an output.\n *\n * This layer applies element-wise activation function. Other layers, notably\n * `dense` can also apply activation functions. Use this isolated activation\n * function to extract the values before and after the\n * activation. 
For instance:\n *\n * ```js\n * const input = tf.input({shape: [5]});\n * const denseLayer = tf.layers.dense({units: 1});\n * const activationLayer = tf.layers.activation({activation: 'relu6'});\n *\n * // Obtain the output symbolic tensors by applying the layers in order.\n * const denseOutput = denseLayer.apply(input);\n * const activationOutput = activationLayer.apply(denseOutput);\n *\n * // Create the model based on the inputs.\n * const model = tf.model({\n * inputs: input,\n * outputs: [denseOutput, activationOutput]\n * });\n *\n * // Collect both outputs and print separately.\n * const [denseOut, activationOut] = model.predict(tf.randomNormal([6, 5]));\n * denseOut.print();\n * activationOut.print();\n * ```\n *\n * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}\n */\nexport function activation(args: ActivationLayerArgs) {\n return new Activation(args);\n}\n\n/**\n * Creates a dense (fully connected) layer.\n *\n * This layer implements the operation:\n * `output = activation(dot(input, kernel) + bias)`\n *\n * `activation` is the element-wise activation function\n * passed as the `activation` argument.\n *\n * `kernel` is a weights matrix created by the layer.\n *\n * `bias` is a bias vector created by the layer (only applicable if `useBias`\n * is `true`).\n *\n * **Input shape:**\n *\n * nD `tf.Tensor` with shape: `(batchSize, ..., inputDim)`.\n *\n * The most common situation would be\n * a 2D input with shape `(batchSize, inputDim)`.\n *\n * **Output shape:**\n *\n * nD tensor with shape: `(batchSize, ..., units)`.\n *\n * For instance, for a 2D input with shape `(batchSize, inputDim)`,\n * the output would have shape `(batchSize, units)`.\n *\n * Note: if the input to the layer has a rank greater than 2, then it is\n * flattened prior to the initial dot product with the kernel.\n *\n * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}\n */\nexport function dense(args: DenseLayerArgs) {\n return new Dense(args);\n}\n\n/**\n * Applies\n * [dropout](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf) to\n * the input.\n *\n * Dropout consists in randomly setting a fraction `rate` of input units to 0 at\n * each update during training time, which helps prevent overfitting.\n *\n * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}\n */\nexport function dropout(args: DropoutLayerArgs) {\n return new Dropout(args);\n}\n\n/**\n * Spatial 1D version of Dropout.\n *\n * This Layer type performs the same function as the Dropout layer, but it drops\n * entire 1D feature maps instead of individual elements. For example, if an\n * input example consists of 3 timesteps and the feature map for each timestep\n * has a size of 4, a `spatialDropout1d` layer may zero out the feature maps\n * of the 1st timesteps and 2nd timesteps completely while sparing all feature\n * elements of the 3rd timestep.\n *\n * If adjacent frames (timesteps) are strongly correlated (as is normally the\n * case in early convolution layers), regular dropout will not regularize the\n * activation and will otherwise just result in merely an effective learning\n * rate decrease. In this case, `spatialDropout1d` will help promote\n * independence among feature maps and should be used instead.\n *\n * **Arguments:**\n * rate: A floating-point number >=0 and <=1. 
Fraction of the input elements\n * to drop.\n *\n * **Input shape:**\n * 3D tensor with shape `(samples, timesteps, channels)`.\n *\n * **Output shape:**\n * Same as the input shape.\n *\n * References:\n * - [Efficient Object Localization Using Convolutional\n * Networks](https://arxiv.org/abs/1411.4280)\n *\n * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}\n */\nexport function spatialDropout1d(args: SpatialDropout1DLayerConfig) {\n return new SpatialDropout1D(args);\n}\n\n/**\n * Flattens the input. Does not affect the batch size.\n *\n * A `Flatten` layer flattens each batch in its inputs to 1D (making the output\n * 2D).\n *\n * For example:\n *\n * ```js\n * const input = tf.input({shape: [4, 3]});\n * const flattenLayer = tf.layers.flatten();\n * // Inspect the inferred output shape of the flatten layer, which\n * // equals `[null, 12]`. The 2nd dimension is 4 * 3, i.e., the result of the\n * // flattening. (The 1st dimension is the undermined batch size.)\n * console.log(JSON.stringify(flattenLayer.apply(input).shape));\n * ```\n *\n * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}\n */\nexport function flatten(args?: FlattenLayerArgs) {\n return new Flatten(args);\n}\n\n/**\n * Repeats the input n times in a new dimension.\n *\n * ```js\n * const model = tf.sequential();\n * model.add(tf.layers.repeatVector({n: 4, inputShape: [2]}));\n * const x = tf.tensor2d([[10, 20]]);\n * // Use the model to do inference on a data point the model hasn't see\n * model.predict(x).print();\n * // output shape is now [batch, 2, 4]\n * ```\n *\n * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}\n */\nexport function repeatVector(args: RepeatVectorLayerArgs) {\n return new RepeatVector(args);\n}\n\n/**\n * Reshapes an input to a certain shape.\n *\n * ```js\n * const input = tf.input({shape: [4, 3]});\n * const reshapeLayer = tf.layers.reshape({targetShape: [2, 6]});\n * // Inspect the inferred output shape of the Reshape layer, which\n * // equals `[null, 2, 6]`. (The 1st dimension is the undermined batch size.)\n * console.log(JSON.stringify(reshapeLayer.apply(input).shape));\n * ```\n *\n * Input shape:\n * Arbitrary, although all dimensions in the input shape must be fixed.\n * Use the configuration `inputShape` when using this layer as the\n * first layer in a model.\n *\n *\n * Output shape:\n * [batchSize, targetShape[0], targetShape[1], ...,\n * targetShape[targetShape.length - 1]].\n *\n * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}\n */\nexport function reshape(args: ReshapeLayerArgs) {\n return new Reshape(args);\n}\n\n/**\n * Permutes the dimensions of the input according to a given pattern.\n *\n * Useful for, e.g., connecting RNNs and convnets together.\n *\n * Example:\n *\n * ```js\n * const model = tf.sequential();\n * model.add(tf.layers.permute({\n * dims: [2, 1],\n * inputShape: [10, 64]\n * }));\n * console.log(model.outputShape);\n * // Now model's output shape is [null, 64, 10], where null is the\n * // unpermuted sample (batch) dimension.\n * ```\n *\n * Input shape:\n * Arbitrary. 
Use the configuration field `inputShape` when using this\n * layer as the first layer in a model.\n *\n * Output shape:\n * Same rank as the input shape, but with the dimensions re-ordered (i.e.,\n * permuted) according to the `dims` configuration of this layer.\n *\n * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}\n */\nexport function permute(args: PermuteLayerArgs) {\n return new Permute(args);\n}\n\n/**\n * Maps positive integers (indices) into dense vectors of fixed size.\n * E.g. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]\n *\n * **Input shape:** 2D tensor with shape: `[batchSize, sequenceLength]`.\n *\n * **Output shape:** 3D tensor with shape: `[batchSize, sequenceLength,\n * outputDim]`.\n *\n * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'}\n */\nexport function embedding(args: EmbeddingLayerArgs) {\n return new Embedding(args);\n}\n\n// Merge Layers.\n\n/**\n * Layer that performs element-wise addition on an `Array` of inputs.\n *\n * It takes as input a list of tensors, all of the same shape, and returns a\n * single tensor (also of the same shape). The inputs are specified as an\n * `Array` when the `apply` method of the `Add` layer instance is called. For\n * example:\n *\n * ```js\n * const input1 = tf.input({shape: [2, 2]});\n * const input2 = tf.input({shape: [2, 2]});\n * const addLayer = tf.layers.add();\n * const sum = addLayer.apply([input1, input2]);\n * console.log(JSON.stringify(sum.shape));\n * // You get [null, 2, 2], with the first dimension as the undetermined batch\n * // dimension.\n * ```\n *\n * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}\n */\nexport function add(args?: LayerArgs) {\n return new Add(args);\n}\n\n/**\n * Layer that performs element-wise averaging on an `Array` of inputs.\n *\n * It takes as input a list of tensors, all of the same shape, and returns a\n * single tensor (also of the same shape). For example:\n *\n * ```js\n * const input1 = tf.input({shape: [2, 2]});\n * const input2 = tf.input({shape: [2, 2]});\n * const averageLayer = tf.layers.average();\n * const average = averageLayer.apply([input1, input2]);\n * console.log(JSON.stringify(average.shape));\n * // You get [null, 2, 2], with the first dimension as the undetermined batch\n * // dimension.\n * ```\n *\n * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}\n */\nexport function average(args?: LayerArgs) {\n return new Average(args);\n}\n\n/**\n * Layer that concatenates an `Array` of inputs.\n *\n * It takes a list of tensors, all of the same shape except for the\n * concatenation axis, and returns a single tensor, the concatenation\n * of all inputs. For example:\n *\n * ```js\n * const input1 = tf.input({shape: [2, 2]});\n * const input2 = tf.input({shape: [2, 3]});\n * const concatLayer = tf.layers.concatenate();\n * const output = concatLayer.apply([input1, input2]);\n * console.log(JSON.stringify(output.shape));\n * // You get [null, 2, 5], with the first dimension as the undetermined batch\n * // dimension. 
The last dimension (5) is the result of concatenating the\n * // last dimensions of the inputs (2 and 3).\n * ```\n *\n * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}\n */\nexport function concatenate(args?: ConcatenateLayerArgs) {\n return new Concatenate(args);\n}\n\n/**\n * Layer that computes the element-wise maximum of an `Array` of inputs.\n *\n * It takes as input a list of tensors, all of the same shape, and returns a\n * single tensor (also of the same shape). For example:\n *\n * ```js\n * const input1 = tf.input({shape: [2, 2]});\n * const input2 = tf.input({shape: [2, 2]});\n * const maxLayer = tf.layers.maximum();\n * const max = maxLayer.apply([input1, input2]);\n * console.log(JSON.stringify(max.shape));\n * // You get [null, 2, 2], with the first dimension as the undetermined batch\n * // dimension.\n * ```\n *\n * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}\n */\nexport function maximum(args?: LayerArgs) {\n return new Maximum(args);\n}\n\n/**\n * Layer that computes the element-wise minimum of an `Array` of inputs.\n *\n * It takes as input a list of tensors, all of the same shape, and returns a\n * single tensor (also of the same shape). For example:\n *\n * ```js\n * const input1 = tf.input({shape: [2, 2]});\n * const input2 = tf.input({shape: [2, 2]});\n * const minLayer = tf.layers.minimum();\n * const min = minLayer.apply([input1, input2]);\n * console.log(JSON.stringify(min.shape));\n * // You get [null, 2, 2], with the first dimension as the undetermined batch\n * // dimension.\n * ```\n *\n * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}\n */\nexport function minimum(args?: LayerArgs) {\n return new Minimum(args);\n}\n\n/**\n * Layer that multiplies (element-wise) an `Array` of inputs.\n *\n * It takes as input an Array of tensors, all of the same\n * shape, and returns a single tensor (also of the same shape).\n * For example:\n *\n * ```js\n * const input1 = tf.input({shape: [2, 2]});\n * const input2 = tf.input({shape: [2, 2]});\n * const input3 = tf.input({shape: [2, 2]});\n * const multiplyLayer = tf.layers.multiply();\n * const product = multiplyLayer.apply([input1, input2, input3]);\n * console.log(product.shape);\n * // You get [null, 2, 2], with the first dimension as the undetermined batch\n * // dimension.\n *\n * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}\n */\nexport function multiply(args?: LayerArgs) {\n return new Multiply(args);\n}\n\n/**\n * Layer that computes a dot product between samples in two tensors.\n *\n * E.g., if applied to a list of two tensors `a` and `b` both of shape\n * `[batchSize, n]`, the output will be a tensor of shape `[batchSize, 1]`,\n * where each entry at index `[i, 0]` will be the dot product between\n * `a[i, :]` and `b[i, :]`.\n *\n * Example:\n *\n * ```js\n * const dotLayer = tf.layers.dot({axes: -1});\n * const x1 = tf.tensor2d([[10, 20], [30, 40]]);\n * const x2 = tf.tensor2d([[-1, -2], [-3, -4]]);\n *\n * // Invoke the layer's apply() method in eager (imperative) mode.\n * const y = dotLayer.apply([x1, x2]);\n * y.print();\n * ```\n *\n * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'}\n */\nexport function dot(args: DotLayerArgs) {\n return new Dot(args);\n}\n\n// Normalization Layers.\n\n/**\n * Batch normalization layer (Ioffe and Szegedy, 2014).\n *\n * Normalize the activations of the previous layer at each batch,\n * i.e. 
applies a transformation that maintains the mean activation\n * close to 0 and the activation standard deviation close to 1.\n *\n * Input shape:\n * Arbitrary. Use the keyword argument `inputShape` (Array of integers, does\n * not include the sample axis) when calling the constructor of this class,\n * if this layer is used as a first layer in a model.\n *\n * Output shape:\n * Same shape as input.\n *\n * References:\n * - [Batch Normalization: Accelerating Deep Network Training by Reducing\n * Internal Covariate Shift](https://arxiv.org/abs/1502.03167)\n *\n * @doc {heading: 'Layers', subheading: 'Normalization', namespace: 'layers'}\n */\nexport function batchNormalization(args?: BatchNormalizationLayerArgs) {\n return new BatchNormalization(args);\n}\n\n/**\n * Layer-normalization layer (Ba et al., 2016).\n *\n * Normalizes the activations of the previous layer for each given example in a\n * batch independently, instead of across a batch like in `batchNormalization`.\n * In other words, this layer applies a transformation that maintains the mean\n * activation within each example close to 0 and activation variance close to 1.\n *\n * Input shape:\n * Arbitrary. Use the argument `inputShape` when using this layer as the first\n * layer in a model.\n *\n * Output shape:\n * Same as input.\n *\n * References:\n * - [Layer Normalization](https://arxiv.org/abs/1607.06450)\n *\n * @doc {heading: 'Layers', subheading: 'Normalization', namespace: 'layers'}\n */\nexport function layerNormalization(args?: LayerNormalizationLayerArgs) {\n return new LayerNormalization(args);\n}\n\n// Padding Layers.\n\n/**\n * Zero-padding layer for 2D input (e.g., image).\n *\n * This layer can add rows and columns of zeros\n * at the top, bottom, left and right side of an image tensor.\n *\n * Input shape:\n * 4D tensor with shape:\n * - If `dataFormat` is `\"channelsLast\"`:\n * `[batch, rows, cols, channels]`\n * - If `data_format` is `\"channels_first\"`:\n * `[batch, channels, rows, cols]`.\n *\n * Output shape:\n * 4D with shape:\n * - If `dataFormat` is `\"channelsLast\"`:\n * `[batch, paddedRows, paddedCols, channels]`\n * - If `dataFormat` is `\"channelsFirst\"`:\n * `[batch, channels, paddedRows, paddedCols]`.\n *\n * @doc {heading: 'Layers', subheading: 'Padding', namespace: 'layers'}\n */\nexport function zeroPadding2d(args?: ZeroPadding2DLayerArgs) {\n return new ZeroPadding2D(args);\n}\n\n// Pooling Layers.\n\n/**\n * Average pooling operation for spatial data.\n *\n * Input shape: `[batchSize, inLength, channels]`\n *\n * Output shape: `[batchSize, pooledLength, channels]`\n *\n * `tf.avgPool1d` is an alias.\n *\n * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}\n */\nexport function averagePooling1d(args: Pooling1DLayerArgs) {\n return new AveragePooling1D(args);\n}\nexport function avgPool1d(args: Pooling1DLayerArgs) {\n return averagePooling1d(args);\n}\n// For backwards compatibility.\n// See https://github.com/tensorflow/tfjs/issues/152\nexport function avgPooling1d(args: Pooling1DLayerArgs) {\n return averagePooling1d(args);\n}\n\n/**\n * Average pooling operation for spatial data.\n *\n * Input shape:\n * - If `dataFormat === CHANNEL_LAST`:\n * 4D tensor with shape:\n * `[batchSize, rows, cols, channels]`\n * - If `dataFormat === CHANNEL_FIRST`:\n * 4D tensor with shape:\n * `[batchSize, channels, rows, cols]`\n *\n * Output shape\n * - If `dataFormat === CHANNEL_LAST`:\n * 4D tensor with shape:\n * `[batchSize, pooledRows, pooledCols, channels]`\n * - If `dataFormat 
=== CHANNEL_FIRST`:\n * 4D tensor with shape:\n * `[batchSize, channels, pooledRows, pooledCols]`\n *\n * `tf.avgPool2d` is an alias.\n *\n * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}\n */\nexport function averagePooling2d(args: Pooling2DLayerArgs) {\n return new AveragePooling2D(args);\n}\nexport function avgPool2d(args: Pooling2DLayerArgs) {\n return averagePooling2d(args);\n}\n// For backwards compatibility.\n// See https://github.com/tensorflow/tfjs/issues/152\nexport function avgPooling2d(args: Pooling2DLayerArgs) {\n return averagePooling2d(args);\n}\n\n/**\n * Average pooling operation for 3D data.\n *\n * Input shape\n * - If `dataFormat === channelsLast`:\n * 5D tensor with shape:\n * `[batchSize, depths, rows, cols, channels]`\n * - If `dataFormat === channelsFirst`:\n * 4D tensor with shape:\n * `[batchSize, channels, depths, rows, cols]`\n *\n * Output shape\n * - If `dataFormat=channelsLast`:\n * 5D tensor with shape:\n * `[batchSize, pooledDepths, pooledRows, pooledCols, channels]`\n * - If `dataFormat=channelsFirst`:\n * 5D tensor with shape:\n * `[batchSize, channels, pooledDepths, pooledRows, pooledCols]`\n *\n * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}\n */\nexport function averagePooling3d(args: Pooling3DLayerArgs) {\n return new AveragePooling3D(args);\n}\nexport function avgPool3d(args: Pooling3DLayerArgs) {\n return averagePooling3d(args);\n}\n// For backwards compatibility.\n// See https://github.com/tensorflow/tfjs/issues/152\nexport function avgPooling3d(args: Pooling3DLayerArgs) {\n return averagePooling3d(args);\n}\n\n/**\n * Global average pooling operation for temporal data.\n *\n * Input Shape: 3D tensor with shape: `[batchSize, steps, features]`.\n *\n * Output Shape: 2D tensor with shape: `[batchSize, features]`.\n *\n * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}\n */\nexport function globalAveragePooling1d(args?: LayerArgs) {\n return new GlobalAveragePooling1D(args);\n}\n\n/**\n * Global average pooling operation for spatial data.\n *\n * Input shape:\n * - If `dataFormat` is `CHANNEL_LAST`:\n * 4D tensor with shape: `[batchSize, rows, cols, channels]`.\n * - If `dataFormat` is `CHANNEL_FIRST`:\n * 4D tensor with shape: `[batchSize, channels, rows, cols]`.\n *\n * Output shape:\n * 2D tensor with shape: `[batchSize, channels]`.\n *\n * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}\n */\nexport function globalAveragePooling2d(args: GlobalPooling2DLayerArgs) {\n return new GlobalAveragePooling2D(args);\n}\n\n/**\n * Global max pooling operation for temporal data.\n *\n * Input Shape: 3D tensor with shape: `[batchSize, steps, features]`.\n *\n * Output Shape: 2D tensor with shape: `[batchSize, features]`.\n *\n * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}\n */\nexport function globalMaxPooling1d(args?: LayerArgs) {\n return new GlobalMaxPooling1D(args);\n}\n\n/**\n * Global max pooling operation for spatial data.\n *\n * Input shape:\n * - If `dataFormat` is `CHANNEL_LAST`:\n * 4D tensor with shape: `[batchSize, rows, cols, channels]`.\n * - If `dataFormat` is `CHANNEL_FIRST`:\n * 4D tensor with shape: `[batchSize, channels, rows, cols]`.\n *\n * Output shape:\n * 2D tensor with shape: `[batchSize, channels]`.\n *\n * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}\n */\nexport function globalMaxPooling2d(args: GlobalPooling2DLayerArgs) {\n return new GlobalMaxPooling2D(args);\n}\n\n/**\n * Max pooling 
operation for temporal data.\n *\n * Input shape: `[batchSize, inLength, channels]`\n *\n * Output shape: `[batchSize, pooledLength, channels]`\n *\n * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}\n */\nexport function maxPooling1d(args: Pooling1DLayerArgs) {\n return new MaxPooling1D(args);\n}\n\n/**\n * Max pooling operation for spatial data.\n *\n * Input shape\n * - If `dataFormat === CHANNEL_LAST`:\n * 4D tensor with shape:\n * `[batchSize, rows, cols, channels]`\n * - If `dataFormat === CHANNEL_FIRST`:\n * 4D tensor with shape:\n * `[batchSize, channels, rows, cols]`\n *\n * Output shape\n * - If `dataFormat=CHANNEL_LAST`:\n * 4D tensor with shape:\n * `[batchSize, pooledRows, pooledCols, channels]`\n * - If `dataFormat=CHANNEL_FIRST`:\n * 4D tensor with shape:\n * `[batchSize, channels, pooledRows, pooledCols]`\n *\n * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}\n */\nexport function maxPooling2d(args: Pooling2DLayerArgs) {\n return new MaxPooling2D(args);\n}\n\n/**\n * Max pooling operation for 3D data.\n *\n * Input shape\n * - If `dataFormat === channelsLast`:\n * 5D tensor with shape:\n * `[batchSize, depths, rows, cols, channels]`\n * - If `dataFormat === channelsFirst`:\n * 5D tensor with shape:\n * `[batchSize, channels, depths, rows, cols]`\n *\n * Output shape\n * - If `dataFormat=channelsLast`:\n * 5D tensor with shape:\n * `[batchSize, pooledDepths, pooledRows, pooledCols, channels]`\n * - If `dataFormat=channelsFirst`:\n * 5D tensor with shape:\n * `[batchSize, channels, pooledDepths, pooledRows, pooledCols]`\n *\n * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'}\n */\nexport function maxPooling3d(args: Pooling3DLayerArgs) {\n return new MaxPooling3D(args);\n}\n\n// Recurrent Layers.\n\n/**\n * Gated Recurrent Unit - Cho et al. 2014.\n *\n * This is an `RNN` layer consisting of one `GRUCell`. However, unlike\n * the underlying `GRUCell`, the `apply` method of `SimpleRNN` operates\n * on a sequence of inputs. The shape of the input (not including the first,\n * batch dimension) needs to be at least 2-D, with the first dimension being\n * time steps. For example:\n *\n * ```js\n * const rnn = tf.layers.gru({units: 8, returnSequences: true});\n *\n * // Create an input with 10 time steps.\n * const input = tf.input({shape: [10, 20]});\n * const output = rnn.apply(input);\n *\n * console.log(JSON.stringify(output.shape));\n * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the\n * // same as the sequence length of `input`, due to `returnSequences`: `true`;\n * // 3rd dimension is the `GRUCell`'s number of units.\n *\n * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}\n */\nexport function gru(args: GRULayerArgs) {\n return new GRU(args);\n}\n\n/**\n * Cell class for `GRU`.\n *\n * `GRUCell` is distinct from the `RNN` subclass `GRU` in that its\n * `apply` method takes the input data of only a single time step and returns\n * the cell's output at the time step, while `GRU` takes the input data\n * over a number of time steps. For example:\n *\n * ```js\n * const cell = tf.layers.gruCell({units: 2});\n * const input = tf.input({shape: [10]});\n * const output = cell.apply(input);\n *\n * console.log(JSON.stringify(output.shape));\n * // [null, 10]: This is the cell's output at a single time step. The 1st\n * // dimension is the unknown batch size.\n * ```\n *\n * Instance(s) of `GRUCell` can be used to construct `RNN` layers. 
The\n * most typical use of this workflow is to combine a number of cells into a\n * stacked RNN cell (i.e., `StackedRNNCell` internally) and use it to create an\n * RNN. For example:\n *\n * ```js\n * const cells = [\n * tf.layers.gruCell({units: 4}),\n * tf.layers.gruCell({units: 8}),\n * ];\n * const rnn = tf.layers.rnn({cell: cells, returnSequences: true});\n *\n * // Create an input with 10 time steps and a length-20 vector at each step.\n * const input = tf.input({shape: [10, 20]});\n * const output = rnn.apply(input);\n *\n * console.log(JSON.stringify(output.shape));\n * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the\n * // same as the sequence length of `input`, due to `returnSequences`: `true`;\n * // 3rd dimension is the last `gruCell`'s number of units.\n * ```\n *\n * To create an `RNN` consisting of only *one* `GRUCell`, use the\n * `tf.layers.gru`.\n *\n * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}\n */\nexport function gruCell(args: GRUCellLayerArgs) {\n return new GRUCell(args);\n}\n\n/**\n * Long-Short Term Memory layer - Hochreiter 1997.\n *\n * This is an `RNN` layer consisting of one `LSTMCell`. However, unlike\n * the underlying `LSTMCell`, the `apply` method of `LSTM` operates\n * on a sequence of inputs. The shape of the input (not including the first,\n * batch dimension) needs to be at least 2-D, with the first dimension being\n * time steps. For example:\n *\n * ```js\n * const lstm = tf.layers.lstm({units: 8, returnSequences: true});\n *\n * // Create an input with 10 time steps.\n * const input = tf.input({shape: [10, 20]});\n * const output = lstm.apply(input);\n *\n * console.log(JSON.stringify(output.shape));\n * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the\n * // same as the sequence length of `input`, due to `returnSequences`: `true`;\n * // 3rd dimension is the `LSTMCell`'s number of units.\n *\n * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}\n */\nexport function lstm(args: LSTMLayerArgs) {\n return new LSTM(args);\n}\n\n/**\n * Cell class for `LSTM`.\n *\n * `LSTMCell` is distinct from the `RNN` subclass `LSTM` in that its\n * `apply` method takes the input data of only a single time step and returns\n * the cell's output at the time step, while `LSTM` takes the input data\n * over a number of time steps. For example:\n *\n * ```js\n * const cell = tf.layers.lstmCell({units: 2});\n * const input = tf.input({shape: [10]});\n * const output = cell.apply(input);\n *\n * console.log(JSON.stringify(output.shape));\n * // [null, 10]: This is the cell's output at a single time step. The 1st\n * // dimension is the unknown batch size.\n * ```\n *\n * Instance(s) of `LSTMCell` can be used to construct `RNN` layers. The\n * most typical use of this workflow is to combine a number of cells into a\n * stacked RNN cell (i.e., `StackedRNNCell` internally) and use it to create an\n * RNN. 
For example:\n *\n * ```js\n * const cells = [\n * tf.layers.lstmCell({units: 4}),\n * tf.layers.lstmCell({units: 8}),\n * ];\n * const rnn = tf.layers.rnn({cell: cells, returnSequences: true});\n *\n * // Create an input with 10 time steps and a length-20 vector at each step.\n * const input = tf.input({shape: [10, 20]});\n * const output = rnn.apply(input);\n *\n * console.log(JSON.stringify(output.shape));\n * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the\n * // same as the sequence length of `input`, due to `returnSequences`: `true`;\n * // 3rd dimension is the last `lstmCell`'s number of units.\n * ```\n *\n * To create an `RNN` consisting of only *one* `LSTMCell`, use the\n * `tf.layers.lstm`.\n *\n * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}\n */\nexport function lstmCell(args: LSTMCellLayerArgs) {\n return new LSTMCell(args);\n}\n\n/**\n * Fully-connected RNN where the output is to be fed back to input.\n *\n * This is an `RNN` layer consisting of one `SimpleRNNCell`. However, unlike\n * the underlying `SimpleRNNCell`, the `apply` method of `SimpleRNN` operates\n * on a sequence of inputs. The shape of the input (not including the first,\n * batch dimension) needs to be at least 2-D, with the first dimension being\n * time steps. For example:\n *\n * ```js\n * const rnn = tf.layers.simpleRNN({units: 8, returnSequences: true});\n *\n * // Create an input with 10 time steps.\n * const input = tf.input({shape: [10, 20]});\n * const output = rnn.apply(input);\n *\n * console.log(JSON.stringify(output.shape));\n * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the\n * // same as the sequence length of `input`, due to `returnSequences`: `true`;\n * // 3rd dimension is the `SimpleRNNCell`'s number of units.\n * ```\n *\n * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}\n */\nexport function simpleRNN(args: SimpleRNNLayerArgs) {\n return new SimpleRNN(args);\n}\n\n/**\n * Cell class for `SimpleRNN`.\n *\n * `SimpleRNNCell` is distinct from the `RNN` subclass `SimpleRNN` in that its\n * `apply` method takes the input data of only a single time step and returns\n * the cell's output at the time step, while `SimpleRNN` takes the input data\n * over a number of time steps. For example:\n *\n * ```js\n * const cell = tf.layers.simpleRNNCell({units: 2});\n * const input = tf.input({shape: [10]});\n * const output = cell.apply(input);\n *\n * console.log(JSON.stringify(output.shape));\n * // [null, 10]: This is the cell's output at a single time step. The 1st\n * // dimension is the unknown batch size.\n * ```\n *\n * Instance(s) of `SimpleRNNCell` can be used to construct `RNN` layers. The\n * most typical use of this workflow is to combine a number of cells into a\n * stacked RNN cell (i.e., `StackedRNNCell` internally) and use it to create an\n * RNN. 
For example:\n *\n * ```js\n * const cells = [\n * tf.layers.simpleRNNCell({units: 4}),\n * tf.layers.simpleRNNCell({units: 8}),\n * ];\n * const rnn = tf.layers.rnn({cell: cells, returnSequences: true});\n *\n * // Create an input with 10 time steps and a length-20 vector at each step.\n * const input = tf.input({shape: [10, 20]});\n * const output = rnn.apply(input);\n *\n * console.log(JSON.stringify(output.shape));\n * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the\n * // same as the sequence length of `input`, due to `returnSequences`: `true`;\n * // 3rd dimension is the last `SimpleRNNCell`'s number of units.\n * ```\n *\n * To create an `RNN` consisting of only *one* `SimpleRNNCell`, use the\n * `tf.layers.simpleRNN`.\n *\n * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}\n */\nexport function simpleRNNCell(args: SimpleRNNCellLayerArgs) {\n return new SimpleRNNCell(args);\n}\n\n/**\n * Convolutional LSTM layer - Xingjian Shi 2015.\n *\n * This is a `ConvRNN2D` layer consisting of one `ConvLSTM2DCell`. However,\n * unlike the underlying `ConvLSTM2DCell`, the `apply` method of `ConvLSTM2D`\n * operates on a sequence of inputs. The shape of the input (not including the\n * first, batch dimension) needs to be 4-D, with the first dimension being time\n * steps. For example:\n *\n * ```js\n * const filters = 3;\n * const kernelSize = 3;\n *\n * const batchSize = 4;\n * const sequenceLength = 2;\n * const size = 5;\n * const channels = 3;\n *\n * const inputShape = [batchSize, sequenceLength, size, size, channels];\n * const input = tf.ones(inputShape);\n *\n * const layer = tf.layers.convLstm2d({filters, kernelSize});\n *\n * const output = layer.apply(input);\n * ```\n */\n/** @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'} */\nexport function convLstm2d(args: ConvLSTM2DArgs) {\n return new ConvLSTM2D(args);\n}\n\n/**\n * Cell class for `ConvLSTM2D`.\n *\n * `ConvLSTM2DCell` is distinct from the `ConvRNN2D` subclass `ConvLSTM2D` in\n * that its `call` method takes the input data of only a single time step and\n * returns the cell's output at the time step, while `ConvLSTM2D` takes the\n * input data over a number of time steps. For example:\n *\n * ```js\n * const filters = 3;\n * const kernelSize = 3;\n *\n * const sequenceLength = 1;\n * const size = 5;\n * const channels = 3;\n *\n * const inputShape = [sequenceLength, size, size, channels];\n * const input = tf.ones(inputShape);\n *\n * const cell = tf.layers.convLstm2dCell({filters, kernelSize});\n *\n * cell.build(input.shape);\n *\n * const outputSize = size - kernelSize + 1;\n * const outShape = [sequenceLength, outputSize, outputSize, filters];\n *\n * const initialH = tf.zeros(outShape);\n * const initialC = tf.zeros(outShape);\n *\n * const [o, h, c] = cell.call([input, initialH, initialC], {});\n * ```\n */\n/** @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'} */\nexport function convLstm2dCell(args: ConvLSTM2DCellArgs) {\n return new ConvLSTM2DCell(args);\n}\n\n/**\n * Base class for recurrent layers.\n *\n * Input shape:\n * 3D tensor with shape `[batchSize, timeSteps, inputDim]`.\n *\n * Output shape:\n * - if `returnState`, an Array of tensors (i.e., `tf.Tensor`s). The first\n * tensor is the output. 
The remaining tensors are the states at the\n * last time step, each with shape `[batchSize, units]`.\n * - if `returnSequences`, the output will have shape\n * `[batchSize, timeSteps, units]`.\n * - else, the output will have shape `[batchSize, units]`.\n *\n * Masking:\n * This layer supports masking for input data with a variable number\n * of timesteps. To introduce masks to your data,\n * use an embedding layer with the `mask_zero` parameter\n * set to `True`.\n *\n * Notes on using statefulness in RNNs:\n * You can set RNN layers to be 'stateful', which means that the states\n * computed for the samples in one batch will be reused as initial states\n * for the samples in the next batch. This assumes a one-to-one mapping\n * between samples in different successive batches.\n *\n * To enable statefulness:\n * - specify `stateful: true` in the layer constructor.\n * - specify a fixed batch size for your model, by passing\n * if sequential model:\n * `batchInputShape=[...]` to the first layer in your model.\n * else for functional model with 1 or more Input layers:\n * `batchShape=[...]` to all the first layers in your model.\n * This is the expected shape of your inputs *including the batch size*.\n * It should be a tuple of integers, e.g. `(32, 10, 100)`.\n * - specify `shuffle=False` when calling fit().\n *\n * To reset the states of your model, call `.resetStates()` on either\n * a specific layer, or on your entire model.\n *\n * Note on specifying the initial state of RNNs\n * You can specify the initial state of RNN layers symbolically by\n * calling them with the option `initialState`. The value of\n * `initialState` should be a tensor or list of tensors representing\n * the initial state of the RNN layer.\n *\n * You can specify the initial state of RNN layers numerically by\n * calling `resetStates` with the keyword argument `states`. The value of\n * `states` should be a numpy array or list of numpy arrays representing\n * the initial state of the RNN layer.\n *\n * Note on passing external constants to RNNs\n * You can pass \"external\" constants to the cell using the `constants`\n * keyword argument of `RNN.call` method. This requires that the `cell.call`\n * method accepts the same keyword argument `constants`. Such constants\n * can be used to condition the cell transformation on additional static\n * inputs (not changing over time), a.k.a. an attention mechanism.\n *\n * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}\n */\nexport function rnn(args: RNNLayerArgs) {\n return new RNN(args);\n}\n\n/**\n * Wrapper allowing a stack of RNN cells to behave as a single cell.\n *\n * Used to implement efficient stacked RNNs.\n *\n * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'}\n */\nexport function stackedRNNCells(args: StackedRNNCellsArgs){\n return new StackedRNNCells(args);\n}\n\n// Wrapper Layers.\n\n/** @doc {heading: 'Layers', subheading: 'Wrapper', namespace: 'layers'} */\nexport function bidirectional(args: BidirectionalLayerArgs) {\n return new Bidirectional(args);\n}\n\n/**\n * This wrapper applies a layer to every temporal slice of an input.\n *\n * The input should be at least 3D, and the dimension of the index `1` will be\n * considered to be the temporal dimension.\n *\n * Consider a batch of 32 samples, where each sample is a sequence of 10 vectors\n * of 16 dimensions. 
The batch input shape of the layer is then `[32, 10,\n * 16]`, and the `inputShape`, not including the sample dimension, is\n * `[10, 16]`.\n *\n * You can then use `TimeDistributed` to apply a `Dense` layer to each of the 10\n * timesteps, independently:\n *\n * ```js\n * const model = tf.sequential();\n * model.add(tf.layers.timeDistributed({\n * layer: tf.layers.dense({units: 8}),\n * inputShape: [10, 16],\n * }));\n *\n * // Now model.outputShape = [null, 10, 8].\n * // The output will then have shape `[32, 10, 8]`.\n *\n * // In subsequent layers, there is no need for `inputShape`:\n * model.add(tf.layers.timeDistributed({layer: tf.layers.dense({units: 32})}));\n * console.log(JSON.stringify(model.outputs[0].shape));\n * // Now model.outputShape = [null, 10, 32].\n * ```\n *\n * The output will then have shape `[32, 10, 32]`.\n *\n * `TimeDistributed` can be used with arbitrary layers, not just `Dense`, for\n * instance a `Conv2D` layer.\n *\n * ```js\n * const model = tf.sequential();\n * model.add(tf.layers.timeDistributed({\n * layer: tf.layers.conv2d({filters: 64, kernelSize: [3, 3]}),\n * inputShape: [10, 299, 299, 3],\n * }));\n * console.log(JSON.stringify(model.outputs[0].shape));\n * ```\n *\n * @doc {heading: 'Layers', subheading: 'Wrapper', namespace: 'layers'}\n */\nexport function timeDistributed(args: WrapperLayerArgs) {\n return new TimeDistributed(args);\n}\n\n// Aliases for pooling.\nexport const globalMaxPool1d = globalMaxPooling1d;\nexport const globalMaxPool2d = globalMaxPooling2d;\nexport const maxPool1d = maxPooling1d;\nexport const maxPool2d = maxPooling2d;\n\nexport {Layer, RNN, RNNCell, input /* alias for tf.input */};\n\n/**\n * Apply additive zero-centered Gaussian noise.\n *\n * As it is a regularization layer, it is only active at training time.\n *\n * This is useful to mitigate overfitting\n * (you could see it as a form of random data augmentation).\n * Gaussian Noise (GS) is a natural choice as corruption process\n * for real valued inputs.\n *\n * # Arguments\n * stddev: float, standard deviation of the noise distribution.\n *\n * # Input shape\n * Arbitrary. Use the keyword argument `input_shape`\n * (tuple of integers, does not include the samples axis)\n * when using this layer as the first layer in a model.\n *\n * # Output shape\n * Same shape as input.\n *\n * @doc {heading: 'Layers', subheading: 'Noise', namespace: 'layers'}\n */\nexport function gaussianNoise(args: GaussianNoiseArgs) {\n return new GaussianNoise(args);\n}\n\n/**\n * Apply multiplicative 1-centered Gaussian noise.\n *\n * As it is a regularization layer, it is only active at training time.\n *\n * Arguments:\n * - `rate`: float, drop probability (as with `Dropout`).\n * The multiplicative noise will have\n * standard deviation `sqrt(rate / (1 - rate))`.\n *\n * Input shape:\n * Arbitrary. 
Use the keyword argument `inputShape`\n * (tuple of integers, does not include the samples axis)\n * when using this layer as the first layer in a model.\n *\n * Output shape:\n * Same shape as input.\n *\n * References:\n * - [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](\n * http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf)\n *\n * @doc {heading: 'Layers', subheading: 'Noise', namespace: 'layers'}\n */\nexport function gaussianDropout(args: GaussianDropoutArgs) {\n return new GaussianDropout(args);\n}\n\n/**\n * Applies Alpha Dropout to the input.\n *\n * As it is a regularization layer, it is only active at training time.\n *\n * Alpha Dropout is a `Dropout` that keeps mean and variance of inputs\n * to their original values, in order to ensure the self-normalizing property\n * even after this dropout.\n * Alpha Dropout fits well to Scaled Exponential Linear Units\n * by randomly setting activations to the negative saturation value.\n *\n * Arguments:\n * - `rate`: float, drop probability (as with `Dropout`).\n * The multiplicative noise will have\n * standard deviation `sqrt(rate / (1 - rate))`.\n * - `noise_shape`: A 1-D `Tensor` of type `int32`, representing the\n * shape for randomly generated keep/drop flags.\n *\n * Input shape:\n * Arbitrary. Use the keyword argument `inputShape`\n * (tuple of integers, does not include the samples axis)\n * when using this layer as the first layer in a model.\n *\n * Output shape:\n * Same shape as input.\n *\n * References:\n * - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)\n *\n * @doc {heading: 'Layers', subheading: 'Noise', namespace: 'layers'}\n */\nexport function alphaDropout(args: AlphaDropoutArgs) {\n return new AlphaDropout(args);\n}\n\n/**\n * Masks a sequence by using a mask value to skip timesteps.\n *\n * If all features for a given sample timestep are equal to `mask_value`,\n * then the sample timestep will be masked (skipped) in all downstream layers\n * (as long as they support masking).\n *\n * If any downstream layer does not support masking yet receives such\n * an input mask, an exception will be raised.\n *\n * Arguments:\n * - `maskValue`: Either None or mask value to skip.\n *\n * Input shape:\n * Arbitrary. Use the keyword argument `inputShape`\n * (tuple of integers, does not include the samples axis)\n * when using this layer as the first layer in a model.\n *\n * Output shape:\n * Same shape as input.\n *\n * @doc {heading: 'Layers', subheading: 'Mask', namespace: 'layers'}\n */\nexport function masking(args?: MaskingArgs) {\n return new Masking(args);\n}\n\n/**\n * A preprocessing layer which rescales input values to a new range.\n *\n * This layer rescales every value of an input (often an image) by multiplying\n * by `scale` and adding `offset`.\n *\n * For instance:\n * 1. To rescale an input in the ``[0, 255]`` range\n * to be in the `[0, 1]` range, you would pass `scale=1/255`.\n * 2. To rescale an input in the ``[0, 255]`` range to be in the `[-1, 1]`\n * range, you would pass `scale=1./127.5, offset=-1`.\n * The rescaling is applied both during training and inference. 
Inputs can be\n * of integer or floating point dtype, and by default the layer will output\n * floats.\n *\n * Arguments:\n * - `scale`: Float, the scale to apply to the inputs.\n * - `offset`: Float, the offset to apply to the inputs.\n *\n * Input shape:\n * Arbitrary.\n *\n * Output shape:\n * Same as input.\n *\n * @doc {heading: 'Layers', subheading: 'Rescaling', namespace: 'layers'}\n */\nexport function rescaling(args?: RescalingArgs) {\n return new Rescaling(args);\n}\n\n/**\n * A preprocessing layer which center crops images.\n *\n * This layers crops the central portion of the images to a target size. If an\n * image is smaller than the target size, it will be resized and cropped so as\n * to return the largest possible window in the image that matches the target\n * aspect ratio.\n *\n * Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and\n * of integer or floating point dtype.\n *\n * If the input height/width is even and the target height/width is odd (or\n * inversely), the input image is left-padded by 1 pixel.\n *\n * Arguments:\n * `height`: Integer, the height of the output shape.\n * `width`: Integer, the width of the output shape.\n *\n * Input shape:\n * 3D (unbatched) or 4D (batched) tensor with shape:\n * `(..., height, width, channels)`, in `channelsLast` format.\n *\n * Output shape:\n * 3D (unbatched) or 4D (batched) tensor with shape:\n * `(..., targetHeight, targetWidth, channels)`.\n *\n *\n * @doc {heading: 'Layers', subheading: 'CenterCrop', namespace: 'layers'}\n */\nexport function centerCrop(args?: CenterCropArgs) {\n return new CenterCrop(args);\n }\n \n/**\n * A preprocessing layer which resizes images.\n * This layer resizes an image input to a target height and width. The input\n * should be a 4D (batched) or 3D (unbatched) tensor in `\"channels_last\"`\n * format. Input pixel values can be of any range (e.g. `[0., 1.)` or `[0,\n * 255]`) and of interger or floating point dtype. By default, the layer will\n * output floats.\n *\n * Arguments:\n * - `height`: number, the height for the output tensor.\n * - `width`: number, the width for the output tensor.\n * - `interpolation`: string, the method for image resizing interpolation.\n * - `cropToAspectRatio`: boolean, whether to keep image aspect ratio.\n *\n * Input shape:\n * Arbitrary.\n *\n * Output shape:\n * height, width, num channels.\n *\n * @doc {heading: 'Layers', subheading: 'Resizing', namespace: 'layers'}\n */\nexport function resizing(args?: ResizingArgs) {\n return new Resizing(args);\n}\n\n/**\n * A preprocessing layer which encodes integer features.\n *\n * This layer provides options for condensing data into a categorical encoding\n * when the total number of tokens are known in advance. It accepts integer\n * values as inputs, and it outputs a dense representation of those\n * inputs.\n *\n * Arguments:\n *\n * numTokens: The total number of tokens the layer should support. All\n * inputs to the layer must integers in the range `0 <= value <\n * numTokens`, or an error will be thrown.\n *\n * outputMode: Specification for the output of the layer.\n * Defaults to `multiHot`. Values can be `oneHot`, `multiHot` or\n * `count`, configuring the layer as follows:\n *\n * oneHot: Encodes each individual element in the input into an\n * array of `numTokens` size, containing a 1 at the element index. If\n * the last dimension is size 1, will encode on that dimension. 
If the\n * last dimension is not size 1, will append a new dimension for the\n * encoded output.\n *\n * multiHot: Encodes each sample in the input into a single array\n * of `numTokens` size, containing a 1 for each vocabulary term\n * present in the sample. Treats the last dimension as the sample\n * dimension, if input shape is `(..., sampleLength)`, output shape\n * will be `(..., numTokens)`.\n *\n * count: Like `multiHot`, but the int array contains a count of\n * the number of times the token at that index appeared in the sample.\n *\n * For all output modes, currently only output up to rank 2 is supported.\n * Call arguments:\n * inputs: A 1D or 2D tensor of integer inputs.\n * countWeights: A tensor in the same shape as `inputs` indicating the\n * weight for each sample value when summing up in `count` mode. Not used\n * in `multiHot` or `oneHot` modes.\n *\n *\n * @doc {heading: 'Layers', subheading: 'CategoryEncoding', namespace: 'layers'}\n */\nexport function categoryEncoding(args: CategoryEncodingArgs) {\n return new CategoryEncoding(args);\n}\n","/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\nimport {Tensor} from '@tensorflow/tfjs-core';\n\nimport * as losses from './losses';\nimport * as metrics from './metrics';\n\n/**\n * Binary accuracy metric function.\n *\n * `yTrue` and `yPred` can have 0-1 values. Example:\n * ```js\n * const x = tf.tensor2d([[1, 1, 1, 1], [0, 0, 0, 0]], [2, 4]);\n * const y = tf.tensor2d([[1, 0, 1, 0], [0, 0, 0, 1]], [2, 4]);\n * const accuracy = tf.metrics.binaryAccuracy(x, y);\n * accuracy.print();\n * ```\n *\n * `yTrue` and `yPred` can also have floating-number values between 0 and 1, in\n * which case the values will be thresholded at 0.5 to yield 0-1 values (i.e.,\n * a value >= 0.5 and <= 1.0 is interpreted as 1).\n *\n * Example:\n * ```js\n * const x = tf.tensor1d([1, 1, 1, 1, 0, 0, 0, 0]);\n * const y = tf.tensor1d([0.2, 0.4, 0.6, 0.8, 0.2, 0.3, 0.4, 0.7]);\n * const accuracy = tf.metrics.binaryAccuracy(x, y);\n * accuracy.print();\n * ```\n *\n * @param yTrue Binary Tensor of truth.\n * @param yPred Binary Tensor of prediction.\n * @return Accuracy Tensor.\n *\n * @doc {heading: 'Metrics', namespace: 'metrics'}\n */\nexport function binaryAccuracy(yTrue: Tensor, yPred: Tensor): Tensor {\n return metrics.binaryAccuracy(yTrue, yPred);\n}\n\n/**\n * Binary crossentropy metric function.\n *\n * Example:\n * ```js\n * const x = tf.tensor2d([[0], [1], [1], [1]]);\n * const y = tf.tensor2d([[0], [0], [0.5], [1]]);\n * const crossentropy = tf.metrics.binaryCrossentropy(x, y);\n * crossentropy.print();\n * ```\n *\n * @param yTrue Binary Tensor of truth.\n * @param yPred Binary Tensor of prediction, probabilities for the `1` case.\n * @return Accuracy Tensor.\n *\n * @doc {heading: 'Metrics', namespace: 'metrics'}\n */\nexport function binaryCrossentropy(yTrue: Tensor, yPred: Tensor): Tensor {\n return metrics.binaryCrossentropy(yTrue, yPred);\n}\n\n/**\n * Sparse categorical accuracy metric function.\n *\n * Example:\n * ```js\n *\n * const yTrue = tf.tensor1d([1, 1, 2, 2, 0]);\n * const yPred = tf.tensor2d(\n * [[0, 1, 0], [1, 0, 0], [0, 0.4, 0.6], [0, 0.6, 0.4], [0.7, 0.3, 0]]);\n * const crossentropy = tf.metrics.sparseCategoricalAccuracy(yTrue, yPred);\n * crossentropy.print();\n * ```\n *\n * @param 
yTrue True labels: indices.\n * @param yPred Predicted probabilities or logits.\n * @returns Accuracy tensor.\n *\n * @doc {heading: 'Metrics', namespace: 'metrics'}\n */\nexport function sparseCategoricalAccuracy(\n yTrue: Tensor, yPred: Tensor): Tensor {\n return metrics.sparseCategoricalAccuracy(yTrue, yPred);\n}\n\n/**\n * Categorical accuracy metric function.\n *\n * Example:\n * ```js\n * const x = tf.tensor2d([[0, 0, 0, 1], [0, 0, 0, 1]]);\n * const y = tf.tensor2d([[0.1, 0.8, 0.05, 0.05], [0.1, 0.05, 0.05, 0.8]]);\n * const accuracy = tf.metrics.categoricalAccuracy(x, y);\n * accuracy.print();\n * ```\n *\n * @param yTrue Binary Tensor of truth: one-hot encoding of categories.\n * @param yPred Binary Tensor of prediction: probabilities or logits for the\n * same categories as in `yTrue`.\n * @return Accuracy Tensor.\n *\n * @doc {heading: 'Metrics', namespace: 'metrics'}\n */\nexport function categoricalAccuracy(yTrue: Tensor, yPred: Tensor): Tensor {\n return metrics.categoricalAccuracy(yTrue, yPred);\n}\n\n/**\n * Categorical crossentropy between an output tensor and a target tensor.\n *\n * @param target A tensor of the same shape as `output`.\n * @param output A tensor resulting from a softmax (unless `fromLogits` is\n * `true`, in which case `output` is expected to be the logits).\n * @param fromLogits Boolean, whether `output` is the result of a softmax, or is\n * a tensor of logits.\n *\n * @doc {heading: 'Metrics', namespace: 'metrics'}\n */\nexport function categoricalCrossentropy(yTrue: Tensor, yPred: Tensor): Tensor {\n return metrics.categoricalCrossentropy(yTrue, yPred);\n}\n\n/**\n * Computes the precision of the predictions with respect to the labels.\n *\n * Example:\n * ```js\n * const x = tf.tensor2d(\n * [\n * [0, 0, 0, 1],\n * [0, 1, 0, 0],\n * [0, 0, 0, 1],\n * [1, 0, 0, 0],\n * [0, 0, 1, 0]\n * ]\n * );\n *\n * const y = tf.tensor2d(\n * [\n * [0, 0, 1, 0],\n * [0, 1, 0, 0],\n * [0, 0, 0, 1],\n * [0, 1, 0, 0],\n * [0, 1, 0, 0]\n * ]\n * );\n *\n * const precision = tf.metrics.precision(x, y);\n * precision.print();\n * ```\n *\n * @param yTrue The ground truth values. Expected to contain only 0-1 values.\n * @param yPred The predicted values. Expected to contain only 0-1 values.\n * @return Precision Tensor.\n *\n * @doc {heading: 'Metrics', namespace: 'metrics'}\n */\nexport function precision(yTrue: Tensor, yPred: Tensor): Tensor {\n return metrics.precision(yTrue, yPred);\n}\n\n/**\n * Computes the recall of the predictions with respect to the labels.\n *\n * Example:\n * ```js\n * const x = tf.tensor2d(\n * [\n * [0, 0, 0, 1],\n * [0, 1, 0, 0],\n * [0, 0, 0, 1],\n * [1, 0, 0, 0],\n * [0, 0, 1, 0]\n * ]\n * );\n *\n * const y = tf.tensor2d(\n * [\n * [0, 0, 1, 0],\n * [0, 1, 0, 0],\n * [0, 0, 0, 1],\n * [0, 1, 0, 0],\n * [0, 1, 0, 0]\n * ]\n * );\n *\n * const recall = tf.metrics.recall(x, y);\n * recall.print();\n * ```\n *\n * @param yTrue The ground truth values. Expected to contain only 0-1 values.\n * @param yPred The predicted values. 
Expected to contain only 0-1 values.\n * @return Recall Tensor.\n *\n * @doc {heading: 'Metrics', namespace: 'metrics'}\n */\nexport function recall(yTrue: Tensor, yPred: Tensor): Tensor {\n return metrics.recall(yTrue, yPred);\n}\n\n/**\n * Loss or metric function: Cosine proximity.\n *\n * Mathematically, cosine proximity is defined as:\n * `-sum(l2Normalize(yTrue) * l2Normalize(yPred))`,\n * wherein `l2Normalize()` normalizes the L2 norm of the input to 1 and `*`\n * represents element-wise multiplication.\n *\n * ```js\n * const yTrue = tf.tensor2d([[1, 0], [1, 0]]);\n * const yPred = tf.tensor2d([[1 / Math.sqrt(2), 1 / Math.sqrt(2)], [0, 1]]);\n * const proximity = tf.metrics.cosineProximity(yTrue, yPred);\n * proximity.print();\n * ```\n *\n * @param yTrue Truth Tensor.\n * @param yPred Prediction Tensor.\n * @return Cosine proximity Tensor.\n *\n * @doc {heading: 'Metrics', namespace: 'metrics'}\n */\nexport function cosineProximity(yTrue: Tensor, yPred: Tensor): Tensor {\n return losses.cosineProximity(yTrue, yPred);\n}\n\n/**\n * Loss or metric function: Mean absolute error.\n *\n * Mathematically, mean absolute error is defined as:\n * `mean(abs(yPred - yTrue))`,\n * wherein the `mean` is applied over feature dimensions.\n *\n * ```js\n * const yTrue = tf.tensor2d([[0, 1], [0, 0], [2, 3]]);\n * const yPred = tf.tensor2d([[0, 1], [0, 1], [-2, -3]]);\n * const mse = tf.metrics.meanAbsoluteError(yTrue, yPred);\n * mse.print();\n * ```\n *\n * @param yTrue Truth Tensor.\n * @param yPred Prediction Tensor.\n * @return Mean absolute error Tensor.\n *\n * @doc {heading: 'Metrics', namespace: 'metrics'}\n */\nexport function meanAbsoluteError(yTrue: Tensor, yPred: Tensor): Tensor {\n return losses.meanAbsoluteError(yTrue, yPred);\n}\n\n/**\n * Loss or metric function: Mean absolute percentage error.\n *\n * ```js\n * const yTrue = tf.tensor2d([[0, 1], [10, 20]]);\n * const yPred = tf.tensor2d([[0, 1], [11, 24]]);\n * const mse = tf.metrics.meanAbsolutePercentageError(yTrue, yPred);\n * mse.print();\n * ```\n *\n * Aliases: `tf.metrics.MAPE`, `tf.metrics.mape`.\n *\n * @param yTrue Truth Tensor.\n * @param yPred Prediction Tensor.\n * @return Mean absolute percentage error Tensor.\n *\n * @doc {heading: 'Metrics', namespace: 'metrics'}\n */\nexport function meanAbsolutePercentageError(\n yTrue: Tensor, yPred: Tensor): Tensor {\n return losses.meanAbsolutePercentageError(yTrue, yPred);\n}\n\nexport function MAPE(yTrue: Tensor, yPred: Tensor): Tensor {\n return losses.meanAbsolutePercentageError(yTrue, yPred);\n}\n\nexport function mape(yTrue: Tensor, yPred: Tensor): Tensor {\n return losses.meanAbsolutePercentageError(yTrue, yPred);\n}\n\n/**\n * Loss or metric function: Mean squared error.\n *\n * ```js\n * const yTrue = tf.tensor2d([[0, 1], [3, 4]]);\n * const yPred = tf.tensor2d([[0, 1], [-3, -4]]);\n * const mse = tf.metrics.meanSquaredError(yTrue, yPred);\n * mse.print();\n * ```\n *\n * Aliases: `tf.metrics.MSE`, `tf.metrics.mse`.\n *\n * @param yTrue Truth Tensor.\n * @param yPred Prediction Tensor.\n * @return Mean squared error Tensor.\n *\n * @doc {heading: 'Metrics', namespace: 'metrics'}\n */\nexport function meanSquaredError(yTrue: Tensor, yPred: Tensor): Tensor {\n return losses.meanSquaredError(yTrue, yPred);\n}\n\nexport function MSE(yTrue: Tensor, yPred: Tensor): Tensor {\n return losses.meanSquaredError(yTrue, yPred);\n}\n\nexport function mse(yTrue: Tensor, yPred: Tensor): Tensor {\n return losses.meanSquaredError(yTrue, yPred);\n}\n","/**\n * @license\n * Copyright 2018 
Google LLC\n *\n * Use of this source code is governed by an MIT-style\n * license that can be found in the LICENSE file or at\n * https://opensource.org/licenses/MIT.\n * =============================================================================\n */\n\n/* Original source: keras/callbacks.py */\n\nimport {BaseCallback} from './base_callbacks';\nimport {Container} from './engine/container';\nimport {LayersModel} from './engine/training';\nimport {NotImplementedError} from './errors';\nimport {Logs, resolveScalarsInLogs} from './logs';\n\nexport abstract class Callback extends BaseCallback {\n /** Instance of `keras.models.Model`. Reference of the model being trained. */\n model: LayersModel = null;\n\n override setModel(model: Container): void {\n if (!(model instanceof LayersModel)) {\n throw new Error('model must be a LayersModel, not some other Container');\n }\n this.model = model;\n }\n}\n\nexport interface EarlyStoppingCallbackArgs {\n /**\n * Quantity to be monitored.\n *\n * Defaults to 'val_loss'.\n */\n monitor?: string;\n\n /**\n * Minimum change in the monitored quantity to qualify as improvement,\n * i.e., an absolute change of less than `minDelta` will count as no\n * improvement.\n *\n * Defaults to 0.\n */\n minDelta?: number;\n\n /**\n * Number of epochs with no improvement after which training will be stopped.\n *\n * Defaults to 0.\n */\n patience?: number;\n\n /** Verbosity mode. */\n verbose?: number;\n\n /**\n * Mode: one of 'min', 'max', and 'auto'.\n * - In 'min' mode, training will be stopped when the quantity monitored has\n * stopped decreasing.\n * - In 'max' mode, training will be stopped when the quantity monitored has\n * stopped increasing.\n * - In 'auto' mode, the direction is inferred automatically from the name of\n * the monitored quantity.\n *\n * Defaults to 'auto'.\n */\n mode?: 'auto'|'min'|'max';\n\n /**\n * Baseline value of the monitored quantity.\n *\n * If specified, training will be stopped if the model doesn't show\n * improvement over the baseline.\n */\n baseline?: number;\n\n /**\n * Whether to restore model weights from the epoch with the best value\n * of the monitored quantity. 
If `False`, the model weights obtained at the\n * last step of training are used.\n *\n * **`True` is not supported yet.**\n */\n restoreBestWeights?: boolean;\n}\n\nfunction less(currVal: number, prevVal: number) {\n return currVal < prevVal;\n}\n\nfunction greater(currVal: number, prevVal: number) {\n return currVal > prevVal;\n}\n\n/**\n * A Callback that stops training when a monitored quantity has stopped\n * improving.\n */\nexport class EarlyStopping extends Callback {\n protected readonly monitor: string;\n protected readonly minDelta: number;\n protected readonly patience: number;\n protected readonly baseline: number;\n protected readonly verbose: number;\n protected readonly mode: 'auto'|'min'|'max';\n\n protected monitorFunc: (currVal: number, prevVal: number) => boolean;\n\n private wait: number;\n private stoppedEpoch: number;\n private best: number;\n\n constructor(args?: EarlyStoppingCallbackArgs) {\n super();\n if (args == null) {\n args = {};\n }\n if (args.restoreBestWeights) {\n throw new NotImplementedError(\n 'restoreBestWeights = True is not implemented in EarlyStopping yet.');\n }\n\n this.monitor = args.monitor || 'val_loss';\n this.minDelta = Math.abs(args.minDelta || 0);\n this.patience = args.patience || 0;\n this.verbose = args.verbose || 0;\n this.mode = args.mode || 'auto';\n this.baseline = args.baseline;\n\n if (['auto', 'min', 'max'].indexOf(this.mode) === -1) {\n console.warn(\n `EarlyStopping mode '${this.mode}' is invalid. ` +\n `Falling back to mode 'auto'.`);\n this.mode = 'auto';\n }\n\n if (this.mode === 'min') {\n this.monitorFunc = less;\n } else if (this.mode === 'max') {\n this.monitorFunc = greater;\n } else {\n // For mode === 'auto'.\n if (this.monitor.indexOf('acc') !== -1) {\n this.monitorFunc = greater;\n } else {\n this.monitorFunc = less;\n }\n }\n\n if (this.monitorFunc === less) {\n this.minDelta *= -1;\n }\n }\n\n override async onTrainBegin(logs?: Logs) {\n this.wait = 0;\n this.stoppedEpoch = 0;\n if (this.baseline != null) {\n this.best = this.baseline;\n } else {\n this.best = this.monitorFunc === less ? Infinity : -Infinity;\n }\n }\n\n override async onEpochEnd(epoch: number, logs?: Logs) {\n await resolveScalarsInLogs(logs);\n const current = this.getMonitorValue(logs);\n if (current == null) {\n return;\n }\n\n if (this.monitorFunc(current - this.minDelta, this.best)) {\n this.best = current;\n this.wait = 0;\n // TODO(cais): Logic for restoreBestWeights.\n } else {\n this.wait++;\n if (this.wait >= this.patience) {\n this.stoppedEpoch = epoch;\n this.model.stopTraining = true;\n }\n // TODO(cais): Logic for restoreBestWeights.\n }\n }\n\n override async onTrainEnd(logs?: Logs) {\n if (this.stoppedEpoch > 0 && this.verbose) {\n console.log(`Epoch ${this.stoppedEpoch}: early stopping.`);\n }\n }\n\n private getMonitorValue(logs: Logs) {\n if (logs == null) {\n logs = {};\n }\n const monitorValue = logs[this.monitor];\n if (monitorValue == null) {\n console.warn(\n `Metric for EarlyStopping ${this.monitor} is not available. 
` +\n `Available metrics are: ${Object.keys(logs)}`);\n }\n return monitorValue;\n }\n}\n\n/**\n * Factory function for a Callback that stops training when a monitored\n * quantity has stopped improving.\n *\n * Early stopping is a type of regularization, and protects model against\n * overfitting.\n *\n * The following example based on fake data illustrates how this callback\n * can be used during `tf.LayersModel.fit()`:\n *\n * ```js\n * const model = tf.sequential();\n * model.add(tf.layers.dense({\n * units: 3,\n * activation: 'softmax',\n * kernelInitializer: 'ones',\n * inputShape: [2]\n * }));\n * const xs = tf.tensor2d([1, 2, 3, 4], [2, 2]);\n * const ys = tf.tensor2d([[1, 0, 0], [0, 1, 0]], [2, 3]);\n * const xsVal = tf.tensor2d([4, 3, 2, 1], [2, 2]);\n * const ysVal = tf.tensor2d([[0, 0, 1], [0, 1, 0]], [2, 3]);\n * model.compile(\n * {loss: 'categoricalCrossentropy', optimizer: 'sgd', metrics: ['acc']});\n *\n * // Without the EarlyStopping callback, the val_acc value would be:\n * // 0.5, 0.5, 0.5, 0.5, ...\n * // With val_acc being monitored, training should stop after the 2nd epoch.\n * const history = await model.fit(xs, ys, {\n * epochs: 10,\n * validationData: [xsVal, ysVal],\n * callbacks: tf.callbacks.earlyStopping({monitor: 'val_acc'})\n * });\n *\n * // Expect to see a length-2 array.\n * console.log(history.history.val_acc);\n * ```\n *\n * @doc {\n * heading: 'Callbacks',\n * namespace: 'callbacks'\n * }\n */\nexport function earlyStopping(args?: EarlyStoppingCallbackArgs) {\n return new EarlyStopping(args);\n}\n\nexport const callbacks = {earlyStopping};\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * =============================================================================\n */\n\n/* tslint:disable */\n\n/** Properties of an Any. */\nexport declare interface IAny {\n /** Any typeUrl */\n typeUrl?: (string|null);\n\n /** Any value */\n value?: (Uint8Array|null);\n}\n\n/** DataType enum. */\nexport enum DataType {\n // Not a legal value for DataType. Used to indicate a DataType field\n // has not been set.\n DT_INVALID = 0,\n\n // Data types that all computation devices are expected to be\n // capable to support.\n DT_FLOAT = 1,\n DT_DOUBLE = 2,\n DT_INT32 = 3,\n DT_UINT8 = 4,\n DT_INT16 = 5,\n DT_INT8 = 6,\n DT_STRING = 7,\n DT_COMPLEX64 = 8, // Single-precision complex\n DT_INT64 = 9,\n DT_BOOL = 10,\n DT_QINT8 = 11, // Quantized int8\n DT_QUINT8 = 12, // Quantized uint8\n DT_QINT32 = 13, // Quantized int32\n DT_BFLOAT16 = 14, // Float32 truncated to 16 bits. Only for cast ops.\n DT_QINT16 = 15, // Quantized int16\n DT_QUINT16 = 16, // Quantized uint16\n DT_UINT16 = 17,\n DT_COMPLEX128 = 18, // Double-precision complex\n DT_HALF = 19,\n DT_RESOURCE = 20,\n DT_VARIANT = 21, // Arbitrary C++ data types\n DT_UINT32 = 22,\n DT_UINT64 = 23,\n\n // Do not use! These are only for parameters. 
Every enum above\n // should have a corresponding value below (verified by types_test).\n DT_FLOAT_REF = 101,\n DT_DOUBLE_REF = 102,\n DT_INT32_REF = 103,\n DT_UINT8_REF = 104,\n DT_INT16_REF = 105,\n DT_INT8_REF = 106,\n DT_STRING_REF = 107,\n DT_COMPLEX64_REF = 108,\n DT_INT64_REF = 109,\n DT_BOOL_REF = 110,\n DT_QINT8_REF = 111,\n DT_QUINT8_REF = 112,\n DT_QINT32_REF = 113,\n DT_BFLOAT16_REF = 114,\n DT_QINT16_REF = 115,\n DT_QUINT16_REF = 116,\n DT_UINT16_REF = 117,\n DT_COMPLEX128_REF = 118,\n DT_HALF_REF = 119,\n DT_RESOURCE_REF = 120,\n DT_VARIANT_REF = 121,\n DT_UINT32_REF = 122,\n DT_UINT64_REF = 123,\n}\n\n/** Properties of a TensorShape. */\nexport declare interface ITensorShape {\n /** TensorShape dim */\n dim?: (TensorShape.IDim[]|null);\n\n /** TensorShape unknownRank */\n unknownRank?: (boolean|null);\n}\n\nexport namespace TensorShape {\n /** Properties of a Dim. */\n export declare interface IDim {\n /** Dim size */\n size?: (number|string|null);\n\n /** Dim name */\n name?: (string|null);\n }\n}\n\n/** Properties of a Tensor. */\nexport declare interface ITensor {\n /** Tensor dtype */\n dtype?: (DataType|null);\n\n /** Tensor tensorShape */\n tensorShape?: (ITensorShape|null);\n\n /** Tensor versionNumber */\n versionNumber?: (number|null);\n\n /** Tensor tensorContent */\n tensorContent?: (Uint8Array|null);\n\n /** Tensor floatVal */\n floatVal?: (number[]|null);\n\n /** Tensor doubleVal */\n doubleVal?: (number[]|null);\n\n /** Tensor intVal */\n intVal?: (number[]|null);\n\n /** Tensor stringVal */\n stringVal?: (Uint8Array[]|null);\n\n /** Tensor scomplexVal */\n scomplexVal?: (number[]|null);\n\n /** Tensor int64Val */\n int64Val?: ((number | string)[]|null);\n\n /** Tensor boolVal */\n boolVal?: (boolean[]|null);\n\n /** Tensor uint32Val */\n uint32Val?: (number[]|null);\n\n /** Tensor uint64Val */\n uint64Val?: ((number | string)[]|null);\n}\n\n/** Properties of an AttrValue. */\nexport declare interface IAttrValue {\n /** AttrValue list */\n list?: (AttrValue.IListValue|null);\n\n /** AttrValue s */\n s?: (string|null);\n\n /** AttrValue i */\n i?: (number|string|null);\n\n /** AttrValue f */\n f?: (number|null);\n\n /** AttrValue b */\n b?: (boolean|null);\n\n /** AttrValue type */\n type?: (DataType|null);\n\n /** AttrValue shape */\n shape?: (ITensorShape|null);\n\n /** AttrValue tensor */\n tensor?: (ITensor|null);\n\n /** AttrValue placeholder */\n placeholder?: (string|null);\n\n /** AttrValue func */\n func?: (INameAttrList|null);\n}\n\nexport namespace AttrValue {\n /** Properties of a ListValue. */\n export declare interface IListValue {\n /** ListValue s */\n s?: (string[]|null);\n\n /** ListValue i */\n i?: ((number | string)[]|null);\n\n /** ListValue f */\n f?: (number[]|null);\n\n /** ListValue b */\n b?: (boolean[]|null);\n\n /** ListValue type */\n type?: (DataType[]|null);\n\n /** ListValue shape */\n shape?: (ITensorShape[]|null);\n\n /** ListValue tensor */\n tensor?: (ITensor[]|null);\n\n /** ListValue func */\n func?: (INameAttrList[]|null);\n }\n}\n\n/** Properties of a NameAttrList. */\nexport declare interface INameAttrList {\n /** NameAttrList name */\n name?: (string|null);\n\n /** NameAttrList attr */\n attr?: ({[k: string]: IAttrValue}|null);\n}\n\n/** Properties of a NodeDef. 
*/\nexport declare interface INodeDef {\n /** NodeDef name */\n name?: (string|null);\n\n /** NodeDef op */\n op?: (string|null);\n\n /** NodeDef input */\n input?: (string[]|null);\n\n /** NodeDef device */\n device?: (string|null);\n\n /** NodeDef attr */\n attr?: ({[k: string]: IAttrValue}|null);\n}\n\n/** Properties of a VersionDef. */\nexport declare interface IVersionDef {\n /** VersionDef producer */\n producer?: (number|null);\n\n /** VersionDef minConsumer */\n minConsumer?: (number|null);\n\n /** VersionDef badConsumers */\n badConsumers?: (number[]|null);\n}\n\n/** Properties of a GraphDef. */\nexport declare interface IGraphDef {\n /** GraphDef node */\n node?: (INodeDef[]|null);\n\n /** GraphDef versions */\n versions?: (IVersionDef|null);\n\n /** GraphDef library */\n library?: (IFunctionDefLibrary|null);\n}\n\n/** Properties of a CollectionDef. */\nexport declare interface ICollectionDef {\n /** CollectionDef nodeList */\n nodeList?: (CollectionDef.INodeList|null);\n\n /** CollectionDef bytesList */\n bytesList?: (CollectionDef.IBytesList|null);\n\n /** CollectionDef int64List */\n int64List?: (CollectionDef.IInt64List|null);\n\n /** CollectionDef floatList */\n floatList?: (CollectionDef.IFloatList|null);\n\n /** CollectionDef anyList */\n anyList?: (CollectionDef.IAnyList|null);\n}\n\nexport namespace CollectionDef {\n /** Properties of a NodeList. */\n export declare interface INodeList {\n /** NodeList value */\n value?: (string[]|null);\n }\n\n /** Properties of a BytesList. */\n export declare interface IBytesList {\n /** BytesList value */\n value?: (Uint8Array[]|null);\n }\n\n /** Properties of an Int64List. */\n export declare interface IInt64List {\n /** Int64List value */\n value?: ((number | string)[]|null);\n }\n\n /** Properties of a FloatList. */\n export declare interface IFloatList {\n /** FloatList value */\n value?: (number[]|null);\n }\n\n /** Properties of an AnyList. */\n export declare interface IAnyList {\n /** AnyList value */\n value?: (IAny[]|null);\n }\n}\n\n/** Properties of a SaverDef. */\nexport declare interface ISaverDef {\n /** SaverDef filenameTensorName */\n filenameTensorName?: (string|null);\n\n /** SaverDef saveTensorName */\n saveTensorName?: (string|null);\n\n /** SaverDef restoreOpName */\n restoreOpName?: (string|null);\n\n /** SaverDef maxToKeep */\n maxToKeep?: (number|null);\n\n /** SaverDef sharded */\n sharded?: (boolean|null);\n\n /** SaverDef keepCheckpointEveryNHours */\n keepCheckpointEveryNHours?: (number|null);\n\n /** SaverDef version */\n version?: (SaverDef.CheckpointFormatVersion|null);\n}\n\nexport namespace SaverDef {\n /** CheckpointFormatVersion enum. */\n export enum CheckpointFormatVersion {'LEGACY' = 0, 'V1' = 1, 'V2' = 2}\n}\n\n/** Properties of a TensorInfo. */\nexport declare interface ITensorInfo {\n /** TensorInfo name */\n name?: (string|null);\n\n /** TensorInfo cooSparse */\n cooSparse?: (TensorInfo.ICooSparse|null);\n\n /** TensorInfo dtype */\n dtype?: (DataType|string|null);\n\n /** TensorInfo tensorShape */\n tensorShape?: (ITensorShape|null);\n\n /** Resource id tensor was originally assigned to. */\n resourceId?: (number|null);\n}\n\nexport namespace TensorInfo {\n /** Properties of a CooSparse. 
*/\n export declare interface ICooSparse {\n /** CooSparse valuesTensorName */\n valuesTensorName?: (string|null);\n\n /** CooSparse indicesTensorName */\n indicesTensorName?: (string|null);\n\n /** CooSparse denseShapeTensorName */\n denseShapeTensorName?: (string|null);\n }\n}\n\n/** Properties of a SignatureDef. */\nexport declare interface ISignatureDef {\n /** SignatureDef inputs */\n inputs?: ({[k: string]: ITensorInfo}|null);\n\n /** SignatureDef outputs */\n outputs?: ({[k: string]: ITensorInfo}|null);\n\n /** SignatureDef methodName */\n methodName?: (string|null);\n}\n\n/** Properties of an AssetFileDef. */\nexport declare interface IAssetFileDef {\n /** AssetFileDef tensorInfo */\n tensorInfo?: (ITensorInfo|null);\n\n /** AssetFileDef filename */\n filename?: (string|null);\n}\n\n/** Properties of an OpDef. */\nexport declare interface IOpDef {\n /** OpDef name */\n name?: (string|null);\n\n /** OpDef inputArg */\n inputArg?: (OpDef.IArgDef[]|null);\n\n /** OpDef outputArg */\n outputArg?: (OpDef.IArgDef[]|null);\n\n /** OpDef attr */\n attr?: (OpDef.IAttrDef[]|null);\n\n /** OpDef deprecation */\n deprecation?: (OpDef.IOpDeprecation|null);\n\n /** OpDef summary */\n summary?: (string|null);\n\n /** OpDef description */\n description?: (string|null);\n\n /** OpDef isCommutative */\n isCommutative?: (boolean|null);\n\n /** OpDef isAggregate */\n isAggregate?: (boolean|null);\n\n /** OpDef isStateful */\n isStateful?: (boolean|null);\n\n /** OpDef allowsUninitializedInput */\n allowsUninitializedInput?: (boolean|null);\n}\n\nexport namespace OpDef {\n /** Properties of an ArgDef. */\n export declare interface IArgDef {\n /** ArgDef name */\n name?: (string|null);\n\n /** ArgDef description */\n description?: (string|null);\n\n /** ArgDef type */\n type?: (DataType|null);\n\n /** ArgDef typeAttr */\n typeAttr?: (string|null);\n\n /** ArgDef numberAttr */\n numberAttr?: (string|null);\n\n /** ArgDef typeListAttr */\n typeListAttr?: (string|null);\n\n /** ArgDef isRef */\n isRef?: (boolean|null);\n }\n\n /** Properties of an AttrDef. */\n export declare interface IAttrDef {\n /** AttrDef name */\n name?: (string|null);\n\n /** AttrDef type */\n type?: (string|null);\n\n /** AttrDef defaultValue */\n defaultValue?: (IAttrValue|null);\n\n /** AttrDef description */\n description?: (string|null);\n\n /** AttrDef hasMinimum */\n hasMinimum?: (boolean|null);\n\n /** AttrDef minimum */\n minimum?: (number|string|null);\n\n /** AttrDef allowedValues */\n allowedValues?: (IAttrValue|null);\n }\n\n /** Properties of an OpDeprecation. */\n export declare interface IOpDeprecation {\n /** OpDeprecation version */\n version?: (number|null);\n\n /** OpDeprecation explanation */\n explanation?: (string|null);\n }\n}\n\n/** Properties of an OpList. */\nexport declare interface IOpList {\n /** OpList op */\n op?: (IOpDef[]|null);\n}\n\n/** Properties of a MetaGraphDef. */\nexport declare interface IMetaGraphDef {\n /** MetaGraphDef metaInfoDef */\n metaInfoDef?: (MetaGraphDef.IMetaInfoDef|null);\n\n /** MetaGraphDef graphDef */\n graphDef?: (IGraphDef|null);\n\n /** MetaGraphDef saverDef */\n saverDef?: (ISaverDef|null);\n\n /** MetaGraphDef collectionDef */\n collectionDef?: ({[k: string]: ICollectionDef}|null);\n\n /** MetaGraphDef signatureDef */\n signatureDef?: ({[k: string]: ISignatureDef}|null);\n\n /** MetaGraphDef assetFileDef */\n assetFileDef?: (IAssetFileDef[]|null);\n}\n\nexport namespace MetaGraphDef {\n /** Properties of a MetaInfoDef. 
*/\n export declare interface IMetaInfoDef {\n /** MetaInfoDef metaGraphVersion */\n metaGraphVersion?: (string|null);\n\n /** MetaInfoDef strippedOpList */\n strippedOpList?: (IOpList|null);\n\n /** MetaInfoDef anyInfo */\n anyInfo?: (IAny|null);\n\n /** MetaInfoDef tags */\n tags?: (string[]|null);\n\n /** MetaInfoDef tensorflowVersion */\n tensorflowVersion?: (string|null);\n\n /** MetaInfoDef tensorflowGitVersion */\n tensorflowGitVersion?: (string|null);\n }\n}\n\n/** Properties of a SavedModel. */\nexport declare interface ISavedModel {\n /** SavedModel savedModelSchemaVersion */\n savedModelSchemaVersion?: (number|string|null);\n\n /** SavedModel metaGraphs */\n metaGraphs?: (IMetaGraphDef[]|null);\n}\n\n/** Properties of a FunctionDefLibrary. */\nexport declare interface IFunctionDefLibrary {\n /** FunctionDefLibrary function */\n 'function'?: (IFunctionDef[]|null);\n\n /** FunctionDefLibrary gradient */\n gradient?: (IGradientDef[]|null);\n}\n\n/** Properties of a FunctionDef. */\nexport declare interface IFunctionDef {\n /** FunctionDef signature */\n signature?: (IOpDef|null);\n\n /** FunctionDef attr */\n attr?: ({[k: string]: IAttrValue}|null);\n\n /** FunctionDef nodeDef */\n nodeDef?: (INodeDef[]|null);\n\n /** FunctionDef ret */\n ret?: ({[k: string]: string}|null);\n}\n\n/** Properties of a GradientDef. */\nexport declare interface IGradientDef {\n /** GradientDef functionName */\n functionName?: (string|null);\n\n /** GradientDef gradientFunc */\n gradientFunc?: (string|null);\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env} from '@tensorflow/tfjs-core';\n\nconst ENV = env();\n\n/** Whether to keep intermediate tensors. */\nENV.registerFlag('KEEP_INTERMEDIATE_TENSORS', () => false, debugValue => {\n if (debugValue) {\n console.warn(\n 'Keep intermediate tensors is ON. This will print the values of all ' +\n 'intermediate tensors during model inference. Not all models ' +\n 'support this mode. For details, check e2e/benchmarks/ ' +\n 'model_config.js. This significantly impacts performance.');\n }\n});\n","\n/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpExecutor, OpMapper} from '../types';\n\nconst CUSTOM_OPS: {[key: string]: OpMapper} = {};\n\n/**\n * Register an Op for graph model executor. This allows you to register\n * TensorFlow custom op or override existing op.\n *\n * Here is an example of registering a new MatMul Op.\n * ```js\n * const customMatmul = (node) =>\n * tf.matMul(\n * node.inputs[0], node.inputs[1],\n * node.attrs['transpose_a'], node.attrs['transpose_b']);\n *\n * tf.registerOp('MatMul', customMatmul);\n * ```\n * The inputs and attrs of the node object are based on the TensorFlow op\n * registry.\n *\n * @param name The Tensorflow Op name.\n * @param opFunc An op function which is called with the current graph node\n * during execution and needs to return a tensor or a list of tensors. The node\n * has the following attributes:\n * - attr: A map from attribute name to its value\n * - inputs: A list of input tensors\n *\n * @doc {heading: 'Models', subheading: 'Op Registry'}\n */\nexport function registerOp(name: string, opFunc: OpExecutor) {\n const opMapper: OpMapper = {\n tfOpName: name,\n category: 'custom',\n inputs: [],\n attrs: [],\n customExecutor: opFunc\n };\n\n CUSTOM_OPS[name] = opMapper;\n}\n\n/**\n * Retrieve the OpMapper object for the registered op.\n *\n * @param name The Tensorflow Op name.\n *\n * @doc {heading: 'Models', subheading: 'Op Registry'}\n */\nexport function getRegisteredOp(name: string): OpMapper {\n return CUSTOM_OPS[name];\n}\n\n/**\n * Deregister the Op for graph model executor.\n *\n * @param name The Tensorflow Op name.\n *\n * @doc {heading: 'Models', subheading: 'Op Registry'}\n */\nexport function deregisterOp(name: string) {\n delete CUSTOM_OPS[name];\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {clone, Tensor, util} from '@tensorflow/tfjs-core';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {ResourceManager} from '../../executor/resource_manager';\nimport {Node, ValueType} from '../types';\n\nexport function getParamValue(\n paramName: string, node: Node, tensorMap: NamedTensorsMap,\n context: ExecutionContext, resourceManager?: ResourceManager): ValueType {\n const inputParam = node.inputParams[paramName];\n if (inputParam && inputParam.inputIndexStart !== undefined) {\n const start = inputParam.inputIndexStart;\n const end = inputParam.inputIndexEnd === 0 ?\n undefined :\n (inputParam.inputIndexEnd === undefined ? start + 1 :\n inputParam.inputIndexEnd);\n if (inputParam.type === 'tensor') {\n return getTensor(\n node.inputNames[inputParam.inputIndexStart], tensorMap, context,\n resourceManager);\n }\n if (inputParam.type === 'tensors') {\n const inputs = node.inputNames.slice(start, end);\n\n return inputs.map(\n name => getTensor(name, tensorMap, context, resourceManager));\n }\n const tensor = getTensor(\n node.inputNames.slice(start)[0], tensorMap, context, resourceManager);\n const data = tensor.dataSync();\n return inputParam.type === 'number' ?\n data[0] :\n util.toNestedArray(tensor.shape, data);\n }\n const attrParam = node.attrParams[paramName];\n return attrParam && attrParam.value;\n}\n\n/**\n * Retrieve the tensor from tensorsMap based on input name.\n * @param name Node input name\n * @param tensorsMap Tensors map keyed by the node\n * @param context contains tensors and information for running the current node.\n * @param resourceManager Optional. 
Contains global resources of the model.\n */\nexport function getTensor(\n name: string, tensorsMap: NamedTensorsMap, context: ExecutionContext,\n resourceManager?: ResourceManager): Tensor {\n const [nodeName, index] = parseNodeName(name);\n\n if (resourceManager != null) {\n const tensor = resourceManager.getHashTableHandleByName(nodeName);\n if (tensor != null) {\n return tensor;\n }\n }\n\n const contextId = context.currentContextIds.find(contextId => {\n return !!tensorsMap[getNodeNameWithContextId(nodeName, contextId)];\n });\n\n return contextId !== undefined ?\n tensorsMap[getNodeNameWithContextId(nodeName, contextId)][index] :\n undefined;\n}\n\n/**\n * Retrieve the tensors based on input name for current context.\n * @param name Node input name\n * @param tensorsMap Tensors map keyed by the node\n */\nexport function getTensorsForCurrentContenxt(\n name: string, tensorsMap: NamedTensorsMap,\n context: ExecutionContext): Tensor[] {\n return tensorsMap[getNodeNameWithContextId(name, context.currentContextId)];\n}\n\n/**\n * Returns the node name, outputName and index from the Node input name.\n * @param inputName The input name of the node, in format of\n * node_name:output_index, i.e. MatMul:0, if the output_index is not set, it is\n * default to 0.\n * If the input name contains output name i.e. StringSplit:indices:0, it will\n * return ['StringSplit', 0, 'indices'].\n */\nexport function getNodeNameAndIndex(\n inputName: string, context?: ExecutionContext): [string, number, string] {\n const [nodeName, index, outputName] = parseNodeName(inputName);\n\n return [\n getNodeNameWithContextId(nodeName, context && context.currentContextId),\n index, outputName\n ];\n}\n\nfunction getNodeNameWithContextId(name: string, contextId?: string): string {\n return !!contextId ? `${name}-${contextId}` : name;\n}\n\nexport function parseNodeName(name: string): [string, number, string] {\n const parts = name.split(':');\n if (parts.length === 1) {\n return [name, 0, undefined];\n }\n\n const nodeName = parts[0];\n const outputName = parts.length === 3 ? parts[1] : undefined;\n const index = Number(parts[parts.length - 1]);\n return [nodeName, index, outputName];\n}\n\nexport function split(arr: number[], size: number) {\n const res = [];\n for (let i = 0; i < arr.length; i += size) {\n res.push(arr.slice(i, i + size));\n }\n return res;\n}\nexport function getPadding(\n node: Node, tensorMap: NamedTensorsMap,\n context: ExecutionContext): ValueType {\n let pad = getParamValue('pad', node, tensorMap, context);\n if (pad === 'explicit') {\n // This is 1d array, we need to convert it to 2d array\n pad = getParamValue('explicitPaddings', node, tensorMap, context);\n const explicitPadding: [\n [number, number], [number, number], [number, number], [number, number]\n ] = [[0, 0], [0, 0], [0, 0], [0, 0]];\n for (let i = 0; i < 4; i++) {\n explicitPadding[i][0] = (pad as number[])[i * 2];\n explicitPadding[i][1] = (pad as number[])[i * 2 + 1];\n }\n return explicitPadding;\n }\n return pad;\n}\n\n/**\n * Reuse the tensor if it is marked as keep, otherwise clone the tensor to\n * avoid disposal. 
This is important for TensorArray and TensorList ops, since\n * internally they use a tensor as the id for TensorArray and TensorList, and\n * to simplify lookup, they also use Tensor.id as the key to the internal map.\n * These id tensors have been marked as kept in the backend, we need avoid clone\n * them in order to create new Tensor.id.\n * @param tensor\n */\nexport function cloneTensor(tensor: Tensor): Tensor {\n return tensor.kept ? tensor : clone(tensor);\n}\n","\n/**\n * @license\n * Copyright 2023 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpMapper} from '../types';\n\nexport const json: OpMapper[] = [\n {\n 'tfOpName': 'Add',\n 'category': 'arithmetic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'AddV2',\n 'category': 'arithmetic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'AddN',\n 'category': 'arithmetic',\n 'inputs': [\n {\n 'start': 0,\n 'end': 0,\n 'name': 'tensors',\n 'type': 'tensors'\n }\n ]\n },\n {\n 'tfOpName': 'BiasAdd',\n 'category': 'arithmetic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n },\n {\n 'tfName': 'data_format',\n 'name': 'dataFormat',\n 'type': 'string',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Sub',\n 'category': 'arithmetic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'RealDiv',\n 'category': 'arithmetic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Div',\n 'category': 'arithmetic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'DivNoNan',\n 'category': 'arithmetic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 
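The `parseNodeName` helper embedded above encodes the tensor-naming convention used throughout these utilities. It is internal to tfjs-converter rather than part of the public API, so the following standalone sketch simply restates the documented behaviour for readability:

```ts
// Documented parsing rules (see the doc comment above):
//   'MatMul'                -> ['MatMul', 0, undefined]       (index defaults to 0)
//   'MatMul:1'              -> ['MatMul', 1, undefined]
//   'StringSplit:indices:0' -> ['StringSplit', 0, 'indices']  (middle part is the output name)
function parseNodeName(name: string): [string, number, string | undefined] {
  const parts = name.split(':');
  if (parts.length === 1) {
    return [name, 0, undefined];
  }
  const nodeName = parts[0];
  const outputName = parts.length === 3 ? parts[1] : undefined;
  const index = Number(parts[parts.length - 1]);
  return [nodeName, index, outputName];
}

console.log(parseNodeName('StringSplit:indices:0')); // ['StringSplit', 0, 'indices']
```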
'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'FloorDiv',\n 'category': 'arithmetic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Mul',\n 'category': 'arithmetic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Maximum',\n 'category': 'arithmetic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Minimum',\n 'category': 'arithmetic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Pow',\n 'category': 'arithmetic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'SquaredDifference',\n 'category': 'arithmetic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Mod',\n 'category': 'arithmetic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'FloorMod',\n 'category': 'arithmetic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n }\n];\n","\n/**\n * @license\n * Copyright 2023 Google LLC. 
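In the op-mapper tables above, variadic inputs are expressed with `'end': 0` (for example AddN's `tensors` input). Per the slicing logic in `getParamValue` earlier in this payload, an end of 0 means "through the last input", a missing end means a single input, and any other value is an explicit exclusive bound. A small sketch of that interpretation follows; `resolveInputRange` is an illustrative name, not a function in the library:

```ts
// Mirrors how getParamValue slices node.inputNames for a 'tensors' param:
//   end === 0          -> slice to the end of the input list (variadic)
//   end === undefined  -> exactly one input, [start, start + 1)
//   otherwise          -> the explicit range [start, end)
function resolveInputRange(
    inputNames: string[], start: number, end?: number): string[] {
  const sliceEnd = end === 0 ? undefined : (end === undefined ? start + 1 : end);
  return inputNames.slice(start, sliceEnd);
}

// AddN ({'start': 0, 'end': 0}) with four inputs gathers all of them:
console.log(resolveInputRange(['a', 'b', 'c', 'd'], 0, 0)); // ['a', 'b', 'c', 'd']
```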
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpMapper} from '../types';\n\nexport const json: OpMapper[] = [\n {\n 'tfOpName': 'Abs',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Acos',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Asin',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Atan',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Atan2',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'y',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Ceil',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'ClipByValue',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'clipValueMin',\n 'type': 'number'\n },\n {\n 'start': 2,\n 'name': 'clipValueMax',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Complex',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'real',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'imag',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'ComplexAbs',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Cos',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Cosh',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 
'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Elu',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Exp',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Floor',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Log',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Imag',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n },\n {\n 'tfName': 'Tout',\n 'name': 'outputType',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Neg',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Real',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n },\n {\n 'tfName': 'Tout',\n 'name': 'outputType',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Prelu',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'alpha',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Relu',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Relu6',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Selu',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Sigmoid',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Sin',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Sinh',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 
'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Sqrt',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Rsqrt',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Square',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Tan',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Tanh',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Sign',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Round',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Expm1',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Log1p',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Reciprocal',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Softplus',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Asinh',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Acosh',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Atanh',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Erf',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 
'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Prod',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'axes',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'keep_dims',\n 'name': 'keepDims',\n 'type': 'bool',\n 'notSupported': true\n },\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'LeakyRelu',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'alpha',\n 'name': 'alpha',\n 'type': 'number',\n 'defaultValue': 0.2\n },\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'IsNan',\n 'category': 'basic_math',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n }\n];\n","\n/**\n * @license\n * Copyright 2023 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpMapper} from '../types';\n\nexport const json: OpMapper[] = [\n {\n 'tfOpName': 'EmptyTensorList',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'elementShape',\n 'type': 'shape'\n },\n {\n 'start': 1,\n 'name': 'maxNumElements',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'element_dtype',\n 'name': 'elementDType',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'LoopCond',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'pred',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'Switch',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'data',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'pred',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'Merge',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'end': 0,\n 'name': 'tensors',\n 'type': 'tensors'\n }\n ]\n },\n {\n 'tfOpName': 'Enter',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensor',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n },\n {\n 'tfName': 'frame_name',\n 'name': 'frameName',\n 'type': 'string'\n },\n {\n 'tfName': 'is_constant',\n 'name': 'isConstant',\n 'type': 'bool'\n }\n ]\n },\n {\n 'tfOpName': 'Exit',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensor',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'NextIteration',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensor',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n 
]\n },\n {\n 'tfOpName': 'TensorArrayV3',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'size',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'dtype',\n 'name': 'dtype',\n 'type': 'dtype'\n },\n {\n 'tfName': 'element_shape',\n 'name': 'elementShape',\n 'type': 'shape'\n },\n {\n 'tfName': 'dynamic_size',\n 'name': 'dynamicSize',\n 'type': 'bool'\n },\n {\n 'tfName': 'clear_after_read',\n 'name': 'clearAfterRead',\n 'type': 'bool'\n },\n {\n 'tfName': 'identical_element_shapes',\n 'name': 'identicalElementShapes',\n 'type': 'bool'\n },\n {\n 'tfName': 'tensor_array_name',\n 'name': 'name',\n 'type': 'string'\n }\n ]\n },\n {\n 'tfOpName': 'TensorArrayWriteV3',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensorArrayId',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'index',\n 'type': 'number'\n },\n {\n 'start': 2,\n 'name': 'tensor',\n 'type': 'tensor'\n },\n {\n 'start': 3,\n 'name': 'flowIn',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'TensorArrayReadV3',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensorArrayId',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'index',\n 'type': 'number'\n },\n {\n 'start': 2,\n 'name': 'flowIn',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'dtype',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'TensorArrayGatherV3',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensorArrayId',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'indices',\n 'type': 'number[]'\n },\n {\n 'start': 2,\n 'name': 'flowIn',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'dtype',\n 'name': 'dtype',\n 'type': 'dtype'\n },\n {\n 'tfName': 'element_shape',\n 'name': 'elementShape',\n 'type': 'shape'\n }\n ]\n },\n {\n 'tfOpName': 'TensorArrayScatterV3',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensorArrayId',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'indices',\n 'type': 'number[]'\n },\n {\n 'start': 2,\n 'name': 'tensor',\n 'type': 'tensor'\n },\n {\n 'start': 3,\n 'name': 'flowIn',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'TensorArrayConcatV3',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensorArrayId',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'flowIn',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'dtype',\n 'name': 'dtype',\n 'type': 'dtype'\n },\n {\n 'tfName': 'element_shape_except0',\n 'name': 'elementShapeExcept0',\n 'type': 'shape',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'TensorArraySplitV3',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensorArrayId',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'tensor',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'lengths',\n 'type': 'number[]'\n },\n {\n 'start': 3,\n 'name': 'flowIn',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'TensorArraySizeV3',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensorArrayId',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'flowIn',\n 'type': 'number'\n }\n ]\n },\n {\n 'tfOpName': 'TensorArrayCloseV3',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 
'tensorArrayId',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'StatelessIf',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'cond',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'end': 0,\n 'name': 'args',\n 'type': 'tensors'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'then_branch',\n 'name': 'thenBranch',\n 'type': 'func'\n },\n {\n 'tfName': 'else_branch',\n 'name': 'elseBranch',\n 'type': 'func'\n }\n ]\n },\n {\n 'tfOpName': 'If',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'cond',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'end': 0,\n 'name': 'args',\n 'type': 'tensors'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'then_branch',\n 'name': 'thenBranch',\n 'type': 'func'\n },\n {\n 'tfName': 'else_branch',\n 'name': 'elseBranch',\n 'type': 'func'\n }\n ]\n },\n {\n 'tfOpName': 'StatelessWhile',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'end': 0,\n 'name': 'args',\n 'type': 'tensors'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'cond',\n 'name': 'cond',\n 'type': 'func'\n },\n {\n 'tfName': 'body',\n 'name': 'body',\n 'type': 'func'\n }\n ]\n },\n {\n 'tfOpName': 'While',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'end': 0,\n 'name': 'args',\n 'type': 'tensors'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'cond',\n 'name': 'cond',\n 'type': 'func'\n },\n {\n 'tfName': 'body',\n 'name': 'body',\n 'type': 'func'\n }\n ]\n },\n {\n 'tfOpName': 'TensorListScatter',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensor',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'indices',\n 'type': 'number[]'\n },\n {\n 'start': 2,\n 'name': 'elementShape',\n 'type': 'shape'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'element_dtype',\n 'name': 'elementDType',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'TensorListScatterV2',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensor',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'indices',\n 'type': 'number[]'\n },\n {\n 'start': 2,\n 'name': 'elementShape',\n 'type': 'shape'\n },\n {\n 'start': 3,\n 'name': 'numElements',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'element_dtype',\n 'name': 'elementDType',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'TensorListGather',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensorListId',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'indices',\n 'type': 'number[]'\n },\n {\n 'start': 2,\n 'name': 'elementShape',\n 'type': 'shape'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'element_dtype',\n 'name': 'elementDType',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'TensorListGetItem',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensorListId',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'index',\n 'type': 'number'\n },\n {\n 'start': 2,\n 'name': 'elementShape',\n 'type': 'shape'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'element_dtype',\n 'name': 'elementDType',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'TensorListSetItem',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensorListId',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'index',\n 'type': 'number'\n },\n {\n 'start': 2,\n 'name': 'tensor',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'element_dtype',\n 'name': 'elementDType',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'TensorListReserve',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'elementShape',\n 'type': 'shape'\n },\n {\n 'start': 1,\n 'name': 
'numElements',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'element_dtype',\n 'name': 'elementDType',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'TensorListFromTensor',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensor',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'elementShape',\n 'type': 'shape'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'element_dtype',\n 'name': 'elementDType',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'TensorListStack',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensorListId',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'elementShape',\n 'type': 'shape'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'element_dtype',\n 'name': 'elementDType',\n 'type': 'dtype'\n },\n {\n 'tfName': 'num_elements',\n 'name': 'numElements',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'TensorListSplit',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensor',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'elementShape',\n 'type': 'shape'\n },\n {\n 'start': 2,\n 'name': 'lengths',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'element_dtype',\n 'name': 'elementDType',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'TensorListConcat',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensorListId',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'element_shape',\n 'name': 'elementShape',\n 'type': 'shape'\n },\n {\n 'tfName': 'element_dtype',\n 'name': 'elementDType',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'TensorListConcatV2',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensorListId',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'element_shape',\n 'name': 'elementShape',\n 'type': 'shape'\n },\n {\n 'tfName': 'element_dtype',\n 'name': 'elementDType',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'TensorListPopBack',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensorListId',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'elementShape',\n 'type': 'shape'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'element_dtype',\n 'name': 'elementDType',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'TensorListPushBack',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensorListId',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'tensor',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'element_dtype',\n 'name': 'elementDType',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'TensorListLength',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensorListId',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'TensorListResize',\n 'category': 'control',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensorListId',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'size',\n 'type': 'number'\n }\n ]\n }\n]\n;\n","\n/**\n * @license\n * Copyright 2023 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpMapper} from '../types';\n\nexport const json: OpMapper[] = [\n {\n 'tfOpName': 'AvgPool',\n 'category': 'convolution',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'strides',\n 'name': 'strides',\n 'type': 'number[]'\n },\n {\n 'tfName': 'padding',\n 'name': 'pad',\n 'type': 'string'\n },\n {\n 'tfName': 'data_format',\n 'name': 'dataFormat',\n 'type': 'string',\n 'notSupported': true\n },\n {\n 'tfName': 'ksize',\n 'name': 'kernelSize',\n 'type': 'number[]'\n },\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'MaxPool',\n 'category': 'convolution',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'strides',\n 'name': 'strides',\n 'type': 'number[]'\n },\n {\n 'tfName': 'padding',\n 'name': 'pad',\n 'type': 'string'\n },\n {\n 'tfName': 'data_format',\n 'name': 'dataFormat',\n 'type': 'string',\n 'notSupported': true\n },\n {\n 'tfName': 'ksize',\n 'name': 'kernelSize',\n 'type': 'number[]'\n },\n {\n 'tfName': 'explicit_paddings',\n 'name': 'explicitPaddings',\n 'type': 'number[]',\n 'defaultValue': [],\n 'notSupported': true\n },\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'MaxPoolWithArgmax',\n 'category': 'convolution',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'strides',\n 'name': 'strides',\n 'type': 'number[]'\n },\n {\n 'tfName': 'padding',\n 'name': 'pad',\n 'type': 'string'\n },\n {\n 'tfName': 'ksize',\n 'name': 'kernelSize',\n 'type': 'number[]'\n },\n {\n 'tfName': 'include_batch_in_index',\n 'name': 'includeBatchInIndex',\n 'type': 'bool'\n },\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'AvgPool3D',\n 'category': 'convolution',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'strides',\n 'name': 'strides',\n 'type': 'number[]'\n },\n {\n 'tfName': 'padding',\n 'name': 'pad',\n 'type': 'string'\n },\n {\n 'tfName': 'data_format',\n 'name': 'dataFormat',\n 'type': 'string',\n 'notSupported': true\n },\n {\n 'tfName': 'ksize',\n 'name': 'kernelSize',\n 'type': 'number[]'\n },\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'MaxPool3D',\n 'category': 'convolution',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'strides',\n 'name': 'strides',\n 'type': 'number[]'\n },\n {\n 'tfName': 'padding',\n 'name': 'pad',\n 'type': 'string'\n },\n {\n 'tfName': 'data_format',\n 'name': 'dataFormat',\n 'type': 'string',\n 'notSupported': true\n },\n {\n 'tfName': 
'ksize',\n 'name': 'kernelSize',\n 'type': 'number[]'\n },\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Conv1D',\n 'category': 'convolution',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'filter',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'stride',\n 'name': 'stride',\n 'type': 'number'\n },\n {\n 'tfName': 'padding',\n 'name': 'pad',\n 'type': 'string'\n },\n {\n 'tfName': 'data_format',\n 'name': 'dataFormat',\n 'type': 'string',\n 'defaultValue': 'NWC'\n },\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n },\n {\n 'tfName': 'dilation',\n 'name': 'dilation',\n 'type': 'number',\n 'defaultValue': 1\n }\n ]\n },\n {\n 'tfOpName': 'Conv2D',\n 'category': 'convolution',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'filter',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n },\n {\n 'tfName': 'strides',\n 'name': 'strides',\n 'type': 'number[]'\n },\n {\n 'tfName': 'padding',\n 'name': 'pad',\n 'type': 'string'\n },\n {\n 'tfName': 'useCudnnOnGpu',\n 'name': 'useCudnnOnGpu',\n 'type': 'bool'\n },\n {\n 'tfName': 'data_format',\n 'name': 'dataFormat',\n 'type': 'string',\n 'defaultValue': 'NHWC'\n },\n {\n 'tfName': 'explicit_paddings',\n 'name': 'explicitPaddings',\n 'type': 'number[]',\n 'defaultValue': []\n },\n {\n 'tfName': 'dilations',\n 'name': 'dilations',\n 'type': 'number[]'\n }\n ]\n },\n {\n 'tfOpName': '_FusedConv2D',\n 'category': 'convolution',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'filter',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'end': 0,\n 'name': 'args',\n 'type': 'tensors'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'num_args',\n 'name': 'numArgs',\n 'type': 'number'\n },\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n },\n {\n 'tfName': 'strides',\n 'name': 'strides',\n 'type': 'number[]'\n },\n {\n 'tfName': 'padding',\n 'name': 'pad',\n 'type': 'string'\n },\n {\n 'tfName': 'explicit_paddings',\n 'name': 'explicitPaddings',\n 'type': 'number[]',\n 'defaultValue': []\n },\n {\n 'tfName': 'use_cudnn_on_gpu',\n 'name': 'useCudnnOnGpu',\n 'type': 'bool',\n 'defaultValue': true\n },\n {\n 'tfName': 'data_format',\n 'name': 'dataFormat',\n 'type': 'string',\n 'defaultValue': 'NHWC'\n },\n {\n 'tfName': 'dilations',\n 'name': 'dilations',\n 'type': 'number[]',\n 'defaultValue': [\n 1,\n 1,\n 1,\n 1\n ]\n },\n {\n 'tfName': 'fused_ops',\n 'name': 'fusedOps',\n 'type': 'string[]',\n 'defaultValue': []\n },\n {\n 'tfName': 'epsilon',\n 'name': 'epsilon',\n 'type': 'number',\n 'defaultValue': 0.0001\n },\n {\n 'tfName': 'leakyrelu_alpha',\n 'name': 'leakyreluAlpha',\n 'type': 'number',\n 'defaultValue': 0.2\n }\n ]\n },\n {\n 'tfOpName': 'Conv2DBackpropInput',\n 'category': 'convolution',\n 'inputs': [\n {\n 'start': 2,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'filter',\n 'type': 'tensor'\n },\n {\n 'start': 0,\n 'name': 'outputShape',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'strides',\n 'name': 'strides',\n 'type': 'number[]'\n },\n {\n 'tfName': 'padding',\n 'name': 'pad',\n 'type': 'string'\n },\n {\n 'tfName': 'data_format',\n 'name': 'dataFormat',\n 'type': 'string',\n 'notSupported': true\n },\n {\n 'tfName': 'explicit_paddings',\n 'name': 
'explicitPaddings',\n 'type': 'number[]',\n 'defaultValue': []\n },\n {\n 'tfName': 'dilations',\n 'name': 'dilations',\n 'type': 'number[]',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'DepthwiseConv2d',\n 'category': 'convolution',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'input',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'filter',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'strides',\n 'name': 'strides',\n 'type': 'number[]'\n },\n {\n 'tfName': 'padding',\n 'name': 'pad',\n 'type': 'string'\n },\n {\n 'tfName': 'data_format',\n 'name': 'dataFormat',\n 'type': 'string',\n 'defaultValue': 'NHWC'\n },\n {\n 'tfName': 'explicit_paddings',\n 'name': 'explicitPaddings',\n 'type': 'number[]',\n 'defaultValue': []\n },\n {\n 'tfName': 'dilations',\n 'name': 'dilations',\n 'type': 'number[]'\n }\n ]\n },\n {\n 'tfOpName': 'DepthwiseConv2dNative',\n 'category': 'convolution',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'input',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'filter',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'strides',\n 'name': 'strides',\n 'type': 'number[]'\n },\n {\n 'tfName': 'padding',\n 'name': 'pad',\n 'type': 'string'\n },\n {\n 'tfName': 'data_format',\n 'name': 'dataFormat',\n 'type': 'string',\n 'defaultValue': 'NHWC'\n },\n {\n 'tfName': 'explicit_paddings',\n 'name': 'explicitPaddings',\n 'type': 'number[]',\n 'defaultValue': []\n },\n {\n 'tfName': 'dilations',\n 'name': 'dilations',\n 'type': 'number[]'\n }\n ]\n },\n {\n 'tfOpName': 'FusedDepthwiseConv2dNative',\n 'category': 'convolution',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'filter',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'end': 0,\n 'name': 'args',\n 'type': 'tensors'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'num_args',\n 'name': 'numArgs',\n 'type': 'number'\n },\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n },\n {\n 'tfName': 'strides',\n 'name': 'strides',\n 'type': 'number[]'\n },\n {\n 'tfName': 'padding',\n 'name': 'pad',\n 'type': 'string'\n },\n {\n 'tfName': 'data_format',\n 'name': 'dataFormat',\n 'type': 'string',\n 'defaultValue': 'NHWC'\n },\n {\n 'tfName': 'dilations',\n 'name': 'dilations',\n 'type': 'number[]',\n 'defaultValue': [\n 1,\n 1,\n 1,\n 1\n ]\n },\n {\n 'tfName': 'fused_ops',\n 'name': 'fusedOps',\n 'type': 'string[]',\n 'defaultValue': []\n },\n {\n 'tfName': 'explicit_paddings',\n 'name': 'explicitPaddings',\n 'type': 'number[]',\n 'defaultValue': []\n }\n ]\n },\n {\n 'tfOpName': 'Conv3D',\n 'category': 'convolution',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'filter',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'strides',\n 'name': 'strides',\n 'type': 'number[]'\n },\n {\n 'tfName': 'padding',\n 'name': 'pad',\n 'type': 'string'\n },\n {\n 'tfName': 'data_format',\n 'name': 'dataFormat',\n 'type': 'string',\n 'defaultValue': 'NHWC'\n },\n {\n 'tfName': 'dilations',\n 'name': 'dilations',\n 'type': 'number[]'\n }\n ]\n },\n {\n 'tfOpName': 'Dilation2D',\n 'category': 'convolution',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'filter',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'strides',\n 'name': 'strides',\n 'type': 'number[]'\n },\n {\n 'tfName': 'rates',\n 'name': 'dilations',\n 'type': 'number[]'\n },\n {\n 'tfName': 'padding',\n 'name': 'pad',\n 'type': 'string'\n }\n ]\n 
}\n]\n;\n","\n/**\n * @license\n * Copyright 2023 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpMapper} from '../types';\n\nexport const json: OpMapper[] = [\n {\n 'tfOpName': 'Fill',\n 'category': 'creation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'shape',\n 'type': 'number[]'\n },\n {\n 'start': 1,\n 'name': 'value',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'LinSpace',\n 'category': 'creation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'start',\n 'type': 'number'\n },\n {\n 'start': 1,\n 'name': 'stop',\n 'type': 'number'\n },\n {\n 'start': 2,\n 'name': 'num',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'OneHot',\n 'category': 'creation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'indices',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'depth',\n 'type': 'number'\n },\n {\n 'start': 2,\n 'name': 'onValue',\n 'type': 'number',\n 'defaultValue': 1\n },\n {\n 'start': 3,\n 'name': 'offValue',\n 'type': 'number',\n 'defaultValue': 0\n }\n ],\n 'attrs': [\n {\n 'tfName': 'axis',\n 'name': 'axis',\n 'type': 'number',\n 'notSupported': true\n },\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'Ones',\n 'category': 'creation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'shape',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'OnesLike',\n 'category': 'creation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'dtype',\n 'name': 'dtype',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'RandomStandardNormal',\n 'category': 'creation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'shape',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'seed',\n 'name': 'seed',\n 'type': 'number',\n 'defaultValue': 0\n },\n {\n 'tfName': 'seed2',\n 'name': 'seed2',\n 'type': 'number',\n 'defaultValue': 0,\n 'notSupported': true\n },\n {\n 'tfName': 'dtype',\n 'name': 'dtype',\n 'type': 'dtype'\n },\n {\n 'tfName': 'T',\n 'name': 'T',\n 'type': 'number',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'RandomUniform',\n 'category': 'creation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'shape',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'minval',\n 'name': 'minval',\n 'type': 'number',\n 'defaultValue': 0\n },\n {\n 'tfName': 'maxval',\n 'name': 'maxval',\n 'type': 'number',\n 'defaultValue': 1\n },\n {\n 'tfName': 'dtype',\n 'name': 'dtype',\n 'type': 'dtype'\n },\n {\n 'tfName': 'seed',\n 'name': 'seed',\n 'type': 'number',\n 'defaultValue': 0\n },\n {\n 'tfName': 'seed2',\n 'name': 'seed2',\n 'type': 'number',\n 'defaultValue': 0,\n 'notSupported': true\n },\n 
{\n 'tfName': 'T',\n 'name': 'T',\n 'type': 'number',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Range',\n 'category': 'creation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'start',\n 'type': 'number'\n },\n {\n 'start': 1,\n 'name': 'stop',\n 'type': 'number'\n },\n {\n 'start': 2,\n 'name': 'step',\n 'type': 'number',\n 'defaultValue': 0\n }\n ],\n 'attrs': [\n {\n 'tfName': 'Tidx',\n 'name': 'dtype',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'TruncatedNormal',\n 'category': 'creation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'shape',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'means',\n 'name': 'mean',\n 'type': 'number',\n 'defaultValue': 0\n },\n {\n 'tfName': 'stddev',\n 'name': 'stdDev',\n 'type': 'number',\n 'defaultValue': 1\n },\n {\n 'tfName': 'seed',\n 'name': 'seed',\n 'type': 'number'\n },\n {\n 'tfName': 'seed2',\n 'name': 'seed2',\n 'type': 'number',\n 'defaultValue': 0,\n 'notSupported': true\n },\n {\n 'tfName': 'dtype',\n 'name': 'dtype',\n 'type': 'dtype'\n },\n {\n 'tfName': 'T',\n 'name': 'T',\n 'type': 'number',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Zeros',\n 'category': 'creation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'shape',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'ZerosLike',\n 'category': 'creation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'Multinomial',\n 'category': 'creation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'logits',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'numSamples',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'seed',\n 'name': 'seed',\n 'type': 'number'\n },\n {\n 'tfName': 'seed2',\n 'name': 'seed2',\n 'type': 'number'\n },\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype'\n },\n {\n 'tfName': 'output_dtype',\n 'name': 'output_dtype',\n 'type': 'dtype'\n }\n ]\n }\n]\n;\n","\n/**\n * @license\n * Copyright 2023 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpMapper} from '../types';\n\nexport const json: OpMapper[] = [\n {\n 'tfOpName': 'NonMaxSuppressionV2',\n 'category': 'dynamic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'boxes',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'scores',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'maxOutputSize',\n 'type': 'number'\n },\n {\n 'start': 3,\n 'name': 'iouThreshold',\n 'type': 'number'\n }\n ]\n },\n {\n 'tfOpName': 'NonMaxSuppressionV3',\n 'category': 'dynamic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'boxes',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'scores',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'maxOutputSize',\n 'type': 'number'\n },\n {\n 'start': 3,\n 'name': 'iouThreshold',\n 'type': 'number'\n },\n {\n 'start': 4,\n 'name': 'scoreThreshold',\n 'type': 'number'\n }\n ]\n },\n {\n 'tfOpName': 'NonMaxSuppressionV4',\n 'category': 'dynamic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'boxes',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'scores',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'maxOutputSize',\n 'type': 'number'\n },\n {\n 'start': 3,\n 'name': 'iouThreshold',\n 'type': 'number'\n },\n {\n 'start': 4,\n 'name': 'scoreThreshold',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n },\n {\n 'tfName': 'T_threshold',\n 'name': 'threshold',\n 'type': 'dtype',\n 'notSupported': true\n },\n {\n 'tfName': 'pad_to_max_output_size',\n 'name': 'padToMaxOutputSize',\n 'type': 'bool'\n }\n ]\n },\n {\n 'tfOpName': 'NonMaxSuppressionV5',\n 'category': 'dynamic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'boxes',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'scores',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'maxOutputSize',\n 'type': 'number'\n },\n {\n 'start': 3,\n 'name': 'iouThreshold',\n 'type': 'number'\n },\n {\n 'start': 4,\n 'name': 'scoreThreshold',\n 'type': 'number'\n },\n {\n 'start': 5,\n 'name': 'softNmsSigma',\n 'type': 'number'\n }\n ]\n },\n {\n 'tfOpName': 'Where',\n 'category': 'dynamic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'condition',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'ListDiff',\n 'category': 'dynamic',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'y',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n }\n];\n","\n/**\n * @license\n * Copyright 2023 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpMapper} from '../types';\n\nexport const json: OpMapper[] = [\n {\n 'tfOpName': 'LowerBound',\n 'category': 'evaluation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'sortedSequence',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'values',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'TopKV2',\n 'category': 'evaluation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'k',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'sorted',\n 'name': 'sorted',\n 'type': 'bool'\n }\n ]\n },\n {\n 'tfOpName': 'UpperBound',\n 'category': 'evaluation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'sortedSequence',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'values',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'Unique',\n 'category': 'evaluation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'UniqueV2',\n 'category': 'evaluation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'axis',\n 'type': 'number'\n }\n ]\n }\n]\n;\n","\n/**\n * @license\n * Copyright 2023 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpMapper} from '../types';\n\nexport const json: OpMapper[] = [\n {\n 'tfOpName': 'PlaceholderWithDefault',\n 'category': 'graph',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'default',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'shape',\n 'name': 'shape',\n 'type': 'shape'\n },\n {\n 'tfName': 'dtype',\n 'name': 'dtype',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'Placeholder',\n 'category': 'graph',\n 'attrs': [\n {\n 'tfName': 'shape',\n 'name': 'shape',\n 'type': 'shape'\n },\n {\n 'tfName': 'dtype',\n 'name': 'dtype',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'Const',\n 'category': 'graph'\n },\n {\n 'tfOpName': 'Identity',\n 'category': 'graph',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'IdentityN',\n 'category': 'graph',\n 'inputs': [\n {\n 'start': 0,\n 'end': 0,\n 'name': 'x',\n 'type': 'tensors'\n }\n ]\n },\n {\n 'tfOpName': 'Snapshot',\n 'category': 'graph',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'Rank',\n 'category': 'graph',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'Size',\n 'category': 'graph',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'Shape',\n 'category': 'graph',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'ShapeN',\n 'category': 'graph',\n 'inputs': [\n {\n 'start': 0,\n 'end': 0,\n 'name': 'x',\n 'type': 'tensors'\n }\n ]\n },\n {\n 'tfOpName': 'Print',\n 'category': 'graph',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'data',\n 'type': 'tensors'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'message',\n 'name': 'message',\n 'type': 'string'\n },\n {\n 'tfName': 'first_n',\n 'name': 'firstN',\n 'type': 'number',\n 'notSupported': true\n },\n {\n 'tfName': 'summarize',\n 'name': 'summarize',\n 'type': 'number',\n 'defaultValue': 3\n }\n ]\n },\n {\n 'tfOpName': 'NoOp',\n 'category': 'graph',\n 'inputs': []\n },\n {\n 'tfOpName': 'StopGradient',\n 'category': 'graph',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'FakeQuantWithMinMaxVars',\n 'category': 'graph',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'min',\n 'name': 'min',\n 'type': 'number'\n },\n {\n 'tfName': 'max',\n 'name': 'max',\n 'type': 'number'\n }\n ]\n }\n];\n","\n/**\n * @license\n * Copyright 2023 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpMapper} from '../types';\n\nexport const json: OpMapper[] = [\n {\n 'tfOpName': 'HashTable',\n 'category': 'hash_table',\n 'inputs': [],\n 'attrs': [\n {\n 'tfName': 'shared_name',\n 'name': 'sharedName',\n 'type': 'string'\n },\n {\n 'tfName': 'use_node_name_sharing',\n 'name': 'useNodeNameSharing',\n 'type': 'bool'\n },\n {\n 'tfName': 'key_dtype',\n 'name': 'keyDType',\n 'type': 'dtype'\n },\n {\n 'tfName': 'value_dtype',\n 'name': 'valueDType',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'HashTableV2',\n 'category': 'hash_table',\n 'inputs': [],\n 'attrs': [\n {\n 'tfName': 'shared_name',\n 'name': 'sharedName',\n 'type': 'string'\n },\n {\n 'tfName': 'use_node_name_sharing',\n 'name': 'useNodeNameSharing',\n 'type': 'bool'\n },\n {\n 'tfName': 'key_dtype',\n 'name': 'keyDType',\n 'type': 'dtype'\n },\n {\n 'tfName': 'value_dtype',\n 'name': 'valueDType',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'LookupTableImport',\n 'category': 'hash_table',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tableHandle',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'keys',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'values',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'Tin',\n 'name': 'tIn',\n 'type': 'dtype',\n 'notSupported': true\n },\n {\n 'tfName': 'Tout',\n 'name': 'tOut',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'LookupTableImportV2',\n 'category': 'hash_table',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tableHandle',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'keys',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'values',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'Tin',\n 'name': 'tIn',\n 'type': 'dtype',\n 'notSupported': true\n },\n {\n 'tfName': 'Tout',\n 'name': 'tOut',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'LookupTableFind',\n 'category': 'hash_table',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tableHandle',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'keys',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'defaultValue',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'Tin',\n 'name': 'tIn',\n 'type': 'dtype',\n 'notSupported': true\n },\n {\n 'tfName': 'Tout',\n 'name': 'tOut',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'LookupTableFindV2',\n 'category': 'hash_table',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tableHandle',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'keys',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'defaultValue',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'Tin',\n 'name': 'tIn',\n 'type': 'dtype',\n 'notSupported': true\n },\n {\n 'tfName': 'Tout',\n 'name': 'tOut',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'LookupTableSize',\n 'category': 
'hash_table',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tableHandle',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'LookupTableSizeV2',\n 'category': 'hash_table',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tableHandle',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'InitializeTable',\n 'category': 'hash_table',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tableHandle',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'keys',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'values',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'InitializeTableV2',\n 'category': 'hash_table',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tableHandle',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'keys',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'values',\n 'type': 'tensor'\n }\n ]\n }\n]\n;\n","\n/**\n * @license\n * Copyright 2023 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpMapper} from '../types';\n\nexport const json: OpMapper[] = [\n {\n 'tfOpName': 'ResizeBilinear',\n 'category': 'image',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'images',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'size',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'align_corners',\n 'name': 'alignCorners',\n 'type': 'bool'\n },\n {\n 'tfName': 'half_pixel_centers',\n 'name': 'halfPixelCenters',\n 'type': 'bool'\n },\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'ResizeNearestNeighbor',\n 'category': 'image',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'images',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'size',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'align_corners',\n 'name': 'alignCorners',\n 'type': 'bool'\n },\n {\n 'tfName': 'half_pixel_centers',\n 'name': 'halfPixelCenters',\n 'type': 'bool'\n },\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'CropAndResize',\n 'category': 'image',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'image',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'boxes',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'boxInd',\n 'type': 'tensor'\n },\n {\n 'start': 3,\n 'name': 'cropSize',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'method',\n 'name': 'method',\n 'type': 'string'\n },\n {\n 'tfName': 'extrapolation_value',\n 'name': 'extrapolationValue',\n 'type': 'number'\n }\n ]\n },\n {\n 'tfOpName': 'ImageProjectiveTransformV3',\n 'category': 'image',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'images',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'transforms',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'outputShape',\n 'type': 'number[]'\n },\n {\n 'start': 3,\n 'name': 'fillValue',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'interpolation',\n 'name': 'interpolation',\n 'type': 'string'\n },\n {\n 
'tfName': 'fill_mode',\n 'name': 'fillMode',\n 'type': 'string'\n }\n ]\n }\n];\n","\n/**\n * @license\n * Copyright 2023 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpMapper} from '../types';\n\nexport const json: OpMapper[] = [\n {\n 'tfOpName': 'Equal',\n 'category': 'logical',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'NotEqual',\n 'category': 'logical',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Greater',\n 'category': 'logical',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'GreaterEqual',\n 'category': 'logical',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Less',\n 'category': 'logical',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'LessEqual',\n 'category': 'logical',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'LogicalAnd',\n 'category': 'logical',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'LogicalNot',\n 'category': 'logical',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'LogicalOr',\n 'category': 'logical',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Select',\n 'category': 'logical',\n 'inputs': [\n 
{\n 'start': 0,\n 'name': 'condition',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'SelectV2',\n 'category': 'logical',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'condition',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n }\n];\n","\n/**\n * @license\n * Copyright 2023 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpMapper} from '../types';\n\nexport const json: OpMapper[] = [\n {\n 'tfOpName': '_FusedMatMul',\n 'category': 'matrices',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'end': 0,\n 'name': 'args',\n 'type': 'tensors'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'num_args',\n 'name': 'numArgs',\n 'type': 'number'\n },\n {\n 'tfName': 'fused_ops',\n 'name': 'fusedOps',\n 'type': 'string[]',\n 'defaultValue': []\n },\n {\n 'tfName': 'epsilon',\n 'name': 'epsilon',\n 'type': 'number',\n 'defaultValue': 0.0001\n },\n {\n 'tfName': 'transpose_a',\n 'name': 'transposeA',\n 'type': 'bool',\n 'defaultValue': false\n },\n {\n 'tfName': 'transpose_b',\n 'name': 'transposeB',\n 'type': 'bool',\n 'defaultValue': false\n },\n {\n 'tfName': 'leakyrelu_alpha',\n 'name': 'leakyreluAlpha',\n 'type': 'number',\n 'defaultValue': 0.2\n },\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'MatMul',\n 'category': 'matrices',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'transpose_a',\n 'name': 'transposeA',\n 'type': 'bool',\n 'defaultValue': false\n },\n {\n 'tfName': 'transpose_b',\n 'name': 'transposeB',\n 'type': 'bool',\n 'defaultValue': false\n },\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'BatchMatMul',\n 'category': 'matrices',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'adj_x',\n 'name': 'transposeA',\n 'type': 'bool',\n 'defaultValue': false\n },\n {\n 'tfName': 'adj_y',\n 'name': 'transposeB',\n 'type': 'bool',\n 'defaultValue': false\n },\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'BatchMatMulV2',\n 'category': 'matrices',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'a',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 
'name': 'b',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'adj_x',\n 'name': 'transposeA',\n 'type': 'bool',\n 'defaultValue': false\n },\n {\n 'tfName': 'adj_y',\n 'name': 'transposeB',\n 'type': 'bool',\n 'defaultValue': false\n },\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Transpose',\n 'category': 'matrices',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'perm',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Einsum',\n 'category': 'matrices',\n 'inputs': [\n {\n 'start': 0,\n 'end': 0,\n 'name': 'tensors',\n 'type': 'tensors'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'equation',\n 'name': 'equation',\n 'type': 'string'\n },\n {\n 'tfName': 'N',\n 'name': 'n',\n 'type': 'number',\n 'defaultValue': 2\n },\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype'\n }\n ]\n }\n]\n;\n","\n/**\n * @license\n * Copyright 2023 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpMapper} from '../types';\n\nexport const json: OpMapper[] = [\n {\n 'tfOpName': 'EuclideanNorm',\n 'category': 'normalization',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'axis',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'keep_dims',\n 'name': 'keepDims',\n 'type': 'bool',\n 'defaultValue': false\n }\n ]\n },\n {\n 'tfOpName': 'FusedBatchNorm',\n 'category': 'normalization',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'scale',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'offset',\n 'type': 'tensor'\n },\n {\n 'start': 3,\n 'name': 'mean',\n 'type': 'tensor'\n },\n {\n 'start': 4,\n 'name': 'variance',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'epsilon',\n 'name': 'epsilon',\n 'type': 'number',\n 'defaultValue': 0.001\n },\n {\n 'tfName': 'data_format',\n 'name': 'dataFormat',\n 'type': 'string',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'FusedBatchNormV2',\n 'category': 'normalization',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'scale',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'offset',\n 'type': 'tensor'\n },\n {\n 'start': 3,\n 'name': 'mean',\n 'type': 'tensor'\n },\n {\n 'start': 4,\n 'name': 'variance',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'epsilon',\n 'name': 'epsilon',\n 'type': 'number',\n 'defaultValue': 0.001\n },\n {\n 'tfName': 'data_format',\n 'name': 'dataFormat',\n 'type': 'string',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'FusedBatchNormV3',\n 'category': 'normalization',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 
'scale',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'offset',\n 'type': 'tensor'\n },\n {\n 'start': 3,\n 'name': 'mean',\n 'type': 'tensor'\n },\n {\n 'start': 4,\n 'name': 'variance',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'epsilon',\n 'name': 'epsilon',\n 'type': 'number',\n 'defaultValue': 0.001\n },\n {\n 'tfName': 'data_format',\n 'name': 'dataFormat',\n 'type': 'string',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'LRN',\n 'category': 'normalization',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'depth_radius',\n 'name': 'radius',\n 'type': 'number',\n 'defaultValue': 5\n },\n {\n 'tfName': 'bias',\n 'name': 'bias',\n 'type': 'number',\n 'defaultValue': 1\n },\n {\n 'tfName': 'alpha',\n 'name': 'alpha',\n 'type': 'number',\n 'defaultValue': 1\n },\n {\n 'tfName': 'beta',\n 'name': 'beta',\n 'type': 'number',\n 'defaultValue': 0.5\n }\n ]\n },\n {\n 'tfOpName': 'Softmax',\n 'category': 'normalization',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'LogSoftmax',\n 'category': 'normalization',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'SparseToDense',\n 'category': 'normalization',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'sparseIndices',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'outputShape',\n 'type': 'number[]'\n },\n {\n 'start': 2,\n 'name': 'sparseValues',\n 'type': 'tensor'\n },\n {\n 'start': 3,\n 'name': 'defaultValue',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'validate_indices',\n 'name': 'validateIndices',\n 'type': 'bool',\n 'defaultValue': true,\n 'notSupported': true\n }\n ]\n }\n]\n;\n","\n/**\n * @license\n * Copyright 2023 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpMapper} from '../types';\n\nexport const json: OpMapper[] = [\n {\n 'tfOpName': 'Bincount',\n 'category': 'reduction',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'size',\n 'type': 'number'\n },\n {\n 'start': 2,\n 'name': 'weights',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'DenseBincount',\n 'category': 'reduction',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'size',\n 'type': 'number'\n },\n {\n 'start': 2,\n 'name': 'weights',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'binary_output',\n 'name': 'binaryOutput',\n 'type': 'bool'\n }\n ]\n },\n {\n 'tfOpName': 'Max',\n 'category': 'reduction',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'axis',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'keep_dims',\n 'name': 'keepDims',\n 'type': 'bool'\n }\n ]\n },\n {\n 'tfOpName': 'Mean',\n 'category': 'reduction',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'axis',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'keep_dims',\n 'name': 'keepDims',\n 'type': 'bool'\n }\n ]\n },\n {\n 'tfOpName': 'Min',\n 'category': 'reduction',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'axis',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'keep_dims',\n 'name': 'keepDims',\n 'type': 'bool'\n }\n ]\n },\n {\n 'tfOpName': 'Sum',\n 'category': 'reduction',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'axis',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'keep_dims',\n 'name': 'keepDims',\n 'type': 'bool'\n }\n ]\n },\n {\n 'tfOpName': 'All',\n 'category': 'reduction',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'axis',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'keep_dims',\n 'name': 'keepDims',\n 'type': 'bool'\n }\n ]\n },\n {\n 'tfOpName': 'Any',\n 'category': 'reduction',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'axis',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'keep_dims',\n 'name': 'keepDims',\n 'type': 'bool'\n }\n ]\n },\n {\n 'tfOpName': 'ArgMax',\n 'category': 'reduction',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'axis',\n 'type': 'number'\n }\n ]\n },\n {\n 'tfOpName': 'ArgMin',\n 'category': 'reduction',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'axis',\n 'type': 'number'\n }\n ]\n },\n {\n 'tfOpName': 'Prod',\n 'category': 'reduction',\n 'inputs': [\n {\n 'start': 0,\n 'name': 
'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'axis',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'keep_dims',\n 'name': 'keepDims',\n 'type': 'bool'\n }\n ]\n },\n {\n 'tfOpName': 'Cumprod',\n 'category': 'reduction',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'axis',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'exclusive',\n 'name': 'exclusive',\n 'type': 'bool'\n },\n {\n 'tfName': 'reverse',\n 'name': 'reverse',\n 'type': 'bool'\n }\n ]\n },\n {\n 'tfOpName': 'Cumsum',\n 'category': 'reduction',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'axis',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'exclusive',\n 'name': 'exclusive',\n 'type': 'bool'\n },\n {\n 'tfName': 'reverse',\n 'name': 'reverse',\n 'type': 'bool'\n }\n ]\n }\n]\n;\n","\n/**\n * @license\n * Copyright 2023 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpMapper} from '../types';\n\nexport const json: OpMapper[] = [\n {\n 'tfOpName': 'ConcatV2',\n 'category': 'slice_join',\n 'inputs': [\n {\n 'start': 0,\n 'end': -1,\n 'name': 'tensors',\n 'type': 'tensors'\n },\n {\n 'start': -1,\n 'name': 'axis',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'N',\n 'name': 'n',\n 'type': 'number',\n 'defaultValue': 2\n }\n ]\n },\n {\n 'tfOpName': 'Concat',\n 'category': 'slice_join',\n 'inputs': [\n {\n 'start': 1,\n 'end': 0,\n 'name': 'tensors',\n 'type': 'tensors'\n },\n {\n 'start': 0,\n 'name': 'axis',\n 'type': 'number'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'N',\n 'name': 'n',\n 'type': 'number',\n 'defaultValue': 2\n }\n ]\n },\n {\n 'tfOpName': 'GatherV2',\n 'category': 'slice_join',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'indices',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'axis',\n 'type': 'number',\n 'defaultValue': 0\n }\n ],\n 'attrs': [\n {\n 'tfName': 'batch_dims',\n 'name': 'batchDims',\n 'type': 'number',\n 'defaultValue': 0\n }\n ]\n },\n {\n 'tfOpName': 'Gather',\n 'category': 'slice_join',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'indices',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'validate_indices',\n 'name': 'validateIndices',\n 'type': 'bool',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Reverse',\n 'category': 'slice_join',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'dims',\n 'type': 'bool[]'\n }\n ]\n },\n {\n 'tfOpName': 'ReverseV2',\n 'category': 'slice_join',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'axis',\n 'type': 'number[]'\n }\n ]\n },\n {\n 'tfOpName': 'Slice',\n 'category': 'slice_join',\n 'inputs': [\n {\n 'start': 0,\n 'name': 
'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'begin',\n 'type': 'number[]'\n },\n {\n 'start': 2,\n 'name': 'size',\n 'type': 'number[]'\n }\n ]\n },\n {\n 'tfOpName': 'StridedSlice',\n 'category': 'slice_join',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'begin',\n 'type': 'number[]'\n },\n {\n 'start': 2,\n 'name': 'end',\n 'type': 'number[]'\n },\n {\n 'start': 3,\n 'name': 'strides',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'begin_mask',\n 'name': 'beginMask',\n 'type': 'number',\n 'defaultValue': 0\n },\n {\n 'tfName': 'end_mask',\n 'name': 'endMask',\n 'type': 'number',\n 'defaultValue': 0\n },\n {\n 'tfName': 'new_axis_mask',\n 'name': 'newAxisMask',\n 'type': 'number',\n 'defaultValue': 0\n },\n {\n 'tfName': 'ellipsis_mask',\n 'name': 'ellipsisMask',\n 'type': 'number',\n 'defaultValue': 0\n },\n {\n 'tfName': 'shrink_axis_mask',\n 'name': 'shrinkAxisMask',\n 'type': 'number',\n 'defaultValue': 0\n }\n ]\n },\n {\n 'tfOpName': 'Pack',\n 'category': 'slice_join',\n 'inputs': [\n {\n 'start': 0,\n 'end': 0,\n 'name': 'tensors',\n 'type': 'tensors'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'axis',\n 'name': 'axis',\n 'type': 'number',\n 'defaultValue': 0\n }\n ]\n },\n {\n 'tfOpName': 'Unpack',\n 'category': 'slice_join',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'tensor',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'axis',\n 'name': 'axis',\n 'type': 'number',\n 'defaultValue': 0\n },\n {\n 'tfName': 'num',\n 'name': 'num',\n 'type': 'number',\n 'defaultValue': 0,\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'Tile',\n 'category': 'slice_join',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'reps',\n 'type': 'number[]'\n }\n ]\n },\n {\n 'tfOpName': 'Split',\n 'category': 'slice_join',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'axis',\n 'type': 'number',\n 'defaultValue': 0\n },\n {\n 'start': 1,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'num_split',\n 'name': 'numOrSizeSplits',\n 'type': 'number',\n 'defaultValue': 1\n }\n ]\n },\n {\n 'tfOpName': 'SplitV',\n 'category': 'slice_join',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'numOrSizeSplits',\n 'type': 'number[]'\n },\n {\n 'start': 2,\n 'name': 'axis',\n 'type': 'number',\n 'defaultValue': 0\n }\n ]\n },\n {\n 'tfOpName': 'ScatterNd',\n 'category': 'slice_join',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'indices',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'values',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'shape',\n 'type': 'number[]'\n }\n ]\n },\n {\n 'tfOpName': 'GatherNd',\n 'category': 'slice_join',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'indices',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'SparseToDense',\n 'category': 'slice_join',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'sparseIndices',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'outputShape',\n 'type': 'number[]'\n },\n {\n 'start': 2,\n 'name': 'sparseValues',\n 'type': 'tensor'\n },\n {\n 'start': 3,\n 'name': 'defaultValue',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'validate_indices',\n 'name': 'validateIndices',\n 'type': 'bool',\n 'defaultValue': false,\n 'notSupported': true\n }\n ]\n }\n];\n","\n/**\n * @license\n * Copyright 2023 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpMapper} from '../types';\n\nexport const json: OpMapper[] = [\n {\n 'tfOpName': 'SparseFillEmptyRows',\n 'category': 'sparse',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'indices',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'values',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'denseShape',\n 'type': 'tensor'\n },\n {\n 'start': 3,\n 'name': 'defaultValue',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'SparseReshape',\n 'category': 'sparse',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'inputIndices',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'inputShape',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'newShape',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'T',\n 'name': 'dtype',\n 'type': 'dtype',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'SparseSegmentMean',\n 'category': 'sparse',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'data',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'indices',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'segmentIds',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'SparseSegmentSum',\n 'category': 'sparse',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'data',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'indices',\n 'type': 'tensor'\n },\n {\n 'start': 2,\n 'name': 'segmentIds',\n 'type': 'tensor'\n }\n ]\n }\n];\n","\n/**\n * @license\n * Copyright 2023 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpMapper} from '../types';\n\nexport const json: OpMapper[] = [\n {\n 'tfOpName': 'FFT',\n 'category': 'spectral',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'IFFT',\n 'category': 'spectral',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ]\n },\n {\n 'tfOpName': 'RFFT',\n 'category': 'spectral',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'fft_length',\n 'type': 'number',\n 'notSupported': true\n }\n ]\n },\n {\n 'tfOpName': 'IRFFT',\n 'category': 'spectral',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'fft_length',\n 'type': 'number',\n 'notSupported': true\n }\n ]\n }\n];\n","\n/**\n * @license\n * Copyright 2023 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpMapper} from '../types';\n\nexport const json: OpMapper[] = [\n {\n 'tfOpName': 'StringNGrams',\n 'category': 'string',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'data',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'dataSplits',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'separator',\n 'name': 'separator',\n 'type': 'string'\n },\n {\n 'tfName': 'ngram_widths',\n 'name': 'nGramWidths',\n 'type': 'number[]'\n },\n {\n 'tfName': 'left_pad',\n 'name': 'leftPad',\n 'type': 'string'\n },\n {\n 'tfName': 'right_pad',\n 'name': 'rightPad',\n 'type': 'string'\n },\n {\n 'tfName': 'pad_width',\n 'name': 'padWidth',\n 'type': 'number'\n },\n {\n 'tfName': 'preserve_short_sequences',\n 'name': 'preserveShortSequences',\n 'type': 'bool'\n }\n ],\n 'outputs': [\n 'ngrams',\n 'ngrams_splits'\n ]\n },\n {\n 'tfOpName': 'StringSplit',\n 'category': 'string',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'input',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'delimiter',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'skip_empty',\n 'name': 'skipEmpty',\n 'type': 'bool'\n }\n ],\n 'outputs': [\n 'indices',\n 'values',\n 'shape'\n ]\n },\n {\n 'tfOpName': 'StringToHashBucketFast',\n 'category': 'string',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'input',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 
'num_buckets',\n 'name': 'numBuckets',\n 'type': 'number'\n }\n ]\n }\n];\n","\n/**\n * @license\n * Copyright 2023 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {OpMapper} from '../types';\n\nexport const json: OpMapper[] = [\n {\n 'tfOpName': 'Cast',\n 'category': 'transformation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'SrcT',\n 'name': 'sdtype',\n 'type': 'dtype',\n 'notSupported': true\n },\n {\n 'tfName': 'DstT',\n 'name': 'dtype',\n 'type': 'dtype'\n }\n ]\n },\n {\n 'tfOpName': 'ExpandDims',\n 'category': 'transformation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'axis',\n 'type': 'number'\n }\n ]\n },\n {\n 'tfOpName': 'MirrorPad',\n 'category': 'transformation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'padding',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'mode',\n 'name': 'mode',\n 'type': 'string'\n }\n ]\n },\n {\n 'tfOpName': 'Pad',\n 'category': 'transformation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'padding',\n 'type': 'number[]'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'constant_value',\n 'name': 'constantValue',\n 'type': 'number',\n 'defaultValue': 0\n }\n ]\n },\n {\n 'tfOpName': 'PadV2',\n 'category': 'transformation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'padding',\n 'type': 'number[]'\n },\n {\n 'start': 2,\n 'name': 'constantValue',\n 'type': 'number',\n 'defaultValue': 0\n }\n ]\n },\n {\n 'tfOpName': 'Reshape',\n 'category': 'transformation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'shape',\n 'type': 'number[]'\n }\n ]\n },\n {\n 'tfOpName': 'Squeeze',\n 'category': 'transformation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'axis',\n 'tfDeprecatedName': 'squeeze_dims',\n 'name': 'axis',\n 'type': 'number[]'\n }\n ]\n },\n {\n 'tfOpName': 'SpaceToBatchND',\n 'category': 'transformation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'blockShape',\n 'type': 'number[]'\n },\n {\n 'start': 2,\n 'name': 'paddings',\n 'type': 'number[]'\n }\n ]\n },\n {\n 'tfOpName': 'BatchToSpaceND',\n 'category': 'transformation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'blockShape',\n 'type': 'number[]'\n },\n {\n 'start': 2,\n 'name': 'crops',\n 'type': 'number[]'\n }\n ]\n },\n {\n 'tfOpName': 'DepthToSpace',\n 'category': 'transformation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n }\n ],\n 'attrs': [\n {\n 'tfName': 'block_size',\n 'name': 'blockSize',\n 'type': 'number'\n 
},\n {\n 'tfName': 'data_format',\n 'name': 'dataFormat',\n 'type': 'string'\n }\n ]\n },\n {\n 'tfOpName': 'BroadcastTo',\n 'category': 'transformation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 'x',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 'shape',\n 'type': 'number[]'\n }\n ],\n 'attrs': []\n },\n {\n 'tfOpName': 'BroadcastArgs',\n 'category': 'transformation',\n 'inputs': [\n {\n 'start': 0,\n 'name': 's0',\n 'type': 'tensor'\n },\n {\n 'start': 1,\n 'name': 's1',\n 'type': 'tensor'\n }\n ],\n 'attrs': []\n }\n];\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, env} from '@tensorflow/tfjs-core';\n\nimport * as tensorflow from '../data/compiled_api';\n\nimport {getRegisteredOp} from './custom_op/register';\nimport {getNodeNameAndIndex} from './executors/utils';\nimport * as arithmetic from './op_list/arithmetic';\nimport * as basicMath from './op_list/basic_math';\nimport * as control from './op_list/control';\nimport * as convolution from './op_list/convolution';\nimport * as creation from './op_list/creation';\nimport * as dynamic from './op_list/dynamic';\nimport * as evaluation from './op_list/evaluation';\nimport * as graph from './op_list/graph';\nimport * as hashTable from './op_list/hash_table';\nimport * as image from './op_list/image';\nimport * as logical from './op_list/logical';\nimport * as matrices from './op_list/matrices';\nimport * as normalization from './op_list/normalization';\nimport * as reduction from './op_list/reduction';\nimport * as sliceJoin from './op_list/slice_join';\nimport * as sparse from './op_list/sparse';\nimport * as spectral from './op_list/spectral';\nimport * as string from './op_list/string';\nimport * as transformation from './op_list/transformation';\nimport {Graph, InputParamValue, Node, OpMapper, ParamValue} from './types';\n\nexport class OperationMapper {\n private static _instance: OperationMapper;\n\n private opMappers: {[key: string]: OpMapper};\n\n // Singleton instance for the mapper\n public static get Instance() {\n return this._instance || (this._instance = new this());\n }\n\n // Loads the op mapping from the JSON file.\n private constructor() {\n const ops = [\n arithmetic, basicMath, control, convolution, creation, dynamic,\n evaluation, graph, hashTable, image, logical, matrices, normalization,\n reduction, sliceJoin, sparse, spectral, string, transformation\n ];\n const mappersJson: OpMapper[] = [].concat(...ops.map(op => op.json));\n\n this.opMappers = mappersJson.reduce<{[key: string]: OpMapper}>(\n (map, mapper: OpMapper) => {\n map[mapper.tfOpName] = mapper;\n return map;\n },\n {});\n }\n\n // Converts the model inference graph from Tensorflow GraphDef to local\n // representation for TensorFlow.js API\n transformGraph(\n graph: tensorflow.IGraphDef,\n signature: tensorflow.ISignatureDef = {}): Graph {\n const tfNodes = 
graph.node;\n const placeholders: Node[] = [];\n const weights: Node[] = [];\n const initNodes: Node[] = [];\n const nodes = tfNodes.reduce<{[key: string]: Node}>((map, node) => {\n map[node.name] = this.mapNode(node);\n if (node.op.startsWith('Placeholder')) {\n placeholders.push(map[node.name]);\n } else if (node.op === 'Const') {\n weights.push(map[node.name]);\n } else if (node.input == null || node.input.length === 0) {\n initNodes.push(map[node.name]);\n }\n return map;\n }, {});\n\n let inputs: Node[] = [];\n const outputs: Node[] = [];\n let inputNodeNameToKey: {[key: string]: string} = {};\n let outputNodeNameToKey: {[key: string]: string} = {};\n if (signature != null) {\n inputNodeNameToKey = this.mapSignatureEntries(signature.inputs);\n outputNodeNameToKey = this.mapSignatureEntries(signature.outputs);\n }\n const allNodes = Object.keys(nodes);\n allNodes.forEach(key => {\n const node = nodes[key];\n node.inputNames.forEach((name, index) => {\n const [nodeName, , outputName] = getNodeNameAndIndex(name);\n const inputNode = nodes[nodeName];\n if (inputNode.outputs != null) {\n const outputIndex = inputNode.outputs.indexOf(outputName);\n if (outputIndex !== -1) {\n const inputName = `${nodeName}:${outputIndex}`;\n // update the input name to use the mapped output index directly.\n node.inputNames[index] = inputName;\n }\n }\n node.inputs.push(inputNode);\n inputNode.children.push(node);\n });\n });\n\n // if signature has not outputs set, add any node that does not have\n // outputs.\n if (Object.keys(outputNodeNameToKey).length === 0) {\n allNodes.forEach(key => {\n const node = nodes[key];\n if (node.children.length === 0) {\n outputs.push(node);\n }\n });\n } else {\n Object.keys(outputNodeNameToKey).forEach(name => {\n const [nodeName, ] = getNodeNameAndIndex(name);\n const node = nodes[nodeName];\n if (node != null) {\n node.signatureKey = outputNodeNameToKey[name];\n outputs.push(node);\n }\n });\n }\n\n if (Object.keys(inputNodeNameToKey).length > 0) {\n Object.keys(inputNodeNameToKey).forEach(name => {\n const [nodeName, ] = getNodeNameAndIndex(name);\n const node = nodes[nodeName];\n if (node) {\n node.signatureKey = inputNodeNameToKey[name];\n inputs.push(node);\n }\n });\n } else {\n inputs = placeholders;\n }\n\n let functions = {};\n if (graph.library != null && graph.library.function != null) {\n functions = graph.library.function.reduce((functions, func) => {\n functions[func.signature.name] = this.mapFunction(func);\n return functions;\n }, {} as {[key: string]: Graph});\n }\n\n const result: Graph =\n {nodes, inputs, outputs, weights, placeholders, signature, functions};\n\n if (initNodes.length > 0) {\n result.initNodes = initNodes;\n }\n\n return result;\n }\n\n private mapSignatureEntries(entries: {[k: string]: tensorflow.ITensorInfo}) {\n return Object.keys(entries || {})\n .reduce<{[key: string]: string}>((prev, curr) => {\n prev[entries[curr].name] = curr;\n return prev;\n }, {});\n }\n\n private mapNode(node: tensorflow.INodeDef): Node {\n // Unsupported ops will cause an error at run-time (not parse time), since\n // they may not be used by the actual execution subgraph.\n const mapper =\n getRegisteredOp(node.op) || this.opMappers[node.op] || {} as OpMapper;\n if (node.attr == null) {\n node.attr = {};\n }\n\n const newNode: Node = {\n name: node.name,\n op: node.op,\n category: mapper.category,\n inputNames:\n (node.input ||\n []).map(input => input.startsWith('^') ? 
input.slice(1) : input),\n inputs: [],\n children: [],\n inputParams: {},\n attrParams: {},\n rawAttrs: node.attr,\n outputs: mapper.outputs\n };\n\n if (mapper.inputs != null) {\n newNode.inputParams =\n mapper.inputs.reduce<{[key: string]: InputParamValue}>(\n (map, param) => {\n map[param.name] = {\n type: param.type,\n inputIndexStart: param.start,\n inputIndexEnd: param.end\n };\n return map;\n },\n {});\n }\n if (mapper.attrs != null) {\n newNode.attrParams =\n mapper.attrs.reduce<{[key: string]: ParamValue}>((map, param) => {\n const type = param.type;\n let value = undefined;\n switch (param.type) {\n case 'string':\n value = getStringParam(\n node.attr, param.tfName, param.defaultValue as string);\n\n if (value === undefined && !!param.tfDeprecatedName) {\n value = getStringParam(\n node.attr, param.tfDeprecatedName,\n param.defaultValue as string);\n }\n break;\n case 'string[]':\n value = getStringArrayParam(\n node.attr, param.tfName, param.defaultValue as string[]);\n\n if (value === undefined && !!param.tfDeprecatedName) {\n value = getStringArrayParam(\n node.attr, param.tfDeprecatedName,\n param.defaultValue as string[]);\n }\n break;\n case 'number':\n value = getNumberParam(\n node.attr, param.tfName,\n (param.defaultValue || 0) as number);\n if (value === undefined && !!param.tfDeprecatedName) {\n value = getNumberParam(\n node.attr, param.tfDeprecatedName,\n param.defaultValue as number);\n }\n break;\n case 'number[]':\n value = getNumericArrayParam(\n node.attr, param.tfName, param.defaultValue as number[]);\n if (value === undefined && !!param.tfDeprecatedName) {\n value = getNumericArrayParam(\n node.attr, param.tfDeprecatedName,\n param.defaultValue as number[]);\n }\n break;\n case 'bool':\n value = getBoolParam(\n node.attr, param.tfName, param.defaultValue as boolean);\n if (value === undefined && !!param.tfDeprecatedName) {\n value = getBoolParam(\n node.attr, param.tfDeprecatedName,\n param.defaultValue as boolean);\n }\n break;\n case 'bool[]':\n value = getBoolArrayParam(\n node.attr, param.tfName, param.defaultValue as boolean[]);\n if (value === undefined && !!param.tfDeprecatedName) {\n value = getBoolArrayParam(\n node.attr, param.tfDeprecatedName,\n param.defaultValue as boolean[]);\n }\n break;\n case 'shape':\n value = getTensorShapeParam(\n node.attr, param.tfName, param.defaultValue as number[]);\n if (value === undefined && !!param.tfDeprecatedName) {\n value = getTensorShapeParam(\n node.attr, param.tfDeprecatedName,\n param.defaultValue as number[]);\n }\n break;\n case 'shape[]':\n value = getTensorShapeArrayParam(\n node.attr, param.tfName, param.defaultValue as number[][]);\n if (value === undefined && !!param.tfDeprecatedName) {\n value = getTensorShapeArrayParam(\n node.attr, param.tfDeprecatedName,\n param.defaultValue as number[][]);\n }\n break;\n case 'dtype':\n value = getDtypeParam(\n node.attr, param.tfName, param.defaultValue as DataType);\n if (value === undefined && !!param.tfDeprecatedName) {\n value = getDtypeParam(\n node.attr, param.tfDeprecatedName,\n param.defaultValue as DataType);\n }\n break;\n case 'dtype[]':\n value = getDtypeArrayParam(\n node.attr, param.tfName, param.defaultValue as DataType[]);\n if (value === undefined && !!param.tfDeprecatedName) {\n value = getDtypeArrayParam(\n node.attr, param.tfDeprecatedName,\n param.defaultValue as DataType[]);\n }\n break;\n case 'func':\n value = getFuncParam(\n node.attr, param.tfName, param.defaultValue as string);\n if (value === undefined && !!param.tfDeprecatedName) 
{\n value = getFuncParam(\n node.attr, param.tfDeprecatedName,\n param.defaultValue as string);\n }\n break;\n case 'tensor':\n case 'tensors':\n break;\n default:\n throw new Error(\n `Unsupported param type: ${param.type} for op: ${node.op}`);\n }\n map[param.name] = {value, type};\n return map;\n }, {});\n }\n return newNode;\n }\n\n // map the TFunctionDef to TFJS graph object\n private mapFunction(functionDef: tensorflow.IFunctionDef): Graph {\n const tfNodes = functionDef.nodeDef;\n const placeholders: Node[] = [];\n const weights: Node[] = [];\n let nodes: {[key: string]: Node} = {};\n if (tfNodes != null) {\n nodes = tfNodes.reduce<{[key: string]: Node}>((map, node) => {\n map[node.name] = this.mapNode(node);\n if (node.op === 'Const') {\n weights.push(map[node.name]);\n }\n return map;\n }, {});\n }\n const inputs: Node[] = [];\n const outputs: Node[] = [];\n\n functionDef.signature.inputArg.forEach(arg => {\n const [nodeName, ] = getNodeNameAndIndex(arg.name);\n const node: Node = {\n name: nodeName,\n op: 'Placeholder',\n inputs: [],\n inputNames: [],\n category: 'graph',\n inputParams: {},\n attrParams: {dtype: {value: parseDtypeParam(arg.type), type: 'dtype'}},\n children: []\n };\n node.signatureKey = arg.name;\n inputs.push(node);\n nodes[nodeName] = node;\n });\n\n const allNodes = Object.keys(nodes);\n allNodes.forEach(key => {\n const node = nodes[key];\n node.inputNames.forEach((name, index) => {\n const [nodeName, , outputName] = getNodeNameAndIndex(name);\n const inputNode = nodes[nodeName];\n if (inputNode.outputs != null) {\n const outputIndex = inputNode.outputs.indexOf(outputName);\n if (outputIndex !== -1) {\n const inputName = `${nodeName}:${outputIndex}`;\n // update the input name to use the mapped output index directly.\n node.inputNames[index] = inputName;\n }\n }\n node.inputs.push(inputNode);\n inputNode.children.push(node);\n });\n });\n\n const returnNodeMap = functionDef.ret;\n\n functionDef.signature.outputArg.forEach(output => {\n const [nodeName, index] = getNodeNameAndIndex(returnNodeMap[output.name]);\n const node = nodes[nodeName];\n if (node != null) {\n node.defaultOutput = index;\n outputs.push(node);\n }\n });\n\n const signature = this.mapArgsToSignature(functionDef);\n return {nodes, inputs, outputs, weights, placeholders, signature};\n }\n\n private mapArgsToSignature(functionDef: tensorflow.IFunctionDef):\n tensorflow.ISignatureDef {\n return {\n methodName: functionDef.signature.name,\n inputs: functionDef.signature.inputArg.reduce(\n (map, arg) => {\n map[arg.name] = this.mapArgToTensorInfo(arg);\n return map;\n },\n {} as {[key: string]: tensorflow.ITensorInfo}),\n outputs: functionDef.signature.outputArg.reduce(\n (map, arg) => {\n map[arg.name] = this.mapArgToTensorInfo(arg, functionDef.ret);\n return map;\n },\n {} as {[key: string]: tensorflow.ITensorInfo}),\n };\n }\n\n private mapArgToTensorInfo(\n arg: tensorflow.OpDef.IArgDef,\n nameMap?: {[key: string]: string}): tensorflow.ITensorInfo {\n let name = arg.name;\n if (nameMap != null) {\n name = nameMap[name];\n }\n return {name, dtype: arg.type};\n }\n}\n\nexport function decodeBase64(text: string): string {\n const global = env().global;\n if (typeof global.atob !== 'undefined') {\n return global.atob(text);\n } else if (typeof Buffer !== 'undefined') {\n return new Buffer(text, 'base64').toString();\n } else {\n throw new Error(\n 'Unable to decode base64 in this environment. 
' +\n 'Missing built-in atob() or Buffer()');\n }\n}\n\nexport function parseStringParam(s: []|string, keepCase: boolean): string {\n const value =\n Array.isArray(s) ? String.fromCharCode.apply(null, s) : decodeBase64(s);\n return keepCase ? value : value.toLowerCase();\n}\n\nexport function getStringParam(\n attrs: {[key: string]: tensorflow.IAttrValue}, name: string, def: string,\n keepCase = false): string {\n const param = attrs[name];\n if (param != null) {\n return parseStringParam(param.s, keepCase);\n }\n return def;\n}\n\nexport function getBoolParam(\n attrs: {[key: string]: tensorflow.IAttrValue}, name: string,\n def: boolean): boolean {\n const param = attrs[name];\n return param ? param.b : def;\n}\n\nexport function getNumberParam(\n attrs: {[key: string]: tensorflow.IAttrValue}, name: string,\n def: number): number {\n const param = attrs[name] || {};\n const value =\n param['i'] != null ? param['i'] : (param['f'] != null ? param['f'] : def);\n return (typeof value === 'number') ? value : parseInt(value, 10);\n}\n\nexport function parseDtypeParam(value: string|tensorflow.DataType): DataType {\n if (typeof (value) === 'string') {\n // tslint:disable-next-line:no-any\n value = tensorflow.DataType[value as any];\n }\n switch (value) {\n case tensorflow.DataType.DT_FLOAT:\n case tensorflow.DataType.DT_HALF:\n return 'float32';\n case tensorflow.DataType.DT_INT32:\n case tensorflow.DataType.DT_INT64:\n case tensorflow.DataType.DT_INT8:\n case tensorflow.DataType.DT_UINT8:\n return 'int32';\n case tensorflow.DataType.DT_BOOL:\n return 'bool';\n case tensorflow.DataType.DT_DOUBLE:\n return 'float32';\n case tensorflow.DataType.DT_STRING:\n return 'string';\n default:\n // Unknown dtype error will happen at runtime (instead of parse time),\n // since these nodes might not be used by the actual subgraph execution.\n return null;\n }\n}\n\nexport function getFuncParam(\n attrs: {[key: string]: tensorflow.IAttrValue}, name: string,\n def: string): string {\n const param = attrs[name];\n if (param && param.func) {\n return param.func.name;\n }\n return def;\n}\n\nexport function getDtypeParam(\n attrs: {[key: string]: tensorflow.IAttrValue}, name: string,\n def: DataType): DataType {\n const param = attrs[name];\n if (param && param.type) {\n return parseDtypeParam(param.type);\n }\n return def;\n}\n\nexport function getDtypeArrayParam(\n attrs: {[key: string]: tensorflow.IAttrValue}, name: string,\n def: DataType[]): DataType[] {\n const param = attrs[name];\n if (param && param.list && param.list.type) {\n return param.list.type.map(v => parseDtypeParam(v));\n }\n return def;\n}\n\nexport function parseTensorShapeParam(shape: tensorflow.ITensorShape): number[]|\n undefined {\n if (shape.unknownRank) {\n return undefined;\n }\n if (shape.dim != null) {\n return shape.dim.map(\n dim =>\n (typeof dim.size === 'number') ? dim.size : parseInt(dim.size, 10));\n }\n return [];\n}\n\nexport function getTensorShapeParam(\n attrs: {[key: string]: tensorflow.IAttrValue}, name: string,\n def?: number[]): number[]|undefined {\n const param = attrs[name];\n if (param && param.shape) {\n return parseTensorShapeParam(param.shape);\n }\n return def;\n}\n\nexport function getNumericArrayParam(\n attrs: {[key: string]: tensorflow.IAttrValue}, name: string,\n def: number[]): number[] {\n const param = attrs[name];\n if (param) {\n return ((param.list.f && param.list.f.length ? param.list.f :\n param.list.i) ||\n [])\n .map(v => (typeof v === 'number') ? 
v : parseInt(v, 10));\n }\n return def;\n}\n\nexport function getStringArrayParam(\n attrs: {[key: string]: tensorflow.IAttrValue}, name: string, def: string[],\n keepCase = false): string[] {\n const param = attrs[name];\n if (param && param.list && param.list.s) {\n return param.list.s.map((v) => {\n return parseStringParam(v, keepCase);\n });\n }\n return def;\n}\n\nexport function getTensorShapeArrayParam(\n attrs: {[key: string]: tensorflow.IAttrValue}, name: string,\n def: number[][]): number[][] {\n const param = attrs[name];\n if (param && param.list && param.list.shape) {\n return param.list.shape.map((v) => {\n return parseTensorShapeParam(v);\n });\n }\n return def;\n}\n\nexport function getBoolArrayParam(\n attrs: {[key: string]: tensorflow.IAttrValue}, name: string,\n def: boolean[]): boolean[] {\n const param = attrs[name];\n if (param && param.list && param.list.b) {\n return param.list.b;\n }\n return def;\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, Tensor} from '@tensorflow/tfjs-core';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {getTensor} from '../executors/utils';\nimport {getBoolArrayParam, getBoolParam, getDtypeArrayParam, getDtypeParam, getNumberParam, getNumericArrayParam, getStringArrayParam, getStringParam, getTensorShapeArrayParam, getTensorShapeParam} from '../operation_mapper';\nimport {GraphNode, Node, ValueType} from '../types';\n\n/**\n * Helper class for lookup inputs and params for nodes in the model graph.\n */\nexport class NodeValueImpl implements GraphNode {\n public readonly inputs: Tensor[] = [];\n public readonly attrs: {[key: string]: ValueType} = {};\n constructor(\n private node: Node, private tensorMap: NamedTensorsMap,\n private context: ExecutionContext) {\n this.inputs = node.inputNames.map(name => this.getInput(name));\n if (node.rawAttrs != null) {\n this.attrs = Object.keys(node.rawAttrs)\n .reduce((attrs: {[key: string]: ValueType}, key) => {\n attrs[key] = this.getAttr(key);\n return attrs;\n }, {});\n }\n }\n\n /**\n * Return the value of the attribute or input param.\n * @param name String: name of attribute or input param.\n */\n private getInput(name: string): Tensor {\n return getTensor(name, this.tensorMap, this.context);\n }\n\n /**\n * Return the value of the attribute or input param.\n * @param name String: name of attribute or input param.\n */\n private getAttr(name: string, defaultValue?: ValueType): ValueType {\n const value = this.node.rawAttrs[name];\n if (value.tensor != null) {\n return getTensor(name, this.tensorMap, this.context);\n }\n if (value.i != null || value.f != null) {\n return getNumberParam(this.node.rawAttrs, name, defaultValue as number);\n }\n if (value.s != null) {\n return getStringParam(this.node.rawAttrs, name, defaultValue 
as string);\n }\n if (value.b != null) {\n return getBoolParam(this.node.rawAttrs, name, defaultValue as boolean);\n }\n if (value.shape != null) {\n return getTensorShapeParam(\n this.node.rawAttrs, name, defaultValue as number[]);\n }\n if (value.type != null) {\n return getDtypeParam(this.node.rawAttrs, name, defaultValue as DataType);\n }\n if (value.list != null) {\n if (value.list.i != null || value.list.f != null) {\n return getNumericArrayParam(\n this.node.rawAttrs, name, defaultValue as number[]);\n }\n if (value.list.s != null) {\n return getStringArrayParam(\n this.node.rawAttrs, name, defaultValue as string[]);\n }\n if (value.list.shape != null) {\n return getTensorShapeArrayParam(\n this.node.rawAttrs, name, defaultValue as number[][]);\n }\n if (value.list.b != null) {\n return getBoolArrayParam(\n this.node.rawAttrs, name, defaultValue as boolean[]);\n }\n if (value.list.type != null) {\n return getDtypeArrayParam(\n this.node.rawAttrs, name, defaultValue as DataType[]);\n }\n }\n\n return defaultValue;\n }\n}\n","\n/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n/**\n * This differs from util.assertShapesMatch in that it allows values of\n * negative one, an undefined size of a dimensinon, in a shape to match\n * anything.\n */\n\nimport {Tensor, util} from '@tensorflow/tfjs-core';\n\n/**\n * Used by TensorList and TensorArray to verify if elementShape matches, support\n * negative value as the dim shape.\n * @param shapeA\n * @param shapeB\n * @param errorMessagePrefix\n */\nexport function assertShapesMatchAllowUndefinedSize(\n shapeA: number|number[], shapeB: number|number[],\n errorMessagePrefix = ''): void {\n // constant shape means unknown rank\n if (typeof shapeA === 'number' || typeof shapeB === 'number') {\n return;\n }\n util.assert(\n shapeA.length === shapeB.length,\n () => errorMessagePrefix + ` Shapes ${shapeA} and ${shapeB} must match`);\n for (let i = 0; i < shapeA.length; i++) {\n const dim0 = shapeA[i];\n const dim1 = shapeB[i];\n util.assert(\n dim0 < 0 || dim1 < 0 || dim0 === dim1,\n () =>\n errorMessagePrefix + ` Shapes ${shapeA} and ${shapeB} must match`);\n }\n}\n\nexport function fullDefinedShape(elementShape: number|number[]): boolean {\n if (typeof elementShape === 'number' || elementShape.some(dim => dim < 0)) {\n return false;\n }\n return true;\n}\n/**\n * Generate the output element shape from the list elementShape, list tensors\n * and input param.\n * @param listElementShape\n * @param tensors\n * @param elementShape\n */\nexport function inferElementShape(\n listElementShape: number|number[], tensors: Tensor[],\n elementShape: number|number[]): number[] {\n let partialShape = mergeElementShape(listElementShape, elementShape);\n const notfullDefinedShape = !fullDefinedShape(partialShape);\n if (notfullDefinedShape && tensors.length === 0) {\n throw new Error(\n `Tried to 
calculate elements of an empty list` +\n ` with non-fully-defined elementShape: ${partialShape}`);\n }\n if (notfullDefinedShape) {\n tensors.forEach(tensor => {\n partialShape = mergeElementShape(tensor.shape, partialShape);\n });\n }\n if (!fullDefinedShape(partialShape)) {\n throw new Error(`Non-fully-defined elementShape: ${partialShape}`);\n }\n return partialShape as number[];\n}\n\nexport function mergeElementShape(\n elementShapeA: number|number[], elementShapeB: number|number[]): number|\n number[] {\n if (typeof elementShapeA === 'number') {\n return elementShapeB;\n }\n if (typeof elementShapeB === 'number') {\n return elementShapeA;\n }\n\n if (elementShapeA.length !== elementShapeB.length) {\n throw new Error(`Incompatible ranks during merge: ${elementShapeA} vs. ${\n elementShapeB}`);\n }\n\n const result: number[] = [];\n for (let i = 0; i < elementShapeA.length; ++i) {\n const dim0 = elementShapeA[i];\n const dim1 = elementShapeB[i];\n if (dim0 >= 0 && dim1 >= 0 && dim0 !== dim1) {\n throw new Error(`Incompatible shape during merge: ${elementShapeA} vs. ${\n elementShapeB}`);\n }\n result[i] = dim0 >= 0 ? dim0 : dim1;\n }\n return result;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {concat, DataType, keep, reshape, scalar, slice, stack, Tensor, tensor, tidy, unstack} from '@tensorflow/tfjs-core';\n\nimport {assertShapesMatchAllowUndefinedSize} from './tensor_utils';\n\nexport interface TensorWithState {\n tensor?: Tensor;\n written?: boolean;\n read?: boolean;\n cleared?: boolean;\n}\n/**\n * The TensorArray object keeps an array of Tensors. 
It\n * allows reading from the array and writing to the array.\n */\nexport class TensorArray {\n private tensors: TensorWithState[] = [];\n private closed_ = false;\n readonly idTensor: Tensor;\n constructor(\n readonly name: string, readonly dtype: DataType, private maxSize: number,\n private elementShape: number[], readonly identicalElementShapes: boolean,\n readonly dynamicSize: boolean, readonly clearAfterRead: boolean) {\n this.idTensor = scalar(0);\n keep(this.idTensor);\n }\n\n get id() {\n return this.idTensor.id;\n }\n\n get closed() {\n return this.closed_;\n }\n\n /**\n * Dispose the tensors and idTensor and mark the TensoryArray as closed.\n */\n clearAndClose(keepIds?: Set) {\n this.tensors.forEach(tensor => {\n if (keepIds == null || !keepIds.has(tensor.tensor.id)) {\n tensor.tensor.dispose();\n }\n });\n this.tensors = [];\n this.closed_ = true;\n this.idTensor.dispose();\n }\n\n size(): number {\n return this.tensors.length;\n }\n\n /**\n * Read the value at location index in the TensorArray.\n * @param index Number the index to read from.\n */\n read(index: number): Tensor {\n if (this.closed_) {\n throw new Error(`TensorArray ${this.name} has already been closed.`);\n }\n\n if (index < 0 || index >= this.size()) {\n throw new Error(`Tried to read from index ${index}, but array size is: ${\n this.size()}`);\n }\n\n const tensorWithState = this.tensors[index];\n if (tensorWithState.cleared) {\n throw new Error(\n `TensorArray ${this.name}: Could not read index ${\n index} twice because it was cleared after a previous read ` +\n `(perhaps try setting clear_after_read = false?).`);\n }\n\n if (this.clearAfterRead) {\n tensorWithState.cleared = true;\n }\n\n tensorWithState.read = true;\n return tensorWithState.tensor;\n }\n\n /**\n * Helper method to read multiple tensors from the specified indices.\n */\n readMany(indices: number[]): Tensor[] {\n return indices.map(index => this.read(index));\n }\n\n /**\n * Write value into the index of the TensorArray.\n * @param index number the index to write to.\n * @param tensor\n */\n write(index: number, tensor: Tensor) {\n if (this.closed_) {\n throw new Error(`TensorArray ${this.name} has already been closed.`);\n }\n\n if (index < 0 || !this.dynamicSize && index >= this.maxSize) {\n throw new Error(`Tried to write to index ${\n index}, but array is not resizeable and size is: ${this.maxSize}`);\n }\n\n const t = this.tensors[index] || {};\n\n if (tensor.dtype !== this.dtype) {\n throw new Error(`TensorArray ${\n this.name}: Could not write to TensorArray index ${index},\n because the value dtype is ${\n tensor.dtype}, but TensorArray dtype is ${this.dtype}.`);\n }\n\n // Set the shape for the first time write to unknow shape tensor array\n if (this.size() === 0 &&\n (this.elementShape == null || this.elementShape.length === 0)) {\n this.elementShape = tensor.shape;\n }\n\n assertShapesMatchAllowUndefinedSize(\n this.elementShape, tensor.shape,\n `TensorArray ${this.name}: Could not write to TensorArray index ${\n index}.`);\n\n if (t.read) {\n throw new Error(\n `TensorArray ${this.name}: Could not write to TensorArray index ${\n index}, because it has already been read.`);\n }\n\n if (t.written) {\n throw new Error(\n `TensorArray ${this.name}: Could not write to TensorArray index ${\n index}, because it has already been written.`);\n }\n\n t.tensor = tensor;\n keep(tensor);\n t.written = true;\n\n this.tensors[index] = t;\n }\n\n /**\n * Helper method to write multiple tensors to the specified indices.\n */\n writeMany(indices: 
number[], tensors: Tensor[]) {\n if (indices.length !== tensors.length) {\n throw new Error(\n `TensorArray ${this.name}: could not write multiple tensors,` +\n `because the index size: ${\n indices.length} is not the same as tensors size: ${\n tensors.length}.`);\n }\n\n indices.forEach((i, index) => this.write(i, tensors[index]));\n }\n\n /**\n * Return selected values in the TensorArray as a packed Tensor. All of\n * selected values must have been written and their shapes must all match.\n * @param [indices] number[] Optional. Taking values in [0, max_value). If the\n * TensorArray is not dynamic, max_value=size(). If not specified returns\n * all tensors in the original order.\n * @param [dtype]\n */\n gather(indices?: number[], dtype?: DataType): Tensor {\n if (!!dtype && dtype !== this.dtype) {\n throw new Error(`TensorArray dtype is ${\n this.dtype} but gather requested dtype ${dtype}`);\n }\n\n if (!indices) {\n indices = [];\n for (let i = 0; i < this.size(); i++) {\n indices.push(i);\n }\n } else {\n indices = indices.slice(0, this.size());\n }\n\n if (indices.length === 0) {\n return tensor([], [0].concat(this.elementShape));\n }\n\n // Read all the PersistentTensors into a vector to keep track of\n // their memory.\n const tensors = this.readMany(indices);\n\n assertShapesMatchAllowUndefinedSize(\n this.elementShape, tensors[0].shape, 'TensorArray shape mismatch: ');\n\n return stack(tensors, 0);\n }\n\n /**\n * Return the values in the TensorArray as a concatenated Tensor.\n */\n concat(dtype?: DataType): Tensor {\n if (!!dtype && dtype !== this.dtype) {\n throw new Error(`TensorArray dtype is ${\n this.dtype} but concat requested dtype ${dtype}`);\n }\n\n if (this.size() === 0) {\n return tensor([], [0].concat(this.elementShape));\n }\n\n const indices = [];\n for (let i = 0; i < this.size(); i++) {\n indices.push(i);\n }\n // Collect all the tensors from the tensors array.\n const tensors = this.readMany(indices);\n\n assertShapesMatchAllowUndefinedSize(\n this.elementShape, tensors[0].shape,\n `TensorArray shape mismatch: tensor array shape (${\n this.elementShape}) vs first tensor shape (${tensors[0].shape})`);\n\n return concat(tensors, 0);\n }\n\n /**\n * Scatter the values of a Tensor in specific indices of a TensorArray.\n * @param indices nummber[] values in [0, max_value). If the\n * TensorArray is not dynamic, max_value=size().\n * @param tensor Tensor input tensor.\n */\n scatter(indices: number[], tensor: Tensor) {\n if (tensor.dtype !== this.dtype) {\n throw new Error(`TensorArray dtype is ${\n this.dtype} but tensor has dtype ${tensor.dtype}`);\n }\n\n if (indices.length !== tensor.shape[0]) {\n throw new Error(`Expected len(indices) == tensor.shape[0], but saw: ${\n indices.length} vs. ${tensor.shape[0]}`);\n }\n\n const maxIndex = Math.max(...indices);\n\n if (!this.dynamicSize && maxIndex >= this.maxSize) {\n throw new Error(\n `Max index must be < array size (${maxIndex} vs. 
${this.maxSize})`);\n }\n\n this.writeMany(indices, unstack(tensor, 0));\n }\n\n /**\n * Split the values of a Tensor into the TensorArray.\n * @param length number[] with the lengths to use when splitting value along\n * its first dimension.\n * @param tensor Tensor, the tensor to split.\n */\n split(length: number[], tensor: Tensor) {\n if (tensor.dtype !== this.dtype) {\n throw new Error(`TensorArray dtype is ${\n this.dtype} but tensor has dtype ${tensor.dtype}`);\n }\n let totalLength = 0;\n const cumulativeLengths = length.map(len => {\n totalLength += len;\n return totalLength;\n });\n\n if (totalLength !== tensor.shape[0]) {\n throw new Error(`Expected sum of lengths to be equal to\n tensor.shape[0], but sum of lengths is\n ${totalLength}, and tensor's shape is: ${tensor.shape}`);\n }\n\n if (!this.dynamicSize && length.length !== this.maxSize) {\n throw new Error(\n `TensorArray's size is not equal to the size of lengths (${\n this.maxSize} vs. ${length.length}), ` +\n 'and the TensorArray is not marked as dynamically resizeable');\n }\n\n const elementPerRow = totalLength === 0 ? 0 : tensor.size / totalLength;\n const tensors: Tensor[] = [];\n tidy(() => {\n tensor = reshape(tensor, [1, totalLength, elementPerRow]);\n for (let i = 0; i < length.length; ++i) {\n const previousLength = (i === 0) ? 0 : cumulativeLengths[i - 1];\n const indices = [0, previousLength, 0];\n const sizes = [1, length[i], elementPerRow];\n tensors[i] = reshape(slice(tensor, indices, sizes), this.elementShape);\n }\n return tensors;\n });\n const indices = [];\n for (let i = 0; i < length.length; i++) {\n indices[i] = i;\n }\n this.writeMany(indices, tensors);\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {concat, DataType, keep, reshape, scalar, slice, stack, Tensor, tensor, tidy, unstack} from '@tensorflow/tfjs-core';\n\nimport {assertShapesMatchAllowUndefinedSize, inferElementShape, mergeElementShape} from './tensor_utils';\n\n/**\n * TensorList stores a container of `tf.Tensor` objects, which are accessible\n * via tensors field.\n *\n * In order to get a copy of the underlying list, use the copy method:\n * ```\n * TensorList b = a.copy();\n * b.tensors().pushBack(t); // This does not modify a.tensors().\n * ```\n *\n * Note that this is not a deep copy: the memory locations of the underlying\n * tensors will still point to the same locations of the corresponding tensors\n * in the original.\n */\n\nexport class TensorList {\n readonly idTensor: Tensor;\n maxNumElements: number;\n\n get id() {\n return this.idTensor.id;\n }\n /**\n *\n * @param tensors list of tensors\n * @param elementShape shape of each tensor, this can be a single number (any\n * shape is allowed) or partial shape (dim = -1).\n * @param elementDtype data type of each tensor\n * @param maxNumElements The maximum allowed size of `tensors`. 
Defaults to -1\n * meaning that the size of `tensors` is unbounded.\n */\n constructor(\n readonly tensors: Tensor[], readonly elementShape: number|number[],\n readonly elementDtype: DataType, maxNumElements = -1) {\n if (tensors != null) {\n tensors.forEach(tensor => {\n if (elementDtype !== tensor.dtype) {\n throw new Error(`Invalid data types; op elements ${\n elementDtype}, but list elements ${tensor.dtype}`);\n }\n assertShapesMatchAllowUndefinedSize(\n elementShape, tensor.shape, 'TensorList shape mismatch: ');\n\n keep(tensor);\n });\n }\n this.idTensor = scalar(0);\n this.maxNumElements = maxNumElements;\n keep(this.idTensor);\n }\n\n /**\n * Get a new TensorList containing a copy of the underlying tensor container.\n */\n copy(): TensorList {\n return new TensorList(\n [...this.tensors], this.elementShape, this.elementDtype);\n }\n\n /**\n * Dispose the tensors and idTensor and clear the tensor list.\n */\n clearAndClose(keepIds?: Set) {\n this.tensors.forEach(tensor => {\n if (keepIds == null || !keepIds.has(tensor.id)) {\n tensor.dispose();\n }\n });\n this.tensors.length = 0;\n this.idTensor.dispose();\n }\n /**\n * The size of the tensors in the tensor list.\n */\n size() {\n return this.tensors.length;\n }\n\n /**\n * Return a tensor that stacks a list of rank-R tf.Tensors into one rank-(R+1)\n * tf.Tensor.\n * @param elementShape shape of each tensor\n * @param elementDtype data type of each tensor\n * @param numElements the number of elements to stack\n */\n stack(elementShape: number[], elementDtype: DataType, numElements = -1):\n Tensor {\n if (elementDtype !== this.elementDtype) {\n throw new Error(`Invalid data types; op elements ${\n elementDtype}, but list elements ${this.elementDtype}`);\n }\n if (numElements !== -1 && this.tensors.length !== numElements) {\n throw new Error(`Operation expected a list with ${\n numElements} elements but got a list with ${\n this.tensors.length} elements.`);\n }\n assertShapesMatchAllowUndefinedSize(\n elementShape, this.elementShape, 'TensorList shape mismatch: ');\n const outputElementShape =\n inferElementShape(this.elementShape, this.tensors, elementShape);\n return tidy(() => {\n const reshapedTensors =\n this.tensors.map(tensor => reshape(tensor, outputElementShape));\n return stack(reshapedTensors, 0);\n });\n }\n\n /**\n * Pop a tensor from the end of the list.\n * @param elementShape shape of the tensor\n * @param elementDtype data type of the tensor\n */\n popBack(elementShape: number[], elementDtype: DataType): Tensor {\n if (elementDtype !== this.elementDtype) {\n throw new Error(`Invalid data types; op elements ${\n elementDtype}, but list elements ${this.elementDtype}`);\n }\n\n if (this.size() === 0) {\n throw new Error('Trying to pop from an empty list.');\n }\n const outputElementShape =\n inferElementShape(this.elementShape, this.tensors, elementShape);\n const tensor = this.tensors.pop();\n tensor.kept = false;\n\n assertShapesMatchAllowUndefinedSize(\n tensor.shape, elementShape, 'TensorList shape mismatch: ');\n\n return reshape(tensor, outputElementShape);\n }\n\n /**\n * Push a tensor to the end of the list.\n * @param tensor Tensor to be pushed.\n */\n pushBack(tensor: Tensor) {\n if (tensor.dtype !== this.elementDtype) {\n throw new Error(`Invalid data types; op elements ${\n tensor.dtype}, but list elements ${this.elementDtype}`);\n }\n\n assertShapesMatchAllowUndefinedSize(\n tensor.shape, this.elementShape, 'TensorList shape mismatch: ');\n\n if (this.maxNumElements === this.size()) {\n throw new 
Error(`Trying to push element into a full list.`);\n }\n keep(tensor);\n this.tensors.push(tensor);\n }\n\n /**\n * Update the size of the list.\n * @param size the new size of the list.\n */\n resize(size: number) {\n if (size < 0) {\n throw new Error(\n `TensorListResize expects size to be non-negative. Got: ${size}`);\n }\n\n if (this.maxNumElements !== -1 && size > this.maxNumElements) {\n throw new Error(`TensorListResize input size ${\n size} is greater maxNumElement ${this.maxNumElements}.`);\n }\n\n const destTensorList: TensorList = new TensorList(\n [], this.elementShape, this.elementDtype, this.maxNumElements);\n destTensorList.tensors.length = size;\n for (let i = 0; i < Math.min(this.tensors.length, size); ++i) {\n destTensorList.tensors[i] = this.tensors[i];\n }\n return destTensorList;\n }\n\n /**\n * Retrieve the element at the provided index\n * @param elementShape shape of the tensor\n * @param elementDtype dtype of the tensor\n * @param elementIndex index of the tensor\n */\n getItem(elementIndex: number, elementShape: number[], elementDtype: DataType):\n Tensor {\n if (elementDtype !== this.elementDtype) {\n throw new Error(`Invalid data types; op elements ${\n elementDtype}, but list elements ${this.elementDtype}`);\n }\n if (elementIndex < 0 || elementIndex > this.tensors.length) {\n throw new Error(`Trying to access element ${\n elementIndex} in a list with ${this.tensors.length} elements.`);\n }\n\n if (this.tensors[elementIndex] == null) {\n throw new Error(`element at index ${elementIndex} is null.`);\n }\n\n assertShapesMatchAllowUndefinedSize(\n this.tensors[elementIndex].shape, elementShape,\n 'TensorList shape mismatch: ');\n const outputElementShape =\n inferElementShape(this.elementShape, this.tensors, elementShape);\n return reshape(this.tensors[elementIndex], outputElementShape);\n }\n\n /**\n * Set the tensor at the index\n * @param elementIndex index of the tensor\n * @param tensor the tensor to be inserted into the list\n */\n setItem(elementIndex: number, tensor: Tensor) {\n if (tensor.dtype !== this.elementDtype) {\n throw new Error(`Invalid data types; op elements ${\n tensor.dtype}, but list elements ${this.elementDtype}`);\n }\n\n if (elementIndex < 0 ||\n this.maxNumElements !== -1 && elementIndex >= this.maxNumElements) {\n throw new Error(`Trying to set element ${\n elementIndex} in a list with max ${this.maxNumElements} elements.`);\n }\n\n assertShapesMatchAllowUndefinedSize(\n this.elementShape, tensor.shape, 'TensorList shape mismatch: ');\n keep(tensor);\n\n // dispose the previous value if it is replacing.\n if (this.tensors[elementIndex] != null) {\n this.tensors[elementIndex].kept = false;\n }\n\n this.tensors[elementIndex] = tensor;\n }\n\n /**\n * Return selected values in the TensorList as a stacked Tensor. 
All of\n * selected values must have been written and their shapes must all match.\n * @param indices indices of tensors to gather\n * @param elementDtype output tensor dtype\n * @param elementShape output tensor element shape\n */\n gather(indices: number[], elementDtype: DataType, elementShape: number[]):\n Tensor {\n if (elementDtype !== this.elementDtype) {\n throw new Error(`Invalid data types; op elements ${\n elementDtype}, but list elements ${this.elementDtype}`);\n }\n\n assertShapesMatchAllowUndefinedSize(\n this.elementShape, elementShape, 'TensorList shape mismatch: ');\n\n // When indices is greater than the size of the list, indices beyond the\n // size of the list are ignored.\n indices = indices.slice(0, this.size());\n const outputElementShape =\n inferElementShape(this.elementShape, this.tensors, elementShape);\n if (indices.length === 0) {\n return tensor([], [0].concat(outputElementShape));\n }\n\n return tidy(() => {\n const tensors =\n indices.map(i => reshape(this.tensors[i], outputElementShape));\n return stack(tensors, 0);\n });\n }\n\n /**\n * Return the values in the TensorList as a concatenated Tensor.\n * @param elementDtype output tensor dtype\n * @param elementShape output tensor element shape\n */\n concat(elementDtype: DataType, elementShape: number[]): Tensor {\n if (!!elementDtype && elementDtype !== this.elementDtype) {\n throw new Error(`TensorList dtype is ${\n this.elementDtype} but concat requested dtype ${elementDtype}`);\n }\n\n assertShapesMatchAllowUndefinedSize(\n this.elementShape, elementShape, 'TensorList shape mismatch: ');\n const outputElementShape =\n inferElementShape(this.elementShape, this.tensors, elementShape);\n\n if (this.size() === 0) {\n return tensor([], [0].concat(outputElementShape));\n }\n return tidy(() => {\n const tensors = this.tensors.map(t => reshape(t, outputElementShape));\n return concat(tensors, 0);\n });\n }\n}\n\n/**\n * Creates a TensorList which, when stacked, has the value of tensor.\n * @param tensor from tensor\n * @param elementShape output tensor element shape\n */\nexport function fromTensor(\n tensor: Tensor, elementShape: number[], elementDtype: DataType) {\n const dtype = tensor.dtype;\n if (tensor.shape.length < 1) {\n throw new Error(\n `Tensor must be at least a vector, but saw shape: ${tensor.shape}`);\n }\n if (tensor.dtype !== elementDtype) {\n throw new Error(`Invalid data types; op elements ${\n tensor.dtype}, but list elements ${elementDtype}`);\n }\n const tensorElementShape = tensor.shape.slice(1);\n assertShapesMatchAllowUndefinedSize(\n tensorElementShape, elementShape, 'TensorList shape mismatch: ');\n const tensorList: Tensor[] = unstack(tensor);\n return new TensorList(tensorList, elementShape, dtype);\n}\n\n/**\n * Return a TensorList of the given size with empty elements.\n * @param elementShape the shape of the future elements of the list\n * @param elementDtype the desired type of elements in the list\n * @param numElements the number of elements to reserve\n * @param maxNumElements the maximum number of elements in th list\n */\nexport function reserve(\n elementShape: number[], elementDtype: DataType, numElements: number,\n maxNumElements: number) {\n return new TensorList([], elementShape, elementDtype, maxNumElements);\n}\n\n/**\n * Put tensors at specific indices of a stacked tensor into a TensorList.\n * @param indices list of indices on how to scatter the tensor.\n * @param tensor input tensor.\n * @param elementShape the shape of the future elements of the list\n * @param 
numElements the number of elements to scatter\n */\nexport function scatter(\n tensor: Tensor, indices: number[], elementShape: number[],\n numElements?: number): TensorList {\n if (indices.length !== tensor.shape[0]) {\n throw new Error(`Expected len(indices) == tensor.shape[0], but saw: ${\n indices.length} vs. ${tensor.shape[0]}`);\n }\n\n const maxIndex = Math.max(...indices);\n\n if (numElements != null && numElements !== -1 && maxIndex >= numElements) {\n throw new Error(\n `Max index must be < array size (${maxIndex} vs. ${numElements})`);\n }\n\n const list = new TensorList([], elementShape, tensor.dtype, numElements);\n const tensors = unstack(tensor, 0);\n indices.forEach((value, index) => {\n list.setItem(value, tensors[index]);\n });\n return list;\n}\n\n/**\n * Split the values of a Tensor into a TensorList.\n * @param length the lengths to use when splitting value along\n * its first dimension.\n * @param tensor the tensor to split.\n * @param elementShape the shape of the future elements of the list\n */\nexport function split(\n tensor: Tensor, length: number[], elementShape: number[]) {\n let totalLength = 0;\n const cumulativeLengths = length.map(len => {\n totalLength += len;\n return totalLength;\n });\n\n if (totalLength !== tensor.shape[0]) {\n throw new Error(`Expected sum of lengths to be equal to\n tensor.shape[0], but sum of lengths is\n ${totalLength}, and tensor's shape is: ${tensor.shape}`);\n }\n\n const shapeWithoutFirstDim = tensor.shape.slice(1);\n const outputElementShape =\n mergeElementShape(shapeWithoutFirstDim, elementShape);\n const elementPerRow = totalLength === 0 ? 0 : tensor.size / totalLength;\n const tensors: Tensor[] = tidy(() => {\n const tensors = [];\n tensor = reshape(tensor, [1, totalLength, elementPerRow]);\n for (let i = 0; i < length.length; ++i) {\n const previousLength = (i === 0) ? 0 : cumulativeLengths[i - 1];\n const indices = [0, previousLength, 0];\n const sizes = [1, length[i], elementPerRow];\n tensors[i] = reshape(\n slice(tensor, indices, sizes), outputElementShape as number[]);\n }\n tensor.dispose();\n return tensors;\n });\n\n const list = new TensorList([], elementShape, tensor.dtype, length.length);\n\n for (let i = 0; i < tensors.length; i++) {\n list.setItem(i, tensors[i]);\n }\n return list;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, scalar, Tensor} from '@tensorflow/tfjs-core';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {TensorArray} from '../../executor/tensor_array';\nimport {fromTensor, reserve, scatter, split} from '../../executor/tensor_list';\nimport {InternalOpAsyncExecutor, Node} from '../types';\n\nimport {cloneTensor, getParamValue, getTensor} from './utils';\n\nexport const executeOp: InternalOpAsyncExecutor = async(\n node: Node, tensorMap: NamedTensorsMap,\n context: ExecutionContext): Promise => {\n switch (node.op) {\n case 'If':\n case 'StatelessIf': {\n const thenFunc =\n getParamValue('thenBranch', node, tensorMap, context) as string;\n const elseFunc =\n getParamValue('elseBranch', node, tensorMap, context) as string;\n const cond = getParamValue('cond', node, tensorMap, context) as Tensor;\n const args = getParamValue('args', node, tensorMap, context) as Tensor[];\n const condValue = await cond.data();\n if (condValue[0]) {\n return context.functionMap[thenFunc].executeFunctionAsync(\n args, context.tensorArrayMap, context.tensorListMap);\n } else {\n return context.functionMap[elseFunc].executeFunctionAsync(\n args, context.tensorArrayMap, context.tensorListMap);\n }\n }\n case 'While':\n case 'StatelessWhile': {\n const bodyFunc =\n getParamValue('body', node, tensorMap, context) as string;\n const condFunc =\n getParamValue('cond', node, tensorMap, context) as string;\n const args = getParamValue('args', node, tensorMap, context) as Tensor[];\n\n // Calculate the condition of the loop\n const condResult =\n (await context.functionMap[condFunc].executeFunctionAsync(\n args, context.tensorArrayMap, context.tensorListMap));\n const argIds = args.map(tensor => tensor.id);\n let condValue = await condResult[0].data();\n // Dispose the intermediate tensors for condition function\n condResult.forEach(tensor => {\n if (!tensor.kept && argIds.indexOf(tensor.id) === -1) {\n tensor.dispose();\n }\n });\n\n let result: Tensor[] = args;\n\n while (condValue[0]) {\n // Record the previous result for intermediate tensor tracking\n const origResult = result;\n // Execution the body of the loop\n result = await context.functionMap[bodyFunc].executeFunctionAsync(\n result, context.tensorArrayMap, context.tensorListMap);\n const resultIds = result.map(tensor => tensor.id);\n\n // Dispose the intermediate tensor for body function that is not global\n // kept, not input/output of the body function\n origResult.forEach(tensor => {\n if (!tensor.kept && argIds.indexOf(tensor.id) === -1 &&\n resultIds.indexOf(tensor.id) === -1) {\n tensor.dispose();\n }\n });\n\n // Recalcuate the condition of the loop using the latest results.\n const condResult =\n (await context.functionMap[condFunc].executeFunctionAsync(\n result, context.tensorArrayMap, 
context.tensorListMap));\n condValue = await condResult[0].data();\n // Dispose the intermediate tensors for condition function\n condResult.forEach(tensor => {\n if (!tensor.kept && argIds.indexOf(tensor.id) === -1 &&\n resultIds.indexOf(tensor.id) === -1) {\n tensor.dispose();\n }\n });\n }\n return result;\n }\n case 'LoopCond': {\n const pred = getParamValue('pred', node, tensorMap, context) as Tensor;\n return [cloneTensor(pred)];\n }\n case 'Switch': {\n const pred = getParamValue('pred', node, tensorMap, context) as Tensor;\n let data = getParamValue('data', node, tensorMap, context) as Tensor;\n if (!data.kept) {\n data = cloneTensor(data);\n }\n // Outputs nodes :0 => false, :1 => true\n return (await pred.data())[0] ? [undefined, data] : [data, undefined];\n }\n case 'Merge': {\n const inputName = node.inputNames.find(\n name => getTensor(name, tensorMap, context) !== undefined);\n if (inputName) {\n const data = getTensor(inputName, tensorMap, context);\n return [cloneTensor(data)];\n }\n return undefined;\n }\n case 'Enter': {\n const frameId =\n getParamValue('frameName', node, tensorMap, context) as string;\n const data = getParamValue('tensor', node, tensorMap, context) as Tensor;\n context.enterFrame(frameId);\n return [cloneTensor(data)];\n }\n case 'Exit': {\n const data = getParamValue('tensor', node, tensorMap, context) as Tensor;\n context.exitFrame();\n return [cloneTensor(data)];\n }\n case 'NextIteration': {\n const data = getParamValue('tensor', node, tensorMap, context) as Tensor;\n context.nextIteration();\n return [cloneTensor(data)];\n }\n case 'TensorArrayV3': {\n const size = getParamValue('size', node, tensorMap, context) as number;\n const dtype =\n getParamValue('dtype', node, tensorMap, context) as DataType;\n const elementShape =\n getParamValue('elementShape', node, tensorMap, context) as number[];\n const dynamicSize =\n getParamValue('dynamicSize', node, tensorMap, context) as boolean;\n const clearAfterRead =\n getParamValue('clearAfterRead', node, tensorMap, context) as boolean;\n const identicalElementShapes =\n getParamValue('identicalElementShapes', node, tensorMap, context) as\n boolean;\n const name = getParamValue('name', node, tensorMap, context) as string;\n const tensorArray = new TensorArray(\n name, dtype, size, elementShape, identicalElementShapes, dynamicSize,\n clearAfterRead);\n context.addTensorArray(tensorArray);\n return [tensorArray.idTensor, scalar(1.0)];\n }\n case 'TensorArrayWriteV3': {\n const id =\n getParamValue('tensorArrayId', node, tensorMap, context) as Tensor;\n const index = getParamValue('index', node, tensorMap, context) as number;\n const writeTensor =\n getParamValue('tensor', node, tensorMap, context) as Tensor;\n const writeTensorArray = context.getTensorArray(id.id);\n writeTensorArray.write(index, writeTensor);\n return [writeTensorArray.idTensor];\n }\n case 'TensorArrayReadV3': {\n const readId =\n getParamValue('tensorArrayId', node, tensorMap, context) as Tensor;\n const readIndex =\n getParamValue('index', node, tensorMap, context) as number;\n const readTensorArray = context.getTensorArray(readId.id);\n return [readTensorArray.read(readIndex)];\n }\n case 'TensorArrayGatherV3': {\n const gatherId =\n getParamValue('tensorArrayId', node, tensorMap, context) as Tensor;\n const gatherIndices =\n getParamValue('indices', node, tensorMap, context) as number[];\n const gatherDtype =\n getParamValue('dtype', node, tensorMap, context) as DataType;\n const gatherTensorArray = 
context.getTensorArray(gatherId.id);\n return [gatherTensorArray.gather(gatherIndices, gatherDtype)];\n }\n case 'TensorArrayScatterV3': {\n const scatterId =\n getParamValue('tensorArrayId', node, tensorMap, context) as Tensor;\n const scatterIndices =\n getParamValue('indices', node, tensorMap, context) as number[];\n const scatterTensor =\n getParamValue('tensor', node, tensorMap, context) as Tensor;\n const scatterTensorArray = context.getTensorArray(scatterId.id);\n scatterTensorArray.scatter(scatterIndices, scatterTensor);\n return [scatterTensorArray.idTensor];\n }\n case 'TensorArrayConcatV3': {\n const concatId =\n getParamValue('tensorArrayId', node, tensorMap, context) as Tensor;\n const concatTensorArray = context.getTensorArray(concatId.id);\n const concatDtype =\n getParamValue('dtype', node, tensorMap, context) as DataType;\n return [concatTensorArray.concat(concatDtype)];\n }\n case 'TensorArraySplitV3': {\n const splitId =\n getParamValue('tensorArrayId', node, tensorMap, context) as Tensor;\n const splitTensor =\n getParamValue('tensor', node, tensorMap, context) as Tensor;\n const lengths =\n getParamValue('lengths', node, tensorMap, context) as number[];\n const splitTensorArray = context.getTensorArray(splitId.id);\n splitTensorArray.split(lengths, splitTensor);\n return [splitTensorArray.idTensor];\n }\n case 'TensorArraySizeV3': {\n const sizeId =\n getParamValue('tensorArrayId', node, tensorMap, context) as Tensor;\n const sizeTensorArray = context.getTensorArray(sizeId.id);\n return [scalar(sizeTensorArray.size(), 'int32')];\n }\n case 'TensorArrayCloseV3': {\n const closeId =\n getParamValue('tensorArrayId', node, tensorMap, context) as Tensor;\n const closeTensorArray = context.getTensorArray(closeId.id);\n closeTensorArray.clearAndClose();\n return [closeTensorArray.idTensor];\n }\n case 'TensorListSetItem': {\n const idTensor =\n getParamValue('tensorListId', node, tensorMap, context) as Tensor;\n const index = getParamValue('index', node, tensorMap, context) as number;\n const writeTensor =\n getParamValue('tensor', node, tensorMap, context) as Tensor;\n const tensorList = context.getTensorList(idTensor.id);\n tensorList.setItem(index, writeTensor);\n return [tensorList.idTensor];\n }\n case 'TensorListGetItem': {\n const idTensor =\n getParamValue('tensorListId', node, tensorMap, context) as Tensor;\n const readIndex =\n getParamValue('index', node, tensorMap, context) as number;\n const elementShape =\n getParamValue('elementShape', node, tensorMap, context) as number[];\n\n const elementDType =\n getParamValue('elementDType', node, tensorMap, context) as DataType;\n const tensorList = context.getTensorList(idTensor.id);\n return [tensorList.getItem(readIndex, elementShape, elementDType)];\n }\n case 'TensorListScatterV2':\n case 'TensorListScatter': {\n const scatterIndices =\n getParamValue('indices', node, tensorMap, context) as number[];\n const scatterTensor =\n getParamValue('tensor', node, tensorMap, context) as Tensor;\n const elementShape =\n getParamValue('elementShape', node, tensorMap, context) as number[];\n const numElements =\n getParamValue('numElements', node, tensorMap, context) as number;\n const tensorList =\n scatter(scatterTensor, scatterIndices, elementShape, numElements);\n context.addTensorList(tensorList);\n return [tensorList.idTensor];\n }\n case 'TensorListReserve':\n case 'EmptyTensorList': {\n const elementShape =\n getParamValue('elementShape', node, tensorMap, context) as number[];\n const elementDtype =\n 
getParamValue('elementDType', node, tensorMap, context) as DataType;\n let numElementsParam;\n\n if (node.op === 'TensorListReserve') {\n numElementsParam = 'numElements';\n } else {\n numElementsParam = 'maxNumElements';\n }\n\n const numElements =\n getParamValue(numElementsParam, node, tensorMap, context) as number;\n const maxNumElements = node.op === 'TensorListReserve' ? -1 : numElements;\n const tensorList =\n reserve(elementShape, elementDtype, numElements, maxNumElements);\n context.addTensorList(tensorList);\n return [tensorList.idTensor];\n }\n case 'TensorListGather': {\n const gatherId =\n getParamValue('tensorListId', node, tensorMap, context) as Tensor;\n const gatherIndices =\n getParamValue('indices', node, tensorMap, context) as number[];\n const elementShape =\n getParamValue('elementShape', node, tensorMap, context) as number[];\n const elementDtype =\n getParamValue('elementDType', node, tensorMap, context) as DataType;\n const tensorList = context.getTensorList(gatherId.id);\n return [tensorList.gather(gatherIndices, elementDtype, elementShape)];\n }\n case 'TensorListStack': {\n const idTensor =\n getParamValue('tensorListId', node, tensorMap, context) as Tensor;\n const elementShape =\n getParamValue('elementShape', node, tensorMap, context) as number[];\n const elementDtype =\n getParamValue('elementDType', node, tensorMap, context) as DataType;\n const numElements =\n getParamValue('numElements', node, tensorMap, context) as number;\n const tensorList = context.getTensorList(idTensor.id);\n return [tensorList.stack(elementShape, elementDtype, numElements)];\n }\n case 'TensorListFromTensor': {\n const tensor =\n getParamValue('tensor', node, tensorMap, context) as Tensor;\n const elementShape =\n getParamValue('elementShape', node, tensorMap, context) as number[];\n const elementDtype =\n getParamValue('elementDType', node, tensorMap, context) as DataType;\n const tensorList = fromTensor(tensor, elementShape, elementDtype);\n context.addTensorList(tensorList);\n return [tensorList.idTensor];\n }\n case 'TensorListConcat':\n case 'TensorListConcatV2': {\n const concatId =\n getParamValue('tensorListId', node, tensorMap, context) as Tensor;\n const tensorList = context.getTensorList(concatId.id);\n const concatDtype =\n getParamValue('dtype', node, tensorMap, context) as DataType;\n const elementShape =\n getParamValue('elementShape', node, tensorMap, context) as number[];\n return [tensorList.concat(concatDtype, elementShape)];\n }\n case 'TensorListPushBack': {\n const idTensor =\n getParamValue('tensorListId', node, tensorMap, context) as Tensor;\n const writeTensor =\n getParamValue('tensor', node, tensorMap, context) as Tensor;\n const tensorList = context.getTensorList(idTensor.id);\n tensorList.pushBack(writeTensor);\n return [tensorList.idTensor];\n }\n case 'TensorListPopBack': {\n const idTensor =\n getParamValue('tensorListId', node, tensorMap, context) as Tensor;\n const elementShape =\n getParamValue('elementShape', node, tensorMap, context) as number[];\n const elementDType =\n getParamValue('elementDType', node, tensorMap, context) as DataType;\n const tensorList = context.getTensorList(idTensor.id);\n return [tensorList.popBack(elementShape, elementDType)];\n }\n case 'TensorListSplit': {\n const splitTensor =\n getParamValue('tensor', node, tensorMap, context) as Tensor;\n const elementShape =\n getParamValue('elementShape', node, tensorMap, context) as number[];\n const lengths =\n getParamValue('lengths', node, tensorMap, context) as number[];\n\n 
const tensorList = split(splitTensor, lengths, elementShape);\n context.addTensorList(tensorList);\n return [tensorList.idTensor];\n }\n case 'TensorListLength': {\n const idTensor =\n getParamValue('tensorListId', node, tensorMap, context) as Tensor;\n const tensorList = context.getTensorList(idTensor.id);\n return [scalar(tensorList.size(), 'int32')];\n }\n case 'TensorListResize': {\n const idTensor =\n getParamValue('tensorListId', node, tensorMap, context) as Tensor;\n const size = getParamValue('size', node, tensorMap, context) as number;\n\n const srcTensorList = context.getTensorList(idTensor.id);\n const destTensorList = srcTensorList.resize(size);\n context.addTensorList(destTensorList);\n return [destTensorList.idTensor];\n }\n default:\n throw TypeError(`Node type ${node.op} is not implemented`);\n }\n};\n\nexport const CATEGORY = 'control';\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Rank, Tensor, Tensor3D, Tensor4D, Tensor5D} from '@tensorflow/tfjs-core';\n// tslint:disable-next-line: no-imports-from-dist\nimport * as tfOps from '@tensorflow/tfjs-core/dist/ops/ops_for_converter';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {InternalOpExecutor, Node} from '../types';\n\nimport {getPadding, getParamValue} from './utils';\n\nfunction fusedConvAndDepthWiseParams(\n node: Node, tensorMap: NamedTensorsMap, context: ExecutionContext) {\n const [extraOp, activationFunc] =\n (getParamValue('fusedOps', node, tensorMap, context) as string[]);\n\n const isBiasAdd = extraOp === 'biasadd';\n const noBiasAdd = !isBiasAdd;\n const isPrelu = activationFunc === 'prelu';\n const isBatchNorm = extraOp === 'fusedbatchnorm';\n\n const numArgs =\n (getParamValue('numArgs', node, tensorMap, context) as number);\n if (isBiasAdd) {\n if (isPrelu && numArgs !== 2) {\n throw new Error(\n 'FusedConv2d and DepthwiseConv2d with BiasAdd and Prelu ' +\n 'must have two extra arguments: bias and alpha.');\n }\n if (!isPrelu && isBiasAdd && numArgs !== 1) {\n throw new Error(\n 'FusedConv2d and DepthwiseConv2d with BiasAdd must have ' +\n 'one extra argument: bias.');\n }\n }\n if (isBatchNorm) {\n throw new Error(\n 'FusedConv2d and DepthwiseConv2d with FusedBatchNorm is not supported');\n }\n const stride = getParamValue('strides', node, tensorMap, context) as number[];\n const pad = getPadding(node, tensorMap, context);\n const dataFormat =\n (getParamValue('dataFormat', node, tensorMap, context) as string)\n .toUpperCase();\n const dilations =\n getParamValue('dilations', node, tensorMap, context) as number[];\n let [biasArg, preluArg] =\n getParamValue('args', node, tensorMap, context) as Tensor[];\n if (noBiasAdd) {\n preluArg = biasArg;\n biasArg = undefined;\n }\n const leakyreluAlpha =\n getParamValue('leakyreluAlpha', node, tensorMap, 
context) as number;\n\n return {\n stride,\n pad,\n dataFormat,\n dilations,\n biasArg,\n preluArg,\n activationFunc,\n leakyreluAlpha\n };\n}\n\nexport const executeOp: InternalOpExecutor =\n (node: Node, tensorMap: NamedTensorsMap,\n context: ExecutionContext, ops = tfOps): Tensor[] => {\n switch (node.op) {\n case 'Conv1D': {\n const stride =\n getParamValue('stride', node, tensorMap, context) as number;\n const pad = getParamValue('pad', node, tensorMap, context);\n const dataFormat =\n (getParamValue('dataFormat', node, tensorMap, context) as string)\n .toUpperCase();\n const dilation =\n getParamValue('dilation', node, tensorMap, context) as number;\n return [ops.conv1d(\n getParamValue('x', node, tensorMap, context) as Tensor3D,\n getParamValue('filter', node, tensorMap, context) as Tensor3D,\n stride, pad as 'valid' | 'same', dataFormat as 'NWC' | 'NCW',\n dilation)];\n }\n case 'Conv2D': {\n const stride =\n getParamValue('strides', node, tensorMap, context) as number[];\n const pad = getPadding(node, tensorMap, context);\n const dataFormat =\n (getParamValue('dataFormat', node, tensorMap, context) as string)\n .toUpperCase();\n const dilations =\n getParamValue('dilations', node, tensorMap, context) as number[];\n return [ops.conv2d(\n getParamValue('x', node, tensorMap, context) as Tensor3D |\n Tensor4D,\n getParamValue('filter', node, tensorMap, context) as Tensor4D,\n [stride[1], stride[2]], pad as 'valid' | 'same',\n dataFormat as 'NHWC' | 'NCHW', [dilations[1], dilations[2]])];\n }\n case '_FusedConv2D': {\n const {\n stride,\n pad,\n dataFormat,\n dilations,\n biasArg,\n preluArg,\n activationFunc,\n leakyreluAlpha\n } = fusedConvAndDepthWiseParams(node, tensorMap, context);\n\n return [ops.fused.conv2d({\n x: getParamValue('x', node, tensorMap, context) as Tensor3D |\n Tensor4D,\n filter: getParamValue('filter', node, tensorMap, context) as\n Tensor4D,\n strides: [stride[1], stride[2]],\n pad: pad as 'valid' | 'same',\n dataFormat: dataFormat as 'NHWC' | 'NCHW',\n dilations: [dilations[1], dilations[2]],\n bias: biasArg,\n activation: activationFunc as tfOps.fused.Activation,\n preluActivationWeights: preluArg,\n leakyreluAlpha\n })];\n }\n\n case 'FusedDepthwiseConv2dNative': {\n const {\n stride,\n pad,\n dataFormat,\n dilations,\n biasArg,\n preluArg,\n activationFunc,\n leakyreluAlpha,\n } = fusedConvAndDepthWiseParams(node, tensorMap, context);\n\n return [ops.fused.depthwiseConv2d({\n x: getParamValue('x', node, tensorMap, context) as Tensor3D |\n Tensor4D,\n filter: getParamValue('filter', node, tensorMap, context) as\n Tensor4D,\n strides: [stride[1], stride[2]],\n pad: pad as 'valid' | 'same',\n dataFormat: dataFormat as 'NHWC' | 'NCHW',\n dilations: [dilations[1], dilations[2]],\n bias: biasArg,\n activation: activationFunc as tfOps.fused.Activation,\n preluActivationWeights: preluArg,\n leakyreluAlpha\n })];\n }\n case 'Conv2DBackpropInput':\n case 'Conv2dTranspose': {\n const shape = getParamValue(\n 'outputShape', node, tensorMap,\n context) as [number, number, number] |\n [number, number, number, number];\n const stride =\n getParamValue('strides', node, tensorMap, context) as number[];\n const pad = getPadding(node, tensorMap, context);\n return [ops.conv2dTranspose(\n getParamValue('x', node, tensorMap, context) as Tensor3D |\n Tensor4D,\n getParamValue('filter', node, tensorMap, context) as Tensor4D,\n shape, [stride[1], stride[2]], pad as 'valid' | 'same')];\n }\n case 'DepthwiseConv2dNative':\n case 'DepthwiseConv2d': {\n const stride =\n 
getParamValue('strides', node, tensorMap, context) as number[];\n const pad = getPadding(node, tensorMap, context);\n const dilations =\n getParamValue('dilations', node, tensorMap, context) as number[];\n const dataFormat =\n (getParamValue('dataFormat', node, tensorMap, context) as string)\n .toUpperCase();\n\n return [ops.depthwiseConv2d(\n getParamValue('input', node, tensorMap, context) as Tensor3D |\n Tensor4D,\n getParamValue('filter', node, tensorMap, context) as Tensor4D,\n [stride[1], stride[2]], pad as 'valid' | 'same',\n dataFormat as 'NHWC' | 'NCHW', [dilations[1], dilations[2]])];\n }\n case 'Conv3D': {\n const stride =\n getParamValue('strides', node, tensorMap, context) as number[];\n const pad = getParamValue('pad', node, tensorMap, context);\n const dataFormat =\n (getParamValue('dataFormat', node, tensorMap, context) as string)\n .toUpperCase();\n const dilations =\n getParamValue('dilations', node, tensorMap, context) as number[];\n return [ops.conv3d(\n getParamValue('x', node, tensorMap, context) as Tensor4D |\n Tensor,\n getParamValue('filter', node, tensorMap, context) as\n Tensor,\n [stride[1], stride[2], stride[3]], pad as 'valid' | 'same',\n dataFormat as 'NDHWC' | 'NCDHW',\n [dilations[1], dilations[2], dilations[3]])];\n }\n case 'AvgPool': {\n const stride =\n getParamValue('strides', node, tensorMap, context) as number[];\n const pad = getParamValue('pad', node, tensorMap, context);\n const kernelSize =\n getParamValue('kernelSize', node, tensorMap, context) as number[];\n\n return [ops.avgPool(\n getParamValue('x', node, tensorMap, context) as Tensor3D |\n Tensor4D,\n [kernelSize[1], kernelSize[2]], [stride[1], stride[2]],\n pad as 'valid' | 'same')];\n }\n case 'MaxPool': {\n const stride =\n getParamValue('strides', node, tensorMap, context) as number[];\n const pad = getParamValue('pad', node, tensorMap, context);\n const kernelSize =\n getParamValue('kernelSize', node, tensorMap, context) as number[];\n\n return [ops.maxPool(\n getParamValue('x', node, tensorMap, context) as Tensor3D |\n Tensor4D,\n [kernelSize[1], kernelSize[2]], [stride[1], stride[2]],\n pad as 'valid' | 'same')];\n }\n case 'MaxPoolWithArgmax': {\n const stride =\n getParamValue('strides', node, tensorMap, context) as number[];\n const pad = getParamValue('pad', node, tensorMap, context);\n const kernelSize =\n getParamValue('kernelSize', node, tensorMap, context) as number[];\n const includeBatchInIndex =\n getParamValue('includeBatchInIndex', node, tensorMap, context) as\n boolean;\n const {result, indexes} = ops.maxPoolWithArgmax(\n getParamValue('x', node, tensorMap, context) as Tensor4D,\n [kernelSize[1], kernelSize[2]], [stride[1], stride[2]],\n pad as 'valid' | 'same', includeBatchInIndex);\n return [result, indexes];\n }\n case 'AvgPool3D': {\n const stride =\n getParamValue('strides', node, tensorMap, context) as number[];\n const pad = getParamValue('pad', node, tensorMap, context);\n const kernelSize =\n getParamValue('kernelSize', node, tensorMap, context) as number[];\n\n return [ops.avgPool3d(\n getParamValue('x', node, tensorMap, context) as Tensor5D,\n [kernelSize[1], kernelSize[2], kernelSize[3]],\n [stride[1], stride[2], stride[3]], pad as 'valid' | 'same')];\n }\n\n case 'MaxPool3D': {\n const stride =\n getParamValue('strides', node, tensorMap, context) as number[];\n const pad = getParamValue('pad', node, tensorMap, context);\n const kernelSize =\n getParamValue('kernelSize', node, tensorMap, context) as number[];\n\n return [ops.maxPool3d(\n getParamValue('x', 
node, tensorMap, context) as Tensor5D,\n [kernelSize[1], kernelSize[2], kernelSize[3]],\n [stride[1], stride[2], stride[3]], pad as 'valid' | 'same')];\n }\n\n case 'Dilation2D': {\n const strides =\n getParamValue('strides', node, tensorMap, context) as number[];\n const pad = getParamValue('pad', node, tensorMap, context);\n const dilations =\n getParamValue('dilations', node, tensorMap, context) as number[];\n\n // strides: [1, stride_height, stride_width, 1].\n const strideHeight = strides[1];\n const strideWidth = strides[2];\n\n // dilations: [1, dilation_height, dilation_width, 1].\n const dilationHeight = dilations[1];\n const dilationWidth = dilations[2];\n\n return [ops.dilation2d(\n getParamValue('x', node, tensorMap, context) as Tensor3D |\n Tensor4D,\n getParamValue('filter', node, tensorMap, context) as Tensor3D,\n [strideHeight, strideWidth], pad as 'valid' | 'same',\n [dilationHeight, dilationWidth], 'NHWC' /* dataFormat */)];\n }\n\n default:\n throw TypeError(`Node type ${node.op} is not implemented`);\n }\n };\n\nexport const CATEGORY = 'convolution';\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor, Tensor1D, Tensor2D} from '@tensorflow/tfjs-core';\n// tslint:disable-next-line: no-imports-from-dist\nimport * as tfOps from '@tensorflow/tfjs-core/dist/ops/ops_for_converter';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport { ResourceManager } from '../../executor/resource_manager';\nimport {InternalOpAsyncExecutor, Node} from '../types';\n\nimport {getParamValue} from './utils';\n\nfunction nmsParams(\n node: Node, tensorMap: NamedTensorsMap, context: ExecutionContext) {\n const boxes = getParamValue('boxes', node, tensorMap, context) as Tensor;\n const scores = getParamValue('scores', node, tensorMap, context) as Tensor;\n const maxOutputSize =\n getParamValue('maxOutputSize', node, tensorMap, context) as number;\n const iouThreshold =\n getParamValue('iouThreshold', node, tensorMap, context) as number;\n const scoreThreshold =\n getParamValue('scoreThreshold', node, tensorMap, context) as number;\n const softNmsSigma =\n getParamValue('softNmsSigma', node, tensorMap, context) as number;\n\n return {\n boxes,\n scores,\n maxOutputSize,\n iouThreshold,\n scoreThreshold,\n softNmsSigma\n };\n}\n\nexport const executeOp: InternalOpAsyncExecutor = async(\n node: Node, tensorMap: NamedTensorsMap,\n context: ExecutionContext, resourceManager: ResourceManager,\n ops = tfOps): Promise => {\n switch (node.op) {\n case 'NonMaxSuppressionV5': {\n const {\n boxes,\n scores,\n maxOutputSize,\n iouThreshold,\n scoreThreshold,\n softNmsSigma\n } = nmsParams(node, tensorMap, context);\n\n const result = await ops.image.nonMaxSuppressionWithScoreAsync(\n boxes as Tensor2D, scores as Tensor1D, maxOutputSize, iouThreshold,\n 
scoreThreshold, softNmsSigma);\n\n return [result.selectedIndices, result.selectedScores];\n }\n case 'NonMaxSuppressionV4': {\n const {boxes, scores, maxOutputSize, iouThreshold, scoreThreshold} =\n nmsParams(node, tensorMap, context);\n\n const padToMaxOutputSize =\n getParamValue('padToMaxOutputSize', node, tensorMap, context) as\n boolean;\n\n const result = await ops.image.nonMaxSuppressionPaddedAsync(\n boxes as Tensor2D, scores as Tensor1D, maxOutputSize, iouThreshold,\n scoreThreshold, padToMaxOutputSize);\n\n return [result.selectedIndices, result.validOutputs];\n }\n case 'NonMaxSuppressionV3':\n case 'NonMaxSuppressionV2': {\n const {boxes, scores, maxOutputSize, iouThreshold, scoreThreshold} =\n nmsParams(node, tensorMap, context);\n\n return [await ops.image.nonMaxSuppressionAsync(\n boxes as Tensor2D, scores as Tensor1D, maxOutputSize, iouThreshold,\n scoreThreshold)];\n }\n case 'Where': {\n const condition = ops.cast(\n (getParamValue('condition', node, tensorMap, context) as Tensor),\n 'bool');\n const result = [await ops.whereAsync(condition)];\n condition.dispose();\n return result;\n }\n case 'ListDiff': {\n return ops.setdiff1dAsync(\n getParamValue('x', node, tensorMap, context) as Tensor,\n getParamValue('y', node, tensorMap, context) as Tensor);\n }\n default:\n throw TypeError(`Node type ${node.op} is not implemented`);\n }\n};\n\nexport const CATEGORY = 'dynamic';\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {DataType, keep, scalar, stack, Tensor, tidy, unstack, util} from '@tensorflow/tfjs-core';\n// tslint:disable-next-line: no-imports-from-dist\nimport * as tfOps from '@tensorflow/tfjs-core/dist/ops/ops_for_converter';\n\n/**\n * Hashtable contains a set of tensors, which can be accessed by key.\n */\nexport class HashTable {\n readonly handle: Tensor;\n\n // tslint:disable-next-line: no-any\n private tensorMap: Map;\n\n get id() {\n return this.handle.id;\n }\n\n /**\n * Constructor of HashTable. 
Creates a hash table.\n *\n * @param keyDType `dtype` of the table keys.\n * @param valueDType `dtype` of the table values.\n */\n constructor(readonly keyDType: DataType, readonly valueDType: DataType) {\n this.handle = scalar(0);\n // tslint:disable-next-line: no-any\n this.tensorMap = new Map();\n\n keep(this.handle);\n }\n\n /**\n * Dispose the tensors and handle and clear the hashtable.\n */\n clearAndClose() {\n this.tensorMap.forEach(value => value.dispose());\n this.tensorMap.clear();\n this.handle.dispose();\n }\n\n /**\n * The number of items in the hash table.\n */\n size(): number {\n return this.tensorMap.size;\n }\n\n /**\n * The number of items in the hash table as a rank-0 tensor.\n */\n tensorSize(): Tensor {\n return tfOps.scalar(this.size(), 'int32');\n }\n\n /**\n * Replaces the contents of the table with the specified keys and values.\n * @param keys Keys to store in the hashtable.\n * @param values Values to store in the hashtable.\n */\n async import(keys: Tensor, values: Tensor): Promise {\n this.checkKeyAndValueTensor(keys, values);\n\n // We only store the primitive values of the keys, this allows lookup\n // to be O(1).\n const $keys = await keys.data();\n\n // Clear the hashTable before inserting new values.\n this.tensorMap.forEach(value => value.dispose());\n this.tensorMap.clear();\n\n return tidy(() => {\n const $values = unstack(values);\n\n const keysLength = $keys.length;\n const valuesLength = $values.length;\n\n util.assert(\n keysLength === valuesLength,\n () => `The number of elements doesn't match, keys has ` +\n `${keysLength} elements, the values has ${valuesLength} ` +\n `elements.`);\n\n for (let i = 0; i < keysLength; i++) {\n const key = $keys[i];\n const value = $values[i];\n\n keep(value);\n this.tensorMap.set(key, value);\n }\n\n return this.handle;\n });\n }\n\n /**\n * Looks up keys in a hash table, outputs the corresponding values.\n *\n * Performs batch lookups, for every element in the key tensor, `find`\n * stacks the corresponding value into the return tensor.\n *\n * If an element is not present in the table, the given `defaultValue` is\n * used.\n *\n * @param keys Keys to look up. Must have the same type as the keys of the\n * table.\n * @param defaultValue The scalar `defaultValue` is the value output for keys\n * not present in the table. It must also be of the same type as the\n * table values.\n */\n async find(keys: Tensor, defaultValue: Tensor): Promise {\n this.checkKeyAndValueTensor(keys, defaultValue);\n\n const $keys = await keys.data();\n\n return tidy(() => {\n const result: Tensor[] = [];\n\n for (let i = 0; i < $keys.length; i++) {\n const key = $keys[i];\n\n const value = this.findWithDefault(key, defaultValue);\n result.push(value);\n }\n\n return stack(result);\n });\n }\n\n // tslint:disable-next-line: no-any\n private findWithDefault(key: any, defaultValue: Tensor): Tensor {\n const result = this.tensorMap.get(key);\n\n return result != null ? result : defaultValue;\n }\n\n private checkKeyAndValueTensor(key: Tensor, value: Tensor) {\n if (key.dtype !== this.keyDType) {\n throw new Error(\n `Expect key dtype ${this.keyDType}, but got ` +\n `${key.dtype}`);\n }\n\n if (value.dtype !== this.valueDType) {\n throw new Error(\n `Expect value dtype ${this.valueDType}, but got ` +\n `${value.dtype}`);\n }\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, Tensor} from '@tensorflow/tfjs-core';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {HashTable} from '../../executor/hash_table';\nimport {ResourceManager} from '../../executor/resource_manager';\nimport {InternalOpAsyncExecutor, Node} from '../types';\n\nimport {getParamValue} from './utils';\n\nexport const executeOp: InternalOpAsyncExecutor = async(\n node: Node, tensorMap: NamedTensorsMap, context: ExecutionContext,\n resourceManager: ResourceManager): Promise => {\n switch (node.op) {\n case 'HashTable':\n case 'HashTableV2': {\n const existingTableHandle =\n resourceManager.getHashTableHandleByName(node.name);\n // Table is shared with initializer.\n if (existingTableHandle != null) {\n return [existingTableHandle];\n } else {\n const keyDType =\n getParamValue('keyDType', node, tensorMap, context) as DataType;\n const valueDType =\n getParamValue('valueDType', node, tensorMap, context) as DataType;\n\n const hashTable = new HashTable(keyDType, valueDType);\n resourceManager.addHashTable(node.name, hashTable);\n return [hashTable.handle];\n }\n }\n case 'InitializeTable':\n case 'InitializeTableV2':\n case 'LookupTableImport':\n case 'LookupTableImportV2': {\n const handle = getParamValue(\n 'tableHandle', node, tensorMap, context,\n resourceManager) as Tensor;\n const keys = getParamValue('keys', node, tensorMap, context) as Tensor;\n const values =\n getParamValue('values', node, tensorMap, context) as Tensor;\n\n const hashTable = resourceManager.getHashTableById(handle.id);\n\n return [await hashTable.import(keys, values)];\n }\n case 'LookupTableFind':\n case 'LookupTableFindV2': {\n const handle = getParamValue(\n 'tableHandle', node, tensorMap, context,\n resourceManager) as Tensor;\n const keys = getParamValue('keys', node, tensorMap, context) as Tensor;\n const defaultValue =\n getParamValue('defaultValue', node, tensorMap, context) as Tensor;\n\n const hashTable = resourceManager.getHashTableById(handle.id);\n return [await hashTable.find(keys, defaultValue)];\n }\n case 'LookupTableSize':\n case 'LookupTableSizeV2': {\n const handle = getParamValue(\n 'tableHandle', node, tensorMap, context,\n resourceManager) as Tensor;\n\n const hashTable = resourceManager.getHashTableById(handle.id);\n return [hashTable.tensorSize()];\n }\n default:\n throw TypeError(`Node type ${node.op} is not implemented`);\n }\n};\n\nexport const CATEGORY = 'hash_table';\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport * as tfc from '@tensorflow/tfjs-core';\n\nimport {NamedTensorsMap} from '../data/types';\nimport {ExecutionContext} from '../executor/execution_context';\nimport {ResourceManager} from '../executor/resource_manager';\n\nimport {NodeValueImpl} from './custom_op/node_value_impl';\nimport {getRegisteredOp} from './custom_op/register';\nimport * as arithmetic from './executors/arithmetic_executor';\nimport * as basicMath from './executors/basic_math_executor';\nimport * as control from './executors/control_executor';\nimport * as convolution from './executors/convolution_executor';\nimport * as creation from './executors/creation_executor';\nimport * as dynamic from './executors/dynamic_executor';\nimport * as evaluation from './executors/evaluation_executor';\nimport * as graph from './executors/graph_executor';\nimport * as hashTable from './executors/hash_table_executor';\nimport * as image from './executors/image_executor';\nimport * as logical from './executors/logical_executor';\nimport * as matrices from './executors/matrices_executor';\nimport * as normalization from './executors/normalization_executor';\nimport * as ragged from './executors/ragged_executor';\nimport * as reduction from './executors/reduction_executor';\nimport * as sliceJoin from './executors/slice_join_executor';\nimport * as sparse from './executors/sparse_executor';\nimport * as spectral from './executors/spectral_executor';\nimport * as string from './executors/string_executor';\nimport * as transformation from './executors/transformation_executor';\nimport {Node} from './types';\n\n/**\n * Executes the op defined by the node object.\n * @param node\n * @param tensorMap contains tensors for executed nodes and weights\n * @param context contains tensors and information for running the current node.\n * @param resourceManager Optional. 
Contains global resources of the model.\n */\nexport function executeOp(\n node: Node, tensorMap: NamedTensorsMap, context: ExecutionContext,\n resourceManager?: ResourceManager, tidy = tfc.tidy): tfc.Tensor[]|\n Promise {\n const value =\n ((node: Node, tensorMap: NamedTensorsMap, context: ExecutionContext) => {\n switch (node.category) {\n case 'arithmetic':\n return tidy(() => arithmetic.executeOp(node, tensorMap, context));\n case 'basic_math':\n return tidy(() => basicMath.executeOp(node, tensorMap, context));\n case 'control':\n return control.executeOp(node, tensorMap, context);\n case 'convolution':\n return tidy(() => convolution.executeOp(node, tensorMap, context));\n case 'creation':\n return tidy(() => creation.executeOp(node, tensorMap, context));\n case 'dynamic':\n return dynamic.executeOp(node, tensorMap, context);\n case 'evaluation':\n return tidy(() => evaluation.executeOp(node, tensorMap, context));\n case 'image':\n return tidy(() => image.executeOp(node, tensorMap, context));\n case 'graph':\n return tidy(() => graph.executeOp(node, tensorMap, context));\n case 'logical':\n return tidy(() => logical.executeOp(node, tensorMap, context));\n case 'matrices':\n return tidy(() => matrices.executeOp(node, tensorMap, context));\n case 'normalization':\n return tidy(\n () => normalization.executeOp(node, tensorMap, context));\n case 'ragged':\n return tidy(() => ragged.executeOp(node, tensorMap, context));\n case 'reduction':\n return tidy(() => reduction.executeOp(node, tensorMap, context));\n case 'slice_join':\n return tidy(() => sliceJoin.executeOp(node, tensorMap, context));\n case 'sparse':\n return tidy(() => sparse.executeOp(node, tensorMap, context));\n case 'spectral':\n return tidy(() => spectral.executeOp(node, tensorMap, context));\n case 'string':\n return tidy(() => string.executeOp(node, tensorMap, context));\n case 'transformation':\n return tidy(\n () => transformation.executeOp(node, tensorMap, context));\n case 'hash_table':\n return hashTable.executeOp(\n node, tensorMap, context, resourceManager);\n case 'custom':\n const opMapper = getRegisteredOp(node.op);\n if (opMapper && opMapper.customExecutor) {\n return opMapper.customExecutor(\n new NodeValueImpl(node, tensorMap, context));\n } else {\n throw TypeError(`Custom op ${node.op} is not registered.`);\n }\n default:\n throw TypeError(\n `Unknown op '${node.op}'. File an issue at ` +\n `https://github.com/tensorflow/tfjs/issues so we can add it` +\n `, or register a custom execution with tf.registerOp()`);\n }\n })(node, tensorMap, context);\n if (tfc.util.isPromise(value)) {\n return value.then((data) => [].concat(data));\n }\n return [].concat(value);\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '@tensorflow/tfjs-core';\n// tslint:disable-next-line: no-imports-from-dist\nimport * as tfOps from '@tensorflow/tfjs-core/dist/ops/ops_for_converter';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {InternalOpExecutor, Node} from '../types';\n\nimport {getParamValue} from './utils';\n\nexport const executeOp: InternalOpExecutor =\n (node: Node, tensorMap: NamedTensorsMap,\n context: ExecutionContext, ops = tfOps): Tensor[] => {\n switch (node.op) {\n case 'BiasAdd':\n case 'AddV2':\n case 'Add': {\n return [ops.add(\n (getParamValue('a', node, tensorMap, context) as Tensor),\n getParamValue('b', node, tensorMap, context) as Tensor)];\n }\n case 'AddN': {\n return [ops.addN((\n getParamValue('tensors', node, tensorMap, context) as Tensor[]))];\n }\n case 'FloorMod':\n case 'Mod':\n return [ops.mod(\n getParamValue('a', node, tensorMap, context) as Tensor,\n getParamValue('b', node, tensorMap, context) as Tensor)];\n case 'Mul':\n return [ops.mul(\n getParamValue('a', node, tensorMap, context) as Tensor,\n getParamValue('b', node, tensorMap, context) as Tensor)];\n case 'RealDiv':\n case 'Div': {\n return [ops.div(\n getParamValue('a', node, tensorMap, context) as Tensor,\n getParamValue('b', node, tensorMap, context) as Tensor)];\n }\n case 'DivNoNan': {\n return [ops.divNoNan(\n getParamValue('a', node, tensorMap, context) as Tensor,\n getParamValue('b', node, tensorMap, context) as Tensor)];\n }\n case 'FloorDiv': {\n return [ops.floorDiv(\n getParamValue('a', node, tensorMap, context) as Tensor,\n getParamValue('b', node, tensorMap, context) as Tensor)];\n }\n case 'Sub': {\n return [ops.sub(\n getParamValue('a', node, tensorMap, context) as Tensor,\n getParamValue('b', node, tensorMap, context) as Tensor)];\n }\n case 'Minimum': {\n return [ops.minimum(\n getParamValue('a', node, tensorMap, context) as Tensor,\n getParamValue('b', node, tensorMap, context) as Tensor)];\n }\n case 'Maximum': {\n return [ops.maximum(\n getParamValue('a', node, tensorMap, context) as Tensor,\n getParamValue('b', node, tensorMap, context) as Tensor)];\n }\n case 'Pow': {\n return [ops.pow(\n getParamValue('a', node, tensorMap, context) as Tensor,\n getParamValue('b', node, tensorMap, context) as Tensor)];\n }\n case 'SquaredDifference': {\n return [ops.squaredDifference(\n getParamValue('a', node, tensorMap, context) as Tensor,\n getParamValue('b', node, tensorMap, context) as Tensor)];\n }\n default:\n throw TypeError(`Node type ${node.op} is not implemented`);\n }\n };\n\nexport const CATEGORY = 'arithmetic';\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '@tensorflow/tfjs-core';\n// tslint:disable-next-line: no-imports-from-dist\nimport * as tfOps from '@tensorflow/tfjs-core/dist/ops/ops_for_converter';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {InternalOpExecutor, Node} from '../types';\n\nimport {getParamValue, getTensor} from './utils';\n\nexport const executeOp: InternalOpExecutor =\n (node: Node, tensorMap: NamedTensorsMap,\n context: ExecutionContext, ops = tfOps): Tensor[] => {\n switch (node.op) {\n case 'Abs':\n case 'ComplexAbs':\n return [ops.abs(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Acos':\n return [ops.acos(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Acosh':\n return [ops.acosh(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Asin':\n return [ops.asin(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Asinh':\n return [ops.asinh(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Atan':\n return [ops.atan(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Atan2':\n return [ops.atan2(\n getParamValue('x', node, tensorMap, context) as Tensor,\n getParamValue('y', node, tensorMap, context) as Tensor)];\n case 'Atanh':\n return [ops.atanh(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Ceil':\n return [ops.ceil(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Complex':\n return [ops.complex(\n getParamValue('real', node, tensorMap, context) as Tensor,\n getParamValue('imag', node, tensorMap, context) as Tensor)];\n case 'Cos':\n return [ops.cos(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Cosh':\n return [ops.cosh(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Elu':\n return [ops.elu(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Erf':\n return [ops.erf(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Exp':\n return [ops.exp(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Expm1': {\n return [ops.expm1(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n }\n case 'Floor':\n return [ops.floor(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Log':\n return [ops.log(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Log1p': {\n return [ops.log1p(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n }\n case 'Imag':\n return [ops.imag(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n\n case 'Neg':\n return [ops.neg(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Reciprocal': {\n return [ops.reciprocal(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n }\n case 'Real':\n return [ops.real(\n 
getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Relu':\n return [ops.relu(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Round': {\n return [ops.round(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n }\n case 'Selu':\n return [ops.selu(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Sigmoid':\n return [ops.sigmoid(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Sin':\n return [ops.sin(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Sign': {\n return [ops.sign(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n }\n case 'Sinh': {\n return [ops.sinh(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n }\n case 'Softplus': {\n return [ops.softplus(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n }\n case 'Sqrt': {\n return [ops.sqrt(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n }\n case 'Square': {\n return [ops.square(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n }\n case 'Tanh': {\n return [ops.tanh(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n }\n case 'Tan':\n return [ops.tan(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'ClipByValue':\n return [ops.clipByValue(\n getParamValue('x', node, tensorMap, context) as Tensor,\n getParamValue('clipValueMin', node, tensorMap, context) as number,\n getParamValue('clipValueMax', node, tensorMap, context) as\n number)];\n case 'Relu6':\n return [ops.relu6(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n case 'Rsqrt':\n return [ops.rsqrt(\n getTensor(node.inputNames[0], tensorMap, context))];\n case 'Prod':\n return [ops.prod(\n getParamValue('x', node, tensorMap, context) as Tensor,\n getParamValue('axes', node, tensorMap, context) as number[])];\n case 'LeakyRelu':\n return [ops.leakyRelu(\n getParamValue('x', node, tensorMap, context) as Tensor,\n getParamValue('alpha', node, tensorMap, context) as number)];\n case 'Prelu':\n return [ops.prelu(\n getParamValue('x', node, tensorMap, context) as Tensor,\n getParamValue('alpha', node, tensorMap, context) as Tensor)];\n case 'IsNan':\n return [ops.isNaN(\n getTensor(node.inputNames[0], tensorMap, context))];\n default:\n throw TypeError(`Node type ${node.op} is not implemented`);\n }\n };\n\nexport const CATEGORY = 'basic_math';\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, Tensor, Tensor1D} from '@tensorflow/tfjs-core';\n// tslint:disable-next-line: no-imports-from-dist\nimport * as tfOps from '@tensorflow/tfjs-core/dist/ops/ops_for_converter';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {InternalOpExecutor, Node} from '../types';\n\nimport {getParamValue} from './utils';\n\nexport const executeOp: InternalOpExecutor =\n (node: Node, tensorMap: NamedTensorsMap, context: ExecutionContext,\n ops = tfOps): Tensor[] => {\n switch (node.op) {\n case 'Fill': {\n const shape =\n getParamValue('shape', node, tensorMap, context) as number[];\n const dtype =\n getParamValue('dtype', node, tensorMap, context) as DataType;\n const value =\n getParamValue('value', node, tensorMap, context) as number;\n return [ops.fill(shape, value, dtype)];\n }\n case 'LinSpace': {\n const start =\n getParamValue('start', node, tensorMap, context) as number;\n const stop =\n getParamValue('stop', node, tensorMap, context) as number;\n const num = getParamValue('num', node, tensorMap, context) as number;\n return [ops.linspace(start, stop, num)];\n }\n case 'Multinomial': {\n const logits =\n getParamValue('logits', node, tensorMap, context) as Tensor1D;\n const numSamples =\n getParamValue('numSamples', node, tensorMap, context) as number;\n const seed =\n getParamValue('seed', node, tensorMap, context) as number;\n return [ops.multinomial(logits, numSamples, seed)];\n }\n case 'OneHot': {\n const indices =\n getParamValue('indices', node, tensorMap, context) as Tensor1D;\n const depth =\n getParamValue('depth', node, tensorMap, context) as number;\n const onValue =\n getParamValue('onValue', node, tensorMap, context) as number;\n const offValue =\n getParamValue('offValue', node, tensorMap, context) as number;\n const dtype =\n getParamValue('dtype', node, tensorMap, context) as DataType;\n return [ops.oneHot(indices, depth, onValue, offValue, dtype)];\n }\n case 'Ones': {\n return [ops.ones(\n getParamValue('shape', node, tensorMap, context) as number[],\n getParamValue('dtype', node, tensorMap, context) as DataType)];\n }\n case 'OnesLike': {\n return [ops.onesLike(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n }\n case 'RandomStandardNormal': {\n return [ops.randomStandardNormal(\n getParamValue('shape', node, tensorMap, context) as number[],\n getParamValue('dtype', node, tensorMap, context) as 'float32' |\n 'int32',\n getParamValue('seed', node, tensorMap, context) as number)];\n }\n case 'RandomUniform': {\n return [ops.randomUniform(\n // tslint:disable-next-line:no-any\n getParamValue('shape', node, tensorMap, context) as any,\n getParamValue('minval', node, tensorMap, context) as number,\n getParamValue('maxval', node, tensorMap, context) as number,\n getParamValue('dtype', node, tensorMap, 
context) as DataType)];\n }\n case 'Range': {\n const start =\n getParamValue('start', node, tensorMap, context) as number;\n const stop =\n getParamValue('stop', node, tensorMap, context) as number;\n const step =\n getParamValue('step', node, tensorMap, context) as number;\n return [ops.range(\n start, stop, step,\n getParamValue('dtype', node, tensorMap, context) as 'float32' |\n 'int32')];\n }\n case 'TruncatedNormal': {\n const shape =\n getParamValue('shape', node, tensorMap, context) as number[];\n const mean =\n getParamValue('mean', node, tensorMap, context) as number;\n const stdDev =\n getParamValue('stdDev', node, tensorMap, context) as number;\n const seed =\n getParamValue('seed', node, tensorMap, context) as number;\n return [ops.truncatedNormal(\n shape, mean, stdDev,\n getParamValue('dtype', node, tensorMap, context) as 'float32' |\n 'int32',\n seed)];\n }\n case 'Zeros': {\n return [ops.zeros(\n getParamValue('shape', node, tensorMap, context) as number[],\n getParamValue('dtype', node, tensorMap, context) as DataType)];\n }\n case 'ZerosLike': {\n return [ops.zerosLike(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n }\n default:\n throw TypeError(`Node type ${node.op} is not implemented`);\n }\n };\n\nexport const CATEGORY = 'creation';\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '@tensorflow/tfjs-core';\n// tslint:disable-next-line: no-imports-from-dist\nimport * as tfOps from '@tensorflow/tfjs-core/dist/ops/ops_for_converter';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {InternalOpExecutor, Node} from '../types';\n\nimport {getParamValue} from './utils';\n\nexport const executeOp: InternalOpExecutor =\n (node: Node, tensorMap: NamedTensorsMap, context: ExecutionContext,\n ops = tfOps):\n Tensor[] => {\n switch (node.op) {\n case 'LowerBound': {\n const sortedSequence =\n getParamValue('sortedSequence', node, tensorMap, context) as\n Tensor;\n const values =\n getParamValue('values', node, tensorMap, context) as Tensor;\n return [ops.lowerBound(sortedSequence, values)];\n }\n case 'TopKV2': {\n const x = getParamValue('x', node, tensorMap, context) as Tensor;\n const k = getParamValue('k', node, tensorMap, context) as number;\n const sorted =\n getParamValue('sorted', node, tensorMap, context) as boolean;\n const result = ops.topk(x, k, sorted);\n return [result.values, result.indices];\n }\n case 'UpperBound': {\n const sortedSequence =\n getParamValue('sortedSequence', node, tensorMap, context) as\n Tensor;\n const values =\n getParamValue('values', node, tensorMap, context) as Tensor;\n return [ops.upperBound(sortedSequence, values)];\n }\n case 'Unique': {\n const x = getParamValue('x', node, tensorMap, context) as Tensor;\n const result = ops.unique(x);\n return 
[result.values, result.indices];\n }\n case 'UniqueV2': {\n const x = getParamValue('x', node, tensorMap, context) as Tensor;\n const axis =\n getParamValue('axis', node, tensorMap, context) as number;\n const result = ops.unique(x, axis);\n return [result.values, result.indices];\n }\n default:\n throw TypeError(`Node type ${node.op} is not implemented`);\n }\n };\n\nexport const CATEGORY = 'evaluation';\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor, Tensor1D, Tensor2D, Tensor3D, Tensor4D} from '@tensorflow/tfjs-core';\n// tslint:disable-next-line: no-imports-from-dist\nimport * as tfOps from '@tensorflow/tfjs-core/dist/ops/ops_for_converter';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {InternalOpExecutor, Node} from '../types';\n\nimport {getParamValue} from './utils';\n\nexport const executeOp: InternalOpExecutor =\n (node: Node, tensorMap: NamedTensorsMap,\n context: ExecutionContext, ops = tfOps): Tensor[] => {\n switch (node.op) {\n case 'ResizeBilinear': {\n const images =\n getParamValue('images', node, tensorMap, context) as Tensor;\n const size =\n getParamValue('size', node, tensorMap, context) as number[];\n const alignCorners =\n getParamValue('alignCorners', node, tensorMap, context) as\n boolean;\n const halfPixelCenters =\n getParamValue('halfPixelCenters', node, tensorMap, context) as\n boolean;\n return [ops.image.resizeBilinear(\n images as Tensor3D | Tensor4D, [size[0], size[1]], alignCorners,\n halfPixelCenters)];\n }\n case 'ResizeNearestNeighbor': {\n const images =\n getParamValue('images', node, tensorMap, context) as Tensor;\n const size =\n getParamValue('size', node, tensorMap, context) as number[];\n const alignCorners =\n getParamValue('alignCorners', node, tensorMap, context) as\n boolean;\n const halfPixelCenters =\n getParamValue('halfPixelCenters', node, tensorMap, context) as\n boolean;\n return [ops.image.resizeNearestNeighbor(\n images as Tensor3D | Tensor4D, [size[0], size[1]], alignCorners,\n halfPixelCenters)];\n }\n case 'CropAndResize': {\n const image =\n getParamValue('image', node, tensorMap, context) as Tensor;\n const boxes =\n getParamValue('boxes', node, tensorMap, context) as Tensor;\n const boxInd =\n getParamValue('boxInd', node, tensorMap, context) as Tensor;\n const cropSize =\n getParamValue('cropSize', node, tensorMap, context) as number[];\n const method =\n getParamValue('method', node, tensorMap, context) as string;\n const extrapolationValue =\n getParamValue('extrapolationValue', node, tensorMap, context) as\n number;\n return [ops.image.cropAndResize(\n image as Tensor4D, boxes as Tensor2D, boxInd as Tensor1D,\n cropSize as [number, number], method as 'bilinear' | 'nearest',\n extrapolationValue)];\n }\n case 'ImageProjectiveTransformV3': {\n const images =\n 
getParamValue('images', node, tensorMap, context) as Tensor;\n const transforms =\n getParamValue('transforms', node, tensorMap, context) as Tensor;\n const outputShape =\n getParamValue('outputShape', node, tensorMap, context) as\n number[];\n const fillValue =\n getParamValue('fillValue', node, tensorMap, context) as number;\n const interpolation =\n getParamValue('interpolation', node, tensorMap, context) as\n string;\n const fillMode =\n getParamValue('fillMode', node, tensorMap, context) as string;\n return [ops.image.transform(\n images as Tensor4D,\n transforms as Tensor2D,\n interpolation.toLowerCase() as 'bilinear' | 'nearest',\n fillMode.toLowerCase() as 'constant' | 'reflect' | 'wrap' | 'nearest',\n fillValue,\n outputShape as [number, number])];\n }\n default:\n throw TypeError(`Node type ${node.op} is not implemented`);\n }\n };\n\nexport const CATEGORY = 'image';\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '@tensorflow/tfjs-core';\n// tslint:disable-next-line: no-imports-from-dist\nimport * as tfOps from '@tensorflow/tfjs-core/dist/ops/ops_for_converter';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {InternalOpExecutor, Node} from '../types';\n\nimport {cloneTensor, getParamValue, getTensor} from './utils';\n\nexport const executeOp: InternalOpExecutor =\n (node: Node, tensorMap: NamedTensorsMap,\n context: ExecutionContext, ops = tfOps): Tensor[] => {\n switch (node.op) {\n case 'Const': {\n return tensorMap[node.name];\n }\n case 'PlaceholderWithDefault':\n const def =\n getParamValue('default', node, tensorMap, context) as Tensor;\n return [getTensor(node.name, tensorMap, context) || def];\n case 'Placeholder':\n return [getTensor(node.name, tensorMap, context)];\n case 'Identity':\n case 'StopGradient':\n case 'FakeQuantWithMinMaxVars': { // This op is currently ignored.\n const data = getParamValue('x', node, tensorMap, context) as Tensor;\n return [cloneTensor(data)];\n }\n case 'IdentityN':\n return (getParamValue('x', node, tensorMap, context) as Tensor[])\n .map((t: Tensor) => cloneTensor(t));\n case 'Snapshot':\n const snapshot =\n (getParamValue('x', node, tensorMap, context) as Tensor);\n return [cloneTensor(snapshot)];\n case 'Shape':\n return [ops.tensor1d(\n (getParamValue('x', node, tensorMap, context) as Tensor).shape,\n 'int32')];\n case 'ShapeN':\n return (getParamValue('x', node, tensorMap, context) as Tensor[])\n .map((t: Tensor) => ops.tensor1d(t.shape));\n case 'Size':\n return [ops.scalar(\n (getParamValue('x', node, tensorMap, context) as Tensor).size,\n 'int32')];\n case 'Rank':\n return [ops.scalar(\n (getParamValue('x', node, tensorMap, context) as Tensor).rank,\n 'int32')];\n case 'NoOp':\n return [ops.scalar(1)];\n case 'Print':\n const input = getParamValue('x', node, 
tensorMap, context) as Tensor;\n const data =\n getParamValue('data', node, tensorMap, context) as Tensor[];\n const message =\n getParamValue('message', node, tensorMap, context) as string;\n const summarize =\n getParamValue('summarize', node, tensorMap, context) as number;\n console.warn(\n 'The graph has a tf.print() operation,' +\n 'usually used for debugging, which slows down performance.');\n console.log(message);\n for (let i = 0; i < data.length; i++) {\n console.log(Array.prototype.slice.call(data[i].dataSync())\n .slice(0, summarize));\n }\n return [input];\n\n default:\n throw TypeError(`Node type ${node.op} is not implemented`);\n }\n };\n\nexport const CATEGORY = 'graph';\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '@tensorflow/tfjs-core';\n// tslint:disable-next-line: no-imports-from-dist\nimport * as tfOps from '@tensorflow/tfjs-core/dist/ops/ops_for_converter';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {InternalOpExecutor, Node} from '../types';\n\nimport {getParamValue} from './utils';\n\nexport const executeOp: InternalOpExecutor =\n (node: Node, tensorMap: NamedTensorsMap,\n context: ExecutionContext, ops = tfOps): Tensor[] => {\n switch (node.op) {\n case 'Equal': {\n return [ops.equal(\n getParamValue('a', node, tensorMap, context) as Tensor,\n getParamValue('b', node, tensorMap, context) as Tensor)];\n }\n case 'NotEqual': {\n return [ops.notEqual(\n getParamValue('a', node, tensorMap, context) as Tensor,\n getParamValue('b', node, tensorMap, context) as Tensor)];\n }\n case 'Greater': {\n return [ops.greater(\n getParamValue('a', node, tensorMap, context) as Tensor,\n getParamValue('b', node, tensorMap, context) as Tensor)];\n }\n case 'GreaterEqual': {\n return [ops.greaterEqual(\n getParamValue('a', node, tensorMap, context) as Tensor,\n getParamValue('b', node, tensorMap, context) as Tensor)];\n }\n case 'Less': {\n return [ops.less(\n getParamValue('a', node, tensorMap, context) as Tensor,\n getParamValue('b', node, tensorMap, context) as Tensor)];\n }\n case 'LessEqual': {\n return [ops.lessEqual(\n getParamValue('a', node, tensorMap, context) as Tensor,\n getParamValue('b', node, tensorMap, context) as Tensor)];\n }\n case 'LogicalAnd': {\n return [ops.logicalAnd(\n getParamValue('a', node, tensorMap, context) as Tensor,\n getParamValue('b', node, tensorMap, context) as Tensor)];\n }\n case 'LogicalNot': {\n return [ops.logicalNot(\n getParamValue('a', node, tensorMap, context) as Tensor)];\n }\n case 'LogicalOr': {\n return [ops.logicalOr(\n getParamValue('a', node, tensorMap, context) as Tensor,\n getParamValue('b', node, tensorMap, context) as Tensor)];\n }\n case 'Select':\n case 'SelectV2': {\n return [ops.where(\n getParamValue('condition', node, tensorMap, context) as 
Tensor,\n getParamValue('a', node, tensorMap, context) as Tensor,\n getParamValue('b', node, tensorMap, context) as Tensor)];\n }\n default:\n throw TypeError(`Node type ${node.op} is not implemented`);\n }\n };\n\nexport const CATEGORY = 'logical';\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor, Tensor2D} from '@tensorflow/tfjs-core';\n// tslint:disable-next-line: no-imports-from-dist\nimport * as tfOps from '@tensorflow/tfjs-core/dist/ops/ops_for_converter';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {InternalOpExecutor, Node} from '../types';\n\nimport {getParamValue} from './utils';\n\nexport const executeOp: InternalOpExecutor =\n (node: Node, tensorMap: NamedTensorsMap,\n context: ExecutionContext, ops = tfOps): Tensor[] => {\n switch (node.op) {\n case 'BatchMatMul':\n case 'BatchMatMulV2':\n case 'MatMul':\n return [ops.matMul(\n getParamValue('a', node, tensorMap, context) as Tensor2D,\n getParamValue('b', node, tensorMap, context) as Tensor2D,\n getParamValue('transposeA', node, tensorMap, context) as boolean,\n getParamValue('transposeB', node, tensorMap, context) as\n boolean)];\n\n case 'Einsum':\n return [ops.einsum(\n getParamValue('equation', node, tensorMap, context) as string,\n ...getParamValue('tensors', node, tensorMap, context) as\n Tensor[])];\n\n case 'Transpose':\n return [ops.transpose(\n getParamValue('x', node, tensorMap, context) as Tensor,\n getParamValue('perm', node, tensorMap, context) as number[])];\n\n case '_FusedMatMul':\n const [extraOp, activationFunc] =\n (getParamValue('fusedOps', node, tensorMap, context) as string[]);\n\n const isBiasAdd = extraOp === 'biasadd';\n const isPrelu = activationFunc === 'prelu';\n\n const numArgs =\n (getParamValue('numArgs', node, tensorMap, context) as number);\n const leakyreluAlpha =\n getParamValue('leakyreluAlpha', node, tensorMap, context) as\n number;\n\n if (isBiasAdd) {\n if (isPrelu && numArgs !== 2) {\n throw new Error(\n 'Fused MatMul with BiasAdd and Prelu must have two ' +\n 'extra arguments: bias and alpha.');\n }\n if (!isPrelu && numArgs !== 1) {\n throw new Error(\n 'Fused MatMul with BiasAdd must have one extra argument: bias.');\n }\n }\n const [biasArg, preluArg] =\n getParamValue('args', node, tensorMap, context) as Tensor[];\n return [ops.fused.matMul({\n a: getParamValue('a', node, tensorMap, context) as Tensor2D,\n b: getParamValue('b', node, tensorMap, context) as Tensor2D,\n transposeA: getParamValue('transposeA', node, tensorMap, context) as\n boolean,\n transposeB: getParamValue('transposeB', node, tensorMap, context) as\n boolean,\n bias: biasArg,\n activation: activationFunc as tfOps.fused.Activation,\n preluActivationWeights: preluArg,\n leakyreluAlpha\n })];\n\n default:\n throw TypeError(`Node type ${node.op} is not 
implemented`);\n }\n };\n\nexport const CATEGORY = 'matrices';\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Scalar, Tensor, Tensor3D, Tensor4D} from '@tensorflow/tfjs-core';\n// tslint:disable-next-line: no-imports-from-dist\nimport * as tfOps from '@tensorflow/tfjs-core/dist/ops/ops_for_converter';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {InternalOpExecutor, Node} from '../types';\n\nimport {getParamValue} from './utils';\n\nexport const executeOp: InternalOpExecutor =\n (node: Node, tensorMap: NamedTensorsMap,\n context: ExecutionContext, ops = tfOps): Tensor[] => {\n switch (node.op) {\n case 'EuclideanNorm':\n return [ops.euclideanNorm(\n getParamValue('x', node, tensorMap, context) as Tensor,\n getParamValue('axis', node, tensorMap, context) as number[],\n getParamValue('keepDims', node, tensorMap, context) as boolean)];\n case 'FusedBatchNorm':\n case 'FusedBatchNormV2': {\n return [ops.batchNorm(\n getParamValue('x', node, tensorMap, context) as Tensor,\n getParamValue('mean', node, tensorMap, context) as Tensor,\n getParamValue('variance', node, tensorMap, context) as Tensor,\n getParamValue('offset', node, tensorMap, context) as Tensor,\n getParamValue('scale', node, tensorMap, context) as Tensor,\n getParamValue('epsilon', node, tensorMap, context) as number)];\n }\n case 'FusedBatchNormV3': {\n return [ops.batchNorm(\n getParamValue('x', node, tensorMap, context) as Tensor,\n getParamValue('mean', node, tensorMap, context) as Tensor,\n getParamValue('variance', node, tensorMap, context) as Tensor,\n getParamValue('offset', node, tensorMap, context) as Tensor,\n getParamValue('scale', node, tensorMap, context) as Tensor,\n getParamValue('epsilon', node, tensorMap, context) as number)];\n }\n case 'LRN': {\n return [ops.localResponseNormalization(\n getParamValue('x', node, tensorMap, context) as Tensor3D |\n Tensor4D,\n getParamValue('radius', node, tensorMap, context) as number,\n getParamValue('bias', node, tensorMap, context) as number,\n getParamValue('alpha', node, tensorMap, context) as number,\n getParamValue('beta', node, tensorMap, context) as number)];\n }\n case 'Softmax': {\n return [ops.softmax(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n }\n case 'LogSoftmax': {\n return [ops.logSoftmax(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n }\n case 'SparseToDense': {\n return [ops.sparseToDense(\n getParamValue('sparseIndices', node, tensorMap, context) as\n Tensor,\n getParamValue('outputShape', node, tensorMap, context) as Tensor,\n getParamValue('sparseValues', node, tensorMap, context) as\n number[],\n getParamValue('defaultValue', node, tensorMap, context) as\n Scalar)];\n }\n default:\n throw TypeError(`Node type ${node.op} is not implemented`);\n }\n };\n\nexport 
const CATEGORY = 'normalization';\n","/**\n * @license\n * Copyright 2022 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor, Tensor1D} from '@tensorflow/tfjs-core';\n// tslint:disable-next-line: no-imports-from-dist\nimport * as tfOps from '@tensorflow/tfjs-core/dist/ops/ops_for_converter';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {InternalOpExecutor, Node} from '../types';\n\nimport {getParamValue} from './utils';\n\nexport const executeOp: InternalOpExecutor =\n (node: Node, tensorMap: NamedTensorsMap, context: ExecutionContext,\n ops = tfOps): Tensor[] => {\n switch (node.op) {\n case 'RaggedGather': {\n const {\n outputNestedSplits,\n outputDenseValues,\n } =\n ops.raggedGather(\n getParamValue(\n 'paramsNestedSplits', node, tensorMap, context) as\n Tensor[],\n getParamValue(\n 'paramsDenseValues', node, tensorMap, context) as Tensor,\n getParamValue('indices', node, tensorMap, context) as Tensor,\n getParamValue('outputRaggedRank', node, tensorMap, context) as\n number);\n return outputNestedSplits.concat(outputDenseValues);\n }\n case 'RaggedRange': {\n const {rtNestedSplits, rtDenseValues} = ops.raggedRange(\n getParamValue('starts', node, tensorMap, context) as Tensor,\n getParamValue('limits', node, tensorMap, context) as Tensor,\n getParamValue('splits', node, tensorMap, context) as Tensor);\n return [rtNestedSplits, rtDenseValues];\n }\n case 'RaggedTensorToTensor': {\n return [ops.raggedTensorToTensor(\n getParamValue('shape', node, tensorMap, context) as Tensor,\n getParamValue('values', node, tensorMap, context) as Tensor1D,\n getParamValue('defaultValue', node, tensorMap, context) as Tensor,\n getParamValue('rowPartitionTensors', node, tensorMap, context) as\n Tensor[],\n getParamValue('rowPartitionTypes', node, tensorMap, context) as\n string[])];\n }\n default:\n throw TypeError(`Node type ${node.op} is not implemented`);\n }\n };\n\nexport const CATEGORY = 'ragged';\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor, Tensor1D, Tensor2D} from '@tensorflow/tfjs-core';\n// tslint:disable-next-line: no-imports-from-dist\nimport * as tfOps from '@tensorflow/tfjs-core/dist/ops/ops_for_converter';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {InternalOpExecutor, Node} from '../types';\n\nimport {getParamValue} from './utils';\n\nexport const executeOp: InternalOpExecutor =\n (node: Node, tensorMap: NamedTensorsMap,\n context: ExecutionContext, ops = tfOps): Tensor[] => {\n switch (node.op) {\n case 'Max': {\n const axis =\n getParamValue('axis', node, tensorMap, context) as number[];\n const keepDims =\n getParamValue('keepDims', node, tensorMap, context) as boolean;\n return [ops.max(\n getParamValue('x', node, tensorMap, context) as Tensor, axis,\n keepDims)];\n }\n case 'Mean': {\n const axis =\n getParamValue('axis', node, tensorMap, context) as number[];\n const keepDims =\n getParamValue('keepDims', node, tensorMap, context) as boolean;\n return [ops.mean(\n getParamValue('x', node, tensorMap, context) as Tensor, axis,\n keepDims)];\n }\n case 'Min': {\n const axis =\n getParamValue('axis', node, tensorMap, context) as number[];\n const keepDims =\n getParamValue('keepDims', node, tensorMap, context) as boolean;\n return [ops.min(\n getParamValue('x', node, tensorMap, context) as Tensor, axis,\n keepDims)];\n }\n case 'Sum': {\n const axis =\n getParamValue('axis', node, tensorMap, context) as number[];\n const keepDims =\n getParamValue('keepDims', node, tensorMap, context) as boolean;\n return [ops.sum(\n getParamValue('x', node, tensorMap, context) as Tensor, axis,\n keepDims)];\n }\n case 'All': {\n const axis =\n getParamValue('axis', node, tensorMap, context) as number[];\n const keepDims =\n getParamValue('keepDims', node, tensorMap, context) as boolean;\n return [ops.all(\n getParamValue('x', node, tensorMap, context) as Tensor, axis,\n keepDims)];\n }\n case 'Any': {\n const axis =\n getParamValue('axis', node, tensorMap, context) as number[];\n const keepDims =\n getParamValue('keepDims', node, tensorMap, context) as boolean;\n return [ops.any(\n getParamValue('x', node, tensorMap, context) as Tensor, axis,\n keepDims)];\n }\n case 'ArgMax': {\n const axis =\n getParamValue('axis', node, tensorMap, context) as number;\n return [ops.argMax(\n getParamValue('x', node, tensorMap, context) as Tensor, axis)];\n }\n case 'ArgMin': {\n const axis =\n getParamValue('axis', node, tensorMap, context) as number;\n return [ops.argMin(\n getParamValue('x', node, tensorMap, context) as Tensor, axis)];\n }\n case 'Prod': {\n const axis =\n getParamValue('axis', node, tensorMap, context) as number[];\n const keepDims =\n getParamValue('keepDims', node, tensorMap, context) as boolean;\n return [ops.prod(\n getParamValue('x', node, tensorMap, 
context) as Tensor, axis,\n keepDims)];\n }\n case 'Cumprod': {\n const axis =\n getParamValue('axis', node, tensorMap, context) as number;\n const exclusive =\n getParamValue('exclusive', node, tensorMap, context) as boolean;\n const reverse =\n getParamValue('reverse', node, tensorMap, context) as boolean;\n return [ops.cumprod(\n getParamValue('x', node, tensorMap, context) as Tensor, axis,\n exclusive, reverse)];\n }\n case 'Cumsum': {\n const axis =\n getParamValue('axis', node, tensorMap, context) as number;\n const exclusive =\n getParamValue('exclusive', node, tensorMap, context) as boolean;\n const reverse =\n getParamValue('reverse', node, tensorMap, context) as boolean;\n return [ops.cumsum(\n getParamValue('x', node, tensorMap, context) as Tensor, axis,\n exclusive, reverse)];\n }\n case 'Bincount':\n const x = getParamValue('x', node, tensorMap, context) as Tensor1D;\n const weights =\n getParamValue('weights', node, tensorMap, context) as Tensor1D;\n const size =\n getParamValue('size', node, tensorMap, context) as number;\n\n return [ops.bincount(x, weights, size)];\n case 'DenseBincount': {\n const x = getParamValue('x', node, tensorMap, context) as Tensor1D |\n Tensor2D;\n const weights =\n getParamValue('weights', node, tensorMap, context) as Tensor1D |\n Tensor2D;\n const size =\n getParamValue('size', node, tensorMap, context) as number;\n\n const binaryOutput =\n getParamValue('binaryOutput', node, tensorMap, context) as\n boolean;\n\n return [ops.denseBincount(x, weights, size, binaryOutput)];\n }\n default:\n throw TypeError(`Node type ${node.op} is not implemented`);\n }\n };\n\nexport const CATEGORY = 'reduction';\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Scalar, Tensor, Tensor1D, tidy, util} from '@tensorflow/tfjs-core';\n// tslint:disable-next-line: no-imports-from-dist\nimport * as tfOps from '@tensorflow/tfjs-core/dist/ops/ops_for_converter';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {InternalOpExecutor, Node} from '../types';\n\nimport {getParamValue} from './utils';\n\nexport const executeOp: InternalOpExecutor =\n (node: Node, tensorMap: NamedTensorsMap,\n context: ExecutionContext, ops = tfOps): Tensor[] => {\n switch (node.op) {\n case 'ConcatV2':\n case 'Concat': {\n const n = getParamValue('n', node, tensorMap, context) as number;\n const axis =\n getParamValue('axis', node, tensorMap, context) as number;\n let inputs =\n getParamValue('tensors', node, tensorMap, context) as Tensor[];\n inputs = inputs.slice(0, n);\n return [ops.concat(inputs, axis)];\n }\n case 'Gather': {\n const input = getParamValue('x', node, tensorMap, context) as Tensor;\n const indices =\n getParamValue('indices', node, tensorMap, context) as Tensor1D;\n return [ops.gather(input, ops.cast(indices, 'int32'), 
0)];\n }\n case 'GatherV2': {\n const axis =\n getParamValue('axis', node, tensorMap, context) as number;\n const batchDims =\n getParamValue('batchDims', node, tensorMap, context) as number;\n const input = getParamValue('x', node, tensorMap, context) as Tensor;\n const indices =\n getParamValue('indices', node, tensorMap, context) as Tensor1D;\n return [ops.gather(\n input, ops.cast(indices, 'int32'), axis, batchDims)];\n }\n case 'Reverse': {\n const dims =\n getParamValue('dims', node, tensorMap, context) as boolean[];\n const axis = [];\n for (let i = 0; i < dims.length; i++) {\n if (dims[i]) {\n axis.push(i);\n }\n }\n const input = getParamValue('x', node, tensorMap, context) as Tensor;\n return [ops.reverse(input, axis)];\n }\n case 'ReverseV2': {\n const axis =\n getParamValue('axis', node, tensorMap, context) as number[];\n const input = getParamValue('x', node, tensorMap, context) as Tensor;\n return [ops.reverse(input, axis)];\n }\n case 'Slice': {\n // tslint:disable-next-line:no-any\n const begin = getParamValue('begin', node, tensorMap, context) as any;\n // tslint:disable-next-line:no-any\n const size = getParamValue('size', node, tensorMap, context) as any;\n return [ops.slice(\n getParamValue('x', node, tensorMap, context) as Tensor, begin,\n size)];\n }\n case 'StridedSlice': {\n const begin =\n getParamValue('begin', node, tensorMap, context) as number[];\n const end =\n getParamValue('end', node, tensorMap, context) as number[];\n const strides =\n getParamValue('strides', node, tensorMap, context) as number[];\n const beginMask =\n getParamValue('beginMask', node, tensorMap, context) as number;\n const endMask =\n getParamValue('endMask', node, tensorMap, context) as number;\n const ellipsisMask =\n getParamValue('ellipsisMask', node, tensorMap, context) as number;\n const newAxisMask =\n getParamValue('newAxisMask', node, tensorMap, context) as number;\n const shrinkAxisMask =\n getParamValue('shrinkAxisMask', node, tensorMap, context) as\n number;\n const tensor = getParamValue('x', node, tensorMap, context) as Tensor;\n\n return [ops.stridedSlice(\n tensor, begin, end, strides, beginMask, endMask, ellipsisMask,\n newAxisMask, shrinkAxisMask)];\n }\n case 'Pack': {\n return tidy(() => {\n const axis =\n getParamValue('axis', node, tensorMap, context) as number;\n const tensors =\n getParamValue('tensors', node, tensorMap, context) as Tensor[];\n // Reshape the tensors to the first tensor's shape if they don't\n // match.\n const shape = tensors[0].shape;\n const squeezedShape = ops.squeeze(tensors[0]).shape;\n const mapped = tensors.map(tensor => {\n const sameShape = util.arraysEqual(tensor.shape, shape);\n if (!sameShape &&\n !util.arraysEqual(\n ops.squeeze(tensor).shape, squeezedShape)) {\n throw new Error('the input tensors shape does not match');\n }\n return sameShape ? 
tensor : ops.reshape(tensor, shape);\n });\n return [ops.stack(mapped, axis)];\n });\n }\n case 'Unpack': {\n const axis =\n getParamValue('axis', node, tensorMap, context) as number;\n const tensor =\n getParamValue('tensor', node, tensorMap, context) as Tensor;\n return ops.unstack(tensor, axis);\n }\n case 'Tile': {\n const reps =\n getParamValue('reps', node, tensorMap, context) as number[];\n return [ops.tile(\n getParamValue('x', node, tensorMap, context) as Tensor, reps)];\n }\n case 'Split':\n case 'SplitV': {\n const axis =\n getParamValue('axis', node, tensorMap, context) as number;\n const numOrSizeSplits =\n getParamValue('numOrSizeSplits', node, tensorMap, context) as\n number |\n number[];\n const tensor = getParamValue('x', node, tensorMap, context) as Tensor;\n\n return ops.split(tensor, numOrSizeSplits, axis);\n }\n case 'ScatterNd': {\n const indices =\n getParamValue('indices', node, tensorMap, context) as Tensor;\n const values =\n getParamValue('values', node, tensorMap, context) as Tensor;\n const shape =\n getParamValue('shape', node, tensorMap, context) as number[];\n return [ops.scatterND(indices, values, shape)];\n }\n case 'GatherNd': {\n const x = getParamValue('x', node, tensorMap, context) as Tensor;\n const indices =\n getParamValue('indices', node, tensorMap, context) as Tensor;\n return [ops.gatherND(x, indices)];\n }\n case 'SparseToDense': {\n const indices =\n getParamValue('sparseIndices', node, tensorMap, context) as\n Tensor;\n const shape =\n getParamValue('outputShape', node, tensorMap, context) as\n number[];\n const sparseValues =\n getParamValue('sparseValues', node, tensorMap, context) as Tensor;\n const defaultValue =\n getParamValue('defaultValue', node, tensorMap, context) as Scalar;\n return [ops.sparseToDense(\n indices, sparseValues, shape,\n sparseValues.dtype === defaultValue.dtype ?\n defaultValue :\n ops.cast(defaultValue, sparseValues.dtype))];\n }\n default:\n throw TypeError(`Node type ${node.op} is not implemented`);\n }\n };\n\nexport const CATEGORY = 'slice_join';\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Scalar, Tensor, Tensor1D, Tensor2D} from '@tensorflow/tfjs-core';\n// tslint:disable-next-line: no-imports-from-dist\nimport * as tfOps from '@tensorflow/tfjs-core/dist/ops/ops_for_converter';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {InternalOpExecutor, Node} from '../types';\n\nimport {getParamValue} from './utils';\n\nexport const executeOp: InternalOpExecutor =\n (node: Node, tensorMap: NamedTensorsMap,\n context: ExecutionContext, ops = tfOps): Tensor[] => {\n switch (node.op) {\n case 'SparseFillEmptyRows': {\n const {\n outputIndices,\n outputValues,\n emptyRowIndicator,\n reverseIndexMap\n } =\n ops.sparse.sparseFillEmptyRows(\n getParamValue('indices', node, tensorMap, context) as\n Tensor2D,\n getParamValue('values', node, tensorMap, context) as Tensor1D,\n getParamValue('denseShape', node, tensorMap, context) as\n Tensor1D,\n getParamValue('defaultValue', node, tensorMap, context) as\n Scalar);\n return [\n outputIndices, outputValues, emptyRowIndicator, reverseIndexMap\n ];\n }\n case 'SparseReshape': {\n const {outputIndices, outputShape} = ops.sparse.sparseReshape(\n getParamValue('inputIndices', node, tensorMap, context) as\n Tensor2D,\n getParamValue('inputShape', node, tensorMap, context) as Tensor1D,\n getParamValue('newShape', node, tensorMap, context) as Tensor1D);\n return [outputIndices, outputShape];\n }\n case 'SparseSegmentMean': {\n const outputData = ops.sparse.sparseSegmentMean(\n getParamValue('data', node, tensorMap, context) as Tensor,\n getParamValue('indices', node, tensorMap, context) as Tensor1D,\n getParamValue('segmentIds', node, tensorMap, context) as\n Tensor1D);\n return [outputData];\n }\n case 'SparseSegmentSum': {\n const outputData = ops.sparse.sparseSegmentSum(\n getParamValue('data', node, tensorMap, context) as Tensor,\n getParamValue('indices', node, tensorMap, context) as Tensor1D,\n getParamValue('segmentIds', node, tensorMap, context) as\n Tensor1D);\n return [outputData];\n }\n default:\n throw TypeError(`Node type ${node.op} is not implemented`);\n }\n };\n\nexport const CATEGORY = 'sparse';\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor} from '@tensorflow/tfjs-core';\n// tslint:disable-next-line: no-imports-from-dist\nimport * as tfOps from '@tensorflow/tfjs-core/dist/ops/ops_for_converter';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {InternalOpExecutor, Node} from '../types';\n\nimport {getParamValue} from './utils';\n\nexport const executeOp: InternalOpExecutor =\n (node: Node, tensorMap: NamedTensorsMap, context: ExecutionContext,\n ops = tfOps): Tensor[] => {\n switch (node.op) {\n case 'FFT': {\n return [ops.fft(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n }\n case 'IFFT': {\n return [ops.ifft(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n }\n case 'RFFT': {\n return [ops.rfft(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n }\n case 'IRFFT': {\n return [ops.irfft(\n getParamValue('x', node, tensorMap, context) as Tensor)];\n }\n default:\n throw TypeError(`Node type ${node.op} is not implemented`);\n }\n };\n\nexport const CATEGORY = 'spectral';\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Scalar, Tensor, Tensor1D} from '@tensorflow/tfjs-core';\n// tslint:disable-next-line: no-imports-from-dist\nimport * as tfOps from '@tensorflow/tfjs-core/dist/ops/ops_for_converter';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {InternalOpExecutor, Node} from '../types';\n\nimport {getParamValue} from './utils';\n\nexport const executeOp: InternalOpExecutor =\n (node: Node, tensorMap: NamedTensorsMap,\n context: ExecutionContext, ops = tfOps): Tensor[] => {\n switch (node.op) {\n case 'StringNGrams': {\n const {nGrams, nGramsSplits} = ops.string.stringNGrams(\n getParamValue('data', node, tensorMap, context) as Tensor1D,\n getParamValue('dataSplits', node, tensorMap, context) as Tensor,\n getParamValue('separator', node, tensorMap, context) as string,\n getParamValue('nGramWidths', node, tensorMap, context) as\n number[],\n getParamValue('leftPad', node, tensorMap, context) as string,\n getParamValue('rightPad', node, tensorMap, context) as string,\n getParamValue('padWidth', node, tensorMap, context) as number,\n getParamValue(\n 'preserveShortSequences', node, tensorMap, context) as\n boolean);\n return [nGrams, nGramsSplits];\n }\n case 'StringSplit': {\n const {indices, values, shape} = ops.string.stringSplit(\n getParamValue('input', node, tensorMap, context) as Tensor1D,\n getParamValue('delimiter', node, tensorMap, context) as Scalar,\n getParamValue('skipEmpty', node, tensorMap, context) as boolean);\n return [indices, values, shape];\n }\n case 'StringToHashBucketFast': {\n const output = ops.string.stringToHashBucketFast(\n getParamValue('input', node, tensorMap, context) as Tensor,\n getParamValue('numBuckets', node, tensorMap, context) as number);\n return [output];\n }\n default:\n throw TypeError(`Node type ${node.op} is not implemented`);\n }\n };\n\nexport const CATEGORY = 'string';\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Tensor, Tensor4D} from '@tensorflow/tfjs-core';\n// tslint:disable-next-line: no-imports-from-dist\nimport * as tfOps from '@tensorflow/tfjs-core/dist/ops/ops_for_converter';\n\nimport {NamedTensorsMap} from '../../data/types';\nimport {ExecutionContext} from '../../executor/execution_context';\nimport {InternalOpExecutor, Node} from '../types';\n\nimport {getParamValue} from './utils';\n\nexport const executeOp: InternalOpExecutor =\n (node: Node, tensorMap: NamedTensorsMap,\n context: ExecutionContext, ops = tfOps): Tensor[] => {\n switch (node.op) {\n case 'Cast': {\n return [ops.cast(\n getParamValue('x', node, tensorMap, context) as Tensor,\n getParamValue('dtype', node, tensorMap, context) as 'int32' |\n 'float32' | 'bool')];\n }\n case 'ExpandDims': {\n const axis =\n getParamValue('axis', node, tensorMap, context) as number;\n return [ops.expandDims(\n getParamValue('x', node, tensorMap, context) as Tensor, axis)];\n }\n case 'Squeeze': {\n const axis =\n getParamValue('axis', node, tensorMap, context) as number[];\n return [ops.squeeze(\n getParamValue('x', node, tensorMap, context) as Tensor, axis)];\n }\n\n case 'Reshape': {\n return [ops.reshape(\n getParamValue('x', node, tensorMap, context) as Tensor,\n getParamValue('shape', node, tensorMap, context) as number[])];\n }\n case 'MirrorPad': {\n return [ops.mirrorPad(\n getParamValue('x', node, tensorMap, context) as Tensor,\n getParamValue('padding', node, tensorMap, context) as\n Array<[number, number]>,\n getParamValue('mode', node, tensorMap, context) as 'reflect' |\n 'symmetric')];\n }\n case 'PadV2':\n case 'Pad': {\n return [ops.pad(\n getParamValue('x', node, tensorMap, context) as Tensor,\n getParamValue('padding', node, tensorMap, context) as\n Array<[number, number]>,\n getParamValue('constantValue', node, tensorMap, context) as\n number)];\n }\n case 'SpaceToBatchND': {\n const blockShape =\n getParamValue('blockShape', node, tensorMap, context) as number[];\n const paddings =\n getParamValue('paddings', node, tensorMap, context) as number[][];\n return [ops.spaceToBatchND(\n getParamValue('x', node, tensorMap, context) as Tensor,\n blockShape, paddings)];\n }\n case 'BatchToSpaceND': {\n const blockShape =\n getParamValue('blockShape', node, tensorMap, context) as number[];\n const crops =\n getParamValue('crops', node, tensorMap, context) as number[][];\n return [ops.batchToSpaceND(\n getParamValue('x', node, tensorMap, context) as Tensor,\n blockShape, crops)];\n }\n case 'DepthToSpace': {\n const blockSize =\n getParamValue('blockSize', node, tensorMap, context) as number;\n const dataFormat =\n (getParamValue('dataFormat', node, tensorMap, context) as\n string).toUpperCase() as 'NHWC' |\n 'NCHW';\n return [ops.depthToSpace(\n getParamValue('x', node, tensorMap, context) as Tensor4D,\n blockSize, dataFormat)];\n }\n case 'BroadcastTo': {\n 
return [ops.broadcastTo(\n getParamValue('x', node, tensorMap, context) as Tensor,\n getParamValue('shape', node, tensorMap, context) as number[])];\n }\n case 'BroadcastArgs': {\n return [ops.broadcastArgs(\n getParamValue('s0', node, tensorMap, context) as Tensor,\n getParamValue('s1', node, tensorMap, context) as Tensor)];\n }\n default:\n throw TypeError(`Node type ${node.op} is not implemented`);\n }\n };\n\nexport const CATEGORY = 'transformation';\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Tensor} from '@tensorflow/tfjs-core';\n\nimport {NamedTensorsMap, TensorArrayMap, TensorListMap} from '../data/types';\n\nimport {TensorArray} from './tensor_array';\nimport {TensorList} from './tensor_list';\nimport {FunctionExecutor} from './types';\n\nexport interface ExecutionContextInfo {\n id: number; // the unique id of the context info\n frameName: string; // The frame name of the loop, this comes from\n // the TensorFlow NodeDef.\n iterationId: number; // The iteration id of the loop\n}\n\n/**\n * ExecutionContext captures the runtime environment of the node. It keeps\n * track of the current frame and iteration for the control flow ops.\n *\n * For example, typical Dynamic RNN model may contain loops, for which\n * TensorFlow will generate graphs with Enter/Exit nodes to control the\n * current execution frame, and NextIteration Nodes for iteration id increment.\n * For model with branch logic, TensorFLow will generate Switch/Merge ops.\n */\nexport class ExecutionContext {\n private rootContext = {id: 0, frameName: '', iterationId: 0};\n private contexts: ExecutionContextInfo[] = [this.rootContext];\n private lastId = 0;\n private _currentContextIds: string[];\n\n constructor(\n readonly weightMap: NamedTensorsMap = {},\n readonly tensorArrayMap: TensorArrayMap = {},\n readonly tensorListMap: TensorListMap = {},\n readonly functionMap: {[key: string]: FunctionExecutor} = {}) {\n this.generateCurrentContextIds();\n }\n\n private newFrame(id: number, frameName: string) {\n return {id, frameName, iterationId: 0};\n }\n\n /**\n * Set the current context\n * @param contexts: ExecutionContextInfo[] the current path of execution\n * frames\n */\n set currentContext(contexts: ExecutionContextInfo[]) {\n if (this.contexts !== contexts) {\n this.contexts = contexts;\n this.generateCurrentContextIds();\n }\n }\n\n get currentContext(): ExecutionContextInfo[] {\n return this.contexts;\n }\n\n /**\n * Returns the current context in string format.\n */\n get currentContextId(): string {\n return this._currentContextIds[0];\n }\n\n /**\n * Returns the current context and all parent contexts in string format.\n * This allow access to the nodes in the current and parent frames.\n */\n get currentContextIds(): string[] {\n return this._currentContextIds;\n }\n\n private generateCurrentContextIds() {\n const names = [];\n for 
(let i = 0; i < this.contexts.length - 1; i++) {\n const contexts = this.contexts.slice(0, this.contexts.length - i);\n names.push(this.contextIdforContexts(contexts));\n }\n names.push('');\n this._currentContextIds = names;\n }\n\n private contextIdforContexts(contexts: ExecutionContextInfo[]) {\n return contexts ?\n contexts\n .map(\n context => (context.id === 0 && context.iterationId === 0) ?\n '' :\n `${context.frameName}-${context.iterationId}`)\n .join('/') :\n '';\n }\n\n /**\n * Enter a new frame, a new context is pushed on the current context list.\n * @param frameId new frame id\n */\n enterFrame(frameId: string) {\n if (this.contexts) {\n this.lastId++;\n this.contexts = this.contexts.slice();\n this.contexts.push(this.newFrame(this.lastId, frameId));\n this._currentContextIds.unshift(this.contextIdforContexts(this.contexts));\n }\n }\n\n /**\n * Exit the current frame, the last context is removed from the current\n * context list.\n */\n exitFrame() {\n if (this.contexts && this.contexts.length > 1) {\n this.contexts = this.contexts.slice();\n this.contexts.splice(-1);\n this.currentContextIds.shift();\n } else {\n throw new Error('Cannot exit frame, the context is empty');\n }\n }\n\n /**\n * Enter the next iteration of a loop, the iteration id of last context is\n * increased.\n */\n nextIteration() {\n if (this.contexts && this.contexts.length > 0) {\n this.contexts = this.contexts.slice();\n this.lastId++;\n const context =\n Object.assign({}, this.contexts[this.contexts.length - 1]);\n context.iterationId += 1;\n context.id = this.lastId;\n this.contexts.splice(-1, 1, context);\n this._currentContextIds.splice(\n 0, 1, this.contextIdforContexts(this.contexts));\n } else {\n throw new Error('Cannot increase frame iteration, the context is empty');\n }\n }\n\n getWeight(name: string): Tensor[] {\n return this.weightMap[name];\n }\n\n addTensorArray(tensorArray: TensorArray) {\n this.tensorArrayMap[tensorArray.id] = tensorArray;\n }\n\n getTensorArray(id: number): TensorArray {\n return this.tensorArrayMap[id];\n }\n\n addTensorList(tensorList: TensorList) {\n this.tensorListMap[tensorList.id] = tensorList;\n }\n\n getTensorList(id: number): TensorList {\n return this.tensorListMap[id];\n }\n\n dispose(keepIds: Set) {\n for (const key in this.tensorArrayMap) {\n this.tensorArrayMap[key].clearAndClose(keepIds);\n }\n\n for (const key in this.tensorListMap) {\n this.tensorListMap[key].clearAndClose(keepIds);\n }\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {NamedTensorMap} from '@tensorflow/tfjs-core';\n\nimport {NamedTensorsMap} from '../data/types';\nimport {parseNodeName} from '../operations/executors/utils';\nimport {Graph, Node} from '../operations/types';\n\nexport interface ExecutionInfo {\n inputs: NamedTensorMap;\n outputs: Node[];\n usedNodes: Set;\n missingInputs: string[];\n dynamicNode: Node;\n syncInputs: string[];\n}\n\n/**\n * Given graph inputs and desired outputs, find the minimal set of nodes\n * to execute in order to compute the outputs. In addition return other useful\n * info such:\n * - Missing inputs needed to compute the output.\n * - Whether the subgraph contains dynamic ops (control flow, dynamic shape).\n * - Alternative inputs in order to avoid async (dynamic op) execution.\n */\nexport function getExecutionSubgraph(\n inputs: NamedTensorMap, outputs: Node[], weightMap: NamedTensorsMap,\n initNodes?: Node[]): ExecutionInfo {\n const usedNodes = new Set();\n const missingInputs: string[] = [];\n let dynamicNode: Node = null;\n let syncInputs: string[] = null;\n\n // Start with the outputs, going backwards and find all the nodes that are\n // needed to compute those outputs.\n const seen = new Set();\n const inputNodeNames =\n Object.keys(inputs).map(name => parseNodeName(name)[0]);\n\n let initNodeNames: string[] = [];\n if (initNodes != null) {\n initNodeNames = initNodes.map(node => parseNodeName(node.name)[0]);\n }\n\n const frontier = [...outputs];\n while (frontier.length > 0) {\n const node = frontier.pop();\n if (isControlFlow(node) || isDynamicShape(node) || isHashTable(node)) {\n if (dynamicNode == null) {\n dynamicNode = node;\n syncInputs = dynamicNode.children.map(child => child.name)\n .filter(name => usedNodes.has(name));\n }\n }\n usedNodes.add(node.name);\n\n // Weights are dead end since we already have their values.\n if (weightMap[node.name] != null) {\n continue;\n }\n // This node is a dead end since it's one of the user-provided inputs.\n if (inputNodeNames.indexOf(node.name) !== -1) {\n continue;\n }\n // This node is a dead end since it doesn't have any inputs.\n if (initNodeNames.indexOf(node.name) !== -1) {\n continue;\n }\n if (node.inputs.length === 0) {\n missingInputs.push(node.name);\n continue;\n }\n node.inputs.forEach(input => {\n // Don't add to the frontier if it is already there.\n if (seen.has(input.name)) {\n return;\n }\n seen.add(input.name);\n frontier.push(input);\n });\n }\n return {inputs, outputs, usedNodes, missingInputs, dynamicNode, syncInputs};\n}\n\n/**\n * Given the execution info, return a list of nodes in topological order that\n * need to be executed to compute the output.\n */\nexport function getNodesInTopologicalOrder(\n graph: Graph, weightMap: NamedTensorsMap,\n executionInfo: ExecutionInfo): Node[] {\n const {usedNodes, inputs} = executionInfo;\n const frontier: Node[] = [];\n const 
inputNodes = Object.keys(inputs)\n .map(name => parseNodeName(name)[0])\n .map(name => graph.nodes[name]);\n const initNodes = graph.initNodes;\n\n inputNodes.forEach(input => {\n if (usedNodes.has(input.name)) {\n frontier.push(input);\n }\n });\n graph.weights.forEach(weight => {\n if (usedNodes.has(weight.name)) {\n frontier.push(weight);\n }\n });\n if (initNodes != null) {\n initNodes.forEach(node => {\n if (usedNodes.has(node.name)) {\n frontier.push(node);\n }\n });\n }\n const seen = new Set();\n const orderedNodes: Node[] = [];\n while (frontier.length > 0) {\n const node = frontier.pop();\n seen.add(node.name);\n if (!weightMap[node.name]) {\n orderedNodes.push(node);\n }\n node.children.forEach(child => {\n if (!seen.has(child.name) && usedNodes.has(child.name) &&\n child.inputs.every(input => seen.has(input.name))) {\n frontier.push(child);\n }\n });\n }\n return orderedNodes;\n}\n\nconst CONTROL_FLOW_OPS = [\n 'Switch', 'Merge', 'Enter', 'Exit', 'NextIteration', 'StatelessIf',\n 'StatelessWhile', 'if', 'While'\n];\nconst DYNAMIC_SHAPE_OPS = [\n 'NonMaxSuppressionV2', 'NonMaxSuppressionV3', 'NonMaxSuppressionV5', 'Where'\n];\nconst HASH_TABLE_OPS = [\n 'HashTable', 'HashTableV2', 'LookupTableImport', 'LookupTableImportV2',\n 'LookupTableFind', 'LookupTableFindV2', 'LookupTableSize', 'LookupTableSizeV2'\n];\n\nexport function isControlFlow(node: Node) {\n return CONTROL_FLOW_OPS.indexOf(node.op) >= 0;\n}\n\nexport function isDynamicShape(node: Node) {\n return DYNAMIC_SHAPE_OPS.indexOf(node.op) >= 0;\n}\n\nexport function isHashTable(node: Node) {\n return HASH_TABLE_OPS.indexOf(node.op) >= 0;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, env, keep, NamedTensorMap, Tensor, tidy, util} from '@tensorflow/tfjs-core';\n\nimport {ISignatureDef} from '../data/compiled_api';\nimport {NamedTensorsMap, TensorArrayMap, TensorInfo, TensorListMap} from '../data/types';\nimport {getNodeNameAndIndex, getParamValue, getTensor, getTensorsForCurrentContenxt, parseNodeName} from '../operations/executors/utils';\nimport {executeOp} from '../operations/operation_executor';\nimport {Graph, Node} from '../operations/types';\n\nimport {ExecutionContext, ExecutionContextInfo} from './execution_context';\nimport {getExecutionSubgraph, getNodesInTopologicalOrder, isControlFlow} from './model_analysis';\nimport {ResourceManager} from './resource_manager';\nimport {FunctionExecutor} from './types';\n\ninterface NodeWithContexts {\n contexts: ExecutionContextInfo[];\n node: Node;\n}\n\nexport class GraphExecutor implements FunctionExecutor {\n private compiledMap: Map = new Map();\n private _weightMap: NamedTensorsMap = {};\n private _weightIds: number[];\n private _signature: ISignatureDef;\n private _inputs: Node[];\n private _outputs: Node[];\n private _initNodes: Node[]; // Internal init nodes to start 
initialization.\n private SEPERATOR = ',';\n private _functions: {[key: string]: Graph} = {};\n private _functionExecutorMap: {[key: string]: FunctionExecutor} = {};\n private _resourceManager: ResourceManager;\n private clonedTensorsMap: NamedTensorsMap;\n private keepIntermediateTensors = false;\n\n get weightIds(): number[] {\n return this.parent ? this.parent.weightIds : this._weightIds;\n }\n\n get functionExecutorMap(): {[key: string]: FunctionExecutor} {\n return this.parent ? this.parent.functionExecutorMap :\n this._functionExecutorMap;\n }\n\n get weightMap(): NamedTensorsMap {\n return this.parent ? this.parent.weightMap : this._weightMap;\n }\n\n set weightMap(weightMap: NamedTensorsMap) {\n const weightIds = Object.keys(weightMap).map(\n key => weightMap[key].map(tensor => tensor.id));\n this._weightIds = [].concat(...weightIds);\n this._weightMap = weightMap;\n }\n\n /**\n * Set `ResourceManager` shared by executors of a model.\n * @param resourceManager: `ResourceManager` of the `GraphModel`.\n */\n set resourceManager(resourceManager: ResourceManager) {\n this._resourceManager = resourceManager;\n }\n\n get inputs(): TensorInfo[] {\n return this._inputs.map(node => {\n return {\n name: node.name,\n shape: node.attrParams['shape'] ?\n node.attrParams['shape'].value as number[] :\n undefined,\n dtype: node.attrParams['dtype'] ?\n node.attrParams['dtype'].value as DataType :\n undefined\n };\n });\n }\n\n get outputs(): TensorInfo[] {\n return this._outputs.map(node => {\n return {\n name: node.name,\n shape: node.attrParams['shape'] ?\n node.attrParams['shape'].value as number[] :\n undefined,\n dtype: node.attrParams['dtype'] ?\n node.attrParams['dtype'].value as DataType :\n undefined\n };\n });\n }\n\n get inputNodes(): string[] {\n return this._inputs.map(node => node.signatureKey || node.name);\n }\n\n get outputNodes(): string[] {\n return this._outputs.map((node) => {\n const name = node.signatureKey || node.name;\n return node.defaultOutput ? (`${name}:${node.defaultOutput}`) : name;\n });\n }\n\n get functions(): {[key: string]: ISignatureDef} {\n return Object.keys(this._functions).reduce((map, key) => {\n map[key] = this._functions[key].signature;\n return map;\n }, {} as {[key: string]: ISignatureDef});\n }\n\n /**\n *\n * @param graph Graph the model or function graph to be executed.\n * @param parent When building function exector you need to set the parent\n * executor. 
Since the weights and function executor maps are set at parant\n * level, that function executor can access the function maps and weight maps\n * through the parent.\n */\n constructor(private graph: Graph, private parent?: GraphExecutor) {\n this._outputs = graph.outputs;\n this._inputs = graph.inputs;\n this._initNodes = graph.initNodes;\n this._signature = graph.signature;\n this._functions = graph.functions;\n // create sub-graph executors\n if (graph.functions != null) {\n Object.keys(graph.functions).forEach(name => {\n this._functionExecutorMap[name] =\n new GraphExecutor(graph.functions[name], this);\n });\n }\n }\n\n private getCompilationKey(inputs: Node[], outputs: Node[]): string {\n const sortedInputs = inputs.map(node => node.name).sort();\n const sortedOutputs = outputs.map(node => node.name).sort();\n return sortedInputs.join(this.SEPERATOR) + '--' +\n sortedOutputs.join(this.SEPERATOR);\n }\n\n /**\n * Compiles the inference graph and returns the minimal set of nodes that are\n * required for execution, in the correct execution order.\n */\n private compile(inputs: NamedTensorMap, outputs: Node[]): Node[] {\n const executionInfo =\n getExecutionSubgraph(inputs, outputs, this.weightMap, this._initNodes);\n const {missingInputs, dynamicNode, syncInputs} = executionInfo;\n if (dynamicNode != null) {\n throw new Error(\n `This execution contains the node '${dynamicNode.name}', which has ` +\n `the dynamic op '${dynamicNode.op}'. Please use ` +\n `model.executeAsync() instead. Alternatively, to avoid the ` +\n `dynamic ops, specify the inputs [${syncInputs}]`);\n }\n\n if (missingInputs.length > 0) {\n const outNames = outputs.map(n => n.name);\n const inNames = Object.keys(inputs);\n throw new Error(\n `Cannot compute the outputs [${outNames}] from the provided inputs ` +\n `[${inNames}]. Missing the following inputs: [${missingInputs}]`);\n }\n\n return getNodesInTopologicalOrder(\n this.graph, this.weightMap, executionInfo);\n }\n\n private cloneAndKeepTensor(tensor: Tensor) {\n if (tensor == null) {\n return null;\n }\n const clone = tensor.clone();\n // Keep the clone because`model.execute()` may be called within\n // a `tidy()`, but the user may inspect these tensors after the\n // tidy.\n keep(clone);\n return clone;\n }\n\n private cloneTensorList(tensors: Tensor[]) {\n if (!tensors) {\n return null;\n }\n const clonedTensor = tensors.map(tensor => {\n return this.cloneAndKeepTensor(tensor);\n });\n return clonedTensor;\n }\n\n private cloneTensorMap(tensorsMap: NamedTensorsMap): NamedTensorsMap {\n return Object.fromEntries(\n Object.entries(tensorsMap).map(([name, tensorsList]) => {\n return [name, this.cloneTensorList(tensorsList)];\n }));\n }\n\n /**\n * Executes the inference for given input tensors.\n * @param inputs Tensor map for the model inputs, keyed by the input node\n * names.\n * @param outputs Optional. 
output node name from the Tensorflow model, if\n * no outputs are specified, the default outputs of the model would be used.\n * You can inspect intermediate nodes of the model by adding them to the\n * outputs array.\n */\n execute(inputs: NamedTensorMap, outputs?: string[]): Tensor[] {\n // Dispose any tensors from a prior run to avoid leaking them.\n this.disposeIntermediateTensors();\n inputs = this.mapInputs(inputs);\n const names = Object.keys(inputs).sort();\n this.checkInputs(inputs);\n this.checkInputShapeAndType(inputs);\n outputs = this.mapOutputs(outputs);\n this.checkOutputs(outputs);\n const inputNodes =\n names.map(name => this.graph.nodes[parseNodeName(name)[0]]);\n const outputNodeNames = outputs.map(name => parseNodeName(name)[0]);\n let outputNodes = outputNodeNames.map(name => this.graph.nodes[name]);\n // If no outputs are specified, then use the default outputs of the model.\n if (outputNodes.length === 0) {\n outputNodes = this._outputs;\n }\n\n const compilationKey = this.getCompilationKey(inputNodes, outputNodes);\n\n // Do nothing if the compiled graph cache contains the input.\n let orderedNodes = this.compiledMap.get(compilationKey);\n if (orderedNodes == null) {\n orderedNodes = this.compile(inputs, outputNodes);\n this.compiledMap.set(compilationKey, orderedNodes);\n }\n\n // Keep tensors if KEEP_INTERMEDIATE_TENSORS is on.\n try {\n this.keepIntermediateTensors = env().getBool('KEEP_INTERMEDIATE_TENSORS');\n } catch (e) {\n this.keepIntermediateTensors = false;\n console.warn(e.message);\n }\n const tensorArrayMap: TensorArrayMap = {};\n const tensorListMap: TensorListMap = {};\n\n return tidy(() => {\n const context = new ExecutionContext(\n this.weightMap, tensorArrayMap, tensorListMap,\n this.functionExecutorMap);\n const tensorsMap: NamedTensorsMap = {...this.weightMap};\n if (this.keepIntermediateTensors) {\n this.clonedTensorsMap = this.cloneTensorMap(this.weightMap);\n }\n\n Object.keys(inputs).forEach(name => {\n const [nodeName, index] = parseNodeName(name);\n const tensors: Tensor[] = [];\n tensors[index] = inputs[name];\n tensorsMap[nodeName] = tensors;\n if (this.keepIntermediateTensors) {\n this.clonedTensorsMap[nodeName] = this.cloneTensorList(tensors);\n }\n });\n\n const tensorsToKeep = this.getFrozenTensorIds(tensorsMap);\n const intermediateTensorConsumerCount: {[key: number]: number} = {};\n for (let i = 0; i < orderedNodes.length; i++) {\n const node = orderedNodes[i];\n if (!tensorsMap[node.name]) {\n const tensors =\n executeOp(node, tensorsMap, context, this._resourceManager) as\n Tensor[];\n if (util.isPromise(tensors)) {\n throw new Error(\n `The execution of the op '${node.op}' returned a promise. 
` +\n `Please use model.executeAsync() instead.`);\n }\n tensorsMap[node.name] = tensors;\n if (this.keepIntermediateTensors) {\n this.clonedTensorsMap[node.name] = this.cloneTensorList(tensors);\n }\n this.checkTensorForDisposal(\n node.name, node, tensorsMap, context, tensorsToKeep,\n outputNodeNames, intermediateTensorConsumerCount);\n }\n }\n\n // dispose the context for the root executor\n if (this.parent == null) {\n context.dispose(tensorsToKeep);\n }\n\n return outputs.map(name => getTensor(name, tensorsMap, context));\n });\n }\n\n private getFrozenTensorIds(tensorMap: NamedTensorsMap): Set {\n const ids = [].concat.apply(\n [],\n Object.keys(tensorMap)\n .map(key => tensorMap[key])\n .map(tensors => tensors.map(tensor => tensor.id)));\n return new Set(ids);\n }\n\n private checkTensorForDisposal(\n nodeName: string, node: Node, tensorMap: NamedTensorsMap,\n context: ExecutionContext, tensorsToKeep: Set,\n outputNames: string[],\n intermediateTensorConsumerCount: {[key: string]: number}) {\n // Skip output nodes and any control flow nodes, since its dependency is\n // tricky to track correctly.\n if (node.category === 'control' || outputNames.indexOf(nodeName) !== -1) {\n return;\n }\n\n tensorMap[nodeName].forEach(tensor => {\n if (tensor != null) {\n intermediateTensorConsumerCount[tensor.id] =\n (intermediateTensorConsumerCount[tensor.id] || 0) +\n node.children.length;\n }\n });\n node.inputs.forEach(input => {\n // Skip any control flow nodes, since its dependency is tricky to track\n // correctly.\n if (input.category !== 'control') {\n const tensors =\n getTensorsForCurrentContenxt(input.name, tensorMap, context);\n if (tensors != null) {\n tensors.forEach(tensor => {\n if (tensor && !tensor.kept && !tensorsToKeep.has(tensor.id)) {\n const count = intermediateTensorConsumerCount[tensor.id];\n if (count === 1) {\n tensor.dispose();\n delete intermediateTensorConsumerCount[tensor.id];\n } else if (count != null) {\n // only intermediate nodes has count set, inputs and weights\n // are not.\n intermediateTensorConsumerCount[tensor.id]--;\n }\n }\n });\n }\n }\n });\n }\n\n /**\n * Executes the inference for given input tensors in Async fashion.\n * @param inputs Tensor map for the model inputs, keyed by the input node\n * names.\n * @param outputs output node name from the Tensorflow model, if no outputs\n * are specified, the default outputs of the model would be used. You can\n * inspect intermediate nodes of the model by adding them to the outputs\n * array.\n */\n async executeAsync(inputs: NamedTensorMap, outputs?: string[]):\n Promise {\n return this._executeAsync(inputs, outputs);\n }\n\n disposeIntermediateTensors() {\n if (!this.clonedTensorsMap) {\n return;\n }\n Object.values(this.clonedTensorsMap).forEach(tensorsList => {\n for (const tensor of tensorsList) {\n if (tensor && !tensor.isDisposed) {\n tensor.dispose();\n }\n }\n });\n\n this.clonedTensorsMap = null;\n }\n\n getIntermediateTensors(): NamedTensorsMap {\n return this.clonedTensorsMap;\n }\n\n /**\n * Executes the inference for given input tensors in Async fashion.\n * @param inputs Tensor map for the model inputs, keyed by the input node\n * names.\n * @param outputs Optional. output node name from the Tensorflow model,\n * if no outputs are specified, the default outputs of the model would be\n * used. You can inspect intermediate nodes of the model by adding them to\n * the outputs array.\n * @param isFunctionExecution Optional. 
Flag for executing a function.\n * @param tensorArrayMap Optional, global TensorArray map by id. Used for\n * function execution.\n * @param tensorArrayMap Optinal global TensorList map by id. Used for\n * function execution.\n */\n private async _executeAsync(\n inputs: NamedTensorMap, outputs?: string[], isFunctionExecution = false,\n tensorArrayMap: TensorArrayMap = {},\n tensorListMap: TensorListMap = {}): Promise {\n // Dispose any tensors from a prior run to avoid leaking them.\n this.disposeIntermediateTensors();\n if (!isFunctionExecution) {\n inputs = this.mapInputs(inputs);\n this.checkInputs(inputs);\n this.checkInputShapeAndType(inputs);\n outputs = this.mapOutputs(outputs);\n this.checkOutputs(outputs);\n }\n\n // Keep tensors if KEEP_INTERMEDIATE_TENSORS is on.\n try {\n this.keepIntermediateTensors = env().getBool('KEEP_INTERMEDIATE_TENSORS');\n } catch (e) {\n this.keepIntermediateTensors = false;\n console.warn(e.message);\n }\n\n const context = new ExecutionContext(\n this.weightMap, tensorArrayMap, tensorListMap,\n this.functionExecutorMap);\n\n if (this.keepIntermediateTensors) {\n this.clonedTensorsMap = this.cloneTensorMap(this.weightMap);\n }\n\n // Graph with control flow op requires runtime evaluation of the execution\n // order, while without control flow the execution order is pre-determined\n // in the compile method.\n const tensorsMap = await this.executeWithControlFlow(\n inputs, context, outputs, isFunctionExecution);\n const results = outputs.map(name => getTensor(name, tensorsMap, context));\n\n // dispose all the intermediate tensors\n const outputIds = results.map(t => t.id);\n const inputIds = Object.keys(inputs).map(name => inputs[name].id);\n const keepIds =\n new Set([...outputIds, ...inputIds, ...this.weightIds]);\n\n Object.values(tensorsMap).forEach(tensorsList => {\n tensorsList.forEach(tensor => {\n if (tensor && !tensor.isDisposed && !keepIds.has(tensor.id)) {\n tensor.dispose();\n }\n });\n });\n\n // dispose the context for the root executor\n if (this.parent == null) {\n context.dispose(keepIds);\n }\n\n return results;\n }\n\n async executeFunctionAsync(\n inputs: Tensor[], tensorArrayMap: TensorArrayMap,\n tensorListMap: TensorListMap): Promise {\n const mappedInputs = inputs.reduce((map, tensor, index) => {\n map[this.inputs[index].name] = tensor;\n return map;\n }, {} as NamedTensorMap);\n\n return this._executeAsync(\n mappedInputs, this.outputNodes, true, tensorArrayMap, tensorListMap);\n }\n\n /**\n * When there are control flow nodes in the graph, the graph execution use\n * ExecutionContext to keep track of the frames and loop iterators.\n * @param inputs placeholder tensors for the graph.\n * @param context the execution context object for current execution.\n * @param outputNames Optional. output node name from the Tensorflow model,\n * if no outputs are specified, the default outputs of the model would be\n * used. 
You can inspect intermediate nodes of the model by adding them to\n * the outputs array.\n * @param isFunctionExecution Flag for executing a function.\n */\n private async executeWithControlFlow(\n inputs: NamedTensorMap, context: ExecutionContext, outputNames?: string[],\n isFunctionExecution?: boolean): Promise {\n const names = Object.keys(inputs);\n const inputNodes =\n names.map(name => this.graph.nodes[parseNodeName(name)[0]]);\n const outputNodeNames = outputNames.map(name => parseNodeName(name)[0]);\n let outputNodes = outputNodeNames.map(name => this.graph.nodes[name]);\n\n // If no outputs are specified, then use the default outputs of the model.\n if (outputNodes.length === 0) {\n outputNodes = this._outputs;\n }\n\n const {usedNodes, missingInputs, dynamicNode, syncInputs} =\n getExecutionSubgraph(\n inputs, outputNodes, this.weightMap, this._initNodes);\n\n // First nodes to execute include inputNodes, weights, and initNodes.\n const stack: NodeWithContexts[] = [\n ...inputNodes, ...this.graph.weights, ...(this._initNodes || [])\n ].map(node => {\n return {node, contexts: context.currentContext};\n });\n const tensorsMap: NamedTensorsMap = {...this.weightMap};\n Object.keys(inputs).forEach(name => {\n const [nodeName, index] = parseNodeName(name);\n const tensors: Tensor[] = [];\n tensors[index] = inputs[name];\n tensorsMap[nodeName] = tensors;\n });\n const intermediateTensorConsumerCount: {[key: number]: number} = {};\n const tensorsToKeep = this.getFrozenTensorIds(tensorsMap);\n const added: {[key: string]: boolean} = {};\n while (stack.length > 0) {\n const promises = this.processStack(\n inputNodes, stack, context, tensorsMap, added, tensorsToKeep,\n outputNodeNames, intermediateTensorConsumerCount, usedNodes);\n await Promise.all(promises);\n }\n if (dynamicNode == null && !isFunctionExecution) {\n console.warn(\n `This model execution did not contain any nodes with control flow ` +\n `or dynamic output shapes. You can use model.execute() instead.`);\n }\n const missingOutputs =\n outputNodes\n .filter(\n node => !isControlFlow(node) &&\n !getTensor(node.name, tensorsMap, context))\n .map(node => node.name);\n if (missingOutputs.length > 0) {\n let alternativeMsg = '';\n if (dynamicNode != null) {\n alternativeMsg =\n `Alternatively, to avoid the dynamic ops, use model.execute() ` +\n `and specify the inputs [${syncInputs}]`;\n }\n throw new Error(\n `Cannot compute the outputs [${missingOutputs}] from the provided ` +\n `inputs [${names}]. Consider providing the following inputs: ` +\n `[${missingInputs}]. 
${alternativeMsg}`);\n }\n return tensorsMap;\n }\n\n private processStack(\n inputNodes: Node[], stack: NodeWithContexts[], context: ExecutionContext,\n tensorMap: NamedTensorsMap, added: {[key: string]: boolean},\n tensorsToKeep: Set, outputNames: string[],\n intermediateTensorConsumerCount: {[key: number]: number},\n usedNodes: Set) {\n const promises: Array> = [];\n while (stack.length > 0) {\n const item = stack.pop();\n context.currentContext = item.contexts;\n let nodeName = '';\n // The tensor of the Enter op with isConstant set should be set\n // in the parent scope, so it will be available as constant for the\n // whole loop.\n if (item.node.op === 'Enter' &&\n getParamValue('isConstant', item.node, tensorMap, context)) {\n [nodeName] = getNodeNameAndIndex(item.node.name, context);\n }\n\n // only process nodes that are not in the tensorMap yet, this include\n // inputNodes and internal initNodes.\n if (tensorMap[item.node.name] == null) {\n const tensors =\n executeOp(item.node, tensorMap, context, this._resourceManager);\n if (!nodeName) {\n [nodeName] = getNodeNameAndIndex(item.node.name, context);\n }\n const currentContext = context.currentContext;\n if (util.isPromise(tensors)) {\n promises.push(tensors.then(t => {\n tensorMap[nodeName] = t;\n if (this.keepIntermediateTensors) {\n this.clonedTensorsMap[nodeName] = this.cloneTensorList(t);\n }\n context.currentContext = currentContext;\n this.checkTensorForDisposal(\n nodeName, item.node, tensorMap, context, tensorsToKeep,\n outputNames, intermediateTensorConsumerCount);\n this.processChildNodes(\n item.node, stack, context, tensorMap, added, usedNodes);\n return t;\n }));\n } else {\n tensorMap[nodeName] = tensors;\n if (this.keepIntermediateTensors) {\n this.clonedTensorsMap[nodeName] = this.cloneTensorList(tensors);\n }\n this.checkTensorForDisposal(\n nodeName, item.node, tensorMap, context, tensorsToKeep,\n outputNames, intermediateTensorConsumerCount);\n this.processChildNodes(\n item.node, stack, context, tensorMap, added, usedNodes);\n }\n } else {\n this.processChildNodes(\n item.node, stack, context, tensorMap, added, usedNodes);\n }\n }\n return promises;\n }\n\n private processChildNodes(\n node: Node, stack: NodeWithContexts[], context: ExecutionContext,\n tensorMap: NamedTensorsMap, added: {[key: string]: boolean},\n usedNodes: Set) {\n node.children.forEach((childNode) => {\n const [nodeName, ] = getNodeNameAndIndex(childNode.name, context);\n if (added[nodeName] || !usedNodes.has(childNode.name)) {\n return;\n }\n // Merge op can be pushed if any of its inputs has value.\n if (childNode.op === 'Merge') {\n if (childNode.inputNames.some(name => {\n return !!getTensor(name, tensorMap, context);\n })) {\n added[nodeName] = true;\n stack.push({contexts: context.currentContext, node: childNode});\n }\n } else // Otherwise all inputs must to have value.\n if (childNode.inputNames.every(name => {\n return !!getTensor(name, tensorMap, context);\n })) {\n added[nodeName] = true;\n stack.push({contexts: context.currentContext, node: childNode});\n }\n });\n }\n\n /**\n * Releases the memory used by the weight tensors.\n */\n dispose() {\n Object.keys(this.weightMap)\n .forEach(\n key => this.weightMap[key].forEach(tensor => tensor.dispose()));\n }\n\n private checkInputShapeAndType(inputs: NamedTensorMap) {\n Object.keys(inputs).forEach(name => {\n const input = inputs[name];\n const [nodeName, ] = parseNodeName(name);\n const node = this.graph.nodes[nodeName];\n if (node.attrParams['shape'] && 
node.attrParams['shape'].value) {\n const shape = node.attrParams['shape'].value as number[];\n const match = shape.length === input.shape.length &&\n input.shape.every(\n (dim, index) => shape[index] === -1 || shape[index] === dim);\n util.assert(\n match,\n () => `The shape of dict['${node.name}'] provided in ` +\n `model.execute(dict) must be [${shape}], but was ` +\n `[${input.shape}]`);\n }\n if (node.attrParams['dtype'] && node.attrParams['dtype'].value) {\n util.assert(\n input.dtype === node.attrParams['dtype'].value as string,\n () => `The dtype of dict['${node.name}'] provided in ` +\n `model.execute(dict) must be ` +\n `${node.attrParams['dtype'].value}, but was ${input.dtype}`);\n }\n });\n }\n\n private mapInputs(inputs: NamedTensorMap) {\n const result: NamedTensorMap = {};\n for (const inputName in inputs) {\n const tensor = this._signature ?.inputs ?.[inputName];\n if (tensor != null) {\n result[tensor.name] = inputs[inputName];\n } else {\n result[inputName] = inputs[inputName];\n }\n }\n return result;\n }\n\n private checkInputs(inputs: NamedTensorMap) {\n const notInGraph = Object.keys(inputs).filter(name => {\n const [nodeName] = parseNodeName(name);\n return this.graph.nodes[nodeName] == null;\n });\n if (notInGraph.length > 0) {\n throw new Error(\n `The dict provided in model.execute(dict) has ` +\n `keys: [${notInGraph}] that are not part of graph`);\n }\n }\n\n private mapOutputs(outputs: string[]) {\n return outputs.map(name => {\n const tensor = this._signature ?.outputs ?.[name];\n if (tensor != null) {\n return tensor.name;\n }\n return name;\n }, {});\n }\n\n private checkOutputs(outputs: string[]): void {\n outputs.forEach(name => {\n const [normalizedName] = parseNodeName(name);\n if (!this.graph.nodes[normalizedName]) {\n throw new Error(`The output '${name}' is not found in the graph`);\n }\n });\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {HashTableMap, NamedTensorMap} from '../data/types';\nimport {HashTable} from './hash_table';\n\n/**\n * Contains global resources of a model.\n */\nexport class ResourceManager {\n constructor(\n readonly hashTableNameToHandle: NamedTensorMap = {},\n readonly hashTableMap: HashTableMap = {}) {}\n\n /**\n * Register a `HashTable` in the resource manager.\n *\n * The `HashTable` can be retrieved by `resourceManager.getHashTableById`,\n * where id is the table handle tensor's id.\n *\n * @param name Op node name that creates the `HashTable`.\n * @param hashTable The `HashTable` to be added to resource manager.\n */\n addHashTable(name: string, hashTable: HashTable) {\n this.hashTableNameToHandle[name] = hashTable.handle;\n this.hashTableMap[hashTable.id] = hashTable;\n }\n\n /**\n * Get the table handle by node name.\n * @param name Op node name that creates the `HashTable`. 
This name is also\n * used in the inputs list of lookup and import `HashTable` ops.\n */\n getHashTableHandleByName(name: string) {\n return this.hashTableNameToHandle[name];\n }\n\n /**\n * Get the actual `HashTable` by its handle tensor's id.\n * @param id The id of the handle tensor.\n */\n getHashTableById(id: number): HashTable {\n return this.hashTableMap[id];\n }\n\n /**\n * Dispose `ResourceManager`, including its hashTables and tensors in them.\n */\n dispose() {\n for (const key in this.hashTableMap) {\n this.hashTableMap[key].clearAndClose();\n delete this.hashTableMap[key];\n }\n\n for (const name in this.hashTableNameToHandle) {\n this.hashTableNameToHandle[name].dispose();\n delete this.hashTableNameToHandle[name];\n }\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {dispose, InferenceModel, io, ModelPredictConfig, NamedTensorMap, Tensor, util} from '@tensorflow/tfjs-core';\n\nimport * as tensorflow from '../data/compiled_api';\nimport {NamedTensorsMap, TensorInfo} from '../data/types';\nimport {OperationMapper} from '../operations/operation_mapper';\n\nimport {GraphExecutor} from './graph_executor';\nimport {ResourceManager} from './resource_manager';\n\nexport const TFHUB_SEARCH_PARAM = '?tfjs-format=file';\nexport const DEFAULT_MODEL_NAME = 'model.json';\ntype Url = string|io.IOHandler|io.IOHandlerSync;\ntype UrlIOHandler = T extends string ? 
io.IOHandler : T;\n\n/**\n * A `tf.GraphModel` is a directed, acyclic graph built from a\n * SavedModel GraphDef and allows inference execution.\n *\n * A `tf.GraphModel` can only be created by loading from a model converted from\n * a [TensorFlow SavedModel](https://www.tensorflow.org/guide/saved_model) using\n * the command line converter tool and loaded via `tf.loadGraphModel`.\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\nexport class GraphModel implements\n InferenceModel {\n private executor: GraphExecutor;\n private version = 'n/a';\n private handler: UrlIOHandler;\n private artifacts: io.ModelArtifacts;\n private initializer: GraphExecutor;\n private resourceIdToCapturedInput: {[key: number]: Tensor};\n private resourceManager: ResourceManager;\n private signature: tensorflow.ISignatureDef;\n private initializerSignature: tensorflow.ISignatureDef;\n private structuredOutputKeys: string[];\n private readonly io: typeof io;\n\n // Returns the version information for the tensorflow model GraphDef.\n get modelVersion(): string {\n return this.version;\n }\n\n get inputNodes(): string[] {\n return this.executor.inputNodes;\n }\n\n get outputNodes(): string[] {\n return this.executor.outputNodes;\n }\n\n get inputs(): TensorInfo[] {\n return this.executor.inputs;\n }\n\n get outputs(): TensorInfo[] {\n return this.executor.outputs;\n }\n\n get weights(): NamedTensorsMap {\n return this.executor.weightMap;\n }\n\n get metadata(): {} {\n return this.artifacts.userDefinedMetadata;\n }\n\n get modelSignature(): {} {\n return this.signature;\n }\n\n get modelStructuredOutputKeys(): {} {\n return this.structuredOutputKeys;\n }\n\n /**\n * @param modelUrl url for the model, or an `io.IOHandler`.\n * @param weightManifestUrl url for the weight file generated by\n * scripts/convert.py script.\n * @param requestOption options for Request, which allows to send credentials\n * and custom headers.\n * @param onProgress Optional, progress callback function, fired periodically\n * before the load is completed.\n */\n constructor(\n private modelUrl: ModelURL, private loadOptions: io.LoadOptions = {},\n tfio = io) {\n this.io = tfio;\n if (loadOptions == null) {\n this.loadOptions = {};\n }\n this.resourceManager = new ResourceManager();\n }\n\n private findIOHandler() {\n type IOHandler = UrlIOHandler;\n const path = this.modelUrl;\n if ((path as io.IOHandler).load != null) {\n // Path is an IO Handler.\n this.handler = path as IOHandler;\n } else if (this.loadOptions.requestInit != null) {\n this.handler = this.io.browserHTTPRequest(\n path as string, this.loadOptions) as IOHandler;\n } else {\n const handlers =\n this.io.getLoadHandlers(path as string, this.loadOptions);\n if (handlers.length === 0) {\n // For backward compatibility: if no load handler can be found,\n // assume it is a relative http path.\n handlers.push(\n this.io.browserHTTPRequest(path as string, this.loadOptions));\n } else if (handlers.length > 1) {\n throw new Error(\n `Found more than one (${handlers.length}) load handlers for ` +\n `URL '${[path]}'`);\n }\n this.handler = handlers[0] as IOHandler;\n }\n }\n\n /**\n * Loads the model and weight files, construct the in memory weight map and\n * compile the inference graph.\n */\n load(): UrlIOHandler extends io.IOHandlerSync? 
boolean:\n Promise {\n type IOHandler = UrlIOHandler;\n this.findIOHandler();\n if (this.handler.load == null) {\n throw new Error(\n 'Cannot proceed with model loading because the IOHandler provided ' +\n 'does not have the `load` method implemented.');\n }\n\n type Result =\n IOHandler extends io.IOHandlerSync ? boolean : Promise;\n\n const loadResult = this.handler.load() as ReturnType;\n if (util.isPromise(loadResult)) {\n return loadResult.then(artifacts => this.loadSync(artifacts)) as Result;\n }\n\n return this.loadSync(loadResult) as Result;\n }\n\n /**\n * Synchronously construct the in memory weight map and\n * compile the inference graph.\n *\n * @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true}\n */\n loadSync(artifacts: io.ModelArtifacts) {\n this.artifacts = artifacts;\n const graph = this.artifacts.modelTopology as tensorflow.IGraphDef;\n\n let signature = this.artifacts.signature;\n if (this.artifacts.userDefinedMetadata != null) {\n const metadata = this.artifacts.userDefinedMetadata;\n if (metadata.signature != null) {\n signature = metadata.signature;\n }\n\n if (metadata.structuredOutputKeys != null) {\n this.structuredOutputKeys = metadata.structuredOutputKeys as string[];\n }\n }\n this.signature = signature;\n\n this.version = `${graph.versions.producer}.${graph.versions.minConsumer}`;\n const weightMap = this.io.decodeWeights(\n this.artifacts.weightData, this.artifacts.weightSpecs);\n this.executor = new GraphExecutor(\n OperationMapper.Instance.transformGraph(graph, this.signature));\n this.executor.weightMap = this.convertTensorMapToTensorsMap(weightMap);\n // Attach a model-level resourceManager to each executor to share resources,\n // such as `HashTable`.\n this.executor.resourceManager = this.resourceManager;\n\n if (artifacts.modelInitializer != null &&\n (artifacts.modelInitializer as tensorflow.IGraphDef).node != null) {\n const initializer =\n OperationMapper.Instance.transformGraph(artifacts.modelInitializer);\n this.initializer = new GraphExecutor(initializer);\n this.initializer.weightMap = this.executor.weightMap;\n // Attach a model-level resourceManager to the initializer, the\n // hashTables created from when executing the initializer will be stored\n // in the resourceManager.\n this.initializer.resourceManager = this.resourceManager;\n this.initializerSignature = artifacts.initializerSignature;\n }\n\n return true;\n }\n\n /**\n * Save the configuration and/or weights of the GraphModel.\n *\n * An `IOHandler` is an object that has a `save` method of the proper\n * signature defined. The `save` method manages the storing or\n * transmission of serialized data (\"artifacts\") that represent the\n * model's topology and weights onto or via a specific medium, such as\n * file downloads, local storage, IndexedDB in the web browser and HTTP\n * requests to a server. TensorFlow.js provides `IOHandler`\n * implementations for a number of frequently used saving mediums, such as\n * `tf.io.browserDownloads` and `tf.io.browserLocalStorage`. 
See `tf.io`\n * for more details.\n *\n * This method also allows you to refer to certain types of `IOHandler`s\n * as URL-like string shortcuts, such as 'localstorage://' and\n * 'indexeddb://'.\n *\n * Example 1: Save `model`'s topology and weights to browser [local\n * storage](https://developer.mozilla.org/en-US/docs/Web/API/Window/localStorage);\n * then load it back.\n *\n * ```js\n * const modelUrl =\n * 'https://storage.googleapis.com/tfjs-models/savedmodel/mobilenet_v2_1.0_224/model.json';\n * const model = await tf.loadGraphModel(modelUrl);\n * const zeros = tf.zeros([1, 224, 224, 3]);\n * model.predict(zeros).print();\n *\n * const saveResults = await model.save('localstorage://my-model-1');\n *\n * const loadedModel = await tf.loadGraphModel('localstorage://my-model-1');\n * console.log('Prediction from loaded model:');\n * model.predict(zeros).print();\n * ```\n *\n * @param handlerOrURL An instance of `IOHandler` or a URL-like,\n * scheme-based string shortcut for `IOHandler`.\n * @param config Options for saving the model.\n * @returns A `Promise` of `SaveResult`, which summarizes the result of\n * the saving, such as byte sizes of the saved artifacts for the model's\n * topology and weight values.\n *\n * @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true}\n */\n async save(handlerOrURL: io.IOHandler|string, config?: io.SaveConfig):\n Promise {\n if (typeof handlerOrURL === 'string') {\n const handlers = this.io.getSaveHandlers(handlerOrURL);\n if (handlers.length === 0) {\n throw new Error(\n `Cannot find any save handlers for URL '${handlerOrURL}'`);\n } else if (handlers.length > 1) {\n throw new Error(\n `Found more than one (${handlers.length}) save handlers for ` +\n `URL '${handlerOrURL}'`);\n }\n handlerOrURL = handlers[0];\n }\n if (handlerOrURL.save == null) {\n throw new Error(\n 'GraphModel.save() cannot proceed because the IOHandler ' +\n 'provided does not have the `save` attribute defined.');\n }\n\n return handlerOrURL.save(this.artifacts);\n }\n\n private addStructuredOutputNames(outputTensors: Tensor|Tensor[]) {\n if (this.structuredOutputKeys) {\n const outputTensorsArray =\n outputTensors instanceof Tensor ? [outputTensors] : outputTensors;\n const outputTensorMap: NamedTensorMap = {};\n\n outputTensorsArray.forEach(\n (outputTensor, i) => outputTensorMap[this.structuredOutputKeys[i]] =\n outputTensor);\n\n return outputTensorMap;\n }\n return outputTensors;\n }\n\n /**\n * Execute the inference for the input tensors.\n *\n * @param input The input tensors, when there is single input for the model,\n * inputs param should be a `tf.Tensor`. For models with mutliple inputs,\n * inputs params should be in either `tf.Tensor`[] if the input order is\n * fixed, or otherwise NamedTensorMap format.\n *\n * For model with multiple inputs, we recommend you use NamedTensorMap as the\n * input type, if you use `tf.Tensor`[], the order of the array needs to\n * follow the\n * order of inputNodes array. @see {@link GraphModel.inputNodes}\n *\n * You can also feed any intermediate nodes using the NamedTensorMap as the\n * input type. For example, given the graph\n * InputNode => Intermediate => OutputNode,\n * you can execute the subgraph Intermediate => OutputNode by calling\n * model.execute('IntermediateNode' : tf.tensor(...));\n *\n * This is useful for models that uses tf.dynamic_rnn, where the intermediate\n * state needs to be fed manually.\n *\n * For batch inference execution, the tensors for each input need to be\n * concatenated together. 
For example with mobilenet, the required input shape\n * is [1, 244, 244, 3], which represents the [batch, height, width, channel].\n * If we are provide a batched data of 100 images, the input tensor should be\n * in the shape of [100, 244, 244, 3].\n *\n * @param config Prediction configuration for specifying the batch size.\n * Currently the batch size option is ignored for graph model.\n *\n * @returns Inference result tensors. If the model is converted and it\n * originally had structured_outputs in tensorflow, then a NamedTensorMap\n * will be returned matching the structured_outputs. If no structured_outputs\n * are present, the output will be single `tf.Tensor` if the model has single\n * output node, otherwise Tensor[].\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n predict(inputs: Tensor|Tensor[]|NamedTensorMap, config?: ModelPredictConfig):\n Tensor|Tensor[]|NamedTensorMap {\n const outputTensors = this.execute(inputs, this.outputNodes);\n return this.addStructuredOutputNames(outputTensors);\n }\n\n /**\n * Execute the inference for the input tensors in async fashion, use this\n * method when your model contains control flow ops.\n *\n * @param input The input tensors, when there is single input for the model,\n * inputs param should be a `tf.Tensor`. For models with mutliple inputs,\n * inputs params should be in either `tf.Tensor`[] if the input order is\n * fixed, or otherwise NamedTensorMap format.\n *\n * For model with multiple inputs, we recommend you use NamedTensorMap as the\n * input type, if you use `tf.Tensor`[], the order of the array needs to\n * follow the\n * order of inputNodes array. @see {@link GraphModel.inputNodes}\n *\n * You can also feed any intermediate nodes using the NamedTensorMap as the\n * input type. For example, given the graph\n * InputNode => Intermediate => OutputNode,\n * you can execute the subgraph Intermediate => OutputNode by calling\n * model.execute('IntermediateNode' : tf.tensor(...));\n *\n * This is useful for models that uses tf.dynamic_rnn, where the intermediate\n * state needs to be fed manually.\n *\n * For batch inference execution, the tensors for each input need to be\n * concatenated together. For example with mobilenet, the required input shape\n * is [1, 244, 244, 3], which represents the [batch, height, width, channel].\n * If we are provide a batched data of 100 images, the input tensor should be\n * in the shape of [100, 244, 244, 3].\n *\n * @param config Prediction configuration for specifying the batch size.\n * Currently the batch size option is ignored for graph model.\n *\n * @returns A Promise of inference result tensors. If the model is converted\n * and it originally had structured_outputs in tensorflow, then a\n * NamedTensorMap will be returned matching the structured_outputs. 
If no\n * structured_outputs are present, the output will be single `tf.Tensor` if\n * the model has single output node, otherwise Tensor[].\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n async predictAsync(\n inputs: Tensor|Tensor[]|NamedTensorMap,\n config?: ModelPredictConfig): Promise {\n const outputTensors = await this.executeAsync(inputs, this.outputNodes);\n return this.addStructuredOutputNames(outputTensors);\n }\n\n private normalizeInputs(inputs: Tensor|Tensor[]|\n NamedTensorMap): NamedTensorMap {\n if (!(inputs instanceof Tensor) && !Array.isArray(inputs)) {\n // The input is already a NamedTensorMap.\n const signatureInputs = this.signature?.inputs;\n if (signatureInputs != null) {\n for (const input in signatureInputs) {\n const tensor = signatureInputs[input];\n if (tensor.resourceId != null) {\n inputs[input] = this.resourceIdToCapturedInput[tensor.resourceId];\n }\n }\n }\n return inputs;\n }\n inputs = Array.isArray(inputs) ? inputs : [inputs];\n\n const numCapturedInputs =\n Object.keys(this.resourceIdToCapturedInput).length;\n if (inputs.length + numCapturedInputs !== this.inputNodes.length) {\n throw new Error(`Input tensor count mismatch, the graph model has ${\n this.inputNodes.length -\n numCapturedInputs} non-resource placeholders, while there are ${\n inputs.length} input tensors provided.`);\n }\n\n let inputIndex = 0;\n return this.inputNodes.reduce((map, inputName) => {\n const resourceId = this.signature?.inputs?.[inputName]?.resourceId;\n if (resourceId != null) {\n map[inputName] = this.resourceIdToCapturedInput[resourceId];\n } else {\n map[inputName] = (inputs as Tensor[])[inputIndex++];\n }\n return map;\n }, {} as NamedTensorMap);\n }\n\n private normalizeOutputs(outputs: string|string[]): string[] {\n outputs = outputs || this.outputNodes;\n return !Array.isArray(outputs) ? [outputs] : outputs;\n }\n\n private executeInitializerGraph() {\n if (this.initializer == null) {\n return [];\n }\n if (this.initializerSignature == null) {\n return this.initializer.execute({}, []);\n } else {\n return this.initializer.execute(\n {}, Object.keys(this.initializerSignature.outputs));\n }\n }\n\n private async executeInitializerGraphAsync() {\n if (this.initializer == null) {\n return [];\n }\n if (this.initializerSignature == null) {\n return this.initializer.executeAsync({}, []);\n } else {\n return this.initializer.executeAsync(\n {}, Object.keys(this.initializerSignature.outputs));\n }\n }\n\n private setResourceIdToCapturedInput(outputs: Tensor[]) {\n this.resourceIdToCapturedInput = {};\n\n if (this.initializerSignature) {\n const signatureOutputs = this.initializerSignature.outputs;\n const outputNames = Object.keys(signatureOutputs);\n for (let i = 0; i < outputNames.length; i++) {\n const outputName = outputNames[i];\n const tensorInfo = signatureOutputs[outputName];\n this.resourceIdToCapturedInput[tensorInfo.resourceId] = outputs[i];\n }\n }\n }\n\n /**\n * Executes inference for the model for given input tensors.\n * @param inputs tensor, tensor array or tensor map of the inputs for the\n * model, keyed by the input node names.\n * @param outputs output node name from the TensorFlow model, if no\n * outputs are specified, the default outputs of the model would be used.\n * You can inspect intermediate nodes of the model by adding them to the\n * outputs array.\n *\n * @returns A single tensor if provided with a single output or no outputs\n * are provided and there is only one default output, otherwise return a\n * tensor array. 
The order of the tensor array is the same as the outputs\n * if provided, otherwise the order of outputNodes attribute of the model.\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n execute(inputs: Tensor|Tensor[]|NamedTensorMap, outputs?: string|string[]):\n Tensor|Tensor[] {\n if (this.resourceIdToCapturedInput == null) {\n this.setResourceIdToCapturedInput(this.executeInitializerGraph());\n }\n inputs = this.normalizeInputs(inputs);\n outputs = this.normalizeOutputs(outputs);\n const result = this.executor.execute(inputs, outputs);\n return result.length > 1 ? result : result[0];\n }\n\n /**\n * Executes inference for the model for given input tensors in async\n * fashion, use this method when your model contains control flow ops.\n * @param inputs tensor, tensor array or tensor map of the inputs for the\n * model, keyed by the input node names.\n * @param outputs output node name from the TensorFlow model, if no outputs\n * are specified, the default outputs of the model would be used. You can\n * inspect intermediate nodes of the model by adding them to the outputs\n * array.\n *\n * @returns A Promise of single tensor if provided with a single output or\n * no outputs are provided and there is only one default output, otherwise\n * return a tensor map.\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n async executeAsync(\n inputs: Tensor|Tensor[]|NamedTensorMap,\n outputs?: string|string[]): Promise {\n if (this.resourceIdToCapturedInput == null) {\n this.setResourceIdToCapturedInput(\n await this.executeInitializerGraphAsync());\n }\n inputs = this.normalizeInputs(inputs);\n outputs = this.normalizeOutputs(outputs);\n const result = await this.executor.executeAsync(inputs, outputs);\n return result.length > 1 ? result : result[0];\n }\n\n /**\n * Get intermediate tensors for model debugging mode (flag\n * KEEP_INTERMEDIATE_TENSORS is true).\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n getIntermediateTensors(): NamedTensorsMap {\n return this.executor.getIntermediateTensors();\n }\n\n /**\n * Dispose intermediate tensors for model debugging mode (flag\n * KEEP_INTERMEDIATE_TENSORS is true).\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n disposeIntermediateTensors() {\n this.executor.disposeIntermediateTensors();\n }\n\n private convertTensorMapToTensorsMap(map: NamedTensorMap): NamedTensorsMap {\n return Object.keys(map).reduce((newMap: NamedTensorsMap, key) => {\n newMap[key] = [map[key]];\n return newMap;\n }, {});\n }\n\n /**\n * Releases the memory used by the weight tensors and resourceManager.\n *\n * @doc {heading: 'Models', subheading: 'Classes'}\n */\n dispose() {\n this.executor.dispose();\n\n if (this.initializer) {\n this.initializer.dispose();\n if (this.resourceIdToCapturedInput) {\n dispose(this.resourceIdToCapturedInput);\n }\n }\n\n this.resourceManager.dispose();\n }\n}\n\n/**\n * Load a graph model given a URL to the model definition.\n *\n * Example of loading MobileNetV2 from a URL and making a prediction with a\n * zeros input:\n *\n * ```js\n * const modelUrl =\n * 'https://storage.googleapis.com/tfjs-models/savedmodel/mobilenet_v2_1.0_224/model.json';\n * const model = await tf.loadGraphModel(modelUrl);\n * const zeros = tf.zeros([1, 224, 224, 3]);\n * model.predict(zeros).print();\n * ```\n *\n * Example of loading MobileNetV2 from a TF Hub URL and making a prediction\n * with a zeros input:\n *\n * ```js\n * const modelUrl =\n * 
'https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/classification/2';\n * const model = await tf.loadGraphModel(modelUrl, {fromTFHub: true});\n * const zeros = tf.zeros([1, 224, 224, 3]);\n * model.predict(zeros).print();\n * ```\n * @param modelUrl The url or an `io.IOHandler` that loads the model.\n * @param options Options for the HTTP request, which allows to send\n * credentials\n * and custom headers.\n *\n * @doc {heading: 'Models', subheading: 'Loading'}\n */\nexport async function loadGraphModel(\n modelUrl: string|io.IOHandler, options: io.LoadOptions = {},\n tfio = io): Promise {\n if (modelUrl == null) {\n throw new Error(\n 'modelUrl in loadGraphModel() cannot be null. Please provide a url ' +\n 'or an IOHandler that loads the model');\n }\n if (options == null) {\n options = {};\n }\n\n if (options.fromTFHub && typeof modelUrl === 'string') {\n modelUrl = getTFHubUrl(modelUrl);\n }\n const model = new GraphModel(modelUrl, options, tfio);\n await model.load();\n return model;\n}\n\n/**\n * Load a graph model given a synchronous IO handler with a 'load' method.\n *\n * @param modelSource The `io.IOHandlerSync` that loads the model, or the\n * `io.ModelArtifacts` that encode the model, or a tuple of\n * `[io.ModelJSON, ArrayBuffer]` of which the first element encodes the\n * model and the second contains the weights.\n *\n * @doc {heading: 'Models', subheading: 'Loading'}\n */\nexport function loadGraphModelSync(\n modelSource: io.IOHandlerSync|\n io.ModelArtifacts|[io.ModelJSON, /* Weights */ ArrayBuffer]):\n GraphModel {\n if (modelSource == null) {\n throw new Error(\n 'modelUrl in loadGraphModelSync() cannot be null. Please provide ' +\n 'model artifacts or an IOHandler that loads the model');\n }\n\n let ioHandler: io.IOHandlerSync;\n if (modelSource instanceof Array) {\n const [modelJSON, weights] = modelSource;\n if (!modelJSON) {\n throw new Error('modelJSON must be the first element of the array');\n }\n if (!weights || !(weights instanceof ArrayBuffer)) {\n throw new Error(\n 'An ArrayBuffer of weights must be the second element of' +\n ' the array');\n }\n if (!('modelTopology' in modelJSON)) {\n throw new Error('Model JSON is missing \\'modelTopology\\'');\n }\n if (!('weightsManifest' in modelJSON)) {\n throw new Error('Model JSON is missing \\'weightsManifest\\'');\n }\n\n const weightSpecs = io.getWeightSpecs(modelJSON.weightsManifest);\n const modelArtifacts =\n io.getModelArtifactsForJSONSync(modelJSON, weightSpecs, weights);\n ioHandler = io.fromMemorySync(modelArtifacts);\n } else if ('load' in modelSource) {\n // Then modelSource is already an IOHandlerSync.\n ioHandler = modelSource;\n } else if (\n 'modelTopology' in modelSource && 'weightSpecs' in modelSource &&\n 'weightData' in modelSource) {\n // modelSource is of type ModelArtifacts.\n ioHandler = io.fromMemorySync(modelSource);\n } else {\n throw new Error('Unknown model format');\n }\n\n const model = new GraphModel(ioHandler);\n model.load();\n return model;\n}\n\nfunction getTFHubUrl(modelUrl: string): string {\n if (!modelUrl.endsWith('/')) {\n modelUrl = (modelUrl) + '/';\n }\n return `${modelUrl}${DEFAULT_MODEL_NAME}${TFHUB_SEARCH_PARAM}`;\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * =============================================================================\n */\n\nimport * as tf from '@tensorflow/tfjs-core';\n\n// tslint:disable:no-any\n\n/**\n * A return value for a mapping function that can be applied via deepMap.\n *\n * If recurse is true, the value should be empty, and iteration will continue\n * into the object or array.\n */\nexport type DeepMapResult = {\n value: any,\n recurse: boolean\n};\n\n/**\n * Apply a mapping function to a nested structure in a recursive manner.\n *\n * The result of the mapping is an object with the same nested structure (i.e.,\n * of arrays and dicts) as the input, except that some subtrees are replaced,\n * according to the results of the mapping function.\n *\n * Mappings are memoized. Thus, if the nested structure contains the same\n * object in multiple positions, the output will contain the same mapped object\n * in those positions. Cycles are not supported, however.\n *\n * @param input: The object to which to apply the mapping function.\n * @param mapFn: A function that expects a single node of the object tree, and\n * returns a `DeepMapResult`. The `DeepMapResult` either provides a\n * replacement value for that node (i.e., replacing the subtree), or indicates\n * that the node should be processed recursively.\n */\nexport function deepMap(input: any, mapFn: (x: any) => DeepMapResult): any|\n any[] {\n return deepMapInternal(input, mapFn);\n}\n\n/**\n * @param seen: A Map of known object mappings (i.e., memoized results of\n * `mapFn()`)\n * @param containedIn: An set containing objects on the reference path currently\n * being processed (used to detect cycles).\n */\nfunction deepMapInternal(\n input: any, mapFn: (x: any) => DeepMapResult,\n seen: Map = new Map(), containedIn: Set<{}> = new Set()): any|\n any[] {\n if (input == null) {\n return null;\n }\n if (typeof Blob === 'function' && input instanceof Blob) {\n return input.slice();\n }\n\n if (containedIn.has(input)) {\n throw new Error('Circular references are not supported.');\n }\n if (seen.has(input)) {\n return seen.get(input);\n }\n const result = mapFn(input);\n\n if (result.recurse && result.value !== null) {\n throw new Error(\n 'A deep map function may not return both a value and recurse=true.');\n }\n\n if (!result.recurse) {\n seen.set(input, result.value);\n return result.value;\n } else if (isIterable(input)) {\n // tslint:disable-next-line:no-any\n const mappedIterable: any|any[] = Array.isArray(input) ? 
[] : {};\n containedIn.add(input);\n for (const k in input) {\n const child = input[k];\n const childResult = deepMapInternal(child, mapFn, seen, containedIn);\n mappedIterable[k] = childResult;\n }\n containedIn.delete(input);\n if (input.__proto__) {\n mappedIterable.__proto__ = input.__proto__;\n }\n return mappedIterable;\n } else {\n throw new Error(`Can't recurse into non-iterable type: ${input}`);\n }\n}\n\n// TODO(soergel, kangyizhang) Reconsider naming of deepZip() to avoid confusion\n// with zip()\n\n/**\n * Zip nested structures together in a recursive manner.\n *\n * This has the effect of transposing or pivoting data, e.g. converting it from\n * a row-major representation to a column-major representation.\n *\n * For example, `deepZip([{a: 1, b: 2}, {a: 3, b: 4}])` returns\n * `{a: [1, 3], b: [2, 4]}`.\n *\n * The inputs should all have the same nested structure (i.e., of arrays and\n * dicts). The result is a single object with the same nested structure, where\n * the leaves are arrays collecting the values of the inputs at that location\n * (or, optionally, the result of a custom function applied to those arrays).\n *\n * @param inputs: An array of the objects to zip together.\n * @param zipFn: (optional) A function that expects an array of elements at a\n * single node of the object tree, and returns a `DeepMapResult`. The\n * `DeepMapResult` either provides a result value for that node (i.e.,\n * representing the subtree), or indicates that the node should be processed\n * recursively. The default zipFn recurses as far as possible and places\n * arrays at the leaves.\n */\nexport function deepZip(\n inputs: any[], zipFn: (xs: any[]) => DeepMapResult = zipToList): any|any[] {\n return deepZipInternal(inputs, zipFn);\n}\n\n/**\n * @param containedIn: An set containing objects on the reference path currently\n * being processed (used to detect cycles).\n */\nfunction deepZipInternal(\n inputs: any[], zipFn: (xs: any[]) => DeepMapResult,\n containedIn: Set<{}> = new Set()): any|any[] {\n // The recursion follows the structure of input 0; it's assumed that all the\n // other inputs have the same structure.\n const input = inputs[0];\n if (containedIn.has(input)) {\n throw new Error('Circular references are not supported.');\n }\n const result = zipFn(inputs);\n\n if (result.recurse && result.value !== null) {\n throw new Error(\n 'A deep zip function may not return both a value and recurse=true.');\n }\n\n if (!result.recurse) {\n return result.value;\n } else if (isIterable(input)) {\n // tslint:disable-next-line:no-any\n const mappedIterable: any|any[] = Array.isArray(input) ? 
[] : {};\n containedIn.add(input);\n for (const k in input) {\n const children = inputs.map(x => x[k]);\n const childResult = deepZipInternal(children, zipFn, containedIn);\n mappedIterable[k] = childResult;\n }\n containedIn.delete(input);\n return mappedIterable;\n } else {\n throw new Error(`Can't recurse into non-iterable type: ${input}`);\n }\n}\n\n// tslint:disable-next-line:no-any\nexport function zipToList(x: any[]): DeepMapResult {\n if (x === null) {\n return null;\n }\n // TODO(soergel): validate array type?\n\n if (isIterable(x[0])) {\n return {value: null, recurse: true};\n } else {\n return {value: x, recurse: false};\n }\n}\n\n/**\n * A return value for an async map function for use with deepMapAndAwaitAll.\n *\n * If recurse is true, the value should be empty, and iteration will continue\n * into the object or array.\n */\nexport type DeepMapAsyncResult = {\n value: Promise,\n recurse: boolean\n};\n\n/**\n * Apply an async mapping function to a nested structure in a recursive manner.\n *\n * This first creates a nested structure of Promises, and then awaits all of\n * those, resulting in a single Promise for a resolved nested structure.\n *\n * The result of the mapping is an object with the same nested structure (i.e.,\n * of arrays and dicts) as the input, except that some subtrees are replaced,\n * according to the results of the mapping function.\n *\n * Mappings are memoized. Thus, if the nested structure contains the same\n * object in multiple positions, the output will contain the same mapped object\n * in those positions. Cycles are not supported, however.\n *\n * @param input: The object to which to apply the mapping function.\n * @param mapFn: A function that expects a single node of the object tree, and\n * returns a `DeepMapAsyncResult`. The `DeepMapAsyncResult` either provides\n * a `Promise` for a replacement value for that node (i.e., replacing the\n * subtree), or indicates that the node should be processed recursively. 
Note\n * that the decision whether or not to recurse must be made immediately; only\n * the mapped value may be promised.\n */\nexport async function deepMapAndAwaitAll(\n input: any, mapFn: (x: any) => DeepMapAsyncResult): Promise {\n const seen: Map = new Map();\n\n // First do a normal deepMap, collecting Promises in 'seen' as a side effect.\n deepMapInternal(input, mapFn, seen);\n\n // Replace the Promises in 'seen' in place.\n // Note TypeScript provides no async map iteration, and regular map iteration\n // is broken too, so sadly we have to do Array.from() to make it work.\n // (There's no advantage to Promise.all(), and that would be tricky anyway.)\n for (const key of Array.from(seen.keys())) {\n const value = seen.get(key);\n if (tf.util.isPromise(value)) {\n const mappedValue = await value;\n seen.set(key, mappedValue);\n }\n }\n\n // Normal deepMap again, this time filling in the resolved values.\n // It's unfortunate that we have to do two passes.\n // TODO(soergel): test performance and think harder about a fast solution.\n const result = deepMapInternal(input, mapFn, seen);\n return result;\n}\n\n/**\n * Determine whether the argument is iterable.\n *\n * @returns true if the argument is an array or any non-Tensor object.\n */\n// tslint:disable-next-line:no-any\nexport function isIterable(obj: any): boolean {\n let isTextDecoder = false;\n if (tf.env().get('IS_BROWSER')) {\n isTextDecoder = obj instanceof TextDecoder;\n } else {\n // tslint:disable-next-line:no-require-imports\n const {StringDecoder} = require('string_decoder');\n isTextDecoder = obj instanceof StringDecoder;\n }\n return obj != null && (!ArrayBuffer.isView(obj)) &&\n (Array.isArray(obj) ||\n (typeof obj === 'object' && !(obj instanceof tf.Tensor) &&\n !(obj instanceof Promise) && !isTextDecoder));\n}\n\n/**\n * Determine whether the argument can be converted to Tensor.\n *\n * Tensors, primitives, arrays, and TypedArrays all qualify; anything else does\n * not.\n *\n * @returns true if the argument can be converted to Tensor.\n */\n// tslint:disable-next-line:no-any\nexport function canTensorify(obj: any): boolean {\n return obj == null || isPrimitive(obj) || Array.isArray(obj) ||\n (typeof obj === 'object' && (obj instanceof tf.Tensor)) ||\n tf.util.isTypedArray(obj);\n}\n\n/**\n * Returns true if the given `value` is a primitive type. Otherwise returns\n * false. This is equivalant to node util.isPrimitive\n */\nfunction isPrimitive(value: any): boolean {\n return (\n value === null ||\n (typeof value !== 'object' && typeof value !== 'function'));\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * =============================================================================\n */\n\nimport * as tf from '@tensorflow/tfjs-core';\nimport {deepMap, DeepMapResult, isIterable} from './deep_map';\n\nexport function deepClone(container: T): T {\n return deepMap(container, cloneIfTensor);\n}\n\n// tslint:disable-next-line: no-any\nfunction cloneIfTensor(item: any): DeepMapResult {\n if (item instanceof tf.Tensor) {\n return ({value: item.clone(), recurse: false});\n } else if (isIterable(item)) {\n return {value: null, recurse: true};\n } else {\n return {value: item, recurse: false};\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * =============================================================================\n */\n\n/**\n * A ring buffer, providing O(1) FIFO, LIFO, and related operations.\n */\nexport class RingBuffer {\n // Note we store the indices in the range 0 <= index < 2*capacity.\n // This allows us to distinguish the full from the empty case.\n // See https://www.snellman.net/blog/archive/2016-12-13-ring-buffers/\n protected begin = 0; // inclusive\n protected end = 0; // exclusive\n protected doubledCapacity: number;\n\n protected data: T[];\n\n /**\n * Constructs a `RingBuffer`.\n * @param capacity The number of items that the buffer can accomodate.\n */\n constructor(public capacity: number) {\n if (capacity == null) {\n throw new RangeError('Can\\'t create a ring buffer of unknown capacity.');\n }\n if (capacity < 1) {\n throw new RangeError('Can\\'t create ring buffer of capacity < 1.');\n }\n this.data = new Array(capacity);\n this.doubledCapacity = 2 * capacity;\n }\n\n /**\n * Map any index into the range 0 <= index < 2*capacity.\n */\n protected wrap(index: number) {\n // don't trust % on negative numbers\n while (index < 0) {\n index += this.doubledCapacity;\n }\n return index % this.doubledCapacity;\n }\n\n protected get(index: number) {\n if (index < 0) {\n throw new RangeError('Can\\'t get item at a negative index.');\n }\n return this.data[index % this.capacity];\n }\n\n protected set(index: number, value: T) {\n if (index < 0) {\n throw new RangeError('Can\\'t set item at a negative index.');\n }\n this.data[index % this.capacity] = value;\n }\n\n /**\n * Returns the current number of items in the buffer.\n */\n length(): number {\n let length = this.end - this.begin;\n if (length < 0) {\n 
length = this.doubledCapacity + length;\n }\n return length;\n }\n\n /**\n * Reports whether the buffer is full.\n * @returns true if the number of items in the buffer equals its capacity, and\n * false otherwise.\n */\n isFull() {\n return this.length() === this.capacity;\n }\n\n /**\n * Reports whether the buffer is empty.\n * @returns true if the number of items in the buffer equals zero, and\n * false otherwise.\n */\n isEmpty() {\n return this.length() === 0;\n }\n\n /**\n * Adds an item to the end of the buffer.\n */\n push(value: T) {\n if (this.isFull()) {\n throw new RangeError('Ring buffer is full.');\n }\n this.set(this.end, value);\n this.end = this.wrap(this.end + 1);\n }\n\n /**\n * Adds many items to the end of the buffer, in order.\n */\n pushAll(values: T[]) {\n for (const value of values) {\n this.push(value);\n }\n }\n\n /**\n * Removes and returns the last item in the buffer.\n */\n pop(): T {\n if (this.isEmpty()) {\n throw new RangeError('Ring buffer is empty.');\n }\n this.end = this.wrap(this.end - 1);\n const result = this.get(this.end);\n this.set(this.end, undefined);\n return result;\n }\n\n /**\n * Adds an item to the beginning of the buffer.\n */\n unshift(value: T) {\n if (this.isFull()) {\n throw new RangeError('Ring buffer is full.');\n }\n this.begin = this.wrap(this.begin - 1);\n this.set(this.begin, value);\n }\n\n /**\n * Removes and returns the first item in the buffer.\n */\n shift(): T {\n if (this.isEmpty()) {\n throw new RangeError('Ring buffer is empty.');\n }\n const result = this.get(this.begin);\n this.set(this.begin, undefined);\n this.begin = this.wrap(this.begin + 1);\n return result;\n }\n\n /**\n * Removes and returns a specific item in the buffer, and moves the last item\n * to the vacated slot. This is useful for implementing a shuffling stream.\n * Note that this operation necessarily scrambles the original order.\n *\n * @param relativeIndex: the index of the item to remove, relative to the\n * first item in the buffer (e.g., hiding the ring nature of the underlying\n * storage).\n */\n shuffleExcise(relativeIndex: number): T {\n if (this.isEmpty()) {\n throw new RangeError('Ring buffer is empty.');\n }\n const index = this.wrap(this.begin + relativeIndex);\n const result = this.get(index);\n this.set(index, this.pop());\n return result;\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * =============================================================================\n */\n\nimport {RingBuffer} from './ring_buffer';\n\nexport class GrowingRingBuffer extends RingBuffer {\n private static INITIAL_CAPACITY = 32;\n\n /**\n * Constructs a `GrowingRingBuffer`.\n */\n constructor() {\n super(GrowingRingBuffer.INITIAL_CAPACITY);\n }\n\n override isFull() {\n return false;\n }\n\n override push(value: T) {\n if (super.isFull()) {\n this.expand();\n }\n super.push(value);\n }\n\n override unshift(value: T) {\n if (super.isFull()) {\n this.expand();\n }\n super.unshift(value);\n }\n\n /**\n * Doubles the capacity of the buffer.\n */\n private expand() {\n const newCapacity = this.capacity * 2;\n const newData = new Array(newCapacity);\n const len = this.length();\n\n // Rotate the buffer to start at index 0 again, since we can't just\n // allocate more space at the end.\n for (let i = 0; i < len; i++) {\n newData[i] = this.get(this.wrap(this.begin + i));\n }\n\n this.data = newData;\n this.capacity = newCapacity;\n this.doubledCapacity = 2 * this.capacity;\n this.begin = 0;\n this.end = len;\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * =============================================================================\n */\n\nimport * as tf from '@tensorflow/tfjs-core';\nimport * as seedrandom from 'seedrandom';\n\nimport {Container} from '../types';\nimport {deepClone} from '../util/deep_clone';\nimport {deepMapAndAwaitAll, DeepMapAsyncResult, DeepMapResult, deepZip, zipToList} from '../util/deep_map';\nimport {GrowingRingBuffer} from '../util/growing_ring_buffer';\nimport {RingBuffer} from '../util/ring_buffer';\n\n/**\n * A nested structure of LazyIterators, used as the input to zip().\n */\nexport type IteratorContainer = Container>;\n\n// Here we implement a simple asynchronous iterator.\n// This lets us avoid using either third-party stream libraries or\n// recent TypeScript language support requiring polyfills.\n\n/**\n * Create a `LazyIterator` from an array of items.\n */\nexport function iteratorFromItems(items: T[]): LazyIterator {\n return new ArrayIterator(items);\n}\n\n/**\n * Create a `LazyIterator` of incrementing integers.\n */\nexport function iteratorFromIncrementing(start: number): LazyIterator {\n let i = start;\n return iteratorFromFunction(() => ({value: i++, done: false}));\n}\n\n/**\n * 
Create a `LazyIterator` from a function.\n *\n * ```js\n * let i = -1;\n * const func = () =>\n * ++i < 5 ? {value: i, done: false} : {value: null, done: true};\n * const iter = tf.data.iteratorFromFunction(func);\n * await iter.forEachAsync(e => console.log(e));\n * ```\n *\n * @param func A function that produces data on each call.\n */\nexport function iteratorFromFunction(\n func: () =>\n IteratorResult| Promise>): LazyIterator {\n return new FunctionCallIterator(func);\n}\n\n/**\n * Create a `LazyIterator` by concatenating underlying streams, which are\n * themselves provided as a stream.\n *\n * This can also be thought of as a \"stream flatten\" operation.\n *\n * @param baseIterators A stream of streams to be concatenated.\n * @param baseErrorHandler An optional function that can intercept `Error`s\n * raised during a `next()` call on the base stream. This function can decide\n * whether the error should be propagated, whether the error should be\n * ignored, or whether the base stream should be terminated.\n */\nexport function iteratorFromConcatenated(\n baseIterators: LazyIterator>,\n baseErrorHandler?: (e: Error) => boolean): LazyIterator {\n return new ChainedIterator(baseIterators, baseErrorHandler);\n}\n\n/**\n * Create a `LazyIterator` by concatenating streams produced by calling a\n * stream-generating function a given number of times.\n *\n * Since a `LazyIterator` is read-once, it cannot be repeated, but this\n * function can be used to achieve a similar effect:\n *\n * LazyIterator.ofConcatenatedFunction(() => new MyIterator(), 6);\n *\n * @param iteratorFunc: A function that produces a new stream on each call.\n * @param count: The number of times to call the function.\n * @param baseErrorHandler An optional function that can intercept `Error`s\n * raised during a `next()` call on the base stream. This function can decide\n * whether the error should be propagated, whether the error should be\n * ignored, or whether the base stream should be terminated.\n */\nexport function iteratorFromConcatenatedFunction(\n iteratorFunc: () => IteratorResult>, count: number,\n baseErrorHandler?: (e: Error) => boolean): LazyIterator {\n return iteratorFromConcatenated(\n iteratorFromFunction(iteratorFunc).take(count), baseErrorHandler);\n}\n\n/**\n * Create a `LazyIterator` by zipping together an array, dict, or nested\n * structure of `LazyIterator`s (and perhaps additional constants).\n *\n * The underlying streams must provide elements in a consistent order such\n * that they correspond.\n *\n * Typically, the underlying streams should have the same number of\n * elements. If they do not, the behavior is determined by the\n * `mismatchMode` argument.\n *\n * The nested structure of the `iterators` argument determines the\n * structure of elements in the resulting iterator.\n *\n * @param iterators: An array or object containing LazyIterators at the\n * leaves.\n * @param mismatchMode: Determines what to do when one underlying iterator\n * is exhausted before the others. `ZipMismatchMode.FAIL` (the default)\n * causes an error to be thrown in this case. 
`ZipMismatchMode.SHORTEST`\n * causes the zipped iterator to terminate with the furst underlying\n * streams, so elements remaining on the longer streams are ignored.\n * `ZipMismatchMode.LONGEST` causes the zipped stream to continue, filling\n * in nulls for the exhausted streams, until all streams are exhausted.\n */\nexport function iteratorFromZipped(\n iterators: IteratorContainer,\n mismatchMode: ZipMismatchMode = ZipMismatchMode.FAIL): LazyIterator {\n return new ZipIterator(iterators, mismatchMode);\n}\n\n/**\n * An asynchronous iterator, providing lazy access to a potentially\n * unbounded stream of elements.\n *\n * Iterator can be obtained from a dataset:\n * `const iter = await dataset.iterator();`\n */\nexport abstract class LazyIterator {\n // This class implements AsyncIterator, but we have not yet set the\n // TypeScript --downlevelIteration flag to enable that.\n\n abstract summary(): string;\n\n /**\n * Returns a `Promise` for the next element in the stream.\n *\n * When an item can be provided successfully, the return value is\n * `{value:T, done:false}`.\n *\n * Calling next() on a closed stream returns `{value:null, done:true}`.\n */\n abstract next(): Promise>;\n\n /**\n * Collect all remaining elements of a bounded stream into an array.\n * Obviously this will succeed only for small streams that fit in memory.\n * Useful for testing.\n *\n * @returns A Promise for an array of stream elements, which will resolve\n * when the stream is exhausted.\n */\n async toArray(): Promise {\n const result: T[] = [];\n let x = await this.next();\n while (!x.done) {\n result.push(x.value);\n x = await this.next();\n }\n return result;\n }\n\n /**\n * Collect all elements of this dataset into an array with prefetching 100\n * elements. This is useful for testing, because the prefetch changes the\n * order in which the Promises are resolved along the processing pipeline.\n * This may help expose bugs where results are dependent on the order of\n * Promise resolution rather than on the logical order of the stream (i.e.,\n * due to hidden mutable state).\n *\n * @returns A Promise for an array of stream elements, which will resolve\n * when the stream is exhausted.\n */\n async toArrayForTest(): Promise {\n const stream = this.prefetch(100);\n const result: T[] = [];\n let x = await stream.next();\n while (!x.done) {\n result.push(x.value);\n x = await stream.next();\n }\n return result;\n }\n\n /**\n * Draw items from the stream until it is exhausted.\n *\n * This can be useful when the stream has side effects but no output. In\n * that case, calling this function guarantees that the stream will be\n * fully processed.\n */\n async resolveFully(): Promise {\n let x = await this.next();\n while (!x.done) {\n x = await this.next();\n }\n }\n\n /**\n * Draw items from the stream until it is exhausted, or a predicate fails.\n *\n * This can be useful when the stream has side effects but no output. 
In\n * that case, calling this function guarantees that the stream will be\n * fully processed.\n */\n async resolveWhile(predicate: (r: T) => boolean): Promise {\n let x = await this.next();\n let shouldContinue = predicate(x.value);\n while ((!x.done) && shouldContinue) {\n x = await this.next();\n shouldContinue = predicate(x.value);\n }\n }\n\n /**\n * Handles errors thrown on this stream using a provided handler function.\n *\n * @param handler A function that handles any `Error` thrown during a `next()`\n * call and returns true if the stream should continue (dropping the failed\n * call) or false if the stream should quietly terminate. If the handler\n * itself throws (or rethrows) an `Error`, that will be propagated.\n *\n * @returns A `LazyIterator` of elements passed through from upstream,\n * possibly filtering or terminating on upstream `next()` calls that\n * throw an `Error`.\n */\n handleErrors(handler: (error: Error) => boolean): LazyIterator {\n return new ErrorHandlingLazyIterator(this, handler);\n }\n\n // TODO(soergel): Implement reduce() etc.\n\n /**\n * Filters this stream according to `predicate`.\n *\n * @param predicate A function mapping a stream element to a boolean or a\n * `Promise` for one.\n *\n * @returns A `LazyIterator` of elements for which the predicate was true.\n */\n filter(predicate: (value: T) => boolean): LazyIterator {\n return new FilterIterator(this, predicate);\n }\n\n /**\n * Maps this stream through a 1-to-1 transform.\n *\n * @param transform A function mapping a stream element to a transformed\n * element.\n *\n * @returns A `LazyIterator` of transformed elements.\n */\n map(transform: (value: T) => O): LazyIterator {\n return new MapIterator(this, transform);\n }\n\n /**\n * Maps this stream through an async 1-to-1 transform.\n *\n * @param transform A function mapping a stream element to a `Promise` for a\n * transformed stream element.\n *\n * @returns A `LazyIterator` of transformed elements.\n */\n mapAsync(transform: (value: T) => Promise): LazyIterator {\n return new AsyncMapIterator(this, transform);\n }\n\n /**\n * Maps this stream through a 1-to-1 transform, forcing serial execution.\n *\n * @param transform A function mapping a stream element to a transformed\n * element.\n *\n * @returns A `LazyIterator` of transformed elements.\n */\n serialMapAsync(transform: (value: T) => Promise): LazyIterator {\n return new AsyncMapIterator(this, transform).serial();\n }\n\n /**\n * Maps this stream through a 1-to-many transform.\n *\n * @param transform A function mapping a stream element to an array of\n * transformed elements.\n *\n * @returns A `DataStream` of transformed elements.\n */\n flatmap(transform: (value: T) => O[]): LazyIterator {\n return new FlatmapIterator(this, transform);\n }\n\n /**\n * Apply a function to every element of the stream.\n *\n * @param f A function to apply to each stream element.\n */\n async forEachAsync(f: (value: T) => void): Promise {\n return this.map(f).resolveFully();\n }\n\n /**\n * Apply a function to every element of the stream, forcing serial execution.\n *\n * @param f A function to apply to each stream element. 
Should return 'true'\n * to indicate that the stream should continue, or 'false' to cause it to\n * terminate.\n */\n async serialForEach(f: (value: T) => Promise): Promise {\n return this.serialMapAsync(f).resolveWhile(x => (x === true));\n }\n\n /**\n * Groups elements into batches, represented as arrays of elements.\n *\n * We can think of the elements of this iterator as 'rows' (even if they are\n * nested structures). By the same token, consecutive values for a given\n * key within the elements form a 'column'. This matches the usual sense of\n * 'row' and 'column' when processing tabular data (e.g., parsing a CSV).\n *\n * Thus, \"Row-major\" means that the resulting batch is simply a collection of\n * rows: `[row1, row2, row3, ...]`. This is contrast to the column-major\n * form, which is needed for vectorized computation.\n *\n * @param batchSize The number of elements desired per batch.\n * @param smallLastBatch Whether to emit the final batch when it has fewer\n * than batchSize elements. Default true.\n * @returns A `LazyIterator` of batches of elements, represented as arrays\n * of the original element type.\n */\n rowMajorBatch(batchSize: number, smallLastBatch = true): LazyIterator {\n return new RowMajorBatchIterator(this, batchSize, smallLastBatch);\n }\n\n /**\n * Groups elements into batches, represented in column-major form.\n *\n * We can think of the elements of this iterator as 'rows' (even if they are\n * nested structures). By the same token, consecutive values for a given\n * key within the elements form a 'column'. This matches the usual sense of\n * 'row' and 'column' when processing tabular data (e.g., parsing a CSV).\n *\n * Thus, \"column-major\" means that the resulting batch is a (potentially\n * nested) structure representing the columns. Each column entry, then,\n * contains a collection of the values found in that column for a range of\n * input elements. This representation allows for vectorized computation, in\n * contrast to the row-major form.\n *\n * The inputs should all have the same nested structure (i.e., of arrays and\n * dicts). The result is a single object with the same nested structure,\n * where the leaves are arrays collecting the values of the inputs at that\n * location (or, optionally, the result of a custom function applied to those\n * arrays).\n *\n * @param batchSize The number of elements desired per batch.\n * @param smallLastBatch Whether to emit the final batch when it has fewer\n * than batchSize elements. Default true.\n * @param zipFn: (optional) A function that expects an array of elements at a\n * single node of the object tree, and returns a `DeepMapResult`. The\n * `DeepMapResult` either provides a result value for that node (i.e.,\n * representing the subtree), or indicates that the node should be processed\n * recursively. 
The default zipFn recurses as far as possible and places\n * arrays at the leaves.\n * @returns A `LazyIterator` of batches of elements, represented as an object\n * with collections at the leaves.\n */\n columnMajorBatch(\n batchSize: number, smallLastBatch = true,\n // tslint:disable-next-line:no-any\n zipFn: (xs: any[]) => DeepMapResult = zipToList):\n LazyIterator {\n // First collect the desired number of input elements as a row-major batch.\n const rowBatches = this.rowMajorBatch(batchSize, smallLastBatch);\n // Now 'rotate' or 'pivot' the data, collecting all values from each column\n // in the batch (i.e., for each key within the elements) into an array.\n return rowBatches.map(x => deepZip(x, zipFn));\n }\n\n /**\n * Concatenate this `LazyIterator` with another.\n *\n * @param iterator A `LazyIterator` to be concatenated onto this one.\n * @param baseErrorHandler An optional function that can intercept `Error`s\n * raised during a `next()` call on the base stream. This function can\n * decide whether the error should be propagated, whether the error should\n * be ignored, or whether the base stream should be terminated.\n * @returns A `LazyIterator`.\n */\n concatenate(\n iterator: LazyIterator,\n baseErrorHandler?: (e: Error) => boolean): LazyIterator {\n return new ChainedIterator(\n iteratorFromItems([this, iterator]), baseErrorHandler);\n }\n\n /**\n * Limits this stream to return at most `count` items.\n *\n * @param count The maximum number of items to provide from the stream. If\n * a negative or undefined value is given, the entire stream is returned\n * unaltered.\n */\n take(count: number): LazyIterator {\n if (count < 0 || count == null) {\n return this;\n }\n return new TakeIterator(this, count);\n }\n\n /**\n * Skips the first `count` items in this stream.\n *\n * @param count The number of items to skip. If a negative or undefined\n * value is given, the entire stream is returned unaltered.\n */\n skip(count: number): LazyIterator {\n if (count < 0 || count == null) {\n return this;\n }\n return new SkipIterator(this, count);\n }\n\n /**\n * Prefetch the first `bufferSize` items in this stream.\n *\n * Note this prefetches Promises, but makes no guarantees about when those\n * Promises resolve.\n *\n * @param bufferSize: An integer specifying the number of elements to be\n * prefetched.\n */\n prefetch(bufferSize: number): LazyIterator {\n return new PrefetchIterator(this, bufferSize);\n }\n\n // TODO(soergel): deep sharded shuffle, where supported\n\n /**\n * Randomly shuffles the elements of this stream.\n *\n * @param bufferSize: An integer specifying the number of elements from\n * this stream from which the new stream will sample.\n * @param seed: (Optional.) An integer specifying the random seed that\n * will be used to create the distribution.\n */\n shuffle(windowSize: number, seed?: string): LazyIterator {\n return new ShuffleIterator(this, windowSize, seed);\n }\n\n /**\n * Force an iterator to execute serially: each next() call will await the\n * prior one, so that they cannot execute concurrently.\n */\n serial(): LazyIterator {\n return new SerialIterator(this);\n }\n}\n\n// ============================================================================\n// The following private classes serve to implement the chainable methods\n// on LazyIterator. 
Unfortunately they can't be placed in separate files,\n// due to resulting trouble with circular imports.\n// ============================================================================\n\n// Iterators that just extend LazyIterator directly\n// ============================================================================\n\nclass ArrayIterator extends LazyIterator {\n private trav = 0;\n constructor(protected items: T[]) {\n super();\n }\n\n summary() {\n return `Array of ${this.items.length} items`;\n }\n\n async next(): Promise> {\n if (this.trav >= this.items.length) {\n return {value: null, done: true};\n }\n const item = this.items[this.trav];\n this.trav++;\n return {value: deepClone(item), done: false};\n }\n}\n\nclass FunctionCallIterator extends LazyIterator {\n constructor(\n protected nextFn: () => IteratorResult| Promise>) {\n super();\n }\n\n summary() {\n return `Function call`;\n }\n\n async next(): Promise> {\n try {\n return this.nextFn();\n } catch (e) {\n // Modify the error message but leave the stack trace intact\n e.message =\n `Error thrown while iterating through a dataset: ${e.message}`;\n throw e;\n }\n }\n}\n\nclass SerialIterator extends LazyIterator {\n // Strict Promise execution order:\n // a next() call may not even begin until the previous one completes.\n private lastRead: Promise>;\n\n constructor(protected upstream: LazyIterator) {\n super();\n this.lastRead = Promise.resolve({value: null, done: false});\n }\n\n summary() {\n return `${this.upstream.summary()} -> Serial`;\n }\n\n async next(): Promise> {\n // This sets this.lastRead to a new Promise right away, as opposed to\n // saying `await this.lastRead; this.lastRead = this.serialNext();` which\n // would not work because this.nextRead would be updated only after the\n // promise resolves.\n this.lastRead = this.lastRead.then(() => this.serialNext());\n return this.lastRead;\n }\n\n private async serialNext(): Promise> {\n return this.upstream.next();\n }\n}\n\nclass SkipIterator extends LazyIterator {\n // Strict Promise execution order:\n // a next() call may not even begin until the previous one completes.\n private lastRead: Promise>;\n\n // Local state that should not be clobbered by out-of-order execution.\n count = 0;\n\n constructor(protected upstream: LazyIterator, protected maxCount: number) {\n super();\n this.lastRead = Promise.resolve({value: null, done: false});\n }\n\n summary() {\n return `${this.upstream.summary()} -> Skip`;\n }\n\n async next(): Promise> {\n // This sets this.lastRead to a new Promise right away, as opposed to\n // saying `await this.lastRead; this.lastRead = this.serialNext();` which\n // would not work because this.nextRead would be updated only after the\n // promise resolves.\n this.lastRead = this.lastRead.then(() => this.serialNext());\n return this.lastRead;\n }\n\n private async serialNext(): Promise> {\n // TODO(soergel): consider tradeoffs of reading in parallel, eg.\n // collecting next() promises in an Array and then waiting for\n // Promise.all() of those. Benefit: pseudo-parallel execution. 
Drawback:\n // maybe delayed GC.\n while (this.count++ < this.maxCount) {\n const skipped = await this.upstream.next();\n // short-circuit if upstream is already empty\n if (skipped.done) {\n return skipped;\n }\n tf.dispose(skipped.value as {});\n }\n return this.upstream.next();\n }\n}\n\nclass TakeIterator extends LazyIterator {\n count = 0;\n constructor(protected upstream: LazyIterator, protected maxCount: number) {\n super();\n }\n\n summary() {\n return `${this.upstream.summary()} -> Take`;\n }\n\n async next(): Promise> {\n if (this.count++ >= this.maxCount) {\n return {value: null, done: true};\n }\n return this.upstream.next();\n }\n}\n\n// Note this batch just groups items into row-wise element arrays.\n// Rotating these to a column-wise representation happens only at the dataset\n// level.\nclass RowMajorBatchIterator extends LazyIterator {\n // Strict Promise execution order:\n // a next() call may not even begin until the previous one completes.\n private lastRead: Promise>;\n\n constructor(\n protected upstream: LazyIterator, protected batchSize: number,\n protected enableSmallLastBatch = true) {\n super();\n this.lastRead = Promise.resolve({value: null, done: false});\n }\n\n summary() {\n return `${this.upstream.summary()} -> RowMajorBatch`;\n }\n\n async next(): Promise> {\n // This sets this.lastRead to a new Promise right away, as opposed to\n // saying `await this.lastRead; this.lastRead = this.serialNext();` which\n // would not work because this.nextRead would be updated only after the\n // promise resolves.\n this.lastRead = this.lastRead.then(() => this.serialNext());\n return this.lastRead;\n }\n\n private async serialNext(): Promise> {\n const batch: T[] = [];\n while (batch.length < this.batchSize) {\n const item = await this.upstream.next();\n if (item.done) {\n if (this.enableSmallLastBatch && batch.length > 0) {\n return {value: batch, done: false};\n }\n return {value: null, done: true};\n }\n batch.push(item.value);\n }\n return {value: batch, done: false};\n }\n}\n\nclass FilterIterator extends LazyIterator {\n // Strict Promise execution order:\n // a next() call may not even begin until the previous one completes.\n private lastRead: Promise>;\n\n constructor(\n protected upstream: LazyIterator,\n protected predicate: (value: T) => boolean) {\n super();\n this.lastRead = Promise.resolve({value: null, done: false});\n }\n\n summary() {\n return `${this.upstream.summary()} -> Filter`;\n }\n\n async next(): Promise> {\n // This sets this.lastRead to a new Promise right away, as opposed to\n // saying `await this.lastRead; this.lastRead = this.serialNext();` which\n // would not work because this.nextRead would be updated only after the\n // promise resolves.\n this.lastRead = this.lastRead.then(() => this.serialNext());\n return this.lastRead;\n }\n\n private async serialNext(): Promise> {\n while (true) {\n const item = await this.upstream.next();\n if (item.done || this.predicate(item.value)) {\n return item;\n }\n tf.dispose(item.value as {});\n }\n }\n}\n\nclass MapIterator extends LazyIterator {\n constructor(\n protected upstream: LazyIterator,\n protected transform: (value: I) => O) {\n super();\n }\n\n summary() {\n return `${this.upstream.summary()} -> Map`;\n }\n\n async next(): Promise> {\n const item = await this.upstream.next();\n if (item.done) {\n return {value: null, done: true};\n }\n const inputTensors = tf.tensor_util.getTensorsInContainer(item.value as {});\n // Careful: the transform may mutate the item in place.\n // That's why we have 
to remember the input Tensors above, and then\n // below dispose only those that were not passed through to the output.\n // Note too that the transform function is responsible for tidying\n // any intermediate Tensors. Here we are concerned only about the\n // inputs.\n const mapped = this.transform(item.value);\n const outputTensors = tf.tensor_util.getTensorsInContainer(mapped as {});\n\n // TODO(soergel) faster intersection\n // TODO(soergel) move to tf.disposeExcept(in, out)?\n for (const t of inputTensors) {\n if (!tf.tensor_util.isTensorInList(t, outputTensors)) {\n t.dispose();\n }\n }\n return {value: mapped, done: false};\n }\n}\n\nclass ErrorHandlingLazyIterator extends LazyIterator {\n count = 0;\n constructor(\n protected upstream: LazyIterator,\n protected handler: (error: Error) => boolean) {\n super();\n this.lastRead = Promise.resolve({value: null, done: false});\n }\n\n summary() {\n return `${this.upstream.summary()} -> handleErrors`;\n }\n\n // Strict Promise execution order:\n // a next() call may not even begin until the previous one completes.\n private lastRead: Promise>;\n\n async next(): Promise> {\n // This sets this.lastRead to a new Promise right away, as opposed to\n // saying `await this.lastRead; this.lastRead = this.serialNext();` which\n // would not work because this.nextRead would be updated only after the\n // promise resolves.\n this.lastRead = this.lastRead.then(() => this.serialNext());\n return this.lastRead;\n }\n\n async serialNext(): Promise> {\n while (true) {\n try {\n return await this.upstream.next();\n } catch (e) {\n if (!this.handler(e)) {\n return {value: null, done: true};\n }\n // If the handler returns true, loop and fetch the next upstream item.\n\n // If the upstream iterator throws an endless stream of errors, and if\n // the handler says to ignore them, then we loop forever here. That is\n // the correct behavior-- it's up to the handler to decide when to stop.\n }\n }\n }\n}\n\nclass AsyncMapIterator extends LazyIterator {\n constructor(\n protected upstream: LazyIterator,\n protected transform: (value: I) => Promise) {\n super();\n }\n\n summary() {\n return `${this.upstream.summary()} -> AsyncMap`;\n }\n\n async next(): Promise> {\n const item = await this.upstream.next();\n if (item.done) {\n return {value: null, done: true};\n }\n const inputTensors = tf.tensor_util.getTensorsInContainer(item.value as {});\n // Careful: the transform may mutate the item in place.\n // That's why we have to remember the input Tensors above, and then\n // below dispose only those that were not passed through to the output.\n // Note too that the transform function is responsible for tidying\n // any intermediate Tensors. Here we are concerned only about the\n // inputs.\n const mapped = await this.transform(item.value);\n const outputTensors = tf.tensor_util.getTensorsInContainer(mapped as {});\n\n // TODO(soergel) faster intersection\n // TODO(soergel) move to tf.disposeExcept(in, out)?\n for (const t of inputTensors) {\n if (!tf.tensor_util.isTensorInList(t, outputTensors)) {\n t.dispose();\n }\n }\n return {value: mapped, done: false};\n }\n}\n\n// Iterators that maintain a queue of pending items\n// ============================================================================\n\n/**\n * A base class for transforming streams that operate by maintaining an\n * output queue of elements that are ready to return via next(). 
This is\n * commonly required when the transformation is 1-to-many: A call to next()\n * may trigger a call to the underlying stream, which will produce many\n * mapped elements of this stream-- of which we need to return only one, so\n * we have to queue the rest.\n */\nexport abstract class OneToManyIterator extends LazyIterator {\n // Strict Promise execution order:\n // a next() call may not even begin until the previous one completes.\n private lastRead: Promise>;\n\n // Local state that should not be clobbered by out-of-order execution.\n protected outputQueue: RingBuffer;\n\n constructor() {\n super();\n this.outputQueue = new GrowingRingBuffer();\n this.lastRead = Promise.resolve({value: null, done: false});\n }\n\n async next(): Promise> {\n // This sets this.lastRead to a new Promise right away, as opposed to\n // saying `await this.lastRead; this.lastRead = this.serialNext();` which\n // would not work because this.nextRead would be updated only after the\n // promise resolves.\n this.lastRead = this.lastRead.then(() => this.serialNext());\n return this.lastRead;\n }\n\n /**\n * Read one or more chunks from upstream and process them, possibly\n * reading or writing a carryover, and adding processed items to the\n * output queue. Note it's possible that no items are added to the queue\n * on a given pump() call, even if the upstream stream is not closed\n * (e.g., because items are filtered).\n *\n * @return `true` if any action was taken, i.e. fetching items from the\n * upstream source OR adding items to the output queue. `false` if the\n * upstream source is exhausted AND nothing was added to the queue\n * (i.e., any remaining carryover).\n */\n protected abstract pump(): Promise;\n\n async serialNext(): Promise> {\n // Fetch so that the queue contains at least one item if possible.\n // If the upstream source is exhausted, AND there are no items left in\n // the output queue, then this stream is also exhausted.\n while (this.outputQueue.length() === 0) {\n // TODO(soergel): consider parallel reads.\n if (!await this.pump()) {\n return {value: null, done: true};\n }\n }\n return {value: this.outputQueue.shift(), done: false};\n }\n}\nclass FlatmapIterator extends OneToManyIterator {\n constructor(\n protected upstream: LazyIterator,\n protected transform: (value: I) => O[]) {\n super();\n }\n\n summary() {\n return `${this.upstream.summary()} -> Flatmap`;\n }\n\n async pump(): Promise {\n const item = await this.upstream.next();\n if (item.done) {\n return false;\n }\n const inputTensors = tf.tensor_util.getTensorsInContainer(item.value as {});\n // Careful: the transform may mutate the item in place.\n // that's why we have to remember the input Tensors above, and then\n // below dispose only those that were not passed through to the output.\n // Note too that the transform function is responsible for tidying any\n // intermediate Tensors. Here we are concerned only about the inputs.\n const mappedArray = this.transform(item.value);\n const outputTensors =\n tf.tensor_util.getTensorsInContainer(mappedArray as {});\n this.outputQueue.pushAll(mappedArray);\n\n // TODO(soergel) faster intersection, and deduplicate outputTensors\n // TODO(soergel) move to tf.disposeExcept(in, out)?\n for (const t of inputTensors) {\n if (!tf.tensor_util.isTensorInList(t, outputTensors)) {\n t.dispose();\n }\n }\n\n return true;\n }\n}\n\n/**\n * Provides a `LazyIterator` that concatenates a stream of underlying\n * streams.\n *\n * Doing this in a concurrency-safe way requires some trickery. 
In\n * particular, we want this stream to return the elements from the\n * underlying streams in the correct order according to when next() was\n * called, even if the resulting Promises resolve in a different order.\n */\nexport class ChainedIterator extends LazyIterator {\n // Strict Promise execution order:\n // a next() call may not even begin until the previous one completes.\n private lastRead: Promise> = null;\n\n // Local state that should not be clobbered by out-of-order execution.\n private iterator: LazyIterator = null;\n private moreIterators: LazyIterator>;\n\n constructor(\n iterators: LazyIterator>,\n private readonly baseErrorHandler?: (e: Error) => boolean) {\n super();\n this.moreIterators = iterators;\n }\n\n summary() {\n const upstreamSummaries = 'TODO: fill in upstream of chained summaries';\n return `${upstreamSummaries} -> Chained`;\n }\n\n async next(): Promise> {\n this.lastRead = this.readFromChain(this.lastRead);\n return this.lastRead;\n }\n\n private async readFromChain(lastRead: Promise>):\n Promise> {\n // Must await on the previous read since the previous read may have advanced\n // the stream of streams, from which we need to read.\n // This is unfortunate since we can't parallelize reads. Which means\n // prefetching of chained streams is a no-op.\n // One solution is to prefetch immediately upstream of this.\n await lastRead;\n if (this.iterator == null) {\n const iteratorResult = await this.moreIterators.next();\n if (iteratorResult.done) {\n // No more streams to stream from.\n return {value: null, done: true};\n }\n this.iterator = iteratorResult.value;\n if (this.baseErrorHandler != null) {\n this.iterator = this.iterator.handleErrors(this.baseErrorHandler);\n }\n }\n const itemResult = await this.iterator.next();\n if (itemResult.done) {\n this.iterator = null;\n return this.readFromChain(lastRead);\n }\n return itemResult;\n }\n}\n\nexport enum ZipMismatchMode {\n FAIL, // require zipped streams to have the same length\n SHORTEST, // terminate zip when the first stream is exhausted\n LONGEST // use nulls for exhausted streams; use up the longest stream.\n}\n\n/**\n * Provides a `LazyIterator` that zips together an array, dict, or nested\n * structure of `LazyIterator`s (and perhaps additional constants).\n *\n * The underlying streams must provide elements in a consistent order such\n * that they correspond.\n *\n * Typically, the underlying streams should have the same number of\n * elements. If they do not, the behavior is determined by the\n * `mismatchMode` argument.\n *\n * The nested structure of the `iterators` argument determines the\n * structure of elements in the resulting iterator.\n *\n * Doing this in a concurrency-safe way requires some trickery. In\n * particular, we want this stream to return the elements from the\n * underlying streams in the correct order according to when next() was\n * called, even if the resulting Promises resolve in a different order.\n *\n * @param iterators: An array or object containing LazyIterators at the\n * leaves.\n * @param mismatchMode: Determines what to do when one underlying iterator\n * is exhausted before the others. `ZipMismatchMode.FAIL` (the default)\n * causes an error to be thrown in this case. 
`ZipMismatchMode.SHORTEST`\n * causes the zipped iterator to terminate with the furst underlying\n * streams, so elements remaining on the longer streams are ignored.\n * `ZipMismatchMode.LONGEST` causes the zipped stream to continue, filling\n * in nulls for the exhausted streams, until all streams are exhausted.\n */\nclass ZipIterator extends LazyIterator {\n private count = 0;\n private currentPromise: Promise> = null;\n\n constructor(\n protected readonly iterators: IteratorContainer,\n protected readonly mismatchMode: ZipMismatchMode = ZipMismatchMode.FAIL) {\n super();\n }\n\n summary() {\n const upstreamSummaries = 'TODO: fill in upstream of zip summaries';\n return `{${upstreamSummaries}} -> Zip`;\n }\n\n private async nextState(afterState: Promise>):\n Promise> {\n // This chaining ensures that the underlying next() are not even called\n // before the previous ones have resolved.\n await afterState;\n\n // Collect underlying iterator \"done\" signals as a side effect in\n // getNext()\n let numIterators = 0;\n let iteratorsDone = 0;\n\n function getNext(container: IteratorContainer): DeepMapAsyncResult {\n if (container instanceof LazyIterator) {\n const result = container.next();\n return {\n value: result.then(x => {\n numIterators++;\n if (x.done) {\n iteratorsDone++;\n }\n return x.value;\n }),\n recurse: false\n };\n } else {\n return {value: null, recurse: true};\n }\n }\n\n const mapped: O = await deepMapAndAwaitAll(this.iterators, getNext);\n\n if (numIterators === iteratorsDone) {\n // The streams have all ended.\n return {value: null, done: true};\n }\n if (iteratorsDone > 0) {\n switch (this.mismatchMode) {\n case ZipMismatchMode.FAIL:\n throw new Error(\n 'Zipped streams should have the same length. ' +\n `Mismatched at element ${this.count}.`);\n case ZipMismatchMode.SHORTEST:\n return {value: null, done: true};\n case ZipMismatchMode.LONGEST:\n default:\n // Continue. The exhausted streams already produced value: null.\n }\n }\n\n this.count++;\n return {value: mapped, done: false};\n }\n\n async next(): Promise> {\n this.currentPromise = this.nextState(this.currentPromise);\n return this.currentPromise;\n }\n}\n\n// Iterators that maintain a ring buffer of pending promises\n// ============================================================================\n\n/**\n * A stream that prefetches a given number of items from an upstream source,\n * returning them in FIFO order.\n *\n * Note this prefetches Promises, but makes no guarantees about when those\n * Promises resolve.\n */\nexport class PrefetchIterator extends LazyIterator {\n protected buffer: RingBuffer>>;\n\n constructor(\n protected upstream: LazyIterator, protected bufferSize: number) {\n super();\n this.buffer = new RingBuffer>>(bufferSize);\n }\n\n summary() {\n return `${this.upstream.summary()} -> Prefetch`;\n }\n\n /**\n * Refill the prefetch buffer. Returns only after the buffer is full, or\n * the upstream source is exhausted.\n */\n protected refill() {\n while (!this.buffer.isFull()) {\n const v = this.upstream.next();\n this.buffer.push(v);\n }\n }\n\n next(): Promise> {\n this.refill();\n // This shift will never throw an error because the buffer is always\n // full after a refill. If the stream is exhausted, the buffer will be\n // full of Promises that will resolve to the end-of-stream signal.\n return this.buffer.shift();\n }\n}\n\n/**\n * A stream that performs a sliding-window random shuffle on an upstream\n * source. 
This is like a `PrefetchIterator` except that the items are\n * returned in randomized order. Mixing naturally improves as the buffer\n * size increases.\n */\nexport class ShuffleIterator extends PrefetchIterator {\n private readonly random: seedrandom.prng;\n\n // Strict Promise execution order:\n // a next() call may not even begin until the previous one completes.\n private lastRead: Promise>;\n\n // Local state that should not be clobbered by out-of-order execution.\n private upstreamExhausted = false;\n\n constructor(\n protected override upstream: LazyIterator, protected windowSize: number,\n seed?: string) {\n super(upstream, windowSize);\n this.random = seedrandom.alea(seed || tf.util.now().toString());\n this.lastRead = Promise.resolve({value: null, done: false});\n }\n\n override async next(): Promise> {\n // This sets this.lastRead to a new Promise right away, as opposed to\n // saying `await this.lastRead; this.lastRead = this.serialNext();` which\n // would not work because this.nextRead would be updated only after the\n // promise resolves.\n this.lastRead = this.lastRead.then(() => this.serialNext());\n return this.lastRead;\n }\n\n private randomInt(max: number) {\n return Math.floor(this.random() * max);\n }\n\n protected chooseIndex(): number {\n return this.randomInt(this.buffer.length());\n }\n\n async serialNext(): Promise> {\n // TODO(soergel): consider performance\n if (!this.upstreamExhausted) {\n this.refill();\n }\n while (!this.buffer.isEmpty()) {\n const chosenIndex = this.chooseIndex();\n const result = await this.buffer.shuffleExcise(chosenIndex);\n if (result.done) {\n this.upstreamExhausted = true;\n } else {\n this.refill();\n return result;\n }\n }\n return {value: null, done: true};\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * =============================================================================\n */\n\nimport * as tf from '@tensorflow/tfjs-core';\nimport {TensorContainer, TensorLike} from '@tensorflow/tfjs-core';\nimport * as seedrandom from 'seedrandom';\n\nimport {iteratorFromConcatenated, iteratorFromFunction, iteratorFromItems, iteratorFromZipped, LazyIterator, ZipMismatchMode} from './iterators/lazy_iterator';\nimport {Container} from './types';\nimport {canTensorify, deepMapAndAwaitAll, DeepMapResult, isIterable} from './util/deep_map';\n\n/**\n * A nested structure of Datasets, used as the input to zip().\n */\nexport type DatasetContainer = Container>;\n\n// TODO(soergel): consider vectorized operations within the pipeline.\n\n/**\n * Represents a potentially large list of independent data elements (typically\n * 'samples' or 'examples').\n *\n * A 'data example' may be a primitive, an array, a map from string keys to\n * values, or any nested structure of these.\n *\n * A `Dataset` represents an ordered collection of elements, together with a\n * chain of transformations to be performed on those elements. 
Each\n * transformation is a method of `Dataset` that returns another `Dataset`, so\n * these may be chained, e.g.\n * `const processedDataset = rawDataset.filter(...).map(...).batch(...)`.\n *\n * Data loading and transformation is done in a lazy, streaming fashion. The\n * dataset may be iterated over multiple times; each iteration starts the data\n * loading anew and recapitulates the transformations.\n *\n * A `Dataset` is typically processed as a stream of unbatched examples -- i.e.,\n * its transformations are applied one example at a time. Batching produces a\n * new `Dataset` where each element is a batch. Batching should usually come\n * last in a pipeline, because data transformations are easier to express on a\n * per-example basis than on a per-batch basis.\n *\n * The following code examples are calling `await dataset.forEachAsync(...)` to\n * iterate once over the entire dataset in order to print out the data.\n *\n * @doc {heading: 'Data', subheading: 'Classes', namespace: 'data'}\n */\nexport abstract class Dataset {\n /*\n * Provide a new stream of elements. Note this will also start new streams\n * from any underlying `Dataset`s.\n *\n * CAUTION: Any Tensors contained within the elements returned from\n * this stream *must* be manually disposed to avoid a GPU memory leak.\n * The tf.tidy() approach cannot be used in an asynchronous context.\n */\n abstract iterator(): Promise>;\n\n readonly size: number = null;\n\n // TODO(soergel): Make Datasets report whether repeated iterator() calls\n // produce the same result (e.g., reading from a file) or different results\n // (e.g., from the webcam). Currently we don't make this distinction but it\n // could be important for the user to know.\n // abstract isDeterministic(): boolean;\n\n /**\n * Groups elements into batches.\n *\n * It is assumed that each of the incoming dataset elements has the same\n * structure -- i.e. the same set of keys at each location in an object\n * hierarchy. For each key, the resulting `Dataset` provides a batched\n * element collecting all of the incoming values for that key.\n *\n * * Incoming primitives are grouped into a 1-D Tensor.\n * * Incoming Tensors are grouped into a new Tensor where the 0th axis is\n * the batch dimension.\n * * Incoming arrays are converted to Tensor and then batched.\n * * A nested array is interpreted as an n-D Tensor, so the batched result\n * has n+1 dimensions.\n * * An array that cannot be converted to Tensor produces an error.\n *\n * If an array should not be batched as a unit, it should first be converted\n * to an object with integer keys.\n *\n * Here are a few examples:\n *\n * Batch a dataset of numbers:\n * ```js\n * const a = tf.data.array([1, 2, 3, 4, 5, 6, 7, 8]).batch(4);\n * await a.forEachAsync(e => e.print());\n * ```\n *\n * Batch a dataset of arrays:\n * ```js\n * const b = tf.data.array([[1], [2], [3], [4], [5], [6], [7], [8]]).batch(4);\n * await b.forEachAsync(e => e.print());\n * ```\n *\n * Batch a dataset of objects:\n * ```js\n * const c = tf.data.array([{a: 1, b: 11}, {a: 2, b: 12}, {a: 3, b: 13},\n * {a: 4, b: 14}, {a: 5, b: 15}, {a: 6, b: 16}, {a: 7, b: 17},\n * {a: 8, b: 18}]).batch(4);\n * await c.forEachAsync(e => {\n * console.log('{');\n * for(var key in e) {\n * console.log(key+':');\n * e[key].print();\n * }\n * console.log('}');\n * })\n * ```\n *\n * @param batchSize The number of elements desired per batch.\n * @param smallLastBatch Whether to emit the final batch when it has fewer\n * than batchSize elements. 
Default true.\n * @returns A `Dataset`, from which a stream of batches can be obtained.\n *\n * @doc {heading: 'Data', subheading: 'Classes'}\n */\n batch(batchSize: number, smallLastBatch = true): Dataset {\n const base = this;\n tf.util.assert(\n batchSize > 0, () => `batchSize needs to be positive, but it is\n ${batchSize}`);\n let size;\n if (this.size === Infinity || this.size == null) {\n // If the size of this dataset is infinity or null, the new size keeps the\n // same.\n size = this.size;\n } else if (smallLastBatch) {\n // If the size of this dataset is known and include small last batch, the\n // new size is full batch count plus last batch.\n size = Math.ceil(this.size / batchSize);\n } else {\n // If the size of this dataset is known and not include small last batch,\n // the new size is full batch count.\n size = Math.floor(this.size / batchSize);\n }\n return datasetFromIteratorFn(async () => {\n return (await base.iterator())\n .columnMajorBatch(batchSize, smallLastBatch, deepBatchConcat);\n }, size);\n }\n\n /**\n * Concatenates this `Dataset` with another.\n *\n * ```js\n * const a = tf.data.array([1, 2, 3]);\n * const b = tf.data.array([4, 5, 6]);\n * const c = a.concatenate(b);\n * await c.forEachAsync(e => console.log(e));\n * ```\n *\n * @param dataset A `Dataset` to be concatenated onto this one.\n * @returns A `Dataset`.\n *\n * @doc {heading: 'Data', subheading: 'Classes'}\n */\n concatenate(dataset: Dataset): Dataset {\n const base = this;\n let size;\n if (this.size === Infinity || dataset.size === Infinity) {\n // If the size of any of these two dataset is infinity, new size is\n // infinity.\n size = Infinity;\n } else if (this.size != null && dataset.size != null) {\n // If the size of both datasets are known and not infinity, new size is\n // sum the size of these two datasets.\n size = this.size + dataset.size;\n } else {\n // If neither of these two datasets has infinite size and any of these two\n // datasets' size is null, the new size is null.\n size = null;\n }\n return datasetFromIteratorFn(\n async () =>\n (await base.iterator()).concatenate(await dataset.iterator()),\n size);\n }\n\n /**\n * Filters this dataset according to `predicate`.\n *\n * ```js\n * const a = tf.data.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n * .filter(x => x%2 === 0);\n * await a.forEachAsync(e => console.log(e));\n * ```\n *\n * @param predicate A function mapping a dataset element to a boolean or a\n * `Promise` for one.\n *\n * @returns A `Dataset` of elements for which the predicate was true.\n *\n * @doc {heading: 'Data', subheading: 'Classes'}\n */\n filter(predicate: (value: T) => boolean): Dataset {\n const base = this;\n let size;\n if (this.size === Infinity) {\n // If the size of this dataset is infinity, new size is infinity\n size = Infinity;\n } else {\n // If this dataset has limited elements, new size is null because it might\n // exhausted randomly.\n size = null;\n }\n return datasetFromIteratorFn(async () => {\n return (await base.iterator()).filter(x => tf.tidy(() => predicate(x)));\n }, size);\n }\n\n /**\n * Apply a function to every element of the dataset.\n *\n * After the function is applied to a dataset element, any Tensors contained\n * within that element are disposed.\n *\n * ```js\n * const a = tf.data.array([1, 2, 3]);\n * await a.forEachAsync(e => console.log(e));\n * ```\n *\n * @param f A function to apply to each dataset element.\n * @returns A `Promise` that resolves after all elements have been processed.\n *\n * @doc {heading: 'Data', 
subheading: 'Classes'}\n */\n async forEachAsync(f: (input: T) => void): Promise {\n return (await this.iterator()).forEachAsync(f);\n }\n\n /**\n * Maps this dataset through a 1-to-1 transform.\n *\n * ```js\n * const a = tf.data.array([1, 2, 3]).map(x => x*x);\n * await a.forEachAsync(e => console.log(e));\n * ```\n *\n * @param transform A function mapping a dataset element to a transformed\n * dataset element.\n *\n * @returns A `Dataset` of transformed elements.\n *\n * @doc {heading: 'Data', subheading: 'Classes'}\n */\n map(transform: (value: T) => O): Dataset {\n const base = this;\n return datasetFromIteratorFn(async () => {\n return (await base.iterator()).map(x => tf.tidy(() => transform(x)));\n }, this.size);\n }\n\n /**\n * Maps this dataset through an async 1-to-1 transform.\n *\n * ```js\n * const a =\n * tf.data.array([1, 2, 3]).mapAsync(x => new Promise(function(resolve){\n * setTimeout(() => {\n * resolve(x * x);\n * }, Math.random()*1000 + 500);\n * }));\n * console.log(await a.toArray());\n * ```\n *\n * @param transform A function mapping a dataset element to a `Promise` for a\n * transformed dataset element. This transform is responsible for disposing\n * any intermediate `Tensor`s, i.e. by wrapping its computation in\n * `tf.tidy()`; that cannot be automated here (as it is in the synchronous\n * `map()` case).\n *\n * @returns A `Dataset` of transformed elements.\n *\n * @doc {heading: 'Data', subheading: 'Classes'}\n */\n mapAsync(transform: (value: T) => Promise):\n Dataset {\n const base = this;\n return datasetFromIteratorFn(async () => {\n return (await base.iterator()).mapAsync(transform);\n }, this.size);\n }\n\n /**\n * Creates a `Dataset` that prefetches elements from this dataset.\n *\n * @param bufferSize: An integer specifying the number of elements to be\n * prefetched.\n * @returns A `Dataset`.\n *\n * @doc {heading: 'Data', subheading: 'Classes'}\n */\n prefetch(bufferSize: number): Dataset {\n if (bufferSize == null) {\n throw new RangeError(\n '`Dataset.prefetch()` requires bufferSize to be specified.');\n }\n\n const base = this;\n return datasetFromIteratorFn(\n async () => (await base.iterator()).prefetch(bufferSize), this.size);\n }\n\n /**\n * Repeats this dataset `count` times.\n *\n * NOTE: If this dataset is a function of global state (e.g. a random number\n * generator), then different repetitions may produce different elements.\n *\n * ```js\n * const a = tf.data.array([1, 2, 3]).repeat(3);\n * await a.forEachAsync(e => console.log(e));\n * ```\n *\n * @param count: (Optional) An integer, representing the number of times\n * the dataset should be repeated. The default behavior (if `count` is\n * `undefined` or negative) is for the dataset be repeated indefinitely.\n * @returns A `Dataset`.\n *\n * @doc {heading: 'Data', subheading: 'Classes'}\n */\n repeat(count?: number): Dataset {\n const base = this;\n let size;\n if (this.size != null && count > 0) {\n // If this dataset has size and count is positive, new size is current\n // size multiply count. 
This also covers the case that current size is\n // infinity.\n size = this.size * count;\n } else if (count === 0) {\n // If count is 0, new size is 0.\n size = 0;\n } else if (this.size != null && (count === undefined || count < 0)) {\n // If this dataset has size and count is undefined or negative, the\n // dataset will be repeated indefinitely and new size is infinity.\n size = Infinity;\n } else {\n // If the size of this dataset is null, the new dataset's size is null.\n size = null;\n }\n return datasetFromIteratorFn(async () => {\n const iteratorIterator = iteratorFromFunction(\n async () => ({value: await base.iterator(), done: false}));\n return iteratorFromConcatenated(iteratorIterator.take(count));\n }, size);\n }\n\n /**\n * Creates a `Dataset` that skips `count` initial elements from this dataset.\n *\n * ```js\n * const a = tf.data.array([1, 2, 3, 4, 5, 6]).skip(3);\n * await a.forEachAsync(e => console.log(e));\n * ```\n *\n * @param count: The number of elements of this dataset that should be skipped\n * to form the new dataset. If `count` is greater than the size of this\n * dataset, the new dataset will contain no elements. If `count`\n * is `undefined` or negative, skips the entire dataset.\n *\n * @returns A `Dataset`.\n *\n * @doc {heading: 'Data', subheading: 'Classes'}\n */\n skip(count: number): Dataset {\n const base = this;\n let size;\n if (this.size != null && count >= 0 && this.size >= count) {\n // If the size of this dataset is greater than count, the new dataset's\n // size is current size minus skipped size.This also covers the case that\n // current size is infinity.\n size = this.size - count;\n } else if (\n this.size != null &&\n (this.size < count || count === undefined || count < 0)) {\n // If the size of this dataset is smaller than count, or count is\n // undefined or negative, skips the entire dataset and the new size is 0.\n size = 0;\n } else {\n // If the size of this dataset is null, the new dataset's size is null.\n size = null;\n }\n return datasetFromIteratorFn(\n async () => (await base.iterator()).skip(count), size);\n }\n\n // TODO(soergel): deep sharded shuffle, where supported\n\n static readonly MAX_BUFFER_SIZE = 10000;\n\n /**\n * Pseudorandomly shuffles the elements of this dataset. This is done in a\n * streaming manner, by sampling from a given number of prefetched elements.\n *\n * ```js\n * const a = tf.data.array([1, 2, 3, 4, 5, 6]).shuffle(3);\n * await a.forEachAsync(e => console.log(e));\n * ```\n *\n * @param bufferSize: An integer specifying the number of elements from this\n * dataset from which the new dataset will sample.\n * @param seed: (Optional) An integer specifying the random seed that will\n * be used to create the distribution.\n * @param reshuffleEachIteration: (Optional) A boolean, which if true\n * indicates that the dataset should be pseudorandomly reshuffled each time\n * it is iterated over. If false, elements will be returned in the same\n * shuffled order on each iteration. (Defaults to `true`.)\n * @returns A `Dataset`.\n *\n * @doc {heading: 'Data', subheading: 'Classes'}\n */\n shuffle(bufferSize: number, seed?: string, reshuffleEachIteration = true):\n Dataset {\n if (bufferSize == null || bufferSize < 0) {\n if (this.size == null) {\n throw new RangeError(\n '`Dataset.shuffle()` requires bufferSize to be specified.');\n } else {\n throw new RangeError(\n '`Dataset.shuffle()` requires bufferSize to be specified. 
' +\n 'If your data fits in main memory (for regular JS objects), ' +\n 'and/or GPU memory (for `tf.Tensor`s), consider setting ' +\n `bufferSize to the dataset size (${this.size} elements)`);\n }\n }\n const base = this;\n const random = seedrandom.alea(seed || tf.util.now().toString());\n return datasetFromIteratorFn(async () => {\n let seed2 = random.int32();\n if (reshuffleEachIteration) {\n seed2 += random.int32();\n }\n return (await base.iterator()).shuffle(bufferSize, seed2.toString());\n }, this.size);\n }\n\n /**\n * Creates a `Dataset` with at most `count` initial elements from this\n * dataset.\n *\n * ```js\n * const a = tf.data.array([1, 2, 3, 4, 5, 6]).take(3);\n * await a.forEachAsync(e => console.log(e));\n * ```\n *\n * @param count: The number of elements of this dataset that should be taken\n * to form the new dataset. If `count` is `undefined` or negative, or if\n * `count` is greater than the size of this dataset, the new dataset will\n * contain all elements of this dataset.\n * @returns A `Dataset`.\n *\n * @doc {heading: 'Data', subheading: 'Classes'}\n */\n take(count: number): Dataset {\n const base = this;\n let size;\n if (this.size != null && this.size > count) {\n // If the size of this dataset is greater than count, the new dataset's\n // size is count.\n size = count;\n } else if (this.size != null && this.size <= count) {\n // If the size of this dataset is equal or smaller than count, the new\n // dataset's size is the size of this dataset.\n size = this.size;\n } else {\n // If the size of this dataset is null, the new dataset's size is null.\n size = null;\n }\n return datasetFromIteratorFn(\n async () => (await base.iterator()).take(count), size);\n }\n\n /**\n * Collect all elements of this dataset into an array.\n *\n * Obviously this will succeed only for small datasets that fit in memory.\n * Useful for testing and generally should be avoided if possible.\n *\n * ```js\n * const a = tf.data.array([1, 2, 3, 4, 5, 6]);\n * console.log(await a.toArray());\n * ```\n *\n * @returns A Promise for an array of elements, which will resolve\n * when a new stream has been obtained and fully consumed.\n *\n * @doc {heading: 'Data', subheading: 'Classes'}\n */\n async toArray() {\n if (this.size === Infinity) {\n throw new Error('Can not convert infinite data stream to array.');\n }\n return (await this.iterator()).toArray();\n }\n\n /**\n * Collect all elements of this dataset into an array with prefetching 100\n * elements. This is useful for testing, because the prefetch changes the\n * order in which the Promises are resolved along the processing pipeline.\n * This may help expose bugs where results are dependent on the order of\n * Promise resolution rather than on the logical order of the stream (i.e.,\n * due to hidden mutable state).\n *\n * @returns A Promise for an array of elements, which will resolve\n * when a new stream has been obtained and fully consumed.\n */\n async toArrayForTest() {\n if (this.size === Infinity) {\n throw new Error('Can not convert infinite data stream to array.');\n }\n return (await this.iterator()).toArrayForTest();\n }\n}\n\n/**\n * Create a `Dataset` defined by a provided iterator() function.\n *\n * ```js\n * let i = -1;\n * const func = () =>\n * ++i < 5 ? 
{value: i, done: false} : {value: null, done: true};\n * const iter = tf.data.iteratorFromFunction(func);\n * const ds = tf.data.datasetFromIteratorFn(iter);\n * await ds.forEachAsync(e => console.log(e));\n * ```\n */\nexport function datasetFromIteratorFn(\n iteratorFn: () => Promise>,\n size: number = null): Dataset {\n return new class extends Dataset {\n override size = size;\n\n /*\n * Provide a new stream of elements. Note this will also start new streams\n * from any underlying `Dataset`s.\n */\n async iterator(): Promise> {\n return iteratorFn();\n }\n }\n ();\n}\n\n/**\n * Create a `Dataset` from an array of elements.\n *\n * Create a Dataset from an array of objects:\n * ```js\n * const a = tf.data.array([{'item': 1}, {'item': 2}, {'item': 3}]);\n * await a.forEachAsync(e => console.log(e));\n * ```\n *\n * Create a Dataset from an array of numbers:\n * ```js\n * const a = tf.data.array([4, 5, 6]);\n * await a.forEachAsync(e => console.log(e));\n * ```\n * @param items An array of elements that will be parsed as items in a dataset.\n *\n * @doc {heading: 'Data', subheading: 'Creation', namespace: 'data'}\n */\nexport function array(items: T[]): Dataset {\n return datasetFromIteratorFn(\n async () => iteratorFromItems(items), items.length);\n}\n\n/**\n * Create a `Dataset` by zipping together an array, dict, or nested\n * structure of `Dataset`s (and perhaps additional constants).\n * The underlying datasets must provide elements in a consistent order such that\n * they correspond.\n *\n * The number of elements in the resulting dataset is the same as the size of\n * the smallest dataset in datasets.\n *\n * The nested structure of the `datasets` argument determines the\n * structure of elements in the resulting iterator.\n *\n * Note this means that, given an array of two datasets that produce dict\n * elements, the result is a dataset that produces elements that are arrays\n * of two dicts:\n *\n * Zip an array of datasets:\n * ```js\n * console.log('Zip two datasets of objects:');\n * const ds1 = tf.data.array([{a: 1}, {a: 2}, {a: 3}]);\n * const ds2 = tf.data.array([{b: 4}, {b: 5}, {b: 6}]);\n * const ds3 = tf.data.zip([ds1, ds2]);\n * await ds3.forEachAsync(e => console.log(JSON.stringify(e)));\n *\n * // If the goal is to merge the dicts in order to produce elements like\n * // {a: ..., b: ...}, this requires a second step such as:\n * console.log('Merge the objects:');\n * const ds4 = ds3.map(x => {return {a: x[0].a, b: x[1].b}});\n * await ds4.forEachAsync(e => console.log(e));\n * ```\n *\n * Zip a dict of datasets:\n * ```js\n * const a = tf.data.array([{a: 1}, {a: 2}, {a: 3}]);\n * const b = tf.data.array([{b: 4}, {b: 5}, {b: 6}]);\n * const c = tf.data.zip({c: a, d: b});\n * await c.forEachAsync(e => console.log(JSON.stringify(e)));\n * ```\n *\n * @doc {heading: 'Data', subheading: 'Operations', namespace: 'data'}\n */\nexport function zip(datasets: DatasetContainer):\n Dataset {\n // manually type-check the argument for JS users\n if (!isIterable(datasets)) {\n throw new Error('The argument to zip() must be an object or array.');\n }\n let size;\n if (Array.isArray(datasets)) {\n for (let i = 0; i < datasets.length; i++) {\n size = size == null ? (datasets[i] as Dataset).size :\n Math.min(size, (datasets[i] as Dataset).size);\n }\n } else if (datasets instanceof Object) {\n for (const ds in datasets) {\n size = size == null ? 
(datasets[ds] as Dataset).size :\n Math.min(size, (datasets[ds] as Dataset).size);\n }\n }\n return datasetFromIteratorFn(async () => {\n const streams = await deepMapAndAwaitAll(datasets, d => {\n if (d instanceof Dataset) {\n return {value: d.iterator(), recurse: false};\n } else if (isIterable(d)) {\n return {value: null, recurse: true};\n } else {\n throw new Error(\n 'Leaves of the structure passed to zip() must be Datasets, ' +\n 'not primitives.');\n }\n });\n return iteratorFromZipped(streams, ZipMismatchMode.SHORTEST);\n }, size);\n}\n\n/**\n * A zip function for use with deepZip, passed via the columnMajorBatch call.\n *\n * Accepts an array of identically-structured nested elements and either batches\n * them (if they are primitives, numeric arrays, or Tensors) or requests\n * recursion (if not).\n */\n// tslint:disable-next-line:no-any\nfunction deepBatchConcat(rows: any[]): DeepMapResult {\n if (rows === null) {\n return null;\n }\n\n // use the first item to decide whether to recurse or batch here.\n const exampleRow = rows[0];\n\n if (canTensorify(exampleRow)) {\n // rows is an array of primitives, Tensors, or arrays. Batch them.\n const value = batchConcat(rows);\n return {value, recurse: false};\n }\n\n // the example row is an object, so recurse into it.\n return {value: null, recurse: true};\n}\n\n/**\n * Assembles a list of same-shaped numbers, number arrays, or Tensors\n * into a single new Tensor where axis 0 is the batch dimension.\n */\nfunction batchConcat(arrays: T[]):\n tf.Tensor {\n if (arrays.length === 0) {\n // We can't return an empty Tensor because we don't know the element shape.\n throw new Error('Can\\'t make a batch of zero elements.');\n }\n\n if (arrays[0] instanceof tf.Tensor) {\n // Input is an array of Tensors\n return tf.stack(arrays as tf.Tensor[]);\n } else {\n // Input is a possibly-nested array of numbers.\n return tf.tensor(arrays as TensorLike);\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * =============================================================================\n */\n\nimport {TensorContainer, util} from '@tensorflow/tfjs-core';\nimport {Dataset} from '../dataset';\nimport {DataSource} from '../datasource';\nimport {LazyIterator} from '../iterators/lazy_iterator';\nimport {ColumnConfig, CSVConfig} from '../types';\nimport {TextLineDataset} from './text_line_dataset';\n\nconst CODE_QUOTE = '\"';\nconst STATE_OUT = Symbol('out');\nconst STATE_FIELD = Symbol('field');\nconst STATE_QUOTE = Symbol('quote');\nconst STATE_QUOTE_AFTER_QUOTE = Symbol('quoteafterquote');\nconst STATE_WITHIN_QUOTE_IN_QUOTE = Symbol('quoteinquote');\n\n/**\n * Represents a potentially large collection of delimited text records.\n *\n * The produced `TensorContainer`s each contain one key-value pair for\n * every column of the table. When a field is empty in the incoming data, the\n * resulting value is `undefined`, or throw error if it is required. 
Values\n * that can be parsed as numbers are emitted as type `number`, other values\n * are parsed as `string`.\n *\n * The results are not batched.\n *\n * @doc {heading: 'Data', subheading: 'Classes', namespace: 'data'}\n */\nexport class CSVDataset extends Dataset {\n base: TextLineDataset;\n private hasHeader = true;\n private fullColumnNames: string[] = null;\n private columnNamesValidated = false;\n private columnConfigs: {[key: string]: ColumnConfig} = null;\n private configuredColumnsOnly = false;\n private delimiter = ',';\n private delimWhitespace = false;\n\n /**\n * Returns column names of the csv dataset. If `configuredColumnsOnly` is\n * true, return column names in `columnConfigs`. If `configuredColumnsOnly` is\n * false and `columnNames` is provided, `columnNames`. If\n * `configuredColumnsOnly` is false and `columnNames` is not provided, return\n * all column names parsed from the csv file. For example usage please go to\n * `tf.data.csv`.\n *\n * @doc {heading: 'Data', subheading: 'Classes'}\n */\n async columnNames() {\n if (!this.columnNamesValidated) {\n await this.setColumnNames();\n }\n return this.configuredColumnsOnly ? Object.keys(this.columnConfigs) :\n this.fullColumnNames;\n }\n\n /* 1) If `columnNames` is provided as string[], use this string[] as output\n * keys in corresponding order. The length must match the number of inferred\n * columns if `hasHeader` is true .\n * 2) If `columnNames` is not provided, parse header line as `columnNames` if\n * hasHeader is true. If `hasHeader` is false, throw an error.\n * 3) If `columnConfigs` is provided, all the keys in `columnConfigs` must\n * exist in parsed `columnNames`.\n */\n private async setColumnNames() {\n const columnNamesFromFile = await this.maybeReadHeaderLine();\n if (!this.fullColumnNames && !columnNamesFromFile) {\n // Throw an error if columnNames is not provided and no header line.\n throw new Error(\n 'Column names must be provided if there is no header line.');\n } else if (this.fullColumnNames && columnNamesFromFile) {\n // Check provided columnNames match header line.\n util.assert(\n columnNamesFromFile.length === this.fullColumnNames.length,\n () => 'The length of provided columnNames (' +\n this.fullColumnNames.length.toString() +\n ') does not match the length of the header line read from ' +\n 'file (' + columnNamesFromFile.length.toString() + ').');\n }\n if (!this.fullColumnNames) {\n this.fullColumnNames = columnNamesFromFile;\n }\n // Check if there are duplicate column names.\n const counts: {[key: string]: number} = this.fullColumnNames.reduce(\n (countAcc: {[key: string]: number}, name) => {\n countAcc[name] = (countAcc[name] + 1) || 1;\n return countAcc;\n },\n {});\n const duplicateNames =\n Object.keys(counts).filter((name) => (counts[name] > 1));\n util.assert(\n duplicateNames.length === 0,\n () => 'Duplicate column names found: ' + duplicateNames.toString());\n // Check if keys in columnConfigs match columnNames.\n if (this.columnConfigs) {\n for (const key of Object.keys(this.columnConfigs)) {\n const index = this.fullColumnNames.indexOf(key);\n if (index === -1) {\n throw new Error(\n 'The key \"' + key +\n '\" provided in columnConfigs does not match any of the column ' +\n 'names (' + this.fullColumnNames.toString() + ').');\n }\n }\n }\n this.columnNamesValidated = true;\n }\n\n private async maybeReadHeaderLine() {\n if (this.hasHeader) {\n const iter = await this.base.iterator();\n const firstElement = await iter.next();\n if (firstElement.done) {\n throw new Error('No 
data was found for CSV parsing.');\n }\n const firstLine: string = firstElement.value;\n const headers = this.parseRow(firstLine, false);\n return headers;\n } else {\n return null;\n }\n }\n\n /**\n * Create a `CSVDataset`.\n *\n * @param input A `DataSource` providing a chunked, UTF8-encoded byte stream.\n * @param csvConfig (Optional) A CSVConfig object that contains configurations\n * of reading and decoding from CSV file(s).\n *\n * hasHeader: (Optional) A boolean value that indicates whether the first\n * row of provided CSV file is a header line with column names, and should\n * not be included in the data. Defaults to `true`.\n *\n * columnNames: (Optional) A list of strings that corresponds to\n * the CSV column names, in order. If provided, it ignores the column\n * names inferred from the header row. If not provided, infers the column\n * names from the first row of the records. If hasHeader is false and\n * columnNames is not provided, this method throws an error.\n *\n * columnConfigs: (Optional) A dictionary whose key is column names, value\n * is an object stating if this column is required, column's data type,\n * default value, and if this column is label. If provided, keys must\n * correspond to names provided in columnNames or inferred from the file\n * header lines. If isLabel is true any column, returns an array of two\n * items: the first item is a dict of features key/value pairs, the second\n * item is a dict of labels key/value pairs. If no feature is marked as\n * label, returns a dict of features only.\n *\n * configuredColumnsOnly (Optional) If true, only columns provided in\n * columnConfigs will be parsed and provided during iteration.\n *\n * delimiter (Optional) The string used to parse each line of the input\n * file. Defaults to `,`.\n */\n constructor(protected readonly input: DataSource, csvConfig?: CSVConfig) {\n super();\n this.base = new TextLineDataset(input);\n if (!csvConfig) {\n csvConfig = {};\n }\n this.hasHeader = csvConfig.hasHeader === false ? false : true;\n this.fullColumnNames = csvConfig.columnNames;\n this.columnConfigs = csvConfig.columnConfigs;\n this.configuredColumnsOnly = csvConfig.configuredColumnsOnly;\n if (csvConfig.delimWhitespace) {\n util.assert(\n csvConfig.delimiter == null,\n () =>\n 'Delimiter should not be provided when delimWhitespace is true.');\n this.delimWhitespace = true;\n this.delimiter = ' ';\n } else {\n this.delimiter = csvConfig.delimiter ? csvConfig.delimiter : ',';\n }\n }\n\n async iterator(): Promise> {\n if (!this.columnNamesValidated) {\n await this.setColumnNames();\n }\n let lines = await this.base.iterator();\n if (this.hasHeader) {\n // We previously read the first line to get the columnNames.\n // Now that we're providing data, skip it.\n lines = lines.skip(1);\n }\n return lines.map(x => this.makeDataElement(x));\n }\n\n makeDataElement(line: string): TensorContainer {\n const values = this.parseRow(line);\n const features: {[key: string]: TensorContainer} = {};\n const labels: {[key: string]: TensorContainer} = {};\n\n for (let i = 0; i < this.fullColumnNames.length; i++) {\n const key = this.fullColumnNames[i];\n const config = this.columnConfigs ? this.columnConfigs[key] : null;\n if (this.configuredColumnsOnly && !config) {\n // This column is not selected.\n continue;\n } else {\n const value = values[i];\n let parsedValue = null;\n if (value === '') {\n // If default value is provided, use it. 
If default value is not\n // provided, set as undefined.\n if (config && config.default !== undefined) {\n parsedValue = config.default;\n } else if (config && (config.required || config.isLabel)) {\n throw new Error(\n `Required column ${key} is empty in this line: ${line}`);\n } else {\n parsedValue = undefined;\n }\n } else {\n // A value is present, so parse it based on type\n const valueAsNum = Number(value);\n if (isNaN(valueAsNum)) {\n // The value is a string and this column is declared as boolean\n // in config, parse it as boolean.\n if (config && config.dtype === 'bool') {\n parsedValue = this.getBoolean(value);\n } else {\n // Set value as string\n parsedValue = value;\n }\n } else if (!config || !config.dtype) {\n // If this value is a number and no type config is provided, return\n // it as number.\n parsedValue = valueAsNum;\n } else {\n // If this value is a number and data type is provided, parse it\n // according to provided data type.\n switch (config.dtype) {\n case 'float32':\n parsedValue = valueAsNum;\n break;\n case 'int32':\n parsedValue = Math.floor(valueAsNum);\n break;\n case 'bool':\n parsedValue = this.getBoolean(value);\n break;\n default:\n parsedValue = valueAsNum;\n }\n }\n }\n // Check if this column is label.\n (config && config.isLabel) ? labels[key] = parsedValue :\n features[key] = parsedValue;\n }\n }\n // If label exists, return an object of features and labels as {xs:features,\n // ys:labels}, otherwise return features only.\n if (Object.keys(labels).length === 0) {\n return features;\n\n } else {\n return {xs: features, ys: labels};\n }\n }\n\n private getBoolean(value: string): number {\n if (value === '1' || value.toLowerCase() === 'true') {\n return 1;\n } else {\n return 0;\n }\n }\n\n // adapted from https://beta.observablehq.com/@mbostock/streaming-csv\n private parseRow(line: string, validateElementCount = true): string[] {\n const result: string[] = [];\n let readOffset = 0;\n const readLength = line.length;\n let currentState = STATE_OUT;\n // Goes through the line to parse quote.\n for (let i = 0; i < readLength; i++) {\n switch (currentState) {\n // Before enter a new field\n case STATE_OUT:\n switch (line.charAt(i)) {\n // Enter a quoted field\n case CODE_QUOTE:\n readOffset = i + 1;\n currentState = STATE_QUOTE;\n break;\n // Read an empty field\n case this.delimiter:\n readOffset = i + 1;\n // If delimiter is white space and configured to collapse\n // multiple white spaces, ignore this white space.\n if (this.delimiter === ' ' && this.delimWhitespace) {\n break;\n }\n result.push('');\n currentState = STATE_OUT;\n break;\n // Enter an unquoted field\n default:\n currentState = STATE_FIELD;\n readOffset = i;\n break;\n }\n break;\n // In an unquoted field\n case STATE_FIELD:\n switch (line.charAt(i)) {\n // Exit an unquoted field, add it to result\n case this.delimiter:\n result.push(line.substring(readOffset, i));\n currentState = STATE_OUT;\n readOffset = i + 1;\n break;\n default:\n }\n break;\n // In a quoted field\n case STATE_QUOTE:\n switch (line.charAt(i)) {\n // Read a quote after a quote\n case CODE_QUOTE:\n currentState = STATE_QUOTE_AFTER_QUOTE;\n break;\n default:\n }\n break;\n // This state means it's right after a second quote in a field\n case STATE_QUOTE_AFTER_QUOTE:\n switch (line.charAt(i)) {\n // Finished a quoted field\n case this.delimiter:\n result.push(line.substring(readOffset, i - 1));\n currentState = STATE_OUT;\n readOffset = i + 1;\n break;\n // Finished a quoted part in a quoted field\n case 
CODE_QUOTE:\n currentState = STATE_QUOTE;\n break;\n // In a quoted part in a quoted field\n default:\n currentState = STATE_WITHIN_QUOTE_IN_QUOTE;\n break;\n }\n break;\n case STATE_WITHIN_QUOTE_IN_QUOTE:\n switch (line.charAt(i)) {\n // Exit a quoted part in a quoted field\n case CODE_QUOTE:\n currentState = STATE_QUOTE;\n break;\n default:\n }\n break;\n default:\n }\n }\n // Adds last item based on if it is quoted.\n if (currentState === STATE_QUOTE_AFTER_QUOTE) {\n result.push(line.substring(readOffset, readLength - 1));\n } else {\n result.push(line.substring(readOffset));\n }\n // Check if each row has the same number of elements as column names.\n if (validateElementCount && result.length !== this.fullColumnNames.length) {\n throw new Error(`Invalid row in csv file. Should have ${\n this.fullColumnNames.length} elements in a row, but got ${result}`);\n }\n return result;\n }\n}\n\n// TODO(soergel): add more basic datasets for parity with tf.data\n// tf.data.FixedLengthRecordDataset()\n// tf.data.TFRecordDataset()\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * =============================================================================\n */\n\nimport {TensorContainer} from '@tensorflow/tfjs-core';\nimport {Dataset, datasetFromIteratorFn} from './dataset';\nimport {CSVDataset} from './datasets/csv_dataset';\nimport {iteratorFromFunction} from './iterators/lazy_iterator';\nimport {MicrophoneIterator} from './iterators/microphone_iterator';\nimport {WebcamIterator} from './iterators/webcam_iterator';\nimport {URLDataSource} from './sources/url_data_source';\nimport {CSVConfig, MicrophoneConfig, WebcamConfig} from './types';\n\n/**\n * Create a `CSVDataset` by reading and decoding CSV file(s) from provided URL\n * or local path if it's in Node environment.\n *\n * Note: If isLabel in columnConfigs is `true` for at least one column, the\n * element in returned `CSVDataset` will be an object of\n * `{xs:features, ys:labels}`: xs is a dict of features key/value pairs, ys\n * is a dict of labels key/value pairs. 
If no column is marked as label,\n * returns a dict of features only.\n *\n * ```js\n * const csvUrl =\n * 'https://storage.googleapis.com/tfjs-examples/multivariate-linear-regression/data/boston-housing-train.csv';\n *\n * async function run() {\n * // We want to predict the column \"medv\", which represents a median value of\n * // a home (in $1000s), so we mark it as a label.\n * const csvDataset = tf.data.csv(\n * csvUrl, {\n * columnConfigs: {\n * medv: {\n * isLabel: true\n * }\n * }\n * });\n *\n * // Number of features is the number of column names minus one for the label\n * // column.\n * const numOfFeatures = (await csvDataset.columnNames()).length - 1;\n *\n * // Prepare the Dataset for training.\n * const flattenedDataset =\n * csvDataset\n * .map(({xs, ys}) =>\n * {\n * // Convert xs(features) and ys(labels) from object form (keyed by\n * // column name) to array form.\n * return {xs:Object.values(xs), ys:Object.values(ys)};\n * })\n * .batch(10);\n *\n * // Define the model.\n * const model = tf.sequential();\n * model.add(tf.layers.dense({\n * inputShape: [numOfFeatures],\n * units: 1\n * }));\n * model.compile({\n * optimizer: tf.train.sgd(0.000001),\n * loss: 'meanSquaredError'\n * });\n *\n * // Fit the model using the prepared Dataset\n * return model.fitDataset(flattenedDataset, {\n * epochs: 10,\n * callbacks: {\n * onEpochEnd: async (epoch, logs) => {\n * console.log(epoch + ':' + logs.loss);\n * }\n * }\n * });\n * }\n *\n * await run();\n * ```\n *\n * @param source URL or local path to get CSV file. If it's a local path, it\n * must have prefix `file://` and it only works in node environment.\n * @param csvConfig (Optional) A CSVConfig object that contains configurations\n * of reading and decoding from CSV file(s).\n *\n * @doc {\n * heading: 'Data',\n * subheading: 'Creation',\n * namespace: 'data',\n * configParamIndices: [1]\n * }\n */\nexport function csv(\n source: RequestInfo, csvConfig: CSVConfig = {}): CSVDataset {\n return new CSVDataset(new URLDataSource(source), csvConfig);\n}\n\n/**\n * Create a `Dataset` that produces each element by calling a provided function.\n *\n * Note that repeated iterations over this `Dataset` may produce different\n * results, because the function will be called anew for each element of each\n * iteration.\n *\n * Also, beware that the sequence of calls to this function may be out of order\n * in time with respect to the logical order of the Dataset. This is due to the\n * asynchronous lazy nature of stream processing, and depends on downstream\n * transformations (e.g. .shuffle()). If the provided function is pure, this is\n * no problem, but if it is a closure over a mutable state (e.g., a traversal\n * pointer), then the order of the produced elements may be scrambled.\n *\n * ```js\n * let i = -1;\n * const func = () =>\n * ++i < 5 ? 
{value: i, done: false} : {value: null, done: true};\n * const ds = tf.data.func(func);\n * await ds.forEachAsync(e => console.log(e));\n * ```\n *\n * @param f A function that produces one data element on each call.\n */\nexport function func(\n f: () => IteratorResult| Promise>): Dataset {\n const iter = iteratorFromFunction(f);\n return datasetFromIteratorFn(async () => iter);\n}\n\n/**\n * Create a `Dataset` that produces each element from provided JavaScript\n * generator, which is a function*\n * (https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Iterators_and_Generators#Generator_functions),\n * or a function that returns an\n * iterator\n * (https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Iterators_and_Generators#Generator_functions).\n *\n * The returned iterator should have `.next()` function that returns element in\n * format of `{value: TensorContainer, done:boolean}`.\n *\n * Example of creating a dataset from an iterator factory:\n * ```js\n * function makeIterator() {\n * const numElements = 10;\n * let index = 0;\n *\n * const iterator = {\n * next: () => {\n * let result;\n * if (index < numElements) {\n * result = {value: index, done: false};\n * index++;\n * return result;\n * }\n * return {value: index, done: true};\n * }\n * };\n * return iterator;\n * }\n * const ds = tf.data.generator(makeIterator);\n * await ds.forEachAsync(e => console.log(e));\n * ```\n *\n * Example of creating a dataset from a generator:\n * ```js\n * function* dataGenerator() {\n * const numElements = 10;\n * let index = 0;\n * while (index < numElements) {\n * const x = index;\n * index++;\n * yield x;\n * }\n * }\n *\n * const ds = tf.data.generator(dataGenerator);\n * await ds.forEachAsync(e => console.log(e));\n * ```\n *\n * @param generator A JavaScript generator function that returns a JavaScript\n * iterator.\n *\n * @doc {\n * heading: 'Data',\n * subheading: 'Creation',\n * namespace: 'data',\n * configParamIndices: [1]\n * }\n */\nexport function generator(\n generator: () => Iterator| Promise>): Dataset {\n return datasetFromIteratorFn(async () => {\n const gen = await generator();\n return iteratorFromFunction(() => gen.next());\n });\n}\n\n/**\n * Create an iterator that generates `Tensor`s from webcam video stream. This\n * API only works in Browser environment when the device has webcam.\n *\n * Note: this code snippet only works when the device has a webcam. It will\n * request permission to open the webcam when running.\n * ```js\n * const videoElement = document.createElement('video');\n * videoElement.width = 100;\n * videoElement.height = 100;\n * const cam = await tf.data.webcam(videoElement);\n * const img = await cam.capture();\n * img.print();\n * cam.stop();\n * ```\n *\n * @param webcamVideoElement A `HTMLVideoElement` used to play video from\n * webcam. If this element is not provided, a hidden `HTMLVideoElement` will\n * be created. 
In that case, `resizeWidth` and `resizeHeight` must be\n * provided to set the generated tensor shape.\n * @param webcamConfig A `WebcamConfig` object that contains configurations of\n * reading and manipulating data from webcam video stream.\n *\n * @doc {\n * heading: 'Data',\n * subheading: 'Creation',\n * namespace: 'data',\n * ignoreCI: true\n * }\n */\nexport async function webcam(\n webcamVideoElement?: HTMLVideoElement,\n webcamConfig?: WebcamConfig): Promise {\n return WebcamIterator.create(webcamVideoElement, webcamConfig);\n}\n\n/**\n * Create an iterator that generates frequency-domain spectrogram `Tensor`s from\n * microphone audio stream with browser's native FFT. This API only works in\n * browser environment when the device has microphone.\n *\n * Note: this code snippet only works when the device has a microphone. It will\n * request permission to open the microphone when running.\n * ```js\n * const mic = await tf.data.microphone({\n * fftSize: 1024,\n * columnTruncateLength: 232,\n * numFramesPerSpectrogram: 43,\n * sampleRateHz:44100,\n * includeSpectrogram: true,\n * includeWaveform: true\n * });\n * const audioData = await mic.capture();\n * const spectrogramTensor = audioData.spectrogram;\n * spectrogramTensor.print();\n * const waveformTensor = audioData.waveform;\n * waveformTensor.print();\n * mic.stop();\n * ```\n *\n * @param microphoneConfig A `MicrophoneConfig` object that contains\n * configurations of reading audio data from microphone.\n *\n * @doc {\n * heading: 'Data',\n * subheading: 'Creation',\n * namespace: 'data',\n * ignoreCI: true\n * }\n */\nexport async function microphone(microphoneConfig?: MicrophoneConfig):\n Promise {\n return MicrophoneIterator.create(microphoneConfig);\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {TensorInfo, util} from '@tensorflow/tfjs-core';\n\nexport function assertNotComplex(\n tensor: TensorInfo|TensorInfo[], opName: string): void {\n if (!Array.isArray(tensor)) {\n tensor = [tensor];\n }\n tensor.forEach(t => {\n if (t != null) {\n util.assert(\n t.dtype !== 'complex64',\n () => `${\n opName} does not support complex64 tensors in the CPU backend.`);\n }\n });\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, BackendTimingInfo, buffer, DataStorage, DataType, engine, env, kernel_impls, KernelBackend, Rank, ShapeMap, Tensor, Tensor2D, TensorBuffer, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nconst whereImpl = kernel_impls.whereImpl;\nimport {assertNotComplex} from './cpu_util';\n\ninterface DataId {}\n\nexport interface TensorData {\n values?: backend_util.BackendValues;\n dtype: D;\n // For complex numbers, the real and imaginary parts are stored as their own\n // individual tensors, with a parent joining the two with the\n // complexTensorInfos field.\n complexTensorInfos?: {real: TensorInfo, imag: TensorInfo};\n // refCount keeps track of how many tensors reference it. Used for memory\n // management.\n refCount: number;\n}\n\nexport class MathBackendCPU extends KernelBackend {\n public blockSize = 48;\n\n data: DataStorage>;\n private firstUse = true;\n private static nextDataId = 0;\n private nextDataId(): number {\n return MathBackendCPU.nextDataId++;\n }\n\n constructor() {\n super();\n this.data = new DataStorage(this, engine());\n }\n\n override write(\n values: backend_util.BackendValues, shape: number[],\n dtype: DataType): DataId {\n if (this.firstUse) {\n this.firstUse = false;\n if (env().get('IS_NODE')) {\n backend_util.warn(\n '\\n============================\\n' +\n 'Hi, looks like you are running TensorFlow.js in ' +\n 'Node.js. To speed things up dramatically, install our node ' +\n 'backend, visit https://github.com/tensorflow/tfjs-node for more details. ' +\n '\\n============================');\n }\n }\n const dataId = {id: this.nextDataId()};\n\n this.data.set(dataId, {values, dtype, refCount: 1});\n\n return dataId;\n }\n\n /**\n * Create a data bucket in cpu backend.\n * @param shape Shape of the `TensorInfo`.\n * @param dtype DType of the `TensorInfo`.\n * @param values The value of the `TensorInfo` stored as a flattened array.\n */\n makeTensorInfo(\n shape: number[], dtype: DataType,\n values?: backend_util.BackendValues|string[]): TensorInfo {\n let outId;\n if (dtype === 'string' && values != null && values.length > 0 &&\n util.isString(values[0])) {\n const encodedValues =\n (values as unknown as string[]).map(d => util.encodeString(d));\n\n outId = this.write(encodedValues, shape, dtype);\n } else {\n outId = this.write(values as TypedArray, shape, dtype);\n }\n\n return {dataId: outId, shape, dtype};\n }\n\n /** Return refCount of a `TensorData`. */\n override refCount(dataId: DataId): number {\n if (this.data.has(dataId)) {\n const tensorData = this.data.get(dataId);\n return tensorData.refCount;\n }\n return 0;\n }\n\n /** Increase refCount of a `TensorData`. */\n override incRef(dataId: DataId): void {\n const tensorData = this.data.get(dataId);\n tensorData.refCount++;\n }\n\n /** Decrease refCount of a `TensorData`. 
*/\n decRef(dataId: DataId): void {\n if (this.data.has(dataId)) {\n const tensorData = this.data.get(dataId);\n tensorData.refCount--;\n }\n }\n\n override move(\n dataId: DataId, values: backend_util.BackendValues, shape: number[],\n dtype: DataType, refCount: number): void {\n this.data.set(dataId, {values, dtype, refCount});\n }\n\n override numDataIds(): number {\n return this.data.numDataIds();\n }\n\n override async read(dataId: DataId): Promise {\n return this.readSync(dataId);\n }\n override readSync(dataId: DataId): backend_util.BackendValues {\n const {dtype, complexTensorInfos} = this.data.get(dataId);\n\n if (dtype === 'complex64') {\n const realValues =\n this.readSync(complexTensorInfos.real.dataId) as Float32Array;\n const imagValues =\n this.readSync(complexTensorInfos.imag.dataId) as Float32Array;\n return backend_util.mergeRealAndImagArrays(realValues, imagValues);\n }\n return util.convertBackendValuesAndArrayBuffer(\n this.data.get(dataId).values, dtype);\n }\n\n bufferSync(t: TensorInfo):\n TensorBuffer {\n const data = this.readSync(t.dataId);\n if (t.dtype === 'string') {\n try {\n // Decode the bytes into string.\n const strings = (data as Uint8Array[]).map(d => util.decodeString(d));\n return buffer(t.shape as ShapeMap[R], t.dtype, strings) as\n TensorBuffer;\n } catch {\n throw new Error('Failed to decode encoded string bytes into utf-8');\n }\n }\n return buffer(t.shape as ShapeMap[R], t.dtype, data as TypedArray) as\n TensorBuffer;\n }\n\n makeOutput(\n values: backend_util.BackendValues, shape: number[], dtype: DataType): T {\n return engine().makeTensorFromTensorInfo(\n this.makeTensorInfo(shape, dtype, values), this) as T;\n }\n\n /**\n * Dispose the memory if the dataId has 0 refCount. Return true if the memory\n * is released or memory is not managed in this backend, false if memory is\n * not cleared.\n * @param dataId\n * @oaram force Optional, remove the data regardless of refCount\n */\n override disposeData(dataId: DataId, force = false): boolean {\n if (this.data.has(dataId)) {\n this.data.get(dataId).refCount--;\n if (!force && this.data.get(dataId).refCount > 0) {\n return false;\n }\n\n const {complexTensorInfos} = this.data.get(dataId);\n\n if (complexTensorInfos != null) {\n this.disposeData(complexTensorInfos.real.dataId, true);\n this.disposeData(complexTensorInfos.imag.dataId, true);\n }\n\n this.data.delete(dataId);\n }\n return true;\n }\n\n disposeIntermediateTensorInfo(tensorInfo: TensorInfo): void {\n this.disposeData(tensorInfo.dataId);\n }\n\n override async time(f: () => void): Promise {\n const start = util.now();\n f();\n const kernelMs = util.now() - start;\n return {kernelMs};\n }\n\n override memory() {\n return {\n // Unreliable due to automatic gc. The numbers above are cumulative.\n unreliable: true,\n reasons:\n ['The reported memory is an upper bound. Due to automatic garbage ' +\n 'collection, the true allocated memory may be less.']\n };\n }\n\n where(condition: Tensor): Tensor2D {\n assertNotComplex([condition], 'where');\n\n const condVals = this.readSync(condition.dataId) as TypedArray;\n return whereImpl(condition.shape, condVals);\n }\n\n override dispose() {}\n\n override floatPrecision(): 16|32 {\n return 32;\n }\n\n /** Returns the smallest representable number. */\n override epsilon(): number {\n return super.epsilon();\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, KernelFunc, TypedArray, UnaryInputs, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nimport {SimpleUnaryImpl, SimpleUnaryOperation} from './unary_types';\n\n/**\n * Template that creates a `KernelFunc` for unary ops.\n * @param name Kernel name.\n * @param op A `SimpleUnaryOperation` for the kernel.\n * @param dtype Optional. If set, the result has this dtype. Otherwise, the\n * result has the same dtype as the input. This is mainly used in certain\n * kernels that return bool type, such as isFinite, isInf, etc.\n */\nexport function unaryKernelFunc(\n name: string, op: SimpleUnaryOperation, dtype?: DataType): KernelFunc {\n return ({inputs, attrs, backend}) => {\n const {x} = inputs as UnaryInputs;\n assertNotComplex(x, name);\n if (x.dtype === 'string' || dtype === 'string') {\n throw new Error('unaryKernelFunc does not support string input/output');\n }\n\n const cpuBackend = backend as MathBackendCPU;\n const values = cpuBackend.data.get(x.dataId).values as TypedArray;\n const xSize = util.sizeFromShape(x.shape);\n const $dtype = dtype || x.dtype;\n const newValues = util.getArrayFromDType($dtype, xSize);\n for (let i = 0; i < xSize; ++i) {\n newValues[i] = op(values[i], attrs);\n }\n return cpuBackend.makeTensorInfo(x.shape, $dtype, newValues);\n };\n}\n\n/**\n * Template that creates a `KernelFunc` for unary ops from the given\n * `SimpleUnaryImpl`..\n * @param name Kernel name.\n * @param unaryImpl A `SimpleUnaryImpl` that implements the op.\n * @param dtype Optional. If set, the result has this dtype. Otherwise, the\n * result has the same dtype as the input. This is mainly used in certain\n * kernels that return bool type, such as isFinite, isInf, etc.\n */\nexport function unaryKernelFuncFromImpl(\n name: string, unaryImpl: SimpleUnaryImpl, dtype?: DataType): KernelFunc {\n return ({inputs, attrs, backend}) => {\n const {x} = inputs as UnaryInputs;\n assertNotComplex(x, name);\n if (x.dtype === 'string' || dtype === 'string') {\n throw new Error('unaryKernelFunc does not support string input/output');\n }\n\n const cpuBackend = backend as MathBackendCPU;\n const values = cpuBackend.data.get(x.dataId).values as TypedArray;\n const $dtype = dtype || x.dtype;\n const newValues = unaryImpl(values, $dtype, attrs);\n return cpuBackend.makeTensorInfo(x.shape, $dtype, newValues);\n };\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/*\n * base.ts contains all the exports from tfjs-backend-cpu\n * without auto-kernel registration\n */\nimport {registerBackend} from '@tensorflow/tfjs-core';\nimport {MathBackendCPU} from './backend_cpu';\nimport * as shared from './shared';\n\nexport {MathBackendCPU} from './backend_cpu';\nexport {version as version_cpu} from './version';\nexport {shared};\n\n// Side effects for default initialization of MathBackendCPU\nregisterBackend('cpu', () => new MathBackendCPU(), 1 /* priority */);\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Elu, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const elu =\n unaryKernelFunc(Elu, (xi) => xi >= 0 ? xi : (Math.exp(xi) - 1));\n\nexport const eluConfig: KernelConfig = {\n kernelName: Elu,\n backendName: 'cpu',\n kernelFunc: elu,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Identity, IdentityInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nexport function identity(\n args: {inputs: IdentityInputs, backend: MathBackendCPU}): TensorInfo {\n const {inputs, backend} = args;\n const {x} = inputs;\n\n backend.incRef(x.dataId);\n\n return {dataId: x.dataId, shape: x.shape, dtype: x.dtype};\n}\n\nexport const identityConfig: KernelConfig = {\n kernelName: Identity,\n backendName: 'cpu',\n kernelFunc: identity as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, LeakyRelu, LeakyReluAttrs, LeakyReluInputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function leakyRelu(args: {\n inputs: LeakyReluInputs,\n backend: MathBackendCPU,\n attrs: LeakyReluAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {alpha} = attrs;\n\n assertNotComplex([x], 'leakyRelu');\n\n const xSize = util.sizeFromShape(x.shape);\n const xVals = backend.data.get(x.dataId).values as TypedArray;\n const outVals = util.getTypedArrayFromDType('float32', xSize);\n\n for (let i = 0; i < xVals.length; i++) {\n outVals[i] = xVals[i] < 0 ? alpha * xVals[i] : xVals[i];\n }\n\n return backend.makeTensorInfo(x.shape, 'float32', outVals);\n}\n\nexport const leakyReluConfig: KernelConfig = {\n kernelName: LeakyRelu,\n backendName: 'cpu',\n kernelFunc: leakyRelu as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, DataType, DataValues, NumericDataType, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {SimpleBinaryKernelImpl, SimpleBinaryOperation} from './binary_types';\n\n/**\n * Template that creates implementation for binary ops. 
Supports broadcast.\n */\nexport function createSimpleBinaryKernelImpl(op: SimpleBinaryOperation):\n SimpleBinaryKernelImpl {\n return (aShape: number[], bShape: number[], aVals: DataValues,\n bVals: DataValues, dtype: DataType): [TypedArray, number[]] => {\n const newShape = backend_util.assertAndGetBroadcastShape(aShape, bShape);\n\n const resultRank = newShape.length;\n const resultStrides = util.computeStrides(newShape);\n const resultSize = util.sizeFromShape(newShape);\n\n const result =\n util.getTypedArrayFromDType(dtype as NumericDataType, resultSize);\n\n const aRank = aShape.length;\n const bRank = bShape.length;\n\n const aStrides = util.computeStrides(aShape);\n const bStrides = util.computeStrides(bShape);\n\n const aBroadcastDims = backend_util.getBroadcastDims(aShape, newShape);\n const bBroadcastDims = backend_util.getBroadcastDims(bShape, newShape);\n\n if (aBroadcastDims.length + bBroadcastDims.length === 0) {\n for (let i = 0; i < result.length; ++i) {\n result[i] = op(aVals[i % aVals.length], bVals[i % bVals.length]);\n }\n } else {\n for (let i = 0; i < result.length; ++i) {\n const loc = util.indexToLoc(i, resultRank, resultStrides);\n\n const aLoc = loc.slice(-aRank);\n aBroadcastDims.forEach(d => aLoc[d] = 0);\n const aIndex = util.locToIndex(aLoc, aRank, aStrides);\n\n const bLoc = loc.slice(-bRank);\n bBroadcastDims.forEach(d => bLoc[d] = 0);\n const bIndex = util.locToIndex(bLoc, bRank, bStrides);\n\n result[i] = op(aVals[aIndex], bVals[bIndex]);\n }\n }\n\n return [result, newShape];\n };\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Prelu, PreluInputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\n\nconst preluImpl = createSimpleBinaryKernelImpl(\n (xValue: number, aValue: number) => xValue < 0 ? aValue * xValue : xValue);\n\nexport function prelu(args: {inputs: PreluInputs, backend: MathBackendCPU}):\n TensorInfo {\n const {inputs, backend} = args;\n const {x, alpha} = inputs;\n\n assertNotComplex([x, alpha], 'prelu');\n\n const aVals = backend.data.get(x.dataId).values as TypedArray;\n const bVals = backend.data.get(alpha.dataId).values as TypedArray;\n\n const [resultData, resultShape] =\n preluImpl(x.shape, alpha.shape, aVals, bVals, 'float32');\n\n return backend.makeTensorInfo(resultShape, 'float32', resultData);\n}\n\nexport const preluConfig: KernelConfig = {\n kernelName: Prelu,\n backendName: 'cpu',\n kernelFunc: prelu,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Relu} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const relu = unaryKernelFunc(Relu, (xi) => Math.max(0, xi));\n\nexport const reluConfig: KernelConfig = {\n kernelName: Relu,\n backendName: 'cpu',\n kernelFunc: relu,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Relu6} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const relu6 =\n unaryKernelFunc(Relu6, (xi) => Math.min(Math.max(0, xi), 6));\n\nexport const relu6Config: KernelConfig = {\n kernelName: Relu6,\n backendName: 'cpu',\n kernelFunc: relu6,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {NumericDataType, util} from '@tensorflow/tfjs-core';\n\nimport {SimpleUnaryImpl, SimpleUnaryOperation} from './unary_types';\n\n/**\n * Template that creates implementation for unary op.\n */\nexport function createSimpleUnaryImpl(op: SimpleUnaryOperation):\n SimpleUnaryImpl {\n return (values, dtype, attrs) => {\n const newValues =\n util.getTypedArrayFromDType(dtype as NumericDataType, values.length);\n for (let i = 0; i < values.length; ++i) {\n newValues[i] = op(values[i], attrs);\n }\n return newValues;\n };\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Sigmoid} from '@tensorflow/tfjs-core';\n\nimport {createSimpleUnaryImpl} from '../utils/unary_impl';\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const sigmoidImpl =\n createSimpleUnaryImpl((xi) => 1 / (1 + Math.exp(-xi)));\nexport const sigmoid =\n unaryKernelFunc(Sigmoid, (xi) => 1 / (1 + Math.exp(-xi)));\n\nexport const sigmoidConfig: KernelConfig = {\n kernelName: Sigmoid,\n backendName: 'cpu',\n kernelFunc: sigmoid,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {_FusedMatMul, _FusedMatMulAttrs, _FusedMatMulInputs, backend_util, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {elu} from '../kernels/Elu';\nimport {identity} from '../kernels/Identity';\nimport {leakyRelu} from '../kernels/LeakyRelu';\nimport {prelu} from '../kernels/Prelu';\nimport {relu} from '../kernels/Relu';\nimport {relu6} from '../kernels/Relu6';\nimport {sigmoid} from '../kernels/Sigmoid';\n\nexport function applyActivation(\n backend: MathBackendCPU, x: TensorInfo, activation: backend_util.Activation,\n preluActivationWeights?: TensorInfo, leakyreluAlpha?: number): TensorInfo {\n if (activation === 'linear') {\n return identity({inputs: {x}, backend});\n } else if (activation === 'relu') {\n return relu({inputs: {x}, backend}) as TensorInfo;\n } else if (activation === 'elu') {\n return elu({inputs: {x}, backend}) as TensorInfo;\n } else if (activation === 'relu6') {\n return relu6({inputs: {x}, backend}) as TensorInfo;\n } else if (activation === 'prelu') {\n return prelu({inputs: {x, alpha: preluActivationWeights}, backend});\n } else if (activation === 'leakyrelu') {\n return leakyRelu({inputs: {x}, backend, attrs: {alpha: leakyreluAlpha}});\n } else if (activation === 'sigmoid') {\n return sigmoid({inputs: {x}, backend}) as TensorInfo;\n }\n throw new Error(\n `Activation ${activation} has not been implemented for the CPU backend.`);\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Complex, ComplexInputs, KernelConfig, KernelFunc, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nexport function complex(args: {inputs: ComplexInputs, backend: MathBackendCPU}):\n TensorInfo {\n const {inputs, backend} = args;\n const {real, imag} = inputs;\n\n const realVals = backend.data.get(real.dataId).values as TypedArray;\n const imagVals = backend.data.get(imag.dataId).values as TypedArray;\n\n const complexInfo = backend.makeTensorInfo(real.shape, 'complex64');\n\n const complex = backend.data.get(complexInfo.dataId);\n\n // The complex tensor owns the underlying real and imag tensorInfos, only the\n // complex tensor tracks refCount, when complexData is disposed the\n // underlying tensorData will be disposed.\n complex.complexTensorInfos = {\n real: backend.makeTensorInfo(real.shape, 'float32', realVals),\n imag: backend.makeTensorInfo(imag.shape, 'float32', imagVals)\n };\n\n return complexInfo;\n}\n\nexport const complexConfig: KernelConfig = {\n kernelName: Complex,\n backendName: 'cpu',\n kernelFunc: complex as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, TensorInfo, util} from '@tensorflow/tfjs-core';\nimport {MathBackendCPU} from '../backend_cpu';\nimport {complex} from '../kernels/Complex';\n\n/**\n * Generates a tensorInfo with all zeros value.\n * @param backend cpu backend.\n * @param shape Shape for the zeros tensor.\n * @param dtype Optional. If set, the result has this dtype.\n */\nexport function zeros(\n backend: MathBackendCPU, shape: number[],\n dtype: DataType = 'float32'): TensorInfo {\n if (dtype === 'complex64') {\n const real = zeros(backend, shape, 'float32');\n const imag = zeros(backend, shape, 'float32');\n\n return complex({inputs: {real, imag}, backend});\n }\n\n const values = util.makeZerosTypedArray(util.sizeFromShape(shape), dtype);\n\n return backend.makeTensorInfo(shape, dtype, values);\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Real, RealInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nexport function real(args: {inputs: RealInputs, backend: MathBackendCPU}):\n TensorInfo {\n const {inputs, backend} = args;\n const {input} = inputs;\n\n const real = backend.data.get(input.dataId).complexTensorInfos.real;\n const realVal = backend.data.get(real.dataId).values;\n\n // When complex tensor is disposed, its underlying parts will be disposed too.\n // Make new tensor out of the real value of the complex. This makes sure the\n // value is still accessible even if complex tensor is disposed.\n return backend.makeTensorInfo(real.shape, real.dtype, realVal);\n}\n\nexport const realConfig: KernelConfig = {\n kernelName: Real,\n backendName: 'cpu',\n kernelFunc: real as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {Cast, CastAttrs, CastInputs, DataType, KernelConfig, KernelFunc, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {zeros} from '../utils/zeros_impl';\n\nimport {complex} from './Complex';\nimport {identity} from './Identity';\nimport {real} from './Real';\n\nexport function castImpl(\n values: TypedArray, shape: number[], inputType: DataType,\n dtype: DataType): [number[], DataType, TypedArray] {\n if (dtype === 'int32') {\n const resultValues = Int32Array.from(values);\n return [shape, 'int32', resultValues];\n }\n\n if (dtype === 'bool') {\n // This is essentially the result of notEqual(x, 0). We avoid using\n // kernel notEqual to avoid circular dependency, i.e. binary_utils ->\n // cast -> notEqual -> binary_utils.\n const zero = util.toTypedArray([0], inputType);\n\n const [resultData, resultShape] = createSimpleBinaryKernelImpl(\n (a, b) => (a !== b) ? 
1 : 0)(shape, [], values, zero, 'bool');\n\n return [resultShape, 'bool', resultData];\n }\n throw new Error(`Error in Cast: failed to cast ${inputType} to ${dtype}`);\n}\n\nexport function cast(\n args: {inputs: CastInputs, backend: MathBackendCPU, attrs: CastAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {dtype} = attrs;\n\n // Casting to complex64.\n if (dtype === 'complex64') {\n if (x.dtype === 'complex64') {\n return identity({inputs: {x}, backend});\n }\n\n const zerosTensorInfo = zeros(backend, x.shape, x.dtype);\n const floatX = cast({inputs: {x}, backend, attrs: {dtype: 'float32'}});\n\n const result =\n complex({inputs: {real: floatX, imag: zerosTensorInfo}, backend});\n\n backend.disposeIntermediateTensorInfo(zerosTensorInfo);\n backend.disposeIntermediateTensorInfo(floatX);\n\n return result;\n }\n\n // Casting from complex64\n if (x.dtype === 'complex64') {\n const realPart = real({inputs: {input: x}, backend});\n const result = cast({inputs: {x: realPart}, backend, attrs: {dtype}});\n\n backend.disposeIntermediateTensorInfo(realPart);\n\n return result;\n }\n\n if (!util.hasEncodingLoss(x.dtype, dtype)) {\n // We don't change the underlying data, since we cast to higher\n // precision.\n const result = identity({inputs: {x}, backend});\n return {dataId: result.dataId, shape: result.shape, dtype};\n }\n\n const values = backend.data.get(x.dataId).values as TypedArray;\n const [resultShape, resultType, resultData] =\n castImpl(values, x.shape, x.dtype, dtype);\n return backend.makeTensorInfo(resultShape, resultType, resultData);\n}\n\nexport const castConfig: KernelConfig = {\n kernelName: Cast,\n backendName: 'cpu',\n kernelFunc: cast as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, BinaryInputs, DataType, KernelFunc, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {cast} from '../kernels/Cast';\nimport {complex} from '../kernels/Complex';\n\nimport {ComplexBinaryKernelImpl, ComplexBinaryOperation, SimpleBinaryKernelImpl} from './binary_types';\n\n/**\n * Template that creates a `KernelFunc` for binary ops.\n * @param name Kernel name.\n * @param binaryKernelImpl A `SimpleBinaryKernelImpl` for the kernel.\n * @param binaryKernelComplexImpl Optional. If exists, represents a\n * `ComplexBinaryKernelImpl` for the kernel, will be used when input dtype\n * is `complex64`.\n * @param dtype Optional. If set, the result has this dtype. Otherwise, the\n * result has the same dtype as the first input. 
This is mainly used in\n * comparison kernels, such as Equal, Less, Greater, etc.\n */\nexport function binaryKernelFunc(\n name: string, simpleImpl: SimpleBinaryKernelImpl,\n complexImpl?: ComplexBinaryKernelImpl, dtype?: DataType): KernelFunc {\n if (complexImpl == null) {\n return ({inputs, backend}) => {\n const {a, b} = inputs as BinaryInputs;\n const cpuBackend = backend as MathBackendCPU;\n\n assertNotComplex([a, b], name);\n\n const aVals = cpuBackend.data.get(a.dataId).values as TypedArray;\n const bVals = cpuBackend.data.get(b.dataId).values as TypedArray;\n\n const decodedAVals = a.dtype === 'string' ?\n // tslint:disable-next-line: no-any\n backend_util.fromUint8ToStringArray(aVals as any as Uint8Array[]) :\n aVals;\n const decodedBVals = a.dtype === 'string' ?\n // tslint:disable-next-line: no-any\n backend_util.fromUint8ToStringArray(bVals as any as Uint8Array[]) :\n bVals;\n const $dtype = dtype || a.dtype;\n\n const [resultData, resultShape] =\n simpleImpl(a.shape, b.shape, decodedAVals, decodedBVals, $dtype);\n\n return cpuBackend.makeTensorInfo(resultShape, $dtype, resultData);\n };\n }\n\n return ({inputs, backend}) => {\n const {a, b} = inputs as BinaryInputs;\n const cpuBackend = backend as MathBackendCPU;\n\n if (a.dtype === 'complex64' || b.dtype === 'complex64') {\n const $aComplex = cast(\n {inputs: {x: a}, backend: cpuBackend, attrs: {dtype: 'complex64'}});\n\n const $aComplexVals = cpuBackend.data.get($aComplex.dataId);\n\n const aReal = $aComplexVals.complexTensorInfos.real;\n const aImag = $aComplexVals.complexTensorInfos.imag;\n\n const aRealVals =\n cpuBackend.data.get(aReal.dataId).values as Float32Array;\n const aImagVals =\n cpuBackend.data.get(aImag.dataId).values as Float32Array;\n\n const $bComplex = cast(\n {inputs: {x: b}, backend: cpuBackend, attrs: {dtype: 'complex64'}});\n\n const $bComplexVals = cpuBackend.data.get($bComplex.dataId);\n\n const bReal = $bComplexVals.complexTensorInfos.real;\n const bImag = $bComplexVals.complexTensorInfos.imag;\n\n const bRealVals =\n cpuBackend.data.get(bReal.dataId).values as Float32Array;\n const bImagVals =\n cpuBackend.data.get(bImag.dataId).values as Float32Array;\n\n const [resultRealData, resultImagData, resultShape] = complexImpl(\n a.shape, b.shape, aRealVals, aImagVals, bRealVals, bImagVals);\n\n const resultReal =\n cpuBackend.makeTensorInfo(resultShape, 'float32', resultRealData);\n\n const resultImag =\n cpuBackend.makeTensorInfo(resultShape, 'float32', resultImagData);\n\n const result = complex(\n {inputs: {real: resultReal, imag: resultImag}, backend: cpuBackend});\n\n cpuBackend.disposeIntermediateTensorInfo($aComplex);\n cpuBackend.disposeIntermediateTensorInfo($bComplex);\n cpuBackend.disposeIntermediateTensorInfo(resultReal);\n cpuBackend.disposeIntermediateTensorInfo(resultImag);\n\n return result;\n } else {\n const aVals = cpuBackend.data.get(a.dataId).values as TypedArray;\n const bVals = cpuBackend.data.get(b.dataId).values as TypedArray;\n\n const $dtype = dtype || a.dtype;\n\n const [resultData, resultShape] =\n simpleImpl(a.shape, b.shape, aVals, bVals, $dtype);\n\n return cpuBackend.makeTensorInfo(resultShape, $dtype, resultData);\n }\n };\n}\n\n/**\n * Template that creates the complex type implementation for binary ops.\n * Supports broadcast.\n */\nexport function createComplexBinaryKernelImpl(op: ComplexBinaryOperation):\n ComplexBinaryKernelImpl {\n return (aShape: number[], bShape: number[], aRealVals: Float32Array,\n aImagVals: Float32Array, bRealVals: Float32Array,\n bImagVals: 
Float32Array): [TypedArray, TypedArray, number[]] => {\n const resultShape = backend_util.assertAndGetBroadcastShape(aShape, bShape);\n const resultSize = util.sizeFromShape(resultShape);\n const resultRank = resultShape.length;\n const resultStrides = util.computeStrides(resultShape);\n\n const resultRealVals = util.getTypedArrayFromDType('float32', resultSize);\n const resultImagVals = util.getTypedArrayFromDType('float32', resultSize);\n\n const aBroadcastDims = backend_util.getBroadcastDims(aShape, resultShape);\n const bBroadcastDims = backend_util.getBroadcastDims(bShape, resultShape);\n\n const aVals = backend_util.mergeRealAndImagArrays(aRealVals, aImagVals);\n const bVals = backend_util.mergeRealAndImagArrays(bRealVals, bImagVals);\n\n const aRank = aShape.length;\n const aStrides = util.computeStrides(aShape);\n\n const bRank = bShape.length;\n const bStrides = util.computeStrides(bShape);\n\n if (aBroadcastDims.length + bBroadcastDims.length === 0) {\n for (let i = 0; i < resultRealVals.length; i++) {\n const aIdx = i % aVals.length;\n const bIdx = i % bVals.length;\n\n const result =\n op(aVals[aIdx * 2], aVals[aIdx * 2 + 1], bVals[bIdx * 2],\n bVals[bIdx * 2 + 1]);\n\n resultRealVals[i] = result.real;\n resultImagVals[i] = result.imag;\n }\n } else {\n for (let i = 0; i < resultRealVals.length; i++) {\n const loc = util.indexToLoc(i, resultRank, resultStrides);\n\n const aLoc = loc.slice(-aRank);\n aBroadcastDims.forEach(d => aLoc[d] = 0);\n const aIndex = util.locToIndex(aLoc, aRank, aStrides);\n\n const bLoc = loc.slice(-bRank);\n bBroadcastDims.forEach(d => bLoc[d] = 0);\n const bIndex = util.locToIndex(bLoc, bRank, bStrides);\n\n const opResult =\n op(aVals[aIndex * 2], aVals[aIndex * 2 + 1], bVals[bIndex * 2],\n bVals[bIndex * 2 + 1]);\n\n resultRealVals[i] = opResult.real;\n resultImagVals[i] = opResult.imag;\n }\n }\n return [resultRealVals, resultImagVals, resultShape];\n };\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Add, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc, createComplexBinaryKernelImpl} from '../utils/binary_utils';\n\nexport const addImpl =\n createSimpleBinaryKernelImpl(((a: number, b: number) => a + b));\nexport const addComplexImpl =\n createComplexBinaryKernelImpl(((aReal, aImag, bReal, bImag) => {\n return {real: aReal + bReal, imag: aImag + bImag};\n }));\n\nexport const add = binaryKernelFunc(Add, addImpl, addComplexImpl);\n\nexport const addConfig: KernelConfig = {\n kernelName: Add,\n backendName: 'cpu',\n kernelFunc: add\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Reshape, ReshapeAttrs, ReshapeInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nexport function reshape(\n args:\n {inputs: ReshapeInputs, backend: MathBackendCPU, attrs: ReshapeAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {shape} = attrs;\n\n const xSize = util.sizeFromShape(x.shape);\n const $shape = util.inferFromImplicitShape(shape, xSize);\n const $xSize = util.sizeFromShape($shape);\n\n util.assert(\n xSize === $xSize,\n () => `The new shape (${$shape}) has ${$xSize} elements and the old ` +\n `shape (${x.shape}) has ${xSize} elements. The new shape and old ` +\n `shape must have the same number of elements.`);\n\n backend.incRef(x.dataId);\n\n const xData = backend.data.get(x.dataId);\n\n if (xData.complexTensorInfos != null) {\n const real = xData.complexTensorInfos.real;\n const imag = xData.complexTensorInfos.imag;\n\n real.shape = $shape;\n imag.shape = $shape;\n }\n\n return {dataId: x.dataId, shape: $shape, dtype: x.dtype};\n}\n\nexport const reshapeConfig: KernelConfig = {\n kernelName: Reshape,\n backendName: 'cpu',\n kernelFunc: reshape as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {BatchMatMul, BatchMatMulAttrs, BatchMatMulInputs, broadcast_util, buffer, KernelConfig, KernelFunc, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nimport {reshape} from './Reshape';\n\nexport function batchMatMul(args: {\n inputs: BatchMatMulInputs,\n attrs: BatchMatMulAttrs,\n backend: MathBackendCPU\n}) {\n const {inputs, backend, attrs} = args;\n const {a, b} = inputs;\n const {transposeA, transposeB} = attrs;\n\n assertNotComplex([a, b], 'matMul');\n\n const aRank = a.shape.length;\n const bRank = b.shape.length;\n\n const innerShapeA = transposeA ? a.shape[aRank - 2] : a.shape[aRank - 1];\n const innerShapeB = transposeB ? b.shape[bRank - 1] : b.shape[bRank - 2];\n\n const outerShapeA = transposeA ? a.shape[aRank - 1] : a.shape[aRank - 2];\n const outerShapeB = transposeB ? 
b.shape[bRank - 2] : b.shape[bRank - 1];\n\n const outerDimsA = a.shape.slice(0, -2);\n const outerDimsB = b.shape.slice(0, -2);\n\n const batchDimA = util.sizeFromShape(outerDimsA);\n const batchDimB = util.sizeFromShape(outerDimsB);\n\n const outShapeOuterDims = broadcast_util.assertAndGetBroadcastShape(\n a.shape.slice(0, -2), b.shape.slice(0, -2));\n const outShape = outShapeOuterDims.concat([outerShapeA, outerShapeB]);\n\n util.assert(\n innerShapeA === innerShapeB,\n () => `Error in matMul: inner shapes (${innerShapeA}) and (` +\n `${innerShapeB}) of Tensors with shapes ${a.shape} and ` +\n `${b.shape} and transposeA=${transposeA}` +\n ` and transposeB=${transposeB} must match.`);\n\n const a3dShape = transposeA ? [batchDimA, innerShapeA, outerShapeA] :\n [batchDimA, outerShapeA, innerShapeA];\n const b3dShape = transposeB ? [batchDimB, outerShapeB, innerShapeB] :\n [batchDimB, innerShapeB, outerShapeB];\n\n // The rest of the implementation is designed to operate on rank-3 tensors\n const a3d = reshape({inputs: {x: a}, backend, attrs: {shape: a3dShape}});\n const b3d = reshape({inputs: {x: b}, backend, attrs: {shape: b3dShape}});\n\n const sharedDim = transposeA ? a3d.shape[1] : a3d.shape[2];\n const leftDim = transposeA ? a3d.shape[2] : a3d.shape[1];\n const rightDim = transposeB ? b3d.shape[1] : b3d.shape[2];\n const batchDim = Math.max(batchDimA, batchDimB);\n\n const a3dValues = backend.data.get(a3d.dataId).values as TypedArray;\n const b3dValues = backend.data.get(b3d.dataId).values as TypedArray;\n\n const a3dStrides = util.computeStrides(a3d.shape);\n const b3dStrides = util.computeStrides(b3d.shape);\n\n const [aBatch, aOuterStep, aInnerStep] = transposeA ?\n [a3dStrides[0], 1, a3dStrides[1]] :\n [a3dStrides[0], a3dStrides[1], 1];\n const [bInnerStep, bOuterStep, bBatch] = transposeB ?\n [1, b3dStrides[1], b3dStrides[0]] :\n [b3dStrides[1], 1, b3dStrides[0]];\n\n const size = leftDim * rightDim;\n const result = buffer([batchDim, leftDim, rightDim], a3d.dtype);\n\n const resVals = result.values as TypedArray;\n const blockSize = backend.blockSize;\n\n for (let bi = 0; bi < batchDim; bi++) {\n const batchIndexA = bi % batchDimA;\n const batchIndexB = bi % batchDimB;\n for (let i0 = 0; i0 < leftDim; i0 += blockSize) {\n // for when blockSize doesn't evenly divide the input\n const iBlock = Math.min(i0 + blockSize, leftDim);\n for (let j0 = 0; j0 < rightDim; j0 += blockSize) {\n const jBlock = Math.min(j0 + blockSize, rightDim);\n for (let k0 = 0; k0 < sharedDim; k0 += blockSize) {\n const kBlock = Math.min(k0 + blockSize, sharedDim);\n\n for (let i = i0; i < iBlock; i++) {\n for (let j = j0; j < jBlock; j++) {\n let sum = 0.0;\n\n for (let k = k0; k < kBlock; k++) {\n const aVal =\n // tslint:disable-next-line: max-line-length\n a3dValues[batchIndexA * aBatch + i * aOuterStep + k * aInnerStep];\n const bVal =\n // tslint:disable-next-line: max-line-length\n b3dValues[k * bInnerStep + j * bOuterStep + batchIndexB * bBatch];\n sum += aVal * bVal;\n }\n resVals[bi * size + (i * rightDim + j)] += sum;\n }\n }\n }\n }\n }\n }\n\n backend.disposeIntermediateTensorInfo(a3d);\n backend.disposeIntermediateTensorInfo(b3d);\n\n // set correct shape on output.\n return backend.makeTensorInfo(\n outShape, result.dtype, result.values as TypedArray);\n}\n\nexport const batchMatMulConfig: KernelConfig = {\n kernelName: BatchMatMul,\n backendName: 'cpu',\n kernelFunc: batchMatMul as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {_FusedMatMul, _FusedMatMulAttrs, _FusedMatMulInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {applyActivation} from '../utils/fused_utils';\n\nimport {add} from './Add';\nimport {batchMatMul} from './BatchMatMul';\n\nexport function _fusedMatMul(args: {\n inputs: _FusedMatMulInputs,\n attrs: _FusedMatMulAttrs,\n backend: MathBackendCPU\n}) {\n const {inputs, backend, attrs} = args;\n const {a, b, bias, preluActivationWeights} = inputs;\n const {transposeA, transposeB, activation, leakyreluAlpha} = attrs;\n\n let current;\n let addRes;\n let activationRes;\n\n const intermediates: TensorInfo[] = [];\n\n const matMulRes =\n batchMatMul({inputs: {a, b}, attrs: {transposeA, transposeB}, backend});\n current = matMulRes;\n\n if (bias) {\n addRes = add({inputs: {a: current, b: bias}, backend}) as TensorInfo;\n intermediates.push(current);\n current = addRes;\n }\n if (activation) {\n activationRes = applyActivation(\n backend, current, activation, preluActivationWeights, leakyreluAlpha);\n intermediates.push(current);\n current = activationRes;\n }\n\n for (const i of intermediates) {\n backend.disposeIntermediateTensorInfo(i);\n }\n\n return current;\n}\n\nexport const _fusedMatMulConfig: KernelConfig = {\n kernelName: _FusedMatMul,\n backendName: 'cpu',\n kernelFunc: _fusedMatMul as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Abs, AbsInputs, KernelConfig, KernelFunc, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function simpleAbsImpl(vals: TypedArray): Float32Array {\n const resultValues = new Float32Array(vals.length);\n for (let i = 0; i < vals.length; ++i) {\n resultValues[i] = Math.abs(vals[i]);\n }\n return resultValues;\n}\n\nexport const abs = (args: {inputs: AbsInputs, backend: MathBackendCPU}) => {\n const {x} = args.inputs;\n const cpuBackend = args.backend;\n\n assertNotComplex(x, 'abs');\n\n let resultValues = new Float32Array(util.sizeFromShape(x.shape));\n const values = cpuBackend.data.get(x.dataId).values as TypedArray;\n resultValues = simpleAbsImpl(values);\n\n return cpuBackend.makeOutput(resultValues, x.shape, x.dtype);\n};\n\nexport const absConfig: KernelConfig = {\n kernelName: Abs,\n backendName: 'cpu',\n kernelFunc: abs as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Acos, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const acos = unaryKernelFunc(Acos, (xi) => Math.acos(xi));\n\nexport const acosConfig: KernelConfig = {\n kernelName: Acos,\n backendName: 'cpu',\n kernelFunc: acos,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Acosh, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const acosh = unaryKernelFunc(Acosh, (xi) => Math.acosh(xi));\n\nexport const acoshConfig: KernelConfig = {\n kernelName: Acosh,\n backendName: 'cpu',\n kernelFunc: acosh,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {AddN, AddNInputs, buffer, KernelConfig, KernelFunc, Tensor, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function addN(args: {inputs: AddNInputs, backend: MathBackendCPU}):\n TensorInfo {\n const {inputs, backend} = args;\n const tensors = inputs as Tensor[];\n\n assertNotComplex(inputs, 'addN');\n\n const vals =\n tensors.map(t => backend.data.get(t.dataId).values as TypedArray);\n const outBuf = buffer(tensors[0].shape, tensors[0].dtype as 'float32');\n const outVals = outBuf.values;\n for (let i = 0; i < tensors.length; i++) {\n const currVals = vals[i];\n for (let j = 0; j < outVals.length; j++) {\n outVals[j] += currVals[j];\n }\n }\n\n return backend.makeTensorInfo(outBuf.shape, outBuf.dtype, outBuf.values);\n}\n\nexport const addNConfig: KernelConfig = {\n kernelName: AddN,\n backendName: 'cpu',\n kernelFunc: addN as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, NumericDataType, TypedArray} from '@tensorflow/tfjs-core';\nimport {util} from '@tensorflow/tfjs-core';\n\nexport function transposeImpl(\n xVals: TypedArray, xShape: number[], dtype: DataType, perm: number[],\n newShape: number[]): TypedArray {\n const xRank = xShape.length;\n const xSize = util.sizeFromShape(xShape);\n const xStrides = util.computeStrides(xShape);\n const newStrides = util.computeStrides(newShape);\n\n const result = util.getTypedArrayFromDType(\n dtype as NumericDataType, util.sizeFromShape(newShape));\n\n for (let i = 0; i < xSize; ++i) {\n const loc = util.indexToLoc(i, xRank, xStrides);\n\n // Permute location.\n const newLoc: number[] = new Array(loc.length);\n for (let i = 0; i < newLoc.length; i++) {\n newLoc[i] = loc[perm[i]];\n }\n\n const newIndex = util.locToIndex(newLoc, xRank, newStrides);\n result[newIndex] = xVals[i];\n }\n return result;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, TensorInfo, Transpose, TransposeAttrs, TransposeInputs, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nimport {transposeImpl} from './Transpose_impl';\n\nexport function transpose(args: {\n inputs: TransposeInputs,\n attrs: TransposeAttrs,\n backend: MathBackendCPU\n}): TensorInfo {\n const {inputs, attrs, backend} = args;\n const {x} = inputs;\n const {perm} = attrs;\n\n assertNotComplex(x, 'transpose');\n\n const xRank = x.shape.length;\n\n const newShape: number[] = new Array(xRank);\n for (let i = 0; i < newShape.length; i++) {\n newShape[i] = x.shape[perm[i]];\n }\n\n const values = backend.data.get(x.dataId).values as TypedArray;\n const result = transposeImpl(values, x.shape, x.dtype, perm, newShape);\n\n const dataId = backend.write(result, newShape, x.dtype);\n return {dataId, shape: newShape, dtype: x.dtype};\n}\n\nexport const transposeConfig: KernelConfig = {\n kernelName: Transpose,\n backendName: 'cpu',\n kernelFunc: transpose as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {All, AllAttrs, AllInputs, backend_util, KernelConfig, KernelFunc, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {reshape} from './Reshape';\nimport {transpose} from './Transpose';\n\nexport function all(\n args: {inputs: AllInputs, backend: MathBackendCPU, attrs: AllAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis, keepDims} = attrs;\n\n assertNotComplex(x, 'all');\n\n const origAxes = util.parseAxisParam(axis, x.shape);\n let axes = origAxes;\n const permutedAxes = backend_util.getAxesPermutation(axes, x.shape.length);\n let $x = x;\n if (permutedAxes != null) {\n $x = transpose({inputs: {x}, backend, attrs: {perm: permutedAxes}});\n axes = backend_util.getInnerMostAxes(axes.length, x.shape.length);\n }\n\n backend_util.assertAxesAreInnerMostDims('all', axes, $x.shape.length);\n const [outShape, reduceShape] =\n backend_util.computeOutAndReduceShapes($x.shape, axes);\n const reduceSize = util.sizeFromShape(reduceShape);\n const vals = util.makeZerosTypedArray(util.sizeFromShape(outShape), $x.dtype);\n\n const aVals = backend.data.get($x.dataId).values as TypedArray;\n for (let i = 0; i < vals.length; ++i) {\n const offset = i * reduceSize;\n let all = aVals[offset];\n for (let j = 0; j < reduceSize; ++j) {\n const value = aVals[offset + j];\n all = all && value;\n }\n vals[i] = all;\n }\n\n if (permutedAxes != null) {\n backend.disposeIntermediateTensorInfo($x);\n }\n\n const result = backend.makeTensorInfo(outShape, $x.dtype, vals);\n\n if (keepDims) {\n const expandedShape = backend_util.expandShapeToKeepDim(outShape, origAxes);\n const reshapedResult =\n reshape({inputs: {x: result}, backend, attrs: {shape: expandedShape}});\n\n backend.disposeIntermediateTensorInfo(result);\n\n return reshapedResult;\n }\n\n return result;\n}\n\nexport const allConfig: KernelConfig = {\n kernelName: All,\n backendName: 'cpu',\n kernelFunc: all as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Any, AnyAttrs, AnyInputs, backend_util, KernelConfig, KernelFunc, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {reshape} from './Reshape';\nimport {transpose} from './Transpose';\n\nexport function any(\n args: {inputs: AnyInputs, backend: MathBackendCPU, attrs: AnyAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis, keepDims} = attrs;\n\n assertNotComplex(x, 'any');\n\n const origAxes = util.parseAxisParam(axis, x.shape);\n let axes = origAxes;\n const permutedAxes = backend_util.getAxesPermutation(axes, x.shape.length);\n let $x = x;\n if (permutedAxes != null) {\n $x = transpose({inputs: {x}, backend, attrs: {perm: permutedAxes}});\n axes = backend_util.getInnerMostAxes(axes.length, x.shape.length);\n }\n\n backend_util.assertAxesAreInnerMostDims('any', axes, $x.shape.length);\n const [outShape, reduceShape] =\n backend_util.computeOutAndReduceShapes($x.shape, axes);\n const reduceSize = util.sizeFromShape(reduceShape);\n const vals = util.makeZerosTypedArray(util.sizeFromShape(outShape), $x.dtype);\n\n const aVals = backend.data.get($x.dataId).values as TypedArray;\n for (let i = 0; i < vals.length; ++i) {\n const offset = i * reduceSize;\n let anyVal = aVals[offset];\n for (let j = 0; j < reduceSize; ++j) {\n const value = aVals[offset + j];\n anyVal = anyVal || value;\n }\n vals[i] = anyVal;\n }\n\n if (permutedAxes != null) {\n backend.disposeIntermediateTensorInfo($x);\n }\n\n const result = backend.makeTensorInfo(outShape, $x.dtype, vals);\n\n if (keepDims) {\n const expandedShape = backend_util.expandShapeToKeepDim(outShape, origAxes);\n const reshapedResult =\n reshape({inputs: {x: result}, backend, attrs: {shape: expandedShape}});\n\n backend.disposeIntermediateTensorInfo(result);\n\n return reshapedResult;\n }\n\n return result;\n}\n\nexport const anyConfig: KernelConfig = {\n kernelName: Any,\n backendName: 'cpu',\n kernelFunc: any as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ArgMax, ArgMaxAttrs, ArgMaxInputs, backend_util, KernelConfig, KernelFunc, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {transpose} from './Transpose';\n\nexport function argMax(\n args: {inputs: ArgMaxInputs, backend: MathBackendCPU, attrs: ArgMaxAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis} = attrs;\n\n assertNotComplex(x, 'argMax');\n\n let axes = util.parseAxisParam(axis, x.shape);\n const permutedAxes = backend_util.getAxesPermutation(axes, x.shape.length);\n let $x = x;\n const intermediateTensorInfos = [];\n if (permutedAxes != null) {\n $x = transpose({inputs: {x}, backend, attrs: {perm: permutedAxes}});\n intermediateTensorInfos.push($x);\n axes = backend_util.getInnerMostAxes(axes.length, $x.shape.length);\n }\n\n axes = [axes[0]];\n backend_util.assertAxesAreInnerMostDims('argMax', axes, $x.shape.length);\n const [outShape, reduceShape] =\n backend_util.computeOutAndReduceShapes($x.shape, axes);\n\n const outSize = util.sizeFromShape(outShape);\n const vals = util.makeZerosTypedArray(outSize, 'int32');\n const reduceSize = util.sizeFromShape(reduceShape);\n\n const aVals = backend.data.get($x.dataId).values as TypedArray;\n for (let i = 0; i < vals.length; ++i) {\n const offset = i * reduceSize;\n let max = aVals[offset];\n let maxIndex = 0;\n for (let j = 0; j < reduceSize; ++j) {\n const value = aVals[offset + j];\n if (value > max) {\n max = value;\n maxIndex = j;\n }\n }\n vals[i] = maxIndex;\n }\n\n intermediateTensorInfos.forEach(\n t => backend.disposeIntermediateTensorInfo(t));\n\n return backend.makeTensorInfo(outShape, 'int32', vals);\n}\n\nexport const argMaxConfig: KernelConfig = {\n kernelName: ArgMax,\n backendName: 'cpu',\n kernelFunc: argMax as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ArgMin, ArgMinAttrs, ArgMinInputs, backend_util, KernelConfig, KernelFunc, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {transpose} from './Transpose';\n\nexport function argMin(\n args: {inputs: ArgMinInputs, backend: MathBackendCPU, attrs: ArgMinAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis} = attrs;\n\n assertNotComplex(x, 'argMin');\n\n let axes = util.parseAxisParam(axis, x.shape);\n const permutedAxes = backend_util.getAxesPermutation(axes, x.shape.length);\n let $x = x;\n const intermediateTensorInfos = [];\n if (permutedAxes != null) {\n $x = transpose({inputs: {x}, backend, attrs: {perm: permutedAxes}});\n intermediateTensorInfos.push($x);\n axes = backend_util.getInnerMostAxes(axes.length, $x.shape.length);\n }\n\n axes = [axes[0]];\n backend_util.assertAxesAreInnerMostDims('argMin', axes, $x.shape.length);\n const [outShape, reduceShape] =\n backend_util.computeOutAndReduceShapes($x.shape, axes);\n\n const outSize = util.sizeFromShape(outShape);\n const vals = util.makeZerosTypedArray(outSize, 'int32');\n const reduceSize = util.sizeFromShape(reduceShape);\n\n const aVals = backend.data.get($x.dataId).values as TypedArray;\n for (let i = 0; i < vals.length; ++i) {\n const offset = i * reduceSize;\n let min = aVals[offset];\n let minIndex = 0;\n for (let j = 0; j < reduceSize; ++j) {\n const value = aVals[offset + j];\n if (value < min) {\n min = value;\n minIndex = j;\n }\n }\n vals[i] = minIndex;\n }\n\n intermediateTensorInfos.forEach(\n t => backend.disposeIntermediateTensorInfo(t));\n\n return backend.makeTensorInfo(outShape, 'int32', vals);\n}\n\nexport const argMinConfig: KernelConfig = {\n kernelName: ArgMin,\n backendName: 'cpu',\n kernelFunc: argMin as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Asin, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const asin = unaryKernelFunc(Asin, (xi) => Math.asin(xi));\n\nexport const asinConfig: KernelConfig = {\n kernelName: Asin,\n backendName: 'cpu',\n kernelFunc: asin,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Asinh, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const asinh = unaryKernelFunc(Asinh, (xi) => Math.asinh(xi));\n\nexport const asinhConfig: KernelConfig = {\n kernelName: Asinh,\n backendName: 'cpu',\n kernelFunc: asinh,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Atan, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const atan = unaryKernelFunc(Atan, (xi) => Math.atan(xi));\n\nexport const atanConfig: KernelConfig = {\n kernelName: Atan,\n backendName: 'cpu',\n kernelFunc: atan,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Atan2, KernelConfig} from '@tensorflow/tfjs-core';\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const atan2Impl = createSimpleBinaryKernelImpl(\n (aValue, bValue) => Math.atan2(aValue as number, bValue as number));\n\nexport const atan2 = binaryKernelFunc(Atan2, atan2Impl);\n\nexport const atan2Config: KernelConfig = {\n kernelName: Atan2,\n backendName: 'cpu',\n kernelFunc: atan2,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Atanh, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const atanh = unaryKernelFunc(Atanh, (xi) => Math.atanh(xi));\n\nexport const atanhConfig: KernelConfig = {\n kernelName: Atanh,\n backendName: 'cpu',\n kernelFunc: atanh,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, buffer, DataType, Rank, TensorBuffer, TypedArray} from '@tensorflow/tfjs-core';\n\nexport function pool(\n xValues: TypedArray, xShape: number[], dtype: DataType, strides: number[],\n convInfo: backend_util.Conv2DInfo,\n poolType: 'max'|'avg'): TensorBuffer {\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const dilationHeight = convInfo.dilationHeight;\n const dilationWidth = convInfo.dilationWidth;\n const effectiveFilterHeight = convInfo.effectiveFilterHeight;\n const effectiveFilterWidth = convInfo.effectiveFilterWidth;\n const padTop = convInfo.padInfo.top;\n const padLeft = convInfo.padInfo.left;\n\n const initialValue =\n (poolType === 'max' ? Number.NEGATIVE_INFINITY :\n Number.POSITIVE_INFINITY);\n\n const output = buffer(convInfo.outShape, dtype);\n const outputVals = output.values;\n\n const outputBatchStrides =\n convInfo.outShape[1] * convInfo.outShape[2] * convInfo.outShape[3];\n const outputRowStrides = convInfo.outShape[2] * convInfo.outShape[3];\n const outputColStrides = convInfo.outShape[3];\n\n for (let b = 0; b < convInfo.batchSize; ++b) {\n const outputBatchOffset = b * outputBatchStrides;\n const inputBatchOffset = b * strides[0];\n for (let d = 0; d < convInfo.inChannels; ++d) {\n for (let yR = 0; yR < convInfo.outHeight; ++yR) {\n const xRCorner = yR * strideHeight - padTop;\n const xRMin = Math.max(0, xRCorner);\n const xRMax =\n Math.min(convInfo.inHeight, effectiveFilterHeight + xRCorner);\n const outputRowOffset = outputBatchOffset + yR * outputRowStrides;\n for (let yC = 0; yC < convInfo.outWidth; ++yC) {\n const xCCorner = yC * strideWidth - padLeft;\n const xCMin = Math.max(0, xCCorner);\n const xCMax =\n Math.min(convInfo.inWidth, effectiveFilterWidth + xCCorner);\n let minMaxValue = initialValue;\n let avgValue = 0;\n let count = 0;\n for (let xR = xRMin; xR < xRMax; xR += dilationHeight) {\n const xROffset = inputBatchOffset + xR * strides[1];\n for (let xC = xCMin; xC < xCMax; xC += dilationWidth) {\n const xCOffset = xROffset + xC * strides[2];\n const pixel = xValues[xCOffset + d];\n if ((poolType === 'max' && pixel > minMaxValue)) {\n minMaxValue = pixel;\n } else if (poolType === 'avg') {\n avgValue += pixel;\n count++;\n }\n }\n if (isNaN(minMaxValue)) {\n break;\n }\n }\n const outputOffset = outputRowOffset + yC * outputColStrides + d;\n outputVals[outputOffset] =\n poolType === 'avg' ? 
avgValue / count : minMaxValue;\n }\n }\n }\n }\n return output;\n}\n\nexport function maxPoolPositions(\n xValues: TypedArray, xShape: number[], dtype: DataType,\n convInfo: backend_util.Conv2DInfo, flattenPositions = false,\n includeBatchInIndex = false): TensorBuffer {\n const maxPositions = buffer(convInfo.outShape, 'int32');\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const dilationHeight = convInfo.dilationHeight;\n const dilationWidth = convInfo.dilationWidth;\n const effectiveFilterHeight = convInfo.effectiveFilterHeight;\n const effectiveFilterWidth = convInfo.effectiveFilterWidth;\n const padTop = convInfo.padInfo.top;\n const padLeft = convInfo.padInfo.left;\n\n const xBuf = buffer(xShape, dtype, xValues);\n for (let b = 0; b < convInfo.batchSize; ++b) {\n for (let d = 0; d < convInfo.inChannels; ++d) {\n for (let yR = 0; yR < convInfo.outHeight; ++yR) {\n const xRCorner = yR * strideHeight - padTop;\n let xRMin = xRCorner;\n while (xRMin < 0) {\n xRMin += dilationHeight;\n }\n // const xRMin = Math.max(0, xRCorner);\n const xRMax =\n Math.min(convInfo.inHeight, effectiveFilterHeight + xRCorner);\n for (let yC = 0; yC < convInfo.outWidth; ++yC) {\n const xCCorner = yC * strideWidth - padLeft;\n let xCMin = xCCorner;\n while (xCMin < 0) {\n xCMin += dilationWidth;\n }\n const xCMax =\n Math.min(convInfo.inWidth, effectiveFilterWidth + xCCorner);\n let maxValue = Number.NEGATIVE_INFINITY;\n let maxPosition = -1;\n\n for (let xR = xRMin; xR < xRMax; xR += dilationHeight) {\n const wR = xR - xRCorner;\n for (let xC = xCMin; xC < xCMax; xC += dilationWidth) {\n const wC = xC - xCCorner;\n const pixel = xBuf.get(b, xR, xC, d);\n if (pixel > maxValue) {\n maxValue = pixel as number;\n if (flattenPositions) {\n maxPosition = includeBatchInIndex ?\n ((b * convInfo.inHeight + xR) * convInfo.inWidth + xC) *\n convInfo.inChannels +\n d :\n (xR * convInfo.inWidth + xC) * convInfo.inChannels + d;\n } else {\n maxPosition = wR * effectiveFilterWidth + wC;\n }\n }\n }\n }\n maxPositions.set(maxPosition, b, yR, yC, d);\n }\n }\n }\n }\n return maxPositions;\n}\n\nexport function pool3d(\n xValues: TypedArray, xShape: number[], dtype: DataType, strides: number[],\n convInfo: backend_util.Conv3DInfo,\n poolType: 'max'|'avg'): TensorBuffer {\n const strideDepth = convInfo.strideDepth;\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const dilationDepth = convInfo.dilationDepth;\n const dilationHeight = convInfo.dilationHeight;\n const dilationWidth = convInfo.dilationWidth;\n const effectiveFilterDepth = convInfo.effectiveFilterDepth;\n const effectiveFilterHeight = convInfo.effectiveFilterHeight;\n const effectiveFilterWidth = convInfo.effectiveFilterWidth;\n const padFront = convInfo.padInfo.front;\n const padTop = convInfo.padInfo.top;\n const padLeft = convInfo.padInfo.left;\n\n const initialValue =\n (poolType === 'max' ? 
Number.NEGATIVE_INFINITY :\n Number.POSITIVE_INFINITY);\n\n const output = buffer(convInfo.outShape, dtype);\n const outputVals = output.values;\n\n const outputBatchStrides = convInfo.outShape[1] * convInfo.outShape[2] *\n convInfo.outShape[3] * convInfo.outShape[4];\n const outputDepthStrides =\n convInfo.outShape[2] * convInfo.outShape[3] * convInfo.outShape[4];\n const outputRowStrides = convInfo.outShape[3] * convInfo.outShape[4];\n const outputColStrides = convInfo.outShape[4];\n\n for (let batch = 0; batch < convInfo.batchSize; ++batch) {\n const outputBatchOffset = batch * outputBatchStrides;\n const inputBatchOffset = batch * strides[0];\n for (let channel = 0; channel < convInfo.inChannels; ++channel) {\n for (let yDepth = 0; yDepth < convInfo.outDepth; ++yDepth) {\n const xDepthCorner = yDepth * strideDepth - padFront;\n let xDepthMin = xDepthCorner;\n while (xDepthMin < 0) {\n xDepthMin += dilationDepth;\n }\n const xDepthMax =\n Math.min(convInfo.inDepth, effectiveFilterDepth + xDepthCorner);\n const outputDepthOffset =\n outputBatchOffset + yDepth * outputDepthStrides;\n for (let yRow = 0; yRow < convInfo.outHeight; ++yRow) {\n const xRowCorner = yRow * strideHeight - padTop;\n let xRowMin = xRowCorner;\n while (xRowMin < 0) {\n xRowMin += dilationHeight;\n }\n const xRowMax =\n Math.min(convInfo.inHeight, effectiveFilterHeight + xRowCorner);\n const outputRowOffset = outputDepthOffset + yRow * outputRowStrides;\n for (let yCol = 0; yCol < convInfo.outWidth; ++yCol) {\n const xColCorner = yCol * strideWidth - padLeft;\n let xColMin = xColCorner;\n while (xColMin < 0) {\n xColMin += dilationWidth;\n }\n const xColMax =\n Math.min(convInfo.inWidth, effectiveFilterWidth + xColCorner);\n // Shader code begins\n const outputColOffset = outputRowOffset + yCol * outputColStrides;\n let minMaxValue = initialValue;\n let avgValue = 0;\n let count = 0;\n for (let xDepth = xDepthMin; xDepth < xDepthMax;\n xDepth += dilationDepth) {\n const xDepthOffset = inputBatchOffset + xDepth * strides[1];\n for (let xRow = xRowMin; xRow < xRowMax; xRow += dilationHeight) {\n const xRowOffset = xDepthOffset + xRow * strides[2];\n for (let xCol = xColMin; xCol < xColMax;\n xCol += dilationWidth) {\n const xColOffset = xRowOffset + xCol * strides[3];\n const pixel = xValues[xColOffset + channel];\n if ((poolType === 'max' && pixel > minMaxValue)) {\n minMaxValue = pixel;\n } else if (poolType === 'avg') {\n avgValue += pixel;\n count++;\n }\n if (isNaN(minMaxValue)) {\n break;\n }\n }\n if (isNaN(minMaxValue)) {\n break;\n }\n }\n if (isNaN(minMaxValue)) {\n break;\n }\n }\n const outputOffset = outputColOffset + channel;\n outputVals[outputOffset] = poolType === 'avg' ?\n avgValue / Math.max(count, 1) :\n minMaxValue;\n }\n }\n }\n }\n }\n\n return output;\n}\n\nexport function maxPool3dPositions(\n xBuf: TensorBuffer,\n convInfo: backend_util.Conv3DInfo): TensorBuffer {\n const maxPositions = buffer(convInfo.outShape, 'int32');\n const strideDepth = convInfo.strideDepth;\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const dilationDepth = convInfo.dilationDepth;\n const dilationHeight = convInfo.dilationHeight;\n const dilationWidth = convInfo.dilationWidth;\n const effectiveFilterDepth = convInfo.effectiveFilterDepth;\n const effectiveFilterHeight = convInfo.effectiveFilterHeight;\n const effectiveFilterWidth = convInfo.effectiveFilterWidth;\n const padFront = convInfo.padInfo.front;\n const padTop = convInfo.padInfo.top;\n const padLeft = 
convInfo.padInfo.left;\n\n for (let batch = 0; batch < convInfo.batchSize; ++batch) {\n for (let channel = 0; channel < convInfo.inChannels; ++channel) {\n for (let yDepth = 0; yDepth < convInfo.outDepth; ++yDepth) {\n const xDepthCorner = yDepth * strideDepth - padFront;\n let xDepthMin = xDepthCorner;\n while (xDepthMin < 0) {\n xDepthMin += dilationDepth;\n }\n const xDepthMax =\n Math.min(convInfo.inDepth, effectiveFilterDepth + xDepthCorner);\n for (let yRow = 0; yRow < convInfo.outHeight; ++yRow) {\n const xRowCorner = yRow * strideHeight - padTop;\n let xRowMin = xRowCorner;\n while (xRowMin < 0) {\n xRowMin += dilationHeight;\n }\n const xRowMax =\n Math.min(convInfo.inHeight, effectiveFilterHeight + xRowCorner);\n for (let yCol = 0; yCol < convInfo.outWidth; ++yCol) {\n const xColCorner = yCol * strideWidth - padLeft;\n let xColMin = xColCorner;\n while (xColMin < 0) {\n xColMin += dilationWidth;\n }\n const xColMax =\n Math.min(convInfo.inWidth, effectiveFilterWidth + xColCorner);\n\n // Shader code begins\n let maxValue = Number.NEGATIVE_INFINITY;\n let maxPosition = -1;\n\n for (let xDepth = xDepthMin; xDepth < xDepthMax;\n xDepth += dilationDepth) {\n const wDepth = xDepth - xDepthCorner;\n for (let xRow = xRowMin; xRow < xRowMax; xRow += dilationHeight) {\n const wRow = xRow - xRowCorner;\n for (let xCol = xColMin; xCol < xColMax;\n xCol += dilationWidth) {\n const wCol = xCol - xColCorner;\n const pixel = xBuf.get(batch, xDepth, xRow, xCol, channel);\n if (pixel >= maxValue) {\n maxValue = pixel as number;\n maxPosition =\n wDepth * effectiveFilterHeight * effectiveFilterWidth +\n wRow * effectiveFilterHeight + wCol;\n }\n }\n }\n }\n\n maxPositions.set(maxPosition, batch, yDepth, yRow, yCol, channel);\n }\n }\n }\n }\n }\n\n return maxPositions;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {AvgPool, AvgPoolAttrs, AvgPoolInputs, backend_util, KernelConfig, KernelFunc, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {pool} from '../utils/pool_utils';\nimport {identity} from './Identity';\n\nexport function avgPool(\n args:\n {inputs: AvgPoolInputs, backend: MathBackendCPU, attrs: AvgPoolAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n assertNotComplex(x, 'avgPool');\n const {filterSize, strides, pad, dimRoundingMode} = attrs;\n const dilations = 1;\n\n util.assert(\n backend_util.eitherStridesOrDilationsAreOne(strides, dilations),\n () => 'Error in avgPool: Either strides or dilations must be 1. 
' +\n `Got strides ${strides} and dilations '${dilations}'`);\n\n const convInfo = backend_util.computePool2DInfo(\n x.shape as [number, number, number, number], filterSize, strides,\n dilations, pad, dimRoundingMode);\n let res: TensorInfo;\n\n if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 &&\n util.arraysEqual(convInfo.inShape, convInfo.outShape)) {\n res = identity({inputs: {x}, backend});\n } else {\n const xValues = backend.data.get(x.dataId).values as TypedArray;\n const strides = util.computeStrides(x.shape);\n const buffer = pool(xValues, x.shape, x.dtype, strides, convInfo, 'avg');\n res = backend.makeTensorInfo(\n convInfo.outShape, x.dtype, buffer.values as TypedArray);\n }\n return res;\n}\n\nexport const avgPoolConfig: KernelConfig = {\n kernelName: AvgPool,\n backendName: 'cpu',\n kernelFunc: avgPool as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {AvgPool3D, AvgPool3DAttrs, AvgPool3DInputs, backend_util, KernelConfig, KernelFunc, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {pool3d} from '../utils/pool_utils';\n\nexport function avgPool3D(args: {\n inputs: AvgPool3DInputs,\n backend: MathBackendCPU,\n attrs: AvgPool3DAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {filterSize, strides, pad, dimRoundingMode, dataFormat} = attrs;\n\n assertNotComplex(x, 'avgPool3d');\n\n const convInfo = backend_util.computePool3DInfo(\n x.shape as [number, number, number, number, number], filterSize, strides,\n 1 /* dilations */, pad, dimRoundingMode, dataFormat);\n\n const xValues = backend.data.get(x.dataId).values as TypedArray;\n const outBuf = pool3d(\n xValues, x.shape, x.dtype, util.computeStrides(x.shape), convInfo, 'avg');\n\n return backend.makeTensorInfo(outBuf.shape, 'float32', outBuf.values);\n}\n\nexport const avgPool3DConfig: KernelConfig = {\n kernelName: AvgPool3D,\n backendName: 'cpu',\n kernelFunc: avgPool3D as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {AvgPool3DGrad, AvgPool3DGradAttrs, AvgPool3DGradInputs, backend_util, buffer, KernelConfig, KernelFunc, Rank, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function avgPool3DGrad(args: {\n inputs: AvgPool3DGradInputs,\n backend: MathBackendCPU,\n attrs: AvgPool3DGradAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {dy, input} = inputs;\n const {filterSize, strides, pad, dimRoundingMode} = attrs;\n\n assertNotComplex([dy, input], 'avgPool3DGrad');\n\n const convInfo = backend_util.computePool3DInfo(\n input.shape as [number, number, number, number, number], filterSize,\n strides, 1 /* dilations */, pad, dimRoundingMode);\n\n const strideDepth = convInfo.strideDepth;\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const filterDepth = convInfo.filterDepth;\n const filterHeight = convInfo.filterHeight;\n const filterWidth = convInfo.filterWidth;\n const dilationDepth = convInfo.dilationDepth;\n const dilationHeight = convInfo.dilationHeight;\n const dilationWidth = convInfo.dilationWidth;\n const effectiveFilterDepth = convInfo.effectiveFilterDepth;\n const effectiveFilterHeight = convInfo.effectiveFilterHeight;\n const effectiveFilterWidth = convInfo.effectiveFilterWidth;\n const padFront = effectiveFilterDepth - 1 - convInfo.padInfo.front;\n const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left;\n const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top;\n const dx = buffer(input.shape, 'float32');\n\n const avgMultiplier = 1 / (filterDepth * filterHeight * filterWidth);\n\n const dyBuf = backend.bufferSync(dy);\n\n for (let batch = 0; batch < convInfo.batchSize; ++batch) {\n for (let channel = 0; channel < convInfo.inChannels; ++channel) {\n for (let dxDepth = 0; dxDepth < convInfo.inDepth; ++dxDepth) {\n for (let dxRow = 0; dxRow < convInfo.inHeight; ++dxRow) {\n for (let dxCol = 0; dxCol < convInfo.inWidth; ++dxCol) {\n // Shader code begins.\n const dyDepthCorner = dxDepth - padFront;\n const dyRowCorner = dxRow - padTop;\n const dyColCorner = dxCol - padLeft;\n let dotProd = 0;\n for (let wDepth = 0; wDepth < effectiveFilterDepth;\n wDepth += dilationDepth) {\n const dyDepth = (dyDepthCorner + wDepth) / strideDepth;\n if (dyDepth < 0 || dyDepth >= convInfo.outDepth ||\n Math.floor(dyDepth) !== dyDepth) {\n continue;\n }\n for (let wRow = 0; wRow < effectiveFilterHeight;\n wRow += dilationHeight) {\n const dyRow = (dyRowCorner + wRow) / strideHeight;\n if (dyRow < 0 || dyRow >= convInfo.outHeight ||\n Math.floor(dyRow) !== dyRow) {\n continue;\n }\n for (let wCol = 0; wCol < effectiveFilterWidth;\n wCol += dilationWidth) {\n const dyCol = (dyColCorner + wCol) / strideWidth;\n if (dyCol < 0 || dyCol >= convInfo.outWidth ||\n 
Math.floor(dyCol) !== dyCol) {\n continue;\n }\n\n const pixel =\n dyBuf.get(batch, dyDepth, dyRow, dyCol, channel);\n dotProd += pixel;\n }\n }\n }\n dx.set(\n dotProd * avgMultiplier, batch, dxDepth, dxRow, dxCol, channel);\n }\n }\n }\n }\n }\n\n return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values);\n}\n\nexport const avgPool3DGradConfig: KernelConfig = {\n kernelName: AvgPool3DGrad,\n backendName: 'cpu',\n kernelFunc: avgPool3DGrad as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {AvgPoolGrad, AvgPoolGradAttrs, AvgPoolGradInputs, backend_util, buffer, KernelConfig, KernelFunc, Rank, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function avgPoolGrad(args: {\n inputs: AvgPoolGradInputs,\n backend: MathBackendCPU,\n attrs: AvgPoolGradAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {dy, input} = inputs;\n const x = input;\n assertNotComplex([dy, input], 'avgPoolGrad');\n const {filterSize, strides, pad} = attrs;\n\n const convInfo = backend_util.computePool2DInfo(\n x.shape as [number, number, number, number], filterSize, strides,\n 1 /* dilations */, pad);\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const filterHeight = convInfo.filterHeight;\n const filterWidth = convInfo.filterWidth;\n const dilationHeight = convInfo.dilationHeight;\n const dilationWidth = convInfo.dilationWidth;\n const effectiveFilterHeight = convInfo.effectiveFilterHeight;\n const effectiveFilterWidth = convInfo.effectiveFilterWidth;\n const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left;\n const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top;\n const dx =\n buffer(x.shape as [number, number, number, number], 'float32');\n\n const avgMultiplier = 1 / (filterHeight * filterWidth);\n\n const dyData = backend.data.get(dy.dataId).values as Float32Array;\n const dyBuf = buffer(\n dy.shape as [number, number, number, number], 'float32', dyData);\n\n for (let b = 0; b < convInfo.batchSize; ++b) {\n for (let d = 0; d < convInfo.inChannels; ++d) {\n for (let dxR = 0; dxR < convInfo.inHeight; ++dxR) {\n for (let dxC = 0; dxC < convInfo.inWidth; ++dxC) {\n // Shader code begins.\n const dyRCorner = dxR - padTop;\n const dyCCorner = dxC - padLeft;\n let dotProd = 0;\n for (let wR = 0; wR < effectiveFilterHeight; wR += dilationHeight) {\n const dyR = (dyRCorner + wR) / strideHeight;\n if (dyR < 0 || dyR >= convInfo.outHeight ||\n Math.floor(dyR) !== dyR) {\n continue;\n }\n for (let wC = 0; wC < effectiveFilterWidth; wC += dilationWidth) {\n const dyC = (dyCCorner + wC) / strideWidth;\n if (dyC < 0 || dyC >= convInfo.outWidth ||\n Math.floor(dyC) !== dyC) {\n continue;\n }\n\n const pixel = dyBuf.get(b, dyR, dyC, d);\n dotProd 
+= pixel;\n }\n }\n dx.set(dotProd * avgMultiplier, b, dxR, dxC, d);\n }\n }\n }\n }\n return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values);\n}\n\nexport const avgPoolGradConfig: KernelConfig = {\n kernelName: AvgPoolGrad,\n backendName: 'cpu',\n kernelFunc: avgPoolGrad as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {FusedBatchNorm, FusedBatchNormAttrs, FusedBatchNormInputs, KernelConfig, KernelFunc, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function batchNorm(args: {\n inputs: FusedBatchNormInputs,\n backend: MathBackendCPU,\n attrs: FusedBatchNormAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x, scale, offset, mean, variance} = inputs;\n\n util.assert(\n mean.shape.length === variance.shape.length,\n () => 'Batch normalization gradient requires mean and variance to have ' +\n 'equal ranks.');\n util.assert(\n offset == null || mean.shape.length === offset.shape.length,\n () => 'Batch normalization gradient requires mean and offset to have ' +\n 'equal ranks.');\n util.assert(\n scale == null || mean.shape.length === scale.shape.length,\n () => 'Batch normalization gradient requires mean and scale to have ' +\n 'equal ranks.');\n\n assertNotComplex([x, mean, variance, scale, offset], 'batchNorm');\n\n let {varianceEpsilon} = attrs;\n if (varianceEpsilon == null) {\n varianceEpsilon = 0.001;\n }\n\n const xVals = backend.data.get(x.dataId).values as TypedArray;\n const mVals = backend.data.get(mean.dataId).values as TypedArray;\n const varVals = backend.data.get(variance.dataId).values as TypedArray;\n const sVals = scale ? backend.data.get(scale.dataId).values as TypedArray :\n new Float32Array([1]);\n const offVals = offset ?\n backend.data.get(offset.dataId).values as TypedArray :\n new Float32Array([0]);\n const outVals = new Float32Array(xVals.length);\n\n const offValsLength = offVals.length;\n const sValsLength = sVals.length;\n const varValsLength = varVals.length;\n const mValsLength = mVals.length;\n\n let offi = 0;\n let mi = 0;\n let si = 0;\n let vi = 0;\n for (let i = 0; i < xVals.length; ++i) {\n outVals[i] = offVals[offi++] +\n (xVals[i] - mVals[mi++]) * sVals[si++] /\n Math.sqrt(varVals[vi++] + varianceEpsilon);\n if (offi >= offValsLength) {\n offi = 0;\n }\n if (mi >= mValsLength) {\n mi = 0;\n }\n if (si >= sValsLength) {\n si = 0;\n }\n if (vi >= varValsLength) {\n vi = 0;\n }\n }\n return backend.makeTensorInfo(x.shape, x.dtype, outVals);\n}\n\nexport const batchNormConfig: KernelConfig = {\n kernelName: FusedBatchNorm,\n backendName: 'cpu',\n kernelFunc: batchNorm as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, BackendValues, buffer, DataType, KernelConfig, KernelFunc, Slice, slice_util, SliceAttrs, SliceInputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function sliceImpl(\n vals: BackendValues, begin: number[], size: number[], shape: number[],\n dtype: DataType): BackendValues {\n const isContinous = slice_util.isSliceContinous(shape, begin, size);\n const length = util.sizeFromShape(size);\n const xStrides = util.computeStrides(shape);\n\n if (isContinous) {\n const flatOffset = slice_util.computeFlatOffset(begin, xStrides);\n\n if (dtype === 'string') {\n return (vals as Uint8Array[]).slice(flatOffset, flatOffset + length);\n }\n\n return (vals as TypedArray).subarray(flatOffset, flatOffset + length);\n }\n\n const decodedData = dtype === 'string' ?\n backend_util.fromUint8ToStringArray(vals as Uint8Array[]) :\n vals as TypedArray;\n\n const inBuf = buffer(shape, dtype, decodedData);\n const outBuf = buffer(size, dtype);\n for (let i = 0; i < outBuf.size; ++i) {\n const outLoc = outBuf.indexToLoc(i);\n const inLoc = outLoc.map((idx: number, j) => idx + begin[j]);\n outBuf.set(inBuf.get(...inLoc), ...outLoc);\n }\n\n if (dtype === 'string') {\n return backend_util.fromStringArrayToUint8(outBuf.values as string[]);\n }\n return outBuf.values as TypedArray;\n}\n\nexport function slice(\n args: {inputs: SliceInputs, backend: MathBackendCPU, attrs: SliceAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {begin, size} = attrs;\n\n assertNotComplex(x, 'slice');\n\n const [$begin, $size] = slice_util.parseSliceParams(x, begin, size);\n slice_util.assertParamsValid(x, $begin, $size);\n\n const vals = backend.data.get(x.dataId).values;\n const outVals = sliceImpl(vals, $begin, $size, x.shape, x.dtype);\n return backend.makeTensorInfo($size, x.dtype, outVals);\n}\n\nexport const sliceConfig: KernelConfig = {\n kernelName: Slice,\n backendName: 'cpu',\n kernelFunc: slice as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, BatchToSpaceND, BatchToSpaceNDAttrs, BatchToSpaceNDInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {reshape} from './Reshape';\nimport {slice} from './Slice';\nimport {transpose} from './Transpose';\n\nexport function batchToSpaceND(args: {\n inputs: BatchToSpaceNDInputs,\n backend: MathBackendCPU,\n attrs: BatchToSpaceNDAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {blockShape, crops} = attrs;\n\n assertNotComplex([x], 'batchToSpaceND');\n\n const prod = blockShape.reduce((a, b) => a * b);\n\n const reshaped = backend_util.getReshaped(x.shape, blockShape, prod);\n const permuted = backend_util.getPermuted(reshaped.length, blockShape.length);\n const reshapedPermuted =\n backend_util.getReshapedPermuted(x.shape, blockShape, prod);\n const sliceBeginCoords =\n backend_util.getSliceBeginCoords(crops, blockShape.length);\n const sliceSize =\n backend_util.getSliceSize(reshapedPermuted, crops, blockShape.length);\n\n const xReshaped = reshape({inputs: {x}, backend, attrs: {shape: reshaped}});\n const xTransposed =\n transpose({inputs: {x: xReshaped}, backend, attrs: {perm: permuted}});\n const xTransposedReshaped = reshape(\n {inputs: {x: xTransposed}, backend, attrs: {shape: reshapedPermuted}});\n const result = slice({\n inputs: {x: xTransposedReshaped},\n backend,\n attrs: {begin: sliceBeginCoords, size: sliceSize}\n });\n\n backend.disposeIntermediateTensorInfo(xReshaped);\n backend.disposeIntermediateTensorInfo(xTransposed);\n backend.disposeIntermediateTensorInfo(xTransposedReshaped);\n\n return result;\n}\n\nexport const batchToSpaceNDConfig: KernelConfig = {\n kernelName: BatchToSpaceND,\n backendName: 'cpu',\n kernelFunc: batchToSpaceND as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {buffer, DataType, Rank, TensorBuffer, TypedArray, util} from '@tensorflow/tfjs-core';\n\nexport function bincountImpl(\n xVals: TypedArray, weightsVals: TypedArray, weightsDtype: DataType,\n weightsShape: number[], size: number): TypedArray {\n const weightsSize = util.sizeFromShape(weightsShape);\n const outVals = util.makeZerosTypedArray(size, weightsDtype) as TypedArray;\n\n for (let i = 0; i < xVals.length; i++) {\n const value = xVals[i];\n if (value < 0) {\n throw new Error('Input x must be non-negative!');\n }\n\n if (value >= size) {\n continue;\n }\n\n if (weightsSize > 0) {\n outVals[value] += weightsVals[i];\n } else {\n outVals[value] += 1;\n }\n }\n\n return outVals;\n}\n\nexport function bincountReduceImpl(\n xBuf: TensorBuffer, weightsBuf: TensorBuffer, size: number,\n binaryOutput = false): TensorBuffer {\n const numRows = xBuf.shape[0];\n const numCols = xBuf.shape[1];\n\n const outBuf = buffer([numRows, size], weightsBuf.dtype);\n\n for (let i = 0; i < numRows; i++) {\n for (let j = 0; j < numCols; j++) {\n const value = xBuf.get(i, j);\n if (value < 0) {\n throw new Error('Input x must be non-negative!');\n }\n\n if (value >= size) {\n continue;\n }\n\n if (binaryOutput) {\n outBuf.set(1, i, value);\n } else {\n if (weightsBuf.size > 0) {\n outBuf.set(outBuf.get(i, value) + weightsBuf.get(i, j), i, value);\n } else {\n outBuf.set(outBuf.get(i, value) + 1, i, value);\n }\n }\n }\n }\n\n return outBuf as TensorBuffer;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Bincount, BincountAttrs, BincountInputs, KernelConfig, KernelFunc, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {bincountImpl} from './Bincount_impl';\n\nexport function bincount(args: {\n inputs: BincountInputs,\n backend: MathBackendCPU,\n attrs: BincountAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x, weights} = inputs;\n const {size} = attrs;\n\n const xVals = backend.data.get(x.dataId).values as TypedArray;\n const weightsVals = backend.data.get(weights.dataId).values as TypedArray;\n\n const outVals =\n bincountImpl(xVals, weightsVals, weights.dtype, weights.shape, size);\n\n return backend.makeTensorInfo([size], weights.dtype, outVals);\n}\n\nexport const bincountConfig: KernelConfig = {\n kernelName: Bincount,\n backendName: 'cpu',\n kernelFunc: bincount as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, BroadcastArgs, BroadcastArgsInputs, KernelConfig, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nexport function broadcastArgs(args: {\n inputs: BroadcastArgsInputs,\n backend: MathBackendCPU,\n}): TensorInfo {\n const {inputs, backend} = args;\n const {s0, s1} = inputs;\n\n const s0Vals = backend.data.get(s0.dataId).values as TypedArray;\n const s1Vals = backend.data.get(s1.dataId).values as TypedArray;\n\n const broadcastShape = backend_util.assertAndGetBroadcastShape(\n Array.from(s0Vals), Array.from(s1Vals));\n\n return backend.makeTensorInfo(\n [broadcastShape.length], 'int32', Int32Array.from(broadcastShape));\n}\n\nexport const broadcastArgsConfig: KernelConfig = {\n kernelName: BroadcastArgs,\n backendName: 'cpu',\n kernelFunc: broadcastArgs\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Ceil, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {createSimpleUnaryImpl} from '../utils/unary_impl';\nimport {unaryKernelFuncFromImpl} from '../utils/unary_utils';\n\nexport const ceilImpl = createSimpleUnaryImpl((xi) => Math.ceil(xi));\nexport const ceil = unaryKernelFuncFromImpl(Ceil, ceilImpl);\n\nexport const ceilConfig: KernelConfig = {\n kernelName: Ceil,\n backendName: 'cpu',\n kernelFunc: ceil,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ClipByValue, ClipByValueAttrs, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const clipByValue = unaryKernelFunc(ClipByValue, (xi, attrs) => {\n const clipAttrs = attrs as unknown as ClipByValueAttrs;\n if (xi > clipAttrs.clipValueMax) {\n return clipAttrs.clipValueMax;\n }\n return xi < clipAttrs.clipValueMin ? clipAttrs.clipValueMin : xi;\n});\n\nexport const clipByValueConfig: KernelConfig = {\n kernelName: ClipByValue,\n backendName: 'cpu',\n kernelFunc: clipByValue,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ComplexAbs, ComplexAbsInputs, KernelConfig, KernelFunc, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nexport const complexAbs =\n (args: {inputs: ComplexAbsInputs, backend: MathBackendCPU}) => {\n const {x} = args.inputs;\n const cpuBackend = args.backend;\n const resultValues = new Float32Array(util.sizeFromShape(x.shape));\n const complexVals = cpuBackend.data.get(x.dataId);\n const real = complexVals.complexTensorInfos.real;\n const imag = complexVals.complexTensorInfos.imag;\n const realVals = cpuBackend.data.get(real.dataId).values as Float32Array;\n const imagVals = cpuBackend.data.get(imag.dataId).values as Float32Array;\n for (let i = 0; i < realVals.length; i++) {\n const real = realVals[i];\n const imag = imagVals[i];\n resultValues[i] = Math.hypot(real, imag);\n }\n\n return cpuBackend.makeOutput(resultValues, x.shape, 'float32');\n };\n\nexport const complexAbsConfig: KernelConfig = {\n kernelName: ComplexAbs,\n backendName: 'cpu',\n kernelFunc: complexAbs as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, BackendValues, DataType, TypedArray, util} from '@tensorflow/tfjs-core';\n\nexport function concatImpl(\n inputs: Array<{vals: BackendValues, shape: number[]}>, outShape: number[],\n dtype: DataType, simplyConcat: boolean): TypedArray|string[] {\n const outVals = util.getArrayFromDType(dtype, util.sizeFromShape(outShape));\n\n if (simplyConcat && dtype !== 'string') {\n // Use built-in TypedArray.set() method for speed.\n let offset = 0;\n inputs.forEach(input => {\n const size = util.sizeFromShape(input.shape);\n\n (outVals as TypedArray).set(input.vals as TypedArray, offset);\n offset += size;\n });\n } else {\n let colOffset = 0;\n\n inputs.forEach(input => {\n const decodedData = dtype === 'string' ?\n backend_util.fromUint8ToStringArray(input.vals as Uint8Array[]) :\n input.vals as TypedArray;\n\n let tIdx = 0;\n\n for (let row = 0; row < input.shape[0]; ++row) {\n const resIdx = row * outShape[1] + colOffset;\n for (let col = 0; col < input.shape[1]; ++col) {\n outVals[resIdx + col] = decodedData[tIdx++];\n }\n }\n\n colOffset += input.shape[1];\n });\n }\n\n return outVals;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Imag, ImagInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nexport function imag(args: {inputs: ImagInputs, backend: MathBackendCPU}):\n TensorInfo {\n const {inputs, backend} = args;\n const {input} = inputs;\n\n const imag = backend.data.get(input.dataId).complexTensorInfos.imag;\n const imagVal = backend.data.get(imag.dataId).values;\n\n // When complex tensor is disposed, its underlying parts will be disposed too.\n // Make new tensor out of the imag value of the complex. This makes sure the\n // value is still accessible even if complex tensor is disposed.\n return backend.makeTensorInfo(imag.shape, imag.dtype, imagVal);\n}\n\nexport const imagConfig: KernelConfig = {\n kernelName: Imag,\n backendName: 'cpu',\n kernelFunc: imag as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Concat, ConcatAttrs, ConcatInputs, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nimport {complex} from './Complex';\nimport {concatImpl} from './Concat_impl';\nimport {identity} from './Identity';\nimport {imag} from './Imag';\nimport {real} from './Real';\nimport {reshape} from './Reshape';\n\nexport function concat(\n args: {inputs: ConcatInputs, backend: MathBackendCPU, attrs: ConcatAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {axis} = attrs;\n\n const $axis = util.parseAxisParam(axis, inputs[0].shape)[0];\n\n const shapes = inputs.map(t => t.shape);\n backend_util.assertParamsConsistent(shapes, $axis);\n\n let outShape = backend_util.computeOutShape(inputs.map(t => t.shape), $axis);\n\n if (util.sizeFromShape(outShape) === 0) {\n return backend.makeTensorInfo(outShape, inputs[0].dtype, []);\n }\n\n // Keep only non-empty tensors (ignore tensors with 0 in their shape).\n const $inputs = inputs.filter(t => util.sizeFromShape(t.shape) > 0);\n if ($inputs.length === 1) {\n return identity({inputs: {x: $inputs[0]}, backend});\n }\n\n if ($inputs[0].dtype === 'complex64') {\n const reals = $inputs.map((t) => real({inputs: {input: t}, backend}));\n const imags = $inputs.map((t) => imag({inputs: {input: t}, backend}));\n\n const realConcated = concat({inputs: reals, backend, attrs: {axis: $axis}});\n const imagConcated = concat({inputs: imags, backend, attrs: {axis: $axis}});\n\n const result =\n complex({inputs: {real: realConcated, imag: imagConcated}, backend});\n\n reals.forEach(r => backend.disposeIntermediateTensorInfo(r));\n imags.forEach(i => backend.disposeIntermediateTensorInfo(i));\n backend.disposeIntermediateTensorInfo(realConcated);\n backend.disposeIntermediateTensorInfo(imagConcated);\n\n return result;\n }\n\n // Any concat of n-dimensional tensors across any axis can be reduced to\n // a concatenation of two-dimensional tensors across the axis 1 by first\n // partitioning the axes of the original tensors into those less than the\n // axis to be concatenated and the rest. 
Then reshape the tensors\n // into a two-dimensional tensor by collapsing these two sets of axes and\n // concatenate the resulting matrices across the axis 1, finally reshaping\n // the result to have the proper shape.\n const inputs2D = $inputs.map(t => {\n const innerSize = util.sizeFromShape(t.shape.slice($axis));\n const shape = [-1, innerSize];\n return reshape({inputs: {x: t}, backend, attrs: {shape}});\n });\n\n const inputsValShapes = inputs2D.map(t => {\n return {vals: backend.data.get(t.dataId).values, shape: t.shape};\n });\n\n // Concats 2d tensors along axis=1.\n outShape =\n backend_util.computeOutShape(inputs2D.map(t => t.shape), 1 /* axis */);\n const simplyConcat = inputs2D[0].shape[0] === 1;\n const outVals =\n concatImpl(inputsValShapes, outShape, inputs[0].dtype, simplyConcat);\n\n const finalOutShape =\n backend_util.computeOutShape($inputs.map(t => t.shape), $axis);\n\n const outInfo =\n backend.makeTensorInfo(finalOutShape, inputs[0].dtype, outVals);\n\n inputs2D.forEach(t => backend.disposeIntermediateTensorInfo(t));\n\n return outInfo;\n}\n\nexport const concatConfig: KernelConfig = {\n kernelName: Concat,\n backendName: 'cpu',\n kernelFunc: concat as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Conv2D, Conv2DAttrs, Conv2DInputs, KernelConfig, KernelFunc, TensorBuffer, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function conv2D(\n args: {inputs: Conv2DInputs, backend: MathBackendCPU, attrs: Conv2DAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x, filter} = inputs;\n const {strides, pad, dataFormat, dilations, dimRoundingMode} = attrs;\n\n assertNotComplex([x, filter], 'conv2d');\n\n const $dataFormat = backend_util.convertConv2DDataFormat(dataFormat);\n const convInfo = backend_util.computeConv2DInfo(\n x.shape as [number, number, number, number],\n filter.shape as [number, number, number, number], strides, dilations, pad,\n dimRoundingMode, false /* depthwise */, $dataFormat);\n\n const filterHeight = convInfo.filterHeight;\n const filterWidth = convInfo.filterWidth;\n const dilationHeight = convInfo.dilationHeight;\n const dilationWidth = convInfo.dilationWidth;\n const padLeft = convInfo.padInfo.left;\n const padTop = convInfo.padInfo.top;\n const isChannelsLast = convInfo.dataFormat === 'channelsLast';\n\n const y = new TensorBuffer(convInfo.outShape, x.dtype as 'float32');\n\n const xStrides = util.computeStrides(x.shape);\n const filterStrides = util.computeStrides(filter.shape);\n\n const xBatchStride = xStrides[0];\n const xRowStride = isChannelsLast ? xStrides[1] : xStrides[2];\n const xColStride = isChannelsLast ? xStrides[2] : 1;\n const xChannelStride = isChannelsLast ? 
1 : xStrides[1];\n const yBatchStride = y.strides[0];\n const yRowStride = isChannelsLast ? y.strides[1] : y.strides[2];\n const yColStride = isChannelsLast ? y.strides[2] : 1;\n const yChannelStride = isChannelsLast ? 1 : y.strides[1];\n\n const xVals = backend.data.get(x.dataId).values as TypedArray;\n const wVals = backend.data.get(filter.dataId).values as TypedArray;\n const yVals = y.values;\n\n for (let b = 0; b < convInfo.batchSize; ++b) {\n const xOffset1 = b * xBatchStride;\n const yOffset1 = b * yBatchStride;\n for (let yR = 0; yR < convInfo.outHeight; ++yR) {\n const yOffset2 = yOffset1 + yR * yRowStride;\n const xRCorner = yR * convInfo.strideHeight - padTop;\n for (let wR = 0; wR < filterHeight; ++wR) {\n const xR = xRCorner + wR * dilationHeight;\n if (xR < 0 || xR >= convInfo.inHeight) {\n continue;\n }\n const wOffset1 = wR * filterStrides[0];\n const xOffset2 = xOffset1 + xR * xRowStride;\n for (let yC = 0; yC < convInfo.outWidth; ++yC) {\n const yOffset3 = yOffset2 + yC * yColStride;\n const xCCorner = yC * convInfo.strideWidth - padLeft;\n for (let wC = 0; wC < filterWidth; ++wC) {\n const xC = xCCorner + wC * dilationWidth;\n if (xC < 0 || xC >= convInfo.inWidth) {\n continue;\n }\n const wOffset2 = wOffset1 + wC * filterStrides[1];\n const xOffset3 = xOffset2 + xC * xColStride;\n let wOffset3 = wOffset2;\n for (let d1 = 0; d1 < convInfo.inChannels; ++d1) {\n const xVal = xVals[xOffset3 + d1 * xChannelStride];\n for (let d2 = 0; d2 < convInfo.outChannels; ++d2) {\n yVals[yOffset3 + d2 * yChannelStride] +=\n xVal * wVals[wOffset3 + d2];\n }\n wOffset3 += convInfo.outChannels;\n }\n }\n }\n }\n }\n }\n\n return backend.makeTensorInfo(y.shape, y.dtype, yVals);\n}\n\nexport const conv2DConfig: KernelConfig = {\n kernelName: Conv2D,\n backendName: 'cpu',\n kernelFunc: conv2D as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Conv2DBackpropFilter, Conv2DBackpropFilterAttrs, Conv2DBackpropFilterInputs, KernelConfig, KernelFunc, TensorBuffer, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function conv2DBackpropFilter(args: {\n inputs: Conv2DBackpropFilterInputs,\n backend: MathBackendCPU,\n attrs: Conv2DBackpropFilterAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x, dy} = inputs;\n const {strides, pad, dataFormat, dimRoundingMode, filterShape} = attrs;\n\n assertNotComplex([x, dy], 'conv2dBackpropFilter');\n\n const $dataFormat = backend_util.convertConv2DDataFormat(dataFormat);\n const convInfo = backend_util.computeConv2DInfo(\n x.shape as [number, number, number, number], filterShape, strides,\n 1 /* dilations */, pad, dimRoundingMode, false /* depthwise */,\n $dataFormat);\n\n const {strideHeight, strideWidth, filterHeight, filterWidth} = convInfo;\n const isChannelsLast = convInfo.dataFormat === 'channelsLast';\n const dW = new TensorBuffer(convInfo.filterShape, 'float32');\n\n const leftPad = convInfo.padInfo.left;\n const topPad = convInfo.padInfo.top;\n const xVals = backend.data.get(x.dataId).values as TypedArray;\n const dyVals = backend.data.get(dy.dataId).values as TypedArray;\n\n const xBuf = new TensorBuffer(x.shape, x.dtype, xVals);\n const dyBuf = new TensorBuffer(dy.shape, dy.dtype, dyVals);\n\n for (let wR = 0; wR < filterHeight; ++wR) {\n const yRMin = Math.max(0, Math.ceil((topPad - wR) / strideHeight));\n const yRMax = Math.min(\n convInfo.outHeight, (convInfo.inHeight + topPad - wR) / strideHeight);\n\n for (let wC = 0; wC < filterWidth; ++wC) {\n const yCMin = Math.max(0, Math.ceil((leftPad - wC) / strideWidth));\n const yCMax = Math.min(\n convInfo.outWidth, (convInfo.inWidth + leftPad - wC) / strideWidth);\n\n for (let d1 = 0; d1 < convInfo.inChannels; ++d1) {\n for (let d2 = 0; d2 < convInfo.outChannels; ++d2) {\n let dotProd = 0;\n for (let b = 0; b < convInfo.batchSize; ++b) {\n for (let yR = yRMin; yR < yRMax; ++yR) {\n const xR = wR + yR * strideHeight - topPad;\n for (let yC = yCMin; yC < yCMax; ++yC) {\n const xC = wC + yC * strideWidth - leftPad;\n if (isChannelsLast) {\n dotProd += (xBuf.get(b, xR, xC, d1) as number) *\n (dyBuf.get(b, yR, yC, d2) as number);\n } else {\n dotProd += (xBuf.get(b, d1, xR, xC) as number) *\n (dyBuf.get(b, d2, yR, yC) as number);\n }\n }\n }\n }\n dW.set(dotProd, wR, wC, d1, d2);\n }\n }\n }\n }\n\n return backend.makeTensorInfo(dW.shape, dW.dtype, dW.values);\n}\n\nexport const conv2DBackpropFilterConfig: KernelConfig = {\n kernelName: Conv2DBackpropFilter,\n backendName: 'cpu',\n kernelFunc: conv2DBackpropFilter as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Conv2DBackpropInput, Conv2DBackpropInputAttrs, Conv2DBackpropInputInputs, KernelConfig, KernelFunc, TensorBuffer, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function conv2DBackpropInput(args: {\n inputs: Conv2DBackpropInputInputs,\n backend: MathBackendCPU,\n attrs: Conv2DBackpropInputAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {dy, filter} = inputs;\n const {inputShape, strides, pad, dataFormat, dimRoundingMode} = attrs;\n\n assertNotComplex([dy, filter], 'conv2dBackpropInput');\n\n const filterStrides = util.computeStrides(filter.shape);\n const dyStrides = util.computeStrides(dy.shape);\n\n let $dataFormat = backend_util.convertConv2DDataFormat(dataFormat);\n const convInfo = backend_util.computeConv2DInfo(\n inputShape, filter.shape as [number, number, number, number], strides,\n 1 /* dilations */, pad, dimRoundingMode, false, $dataFormat);\n\n const dx = new TensorBuffer(convInfo.inShape, 'float32');\n const dxValues = dx.values;\n const dyValues = backend.data.get(dy.dataId).values as TypedArray;\n const fltValues = backend.data.get(filter.dataId).values as TypedArray;\n const [fltS0, fltS1, fltS2] = filterStrides;\n const {\n batchSize,\n filterHeight,\n filterWidth,\n inChannels,\n inHeight,\n inWidth,\n outChannels,\n outHeight,\n outWidth,\n strideHeight,\n strideWidth\n } = convInfo;\n $dataFormat = convInfo.dataFormat;\n const topPad = filterHeight - 1 - convInfo.padInfo.top;\n const leftPad = filterWidth - 1 - convInfo.padInfo.left;\n\n const isChannelsLast = $dataFormat === 'channelsLast';\n const xBatchStride = dx.strides[0];\n const xRowStride = isChannelsLast ? dx.strides[1] : dx.strides[2];\n const xColStride = isChannelsLast ? dx.strides[2] : 1;\n const xChannelStride = isChannelsLast ? 1 : dx.strides[1];\n const yBatchStride = dyStrides[0];\n const yRowStride = isChannelsLast ? dyStrides[1] : dyStrides[2];\n const yColStride = isChannelsLast ? dyStrides[2] : 1;\n const yChannelStride = isChannelsLast ? 
1 : dyStrides[1];\n\n for (let b = 0; b < batchSize; ++b) {\n for (let d1 = 0; d1 < inChannels; ++d1) {\n for (let xR = 0; xR < inHeight; ++xR) {\n const xRCorner = xR - topPad;\n const xRMin = Math.max(0, Math.ceil(xRCorner / strideHeight));\n const yRMax =\n Math.min(outHeight, (filterHeight + xRCorner) / strideHeight);\n\n for (let xC = 0; xC < inWidth; ++xC) {\n const xCCorner = xC - leftPad;\n const xCMin = Math.max(0, Math.ceil(xCCorner / strideWidth));\n const yCMax =\n Math.min(outWidth, (filterWidth + xCCorner) / strideWidth);\n\n let dotProd = 0;\n for (let yR = xRMin; yR < yRMax; ++yR) {\n const wR = yR * strideHeight - xRCorner;\n\n for (let yC = xCMin; yC < yCMax; ++yC) {\n const wC = yC * strideWidth - xCCorner;\n const dyOffset =\n yBatchStride * b + yRowStride * yR + yColStride * yC;\n const fltOffset = fltS0 * (filterHeight - 1 - wR) +\n fltS1 * (filterWidth - 1 - wC) + fltS2 * d1;\n\n for (let d2 = 0; d2 < outChannels; ++d2) {\n const pixel = dyValues[dyOffset + yChannelStride * d2];\n const weight = fltValues[fltOffset + d2];\n dotProd += pixel * weight;\n }\n }\n }\n const dxOffset = xBatchStride * b + xRowStride * xR +\n xColStride * xC + xChannelStride * d1;\n dxValues[dxOffset] = dotProd;\n }\n }\n }\n }\n\n return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values);\n}\n\nexport const conv2DBackpropInputConfig: KernelConfig = {\n kernelName: Conv2DBackpropInput,\n backendName: 'cpu',\n kernelFunc: conv2DBackpropInput as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Conv3D, Conv3DAttrs, Conv3DInputs, KernelConfig, KernelFunc, TensorBuffer, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function conv3D(\n args: {inputs: Conv3DInputs, backend: MathBackendCPU, attrs: Conv3DAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x, filter} = inputs;\n const {strides, pad, dilations} = attrs;\n\n assertNotComplex([x, filter], 'conv3d');\n\n const convInfo = backend_util.computeConv3DInfo(\n x.shape as [number, number, number, number, number],\n filter.shape as [number, number, number, number, number], strides,\n dilations, pad);\n\n const {\n filterDepth,\n filterHeight,\n filterWidth,\n dilationDepth,\n dilationHeight,\n dilationWidth,\n padInfo\n } = convInfo;\n const padFront = padInfo.front;\n const padLeft = padInfo.left;\n const padTop = padInfo.top;\n const y = new TensorBuffer(convInfo.outShape, x.dtype as 'float32');\n\n const xVals = backend.data.get(x.dataId).values as TypedArray;\n const wVals = backend.data.get(filter.dataId).values as TypedArray;\n const yVals = y.values;\n\n const xStrides = util.computeStrides(x.shape);\n const filterStrides = util.computeStrides(filter.shape);\n\n for (let b = 0; b < 
convInfo.batchSize; ++b) {\n const xOffset1 = b * xStrides[0];\n const yOffset1 = b * y.strides[0];\n for (let yF = 0; yF < convInfo.outDepth; ++yF) {\n const yOffset2 = yOffset1 + yF * y.strides[1];\n const xFCorner = yF * convInfo.strideDepth - padFront;\n for (let wF = 0; wF < filterDepth; ++wF) {\n const xF = xFCorner + wF * dilationDepth;\n if (xF < 0 || xF >= convInfo.inDepth) {\n continue;\n }\n const wOffset1 = wF * filterStrides[0];\n const xOffset2 = xOffset1 + xF * xStrides[1];\n\n for (let yR = 0; yR < convInfo.outHeight; ++yR) {\n const yOffset3 = yOffset2 + yR * y.strides[2];\n const xRCorner = yR * convInfo.strideHeight - padTop;\n for (let wR = 0; wR < filterHeight; ++wR) {\n const xR = xRCorner + wR * dilationHeight;\n if (xR < 0 || xR >= convInfo.inHeight) {\n continue;\n }\n const wOffset2 = wOffset1 + wR * filterStrides[1];\n const xOffset3 = xOffset2 + xR * xStrides[2];\n for (let yC = 0; yC < convInfo.outWidth; ++yC) {\n const yOffset4 = yOffset3 + yC * convInfo.outChannels;\n const xCCorner = yC * convInfo.strideWidth - padLeft;\n for (let wC = 0; wC < filterWidth; ++wC) {\n const xC = xCCorner + wC * dilationWidth;\n if (xC < 0 || xC >= convInfo.inWidth) {\n continue;\n }\n const wOffset3 = wOffset2 + wC * filterStrides[2];\n const xOffset4 = xOffset3 + xC * convInfo.inChannels;\n let wOffset4 = wOffset3;\n for (let d1 = 0; d1 < convInfo.inChannels; ++d1) {\n const xVal = xVals[xOffset4 + d1];\n for (let d2 = 0; d2 < convInfo.outChannels; ++d2) {\n yVals[yOffset4 + d2] += xVal * wVals[wOffset4 + d2];\n }\n wOffset4 += convInfo.outChannels;\n }\n }\n }\n }\n }\n }\n }\n }\n\n return backend.makeTensorInfo(y.shape, y.dtype, y.values);\n}\n\nexport const conv3DConfig: KernelConfig = {\n kernelName: Conv3D,\n backendName: 'cpu',\n kernelFunc: conv3D as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Conv3DBackpropFilterV2, Conv3DBackpropFilterV2Attrs, Conv3DBackpropFilterV2Inputs, KernelConfig, KernelFunc, TensorBuffer, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function conv3DBackpropFilterV2(args: {\n inputs: Conv3DBackpropFilterV2Inputs,\n backend: MathBackendCPU,\n attrs: Conv3DBackpropFilterV2Attrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x, dy} = inputs;\n const {strides, pad, filterShape} = attrs;\n\n assertNotComplex([x, dy], 'conv3dBackpropFilterV2');\n\n const xStrides = util.computeStrides(x.shape);\n const dyStrides = util.computeStrides(dy.shape);\n\n const convInfo = backend_util.computeConv3DInfo(\n x.shape as [number, number, number, number, number], filterShape, strides,\n 1 /* dilations */, pad);\n\n const strideDepth = convInfo.strideDepth;\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const filterDepth = convInfo.filterDepth;\n const filterHeight = convInfo.filterHeight;\n const filterWidth = convInfo.filterWidth;\n\n const dw = new TensorBuffer(convInfo.filterShape, 'float32');\n const dwValues = dw.values;\n const [dwS0, dwS1, dwS2, dwS3] = dw.strides;\n const dyValues = backend.data.get(dy.dataId).values as TypedArray;\n const [dyS0, dyS1, dyS2, dyS3] = dyStrides;\n const xValues = backend.data.get(x.dataId).values as TypedArray;\n const [xS0, xS1, xS2, xS3] = xStrides;\n\n const frontPad = convInfo.padInfo.front;\n const leftPad = convInfo.padInfo.left;\n const topPad = convInfo.padInfo.top;\n\n for (let wF = 0; wF < filterDepth; ++wF) {\n const yFMin = Math.max(0, Math.ceil((frontPad - wF) / strideDepth));\n const yFMax = Math.min(\n convInfo.outDepth, (convInfo.inDepth + frontPad - wF) / strideDepth);\n const wOffset1 = wF * dwS0;\n\n for (let wR = 0; wR < filterHeight; ++wR) {\n const yRMin = Math.max(0, Math.ceil((topPad - wR) / strideHeight));\n const yRMax = Math.min(\n convInfo.outHeight, (convInfo.inHeight + topPad - wR) / strideHeight);\n const wOffset2 = wR * dwS1 + wOffset1;\n\n for (let wC = 0; wC < filterWidth; ++wC) {\n const yCMin = Math.max(0, Math.ceil((leftPad - wC) / strideWidth));\n const yCMax = Math.min(\n convInfo.outWidth, (convInfo.inWidth + leftPad - wC) / strideWidth);\n const wOffset3 = wC * dwS2 + wOffset2;\n\n for (let d1 = 0; d1 < convInfo.inChannels; ++d1) {\n const wOffset4 = d1 * dwS3 + wOffset3;\n\n for (let d2 = 0; d2 < convInfo.outChannels; ++d2) {\n let dotProd = 0;\n for (let b = 0; b < convInfo.batchSize; ++b) {\n const xOffset1 = b * xS0;\n const yOffset1 = b * dyS0;\n\n for (let yF = yFMin; yF < yFMax; ++yF) {\n const xF = wF + yF * strideDepth - frontPad;\n const xOffset2 = xF * xS1 + xOffset1;\n const yOffset2 = yF * dyS1 + yOffset1;\n\n 
for (let yR = yRMin; yR < yRMax; ++yR) {\n const xR = wR + yR * strideHeight - topPad;\n const xOffset3 = xR * xS2 + xOffset2;\n const yOffset3 = yR * dyS2 + yOffset2;\n\n for (let yC = yCMin; yC < yCMax; ++yC) {\n const xC = wC + yC * strideWidth - leftPad;\n const xOffset4 = xC * xS3 + xOffset3;\n const yOffset4 = yC * dyS3 + yOffset3;\n\n dotProd += xValues[xOffset4 + d1] * dyValues[yOffset4 + d2];\n }\n }\n }\n }\n dwValues[wOffset4 + d2] = dotProd;\n }\n }\n }\n }\n }\n\n return backend.makeTensorInfo(dw.shape, dw.dtype, dw.values);\n}\n\nexport const conv3DBackpropFilterV2Config: KernelConfig = {\n kernelName: Conv3DBackpropFilterV2,\n backendName: 'cpu',\n kernelFunc: conv3DBackpropFilterV2 as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Conv3DBackpropInputV2, Conv3DBackpropInputV2Attrs, Conv3DBackpropInputV2Inputs, KernelConfig, KernelFunc, TensorBuffer, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function conv3DBackpropInputV2(args: {\n inputs: Conv3DBackpropInputV2Inputs,\n backend: MathBackendCPU,\n attrs: Conv3DBackpropInputV2Attrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {dy, filter} = inputs;\n const {pad, strides, inputShape} = attrs;\n\n assertNotComplex([dy], 'conv3dBackpropInputV2');\n\n const dyStrides = util.computeStrides(dy.shape);\n const filterStrides = util.computeStrides(filter.shape);\n\n const convInfo = backend_util.computeConv3DInfo(\n inputShape, filter.shape as [number, number, number, number, number],\n strides, 1 /* dilations */, pad);\n\n const dx = new TensorBuffer(convInfo.inShape, 'float32');\n const dxValues = dx.values;\n const [dxS0, dxS1, dxS2, dxS3] = dx.strides;\n const dyValues = backend.data.get(dy.dataId).values as TypedArray;\n const [dyS0, dyS1, dyS2, dyS3] = dyStrides;\n const fltValues = backend.data.get(filter.dataId).values as TypedArray;\n const [fltS0, fltS1, fltS2, fltS3] = filterStrides;\n const {\n batchSize,\n filterDepth,\n filterHeight,\n filterWidth,\n inChannels,\n inDepth,\n inHeight,\n inWidth,\n outChannels,\n outDepth,\n outHeight,\n outWidth,\n strideDepth,\n strideHeight,\n strideWidth\n } = convInfo;\n const frontPad = filterDepth - 1 - convInfo.padInfo.front;\n const topPad = filterHeight - 1 - convInfo.padInfo.top;\n const leftPad = filterWidth - 1 - convInfo.padInfo.left;\n\n for (let b = 0; b < batchSize; ++b) {\n for (let d1 = 0; d1 < inChannels; ++d1) {\n // Frames of depth\n for (let xF = 0; xF < inDepth; ++xF) {\n const xFCorner = xF - frontPad;\n const xFMin = Math.max(0, Math.ceil(xFCorner / strideDepth));\n const yFMax =\n Math.min(outDepth, (filterDepth + xFCorner) / strideDepth);\n\n // Rows as per standard 2d matrix notation\n for (let xR = 0; 
xR < inHeight; ++xR) {\n const xRCorner = xR - topPad;\n const xRMin = Math.max(0, Math.ceil(xRCorner / strideHeight));\n const yRMax =\n Math.min(outHeight, (filterHeight + xRCorner) / strideHeight);\n // Columns as per standard 2d matrix notation\n for (let xC = 0; xC < inWidth; ++xC) {\n const xCCorner = xC - leftPad;\n const xCMin = Math.max(0, Math.ceil(xCCorner / strideWidth));\n const yCMax =\n Math.min(outWidth, (filterWidth + xCCorner) / strideWidth);\n\n let dotProd = 0;\n for (let yF = xFMin; yF < yFMax; ++yF) {\n const wF = yF * strideDepth - xFCorner;\n\n for (let yR = xRMin; yR < yRMax; ++yR) {\n const wR = yR * strideHeight - xRCorner;\n\n for (let yC = xCMin; yC < yCMax; ++yC) {\n const wC = yC * strideWidth - xCCorner;\n const dyOffset = dyS0 * b + dyS1 * yF + dyS2 * yR + dyS3 * yC;\n const fltOffset = fltS0 * (filterDepth - 1 - wF) +\n fltS1 * (filterHeight - 1 - wR) +\n fltS2 * (filterWidth - 1 - wC) + fltS3 * d1;\n\n for (let d2 = 0; d2 < outChannels; ++d2) {\n const pixel = dyValues[dyOffset + d2];\n const weight = fltValues[fltOffset + d2];\n dotProd += pixel * weight;\n }\n }\n }\n }\n dxValues[dxS0 * b + dxS1 * xF + dxS2 * xR + dxS3 * xC + d1] =\n dotProd;\n }\n }\n }\n }\n }\n\n return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values);\n}\n\nexport const conv3DBackpropInputV2Config: KernelConfig = {\n kernelName: Conv3DBackpropInputV2,\n backendName: 'cpu',\n kernelFunc: conv3DBackpropInputV2 as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Cos, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const cos = unaryKernelFunc(Cos, (xi) => Math.cos(xi));\n\nexport const cosConfig: KernelConfig = {\n kernelName: Cos,\n backendName: 'cpu',\n kernelFunc: cos,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Cosh, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const cosh = unaryKernelFunc(Cosh, (xi) => Math.cosh(xi));\n\nexport const coshConfig: KernelConfig = {\n kernelName: Cosh,\n backendName: 'cpu',\n kernelFunc: cosh,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {buffer, CropAndResize, CropAndResizeAttrs, CropAndResizeInputs, KernelConfig, KernelFunc, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nexport function cropAndResize(args: {\n inputs: CropAndResizeInputs,\n backend: MathBackendCPU,\n attrs: CropAndResizeAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {image, boxes, boxInd} = inputs;\n const {cropSize, method, extrapolationValue} = attrs;\n\n const [batch, imageHeight, imageWidth, numChannels] = image.shape;\n const numBoxes = boxes.shape[0];\n\n const [cropHeight, cropWidth] = cropSize;\n const output =\n buffer([numBoxes, cropHeight, cropWidth, numChannels], 'float32');\n\n const boxVals = backend.data.get(boxes.dataId).values as TypedArray;\n const boxIndVals = backend.data.get(boxInd.dataId).values as TypedArray;\n const imageVals = backend.data.get(image.dataId).values as TypedArray;\n\n const inStride =\n util.computeStrides(image.shape); // to calculate flat indexes into image\n const outStride = util.computeStrides(\n output.shape); // to calculate flat indexes into output\n\n // Reference implementation\n // tslint:disable-next-line:max-line-length\n // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/crop_and_resize_op.cc\n for (let b = 0; b < numBoxes; b++) {\n const startInd = b * 4;\n const y1 = boxVals[startInd];\n const x1 = boxVals[startInd + 1];\n const y2 = boxVals[startInd + 2];\n const x2 = boxVals[startInd + 3];\n\n const bInd: number = boxIndVals[b];\n if (bInd >= batch) {\n continue;\n }\n\n const heightScale =\n (cropHeight > 1) ? (y2 - y1) * (imageHeight - 1) / (cropHeight - 1) : 0;\n const widthScale =\n (cropWidth > 1) ? 
(x2 - x1) * (imageWidth - 1) / (cropWidth - 1) : 0;\n\n for (let y = 0; y < cropHeight; y++) {\n const yInd: number = (cropHeight > 1) ?\n y1 * (imageHeight - 1) + y * (heightScale) :\n 0.5 * (y1 + y2) * (imageHeight - 1);\n\n if (yInd < 0 || yInd > imageHeight - 1) {\n for (let x = 0; x < cropWidth; x++) {\n for (let c = 0; c < numChannels; c++) {\n const ind =\n c + x * outStride[2] + y * outStride[1] + b * outStride[0];\n output.values[ind] = extrapolationValue;\n }\n }\n continue;\n }\n\n if (method === 'bilinear') {\n const topInd = Math.floor(yInd);\n const bottomInd = Math.ceil(yInd);\n const yLerp = yInd - topInd;\n\n for (let x = 0; x < cropWidth; x++) {\n const xInd = (cropWidth > 1) ?\n x1 * (imageWidth - 1) + x * widthScale :\n 0.5 * (x1 + x2) * (imageWidth - 1);\n\n if (xInd < 0 || xInd > imageWidth - 1) {\n for (let c = 0; c < numChannels; c++) {\n const ind =\n c + x * outStride[2] + y * outStride[1] + b * outStride[0];\n output.values[ind] = extrapolationValue;\n }\n continue;\n }\n\n const leftInd = Math.floor(xInd);\n const rightInd = Math.ceil(xInd);\n const xLerp = xInd - leftInd;\n\n for (let c = 0; c < numChannels; c++) {\n let ind = c + leftInd * inStride[2] + topInd * inStride[1] +\n bInd * inStride[0];\n const topLeft = imageVals[ind];\n\n ind = c + rightInd * inStride[2] + topInd * inStride[1] +\n bInd * inStride[0];\n const topRight = imageVals[ind];\n\n ind = c + leftInd * inStride[2] + bottomInd * inStride[1] +\n bInd * inStride[0];\n const bottomLeft = imageVals[ind];\n\n ind = c + rightInd * inStride[2] + bottomInd * inStride[1] +\n bInd * inStride[0];\n const bottomRight = imageVals[ind];\n\n const top = topLeft + (topRight - topLeft) * xLerp;\n const bottom = bottomLeft + (bottomRight - bottomLeft) * xLerp;\n\n ind = c + x * outStride[2] + y * outStride[1] + b * outStride[0];\n output.values[ind] = top + ((bottom - top) * yLerp);\n }\n }\n } else { // method == \"nearest\"\n for (let x = 0; x < cropWidth; ++x) {\n const xInd = (cropWidth > 1) ?\n x1 * (imageWidth - 1) + x * widthScale :\n 0.5 * (x1 + x2) * (imageWidth - 1);\n\n if (xInd < 0 || xInd > imageWidth - 1) {\n for (let c = 0; c < numChannels; c++) {\n const ind =\n c + x * outStride[2] + y * outStride[1] + b * outStride[0];\n output.values[ind] = extrapolationValue;\n }\n continue;\n }\n\n const closestX = Math.round(xInd);\n const closestY = Math.round(yInd);\n for (let c = 0; c < numChannels; c++) {\n const inInd = c + closestX * inStride[2] + closestY * inStride[1] +\n bInd * inStride[0];\n const outInd =\n c + x * outStride[2] + y * outStride[1] + b * outStride[0];\n output.values[outInd] = imageVals[inInd];\n }\n }\n }\n }\n }\n\n return backend.makeTensorInfo(output.shape, output.dtype, output.values);\n}\n\nexport const cropAndResizeConfig: KernelConfig = {\n kernelName: CropAndResize,\n backendName: 'cpu',\n kernelFunc: cropAndResize as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2022 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Cumprod, CumprodAttrs, CumprodInputs, KernelConfig, KernelFunc, TensorInfo, TypedArray, upcastType, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {transpose} from './Transpose';\n\nexport function cumprod(\n args: {inputs: CumprodInputs, backend: MathBackendCPU,\n attrs: CumprodAttrs}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis, exclusive, reverse} = attrs;\n\n assertNotComplex(x, 'cumprod');\n\n const permutation = backend_util.getAxesPermutation([axis], x.shape.length);\n let $x = x;\n if (permutation != null) {\n $x = transpose({inputs: {x}, backend, attrs: {perm: permutation}});\n }\n const permutedAxis = backend_util.getInnerMostAxes(1, x.shape.length)[0];\n\n if (permutedAxis !== $x.shape.length - 1) {\n throw new Error(\n `backend.cumprod in CPU expects an inner-most ` +\n `axis=${$x.shape.length - 1} but got axis=${permutedAxis}`);\n }\n\n const resultDtype = upcastType($x.dtype, 'int32');\n const vals = util.makeOnesTypedArray(\n util.sizeFromShape($x.shape), resultDtype) as TypedArray;\n\n const aVals = backend.data.get($x.dataId).values as TypedArray;\n const finalDim = $x.shape[$x.shape.length - 1];\n const indexAdjuster = reverse ?\n (i: number, j: number) => i + finalDim - j - 1 :\n (i: number, j: number) => i + j;\n for (let i = 0; i < aVals.length; i += finalDim) {\n for (let j = 0; j < finalDim; j++) {\n const idx = indexAdjuster(i, j);\n if (j === 0) {\n vals[idx] = exclusive ? 1 : aVals[idx];\n } else {\n const prevIdx = indexAdjuster(i, j - 1);\n vals[idx] = exclusive ? aVals[prevIdx] * vals[prevIdx] :\n aVals[idx] * vals[prevIdx];\n }\n }\n }\n\n const result = backend.makeTensorInfo($x.shape, resultDtype, vals);\n\n if (permutation != null) {\n const reversePermutation = backend_util.getUndoAxesPermutation(permutation);\n const reverseTransposedResult = transpose(\n {inputs: {x: result}, backend, attrs: {perm: reversePermutation}});\n\n backend.disposeIntermediateTensorInfo(result);\n backend.disposeIntermediateTensorInfo($x);\n\n return reverseTransposedResult;\n }\n\n return result;\n}\n\nexport const cumprodConfig: KernelConfig = {\n kernelName: Cumprod,\n backendName: 'cpu',\n kernelFunc: cumprod as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Cumsum, CumsumAttrs, CumsumInputs, KernelConfig, KernelFunc, TensorInfo, TypedArray, upcastType, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {transpose} from './Transpose';\n\nexport function cumsum(\n args: {inputs: CumsumInputs, backend: MathBackendCPU, attrs: CumsumAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis, exclusive, reverse} = attrs;\n\n assertNotComplex(x, 'cumsum');\n\n const permutation = backend_util.getAxesPermutation([axis], x.shape.length);\n let $x = x;\n if (permutation != null) {\n $x = transpose({inputs: {x}, backend, attrs: {perm: permutation}});\n }\n const permutedAxis = backend_util.getInnerMostAxes(1, x.shape.length)[0];\n\n if (permutedAxis !== $x.shape.length - 1) {\n throw new Error(\n `backend.cumsum in CPU expects an inner-most ` +\n `axis=${$x.shape.length - 1} but got axis=${permutedAxis}`);\n }\n\n const resultDtype = upcastType($x.dtype, 'int32');\n const vals = util.makeZerosTypedArray(\n util.sizeFromShape($x.shape), resultDtype) as TypedArray;\n\n const aVals = backend.data.get($x.dataId).values as TypedArray;\n const finalDim = $x.shape[$x.shape.length - 1];\n const indexAdjuster = reverse ?\n (i: number, j: number) => i + finalDim - j - 1 :\n (i: number, j: number) => i + j;\n for (let i = 0; i < aVals.length; i += finalDim) {\n for (let j = 0; j < finalDim; j++) {\n const idx = indexAdjuster(i, j);\n if (j === 0) {\n vals[idx] = exclusive ? 0 : aVals[idx];\n } else {\n const prevIdx = indexAdjuster(i, j - 1);\n vals[idx] = exclusive ? aVals[prevIdx] + vals[prevIdx] :\n aVals[idx] + vals[prevIdx];\n }\n }\n }\n\n const result = backend.makeTensorInfo($x.shape, resultDtype, vals);\n\n if (permutation != null) {\n const reversePermutation = backend_util.getUndoAxesPermutation(permutation);\n const reverseTransposedResult = transpose(\n {inputs: {x: result}, backend, attrs: {perm: reversePermutation}});\n\n backend.disposeIntermediateTensorInfo(result);\n backend.disposeIntermediateTensorInfo($x);\n\n return reverseTransposedResult;\n }\n\n return result;\n}\n\nexport const cumsumConfig: KernelConfig = {\n kernelName: Cumsum,\n backendName: 'cpu',\n kernelFunc: cumsum as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DenseBincount, DenseBincountAttrs, DenseBincountInputs, KernelConfig, KernelFunc, Rank, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {bincountImpl, bincountReduceImpl} from './Bincount_impl';\n\nexport function denseBincount(args: {\n inputs: DenseBincountInputs,\n backend: MathBackendCPU,\n attrs: DenseBincountAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x, weights} = inputs;\n const {size, binaryOutput} = attrs;\n\n if (x.shape.length === 1) {\n const xVals = backend.data.get(x.dataId).values as TypedArray;\n const weightsVals = backend.data.get(weights.dataId).values as TypedArray;\n\n const outVals =\n bincountImpl(xVals, weightsVals, weights.dtype, weights.shape, size);\n\n return backend.makeTensorInfo([size], weights.dtype, outVals);\n } else if (x.shape.length === 2) {\n const xBuf = backend.bufferSync(x);\n const weightsBuf = backend.bufferSync(weights);\n\n const outBuf = bincountReduceImpl(xBuf, weightsBuf, size, binaryOutput);\n\n return backend.makeTensorInfo(outBuf.shape, weights.dtype, outBuf.values);\n }\n\n throw new Error(\n `Error in denseBincount: input must be at most rank 2, but got rank` +\n `${x.shape.length}.`);\n}\n\nexport const denseBincountConfig: KernelConfig = {\n kernelName: DenseBincount,\n backendName: 'cpu',\n kernelFunc: denseBincount as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DepthToSpace, DepthToSpaceAttrs, DepthToSpaceInputs, KernelConfig, KernelFunc, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nexport function depthToSpace(args: {\n inputs: DepthToSpaceInputs,\n backend: MathBackendCPU,\n attrs: DepthToSpaceAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {blockSize, dataFormat} = attrs;\n\n util.assert(\n dataFormat === 'NHWC',\n () => `Only NHWC dataFormat supported on CPU for depthToSpace. 
Got ${\n dataFormat}`);\n\n const batchSize = x.shape[0];\n const inputHeight = x.shape[1];\n const inputWidth = x.shape[2];\n const inputDepth = x.shape[3];\n\n const outputHeight = inputHeight * blockSize;\n const outputWidth = inputWidth * blockSize;\n const outputDepth = inputDepth / (blockSize * blockSize);\n\n const xValues = backend.data.get(x.dataId).values as TypedArray;\n const result =\n new Float32Array(batchSize * outputHeight * outputWidth * outputDepth);\n\n let outputIdx = 0;\n for (let b = 0; b < batchSize; ++b) {\n for (let h = 0; h < outputHeight; ++h) {\n const inH = Math.floor(h / blockSize);\n const offsetH = (h % blockSize);\n for (let w = 0; w < outputWidth; ++w) {\n const inW = Math.floor(w / blockSize);\n const offsetW = (w % blockSize);\n const offsetD = (offsetH * blockSize + offsetW) * outputDepth;\n for (let d = 0; d < outputDepth; ++d) {\n const inD = d + offsetD;\n const inputIdx =\n inD + inputDepth * (inW + inputWidth * (inH + inputHeight * b));\n result[outputIdx++] = xValues[inputIdx];\n }\n }\n }\n }\n\n return backend.makeTensorInfo(\n [batchSize, outputHeight, outputWidth, outputDepth], x.dtype, result);\n}\n\nexport const depthToSpaceConfig: KernelConfig = {\n kernelName: DepthToSpace,\n backendName: 'cpu',\n kernelFunc: depthToSpace as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, DepthwiseConv2dNative, DepthwiseConv2dNativeAttrs, DepthwiseConv2dNativeInputs, KernelConfig, KernelFunc, TensorBuffer, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function depthwiseConv2dNative(args: {\n inputs: DepthwiseConv2dNativeInputs,\n backend: MathBackendCPU,\n attrs: DepthwiseConv2dNativeAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x, filter} = inputs;\n const {strides, pad, dilations, dimRoundingMode} = attrs;\n\n assertNotComplex([x, filter], 'depthwiseConv2DNative');\n\n const xStrides = util.computeStrides(x.shape);\n const filterStrides = util.computeStrides(filter.shape);\n\n let $dilations = dilations;\n if ($dilations == null) {\n $dilations = [1, 1];\n }\n\n util.assert(\n backend_util.eitherStridesOrDilationsAreOne(strides, $dilations),\n () => 'Error in depthwiseConv2d: Either strides or dilations must be ' +\n `1. 
Got strides ${strides} and dilations '${$dilations}'`);\n\n const convInfo = backend_util.computeConv2DInfo(\n x.shape as [number, number, number, number],\n filter.shape as [number, number, number, number], strides, $dilations,\n pad, dimRoundingMode, true /* depthwise */);\n\n const {filterHeight, filterWidth, dilationHeight, dilationWidth, padInfo} =\n convInfo;\n const padLeft = padInfo.left;\n const padTop = padInfo.top;\n const chMul = convInfo.outChannels / convInfo.inChannels;\n const y = new TensorBuffer(convInfo.outShape, x.dtype as 'float32');\n const xVals = backend.data.get(x.dataId).values as TypedArray;\n const wVals = backend.data.get(filter.dataId).values as TypedArray;\n const yVals = y.values;\n\n for (let b = 0; b < convInfo.batchSize; ++b) {\n const xOffset1 = b * xStrides[0];\n const yOffset1 = b * y.strides[0];\n for (let yR = 0; yR < convInfo.outHeight; ++yR) {\n const yOffset2 = yOffset1 + yR * y.strides[1];\n const xRCorner = yR * convInfo.strideHeight - padTop;\n for (let wR = 0; wR < filterHeight; ++wR) {\n const xR = xRCorner + wR * dilationHeight;\n if (xR < 0 || xR >= convInfo.inHeight) {\n continue;\n }\n const wOffset1 = wR * filterStrides[0];\n const xOffset2 = xOffset1 + xR * xStrides[1];\n for (let yC = 0; yC < convInfo.outWidth; ++yC) {\n const yOffset3 = yOffset2 + yC * y.strides[2];\n const xCCorner = yC * convInfo.strideWidth - padLeft;\n for (let wC = 0; wC < filterWidth; ++wC) {\n const xC = xCCorner + wC * dilationWidth;\n if (xC < 0 || xC >= convInfo.inWidth) {\n continue;\n }\n const wOffset2 = wOffset1 + wC * filterStrides[1];\n const xOffset3 = xOffset2 + xC * convInfo.inChannels;\n let yOffset4 = yOffset3;\n let wOffset3 = wOffset2;\n for (let d1 = 0; d1 < convInfo.inChannels; ++d1) {\n const xVal = xVals[xOffset3 + d1];\n for (let q = 0; q < chMul; ++q) {\n yVals[yOffset4 + q] += xVal * wVals[wOffset3 + q];\n }\n yOffset4 += chMul;\n wOffset3 += chMul;\n }\n }\n }\n }\n }\n }\n\n return backend.makeTensorInfo(y.shape, y.dtype, y.values);\n}\n\nexport const depthwiseConv2dNativeConfig: KernelConfig = {\n kernelName: DepthwiseConv2dNative,\n backendName: 'cpu',\n kernelFunc: depthwiseConv2dNative as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, DepthwiseConv2dNativeBackpropFilter, DepthwiseConv2dNativeBackpropFilterAttrs, DepthwiseConv2dNativeBackpropFilterInputs, KernelConfig, KernelFunc, TensorBuffer, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function depthwiseConv2dNativeBackpropFilter(args: {\n inputs: DepthwiseConv2dNativeBackpropFilterInputs,\n backend: MathBackendCPU,\n attrs: DepthwiseConv2dNativeBackpropFilterAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x, dy} = inputs;\n const {strides, dilations, pad, dimRoundingMode, filterShape} = attrs;\n\n assertNotComplex([x, dy], 'depthwiseConv2dNativeBackpropFilter');\n\n const convInfo = backend_util.computeConv2DInfo(\n x.shape as [number, number, number, number], filterShape, strides,\n dilations, pad, dimRoundingMode, true /* depthwise */);\n\n const {strideHeight, strideWidth, filterHeight, filterWidth} = convInfo;\n\n const dW = new TensorBuffer(convInfo.filterShape, 'float32');\n\n const leftPad = convInfo.padInfo.left;\n const topPad = convInfo.padInfo.top;\n const chMul = convInfo.outChannels / convInfo.inChannels;\n\n const xVals = backend.data.get(x.dataId).values as TypedArray;\n const xBuf = new TensorBuffer(x.shape, x.dtype, xVals);\n const dyVals = backend.data.get(dy.dataId).values as TypedArray;\n const dyBuf = new TensorBuffer(dy.shape, dy.dtype, dyVals);\n for (let wR = 0; wR < filterHeight; ++wR) {\n const yRMin = Math.max(0, Math.ceil((topPad - wR) / strideHeight));\n const yRMax = Math.min(\n convInfo.outHeight, (convInfo.inHeight + topPad - wR) / strideHeight);\n\n for (let wC = 0; wC < filterWidth; ++wC) {\n const yCMin = Math.max(0, Math.ceil((leftPad - wC) / strideWidth));\n const yCMax = Math.min(\n convInfo.outWidth, (convInfo.inWidth + leftPad - wC) / strideWidth);\n\n for (let d2 = 0; d2 < convInfo.outChannels; ++d2) {\n const d1 = Math.trunc(d2 / chMul);\n const dm = d2 % chMul;\n\n let dotProd = 0;\n for (let b = 0; b < convInfo.batchSize; ++b) {\n for (let yR = yRMin; yR < yRMax; ++yR) {\n const xR = wR + yR * strideHeight - topPad;\n for (let yC = yCMin; yC < yCMax; ++yC) {\n const xC = wC + yC * strideWidth - leftPad;\n dotProd += (xBuf.get(b, xR, xC, d1) as number) *\n (dyBuf.get(b, yR, yC, d2) as number);\n }\n }\n }\n dW.set(dotProd, wR, wC, d1, dm);\n }\n }\n }\n\n return backend.makeTensorInfo(dW.shape, dW.dtype, dW.values);\n}\n\nexport const depthwiseConv2dNativeBackpropFilterConfig: KernelConfig = {\n kernelName: DepthwiseConv2dNativeBackpropFilter,\n backendName: 'cpu',\n kernelFunc: depthwiseConv2dNativeBackpropFilter as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, DepthwiseConv2dNativeBackpropInput, DepthwiseConv2dNativeBackpropInputAttrs, DepthwiseConv2dNativeBackpropInputInputs, KernelConfig, KernelFunc, TensorBuffer, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function depthwiseConv2dNativeBackpropInput(args: {\n inputs: DepthwiseConv2dNativeBackpropInputInputs,\n backend: MathBackendCPU,\n attrs: DepthwiseConv2dNativeBackpropInputAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {dy, filter} = inputs;\n const {strides, dilations, pad, dimRoundingMode, inputShape} = attrs;\n\n assertNotComplex([dy, filter], 'depthwiseConv2DNativeBackpropInput');\n\n const dyStrides = util.computeStrides(dy.shape);\n const filterStrides = util.computeStrides(filter.shape);\n\n const convInfo = backend_util.computeConv2DInfo(\n inputShape, filter.shape as [number, number, number, number], strides,\n dilations, pad, dimRoundingMode, true /* depthwise */);\n\n const dx = new TensorBuffer(convInfo.inShape, 'float32');\n const dxValues = dx.values;\n const [dxS0, dxS1, dxS2] = dx.strides;\n const dyValues = backend.data.get(dy.dataId).values as TypedArray;\n const [dyS0, dyS1, dyS2] = dyStrides;\n const fltValues = backend.data.get(filter.dataId).values as TypedArray;\n const [fltS0, fltS1, fltS2] = filterStrides;\n const {\n batchSize,\n filterHeight,\n filterWidth,\n inChannels,\n inHeight,\n inWidth,\n outChannels,\n outHeight,\n outWidth,\n strideHeight,\n strideWidth\n } = convInfo;\n const topPad = filterHeight - 1 - convInfo.padInfo.top;\n const leftPad = filterWidth - 1 - convInfo.padInfo.left;\n const chMul = outChannels / inChannels;\n\n for (let b = 0; b < batchSize; ++b) {\n for (let d1 = 0; d1 < inChannels; ++d1) {\n for (let xR = 0; xR < inHeight; ++xR) {\n const xRCorner = xR - topPad;\n const xRMin = Math.max(0, Math.ceil(xRCorner / strideHeight));\n const yRMax =\n Math.min(outHeight, (filterHeight + xRCorner) / strideHeight);\n\n for (let xC = 0; xC < inWidth; ++xC) {\n const xCCorner = xC - leftPad;\n const xCMin = Math.max(0, Math.ceil(xCCorner / strideWidth));\n const yCMax =\n Math.min(outWidth, (filterWidth + xCCorner) / strideWidth);\n\n let dotProd = 0;\n for (let yR = xRMin; yR < yRMax; ++yR) {\n const wR = yR * strideHeight - xRCorner;\n\n for (let yC = xCMin; yC < yCMax; ++yC) {\n const wC = yC * strideWidth - xCCorner;\n const dyOffset = dyS0 * b + dyS1 * yR + dyS2 * yC;\n const fltOffset = fltS0 * (filterHeight - 1 - wR) +\n fltS1 * (filterWidth - 1 - wC) + fltS2 * d1;\n\n for (let dm = 0; dm < chMul; ++dm) {\n const d2 = d1 * chMul + dm;\n const pixel = dyValues[dyOffset + d2];\n const weight = fltValues[fltOffset + dm];\n dotProd += pixel * weight;\n }\n }\n }\n dxValues[dxS0 * b + dxS1 * xR + dxS2 * xC + d1] 
= dotProd;\n }\n }\n }\n }\n\n return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values);\n}\n\nexport const depthwiseConv2dNativeBackpropInputConfig: KernelConfig = {\n kernelName: DepthwiseConv2dNativeBackpropInput,\n backendName: 'cpu',\n kernelFunc: depthwiseConv2dNativeBackpropInput as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {buffer, Diag, DiagInputs, KernelConfig, KernelFunc, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nexport function diag(args: {inputs: DiagInputs, backend: MathBackendCPU}):\n TensorInfo {\n const {inputs, backend} = args;\n const {x} = inputs;\n\n const xSize = util.sizeFromShape(x.shape);\n\n const xVals = backend.data.get(x.dataId).values as TypedArray;\n const outBuf = buffer([xSize, xSize], x.dtype);\n const vals = outBuf.values;\n for (let i = 0; i < xVals.length; i++) {\n vals[i * xSize + i] = xVals[i];\n }\n\n const outShape = [...x.shape, ...x.shape];\n\n return backend.makeTensorInfo(outShape, outBuf.dtype, outBuf.values);\n}\n\nexport const diagConfig: KernelConfig = {\n kernelName: Diag,\n backendName: 'cpu',\n kernelFunc: diag as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Dilation2D, Dilation2DAttrs, Dilation2DInputs, KernelConfig, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nexport const dilation2DConfig: KernelConfig = {\n kernelName: Dilation2D,\n backendName: 'cpu',\n kernelFunc: ({inputs, backend, attrs}) => {\n const {x, filter} = inputs as Dilation2DInputs;\n const {strides, pad, dilations} = attrs as unknown as Dilation2DAttrs;\n const cpuBackend = backend as MathBackendCPU;\n\n const xVals = cpuBackend.data.get(x.dataId).values as TypedArray;\n const xRank = x.shape.length;\n\n const filterVals = cpuBackend.data.get(filter.dataId).values as TypedArray;\n const filterRank = filter.shape.length;\n\n const {\n batchSize,\n inHeight,\n inWidth,\n inChannels,\n outHeight,\n outWidth,\n padInfo,\n strideHeight,\n strideWidth,\n filterHeight,\n filterWidth,\n dilationHeight,\n dilationWidth,\n outShape\n } =\n backend_util.computeDilation2DInfo(\n x.shape as [number, number, number, number],\n filter.shape as [number, number, number], strides, pad,\n 'NHWC' /* dataFormat */, dilations);\n\n const outSize = util.sizeFromShape(outShape);\n const outRank = outShape.length;\n const outputVals = util.getArrayFromDType(x.dtype, outSize);\n\n // Upsampling the input by fill in `dilation size - 1` values between each\n // input value.\n // This implementation follows the TF c++ implementation:\n // https://github.com/tensorflow/tensorflow/blob/d9a3a849edc198e90172bc58eb293de457f9d986/tensorflow/core/kernels/dilation_ops.cc\n for (let b = 0; b < batchSize; ++b) {\n for (let hOut = 0; hOut < outHeight; ++hOut) {\n const hBeg = hOut * strideHeight - padInfo.top;\n for (let wOut = 0; wOut < outWidth; ++wOut) {\n const wBeg = wOut * strideWidth - padInfo.left;\n for (let d = 0; d < inChannels; ++d) {\n let curVal = Number.MIN_SAFE_INTEGER;\n for (let h = 0; h < filterHeight; ++h) {\n const hIn = hBeg + h * dilationHeight;\n if (hIn >= 0 && hIn < inHeight) {\n for (let w = 0; w < filterWidth; ++w) {\n const wIn = wBeg + w * dilationWidth;\n if (wIn >= 0 && wIn < inWidth) {\n const xIndex = util.locToIndex(\n [b, hIn, wIn, d], xRank, util.computeStrides(x.shape));\n const filterIndex = util.locToIndex(\n [h, w, d], filterRank,\n util.computeStrides(filter.shape));\n const val = xVals[xIndex] + filterVals[filterIndex];\n if (val > curVal) {\n curVal = val;\n }\n }\n }\n }\n }\n const outputIndex = util.locToIndex(\n [b, hOut, wOut, d], outRank, util.computeStrides(outShape));\n outputVals[outputIndex] = curVal;\n }\n }\n }\n }\n\n const dataId = cpuBackend.write(\n util.toTypedArray(outputVals, x.dtype), outShape, x.dtype);\n\n return {dataId, shape: outShape, dtype: x.dtype};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Dilation2DAttrs, Dilation2DBackpropFilter, Tensor3D, Tensor4D, TypedArray, util} from '@tensorflow/tfjs-core';\nimport {KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nexport const dilation2DBackpropFilterConfig: KernelConfig = {\n kernelName: Dilation2DBackpropFilter,\n backendName: 'cpu',\n kernelFunc: ({inputs, backend, attrs}) => {\n const {x, filter, dy} =\n inputs as {x: Tensor4D, filter: Tensor3D, dy: Tensor4D};\n const {strides, pad, dilations} = attrs as unknown as Dilation2DAttrs;\n const cpuBackend = backend as MathBackendCPU;\n\n const $x =\n util.toNestedArray(\n x.shape, cpuBackend.data.get(x.dataId).values as TypedArray) as\n number[][][][];\n\n const $filter = util.toNestedArray(\n filter.shape,\n cpuBackend.data.get(filter.dataId).values as\n TypedArray) as number[][][];\n\n const {\n batchSize,\n inHeight,\n inWidth,\n inChannels,\n outHeight,\n outWidth,\n padInfo,\n strideHeight,\n strideWidth,\n filterHeight,\n filterWidth,\n dilationHeight,\n dilationWidth,\n outShape\n } =\n backend_util.computeDilation2DInfo(\n x.shape as [number, number, number, number],\n filter.shape as [number, number, number], strides, pad,\n 'NHWC' /* dataFormat */, dilations);\n\n util.assert(\n dy.rank === outShape.length,\n () => `Error in ${Dilation2DBackpropFilter}, dy ` +\n `must have the same rank as output ${outShape.length}, but got ` +\n `${dy.rank}`);\n\n const $dy =\n util.toNestedArray(\n outShape, cpuBackend.data.get(dy.dataId).values as TypedArray) as\n number[][][][];\n\n // The computed filter gradients has the same dimensions as the filter:\n // [filterHeight, filterWidth, depth]\n const gradients = util.makeZerosNestedTypedArray(\n filter.shape, filter.dtype) as number[][][];\n\n // In the case of multiple argmax branches, we only back-propagate along the\n // last branch, i.e., the one with largest value of `h * filter_cols + w`,\n // similarly to the max-pooling backward routines.\n // This implementation follows the TF c++ implementation:\n // https://github.com/tensorflow/tensorflow/blob/d9a3a849edc198e90172bc58eb293de457f9d986/tensorflow/core/kernels/dilation_ops.cc\n for (let b = 0; b < batchSize; ++b) {\n for (let hOut = 0; hOut < outHeight; ++hOut) {\n const hBeg = hOut * strideHeight - padInfo.top;\n for (let wOut = 0; wOut < outWidth; ++wOut) {\n const wBeg = wOut * strideWidth - padInfo.left;\n for (let d = 0; d < inChannels; ++d) {\n let curVal = Number.MIN_SAFE_INTEGER;\n let hMax = 0;\n let wMax = 0;\n for (let h = 0; h < filterHeight; ++h) {\n const hIn = hBeg + h * dilationHeight;\n if (hIn >= 0 && hIn < inHeight) {\n for (let w = 0; w < filterWidth; ++w) {\n const wIn = wBeg + w * dilationWidth;\n if (wIn >= 0 && wIn < inWidth) {\n const val = $x[b][hIn][wIn][d] + $filter[h][w][d];\n if (val > curVal) {\n curVal = val;\n 
hMax = h;\n wMax = w;\n }\n }\n }\n }\n }\n gradients[hMax][wMax][d] += $dy[b][hOut][wOut][d];\n }\n }\n }\n }\n\n const dataId = cpuBackend.write(\n util.toTypedArray(gradients, x.dtype), filter.shape, filter.dtype);\n\n return {dataId, shape: filter.shape, dtype: filter.dtype};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Dilation2DAttrs, Dilation2DBackpropInput, Tensor3D, Tensor4D, TypedArray, util} from '@tensorflow/tfjs-core';\nimport {KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nexport const dilation2DBackpropInputConfig: KernelConfig = {\n kernelName: Dilation2DBackpropInput,\n backendName: 'cpu',\n kernelFunc: ({inputs, backend, attrs}) => {\n const {x, filter, dy} =\n inputs as {x: Tensor4D, filter: Tensor3D, dy: Tensor4D};\n const {strides, pad, dilations} = attrs as unknown as Dilation2DAttrs;\n const cpuBackend = backend as MathBackendCPU;\n\n const $x =\n util.toNestedArray(\n x.shape, cpuBackend.data.get(x.dataId).values as TypedArray) as\n number[][][][];\n\n const $filter = util.toNestedArray(\n filter.shape,\n cpuBackend.data.get(filter.dataId).values as\n TypedArray) as number[][][];\n\n const {\n batchSize,\n inHeight,\n inWidth,\n inChannels,\n outHeight,\n outWidth,\n padInfo,\n strideHeight,\n strideWidth,\n filterHeight,\n filterWidth,\n dilationHeight,\n dilationWidth,\n outShape\n } =\n backend_util.computeDilation2DInfo(\n x.shape as [number, number, number, number],\n filter.shape as [number, number, number], strides, pad,\n 'NHWC' /* dataFormat */, dilations);\n\n util.assert(\n dy.rank === outShape.length,\n () => `Error in ${Dilation2DBackpropInput}, dy ` +\n `must have the same rank as output ${outShape.length}, but got ` +\n `${dy.rank}`);\n\n const $dy =\n util.toNestedArray(\n outShape, cpuBackend.data.get(dy.dataId).values as TypedArray) as\n number[][][][];\n\n // The computed gradients has the same dimensions as the input:\n // [batch, inputHeight, inputCols, inChannel]\n const gradients =\n util.makeZerosNestedTypedArray(x.shape, x.dtype) as number[][][][];\n\n // In the case of multiple argmax branches, we only back-propagate along the\n // last branch, i.e., the one with largest value of `h * filter_cols + w`,\n // similarly to the max-pooling backward routines.\n // This implementation follows the TF c++ implementation:\n // https://github.com/tensorflow/tensorflow/blob/d9a3a849edc198e90172bc58eb293de457f9d986/tensorflow/core/kernels/dilation_ops.cc\n for (let b = 0; b < batchSize; ++b) {\n for (let hOut = 0; hOut < outHeight; ++hOut) {\n const hBeg = hOut * strideHeight - padInfo.top;\n for (let wOut = 0; wOut < outWidth; ++wOut) {\n const wBeg = wOut * strideWidth - padInfo.left;\n for (let d = 0; d < inChannels; ++d) {\n let curVal = Number.MIN_SAFE_INTEGER;\n let hInMax = (hBeg < 0) ? 
0 : hBeg;\n let wInMax = (wBeg < 0) ? 0 : wBeg;\n for (let h = 0; h < filterHeight; ++h) {\n const hIn = hBeg + h * dilationHeight;\n if (hIn >= 0 && hIn < inHeight) {\n for (let w = 0; w < filterWidth; ++w) {\n const wIn = wBeg + w * dilationWidth;\n if (wIn >= 0 && wIn < inWidth) {\n const val = $x[b][hIn][wIn][d] + $filter[h][w][d];\n if (val > curVal) {\n curVal = val;\n hInMax = hIn;\n wInMax = wIn;\n }\n }\n }\n }\n }\n gradients[b][hInMax][wInMax][d] += $dy[b][hOut][wOut][d];\n }\n }\n }\n }\n\n const dataId = cpuBackend.write(\n util.toTypedArray(gradients, x.dtype), x.shape, x.dtype);\n\n return {dataId, shape: x.shape, dtype: x.dtype};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Multiply} from '@tensorflow/tfjs-core';\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc, createComplexBinaryKernelImpl} from '../utils/binary_utils';\n\nexport const multiplyImpl = createSimpleBinaryKernelImpl(\n ((aValue: number, bValue: number) => aValue * bValue));\nexport const multiplyComplexImpl =\n createComplexBinaryKernelImpl(((aReal, aImag, bReal, bImag) => {\n return {\n real: aReal * bReal - aImag * bImag,\n imag: aReal * bImag + aImag * bReal\n };\n }));\n\nexport const multiply =\n binaryKernelFunc(Multiply, multiplyImpl, multiplyComplexImpl);\n\nexport const multiplyConfig: KernelConfig = {\n kernelName: Multiply,\n backendName: 'cpu',\n kernelFunc: multiply\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, Sum, SumAttrs, SumInputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {zeros} from '../utils/zeros_impl';\nimport {cast} from './Cast';\nimport {identity} from './Identity';\nimport {reshape} from './Reshape';\nimport {transpose} from './Transpose';\n\nexport function sum(\n args: {inputs: SumInputs, backend: MathBackendCPU, attrs: SumAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis, keepDims} = attrs;\n\n assertNotComplex(x, 'sum');\n\n let $x;\n if (x.dtype === 'bool') {\n $x = cast({inputs: {x}, backend, attrs: {dtype: 'int32'}});\n } else {\n $x = identity({inputs: {x}, backend});\n }\n\n const xRank = $x.shape.length;\n const axes = util.parseAxisParam(axis, $x.shape);\n const permutation = backend_util.getAxesPermutation(axes, xRank);\n\n let reductionAxes = axes;\n let permutedX = $x;\n if (permutation != null) {\n permutedX =\n transpose({inputs: {x: $x}, backend, attrs: {perm: permutation}});\n reductionAxes = backend_util.getInnerMostAxes(reductionAxes.length, xRank);\n }\n\n backend_util.assertAxesAreInnerMostDims(\n 'sum', reductionAxes, permutedX.shape.length);\n\n const [outShape, reduceShape] =\n backend_util.computeOutAndReduceShapes(permutedX.shape, reductionAxes);\n const resultDtype = backend_util.upcastType(permutedX.dtype, 'int32');\n let result = zeros(backend, outShape, resultDtype);\n const reduceSize = util.sizeFromShape(reduceShape);\n const vals = backend.data.get(result.dataId).values as TypedArray;\n\n const aVals = backend.data.get(permutedX.dataId).values as TypedArray;\n for (let i = 0; i < vals.length; ++i) {\n const offset = i * reduceSize;\n let sum = 0;\n for (let j = 0; j < reduceSize; ++j) {\n sum += aVals[offset + j];\n }\n vals[i] = sum;\n }\n\n if (keepDims) {\n const newShape = backend_util.expandShapeToKeepDim(result.shape, axes);\n const oldResult = result;\n result = reshape({inputs: {x: result}, backend, attrs: {shape: newShape}});\n backend.disposeIntermediateTensorInfo(oldResult);\n }\n\n backend.disposeIntermediateTensorInfo($x);\n\n if (permutation != null) {\n backend.disposeIntermediateTensorInfo(permutedX);\n }\n\n return result;\n}\n\nexport const sumConfig: KernelConfig = {\n kernelName: Sum,\n backendName: 'cpu',\n kernelFunc: sum as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Einsum, EinsumAttrs, EinsumInputs, KernelConfig, KernelFunc, Tensor, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nimport {multiply} from './Multiply';\nimport {reshape} from './Reshape';\nimport {sum} from './Sum';\nimport {transpose} from './Transpose';\n\nexport function einsum(\n args: {inputs: EinsumInputs, backend: MathBackendCPU, attrs: EinsumAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {equation} = attrs;\n const tensors = inputs as Tensor[];\n\n const {allDims, summedDims, idDims} =\n backend_util.decodeEinsumEquation(equation, tensors.length);\n backend_util.checkEinsumDimSizes(allDims.length, idDims, tensors);\n const {path, steps} = backend_util.getEinsumComputePath(summedDims, idDims);\n\n const nSteps = steps.length;\n let out: TensorInfo|null = null;\n let numDimsRemaining = allDims.length;\n const tensorsToDispose: TensorInfo[] = [];\n for (let i = 0; i < nSteps; ++i) {\n for (const idTerm of steps[i]) {\n const {permutationIndices: perm, expandDims: dimsToExpand} =\n backend_util.getEinsumPermutation(numDimsRemaining, idDims[idTerm]);\n let x: TensorInfo;\n if (backend_util.isIdentityPermutation(perm)) {\n x = tensors[idTerm];\n } else {\n x = transpose({inputs: {x: tensors[idTerm]}, backend, attrs: {perm}});\n tensorsToDispose.push(x);\n }\n const targetShape: number[] = x.shape.slice();\n for (let k = 0; k < dimsToExpand.length; ++k) {\n targetShape.splice(dimsToExpand[k], 0, 1);\n }\n\n if (!util.arraysEqual(x.shape, targetShape)) {\n x = reshape({inputs: {x}, backend, attrs: {shape: targetShape}});\n tensorsToDispose.push(x);\n }\n if (out === null) {\n out = x;\n } else {\n // tslint:disable-next-line: no-unnecessary-type-assertion\n out = multiply({inputs: {a: x, b: out}, backend}) as TensorInfo;\n tensorsToDispose.push(out);\n }\n }\n if (i < nSteps - 1) {\n if (path[i] >= 0) {\n out = sum({\n inputs: {x: out},\n backend,\n attrs: {\n axis: path[i] - (allDims.length - numDimsRemaining),\n keepDims: false\n }\n });\n tensorsToDispose.push(out);\n }\n numDimsRemaining--;\n }\n }\n\n // Clean up intermediate tensors.\n for (const tensorInfo of tensorsToDispose) {\n if (tensorInfo === out) {\n continue;\n }\n backend.disposeIntermediateTensorInfo(tensorInfo);\n }\n\n return out;\n}\n\nexport const einsumConfig: KernelConfig = {\n kernelName: Einsum,\n backendName: 'cpu',\n kernelFunc: einsum as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {EluGrad, EluGradInputs, KernelConfig, KernelFunc, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function eluGrad(args: {inputs: EluGradInputs, backend: MathBackendCPU}):\n TensorInfo {\n const {inputs, backend} = args;\n const {dy, y} = inputs;\n\n assertNotComplex([dy, y], 'eluGrad');\n\n const resultValues = new Float32Array(util.sizeFromShape(y.shape));\n const values = backend.data.get(y.dataId).values as TypedArray;\n const dyValues = backend.data.get(dy.dataId).values as TypedArray;\n for (let i = 0; i < values.length; ++i) {\n const v = values[i];\n if (v >= 1) {\n resultValues[i] = dyValues[i];\n } else {\n resultValues[i] = dyValues[i] * (v + 1);\n }\n }\n\n return backend.makeTensorInfo(y.shape, 'float32', resultValues);\n}\n\nexport const eluGradConfig: KernelConfig = {\n kernelName: EluGrad,\n backendName: 'cpu',\n kernelFunc: eluGrad as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Equal, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const equalImpl =\n createSimpleBinaryKernelImpl((a: number, b: number) => (a === b) ? 1 : 0);\nexport const equal =\n binaryKernelFunc(Equal, equalImpl, null /* complexImpl */, 'bool');\n\nexport const equalConfig: KernelConfig = {\n kernelName: Equal,\n backendName: 'cpu',\n kernelFunc: equal\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Erf, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nconst p = backend_util.ERF_P;\nconst a1 = backend_util.ERF_A1;\nconst a2 = backend_util.ERF_A2;\nconst a3 = backend_util.ERF_A3;\nconst a4 = backend_util.ERF_A4;\nconst a5 = backend_util.ERF_A5;\n\nexport const erf = unaryKernelFunc(\n Erf,\n (xi) => {\n const sign = Math.sign(xi);\n const v = Math.abs(xi);\n const t = 1.0 / (1.0 + p * v);\n return sign *\n (1.0 -\n (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t *\n Math.exp(-v * v));\n },\n);\n\nexport const erfConfig: KernelConfig = {\n kernelName: Erf,\n backendName: 'cpu',\n kernelFunc: erf,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Exp, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {createSimpleUnaryImpl} from '../utils/unary_impl';\nimport {unaryKernelFuncFromImpl} from '../utils/unary_utils';\n\nexport const expImpl = createSimpleUnaryImpl((xi) => Math.exp(xi));\nexport const exp = unaryKernelFuncFromImpl(Exp, expImpl, 'float32');\n\nexport const expConfig: KernelConfig = {\n kernelName: Exp,\n backendName: 'cpu',\n kernelFunc: exp,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ExpandDims, ExpandDimsAttrs, ExpandDimsInputs, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {reshape} from './Reshape';\n\nexport function expandDims(args: {\n inputs: ExpandDimsInputs,\n backend: MathBackendCPU,\n attrs: ExpandDimsAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {input} = inputs;\n const {dim} = attrs;\n\n const inputRank = input.shape.length;\n const newShape = input.shape.slice();\n let $dim = dim;\n if (dim < 0) {\n // Negative value is counted from the tail of rank.\n util.assert(\n -(inputRank + 1) <= dim,\n () => `Axis must be in the interval [${- (inputRank + 1)}, ${\n inputRank}]`);\n $dim = inputRank + dim + 1;\n }\n newShape.splice($dim, 0, 1);\n\n return reshape({inputs: {x: input}, backend, attrs: {shape: newShape}});\n}\n\nexport const expandDimsConfig: KernelConfig = {\n kernelName: ExpandDims,\n backendName: 'cpu',\n kernelFunc: expandDims as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Expm1, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {createSimpleUnaryImpl} from '../utils/unary_impl';\nimport {unaryKernelFuncFromImpl} from '../utils/unary_utils';\n\nexport const expm1Impl = createSimpleUnaryImpl((xi) => Math.expm1(xi));\nexport const expm1 = unaryKernelFuncFromImpl(Expm1, expm1Impl);\n\nexport const expm1Config: KernelConfig = {\n kernelName: Expm1,\n backendName: 'cpu',\n kernelFunc: expm1,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, RealDiv} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const realDivImpl =\n createSimpleBinaryKernelImpl((a: number, b: number) => a / b);\nexport const div = binaryKernelFunc(RealDiv, realDivImpl);\n\nexport const realDivConfig: KernelConfig = {\n kernelName: RealDiv,\n backendName: 'cpu',\n kernelFunc: div\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Sub} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc, createComplexBinaryKernelImpl} from '../utils/binary_utils';\n\nexport const subImpl = createSimpleBinaryKernelImpl(\n ((aValue: number, bValue: number) => aValue - bValue));\nexport const subComplexImpl =\n createComplexBinaryKernelImpl(((aReal, aImag, bReal, bImag) => {\n return {real: aReal - bReal, imag: aImag - bImag};\n }));\nexport const sub = binaryKernelFunc(Sub, subImpl, subComplexImpl);\n\nexport const subConfig: KernelConfig = {\n kernelName: Sub,\n backendName: 'cpu',\n kernelFunc: sub\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Tensor, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {add} from '../kernels/Add';\nimport {complex} from '../kernels/Complex';\nimport {concat} from '../kernels/Concat';\nimport {identity} from '../kernels/Identity';\nimport {imag} from '../kernels/Imag';\nimport {multiply} from '../kernels/Multiply';\nimport {real} from '../kernels/Real';\nimport {realDivConfig} from '../kernels/RealDiv';\nimport {slice} from '../kernels/Slice';\nimport {sub} from '../kernels/Sub';\n\n/**\n * Calculate FFT of inner most elements of batch tensor.\n */\nexport function fftBatch(\n input: TensorInfo, inverse: boolean,\n cpuBackend: MathBackendCPU): TensorInfo {\n const inputShape = input.shape;\n const batch = inputShape[0];\n const innerDim = inputShape[1];\n\n const inputVals = cpuBackend.data.get(input.dataId);\n\n const real2D = inputVals.complexTensorInfos.real;\n const imag2D = inputVals.complexTensorInfos.imag;\n\n // Collects real and imaginary values separately.\n const resultShape = [batch, innerDim];\n const resultSize = util.sizeFromShape(resultShape);\n const resultReal = util.getTypedArrayFromDType('float32', resultSize);\n const resultImag = util.getTypedArrayFromDType('float32', resultSize);\n\n for (let b = 0; b < batch; b++) {\n // TODO: Support slice ops for complex type.\n const r = slice({\n inputs: {x: real2D},\n backend: cpuBackend,\n attrs: {begin: [b, 0], size: [1, innerDim]}\n });\n const i = slice({\n inputs: {x: imag2D},\n backend: cpuBackend,\n attrs: {begin: [b, 0], size: [1, innerDim]}\n });\n\n const input = complex({inputs: {real: r, imag: i}, backend: cpuBackend});\n\n // Run FFT by batch element.\n const {real, imag} = fftImpl(input, inverse, cpuBackend);\n const res = backend_util.mergeRealAndImagArrays(real, imag);\n\n for (let d = 0; d < innerDim; d++) {\n const c = backend_util.getComplexWithIndex(res, d);\n resultReal[b * innerDim + d] = c.real;\n resultImag[b * innerDim + d] = c.imag;\n }\n\n cpuBackend.disposeIntermediateTensorInfo(r);\n cpuBackend.disposeIntermediateTensorInfo(i);\n cpuBackend.disposeIntermediateTensorInfo(input);\n }\n\n const $realInfo: TensorInfo =\n cpuBackend.makeTensorInfo(resultShape, 'float32', resultReal);\n const $imagInfo: TensorInfo =\n cpuBackend.makeTensorInfo(resultShape, 'float32', resultImag);\n\n const result = complex(\n {inputs: {real: $realInfo, imag: $imagInfo}, backend: cpuBackend});\n\n cpuBackend.disposeIntermediateTensorInfo($realInfo);\n cpuBackend.disposeIntermediateTensorInfo($imagInfo);\n\n return result;\n}\n\nexport function fftImpl(\n input: TensorInfo, inverse: boolean,\n cpuBackend: MathBackendCPU): {real: Float32Array, imag: Float32Array} {\n const inputSize = util.sizeFromShape(input.shape);\n\n const inputVals = cpuBackend.data.get(input.dataId);\n\n 
const realVals =\n cpuBackend.data.get(inputVals.complexTensorInfos.real.dataId).values as\n Float32Array;\n\n const imagVals =\n cpuBackend.data.get(inputVals.complexTensorInfos.imag.dataId).values as\n Float32Array;\n\n if (isExponentOf2(inputSize)) {\n const result =\n fftRadix2(realVals, imagVals, inputSize, inverse, cpuBackend);\n\n const resultShape = [input.shape[0], input.shape[1]];\n\n if (inverse) {\n const realInfo: TensorInfo =\n cpuBackend.makeTensorInfo(resultShape, 'float32', result.real);\n const imagInfo: TensorInfo =\n cpuBackend.makeTensorInfo(resultShape, 'float32', result.imag);\n\n const sizeInfo: TensorInfo = cpuBackend.makeTensorInfo(\n [], 'float32',\n util.createScalarValue(inputSize as unknown as 'float32', 'float32'));\n const sizeInfoCopy =\n identity({inputs: {x: sizeInfo}, backend: cpuBackend});\n\n const divRealInfo =\n realDivConfig.kernelFunc(\n {inputs: {a: realInfo, b: sizeInfo}, backend: cpuBackend}) as\n TensorInfo;\n const divImagInfo =\n realDivConfig.kernelFunc(\n {inputs: {a: imagInfo, b: sizeInfoCopy}, backend: cpuBackend}) as\n TensorInfo;\n\n const divRealVals =\n cpuBackend.data.get(divRealInfo.dataId).values as Float32Array;\n const divImagVals =\n cpuBackend.data.get(divImagInfo.dataId).values as Float32Array;\n\n cpuBackend.disposeIntermediateTensorInfo(realInfo);\n cpuBackend.disposeIntermediateTensorInfo(imagInfo);\n cpuBackend.disposeIntermediateTensorInfo(sizeInfo);\n cpuBackend.disposeIntermediateTensorInfo(sizeInfoCopy);\n cpuBackend.disposeIntermediateTensorInfo(divRealInfo);\n cpuBackend.disposeIntermediateTensorInfo(divImagInfo);\n\n return {real: divRealVals, imag: divImagVals};\n }\n\n return result;\n } else {\n const data = backend_util.mergeRealAndImagArrays(realVals, imagVals);\n\n const rawOutput =\n fourierTransformByMatmul(data, inputSize, inverse) as Float32Array;\n\n return backend_util.splitRealAndImagArrays(rawOutput);\n }\n}\n\nfunction isExponentOf2(size: number): boolean {\n return (size & size - 1) === 0;\n}\n\n// FFT using Cooley-Tukey algorithm on radix 2 dimensional input.\nfunction fftRadix2(\n realVals: Float32Array, imagVals: Float32Array, size: number,\n inverse: boolean,\n cpuBackend: MathBackendCPU): {real: Float32Array, imag: Float32Array} {\n if (size === 1) {\n return {real: realVals, imag: imagVals};\n }\n\n const data = backend_util.mergeRealAndImagArrays(realVals, imagVals);\n\n const half = size / 2;\n\n const evenComplex = backend_util.complexWithEvenIndex(data);\n\n const evenRealVals = evenComplex.real;\n const evenImagVals = evenComplex.imag;\n\n const evenShape = [evenRealVals.length];\n\n const evenRealInfo =\n cpuBackend.makeTensorInfo(evenShape, 'float32', evenRealVals);\n const evenImagInfo =\n cpuBackend.makeTensorInfo(evenShape, 'float32', evenImagVals);\n\n const evenTensorInfo = complex(\n {inputs: {real: evenRealInfo, imag: evenImagInfo}, backend: cpuBackend});\n\n const oddComplex = backend_util.complexWithOddIndex(data);\n\n const oddRealVals = oddComplex.real;\n const oddImagVals = oddComplex.imag;\n\n const oddShape = [oddRealVals.length];\n\n const oddRealInfo =\n cpuBackend.makeTensorInfo(oddShape, 'float32', oddRealVals);\n const oddImagInfo =\n cpuBackend.makeTensorInfo(oddShape, 'float32', oddImagVals);\n\n const oddTensorInfo = complex(\n {inputs: {real: oddRealInfo, imag: oddImagInfo}, backend: cpuBackend});\n\n // Recursive call for half part of original input.\n const $evenComplex =\n fftRadix2(evenRealVals, evenImagVals, half, inverse, cpuBackend);\n\n const $evenRealVals 
= $evenComplex.real;\n const $evenImagVals = $evenComplex.imag;\n\n const $evenShape = [$evenRealVals.length];\n\n const $evenRealInfo =\n cpuBackend.makeTensorInfo($evenShape, 'float32', $evenRealVals);\n const $evenImagInfo =\n cpuBackend.makeTensorInfo($evenShape, 'float32', $evenImagVals);\n\n const $evenTensorInfo = complex({\n inputs: {real: $evenRealInfo, imag: $evenImagInfo},\n backend: cpuBackend\n });\n\n const $oddComplex =\n fftRadix2(oddRealVals, oddImagVals, half, inverse, cpuBackend);\n\n const $oddRealVals = $oddComplex.real;\n const $oddImagVals = $oddComplex.imag;\n\n const $oddShape = [$oddRealVals.length];\n\n const $oddRealInfo =\n cpuBackend.makeTensorInfo($oddShape, 'float32', $oddRealVals);\n const $oddImagInfo =\n cpuBackend.makeTensorInfo($oddShape, 'float32', $oddImagVals);\n\n const $oddTensorInfo = complex(\n {inputs: {real: $oddRealInfo, imag: $oddImagInfo}, backend: cpuBackend});\n\n const e = backend_util.exponents(size, inverse);\n const eShape = [e.real.length];\n\n const eRealInfo = cpuBackend.makeTensorInfo(eShape, 'float32', e.real);\n const eImagInfo = cpuBackend.makeTensorInfo(eShape, 'float32', e.imag);\n\n const complexInfo = complex(\n {inputs: {real: eRealInfo, imag: eImagInfo}, backend: cpuBackend});\n\n const exponentInfo =\n multiply(\n {inputs: {a: complexInfo, b: $oddTensorInfo}, backend: cpuBackend}) as\n TensorInfo;\n\n const addPart = add({\n inputs: {a: $evenTensorInfo, b: exponentInfo},\n backend: cpuBackend\n }) as TensorInfo;\n const subPart = sub({\n inputs: {a: $evenTensorInfo, b: exponentInfo},\n backend: cpuBackend\n }) as TensorInfo;\n\n const addPartReal = real({inputs: {input: addPart}, backend: cpuBackend});\n const subPartReal = real({inputs: {input: subPart}, backend: cpuBackend});\n\n const addPartImag = imag({inputs: {input: addPart}, backend: cpuBackend});\n const subPartImag = imag({inputs: {input: subPart}, backend: cpuBackend});\n\n const $real = concat({\n inputs: [addPartReal as Tensor, subPartReal as Tensor],\n backend: cpuBackend,\n attrs: {axis: 0}\n });\n const $imag = concat({\n inputs: [addPartImag as Tensor, subPartImag as Tensor],\n backend: cpuBackend,\n attrs: {axis: 0}\n });\n\n const $realVals = cpuBackend.data.get($real.dataId).values as Float32Array;\n const $imagVals = cpuBackend.data.get($imag.dataId).values as Float32Array;\n\n cpuBackend.disposeIntermediateTensorInfo(evenRealInfo);\n cpuBackend.disposeIntermediateTensorInfo(evenImagInfo);\n cpuBackend.disposeIntermediateTensorInfo(evenTensorInfo);\n cpuBackend.disposeIntermediateTensorInfo(oddRealInfo);\n cpuBackend.disposeIntermediateTensorInfo(oddImagInfo);\n cpuBackend.disposeIntermediateTensorInfo(oddTensorInfo);\n cpuBackend.disposeIntermediateTensorInfo($evenRealInfo);\n cpuBackend.disposeIntermediateTensorInfo($evenImagInfo);\n cpuBackend.disposeIntermediateTensorInfo($evenTensorInfo);\n cpuBackend.disposeIntermediateTensorInfo($oddRealInfo);\n cpuBackend.disposeIntermediateTensorInfo($oddImagInfo);\n cpuBackend.disposeIntermediateTensorInfo($oddTensorInfo);\n cpuBackend.disposeIntermediateTensorInfo(eRealInfo);\n cpuBackend.disposeIntermediateTensorInfo(eImagInfo);\n cpuBackend.disposeIntermediateTensorInfo(complexInfo);\n cpuBackend.disposeIntermediateTensorInfo(exponentInfo);\n cpuBackend.disposeIntermediateTensorInfo(addPart);\n cpuBackend.disposeIntermediateTensorInfo(subPart);\n cpuBackend.disposeIntermediateTensorInfo(addPartReal);\n cpuBackend.disposeIntermediateTensorInfo(addPartImag);\n 
cpuBackend.disposeIntermediateTensorInfo(subPartReal);\n cpuBackend.disposeIntermediateTensorInfo(subPartImag);\n cpuBackend.disposeIntermediateTensorInfo($real);\n cpuBackend.disposeIntermediateTensorInfo($imag);\n\n return {real: $realVals, imag: $imagVals};\n}\n\n// Calculate fourier transform by multplying sinusoid matrix.\nfunction fourierTransformByMatmul(\n data: TypedArray, size: number, inverse: boolean): TypedArray {\n const ret = new Float32Array(size * 2);\n // TODO: Use matmul instead once it supports complex64 type.\n for (let r = 0; r < size; r++) {\n let real = 0.0;\n let imag = 0.0;\n for (let c = 0; c < size; c++) {\n const e = backend_util.exponent(r * c, size, inverse);\n const term = backend_util.getComplexWithIndex(data as Float32Array, c);\n real += term.real * e.real - term.imag * e.imag;\n imag += term.real * e.imag + term.imag * e.real;\n }\n if (inverse) {\n real /= size;\n imag /= size;\n }\n backend_util.assignToTypedArray(ret, real, imag, r);\n }\n return ret;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {FFT, FFTInputs, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {fftBatch} from '../utils/fft_utils';\nimport {reshape} from './Reshape';\n\nexport function fft(args: {inputs: FFTInputs, backend: MathBackendCPU}):\n TensorInfo {\n const {inputs, backend} = args;\n const {input} = inputs;\n\n const inputSize = util.sizeFromShape(input.shape);\n\n // Collapse all outer dimensions to a single batch dimension.\n const innerDimensionSize = input.shape[input.shape.length - 1];\n const batch = inputSize / innerDimensionSize;\n\n const input2D = reshape({\n inputs: {x: input},\n backend,\n attrs: {shape: [batch, innerDimensionSize]}\n });\n\n const result = fftBatch(input2D, false, backend);\n\n const resultReshaped =\n reshape({inputs: {x: result}, backend, attrs: {shape: input.shape}});\n\n backend.disposeIntermediateTensorInfo(input2D);\n backend.disposeIntermediateTensorInfo(result);\n\n return resultReshaped;\n}\n\nexport const fftConfig: KernelConfig = {\n kernelName: FFT,\n backendName: 'cpu',\n kernelFunc: fft as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, DataValues, Fill, FillAttrs, KernelConfig, KernelFunc, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nexport function fill(args: {backend: MathBackendCPU, attrs: FillAttrs}):\n TensorInfo {\n const {backend, attrs} = args;\n const {shape, value, dtype} = attrs;\n\n const $dtype = dtype || util.inferDtype(value);\n const values = util.getArrayFromDType($dtype, util.sizeFromShape(shape));\n fillValues(values, value, $dtype);\n\n return backend.makeTensorInfo(shape, $dtype, values);\n}\n\nexport const fillConfig: KernelConfig = {\n kernelName: Fill,\n backendName: 'cpu',\n kernelFunc: fill as unknown as KernelFunc\n};\n\nfunction fillValues(\n values: DataValues, value: string|number, dtype: DataType): void {\n if (dtype === 'string') {\n (values as string[]).fill(value as string);\n } else {\n (values as TypedArray).fill(value as number);\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, NumericDataType, TypedArray} from '@tensorflow/tfjs-core';\nimport {FlipLeftRight, FlipLeftRightInputs, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nexport const flipLeftRightConfig: KernelConfig = {\n kernelName: FlipLeftRight,\n backendName: 'cpu',\n kernelFunc: ({inputs, attrs, backend}) => {\n const {image} = inputs as FlipLeftRightInputs;\n const cpuBackend = backend as MathBackendCPU;\n\n const output = util.getTypedArrayFromDType(\n image.dtype as NumericDataType, util.sizeFromShape(image.shape));\n const [batch, imageHeight, imageWidth, numChannels] = image.shape;\n\n const imageVals = cpuBackend.data.get(image.dataId).values as TypedArray;\n\n for (let batchIdx = 0; batchIdx < batch; batchIdx++) {\n const batchOffset = batchIdx * imageWidth * imageHeight * numChannels;\n\n for (let row = 0; row < imageHeight; row++) {\n const rowOffset = row * (imageWidth * numChannels);\n\n for (let col = 0; col < imageWidth; col++) {\n const colOffset = col * numChannels;\n\n for (let channel = 0; channel < numChannels; channel++) {\n const coordX = Math.round(imageWidth - col - 1);\n const outIdx = batchOffset + rowOffset + 
colOffset + channel;\n\n let outputValue = imageVals[outIdx];\n // If the coordinate position falls within the image boundaries...\n if (coordX >= 0 && coordX < imageWidth) {\n // set the output to the image value at the coordinate position.\n const rotatedColOffset = coordX * numChannels;\n const imageIdx =\n batchOffset + rowOffset + rotatedColOffset + channel;\n outputValue = imageVals[imageIdx];\n }\n output[outIdx] = outputValue;\n }\n }\n }\n }\n\n const dataId = cpuBackend.write(output, image.shape, image.dtype);\n return {dataId, shape: image.shape, dtype: image.dtype};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Floor, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {createSimpleUnaryImpl} from '../utils/unary_impl';\nimport {unaryKernelFuncFromImpl} from '../utils/unary_utils';\n\nexport const floorImpl = createSimpleUnaryImpl((xi) => Math.floor(xi));\nexport const floor = unaryKernelFuncFromImpl(Floor, floorImpl);\n\nexport const floorConfig: KernelConfig = {\n kernelName: Floor,\n backendName: 'cpu',\n kernelFunc: floor,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {FloorDiv, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const floorDivImpl =\n createSimpleBinaryKernelImpl((a: number, b: number) => Math.floor(a / b));\nexport const floorDiv =\n binaryKernelFunc(FloorDiv, floorDivImpl, null /* complexImpl */, 'int32');\n\nexport const floorDivConfig: KernelConfig = {\n kernelName: FloorDiv,\n backendName: 'cpu',\n kernelFunc: floorDiv\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {FusedConv2D, FusedConv2DAttrs, FusedConv2DInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {applyActivation} from '../utils/fused_utils';\nimport {add} from './Add';\nimport {conv2D} from './Conv2D';\nimport {reshape} from './Reshape';\n\nexport function fusedConv2D(args: {\n inputs: FusedConv2DInputs,\n backend: MathBackendCPU,\n attrs: FusedConv2DAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x, filter, bias, preluActivationWeights} = inputs;\n const {\n strides,\n pad,\n dataFormat,\n dilations,\n dimRoundingMode,\n activation,\n leakyreluAlpha\n } = attrs;\n\n let result = conv2D({\n inputs: {x, filter},\n backend,\n attrs: {strides, pad, dataFormat, dilations, dimRoundingMode}\n });\n\n if (bias) {\n const resultOld = result;\n // For NCHW format, if bias is a 1-D tensor, it is supposed to be aligned\n // to the channel of the conv2d's result; if the bias is a scalar, the\n // bias_add is computed as if the bias was broadcasted to the shape of the\n // conv2d's result.\n if (dataFormat === 'NCHW' && bias.shape.length === 1 &&\n bias.shape[0] !== 1) {\n const reshapedBias = reshape(\n {inputs: {x: bias}, backend, attrs: {shape: [bias.shape[0], 1, 1]}});\n result =\n add({inputs: {a: result, b: reshapedBias}, backend}) as TensorInfo;\n backend.disposeIntermediateTensorInfo(reshapedBias);\n } else {\n // This condition handles NHWC and NCHW (scalar case). The only other case\n // for NCHW (1D case) is handled above.\n result = add({inputs: {a: result, b: bias}, backend}) as TensorInfo;\n }\n backend.disposeIntermediateTensorInfo(resultOld);\n }\n\n if (activation) {\n const resultOld = result;\n // For NCHW format, if PReLu activation weights is a 1-D tensor, it is\n // supposed to be aligned with the channel of the conv2d's result. For other\n // cases, whether NCHW or NHWC data format, the conv2d result is\n // already aligned with the activation weights.\n if (dataFormat === 'NCHW' && activation === 'prelu' &&\n preluActivationWeights.shape.length === 1 &&\n preluActivationWeights.shape[0] !== 1) {\n const reshapedAlpha = reshape({\n inputs: {x: preluActivationWeights},\n backend,\n attrs: {shape: [preluActivationWeights.shape[0], 1, 1]}\n });\n result = applyActivation(\n backend, result, activation, reshapedAlpha, leakyreluAlpha);\n backend.disposeIntermediateTensorInfo(reshapedAlpha);\n } else {\n result = applyActivation(\n backend, result, activation, preluActivationWeights, leakyreluAlpha);\n }\n backend.disposeIntermediateTensorInfo(resultOld);\n }\n\n return result;\n}\n\nexport const fusedConv2DConfig: KernelConfig = {\n kernelName: FusedConv2D,\n backendName: 'cpu',\n kernelFunc: fusedConv2D as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {FusedDepthwiseConv2D, FusedDepthwiseConv2DAttrs, FusedDepthwiseConv2DInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {applyActivation} from '../utils/fused_utils';\nimport {add} from './Add';\nimport {depthwiseConv2dNative} from './DepthwiseConv2dNative';\n\nexport function fusedDepthwiseConv2D(args: {\n inputs: FusedDepthwiseConv2DInputs,\n backend: MathBackendCPU,\n attrs: FusedDepthwiseConv2DAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x, filter, bias, preluActivationWeights} = inputs;\n const {\n strides,\n pad,\n dataFormat,\n dilations,\n dimRoundingMode,\n activation,\n leakyreluAlpha\n } = attrs;\n\n let result = depthwiseConv2dNative({\n inputs: {x, filter},\n backend,\n attrs: {strides, pad, dataFormat, dilations, dimRoundingMode}\n });\n\n if (bias) {\n const oldResult = result;\n result = add({inputs: {a: result, b: bias}, backend}) as TensorInfo;\n backend.disposeIntermediateTensorInfo(oldResult);\n }\n if (activation) {\n const oldResult = result;\n result = applyActivation(\n backend, result, activation, preluActivationWeights, leakyreluAlpha);\n backend.disposeIntermediateTensorInfo(oldResult);\n }\n\n return result;\n}\n\nexport const fusedDepthwiseConv2DConfig: KernelConfig = {\n kernelName: FusedDepthwiseConv2D,\n backendName: 'cpu',\n kernelFunc: fusedDepthwiseConv2D as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {buffer, DataType, Rank, TensorBuffer, TypedArray} from '@tensorflow/tfjs-core';\n\nexport function gatherNdImpl(\n indicesData: TypedArray, paramsBuf: TensorBuffer, dtype: DataType,\n numSlices: number, sliceRank: number, sliceSize: number, strides: number[],\n paramsShape: number[], paramsSize: number): TensorBuffer {\n const outBuf = buffer([numSlices, sliceSize], dtype);\n\n for (let i = 0; i < numSlices; i++) {\n const index = [];\n let flattenIndex = 0;\n for (let j = 0; j < sliceRank; j++) {\n const dim = indicesData[i * sliceRank + j];\n flattenIndex += dim * strides[j];\n index.push(dim);\n }\n if (flattenIndex < 0 || flattenIndex >= paramsSize / sliceSize) {\n throw new Error(\n `Invalid indices: ${index} does not index into ${paramsShape}`);\n }\n\n for (let k = 0; k < sliceSize; k++) {\n outBuf.values[i * sliceSize + k] =\n paramsBuf.get(...paramsBuf.indexToLoc(flattenIndex * sliceSize + k));\n }\n }\n\n return outBuf as TensorBuffer;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, GatherNd, GatherNdInputs, KernelConfig, KernelFunc, Rank, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nimport {gatherNdImpl} from './GatherNd_Impl';\n\nexport function gatherNd(\n args: {inputs: GatherNdInputs, backend: MathBackendCPU}): TensorInfo {\n const {inputs, backend} = args;\n const {params, indices} = inputs;\n\n const paramsSize = util.sizeFromShape(params.shape);\n\n const indicesShape = indices.shape;\n const sliceRank = indicesShape[indicesShape.length - 1];\n\n const [resultShape, numSlices, sliceSize, strides] =\n backend_util.prepareAndValidate(params, indices);\n if (numSlices === 0) {\n return backend.makeTensorInfo(resultShape, params.dtype, []);\n }\n\n const indicesData = backend.data.get(indices.dataId).values as TypedArray;\n const paramsBuf = backend.bufferSync(params);\n const outBuf = gatherNdImpl(\n indicesData, paramsBuf, params.dtype, numSlices, sliceRank, sliceSize,\n strides, params.shape, paramsSize);\n\n return backend.makeTensorInfo(resultShape, params.dtype, outBuf.values);\n}\n\nexport const gatherNdConfig: KernelConfig = {\n 
kernelName: GatherNd,\n backendName: 'cpu',\n kernelFunc: gatherNd as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {buffer, DataType, Rank, TensorBuffer} from '@tensorflow/tfjs-core';\n\nexport function gatherV2Impl(\n xBuf: TensorBuffer, indicesBuf: TensorBuffer,\n flattenOutputShape: number[]): TensorBuffer {\n const outBuf = buffer(flattenOutputShape, xBuf.dtype);\n for (let i = 0; i < outBuf.size; ++i) {\n const newLoc = outBuf.indexToLoc(i);\n\n const originalLoc: number[] = newLoc.slice();\n const batchIdx = originalLoc[0];\n const indicesIdx = originalLoc[2];\n const indicesIndex = indicesBuf.locToIndex([batchIdx, indicesIdx]);\n originalLoc[2] = indicesBuf.values[indicesIndex] as number;\n\n const originalIndex = xBuf.locToIndex(originalLoc);\n\n if (0 <= originalIndex && originalIndex < xBuf.values.length) {\n outBuf.values[i] = xBuf.values[originalIndex];\n } // Else, index is out of bounds, so leave the default zero val in outBuf.\n }\n\n return outBuf as TensorBuffer;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, GatherV2, GatherV2Attrs, GatherV2Inputs, KernelConfig, KernelFunc, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {gatherV2Impl} from './GatherV2_impl';\nimport {reshape} from './Reshape';\n\nexport function gatherV2(args: {\n inputs: GatherV2Inputs,\n backend: MathBackendCPU,\n attrs: GatherV2Attrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x, indices} = inputs;\n const {axis, batchDims} = attrs;\n\n assertNotComplex([x, indices], 'gatherV2');\n\n // Throw error when any index is out of bound.\n const parsedAxis = util.parseAxisParam(axis, x.shape)[0];\n const indicesVals = backend.data.get(indices.dataId).values as TypedArray;\n const axisDim = x.shape[parsedAxis];\n for (let i = 0; i < indicesVals.length; ++i) {\n const index = indicesVals[i];\n util.assert(\n index <= axisDim - 1 && index >= 0,\n () =>\n `GatherV2: the index value ${index} is not in [0, ${axisDim - 1}]`);\n }\n\n let $batchDims = batchDims;\n\n if (batchDims == 
null) {\n $batchDims = 0;\n }\n\n const indicesSize = util.sizeFromShape(indices.shape);\n\n const shapeInfo = backend_util.segment_util.collectGatherOpShapeInfo(\n x, indices, parsedAxis, $batchDims);\n\n const flattenX = reshape({\n inputs: {x},\n backend,\n attrs: {\n shape: [\n shapeInfo.batchSize, shapeInfo.outerSize, shapeInfo.dimSize,\n shapeInfo.sliceSize\n ]\n }\n });\n\n const flattenIndex = reshape({\n inputs: {x: indices},\n backend,\n attrs: {shape: [shapeInfo.batchSize, indicesSize / shapeInfo.batchSize]}\n });\n\n const flattenOutputShape = [\n shapeInfo.batchSize, shapeInfo.outerSize, indicesSize / shapeInfo.batchSize,\n shapeInfo.sliceSize\n ];\n\n const indicesBuf = backend.bufferSync(flattenIndex);\n const xBuf = backend.bufferSync(flattenX);\n const outBuf = gatherV2Impl(xBuf, indicesBuf, flattenOutputShape);\n\n backend.disposeIntermediateTensorInfo(flattenX);\n backend.disposeIntermediateTensorInfo(flattenIndex);\n\n return backend.makeTensorInfo(\n shapeInfo.outputShape, outBuf.dtype, outBuf.values);\n}\n\nexport const gatherV2Config: KernelConfig = {\n kernelName: GatherV2,\n backendName: 'cpu',\n kernelFunc: gatherV2 as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Greater, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const greaterImpl =\n createSimpleBinaryKernelImpl((a: number, b: number) => (a > b) ? 1 : 0);\nexport const greater =\n binaryKernelFunc(Greater, greaterImpl, null /* complexImpl */, 'bool');\n\nexport const greaterConfig: KernelConfig = {\n kernelName: Greater,\n backendName: 'cpu',\n kernelFunc: greater\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GreaterEqual, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const greaterEqualImpl =\n createSimpleBinaryKernelImpl((a: number, b: number) => (a >= b) ? 
1 : 0);\nexport const greaterEqual = binaryKernelFunc(\n GreaterEqual, greaterEqualImpl, null /* complexImpl */, 'bool');\n\nexport const greaterEqualConfig: KernelConfig = {\n kernelName: GreaterEqual,\n backendName: 'cpu',\n kernelFunc: greaterEqual\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {IFFT, IFFTInputs, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {fftBatch} from '../utils/fft_utils';\nimport {reshape} from './Reshape';\n\nexport function ifft(args: {inputs: IFFTInputs, backend: MathBackendCPU}):\n TensorInfo {\n const {inputs, backend} = args;\n const {input} = inputs;\n\n const inputSize = util.sizeFromShape(input.shape);\n\n // Collapse all outer dimensions to a single batch dimension.\n const innerDimensionSize = input.shape[input.shape.length - 1];\n const batch = inputSize / innerDimensionSize;\n\n const input2D = reshape({\n inputs: {x: input},\n backend,\n attrs: {shape: [batch, innerDimensionSize]}\n });\n\n const result = fftBatch(input2D, true, backend);\n\n const resultReshaped =\n reshape({inputs: {x: result}, backend, attrs: {shape: input.shape}});\n\n backend.disposeIntermediateTensorInfo(input2D);\n backend.disposeIntermediateTensorInfo(result);\n\n return resultReshaped;\n}\n\nexport const ifftConfig: KernelConfig = {\n kernelName: IFFT,\n backendName: 'cpu',\n kernelFunc: ifft as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {IsFinite, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const isFinite =\n unaryKernelFunc(IsFinite, (xi) => Number.isFinite(xi) ? 1 : 0, 'bool');\n\nexport const isFiniteConfig: KernelConfig = {\n kernelName: IsFinite,\n backendName: 'cpu',\n kernelFunc: isFinite,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {IsInf, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const isInf =\n unaryKernelFunc(IsInf, (xi) => Math.abs(xi) === Infinity ? 1 : 0, 'bool');\n\nexport const isInfConfig: KernelConfig = {\n kernelName: IsInf,\n backendName: 'cpu',\n kernelFunc: isInf,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {IsNan, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const isNaN =\n unaryKernelFunc(IsNan, (xi) => Number.isNaN(xi) ? 1 : 0, 'bool');\n\nexport const isNaNConfig: KernelConfig = {\n kernelName: IsNan,\n backendName: 'cpu',\n kernelFunc: isNaN,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Less} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const lessImpl =\n createSimpleBinaryKernelImpl((a: number, b: number) => (a < b) ? 1 : 0);\nexport const less =\n binaryKernelFunc(Less, lessImpl, null /* complexImpl */, 'bool');\n\nexport const lessConfig: KernelConfig = {\n kernelName: Less,\n backendName: 'cpu',\n kernelFunc: less\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, LessEqual} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const lessEqualImpl =\n createSimpleBinaryKernelImpl((a: number, b: number) => (a <= b) ? 1 : 0);\nexport const lessEqual =\n binaryKernelFunc(LessEqual, lessEqualImpl, null /* complexImpl */, 'bool');\n\nexport const lessEqualConfig: KernelConfig = {\n kernelName: LessEqual,\n backendName: 'cpu',\n kernelFunc: lessEqual\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {TypedArray, util} from '@tensorflow/tfjs-core';\n\nexport function linSpaceImpl(\n start: number, stop: number, num: number): TypedArray {\n const step = (stop - start) / (num - 1);\n\n const values = util.makeZerosTypedArray(num, 'float32');\n values[0] = start;\n for (let i = 1; i < values.length; i++) {\n values[i] = values[i - 1] + step;\n }\n\n return values;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, LinSpace, LinSpaceAttrs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {linSpaceImpl} from './LinSpace_impl';\n\nexport function linSpace(args: {backend: MathBackendCPU, attrs: LinSpaceAttrs}):\n TensorInfo {\n const {backend, attrs} = args;\n const {start, stop, num} = attrs;\n\n const outVals = linSpaceImpl(start, stop, num);\n\n return backend.makeTensorInfo([outVals.length], 'float32', outVals);\n}\n\nexport const linSpaceConfig: KernelConfig = {\n kernelName: LinSpace,\n backendName: 'cpu',\n kernelFunc: linSpace as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Log} from '@tensorflow/tfjs-core';\n\nimport {createSimpleUnaryImpl} from '../utils/unary_impl';\nimport {unaryKernelFuncFromImpl} from '../utils/unary_utils';\n\nexport const logImpl = createSimpleUnaryImpl((xi) => Math.log(xi));\nexport const log = unaryKernelFuncFromImpl(Log, logImpl);\n\nexport const logConfig: KernelConfig = {\n kernelName: Log,\n backendName: 'cpu',\n kernelFunc: log,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Log1p} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const log1p = unaryKernelFunc(Log1p, (xi) => Math.log1p(xi));\n\nexport const log1pConfig: KernelConfig = {\n kernelName: Log1p,\n backendName: 'cpu',\n kernelFunc: log1p,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, LogicalAnd} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const logicalAndImpl =\n createSimpleBinaryKernelImpl((a: number, b: number) => a && b);\nexport const logicalAnd = binaryKernelFunc(\n LogicalAnd, logicalAndImpl, null /* complexImpl */, 'bool');\n\nexport const logicalAndConfig: KernelConfig = {\n kernelName: LogicalAnd,\n backendName: 'cpu',\n kernelFunc: logicalAnd\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, LogicalNot} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const logicalNot =\n unaryKernelFunc(LogicalNot, (xi) => xi ? 0 : 1, 'bool');\n\nexport const logicalNotConfig: KernelConfig = {\n kernelName: LogicalNot,\n backendName: 'cpu',\n kernelFunc: logicalNot,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, LogicalOr} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const logicalOrImpl =\n createSimpleBinaryKernelImpl((a: number, b: number) => a || b);\nexport const logicalOr =\n binaryKernelFunc(LogicalOr, logicalOrImpl, null /* complexImpl */, 'bool');\n\nexport const logicalOrConfig: KernelConfig = {\n kernelName: LogicalOr,\n backendName: 'cpu',\n kernelFunc: logicalOr\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, LRN, LRNAttrs, LRNInputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function lRN(\n args: {inputs: LRNInputs, backend: MathBackendCPU, attrs: LRNAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {depthRadius, bias, alpha, beta} = attrs;\n\n assertNotComplex(x, 'LRN');\n\n const channels = x.shape[3];\n const maxD = channels - 1;\n const xValues = backend.data.get(x.dataId).values as TypedArray;\n const size = util.sizeFromShape(x.shape);\n const result = new Float32Array(size);\n\n function sumAcrossChannels(offset: number) {\n const currentChannel = offset % channels;\n let beginSumOffset =\n offset - currentChannel + Math.max(0, currentChannel - depthRadius);\n const endSumOffset =\n offset - currentChannel + Math.min(currentChannel + depthRadius, maxD);\n\n let sum = 0.0;\n for (; beginSumOffset <= endSumOffset; beginSumOffset++) {\n const z = xValues[beginSumOffset];\n sum += z * z;\n }\n return sum;\n }\n\n for (let offset = 0; offset < size; offset++) {\n const sum = sumAcrossChannels(offset);\n const val = xValues[offset] * Math.pow(bias + alpha * sum, -beta);\n result[offset] = val;\n }\n\n return backend.makeTensorInfo(x.shape, x.dtype, result);\n}\n\n// tslint:disable-next-line: variable-name\nexport const LRNConfig: KernelConfig = {\n kernelName: LRN,\n backendName: 'cpu',\n kernelFunc: lRN as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, LRNGrad, LRNGradAttrs, LRNGradInputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function lRNGrad(\n args:\n {inputs: LRNGradInputs, backend: MathBackendCPU, attrs: LRNGradAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x, y, dy} = inputs;\n const {depthRadius, bias, alpha, beta} = attrs;\n\n assertNotComplex(dy, 'LRNGrad');\n\n const dySize = util.sizeFromShape(dy.shape);\n\n const channels = dy.shape[3];\n const dyValues = backend.data.get(dy.dataId).values as TypedArray;\n const xValues = backend.data.get(x.dataId).values as TypedArray;\n const yValues = backend.data.get(y.dataId).values as TypedArray;\n const result = new Float32Array(dySize);\n const size = dySize;\n\n for (let offset = 0; offset < size; offset++) {\n const currentChannel = offset % channels;\n const depthBegin =\n (offset - currentChannel) + Math.max(0, currentChannel - depthRadius);\n const depthEnd = (offset - currentChannel) +\n Math.min(channels, currentChannel + depthRadius + 1);\n\n let norm = 0;\n for (let k = depthBegin; k < depthEnd; k++) {\n norm += Math.pow(xValues[k], 2);\n }\n norm = alpha * norm + bias;\n\n for (let k = depthBegin; k < depthEnd; k++) {\n let dyi = -2 * alpha * beta * xValues[k] * yValues[offset] / norm;\n if (offset === k) {\n dyi += Math.pow(norm, -beta);\n }\n dyi *= dyValues[offset];\n result[k] += dyi;\n }\n }\n\n return backend.makeTensorInfo(dy.shape, x.dtype, result);\n}\n\n// tslint:disable-next-line: variable-name\nexport const LRNGradConfig: KernelConfig = {\n kernelName: LRNGrad,\n backendName: 'cpu',\n kernelFunc: lRNGrad as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, NumericDataType, TypedArray, util} from '@tensorflow/tfjs-core';\n\nexport function maxImpl(\n aVals: TypedArray, reduceSize: number, outShape: number[],\n dtype: DataType): TypedArray {\n const vals = util.getTypedArrayFromDType(\n dtype as NumericDataType, util.sizeFromShape(outShape));\n\n for (let i = 0; i < vals.length; ++i) {\n const offset = i * reduceSize;\n let max = aVals[offset];\n for (let j = 0; j < reduceSize; ++j) {\n const value = aVals[offset + j];\n if (Number.isNaN(value) ||\n value > max) { // comparison with NaN always return false\n max = value;\n }\n }\n vals[i] = max;\n }\n return vals;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelFunc, Max, MaxAttrs, MaxInputs, TensorInfo} from '@tensorflow/tfjs-core';\nimport {backend_util, KernelConfig} from '@tensorflow/tfjs-core';\nimport {TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nimport {maxImpl} from './Max_impl';\nimport {transposeImpl} from './Transpose_impl';\n\nexport function max(\n args: {inputs: MaxInputs, backend: MathBackendCPU, attrs: MaxAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {reductionIndices, keepDims} = attrs;\n const cpuBackend = backend;\n let xShape = x.shape;\n const xRank = xShape.length;\n\n const origAxes = util.parseAxisParam(reductionIndices, xShape);\n let axes = origAxes;\n const permutedAxes = backend_util.getAxesPermutation(axes, xRank);\n let xVals = cpuBackend.data.get(x.dataId).values as TypedArray;\n if (permutedAxes != null) {\n const newShape: number[] = new Array(xRank);\n for (let i = 0; i < newShape.length; i++) {\n newShape[i] = xShape[permutedAxes[i]];\n }\n\n xVals = transposeImpl(xVals, xShape, x.dtype, permutedAxes, newShape);\n axes = backend_util.getInnerMostAxes(axes.length, xRank);\n\n xShape = newShape;\n }\n\n assertNotComplex(x, 'max');\n backend_util.assertAxesAreInnerMostDims('max', axes, xRank);\n const [maxOutShape, reduceShape] =\n backend_util.computeOutAndReduceShapes(xShape, axes);\n\n const reduceSize = util.sizeFromShape(reduceShape);\n\n 
const result = maxImpl(xVals, reduceSize, maxOutShape, x.dtype);\n const dataId = cpuBackend.write(result, maxOutShape, x.dtype);\n\n let outShape = maxOutShape;\n if (keepDims) {\n // reshape\n const newShape = backend_util.expandShapeToKeepDim(maxOutShape, origAxes);\n outShape = newShape;\n }\n\n return {dataId, shape: outShape, dtype: x.dtype};\n}\n\nexport const maxConfig: KernelConfig = {\n kernelName: Max,\n backendName: 'cpu',\n kernelFunc: max as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Maximum} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const maximumImpl = createSimpleBinaryKernelImpl(\n ((aValue, bValue) => Math.max(aValue as number, bValue as number)));\nexport const maximum = binaryKernelFunc(Maximum, maximumImpl);\n\nexport const maximumConfig: KernelConfig = {\n kernelName: Maximum,\n backendName: 'cpu',\n kernelFunc: maximum\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {backend_util, KernelConfig, KernelFunc, MaxPool, MaxPoolAttrs, MaxPoolInputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {pool} from '../utils/pool_utils';\nimport {identity} from './Identity';\n\nexport function maxPool(\n args:\n {inputs: MaxPoolInputs, backend: MathBackendCPU, attrs: MaxPoolAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n assertNotComplex(x, 'maxPool');\n const {filterSize, strides, pad, dimRoundingMode} = attrs;\n const dilations = 1;\n\n util.assert(\n backend_util.eitherStridesOrDilationsAreOne(strides, dilations),\n () => 'Error in maxPool: Either strides or dilations must be 1. 
' +\n `Got strides ${strides} and dilations '${dilations}'`);\n\n const convInfo = backend_util.computePool2DInfo(\n x.shape as [number, number, number, number], filterSize, strides,\n dilations, pad, dimRoundingMode);\n let res: TensorInfo;\n\n if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 &&\n util.arraysEqual(convInfo.inShape, convInfo.outShape)) {\n res = identity({inputs: {x}, backend});\n } else {\n const xValues = backend.data.get(x.dataId).values as TypedArray;\n const strides = util.computeStrides(x.shape);\n const buffer = pool(xValues, x.shape, x.dtype, strides, convInfo, 'max');\n res = backend.makeTensorInfo(\n convInfo.outShape, x.dtype, buffer.values as TypedArray);\n }\n return res;\n}\n\nexport const maxPoolConfig: KernelConfig = {\n kernelName: MaxPool,\n backendName: 'cpu',\n kernelFunc: maxPool as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, MaxPool3D, MaxPool3DAttrs, MaxPool3DInputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {pool3d} from '../utils/pool_utils';\n\nexport function maxPool3D(args: {\n inputs: MaxPool3DInputs,\n backend: MathBackendCPU,\n attrs: MaxPool3DAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {filterSize, strides, pad, dimRoundingMode, dataFormat} = attrs;\n\n assertNotComplex(x, 'maxPool3d');\n\n const convInfo = backend_util.computePool3DInfo(\n x.shape as [number, number, number, number, number], filterSize, strides,\n 1 /* dilations */, pad, dimRoundingMode, dataFormat);\n\n const xValues = backend.data.get(x.dataId).values as TypedArray;\n const outBuf = pool3d(\n xValues, x.shape, x.dtype, util.computeStrides(x.shape), convInfo, 'max');\n\n return backend.makeTensorInfo(outBuf.shape, 'float32', outBuf.values);\n}\n\nexport const maxPool3DConfig: KernelConfig = {\n kernelName: MaxPool3D,\n backendName: 'cpu',\n kernelFunc: maxPool3D as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, buffer, KernelConfig, KernelFunc, MaxPool3DGrad, MaxPool3DGradAttrs, MaxPool3DGradInputs, Rank, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {maxPool3dPositions} from '../utils/pool_utils';\n\nexport function maxPool3DGrad(args: {\n inputs: MaxPool3DGradInputs,\n backend: MathBackendCPU,\n attrs: MaxPool3DGradAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {dy, input} = inputs;\n const {filterSize, strides, pad, dimRoundingMode} = attrs;\n\n assertNotComplex([dy, input], 'maxPool3DGrad');\n\n const convInfo = backend_util.computePool3DInfo(\n input.shape as [number, number, number, number, number], filterSize,\n strides, 1 /* dilations */, pad, dimRoundingMode);\n\n const inputBuf = backend.bufferSync(input);\n const maxPosBuf = maxPool3dPositions(inputBuf, convInfo);\n const strideDepth = convInfo.strideDepth;\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const dilationDepth = convInfo.dilationDepth;\n const dilationHeight = convInfo.dilationHeight;\n const dilationWidth = convInfo.dilationWidth;\n const effectiveFilterDepth = convInfo.effectiveFilterDepth;\n const effectiveFilterHeight = convInfo.effectiveFilterHeight;\n const effectiveFilterWidth = convInfo.effectiveFilterWidth;\n const padFront = effectiveFilterDepth - 1 - convInfo.padInfo.front;\n const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left;\n const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top;\n const dx = buffer(input.shape, 'float32');\n\n const dyBuf = backend.bufferSync(dy);\n\n for (let batch = 0; batch < convInfo.batchSize; ++batch) {\n for (let channel = 0; channel < convInfo.inChannels; ++channel) {\n for (let dxDepth = 0; dxDepth < convInfo.inDepth; ++dxDepth) {\n for (let dxRow = 0; dxRow < convInfo.inHeight; ++dxRow) {\n for (let dxCol = 0; dxCol < convInfo.inWidth; ++dxCol) {\n // Shader code begins\n const dyDepthCorner = dxDepth - padFront;\n const dyRowCorner = dxRow - padTop;\n const dyColCorner = dxCol - padLeft;\n let dotProd = 0;\n for (let wDepth = 0; wDepth < effectiveFilterDepth;\n wDepth += dilationDepth) {\n const dyDepth = (dyDepthCorner + wDepth) / strideDepth;\n if (dyDepth < 0 || dyDepth >= convInfo.outDepth ||\n Math.floor(dyDepth) !== dyDepth) {\n continue;\n }\n for (let wRow = 0; wRow < effectiveFilterHeight;\n wRow += dilationHeight) {\n const dyRow = (dyRowCorner + wRow) / strideHeight;\n if (dyRow < 0 || dyRow >= convInfo.outHeight ||\n Math.floor(dyRow) !== dyRow) {\n continue;\n }\n for (let wCol = 0; wCol < effectiveFilterWidth;\n wCol += dilationWidth) {\n const dyCol = (dyColCorner + wCol) / strideWidth;\n if (dyCol < 0 || dyCol >= convInfo.outWidth ||\n Math.floor(dyCol) !== dyCol) {\n continue;\n }\n\n 
const maxPos = effectiveFilterDepth * effectiveFilterHeight *\n effectiveFilterWidth -\n 1 -\n (maxPosBuf.get(batch, dyDepth, dyRow, dyCol, channel) as\n number);\n const curPos =\n wDepth * effectiveFilterHeight * effectiveFilterWidth +\n wRow * effectiveFilterWidth + wCol;\n\n const mask = maxPos === curPos ? 1 : 0;\n if (mask === 0) {\n continue;\n }\n\n const pixel =\n dyBuf.get(batch, dyDepth, dyRow, dyCol, channel);\n dotProd += pixel * mask;\n }\n }\n }\n dx.set(dotProd, batch, dxDepth, dxRow, dxCol, channel);\n }\n }\n }\n }\n }\n\n return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values);\n}\n\nexport const maxPool3DGradConfig: KernelConfig = {\n kernelName: MaxPool3DGrad,\n backendName: 'cpu',\n kernelFunc: maxPool3DGrad as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {backend_util, buffer, KernelConfig, KernelFunc, MaxPoolGrad, MaxPoolGradAttrs, MaxPoolGradInputs, Rank, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {maxPoolPositions} from '../utils/pool_utils';\n\nexport function maxPoolGrad(args: {\n inputs: MaxPoolGradInputs,\n backend: MathBackendCPU,\n attrs: MaxPoolGradAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {dy, input, output} = inputs;\n const x = input;\n assertNotComplex([input, output], 'maxPoolGrad');\n const {filterSize, strides, pad, dimRoundingMode} = attrs;\n\n const convInfo = backend_util.computePool2DInfo(\n x.shape as [number, number, number, number], filterSize, strides,\n 1 /* dilations */, pad, dimRoundingMode);\n const xValues = backend.data.get(x.dataId).values as TypedArray;\n const maxPosBuf = buffer(\n convInfo.outShape, x.dtype,\n maxPoolPositions(xValues, x.shape, x.dtype, convInfo).values);\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const dilationHeight = convInfo.dilationHeight;\n const dilationWidth = convInfo.dilationWidth;\n const effectiveFilterHeight = convInfo.effectiveFilterHeight;\n const effectiveFilterWidth = convInfo.effectiveFilterWidth;\n const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left;\n const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top;\n const dx =\n buffer(x.shape as [number, number, number, number], 'float32');\n\n const dyData = backend.data.get(dy.dataId).values as Float32Array;\n const dyBuf = buffer(\n dy.shape as [number, number, number, number], 'float32', dyData);\n\n for (let b = 0; b < convInfo.batchSize; ++b) {\n for (let d = 0; d < convInfo.inChannels; ++d) {\n for (let dxR = 0; dxR < convInfo.inHeight; ++dxR) {\n for (let dxC = 0; dxC < convInfo.inWidth; ++dxC) {\n // Shader code begins.\n const dyRCorner = dxR - padTop;\n const dyCCorner = dxC - padLeft;\n let dotProd = 0;\n for (let wR = 0; 
wR < effectiveFilterHeight; wR += dilationHeight) {\n const dyR = (dyRCorner + wR) / strideHeight;\n if (dyR < 0 || dyR >= convInfo.outHeight ||\n Math.floor(dyR) !== dyR) {\n continue;\n }\n for (let wC = 0; wC < effectiveFilterWidth; wC += dilationWidth) {\n const dyC = (dyCCorner + wC) / strideWidth;\n if (dyC < 0 || dyC >= convInfo.outWidth ||\n Math.floor(dyC) !== dyC) {\n continue;\n }\n const maxPos = effectiveFilterHeight * effectiveFilterWidth - 1 -\n (maxPosBuf.get(b, dyR, dyC, d) as number);\n const curPos = wR * effectiveFilterWidth + wC;\n\n const mask = maxPos === curPos ? 1 : 0;\n if (mask === 0) {\n continue;\n }\n\n const pixel = dyBuf.get(b, dyR, dyC, d);\n dotProd += pixel * mask;\n }\n }\n dx.set(dotProd, b, dxR, dxC, d);\n }\n }\n }\n }\n return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values);\n}\n\nexport const maxPoolGradConfig: KernelConfig = {\n kernelName: MaxPoolGrad,\n backendName: 'cpu',\n kernelFunc: maxPoolGrad as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {MaxPoolWithArgmax, MaxPoolWithArgmaxAttrs, MaxPoolWithArgmaxInputs} from '@tensorflow/tfjs-core';\nimport {backend_util, KernelConfig, TypedArray} from '@tensorflow/tfjs-core';\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nimport {maxPoolWithArgmaxImpl} from './MaxPoolWithArgmax_impl';\n\nexport const maxPoolWithArgmaxConfig: KernelConfig = {\n kernelName: MaxPoolWithArgmax,\n backendName: 'cpu',\n kernelFunc: ({inputs, attrs, backend}) => {\n const {x} = inputs as MaxPoolWithArgmaxInputs;\n const {filterSize, strides, pad, includeBatchInIndex} =\n attrs as unknown as MaxPoolWithArgmaxAttrs;\n const cpuBackend = backend as MathBackendCPU;\n assertNotComplex(x, 'MaxPoolWithArgmax');\n\n const values = cpuBackend.data.get(x.dataId).values as TypedArray;\n const convInfo = backend_util.computePool2DInfo(\n x.shape as [number, number, number, number], filterSize, strides,\n [1, 1], pad);\n const [pooled, indexes] = maxPoolWithArgmaxImpl(\n values, x.shape, x.dtype, includeBatchInIndex, convInfo);\n\n const pooledDataId =\n cpuBackend.write(pooled as Float32Array, convInfo.outShape, x.dtype);\n const indexesDataId =\n cpuBackend.write(indexes as Int32Array, convInfo.outShape, x.dtype);\n return [\n {dataId: pooledDataId, shape: convInfo.outShape, dtype: x.dtype},\n {dataId: indexesDataId, shape: convInfo.outShape, dtype: 'int32'}\n ];\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {backend_util, DataType, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {maxPoolPositions, pool} from '../utils/pool_utils';\nexport function maxPoolWithArgmaxImpl(\n xValues: TypedArray, xShape: number[], dtype: DataType,\n includeBatchInIndex: boolean, convInfo: backend_util.Conv2DInfo) {\n const strides = util.computeStrides(xShape);\n const maxPools = pool(xValues, xShape, dtype, strides, convInfo, 'max');\n const maxPositions = maxPoolPositions(\n xValues, xShape, dtype, convInfo, true, includeBatchInIndex);\n\n return [maxPools.values, maxPositions.values];\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, Mean, MeanAttrs, MeanInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {cast} from './Cast';\nimport {div} from './RealDiv';\nimport {sum} from './Sum';\n\nexport function mean(\n args: {inputs: MeanInputs, backend: MathBackendCPU, attrs: MeanAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis, keepDims} = attrs;\n\n const axes = util.parseAxisParam(axis, x.shape);\n const shapes = backend_util.computeOutAndReduceShapes(x.shape, axes);\n const reduceShape = shapes[1];\n const reduceSize = util.sizeFromShape(reduceShape);\n const toDispose = [];\n const reduceSizeScalar =\n backend.makeTensorInfo([], 'float32', new Float32Array([reduceSize]));\n toDispose.push(reduceSizeScalar);\n\n const $x = cast({inputs: {x}, backend, attrs: {dtype: 'float32'}});\n toDispose.push($x);\n\n const res =\n div({inputs: {a: $x, b: reduceSizeScalar}, backend}) as TensorInfo;\n toDispose.push(res);\n\n const result = sum({inputs: {x: res}, backend, attrs: {axis, keepDims}});\n\n toDispose.forEach(t => backend.disposeIntermediateTensorInfo(t));\n\n return result;\n}\n\nexport const meanConfig: KernelConfig = {\n kernelName: Mean,\n backendName: 'cpu',\n kernelFunc: mean as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, Min, MinAttrs, MinInputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {reshape} from './Reshape';\nimport {transpose} from './Transpose';\n\nexport function min(\n args: {inputs: MinInputs, backend: MathBackendCPU, attrs: MinAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis, keepDims} = attrs;\n\n assertNotComplex(x, 'min');\n\n const origAxes = util.parseAxisParam(axis, x.shape);\n let axes = origAxes;\n const permutedAxes = backend_util.getAxesPermutation(axes, x.shape.length);\n let $x = x;\n if (permutedAxes != null) {\n $x = transpose({inputs: {x}, backend, attrs: {perm: permutedAxes}});\n axes = backend_util.getInnerMostAxes(axes.length, x.shape.length);\n }\n\n backend_util.assertAxesAreInnerMostDims('min', axes, $x.shape.length);\n const [outShape, reduceShape] =\n backend_util.computeOutAndReduceShapes($x.shape, axes);\n const reduceSize = util.sizeFromShape(reduceShape);\n const vals = util.makeZerosTypedArray(util.sizeFromShape(outShape), $x.dtype);\n\n const aVals = backend.data.get($x.dataId).values as TypedArray;\n for (let i = 0; i < vals.length; ++i) {\n const offset = i * reduceSize;\n let min = aVals[offset];\n for (let j = 0; j < reduceSize; ++j) {\n const value = aVals[offset + j];\n if (Number.isNaN(value) ||\n value < min) { // comparison with NaN always return false\n min = value;\n }\n }\n vals[i] = min;\n }\n\n if (permutedAxes != null) {\n backend.disposeIntermediateTensorInfo($x);\n }\n\n const result = backend.makeTensorInfo(outShape, $x.dtype, vals);\n\n if (keepDims) {\n const expandedShape = backend_util.expandShapeToKeepDim(outShape, origAxes);\n const reshapedResult =\n reshape({inputs: {x: result}, backend, attrs: {shape: expandedShape}});\n\n backend.disposeIntermediateTensorInfo(result);\n\n return reshapedResult;\n }\n\n return result;\n}\n\nexport const minConfig: KernelConfig = {\n kernelName: Min,\n backendName: 'cpu',\n kernelFunc: min as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Minimum} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const minimumImpl = createSimpleBinaryKernelImpl(\n ((aValue, bValue) => Math.min(aValue as number, bValue as number)));\nexport const minimum = binaryKernelFunc(Minimum, minimumImpl);\n\nexport const minimumConfig: KernelConfig = {\n kernelName: Minimum,\n backendName: 'cpu',\n kernelFunc: minimum\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, MirrorPad, MirrorPadAttrs, MirrorPadInputs, NumericDataType, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function mirrorPad(args: {\n inputs: MirrorPadInputs,\n backend: MathBackendCPU,\n attrs: MirrorPadAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {paddings, mode} = attrs;\n\n assertNotComplex(x, 'mirrorPad');\n\n const outShape = paddings.map(\n (p, i) => p[0] /* beforePad */ + x.shape[i] + p[1] /* afterPad */);\n\n const start = paddings.map(p => p[0]);\n const end = paddings.map((p, i) => p[0] + x.shape[i]);\n const offset = mode === 'reflect' ? 
0 : 1;\n\n const xVals = backend.data.get(x.dataId).values as TypedArray;\n const xRank = x.shape.length;\n const xStrides = util.computeStrides(x.shape);\n\n const resultSize = util.sizeFromShape(outShape);\n const resultRank = outShape.length;\n const resultStrides = util.computeStrides(outShape);\n const resVals =\n util.getTypedArrayFromDType(x.dtype as NumericDataType, resultSize);\n\n for (let i = 0; i < resultSize; i++) {\n let coords = util.indexToLoc(i, resultRank, resultStrides);\n for (let i = 0; i < resultRank; i++) {\n if (coords[i] < start[i]) {\n coords[i] = start[i] * 2 - coords[i] - offset;\n } else if (coords[i] >= end[i]) {\n coords[i] = (end[i] - 1) * 2 - coords[i] + offset;\n }\n }\n coords = coords.map((c, i) => c - start[i]);\n\n const inIndex = util.locToIndex(coords, xRank, xStrides);\n\n resVals[i] = xVals[inIndex];\n }\n\n const outId = backend.write(resVals, outShape, x.dtype);\n\n return {dataId: outId, shape: outShape, dtype: x.dtype};\n}\n\nexport const mirrorPadConfig: KernelConfig = {\n kernelName: MirrorPad,\n backendName: 'cpu',\n kernelFunc: mirrorPad as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Mod} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const modImpl =\n createSimpleBinaryKernelImpl(((aValue: number, bValue: number) => {\n const rem = aValue % bValue;\n if ((aValue < 0 && bValue < 0) || (aValue >= 0 && bValue >= 0)) {\n return rem;\n } else {\n return (rem + bValue) % bValue;\n }\n }));\n\nexport const mod = binaryKernelFunc(Mod, modImpl);\n\nexport const modConfig: KernelConfig = {\n kernelName: Mod,\n backendName: 'cpu',\n kernelFunc: mod\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, Softmax, SoftmaxAttrs, SoftmaxInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nimport {exp} from './Exp';\nimport {max} from './Max';\nimport {div} from './RealDiv';\nimport {reshape} from './Reshape';\nimport {sub} from './Sub';\nimport {sum} from './Sum';\n\nexport function softmax(\n args:\n {inputs: SoftmaxInputs, backend: MathBackendCPU, attrs: SoftmaxAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {logits} = inputs;\n const {dim} = attrs;\n\n const logitsRank = logits.shape.length;\n\n let $dim = dim;\n if ($dim === -1) {\n $dim = logitsRank - 1;\n }\n if ($dim !== logitsRank - 1) {\n throw Error(\n 'Softmax along a non-last dimension is not yet supported. ' +\n `Logits was rank ${logitsRank} and dim was ${$dim}`);\n }\n\n const axes = util.parseAxisParam([$dim], logits.shape);\n const maxLogit = max({\n inputs: {x: logits},\n backend,\n attrs: {reductionIndices: axes, keepDims: false}\n });\n const expandedShape = backend_util.expandShapeToKeepDim(maxLogit.shape, axes);\n\n const maxLogitReshaped =\n reshape({inputs: {x: maxLogit}, backend, attrs: {shape: expandedShape}});\n const a =\n sub({inputs: {a: logits, b: maxLogitReshaped}, backend}) as TensorInfo;\n const b = exp({inputs: {x: a}, backend}) as TensorInfo;\n const sumExp =\n sum({inputs: {x: b}, backend, attrs: {axis: axes, keepDims: false}});\n const sumReshaped =\n reshape({inputs: {x: sumExp}, backend, attrs: {shape: expandedShape}});\n\n const result = div({inputs: {a: b, b: sumReshaped}, backend}) as TensorInfo;\n\n backend.disposeIntermediateTensorInfo(maxLogit);\n backend.disposeIntermediateTensorInfo(maxLogitReshaped);\n backend.disposeIntermediateTensorInfo(a);\n backend.disposeIntermediateTensorInfo(b);\n backend.disposeIntermediateTensorInfo(sumExp);\n backend.disposeIntermediateTensorInfo(sumReshaped);\n\n return result;\n}\n\nexport const softmaxConfig: KernelConfig = {\n kernelName: Softmax,\n backendName: 'cpu',\n kernelFunc: softmax as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Multinomial, MultinomialAttrs, MultinomialInputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\nimport * as seedrandom from 'seedrandom';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nimport {softmax} from './Softmax';\n\nexport function multinomial(args: {\n inputs: MultinomialInputs,\n backend: MathBackendCPU,\n attrs: MultinomialAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {logits} = inputs;\n const {numSamples, seed, normalized} = attrs;\n\n assertNotComplex(logits, 'multinomial');\n\n const probabilities = normalized ?\n logits :\n softmax({inputs: {logits}, backend, attrs: {dim: -1}});\n\n const batchSize = probabilities.shape[0];\n const numEvents = probabilities.shape[1];\n const probVals = backend.data.get(probabilities.dataId).values as TypedArray;\n const resShape = [batchSize, numSamples];\n const resVals =\n util.makeZerosTypedArray(util.sizeFromShape(resShape), 'int32');\n\n for (let b = 0; b < batchSize; ++b) {\n const offset = b * numEvents;\n // The cdf won't include the last event. It will be implicit if no other\n // event happened.\n const cdf = new Float32Array(numEvents - 1);\n cdf[0] = probVals[offset];\n for (let event = 1; event < cdf.length; ++event) {\n cdf[event] = cdf[event - 1] + probVals[offset + event];\n }\n\n const random = seedrandom.alea(seed.toString());\n const outOffset = b * numSamples;\n for (let sampleId = 0; sampleId < numSamples; ++sampleId) {\n const r = random();\n\n // Assume last event happened by default.\n resVals[outOffset + sampleId] = cdf.length;\n\n for (let event = 0; event < cdf.length; event++) {\n if (r < cdf[event]) {\n resVals[outOffset + sampleId] = event;\n break;\n }\n }\n }\n }\n\n if (!normalized) {\n backend.disposeIntermediateTensorInfo(probabilities);\n }\n\n return backend.makeTensorInfo(resShape, 'int32', resVals);\n}\n\nexport const multinomialConfig: KernelConfig = {\n kernelName: Multinomial,\n backendName: 'cpu',\n kernelFunc: multinomial as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, KernelConfig, KernelFunc, Neg, TensorInfo, TypedArray, UnaryInputs, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {multiplyImpl} from './Multiply';\n\nexport function negImpl(xVals: TypedArray, xShape: number[], xDtype: DataType):\n [TypedArray, number[]] {\n const minusOne =\n util.createScalarValue(-1 as unknown as 'float32', xDtype) as TypedArray;\n return multiplyImpl([], xShape, minusOne, xVals, xDtype);\n}\n\nexport function neg(args: {inputs: UnaryInputs, backend: MathBackendCPU}):\n TensorInfo {\n const {inputs, backend} = args;\n const {x} = inputs;\n\n assertNotComplex(x, 'neg');\n\n const xVals = backend.data.get(x.dataId).values as TypedArray;\n const [res, newShape] = negImpl(xVals, x.shape, x.dtype);\n\n return backend.makeTensorInfo(newShape, x.dtype, res);\n}\n\nexport const negConfig: KernelConfig = {\n kernelName: Neg,\n backendName: 'cpu',\n kernelFunc: neg as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {kernel_impls, KernelConfig, KernelFunc, NonMaxSuppressionV3, NonMaxSuppressionV3Attrs, NonMaxSuppressionV3Inputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nconst nonMaxSuppressionV3Impl = kernel_impls.nonMaxSuppressionV3Impl;\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function nonMaxSuppressionV3(args: {\n inputs: NonMaxSuppressionV3Inputs,\n backend: MathBackendCPU,\n attrs: NonMaxSuppressionV3Attrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {boxes, scores} = inputs;\n const {maxOutputSize, iouThreshold, scoreThreshold} = attrs;\n\n assertNotComplex(boxes, 'NonMaxSuppression');\n\n const boxesVals = backend.data.get(boxes.dataId).values as TypedArray;\n const scoresVals = backend.data.get(scores.dataId).values as TypedArray;\n\n const {selectedIndices} = nonMaxSuppressionV3Impl(\n boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold);\n\n return backend.makeTensorInfo(\n [selectedIndices.length], 'int32', new Int32Array(selectedIndices));\n}\n\nexport const 
nonMaxSuppressionV3Config: KernelConfig = {\n kernelName: NonMaxSuppressionV3,\n backendName: 'cpu',\n kernelFunc: nonMaxSuppressionV3 as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {kernel_impls, KernelConfig, KernelFunc, NonMaxSuppressionV4, NonMaxSuppressionV4Attrs, NonMaxSuppressionV4Inputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nconst nonMaxSuppressionV4Impl = kernel_impls.nonMaxSuppressionV4Impl;\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function nonMaxSuppressionV4(args: {\n inputs: NonMaxSuppressionV4Inputs,\n backend: MathBackendCPU,\n attrs: NonMaxSuppressionV4Attrs\n}): [TensorInfo, TensorInfo] {\n const {inputs, backend, attrs} = args;\n const {boxes, scores} = inputs;\n const {maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize} =\n attrs;\n\n assertNotComplex(boxes, 'NonMaxSuppressionPadded');\n\n const boxesVals = backend.data.get(boxes.dataId).values as TypedArray;\n const scoresVals = backend.data.get(scores.dataId).values as TypedArray;\n\n const {selectedIndices, validOutputs} = nonMaxSuppressionV4Impl(\n boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold,\n padToMaxOutputSize);\n\n return [\n backend.makeTensorInfo(\n [selectedIndices.length], 'int32', new Int32Array(selectedIndices)),\n backend.makeTensorInfo([], 'int32', new Int32Array([validOutputs]))\n ];\n}\nexport const nonMaxSuppressionV4Config: KernelConfig = {\n kernelName: NonMaxSuppressionV4,\n backendName: 'cpu',\n kernelFunc: nonMaxSuppressionV4 as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {kernel_impls, KernelConfig, KernelFunc, NonMaxSuppressionV5, NonMaxSuppressionV5Attrs, NonMaxSuppressionV5Inputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nconst nonMaxSuppressionV5Impl = kernel_impls.nonMaxSuppressionV5Impl;\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function nonMaxSuppressionV5(args: {\n inputs: NonMaxSuppressionV5Inputs,\n backend: MathBackendCPU,\n attrs: NonMaxSuppressionV5Attrs\n}): [TensorInfo, TensorInfo] {\n const {inputs, backend, attrs} = args;\n const {boxes, scores} = inputs;\n const {maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma} = attrs;\n\n assertNotComplex(boxes, 'NonMaxSuppressionWithScore');\n\n const boxesVals = backend.data.get(boxes.dataId).values as TypedArray;\n const scoresVals = backend.data.get(scores.dataId).values as TypedArray;\n\n const maxOutputSizeVal = maxOutputSize;\n const iouThresholdVal = iouThreshold;\n const scoreThresholdVal = scoreThreshold;\n const softNmsSigmaVal = softNmsSigma;\n\n const {selectedIndices, selectedScores} = nonMaxSuppressionV5Impl(\n boxesVals, scoresVals, maxOutputSizeVal, iouThresholdVal,\n scoreThresholdVal, softNmsSigmaVal);\n\n return [\n backend.makeTensorInfo(\n [selectedIndices.length], 'int32', new Int32Array(selectedIndices)),\n backend.makeTensorInfo(\n [selectedScores.length], 'float32', new Float32Array(selectedScores))\n ];\n}\n\nexport const nonMaxSuppressionV5Config: KernelConfig = {\n kernelName: NonMaxSuppressionV5,\n backendName: 'cpu',\n kernelFunc: nonMaxSuppressionV5 as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, NotEqual} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const notEqualImpl =\n createSimpleBinaryKernelImpl(((a, b) => (a !== b) ? 
1 : 0));\nexport const notEqual =\n binaryKernelFunc(NotEqual, notEqualImpl, null /* complexOp */, 'bool');\n\nexport const notEqualConfig: KernelConfig = {\n kernelName: NotEqual,\n backendName: 'cpu',\n kernelFunc: notEqual\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, OneHot, OneHotAttrs, OneHotInputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function oneHot(\n args: {inputs: OneHotInputs, backend: MathBackendCPU, attrs: OneHotAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {indices} = inputs;\n const {dtype, depth, onValue, offValue} = attrs;\n\n assertNotComplex(indices, 'oneHot');\n\n const indicesSize = util.sizeFromShape(indices.shape);\n\n const res = new Float32Array(indicesSize * depth);\n res.fill(offValue);\n const indicesVal = backend.data.get(indices.dataId).values as TypedArray;\n\n for (let event = 0; event < indicesSize; ++event) {\n if (indicesVal[event] >= 0 && indicesVal[event] < depth) {\n res[event * depth + indicesVal[event]] = onValue;\n }\n }\n\n return backend.makeTensorInfo([...indices.shape, depth], dtype, res);\n}\n\nexport const oneHotConfig: KernelConfig = {\n kernelName: OneHot,\n backendName: 'cpu',\n kernelFunc: oneHot as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, TensorInfo, ZerosLike, ZerosLikeInputs} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nimport {complex} from './Complex';\nimport {fill} from './Fill';\nimport {imag} from './Imag';\nimport {real} from './Real';\n\nexport function zerosLike(\n args: {inputs: ZerosLikeInputs, backend: MathBackendCPU}): TensorInfo {\n const {inputs, backend} = args;\n const {x} = inputs;\n\n if (x.dtype === 'string') {\n throw new Error('zerosLike is not supported for string tensors');\n } else if (x.dtype === 'complex64') {\n const realPart = real({inputs: {input: x}, backend});\n const r = zerosLike({inputs: {x: realPart}, backend});\n const imagPart = imag({inputs: {input: x}, backend});\n const i = zerosLike({inputs: {x: imagPart}, backend});\n\n const result = complex({inputs: {real: r, imag: i}, backend});\n\n backend.disposeIntermediateTensorInfo(realPart);\n backend.disposeIntermediateTensorInfo(r);\n backend.disposeIntermediateTensorInfo(imagPart);\n backend.disposeIntermediateTensorInfo(i);\n\n return result;\n } else {\n return fill({backend, attrs: {shape: x.shape, value: 0, dtype: x.dtype}});\n }\n}\n\nexport const zerosLikeConfig: KernelConfig = {\n kernelName: ZerosLike,\n backendName: 'cpu',\n kernelFunc: zerosLike as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, OnesLike, OnesLikeInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {complex} from './Complex';\nimport {fill} from './Fill';\nimport {imag} from './Imag';\nimport {real} from './Real';\nimport {zerosLike} from './ZerosLike';\n\nexport function onesLike(\n args: {inputs: OnesLikeInputs, backend: MathBackendCPU}): TensorInfo {\n const {inputs, backend} = args;\n const {x} = inputs;\n\n if (x.dtype === 'string') {\n throw new Error('onesLike is not supported for string tensors');\n } else if (x.dtype === 'complex64') {\n const realPart = real({inputs: {input: x}, backend});\n const r = onesLike({inputs: {x: realPart}, backend});\n const imagPart = imag({inputs: {input: x}, backend});\n const i = zerosLike({inputs: {x: imagPart}, backend});\n\n const result = complex({inputs: {real: r, imag: i}, backend});\n\n backend.disposeIntermediateTensorInfo(realPart);\n backend.disposeIntermediateTensorInfo(r);\n backend.disposeIntermediateTensorInfo(imagPart);\n backend.disposeIntermediateTensorInfo(i);\n\n return result;\n } else {\n return fill({backend, attrs: {shape: x.shape, value: 1, dtype: x.dtype}});\n }\n}\n\nexport const onesLikeConfig: KernelConfig = {\n kernelName: OnesLike,\n backendName: 'cpu',\n kernelFunc: onesLike as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Pack, PackAttrs, PackInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {concat} from './Concat';\nimport {expandDims} from './ExpandDims';\n\nexport function pack(\n args: {inputs: PackInputs, backend: MathBackendCPU, attrs: PackAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {axis} = attrs;\n\n if (inputs.length === 1) {\n return expandDims(\n {inputs: {input: inputs[0]}, backend, attrs: {dim: axis}});\n }\n\n const shape = inputs[0].shape;\n const dtype = inputs[0].dtype;\n\n inputs.forEach(t => {\n util.assertShapesMatch(\n shape, t.shape,\n 'All tensors passed to stack must have matching shapes');\n util.assert(\n dtype === t.dtype,\n () => 'All tensors passed to stack must have matching dtypes');\n });\n\n const intermediateTensorInfos: TensorInfo[] = [];\n const expandedTensors = inputs.map(t => {\n const expandedT =\n expandDims({inputs: {input: t}, backend, attrs: {dim: axis}});\n intermediateTensorInfos.push(expandedT);\n return expandedT;\n });\n\n const result = concat({inputs: expandedTensors, backend, attrs: {axis}});\n\n intermediateTensorInfos.forEach(\n t => backend.disposeIntermediateTensorInfo(t));\n\n return result;\n}\n\nexport const packConfig: KernelConfig = {\n kernelName: Pack,\n backendName: 'cpu',\n kernelFunc: pack as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, NumericDataType, PadV2, PadV2Attrs, PadV2Inputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function padV2(\n args: {inputs: PadV2Inputs, backend: MathBackendCPU, attrs: PadV2Attrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {paddings, constantValue} = attrs;\n\n assertNotComplex(x, 'pad');\n\n const outShape = paddings.map(\n (p, i) => p[0] /* beforePad */ + x.shape[i] + p[1] /* afterPad */);\n\n const start = paddings.map(p => p[0]);\n\n const xVals = backend.data.get(x.dataId).values as TypedArray;\n const xSize = util.sizeFromShape(x.shape);\n const xRank = x.shape.length;\n const xStrides = util.computeStrides(x.shape);\n\n const resultSize = util.sizeFromShape(outShape);\n const resultRank = outShape.length;\n const resultStrides = util.computeStrides(outShape);\n const resVals =\n util.getTypedArrayFromDType(x.dtype as NumericDataType, resultSize);\n\n if (constantValue !== 0) {\n resVals.fill(constantValue);\n }\n\n for (let i = 0; i < xSize; i++) {\n const coords = util.indexToLoc(i, xRank, xStrides);\n const outCoords = coords.map((c, i) => c + start[i]);\n const outIndex = util.locToIndex(outCoords, resultRank, resultStrides);\n\n resVals[outIndex] = xVals[i];\n }\n\n const outId = backend.write(resVals, outShape, x.dtype);\n\n return {dataId: outId, shape: outShape, dtype: x.dtype};\n}\n\nexport const padV2Config: KernelConfig = {\n kernelName: PadV2,\n backendName: 'cpu',\n kernelFunc: padV2 as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Pow} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const powImpl =\n createSimpleBinaryKernelImpl((a: number, b: number) => Math.pow(a, b));\nexport const pow = binaryKernelFunc(Pow, powImpl);\n\nexport const powConfig: KernelConfig = {\n kernelName: Pow,\n backendName: 'cpu',\n kernelFunc: pow\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, DataType, KernelConfig, KernelFunc, Prod, ProdAttrs, ProdInputs, TensorInfo, TypedArray, upcastType, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {transpose} from './Transpose';\n\nexport function prodImpl(\n xShape: number[], xDtype: DataType, xVals: TypedArray,\n reductionAxes: number[]):\n {outVals: TypedArray, outShape: number[], outDtype: DataType} {\n const [outShape, reduceShape] =\n backend_util.computeOutAndReduceShapes(xShape, reductionAxes);\n const outDtype = upcastType(xDtype, 'int32');\n const outVals = util.makeZerosTypedArray(\n util.sizeFromShape(outShape), outDtype) as TypedArray;\n const reduceSize = util.sizeFromShape(reduceShape);\n\n for (let i = 0; i < outVals.length; ++i) {\n const offset = i * reduceSize;\n let prod = 1;\n for (let j = 0; j < reduceSize; ++j) {\n prod *= xVals[offset + j];\n }\n outVals[i] = prod;\n }\n\n return {outVals, outShape, outDtype};\n}\n\nexport function prod(\n args: {inputs: ProdInputs, backend: MathBackendCPU, attrs: ProdAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis, keepDims} = attrs;\n\n assertNotComplex(x, 'prod');\n\n const xRank = x.shape.length;\n const axes = util.parseAxisParam(axis, x.shape);\n\n const permutation = backend_util.getAxesPermutation(axes, xRank);\n let reductionAxes = axes;\n let permutedX = x;\n const intermediateTensorInfos = [];\n if (permutation != null) {\n permutedX = transpose({inputs: {x}, backend, attrs: {perm: permutation}});\n intermediateTensorInfos.push(permutedX);\n reductionAxes = 
backend_util.getInnerMostAxes(reductionAxes.length, xRank);\n }\n\n const xVals = backend.data.get(permutedX.dataId).values as TypedArray;\n const {outVals, outShape, outDtype} =\n prodImpl(permutedX.shape, permutedX.dtype, xVals, reductionAxes);\n\n let resultShape = outShape;\n if (keepDims) {\n resultShape = backend_util.expandShapeToKeepDim(outShape, axes);\n }\n\n intermediateTensorInfos.forEach(\n t => backend.disposeIntermediateTensorInfo(t));\n\n return backend.makeTensorInfo(resultShape, outDtype, outVals);\n}\n\nexport const prodConfig: KernelConfig = {\n kernelName: Prod,\n backendName: 'cpu',\n kernelFunc: prod as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2022 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, TypedArray, util} from '@tensorflow/tfjs-core';\n\nfunction validateIndices(\n indices: TypedArray, indicesShape: number[], numParams: number) {\n indices.forEach((index: number, i: number) => {\n if (index < 0 || index >= numParams) {\n const locString =\n util.indexToLoc(\n i, indicesShape.length, util.computeStrides(indicesShape))\n .join(',');\n throw new Error(\n `indices[${locString}] = ${index} is not in [0, ${numParams})`);\n }\n });\n}\n\nfunction validateSplits(\n paramsNestedSplits: TypedArray[], numParamsDenseValues: number) {\n // Validate\n for (let dim = 0; dim < paramsNestedSplits.length; ++dim) {\n const splits = paramsNestedSplits[dim];\n const lastSplit = (dim === paramsNestedSplits.length - 1) ?\n numParamsDenseValues :\n paramsNestedSplits[dim + 1].length;\n if (splits.length === 0) {\n throw new Error('Ragged splits may not be empty');\n }\n if (splits[0] < 0) {\n throw new Error('Ragged splits must be non-negative');\n }\n if (splits[splits.length - 1] > lastSplit) {\n throw new Error('Ragged splits must not point past values');\n }\n for (let i = 1; i < splits.length; ++i) {\n if (splits[i - 1] > splits[i]) {\n throw new Error('Ragged splits must be sorted in ascending order');\n }\n }\n }\n}\n\n// Construct the `splits` output tensors, encoded using a nested vector.\n// Also find the slices of values that need to be copied, and store them\n// in `valueSlices`. The total number of values that will be copied (which\n// we need for allocating the output values tensor) is stored in `numValues`.\nfunction makeSplits(\n indices: TypedArray, indicesShape: number[],\n paramsNestedSplits: TypedArray[], numParamsDenseValues: number) {\n const valueSlices: Array<[number, number]> = [];\n let numValues = 0;\n\n const numSplits = indicesShape.length - 1 + paramsNestedSplits.length;\n const outSplits = new Array(numSplits).fill(null).map(() => [0]);\n\n validateSplits(paramsNestedSplits, numParamsDenseValues);\n\n // Add `splits` that come from all but the last dimension of the dense\n // Tensor `indices`. 
In particular, for each dimension D, we add a\n // splits tensor whose values are:\n // range(reduceProd(splits.shape[:D]) + 1) * splits.shape[D+1]\n // E.g., if indices.shape=[2, 3, 4] then we will add splits tensors:\n // [0, 3, 6] # length=2+1, stride=3\n // [0, 4, 8, 12, 16, 20, 24] # length=2*3+1, stride=4\n let nrows = 1;\n for (let dim = 0; dim < indicesShape.length - 1; ++dim) {\n nrows *= indicesShape[dim];\n const rowLength = indicesShape[dim + 1];\n for (let i = 1; i < nrows + 1; ++i) {\n outSplits[dim].push(i * rowLength);\n }\n }\n\n // Add `splits` that come from `paramsNestedSplits`. Starting with the\n // outermost ragged dimension (i.e., the first `splits` tensor), we work\n // our way in, finding the range of values that should be copied. As we\n // go, we update the output `splits` for each dimension with the appropriate\n // values. In particular, the *lengths* of the slices from `param_splits`\n // should be copied to generate corresponding slice lengths in the output\n // splits. E.g., if we are copying a ragged row with length 4, then we\n // should add a new split point to outSplits that is 4 greater than the\n // previous split point in outSplits.\n for (let i = 0; i < indices.length; ++i) {\n let start = indices[i];\n let limit = indices[i] + 1;\n\n // Copy splits.\n for (let dim = 0; dim < paramsNestedSplits.length; ++dim) {\n const splits = paramsNestedSplits[dim];\n const outDim = dim + indicesShape.length - 1;\n if (outDim >= 0) {\n const outSplitsOutDim = outSplits[outDim];\n const delta =\n outSplitsOutDim[outSplitsOutDim.length - 1] - splits[start];\n for (let j = start; j < limit; ++j) {\n outSplits[outDim].push(splits[j + 1] + delta);\n }\n }\n start = splits[start];\n limit = splits[limit];\n }\n if (limit !== start) {\n valueSlices.push([start, limit]);\n numValues += limit - start;\n }\n }\n\n return {outSplits, valueSlices, numValues};\n}\n\nfunction getSplits(outSplits: number[][]) {\n const splitsOut: TypedArray[] = [];\n for (let i = 0; i < outSplits.length; ++i) {\n const numSplits = outSplits[i].length;\n const splits = util.getArrayFromDType('int32', numSplits) as TypedArray;\n splitsOut.push(splits);\n\n outSplits[i].forEach((value, j: number) => splits[j] = value);\n }\n\n return splitsOut;\n}\n\nfunction computeFlatOuterDims(orig: number[], numOutDims: number) {\n const outDims = orig.slice(0, numOutDims);\n while (outDims.length < numOutDims) {\n outDims.push(1);\n }\n\n for (let inDim = numOutDims; inDim < orig.length; inDim++) {\n outDims[numOutDims - 1] *= orig[inDim];\n }\n\n return outDims;\n}\n// For each slice in `(start, limit)` in `valueSlices`, append\n// `paramsDenseValues[start,...,limit] to `values`. 
`valueSize` indicates\n// the number of scalars contained in each value paramsDenseValues[i].\nfunction writeValueSlices(\n paramsDenseValues: TypedArray, paramsDenseValuesShape: number[],\n valueSlices: Array<[number, number]>, valueSize: number, values: TypedArray,\n valuesShape: number[]) {\n const denseM = computeFlatOuterDims(paramsDenseValuesShape, 2)[1];\n const valuesM = computeFlatOuterDims(valuesShape, 2)[1];\n\n let outPos = 0;\n for (const slice of valueSlices) {\n for (let i = slice[0]; i < slice[1]; ++i) {\n for (let j = 0; j < valueSize; ++j) {\n values[outPos * valuesM + j] = paramsDenseValues[i * denseM + j];\n }\n ++outPos;\n }\n }\n}\n\nfunction getValues(\n paramsDenseValues: TypedArray, paramsDenseValuesShape: number[],\n paramsDenseValuesDType: DataType, valueSlices: Array<[number, number]>,\n numValues: number): [TypedArray, number[]] {\n const valuesShape = paramsDenseValuesShape.slice();\n valuesShape[0] = numValues;\n\n const valuesOut = util.getArrayFromDType(\n paramsDenseValuesDType,\n util.sizeFromShape(valuesShape)) as TypedArray;\n\n const numElements = paramsDenseValues.length;\n const valueSize =\n numElements === 0 ? 0 : (numElements / paramsDenseValuesShape[0]);\n writeValueSlices(\n paramsDenseValues, paramsDenseValuesShape, valueSlices, valueSize,\n valuesOut, valuesShape);\n\n return [valuesOut, valuesShape];\n}\nexport function raggedGatherImpl(\n paramsNestedSplits: TypedArray[], paramsNestedSplitsShapes: number[][],\n paramsDenseValues: TypedArray, paramsDenseValuesShape: number[],\n paramsDenseValuesDType: DataType, indices: TypedArray,\n indicesShape: number[],\n outputRaggedRank: number): [TypedArray[], TypedArray, number[]] {\n if (paramsNestedSplits.length === 0) {\n throw new Error('paramsNestedSplits must be non empty');\n }\n\n if (paramsNestedSplitsShapes[0].length === 0) {\n throw new Error('Split tensors must not be scalars');\n }\n const numParams = paramsNestedSplitsShapes[0][0] - 1;\n validateIndices(indices, indicesShape, numParams);\n\n if (paramsDenseValuesShape.length === 0) {\n throw new Error('params.rank must be nonzero');\n }\n const numParamsDenseValues = paramsDenseValuesShape[0];\n\n // Calculate the `splits`, and store the value slices that we need to\n // copy in `valueSlices`.\n const {outSplits, valueSlices, numValues} = makeSplits(\n indices, indicesShape, paramsNestedSplits, numParamsDenseValues);\n\n // Write the output tensors.\n const outputNestedSplits = getSplits(outSplits);\n const outputDenseValues = getValues(\n paramsDenseValues, paramsDenseValuesShape, paramsDenseValuesDType,\n valueSlices, numValues);\n\n return [outputNestedSplits, outputDenseValues[0], outputDenseValues[1]];\n}\n","/**\n * @license\n * Copyright 2022 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, RaggedGather, RaggedGatherAttrs, RaggedGatherInputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nimport {raggedGatherImpl} from './RaggedGather_impl';\n\nexport function raggedGather(args: {\n inputs: RaggedGatherInputs,\n backend: MathBackendCPU,\n attrs: RaggedGatherAttrs\n}): TensorInfo[] {\n const {inputs, backend, attrs} = args;\n const {paramsNestedSplits, paramsDenseValues, indices} = inputs;\n const {outputRaggedRank} = attrs;\n\n const $paramsNestedSplits = paramsNestedSplits.map(\n t => backend.data.get(t.dataId).values as TypedArray);\n const $paramsNestedSplitsShapes = paramsNestedSplits.map(t => t.shape);\n const $paramsDenseValues =\n backend.data.get(paramsDenseValues.dataId).values as TypedArray;\n const $indices = backend.data.get(indices.dataId).values as TypedArray;\n\n const [outputNestedSplits, outputDenseValues, outputDenseValuesShape] =\n raggedGatherImpl(\n $paramsNestedSplits, $paramsNestedSplitsShapes, $paramsDenseValues,\n paramsDenseValues.shape, paramsDenseValues.dtype, $indices,\n indices.shape, outputRaggedRank);\n\n const outputNestedSplitsTensors = outputNestedSplits.map(\n (splits) => backend.makeTensorInfo([splits.length], 'int32', splits));\n\n const outputDenseValuesTensor = backend.makeTensorInfo(\n outputDenseValuesShape, paramsDenseValues.dtype, outputDenseValues);\n\n return outputNestedSplitsTensors.concat([outputDenseValuesTensor]);\n}\n\nexport const raggedGatherConfig: KernelConfig = {\n kernelName: RaggedGather,\n backendName: 'cpu',\n kernelFunc: raggedGather as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2022 Google LLC.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataType, TypedArray, util} from '@tensorflow/tfjs-core';\n\nconst INT32_MAX = 2147483647;\n\nexport function raggedRangeImpl(\n starts: TypedArray, startsShape: number[], startsDType: DataType,\n limits: TypedArray, limitsShape: number[], deltas: TypedArray,\n deltasShape: number[]): [TypedArray, TypedArray] {\n // Check input tensor shapes.\n if (startsShape.length > 1) {\n throw new Error('starts must be a scalar or vector');\n }\n if (limitsShape.length > 1) {\n throw new Error('limits must be 
a scalar or vector');\n }\n if (deltasShape.length > 1) {\n throw new Error('deltas must be a scalar or vector');\n }\n\n // Determine which tensors we need to broadcast.\n const broadcastStarts = startsShape.length === 0;\n const broadcastLimits = limitsShape.length === 0;\n const broadcastDeltas = deltasShape.length === 0;\n\n // nRows (number of output rows) is the size of the non-broadcast inputs,\n // or 1 if all inputs are scalars.\n const inSizes: number[] = [];\n if (!broadcastStarts) {\n inSizes.push(startsShape[0]);\n }\n if (!broadcastLimits) {\n inSizes.push(limitsShape[0]);\n }\n if (!broadcastDeltas) {\n inSizes.push(deltasShape[0]);\n }\n\n for (let i = 1; i < inSizes.length; ++i) {\n if (inSizes[i] !== inSizes[i - 1]) {\n throw new Error('starts, limits, and deltas must have the same shape');\n }\n }\n const nRows = inSizes.length === 0 ? 1 : inSizes[0];\n\n // Construct the rtNestedSplits tensor.\n const rtNestedSplits =\n util.getArrayFromDType('int32', nRows + 1) as TypedArray;\n rtNestedSplits[0] = 0;\n for (let row = 0; row < nRows; ++row) {\n const start = broadcastStarts ? starts[0] : starts[row];\n const limit = broadcastLimits ? limits[0] : limits[row];\n const delta = broadcastDeltas ? deltas[0] : deltas[row];\n if (delta === 0) {\n throw new Error('Requires delta != 0');\n }\n let size: number; // The number of elements in the specified range.\n if (((delta > 0) && (limit < start)) || ((delta < 0) && (limit > start))) {\n size = 0;\n } else {\n size = Math.ceil(Math.abs((limit - start) / delta));\n\n if (size > INT32_MAX) {\n throw new Error(`Requires ((limit - start) / delta) <= ${INT32_MAX}`);\n }\n }\n rtNestedSplits[row + 1] = rtNestedSplits[row] + size;\n }\n\n const nVals = rtNestedSplits[nRows];\n\n // Construct the rtDenseValues tensor.\n const rtDenseValues =\n util.getArrayFromDType(startsDType, nVals) as TypedArray;\n\n let valueIndex = 0;\n for (let row = 0; row < nRows; ++row) {\n const rowSize = rtNestedSplits[row + 1] - rtNestedSplits[row];\n let value = broadcastStarts ? starts[0] : starts[row];\n const delta = broadcastDeltas ? 
deltas[0] : deltas[row];\n for (let i = 0; i < rowSize; ++i) {\n rtDenseValues[valueIndex++] = value;\n value += delta;\n }\n }\n\n return [rtNestedSplits, rtDenseValues];\n}\n","/**\n * @license\n * Copyright 2022 Google LLC.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, RaggedRange, RaggedRangeInputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nimport {raggedRangeImpl} from './RaggedRange_impl';\n\nexport function raggedRange(\n args: {inputs: RaggedRangeInputs, backend: MathBackendCPU}):\n [TensorInfo, TensorInfo] {\n const {inputs, backend} = args;\n const {starts, limits, deltas} = inputs;\n\n const $starts = backend.data.get(starts.dataId).values as TypedArray;\n const $limits = backend.data.get(limits.dataId).values as TypedArray;\n const $deltas = backend.data.get(deltas.dataId).values as TypedArray;\n\n const [rtNestedSplitsData, rtDenseValuesData] = raggedRangeImpl(\n $starts, starts.shape, starts.dtype, $limits, limits.shape, $deltas,\n deltas.shape);\n\n const rtNestedSplits = backend.makeTensorInfo(\n [rtNestedSplitsData.length], 'int32', rtNestedSplitsData);\n const rtDenseValues = backend.makeTensorInfo(\n [rtDenseValuesData.length], starts.dtype, rtDenseValuesData);\n\n return [rtNestedSplits, rtDenseValues];\n}\n\nexport const raggedRangeConfig: KernelConfig = {\n kernelName: RaggedRange,\n backendName: 'cpu',\n kernelFunc: raggedRange as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2022 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, broadcastTo, DataType, reshape, tidy, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport RowPartitionType = backend_util.RowPartitionType;\n// Based on\n// https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc\nclass RaggedTensorToTensorOp {\n private readonly rowPartitionTypes: RowPartitionType[];\n private readonly raggedRank: number;\n constructor(\n private shape: TypedArray, private shapeShape: number[],\n private values: TypedArray, private valuesShape: number[],\n private valuesDType: DataType, private defaultValue: TypedArray,\n private defaultValueShape: number[],\n private readonly rowPartitionValues: TypedArray[],\n private readonly rowPartitionValuesShapes: number[][],\n rowPartitionTypeStrings: string[]) {\n this.rowPartitionTypes =\n backend_util.getRowPartitionTypesHelper(rowPartitionTypeStrings);\n this.raggedRank = backend_util.getRaggedRank(this.rowPartitionTypes);\n }\n\n private getRowPartitionTypeByDimension(dimension: number) {\n if (this.rowPartitionTypes[0] === RowPartitionType.FIRST_DIM_SIZE) {\n return this.rowPartitionTypes[dimension + 1];\n } else {\n return this.rowPartitionTypes[dimension];\n }\n }\n\n // Returns the relationship between dimension and dimension + 1.\n private getRowPartitionTensor(dimension: number) {\n if (this.rowPartitionTypes[0] === RowPartitionType.FIRST_DIM_SIZE) {\n return this.rowPartitionValues[dimension + 1];\n } else {\n return this.rowPartitionValues[dimension];\n }\n }\n\n private getMaxWidth(dimension: number) {\n const rowPartitionTensor = this.getRowPartitionTensor(dimension - 1);\n switch (this.getRowPartitionTypeByDimension(dimension - 1)) {\n case RowPartitionType.VALUE_ROWIDS:\n return RaggedTensorToTensorOp.getMaxWidthValueRowID(rowPartitionTensor);\n case RowPartitionType.ROW_SPLITS:\n return RaggedTensorToTensorOp.getMaxWidthRowSplit(rowPartitionTensor);\n default:\n throw new Error(`Cannot handle partition type ${\n RowPartitionType[this.getRowPartitionTypeByDimension(\n dimension - 1)]}`);\n }\n }\n\n static getMaxWidthRowSplit(rowSplit: TypedArray) {\n const tensorLength = rowSplit.length;\n if (tensorLength === 0 || tensorLength === 1) {\n return 0;\n }\n let maxWidth = 0;\n for (let i = 0; i < tensorLength - 1; ++i) {\n const currentWidth = rowSplit[i + 1] - rowSplit[i];\n if (currentWidth > maxWidth) {\n maxWidth = currentWidth;\n }\n }\n return maxWidth;\n }\n\n static getMaxWidthValueRowID(valueRowIds: TypedArray) {\n const indexLength = valueRowIds.length;\n if (indexLength === 0) {\n return 0;\n }\n let firstEqualIndex = 0;\n let firstEqualIndexValue = valueRowIds[0];\n let maxWidth = 0;\n for (let i = 1; i < indexLength; ++i) {\n const value = valueRowIds[i];\n if (value !== firstEqualIndexValue) {\n firstEqualIndexValue = value;\n maxWidth = Math.max(i - 
firstEqualIndex, maxWidth);\n firstEqualIndex = i;\n }\n }\n return Math.max(indexLength - firstEqualIndex, maxWidth);\n }\n\n private tensorShapeFromTensor(\n t: TypedArray, tShape: number[], isPartial = true) {\n if (tShape.length === 0) {\n if (t[0] === -1) {\n return [];\n }\n throw new Error(\n `The only valid scalar shape tensor is the fully unknown shape specified as -1.`);\n }\n // MakePartialShape/MakeShapeHelper.\n return makeShape(t, isPartial);\n }\n\n private calculateOutputSize(firstDim: number) {\n const valueShape = this.valuesShape;\n const defaultValueShape = this.defaultValueShape;\n\n backend_util.validateDefaultValueShape(defaultValueShape, valueShape);\n\n const shape = this.tensorShapeFromTensor(this.shape, this.shapeShape);\n const outputShape = backend_util.combineRaggedTensorToTensorShapes(\n this.raggedRank, shape, valueShape);\n\n const result = outputShape;\n\n if (result[0] < 0) {\n result[0] = firstDim;\n }\n for (let i = 1; i <= this.raggedRank; ++i) {\n if (result[i] < 0) {\n result[i] = this.getMaxWidth(i);\n }\n }\n\n return result;\n }\n\n /**\n * The outputIndex represents the index in the output tensor\n * where the first element of a particular dimension would be written.\n * If it is -1, it indicates that the index is out of scope.\n * Example, given firstDimension = 10, firstDimensionOutput = 6,\n * and outputIndexMultiplier = 100:\n * result = [0 100 200 300 400 500 -1 -1 -1 -1]\n * If firstDimensionOutput = 11 instead, then:\n * result = [0 100 200 300 400 500 600 700 800 900]\n */\n private calculateFirstParentOutputIndex(\n firstDimension: number, outputIndexMultiplier: number,\n firstDimensionOutput: number) {\n const minDimension = Math.min(firstDimension, firstDimensionOutput);\n const result: number[] = [];\n let currentOutputIndex = 0;\n for (let i = 0; i < minDimension;\n ++i, currentOutputIndex += outputIndexMultiplier) {\n result.push(currentOutputIndex);\n }\n for (let i = minDimension; i < firstDimension; ++i) {\n result.push(-1);\n }\n util.assert(\n result.length === firstDimension,\n () => 'Final length of result must be equal to firstDimension.');\n\n return result;\n }\n\n private calculateOutputIndexRowSplit(\n rowSplit: TypedArray, parentOutputIndex: number[],\n outputIndexMultiplier: number, outputSize: number) {\n const rowSplitSize = rowSplit.length;\n const result: number[] = [];\n for (let i = 0; i < rowSplitSize - 1; ++i) {\n const rowLength = rowSplit[i + 1] - rowSplit[i];\n let realLength = Math.min(outputSize, rowLength);\n let parentOutputIndexCurrent = parentOutputIndex[i];\n\n if (parentOutputIndexCurrent === -1) {\n realLength = 0;\n }\n for (let j = 0; j < realLength; ++j) {\n result.push(parentOutputIndexCurrent);\n parentOutputIndexCurrent += outputIndexMultiplier;\n }\n for (let j = 0; j < rowLength - realLength; ++j) {\n result.push(-1);\n }\n }\n if (rowSplitSize > 0 && result.length !== rowSplit[rowSplitSize - 1]) {\n throw new Error('Invalid row split size.');\n }\n\n return result;\n }\n\n // Calculate the output index of the first element of a list.\n // The parentOutputIndex is the same computation for the previous list.\n // -1 indicates an element or list that is out of range.\n // The outputIndexMultiplier is the number of output indices one moves\n // forward for each column.\n // E.g., given:\n // valueRowIds:[0 1 2 2 2 3 5 5 6]\n // parentOutputIndex:[1000 1100 2000 2100 -1 3000 4000]\n // outputIndexMultiplier: 10\n // outputSize: 2\n // You get:\n // result = [1000 1100 2000 2010 -1 2100 -1 -1 
3000]\n // result[0] = parentOutputIndex[valueRowIds[0]]\n // result[1] = parentOutputIndex[valueRowIds[1]]\n // result[2] = parentOutputIndex[valueRowIds[2]]\n // result[3] = parentOutputIndex[valueRowIds[2] + 10]\n // result[4] = -1 because it is the third element the size is 2.\n // result[5] = parentOutputIndex[valueRowIds[3]]\n // result[6] = -1 because parentOutputIndex[valueRowIds[6]] == -1\n // result[7] = -1 because parentOutputIndex[valueRowIds[6]] == -1\n // result[8] = parentOutputIndex[valueRowIds[7]]\n private calculateOutputIndexValueRowID(\n valueRowIds: TypedArray, parentOutputIndex: number[],\n outputIndexMultiplier: number, outputSize: number) {\n const indexSize = valueRowIds.length;\n const result: number[] = [];\n if (indexSize === 0) {\n return [];\n }\n\n let currentOutputColumn = 0;\n let currentValueRowId = valueRowIds[0];\n\n if (currentValueRowId >= parentOutputIndex.length) {\n throw new Error(\n `Got currentValueRowId=${currentValueRowId}, which is not less than ${\n parentOutputIndex.length}`);\n }\n\n let currentOutputIndex = parentOutputIndex[currentValueRowId];\n result.push(currentOutputIndex);\n for (let i = 1; i < indexSize; ++i) {\n const nextValueRowId = valueRowIds[i];\n if (nextValueRowId === currentValueRowId) {\n if (currentOutputIndex >= 0) {\n ++currentOutputColumn;\n if (currentOutputColumn < outputSize) {\n currentOutputIndex += outputIndexMultiplier;\n } else {\n currentOutputIndex = -1;\n }\n }\n } else {\n currentOutputColumn = 0;\n currentValueRowId = nextValueRowId;\n\n if (nextValueRowId >= parentOutputIndex.length) {\n throw new Error(\n `Got nextValueRowId=${nextValueRowId} which is not less than ${\n parentOutputIndex.length}`);\n }\n\n currentOutputIndex = parentOutputIndex[nextValueRowId];\n }\n result.push(currentOutputIndex);\n }\n\n if (result.length !== valueRowIds.length) {\n throw new Error('Invalid row ids.');\n }\n\n return result;\n }\n\n private calculateOutputIndex(\n dimension: number, parentOutputIndex: number[],\n outputIndexMultiplier: number, outputSize: number) {\n const rowPartitionTensor = this.getRowPartitionTensor(dimension);\n const partitionType = this.getRowPartitionTypeByDimension(dimension);\n switch (partitionType) {\n case RowPartitionType.VALUE_ROWIDS:\n return this.calculateOutputIndexValueRowID(\n rowPartitionTensor, parentOutputIndex, outputIndexMultiplier,\n outputSize);\n case RowPartitionType.ROW_SPLITS:\n if (rowPartitionTensor.length - 1 > parentOutputIndex.length) {\n throw new Error(`Row partition size is greater than output size: ${\n rowPartitionTensor.length - 1} > ${parentOutputIndex.length}`);\n }\n return this.calculateOutputIndexRowSplit(\n rowPartitionTensor, parentOutputIndex, outputIndexMultiplier,\n outputSize);\n default:\n throw new Error(\n `Unsupported partition type: ${RowPartitionType[partitionType]}`);\n }\n }\n\n private getFirstDimensionSize() {\n const firstPartitionTensor = this.rowPartitionValues[0];\n if (this.rowPartitionTypes.length === 0) {\n throw new Error('No row_partition_types given.');\n }\n const firstPartitionType = this.rowPartitionTypes[0];\n switch (firstPartitionType) {\n case RowPartitionType.FIRST_DIM_SIZE:\n return firstPartitionTensor[0];\n case RowPartitionType.VALUE_ROWIDS:\n throw new Error('Cannot handle VALUE_ROWIDS in first dimension.');\n case RowPartitionType.ROW_SPLITS:\n return this.rowPartitionValuesShapes[0][0] - 1;\n default:\n throw new Error(\n `Cannot handle type ${RowPartitionType[firstPartitionType]}`);\n }\n }\n\n compute(): 
[number[], TypedArray] {\n const firstPartitionTensor = this.rowPartitionValues[0];\n if (firstPartitionTensor.length <= 0) {\n throw new Error(\n 'Invalid first partition input. ' +\n 'Tensor requires at least one element.');\n }\n const firstDimension = this.getFirstDimensionSize();\n const outputSize = this.calculateOutputSize(firstDimension);\n const multiplier: number[] = new Array(this.raggedRank + 1);\n\n multiplier[multiplier.length - 1] = 1;\n for (let i = multiplier.length - 2; i >= 0; --i) {\n multiplier[i] = multiplier[i + 1] * outputSize[i + 1];\n }\n // Full size of the tensor.\n const outputShape: number[] = makeShape(outputSize, false);\n const outputTensor =\n util.getArrayFromDType(\n this.valuesDType, util.sizeFromShape(outputShape)) as TypedArray;\n\n const fullSize = multiplier[0] * outputSize[0];\n if (fullSize > 0) {\n let outputIndex = this.calculateFirstParentOutputIndex(\n firstDimension, multiplier[0], outputSize[0]);\n for (let i = 1; i <= this.raggedRank; ++i) {\n const newOutputIndex = this.calculateOutputIndex(\n i - 1, outputIndex, multiplier[i], outputSize[i]);\n outputIndex = newOutputIndex;\n }\n\n this.setOutput(this.raggedRank, outputIndex, outputTensor, outputShape);\n }\n\n return [outputShape, outputTensor];\n }\n setOutput(\n raggedRank: number, outputIndex: number[], outputTensor: TypedArray,\n outputShape: number[]) {\n if (outputTensor.length === 0) {\n return;\n }\n\n const valuesBase = this.values;\n const outputBase = outputTensor;\n\n let elementShape = outputShape.slice();\n elementShape = elementShape.slice(raggedRank + 1);\n const valueElementSize = util.sizeFromShape(elementShape);\n const outputIndexSize = outputIndex.length;\n\n // Broadcast the default value to value_element_size. (We can skip this\n // if defaultValueTensor.size == 1, since we use fill when that's true.)\n let defaultValue = this.defaultValue;\n if (defaultValue.length !== valueElementSize && defaultValue.length !== 1) {\n const srcShape = this.defaultValueShape;\n tidy(() => {\n const defaultValueTensor = reshape(defaultValue, srcShape);\n const bCastDefault = broadcastTo(defaultValueTensor, elementShape);\n defaultValue = bCastDefault.dataSync();\n });\n }\n\n // Loop through the outputIndex array, finding contiguous regions that\n // should be copied. Once we find the end of a contiguous region, copy it\n // and add any necessary padding (with defaultValue).\n let srcStart = 0; // Start of contiguous region (in values)\n let dstStart = 0; // Destination for contiguous region (in output)\n let dstEnd = 0; // Destination for contiguous region (in output)\n for (let srcI = 0; srcI <= outputIndexSize; ++srcI) {\n // dstI is the destination where the value at srcI should be copied.\n let dstI = srcI < outputIndexSize ? outputIndex[srcI] : -1;\n\n // If we're still in a contiguous region, then update dstEnd go to the\n // next srcI.\n if (dstI === dstEnd) {\n ++dstEnd;\n continue;\n }\n\n // We found the end of contiguous region. 
This can be because we found\n // a gap (dstI > dstEnd), or a source value that shouldn't be copied\n // because it's out-of-bounds (dstI == -1), or the end of the tensor\n // (dstI === -1).\n if (dstStart < dstEnd) {\n // Copy the contiguous region.\n const src = valuesBase.subarray(srcStart * valueElementSize);\n const dst = outputBase.subarray(dstStart * valueElementSize);\n const nVals = (dstEnd - dstStart) * valueElementSize;\n copyArray(dst, src, nVals);\n }\n\n // Add any necessary padding (w/ defaultValue).\n if (srcI >= outputIndexSize) {\n // We reached the end of values: pad to the end of output.\n const outputSize = outputTensor.length;\n dstI = Math.floor(outputSize / valueElementSize);\n }\n if (dstI > dstEnd) {\n if (this.defaultValue.length === 1) {\n outputBase\n .subarray(dstEnd * valueElementSize, dstI * valueElementSize)\n .fill(this.defaultValue[0]);\n dstEnd = dstI;\n } else {\n while (dstI > dstEnd) {\n const dst = outputBase.slice(dstEnd * valueElementSize);\n copyArray(dst, defaultValue, valueElementSize);\n ++dstEnd;\n }\n }\n }\n\n // Update indices.\n if (dstI < 0) {\n // srcI should be skipped -- leave it out of the contiguous region.\n srcStart = srcI + 1;\n dstStart = dstEnd;\n } else {\n // srcI should be copied -- include it in the contiguous region.\n srcStart = srcI;\n dstStart = dstEnd;\n dstEnd = dstStart + 1;\n }\n }\n }\n}\n\nfunction copyArray(dst: TypedArray, src: TypedArray, size: number) {\n for (let i = 0; i < size; i++) {\n dst[i] = src[i];\n }\n}\n\nfunction makeShape(shape: number[]|TypedArray, isPartial: boolean) {\n const out: number[] = [];\n for (let dim of shape) {\n if (dim < 0) {\n if (!isPartial) {\n throw new Error(`Dimension ${dim} must be >= 0`);\n }\n if (dim < -1) {\n throw new Error(`Dimension ${dim} must be >= -1`);\n }\n dim = -1;\n }\n out.push(dim);\n }\n\n return out;\n}\n\nexport function raggedTensorToTensorImpl(\n shape: TypedArray, shapesShape: number[], values: TypedArray,\n valuesShape: number[], valuesDType: DataType, defaultValue: TypedArray,\n defaultValueShape: number[], rowPartitionValues: TypedArray[],\n rowPartitionValuesShapes: number[][],\n rowPartitionTypes: string[]): [number[], TypedArray] {\n return new RaggedTensorToTensorOp(\n shape, shapesShape, values, valuesShape, valuesDType, defaultValue,\n defaultValueShape, rowPartitionValues, rowPartitionValuesShapes,\n rowPartitionTypes)\n .compute();\n}\n","/**\n * @license\n * Copyright 2022 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, RaggedTensorToTensor, RaggedTensorToTensorAttrs, RaggedTensorToTensorInputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nimport {raggedTensorToTensorImpl} from './RaggedTensorToTensor_impl';\n\nexport function raggedTensorToTensor(args: {\n inputs: RaggedTensorToTensorInputs,\n backend: MathBackendCPU,\n attrs: RaggedTensorToTensorAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {shape, values, defaultValue, rowPartitionTensors} = inputs;\n const {rowPartitionTypes} = attrs;\n\n const $shape = backend.data.get(shape.dataId).values as TypedArray;\n const $values = backend.data.get(values.dataId).values as TypedArray;\n const $defaultValue =\n backend.data.get(defaultValue.dataId).values as TypedArray;\n const $rowPartitionValues = rowPartitionTensors.map(\n t => backend.data.get(t.dataId).values as TypedArray);\n const rowPartitionValuesShapes = rowPartitionTensors.map(t => t.shape);\n\n const [outputShape, output] = raggedTensorToTensorImpl(\n $shape, shape.shape, $values, values.shape, values.dtype, $defaultValue,\n defaultValue.shape, $rowPartitionValues, rowPartitionValuesShapes,\n rowPartitionTypes);\n return backend.makeTensorInfo(outputShape, values.dtype, output);\n}\n\nexport const raggedTensorToTensorConfig: KernelConfig = {\n kernelName: RaggedTensorToTensor,\n backendName: 'cpu',\n kernelFunc: raggedTensorToTensor as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DataTypeMap, util} from '@tensorflow/tfjs-core';\n\nexport function rangeImpl(\n start: number, stop: number, step: number,\n dtype: 'float32'|'int32'): DataTypeMap['float32' | 'int32'] {\n const sameStartStop = start === stop;\n const increasingRangeNegativeStep = start < stop && step < 0;\n const decreasingRangePositiveStep = stop < start && step > 1;\n\n if (sameStartStop || increasingRangeNegativeStep ||\n decreasingRangePositiveStep) {\n return util.makeZerosTypedArray(0, dtype);\n }\n\n const numElements = Math.abs(Math.ceil((stop - start) / step));\n const values = util.makeZerosTypedArray(numElements, dtype);\n\n if (stop < start && step === 1) {\n // Auto adjust the step's sign if it hasn't been set\n // (or was set to 1)\n step = -1;\n }\n\n values[0] = start;\n for (let i = 1; i < values.length; i++) {\n values[i] = values[i - 1] + step;\n }\n return values;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Range, RangeAttrs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {rangeImpl} from './Range_impl';\n\nexport function range(args: {backend: MathBackendCPU, attrs: RangeAttrs}):\n TensorInfo {\n const {backend, attrs} = args;\n const {start, stop, dtype, step} = attrs;\n\n const values = rangeImpl(start, stop, step, dtype);\n return backend.makeTensorInfo([values.length], dtype, values);\n}\n\nexport const rangeConfig: KernelConfig = {\n kernelName: Range,\n backendName: 'cpu',\n kernelFunc: range as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Reciprocal} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const reciprocal = unaryKernelFunc(Reciprocal, (xi) => 1 / xi);\n\nexport const reciprocalConfig: KernelConfig = {\n kernelName: Reciprocal,\n backendName: 'cpu',\n kernelFunc: reciprocal,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, ResizeBilinear, ResizeBilinearAttrs, ResizeBilinearInputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function resizeBilinear(args: {\n inputs: ResizeBilinearInputs,\n backend: MathBackendCPU,\n attrs: ResizeBilinearAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {images} = inputs;\n const {alignCorners, halfPixelCenters, size} = attrs;\n\n assertNotComplex(images, 'resizeBilinear');\n\n const imagesStrides = util.computeStrides(images.shape);\n const [newHeight, newWidth] = size;\n\n const [batch, oldHeight, oldWidth, numChannels] = images.shape;\n const xValues = backend.data.get(images.dataId).values as TypedArray;\n const result = new Float32Array(\n util.sizeFromShape([batch, newHeight, newWidth, numChannels]));\n\n const effectiveInputSize: [number, number] = [\n (alignCorners && newHeight > 1) ? oldHeight - 1 : oldHeight,\n (alignCorners && newWidth > 1) ? oldWidth - 1 : oldWidth\n ];\n\n const effectiveOutputSize: [number, number] = [\n (alignCorners && newHeight > 1) ? newHeight - 1 : newHeight,\n (alignCorners && newWidth > 1) ? 
newWidth - 1 : newWidth\n ];\n let outputIdx = 0;\n const effectiveRowSizeRatio = effectiveInputSize[0] / effectiveOutputSize[0];\n const effectiveColSizeRatio = effectiveInputSize[1] / effectiveOutputSize[1];\n for (let b = 0; b < batch; b++) {\n for (let r = 0; r < newHeight; r++) {\n let sourceFracRow: number;\n if (halfPixelCenters) {\n sourceFracRow = effectiveRowSizeRatio * (r + 0.5) - 0.5;\n } else {\n sourceFracRow = effectiveRowSizeRatio * r;\n }\n\n const sourceRowFloor = Math.max(0, Math.floor(sourceFracRow));\n const rowFrac = sourceFracRow - sourceRowFloor;\n const sourceRowCeil = Math.min(oldHeight - 1, Math.ceil(sourceFracRow));\n const topRowOffset =\n b * imagesStrides[0] + sourceRowFloor * imagesStrides[1];\n const botRowOffset =\n b * imagesStrides[0] + sourceRowCeil * imagesStrides[1];\n for (let c = 0; c < newWidth; c++) {\n let sourceFracCol: number;\n if (halfPixelCenters) {\n sourceFracCol = effectiveColSizeRatio * (c + 0.5) - 0.5;\n } else {\n sourceFracCol = effectiveColSizeRatio * c;\n }\n const sourceColFloor = Math.max(0, Math.floor(sourceFracCol));\n const colFrac = sourceFracCol - sourceColFloor;\n const sourceColCeil = Math.min(oldWidth - 1, Math.ceil(sourceFracCol));\n const topLeftOffest = topRowOffset + sourceColFloor * imagesStrides[2];\n const botLeftOffset = botRowOffset + sourceColFloor * imagesStrides[2];\n const topRightOffset = topRowOffset + sourceColCeil * imagesStrides[2];\n const botRightOffest = botRowOffset + sourceColCeil * imagesStrides[2];\n for (let d = 0; d < numChannels; d++) {\n // Begin shader.\n\n // Compute the fractional index of the source.\n const topLeft = xValues[topLeftOffest + d];\n const bottomLeft = xValues[botLeftOffset + d];\n const topRight = xValues[topRightOffset + d];\n const bottomRight = xValues[botRightOffest + d];\n\n const top = topLeft + (topRight - topLeft) * colFrac;\n const bottom = bottomLeft + (bottomRight - bottomLeft) * colFrac;\n const newValue = top + (bottom - top) * rowFrac;\n\n result[outputIdx++] = newValue;\n }\n }\n }\n }\n\n return backend.makeTensorInfo(\n [batch, newHeight, newWidth, numChannels], 'float32', result);\n}\n\nexport const resizeBilinearConfig: KernelConfig = {\n kernelName: ResizeBilinear,\n backendName: 'cpu',\n kernelFunc: resizeBilinear as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, ResizeBilinearGrad, ResizeBilinearGradAttrs, ResizeBilinearGradInputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function resizeBilinearGrad(args: {\n inputs: ResizeBilinearGradInputs,\n backend: MathBackendCPU,\n attrs: ResizeBilinearGradAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {images, dy} = inputs;\n const {alignCorners} = attrs;\n\n assertNotComplex([dy, images], 'resizeBilinearGrad');\n\n const imagesStrides = util.computeStrides(images.shape);\n\n const [batch, xHeight, xWidth, depth] = images.shape;\n const [, yHeight, yWidth] = dy.shape;\n\n const output = new Float32Array(batch * xHeight * xWidth * depth);\n\n // In the backwards pass, we want to find the pixels that were generated\n // for each pixel in the input image the forward pass and add the\n // corresponding coefficient from dy to the gradient (with some\n // interpolation).\n\n const effectiveXSize: [number, number] = [\n (alignCorners && yHeight > 1) ? xHeight - 1 : xHeight,\n (alignCorners && yWidth > 1) ? xWidth - 1 : xWidth\n ];\n\n const effectiveYSize: [number, number] = [\n (alignCorners && yHeight > 1) ? yHeight - 1 : yHeight,\n (alignCorners && yWidth > 1) ? 
yWidth - 1 : yWidth\n ];\n\n const heightScale = effectiveXSize[0] / effectiveYSize[0];\n const widthScale = effectiveXSize[1] / effectiveYSize[1];\n\n // Reference implementation\n // tslint:disable-next-line:max-line-length\n // https://github.com/tensorflow/tensorflow/blob/3039375c86a5bbc9610c7725dcaa95d635f87ba2/tensorflow/core/kernels/resize_bilinear_op.cc#L275\n const dyValues = backend.data.get(dy.dataId).values as TypedArray;\n let offset = 0;\n for (let b = 0; b < batch; b++) {\n const bOffset = b * imagesStrides[0];\n for (let r = 0; r < yHeight; r++) {\n const dxR = r * heightScale;\n const topDxRIndex = Math.floor(dxR);\n const bottomDxRIndex = Math.min(Math.ceil(dxR), xHeight - 1);\n\n const topDxROffset = bOffset + topDxRIndex * imagesStrides[1];\n const bottomDxROffset = bOffset + bottomDxRIndex * imagesStrides[1];\n\n const dxRLerp = dxR - topDxRIndex;\n const inverseDxRLerp = 1.0 - dxRLerp;\n for (let c = 0; c < yWidth; c++) {\n const dxC = c * widthScale;\n const leftDxCIndex = Math.floor(dxC);\n const rightDxCIndex = Math.min(Math.ceil(dxC), xWidth - 1);\n const dxCLerp = dxC - leftDxCIndex;\n const inverseDxCLerp = 1.0 - dxCLerp;\n\n const topLeftRCOffset = topDxROffset + leftDxCIndex * imagesStrides[2];\n const topRightRCOffset =\n topDxROffset + rightDxCIndex * imagesStrides[2];\n const bottomLeftRCOffset =\n bottomDxROffset + leftDxCIndex * imagesStrides[2];\n const bottomRightRCOffset =\n bottomDxROffset + rightDxCIndex * imagesStrides[2];\n\n const inverseDxRLerpTimesInverseDxCLerp =\n inverseDxRLerp * inverseDxCLerp;\n const inverseDxRLerpTimesDxCLerp = inverseDxRLerp * dxCLerp;\n const dxRLerpTimesInverseDxCLerp = dxRLerp * inverseDxCLerp;\n const dxRLerpTimesDxCLerp = dxRLerp * dxCLerp;\n for (let d = 0; d < depth; d++) {\n const dyVal = dyValues[offset++];\n output[topLeftRCOffset + d] +=\n dyVal * inverseDxRLerpTimesInverseDxCLerp;\n output[topRightRCOffset + d] += dyVal * inverseDxRLerpTimesDxCLerp;\n output[bottomLeftRCOffset + d] += dyVal * dxRLerpTimesInverseDxCLerp;\n output[bottomRightRCOffset + d] += dyVal * dxRLerpTimesDxCLerp;\n }\n }\n }\n }\n\n return backend.makeTensorInfo(\n [batch, xWidth, xHeight, depth], 'float32', output);\n}\n\nexport const resizeBilinearGradConfig: KernelConfig = {\n kernelName: ResizeBilinearGrad,\n backendName: 'cpu',\n kernelFunc: resizeBilinearGrad as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, ResizeNearestNeighbor, ResizeNearestNeighborAttrs, ResizeNearestNeighborInputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function resizeNearestNeighbor(args: {\n inputs: ResizeNearestNeighborInputs,\n backend: MathBackendCPU,\n attrs: ResizeNearestNeighborAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {images} = inputs;\n const {alignCorners, halfPixelCenters, size} = attrs;\n\n assertNotComplex(images, 'resizeNearestNeighbor');\n\n const imagesStrides = util.computeStrides(images.shape);\n const [newHeight, newWidth] = size;\n\n const [batch, oldHeight, oldWidth, numChannels] = images.shape;\n const xValues = backend.data.get(images.dataId).values as TypedArray;\n const output = new Float32Array(batch * newHeight * newWidth * numChannels);\n\n const effectiveInputSize: [number, number] = [\n (alignCorners && newHeight > 1) ? oldHeight - 1 : oldHeight,\n (alignCorners && newWidth > 1) ? oldWidth - 1 : oldWidth\n ];\n\n const effectiveOutputSize: [number, number] = [\n (alignCorners && newHeight > 1) ? newHeight - 1 : newHeight,\n (alignCorners && newWidth > 1) ? newWidth - 1 : newWidth\n ];\n\n const effectiveRowSizeRatio = effectiveInputSize[0] / effectiveOutputSize[0];\n const effectiveColSizeRatio = effectiveInputSize[1] / effectiveOutputSize[1];\n\n let outputOffset = 0;\n for (let b = 0; b < batch; b++) {\n const batchOffset = b * imagesStrides[0];\n for (let r = 0; r < newHeight; r++) {\n const sourceFracRow = halfPixelCenters ?\n effectiveRowSizeRatio * (r + 0.5) :\n effectiveRowSizeRatio * r;\n let sourceNearestRow = Math.min(\n oldHeight - 1,\n alignCorners ? Math.round(sourceFracRow) : Math.floor(sourceFracRow));\n if (halfPixelCenters) {\n sourceNearestRow = Math.max(0, sourceNearestRow);\n }\n const rowOffset = batchOffset + sourceNearestRow * imagesStrides[1];\n for (let c = 0; c < newWidth; c++) {\n const sourceFracCol = halfPixelCenters ?\n effectiveColSizeRatio * (c + 0.5) :\n effectiveColSizeRatio * c;\n let sourceNearestCol = Math.min(\n oldWidth - 1,\n alignCorners ? 
Math.round(sourceFracCol) :\n Math.floor(sourceFracCol));\n if (halfPixelCenters) {\n sourceNearestCol = Math.max(0, sourceNearestCol);\n }\n const colOffset = rowOffset + sourceNearestCol * imagesStrides[2];\n for (let d = 0; d < numChannels; d++) {\n // Begin shader.\n // Compute the fractional index of the source.\n const newVal = xValues[colOffset + d];\n output[outputOffset++] = newVal;\n }\n }\n }\n }\n\n return backend.makeTensorInfo(\n [batch, newHeight, newWidth, numChannels], images.dtype, output);\n}\n\nexport const resizeNearestNeighborConfig: KernelConfig = {\n kernelName: ResizeNearestNeighbor,\n backendName: 'cpu',\n kernelFunc: resizeNearestNeighbor as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, ResizeNearestNeighborGrad, ResizeNearestNeighborGradAttrs, ResizeNearestNeighborGradInputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function resizeNearestNeighborGrad(args: {\n inputs: ResizeNearestNeighborGradInputs,\n backend: MathBackendCPU,\n attrs: ResizeNearestNeighborGradAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {images, dy} = inputs;\n const {alignCorners} = attrs;\n\n assertNotComplex([dy, images], 'resizeNearestNeighborGrad');\n\n const imagesStrides = util.computeStrides(images.shape);\n const dyStrides = util.computeStrides(dy.shape);\n const [batch, xHeight, xWidth, depth] = images.shape;\n const [, yHeight, yWidth] = dy.shape;\n\n const output = new Float32Array(batch * xHeight * xWidth * depth);\n const dyValues = backend.data.get(dy.dataId).values as TypedArray;\n\n // In the backwards pass, we want to find the pixels that were generated\n // for each pixel in the input image the forward pass\n\n const effectiveXSize: [number, number] = [\n (alignCorners && yHeight > 1) ? xHeight - 1 : xHeight,\n (alignCorners && yWidth > 1) ? xWidth - 1 : xWidth\n ];\n\n const effectiveYSize: [number, number] = [\n (alignCorners && yHeight > 1) ? yHeight - 1 : yHeight,\n (alignCorners && yWidth > 1) ? 
yWidth - 1 : yWidth\n ];\n\n const heightScale = effectiveXSize[0] / effectiveYSize[0];\n const widthScale = effectiveXSize[1] / effectiveYSize[1];\n\n const invHeightScale = 1 / heightScale;\n const invWidthScale = 1 / widthScale;\n\n // This defines the size of the window of values around a particular\n // index in dy that we want to search for contributions to dx.\n const winHeight = (Math.ceil(invHeightScale) * 2) + 2;\n const winWidth = (Math.ceil(invWidthScale) * 2) + 2;\n\n // Loop over the output space.\n for (let b = 0; b < batch; b++) {\n const batchOffset = b * imagesStrides[0];\n for (let r = 0; r < xHeight; r++) {\n const rowOffset = batchOffset + r * imagesStrides[1];\n\n // Compute bounds for where in dy we will look\n const startRLerp = Math.floor(r * invHeightScale);\n const startDyR = Math.floor(startRLerp - (winHeight / 2));\n for (let c = 0; c < xWidth; c++) {\n const colOffset = rowOffset + c * imagesStrides[2];\n\n // Compute bounds for where in dy we will look\n const startCLerp = Math.floor(c * invWidthScale);\n const startDyC = Math.floor(startCLerp - (winWidth / 2));\n\n for (let d = 0; d < depth; d++) {\n let accum = 0;\n // loop over dy\n\n for (let dyRIndex = 0; dyRIndex < winHeight; dyRIndex++) {\n const dyR = dyRIndex + startDyR;\n // Guard against the window exceeding the bounds of dy\n if (dyR < 0 || dyR >= yHeight) {\n continue;\n }\n\n const dyROffset = batchOffset + dyR * dyStrides[1];\n const sourceFracRow = dyR * heightScale;\n const sourceNearestRow = Math.min(\n xHeight - 1,\n alignCorners ? Math.round(sourceFracRow) :\n Math.floor(sourceFracRow));\n if (r !== sourceNearestRow) {\n continue;\n }\n for (let dyCIndex = 0; dyCIndex < winWidth; dyCIndex++) {\n const dyC = dyCIndex + startDyC;\n // Guard against the window exceeding the bounds of dy\n if (dyC < 0 || dyC >= yWidth) {\n continue;\n }\n\n const dyCOffset = dyROffset + dyC * dyStrides[2];\n const sourceFracCol = dyC * widthScale;\n const sourceNearestCol = Math.min(\n xWidth - 1,\n alignCorners ? Math.round(sourceFracCol) :\n Math.floor(sourceFracCol));\n\n if (c === sourceNearestCol) {\n accum += dyValues[dyCOffset + d];\n }\n }\n }\n output[colOffset + d] = accum;\n }\n }\n }\n }\n\n return backend.makeTensorInfo(images.shape, images.dtype, output);\n}\n\nexport const resizeNearestNeighborGradConfig: KernelConfig = {\n kernelName: ResizeNearestNeighborGrad,\n backendName: 'cpu',\n kernelFunc: resizeNearestNeighborGrad as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Reverse, ReverseAttrs, ReverseInputs, TensorBuffer, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {identity} from './Identity';\n\nexport function reverse(\n args:\n {inputs: ReverseInputs, backend: MathBackendCPU, attrs: ReverseAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {dims} = attrs;\n\n assertNotComplex(x, 'reverse');\n\n const xRank = x.shape.length;\n\n const $dims = util.parseAxisParam(dims, x.shape);\n if (xRank === 0) {\n return identity({inputs: {x}, backend});\n }\n\n const outBuf = new TensorBuffer(x.shape, x.dtype);\n const xBuf = backend.bufferSync(x);\n\n for (let i = 0; i < outBuf.size; i++) {\n const outLoc = outBuf.indexToLoc(i);\n const inLoc = outLoc.slice();\n $dims.forEach(d => inLoc[d] = x.shape[d] - 1 - inLoc[d]);\n outBuf.set(xBuf.get(...inLoc), ...outLoc);\n }\n\n return backend.makeTensorInfo(outBuf.shape, outBuf.dtype, outBuf.values);\n}\n\nexport const reverseConfig: KernelConfig = {\n kernelName: Reverse,\n backendName: 'cpu',\n kernelFunc: reverse as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, NumericDataType, TypedArray} from '@tensorflow/tfjs-core';\nimport {backend_util, RotateWithOffset, RotateWithOffsetAttrs, RotateWithOffsetInputs, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nexport const rotateWithOffsetConfig: KernelConfig = {\n kernelName: RotateWithOffset,\n backendName: 'cpu',\n kernelFunc: ({inputs, attrs, backend}) => {\n const {image} = inputs as RotateWithOffsetInputs;\n const {radians, fillValue, center} =\n attrs as unknown as RotateWithOffsetAttrs;\n const cpuBackend = backend as MathBackendCPU;\n\n const output = util.getTypedArrayFromDType(\n image.dtype as NumericDataType, util.sizeFromShape(image.shape));\n const [batch, imageHeight, imageWidth, numChannels] = image.shape;\n\n const [centerX, centerY] =\n backend_util.getImageCenter(center, imageHeight, imageWidth);\n const fullOpacityValue = 255;\n\n const sinFactor = Math.sin(radians);\n const cosFactor = Math.cos(radians);\n const imageVals = cpuBackend.data.get(image.dataId).values as TypedArray;\n\n for (let batchIdx = 0; batchIdx < batch; batchIdx++) {\n const batchOffset = batchIdx * imageWidth * imageHeight * numChannels;\n\n for (let row = 0; row < imageHeight; row++) {\n const rowOffset = row * (imageWidth * numChannels);\n\n for (let col = 0; col < imageWidth; col++) {\n const colOffset = col * numChannels;\n\n for (let channel = 0; channel < numChannels; channel++) {\n const coords = [batch, row, col, channel];\n\n const x = coords[2];\n const y = coords[1];\n\n // coordX/coordY are the result of rotating and translating x/y.\n let coordX = (x - centerX) * cosFactor - (y - centerY) * sinFactor;\n let coordY = (x - centerX) * sinFactor + (y - centerY) * cosFactor;\n coordX = Math.round(coordX + centerX);\n coordY = Math.round(coordY + centerY);\n\n let outputValue = fillValue;\n if (typeof fillValue !== 'number') {\n if (channel === 3) {\n outputValue = fullOpacityValue;\n } else {\n outputValue = fillValue[channel];\n }\n }\n\n // If the coordinate position falls within the image boundaries...\n if (coordX >= 0 && coordX < imageWidth && coordY >= 0 &&\n coordY < imageHeight) {\n // set the output to the image value at the coordinate position.\n const rotatedRowOffset = coordY * (imageWidth * numChannels);\n const rotatedColOffset = coordX * numChannels;\n const imageIdx =\n batchOffset + rotatedRowOffset + rotatedColOffset + channel;\n outputValue = imageVals[imageIdx];\n }\n\n const outIdx = batchOffset + rowOffset + colOffset + channel;\n output[outIdx] = outputValue as number;\n }\n }\n }\n }\n\n const dataId = cpuBackend.write(output, image.shape, image.dtype);\n return {dataId, shape: image.shape, dtype: image.dtype};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Round} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const round = unaryKernelFunc(Round, (xi) => {\n // The algorithm is based on banker's rounding.\n const base = Math.floor(xi);\n if (xi - base < 0.5) {\n return Math.floor(xi);\n } else if (xi - base > 0.5) {\n return Math.ceil(xi);\n } else {\n if (base % 2.0 === 0.0) {\n return base;\n } else {\n return base + 1.0;\n }\n }\n});\n\nexport const roundConfig: KernelConfig = {\n kernelName: Round,\n backendName: 'cpu',\n kernelFunc: round,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Rsqrt} from '@tensorflow/tfjs-core';\n\nimport {createSimpleUnaryImpl} from '../utils/unary_impl';\nimport {unaryKernelFuncFromImpl} from '../utils/unary_utils';\n\nexport const rsqrtImpl = createSimpleUnaryImpl((xi) => 1 / Math.sqrt(xi));\nexport const rsqrt = unaryKernelFuncFromImpl(Rsqrt, rsqrtImpl);\n\nexport const rsqrtConfig: KernelConfig = {\n kernelName: Rsqrt,\n backendName: 'cpu',\n kernelFunc: rsqrt,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {buffer, Rank, ShapeMap, TensorBuffer, TypedArray} from '@tensorflow/tfjs-core';\n\ninterface DefaultValueTypeMap {\n bool: boolean;\n int32: number;\n float32: number;\n string: string;\n}\n\nexport function\nscatterImpl(\n indices: TensorBuffer, updates: TensorBuffer,\n shape: number[], outputSize: number, sliceSize: number, numUpdates: number,\n sliceRank: number, strides: number[], defaultValue: DefaultValueTypeMap[D],\n sumDupeIndices: boolean): TensorBuffer {\n const flattenShape = [outputSize / sliceSize, sliceSize];\n\n const indicesData = indices.values as TypedArray;\n const updatesData = updates.values;\n\n if (outputSize === 0) {\n return buffer(shape as ShapeMap[R], updates.dtype);\n }\n\n const outBuf = buffer(flattenShape, updates.dtype);\n if (typeof defaultValue === 'string') {\n (outBuf.values as string[]).fill(defaultValue);\n } else if (typeof defaultValue === 'number') {\n (outBuf.values as TypedArray).fill(defaultValue);\n } else if (typeof defaultValue === 'boolean') {\n (outBuf.values as TypedArray).fill(+defaultValue);\n }\n\n for (let i = 0; i < numUpdates; i++) {\n const index = [];\n let flattenIndex = 0;\n for (let j = 0; j < sliceRank; j++) {\n const dim = indicesData[i * sliceRank + j];\n index.push(dim);\n flattenIndex += dim * strides[j];\n }\n\n if (flattenIndex < 0 || flattenIndex >= outputSize / sliceSize) {\n throw new Error(`Invalid indices: ${index} does not index into ${shape}`);\n }\n\n for (let k = 0; k < sliceSize; k++) {\n if (sumDupeIndices) {\n (outBuf.values as TypedArray)[flattenIndex * sliceSize + k] +=\n (updatesData as TypedArray)[i * sliceSize + k];\n } else {\n outBuf.values[flattenIndex * sliceSize + k] = updates.rank === 0 ?\n updatesData[0] :\n updatesData[i * sliceSize + k];\n }\n }\n }\n\n return outBuf as TensorBuffer;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, Rank, ScatterNd, ScatterNdAttrs, ScatterNdInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {scatterImpl} from './Scatter_impl';\n\nexport function scatterNd(args: {\n inputs: ScatterNdInputs,\n backend: MathBackendCPU,\n attrs: ScatterNdAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {indices, updates} = inputs;\n const {shape} = attrs;\n\n const {sliceRank, numUpdates, sliceSize, strides, outputSize} =\n backend_util.calculateShapes(updates, indices, shape);\n const sumDupeIndices = true;\n\n const indicesBuf = backend.bufferSync(indices);\n const updatesBuf = backend.bufferSync(updates);\n\n const outBuf = scatterImpl(\n indicesBuf, updatesBuf, shape, outputSize, sliceSize, numUpdates,\n sliceRank, strides, 0 /* defaultValue */, sumDupeIndices);\n\n return backend.makeTensorInfo(shape, outBuf.dtype, outBuf.values);\n}\n\nexport const scatterNdConfig: KernelConfig = {\n kernelName: ScatterNd,\n backendName: 'cpu',\n kernelFunc: scatterNd as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2022 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {TypedArray, util} from '@tensorflow/tfjs-core';\n\nfunction lowerBound(array: TypedArray, value: number) {\n let left = 0;\n let right = array.length;\n let mid = 0;\n while (left < right) {\n mid = Math.floor((left + right) / 2);\n if (array[mid] < value) {\n left = mid + 1;\n } else {\n right = mid;\n }\n }\n return right;\n}\n\nfunction upperBound(array: TypedArray, value: number) {\n let left = 0;\n let right = array.length;\n let mid = 0;\n while (left < right) {\n mid = Math.floor((left + right) / 2);\n if (array[mid] <= value) {\n left = mid + 1;\n } else {\n right = mid;\n }\n }\n return right;\n}\n\nexport function searchSortedImpl(\n sortedInputs: TypedArray, values: TypedArray, batchSize: number,\n numInputs: number, numValues: number, side: 'left'|'right'): TypedArray {\n const output =\n util.getArrayFromDType('int32', batchSize * numValues) as TypedArray;\n for (let b = 0; b < batchSize; ++b) {\n const sortedInputsSlice =\n sortedInputs.slice(b * numInputs, (b + 1) * numInputs);\n const outputOffset = b * numValues;\n for (let i = 0; i < numValues; ++i) {\n output[outputOffset + i] = side === 'left' ?\n lowerBound(sortedInputsSlice, values[i + outputOffset]) :\n upperBound(sortedInputsSlice, values[i + outputOffset]);\n }\n }\n return output;\n}\n","/**\n * @license\n * Copyright 2022 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, SearchSorted, SearchSortedAttrs, SearchSortedInputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nimport {searchSortedImpl} from './SearchSorted_impl';\n\nexport function searchSorted(args: {\n inputs: SearchSortedInputs,\n backend: MathBackendCPU,\n attrs: SearchSortedAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {sortedSequence, values} = inputs;\n const {side} = attrs;\n\n const $sortedSequence =\n backend.data.get(sortedSequence.dataId).values as TypedArray;\n const $values = backend.data.get(values.dataId).values as TypedArray;\n\n const output = searchSortedImpl(\n $sortedSequence, $values, sortedSequence.shape[0],\n sortedSequence.shape[1], values.shape[1], side);\n return backend.makeTensorInfo(values.shape, 'int32', output);\n}\n\nexport const searchSortedConfig: KernelConfig = {\n kernelName: SearchSorted,\n backendName: 'cpu',\n kernelFunc: searchSorted as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Select, SelectInputs, TensorInfo, TypedArray, upcastType, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport function select(args: {inputs: SelectInputs, backend: MathBackendCPU}):\n TensorInfo {\n const {inputs, backend} = args;\n const {condition, t, e} = inputs;\n\n assertNotComplex([condition, t, e], 'select');\n const conditionRank = condition.shape.length;\n\n const values = backend.data.get(condition.dataId).values as TypedArray;\n const tValues = backend.data.get(t.dataId).values as TypedArray;\n const eValues = backend.data.get(e.dataId).values as TypedArray;\n const resultDtype = upcastType(t.dtype, e.dtype);\n const newValues =\n util.makeZerosTypedArray(util.sizeFromShape(t.shape), resultDtype);\n\n let index = 0;\n const offset =\n conditionRank === 0 || conditionRank > 1 || t.shape.length === 1 ?\n 1 :\n util.sizeFromShape(t.shape.slice(1));\n\n for (let i = 0; i < values.length; i++) {\n for (let j = 0; j < offset; j++) {\n if (values[i] === 1) {\n 
newValues[index++] = tValues[i];\n } else {\n newValues[index++] = eValues[i];\n }\n }\n }\n\n return backend.makeTensorInfo(t.shape, resultDtype, newValues);\n}\n\nexport const selectConfig: KernelConfig = {\n kernelName: Select,\n backendName: 'cpu',\n kernelFunc: select as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, Selu} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nconst scaleAlpha = backend_util.SELU_SCALEALPHA;\nconst scale = backend_util.SELU_SCALE;\n\nexport const selu = unaryKernelFunc(Selu, (xi) => {\n if (xi >= 0) {\n return scale * xi;\n } else {\n return scaleAlpha * (Math.exp(xi) - 1);\n }\n});\n\nexport const seluConfig: KernelConfig = {\n kernelName: Selu,\n backendName: 'cpu',\n kernelFunc: selu,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Sign} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const sign = unaryKernelFunc(Sign, (xi) => {\n if (xi < 0) {\n return -1;\n } else if (xi > 0) {\n return 1;\n } else {\n return 0;\n }\n});\n\nexport const signConfig: KernelConfig = {\n kernelName: Sign,\n backendName: 'cpu',\n kernelFunc: sign,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
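A standalone sketch of the elementwise case of the select kernel above (offset = 1; the offset loop only matters when a rank-1 condition is broadcast across the remaining dimensions). This is an illustration with plain arrays, not the bundled kernel itself.

// Pick from t where the condition is 1, otherwise from e.
function selectSketch(cond: number[], t: number[], e: number[]): number[] {
  return cond.map((c, i) => (c === 1 ? t[i] : e[i]));
}

console.log(selectSketch([1, 0, 1, 0], [1, 2, 3, 4], [10, 20, 30, 40])); // [1, 20, 3, 40]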
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Sin} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const sin = unaryKernelFunc(Sin, (xi) => Math.sin(xi));\n\nexport const sinConfig: KernelConfig = {\n kernelName: Sin,\n backendName: 'cpu',\n kernelFunc: sin,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Sinh} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const sinh = unaryKernelFunc(Sinh, (xi) => Math.sinh(xi));\n\nexport const sinhConfig: KernelConfig = {\n kernelName: Sinh,\n backendName: 'cpu',\n kernelFunc: sinh,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Softplus} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\n// mirrors the implementation of tf.nn.softplus: https://goo.gl/vkcvwX\n\n// epsilon is the difference between 1.0 and the next representable float.\n// For a single precision 32 bit float this should be 2^-23, see:\n// https://math.byu.edu/~schow/work/IEEEFloatingPoint.htm\nconst epsilon = 1.1920928955078125e-7;\nconst threshold = Math.log(epsilon) + 2.0;\n\nexport const softplus = unaryKernelFunc(Softplus, (xi) => {\n // Value above which exp(x) may overflow, but softplus(x) == x\n // is within machine epsilon.\n const tooLarge = xi > -threshold;\n\n // Value below which exp(x) may underflow, but softplus(x) == exp(x)\n // is within machine epsilon.\n const tooSmall = xi < threshold;\n\n const expX = Math.exp(xi);\n let result;\n\n if (tooSmall) {\n result = expX;\n } else if (tooLarge) {\n result = xi;\n } else {\n result = Math.log(1.0 + expX);\n }\n return result;\n});\n\nexport const softplusConfig: KernelConfig = {\n kernelName: Softplus,\n backendName: 'cpu',\n kernelFunc: softplus,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
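A standalone sketch of the numerically stable softplus thresholding used in the kernel above: large inputs return x directly, very negative inputs return exp(x), and only the middle range evaluates log(1 + exp(x)).

const EPSILON = 1.1920928955078125e-7;   // 2^-23, single-precision machine epsilon
const THRESHOLD = Math.log(EPSILON) + 2.0;

function softplusSketch(x: number): number {
  if (x > -THRESHOLD) { return x; }            // exp(x) would overflow; softplus(x) ~= x
  if (x < THRESHOLD) { return Math.exp(x); }   // exp(x) underflows; softplus(x) ~= exp(x)
  return Math.log(1.0 + Math.exp(x));
}

console.log(softplusSketch(0));    // ~0.6931 (log 2)
console.log(softplusSketch(100));  // 100 (identity branch)
console.log(softplusSketch(-100)); // ~3.7e-44 (exp branch)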
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, ReshapeAttrs, ReshapeInputs, SpaceToBatchND, SpaceToBatchNDAttrs, SpaceToBatchNDInputs, TensorInfo, TransposeAttrs, TransposeInputs, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nimport {padV2Config} from './PadV2';\nimport {reshape} from './Reshape';\nimport {transpose} from './Transpose';\n\nexport function spaceToBatchND(args: {\n inputs: SpaceToBatchNDInputs,\n backend: MathBackendCPU,\n attrs: SpaceToBatchNDAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {blockShape, paddings} = attrs;\n\n assertNotComplex([x], 'spaceToBatchND');\n\n const prod = util.sizeFromShape(blockShape);\n\n const completePaddings: Array<[number, number]> = [[0, 0]];\n completePaddings.push(...(paddings as Array<[number, number]>));\n\n for (let i = 1 + blockShape.length; i < x.shape.length; ++i) {\n completePaddings.push([0, 0]);\n }\n\n const paddedX = padV2Config.kernelFunc({\n inputs: {x},\n backend,\n attrs: {paddings: completePaddings, constantValue: 0}\n }) as TensorInfo;\n\n const reshapedPaddedShape =\n backend_util.getReshaped(paddedX.shape, blockShape, prod, false);\n\n const permutedReshapedPaddedPermutation = backend_util.getPermuted(\n reshapedPaddedShape.length, blockShape.length, false);\n\n const flattenShape =\n backend_util.getReshapedPermuted(paddedX.shape, blockShape, prod, false);\n\n const reshapeInputs: ReshapeInputs = {x: paddedX};\n const reshapeAttrs: ReshapeAttrs = {shape: reshapedPaddedShape};\n const paddedXReshaped =\n reshape({inputs: reshapeInputs, backend, attrs: reshapeAttrs});\n\n const transposeInputs: TransposeInputs = {x: paddedXReshaped};\n const transposeAttrs:\n TransposeAttrs = {perm: permutedReshapedPaddedPermutation};\n const paddedXT =\n transpose({inputs: transposeInputs, backend, attrs: transposeAttrs});\n\n const resultReshapeInputs: ReshapeInputs = {x: paddedXT};\n const resultReshapeAttrs: ReshapeAttrs = {shape: flattenShape};\n const result = reshape(\n {inputs: resultReshapeInputs, backend, attrs: resultReshapeAttrs});\n\n backend.disposeIntermediateTensorInfo(paddedX);\n backend.disposeIntermediateTensorInfo(paddedXReshaped);\n backend.disposeIntermediateTensorInfo(paddedXT);\n\n return result;\n}\n\nexport const spaceToBatchNDConfig: KernelConfig = {\n kernelName: SpaceToBatchND,\n backendName: 'cpu',\n kernelFunc: spaceToBatchND as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
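A simplified, standalone sketch of the shape effect of the pad/reshape/transpose/reshape pipeline in spaceToBatchND above, assuming the paddings already make each blocked spatial dimension divisible by its block size: the batch dimension grows by prod(blockShape) and each blocked spatial dimension shrinks accordingly.

// Illustration only; the bundled kernel also applies paddings and moves data.
function spaceToBatchShape(xShape: number[], blockShape: number[]): number[] {
  const prod = blockShape.reduce((a, b) => a * b, 1);
  const out = [xShape[0] * prod];
  blockShape.forEach((b, i) => out.push(xShape[i + 1] / b));
  return out.concat(xShape.slice(1 + blockShape.length));
}

console.log(spaceToBatchShape([1, 4, 4, 1], [2, 2])); // [4, 2, 2, 1]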
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, DataType, TypedArray, util} from '@tensorflow/tfjs-core';\n\nexport function sparseFillEmptyRowsImpl(\n indices: TypedArray, indicesShape: number[], indicesDType: DataType,\n values: TypedArray, valuesDType: DataType, denseShape: TypedArray,\n defaultValue: number):\n [TypedArray, number[], TypedArray, boolean[], number[]] {\n const indicesCount = indicesShape[0];\n const denseRows = denseShape[0];\n\n const emptyRowIndicator: boolean[] = new Array(denseRows);\n const reverseIndexMap: number[] = new Array(indicesCount);\n\n const rank = indicesShape[1];\n\n if (denseRows === 0) {\n if (indicesCount !== 0) {\n throw new Error(\n backend_util.getSparseFillEmptyRowsIndicesDenseShapeMismatch(\n indicesCount));\n }\n const outputIndices = util.getArrayFromDType(indicesDType, 0) as TypedArray;\n const outputValues = util.getArrayFromDType(valuesDType, 0) as TypedArray;\n return [\n outputIndices, [0, rank], outputValues, emptyRowIndicator, reverseIndexMap\n ];\n }\n\n let rowsAreOrdered = true;\n let lastIndicesRow = 0;\n const csrOffset: number[] = new Array(denseRows).fill(0);\n\n for (let i = 0; i < indicesCount; ++i) {\n // indices is a 2d tensor with shape of [N, rank]\n const row = indices[i * rank];\n if (row < 0) {\n throw new Error(\n backend_util.getSparseFillEmptyRowsNegativeIndexErrorMessage(i, row));\n }\n if (row >= denseRows) {\n throw new Error(\n backend_util.getSparseFillEmptyRowsOutOfRangeIndexErrorMessage(\n i, row, denseRows));\n }\n ++csrOffset[row];\n rowsAreOrdered = rowsAreOrdered && (row >= lastIndicesRow);\n lastIndicesRow = row;\n }\n\n let allRowsFull = true;\n for (let row = 0; row < denseRows; ++row) {\n // csrOffset here describes the number of elements in this dense row\n const rowEmpty = (csrOffset[row] === 0);\n emptyRowIndicator[row] = rowEmpty;\n allRowsFull = allRowsFull && !rowEmpty;\n // In filled version, each row has at least one element.\n csrOffset[row] = Math.max(csrOffset[row], 1);\n // Update csrOffset to represent the number of elements up to and\n // including denseRows + 1:\n // csrOffset[0] == #{elements of row 0}\n // csrOffset[1] == #{elements of row 1} + #{elements of row 0}\n // ..\n // csrOffset[i] == starting index for elements in row i + 1.\n if (row > 0) {\n csrOffset[row] += csrOffset[row - 1];\n }\n }\n\n if (allRowsFull && rowsAreOrdered) {\n const outputIndices: TypedArray = indices;\n const outputValues: TypedArray = values;\n for (let i = 0; i < indicesCount; ++i) {\n reverseIndexMap[i] = i;\n }\n return [\n outputIndices, [indicesCount, rank], outputValues, emptyRowIndicator,\n reverseIndexMap\n ];\n } else {\n const fullIndicesCount = csrOffset[denseRows - 1];\n const outputIndices =\n util.getArrayFromDType(indicesDType, fullIndicesCount * rank) as\n TypedArray;\n const outputValues =\n util.getArrayFromDType(valuesDType, fullIndicesCount) as 
TypedArray;\n const filledCount: number[] = new Array(denseRows).fill(0);\n\n // Fill in values for rows that are not missing\n for (let i = 0; i < indicesCount; ++i) {\n // indices is a 2d tensor with shape of [N, rank]\n const row = indices[i * rank];\n const offset = filledCount[row];\n const outputI = ((row === 0) ? 0 : csrOffset[row - 1]) + offset;\n filledCount[row]++; // Increment the filled count for this row.\n for (let j = 0; j < rank; ++j) {\n // indices and outputIndices are 2d tensors with shape of [N, rank]\n outputIndices[outputI * rank + j] = indices[i * rank + j];\n }\n outputValues[outputI] = values[i];\n // We'll need this reverse index map to backprop correctly.\n reverseIndexMap[i] = outputI;\n }\n\n // Fill in values for rows that are missing\n for (let row = 0; row < denseRows; ++row) {\n const rowCount = filledCount[row];\n if (rowCount === 0) { // We haven't filled this row\n const startingIndex = (row === 0) ? 0 : csrOffset[row - 1];\n // Remaining index values were set to zero already.\n // Just need to set the row index in the right location.\n // outputIndices is a 2d tensor with shape of [N, rank]\n outputIndices[startingIndex * rank + 0] = row;\n for (let col = 1; col < rank; ++col) {\n outputIndices[startingIndex * rank + col] = 0;\n }\n outputValues[startingIndex] = defaultValue;\n }\n }\n return [\n outputIndices, [fullIndicesCount, rank], outputValues, emptyRowIndicator,\n reverseIndexMap\n ];\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, SparseFillEmptyRows, SparseFillEmptyRowsInputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nimport {sparseFillEmptyRowsImpl} from './SparseFillEmptyRows_impl';\n\nexport function sparseFillEmptyRows(args: {\n inputs: SparseFillEmptyRowsInputs,\n backend: MathBackendCPU\n}): [TensorInfo, TensorInfo, TensorInfo, TensorInfo] {\n const {inputs, backend} = args;\n const {indices, values, denseShape, defaultValue} = inputs;\n if (denseShape.shape.length !== 1) {\n throw new Error(`Dense shape must be a vector, saw:\n ${denseShape.shape}`);\n }\n if (indices.shape.length !== 2) {\n throw new Error(`Indices must be a matrix, saw:\n ${indices.shape}`);\n }\n if (values.shape.length !== 1) {\n throw new Error(`Values must be a vector, saw:\n ${values.shape}`);\n }\n if (defaultValue.shape.length !== 0) {\n throw new Error(`Default value must be a scalar, saw:\n ${defaultValue.shape}`);\n }\n\n const $indices = backend.data.get(indices.dataId).values as TypedArray;\n const $values = backend.data.get(values.dataId).values as TypedArray;\n const $denseShape = backend.data.get(denseShape.dataId).values as TypedArray;\n const $defaultValue =\n backend.data.get(defaultValue.dataId).values[0] as number;\n\n const [outputIndices, outputIndicesShape, outputValues,\n 
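A small worked example (illustrative values, derived from the semantics of sparseFillEmptyRowsImpl above, not produced by executing the bundled code): a [4, 2] sparse tensor with two empty rows gets one default-valued entry per empty row.

const exIndices = [[0, 0], [2, 0]];   // two populated entries, rows 0 and 2
const exValues = [1, 2];
const exDenseShape = [4, 2];
const exDefault = 9;

// Expected results:
//   outputIndices     -> [[0, 0], [1, 0], [2, 0], [3, 0]]
//   outputValues      -> [1, 9, 2, 9]
//   emptyRowIndicator -> [false, true, false, true]
//   reverseIndexMap   -> [0, 2]   (original entry i now sits at output position reverseIndexMap[i])
console.log(exIndices, exValues, exDenseShape, exDefault);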
emptyRowIndicator, reverseIndexMap] =\n sparseFillEmptyRowsImpl(\n $indices, indices.shape, indices.dtype, $values, values.dtype,\n $denseShape, $defaultValue);\n return [\n backend.makeTensorInfo(outputIndicesShape, indices.dtype, outputIndices),\n backend.makeTensorInfo(\n [outputIndicesShape[0]], values.dtype, outputValues),\n backend.makeTensorInfo(\n [emptyRowIndicator.length], 'bool',\n new Uint8Array(\n emptyRowIndicator.map((value: boolean) => Number(value)))),\n backend.makeTensorInfo(\n [reverseIndexMap.length], indices.dtype,\n new Int32Array(reverseIndexMap)),\n ];\n}\n\nexport const sparseFillEmptyRowsConfig: KernelConfig = {\n kernelName: SparseFillEmptyRows,\n backendName: 'cpu',\n kernelFunc: sparseFillEmptyRows as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, DataType, TypedArray, util} from '@tensorflow/tfjs-core';\n\nexport function sparseReshapeImpl(\n inputIndices: TypedArray, inputIndicesShape: number[], inputDType: DataType,\n inputShape: number[],\n targetShape: number[]): [TypedArray, number[], number[]] {\n const denseSize = util.sizeFromShape(inputShape);\n const nnz = inputIndicesShape[0];\n const outputRank = targetShape.length;\n\n // Compute the output shape. 
Determine product of specified dimensions, and\n // find the index of the unspecified one.\n const outputShape: number[] = [];\n let product = 1;\n let unknownIndex = -1;\n for (let d = 0; d < outputRank; ++d) {\n const size = targetShape[d];\n if (size === -1) {\n if (unknownIndex !== -1) {\n throw new Error(\n backend_util\n .getSparseReshapeMultipleNegativeOneOutputDimErrorMessage(\n unknownIndex, d));\n }\n unknownIndex = d;\n outputShape.push(1);\n } else {\n if (size < 0) {\n throw new Error(\n backend_util.getSparseReshapeNegativeOutputDimErrorMessage(\n d, size));\n }\n product *= size;\n outputShape.push(size);\n }\n }\n if (unknownIndex !== -1) {\n if (product <= 0) {\n throw new Error(\n backend_util.getSparseReshapeEmptyTensorZeroOutputDimErrorMessage());\n }\n const missing = Math.trunc(denseSize / product);\n if (product * missing !== denseSize) {\n throw new Error(\n backend_util.getSparseReshapeInputOutputMultipleErrorMessage(\n inputShape, outputShape));\n }\n\n outputShape[unknownIndex] = missing;\n }\n const outputSize = util.sizeFromShape(outputShape);\n if (outputSize !== denseSize) {\n throw new Error(\n backend_util.getSparseReshapeInputOutputMismatchErrorMessage(\n inputShape, outputShape));\n }\n\n const inputRank = inputShape.length;\n const inputStrides: number[] = [];\n if (inputRank > 0) {\n inputStrides[inputRank - 1] = 1;\n for (let d = inputRank - 2; d >= 0; --d) {\n inputStrides[d] = inputStrides[d + 1] * inputShape[d + 1];\n }\n }\n\n const outputStrides: number[] = [];\n if (outputRank > 0) {\n outputStrides[outputRank - 1] = 1;\n for (let d = outputRank - 2; d >= 0; --d) {\n outputStrides[d] = outputStrides[d + 1] * outputShape[d + 1];\n }\n }\n\n const newIndices =\n util.getArrayFromDType(inputDType, nnz * outputRank) as TypedArray;\n for (let i = 0; i < nnz; ++i) {\n let id = 0;\n for (let j = 0; j < inputRank; ++j) {\n // inputIndices is a 2d tensor with shape of [nnz, inputRank]\n id += inputIndices[i * inputRank + j] * inputStrides[j];\n }\n for (let j = 0; j < outputRank; ++j) {\n // newIndices is a 2d tensor with shape of [nnz, outputRank]\n newIndices[i * outputRank + j] = Math.trunc(id / outputStrides[j]);\n id %= outputStrides[j];\n }\n }\n return [newIndices, [nnz, outputRank], outputShape];\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
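A standalone sketch of the stride-based index remapping performed by sparseReshapeImpl above: each sparse index is linearized with the input strides, then re-expanded with the output strides. Plain arrays only; not the bundled function.

function remapSparseIndex(index: number[], inputShape: number[], outputShape: number[]): number[] {
  // Row-major strides for a given shape.
  const strides = (shape: number[]) =>
      shape.map((_, d) => shape.slice(d + 1).reduce((a, b) => a * b, 1));
  const inStrides = strides(inputShape);
  const outStrides = strides(outputShape);

  let flat = index.reduce((acc, v, d) => acc + v * inStrides[d], 0);
  return outStrides.map(s => {
    const coord = Math.trunc(flat / s);
    flat %= s;
    return coord;
  });
}

// Reshaping a [3, 4] sparse tensor to [6, 2]: index [1, 3] -> flat 7 -> [3, 1].
console.log(remapSparseIndex([1, 3], [3, 4], [6, 2])); // [3, 1]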
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, SparseReshape, SparseReshapeInputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nimport {sparseReshapeImpl} from './SparseReshape_impl';\n\nexport function sparseReshape(\n args: {inputs: SparseReshapeInputs, backend: MathBackendCPU}):\n [TensorInfo, TensorInfo] {\n const {inputs, backend} = args;\n const {inputIndices, inputShape, newShape} = inputs;\n if (inputIndices.shape.length !== 2) {\n throw new Error(`Input indices should be a matrix but received shape\n ${inputIndices.shape}`);\n }\n if (inputShape.shape.length !== 1) {\n throw new Error(`Input shape should be a vector but received shape\n ${inputShape.shape}`);\n }\n\n if (newShape.shape.length !== 1) {\n throw new Error(\n `Target shape should be a vector but received shape ${newShape.shape}`);\n }\n\n const $inputShape =\n Array.from(backend.data.get(inputShape.dataId).values as TypedArray);\n const $inputIndices =\n backend.data.get(inputIndices.dataId).values as TypedArray;\n const targetShape =\n Array.from(backend.data.get(newShape.dataId).values as TypedArray);\n\n const [newIndices, indicesShape, outputShape] = sparseReshapeImpl(\n $inputIndices, inputIndices.shape, inputIndices.dtype, $inputShape,\n targetShape);\n return [\n backend.makeTensorInfo(indicesShape, inputIndices.dtype, newIndices),\n backend.makeTensorInfo(\n [outputShape.length], newShape.dtype, new Int32Array(outputShape)),\n ];\n}\n\nexport const sparseReshapeConfig: KernelConfig = {\n kernelName: SparseReshape,\n backendName: 'cpu',\n kernelFunc: sparseReshape,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, DataType, TypedArray, util} from '@tensorflow/tfjs-core';\n\nexport function sparseSegmentReductionImpl(\n input: TypedArray, inputShape: number[], inputDType: DataType,\n indices: TypedArray, segmentIds: TypedArray, isMean = false,\n defaultValue = 0): [TypedArray, number[]] {\n const numIndices = indices.length;\n\n // Flatten the array to two dimensions\n const inputFlat: number[] = [inputShape[0], input.length / inputShape[0]];\n const numCol = inputFlat[1];\n // Note that the current implementation assumes that segmentIds values are\n // sorted.\n const lastSegmentIdPlusOne =\n numIndices > 0 ? segmentIds[numIndices - 1] + 1 : 0;\n const outputRows = lastSegmentIdPlusOne;\n\n if (outputRows < 0) {\n throw new Error(\n backend_util.getSparseSegmentReductionNegativeSegmentIdsErrorMessage());\n }\n\n const outputShape = inputShape.slice();\n outputShape[0] = outputRows;\n\n const outputLength =\n outputShape.reduce((product, value) => product * value, 1);\n // Output array is initialized with the value 0 by default.\n const output = util.getArrayFromDType(inputDType, outputLength) as TypedArray;\n\n // Note that we do not initialize the output buffer with a default value, so\n // we need to explicitly set missing indices to the default value.\n if (numIndices === 0) {\n if (outputRows > 0) {\n output.fill(defaultValue);\n }\n return [output, outputShape];\n }\n\n if (outputRows <= 0) {\n throw new Error(\n backend_util.getSparseSegmentReductionNegativeSegmentIdsErrorMessage());\n }\n\n let start = 0, end = 1;\n // Index from which the output is not initialized.\n let uninitializedIndex = 0;\n let outIndex = segmentIds[start];\n\n while (true) {\n // We initialize nextIndex to 0 to avoid may be uninitialized warning\n let nextIndex = 0;\n if (end < numIndices) {\n nextIndex = segmentIds[end];\n if (outIndex === nextIndex) {\n ++end;\n continue;\n }\n // We have a new segment here. 
Verify that the segment ids are growing.\n if (outIndex >= nextIndex) {\n throw new Error(backend_util\n .getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage());\n }\n }\n\n if (outIndex < 0 || outIndex >= outputRows) {\n throw new Error(\n backend_util.getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage(\n outIndex, outputRows));\n }\n\n // If there is a gap between two indices, we need to set that gap to the\n // default value.\n if (outIndex > uninitializedIndex) {\n output.fill(defaultValue, uninitializedIndex * numCol, outIndex * numCol);\n }\n\n for (let i = start; i < end; ++i) {\n const index = indices[i];\n if (index < 0 || index >= inputFlat[0]) {\n throw new Error(\n backend_util.getSparseSegmentReductionIndicesOutOfRangeErrorMessage(\n i, indices[i], inputFlat[0]));\n }\n for (let j = 0; j < numCol; j++) {\n output[outIndex * numCol + j] += input[index * numCol + j];\n }\n }\n\n if (isMean) {\n for (let j = 0; j < numCol; j++) {\n output[outIndex * numCol + j] /= end - start;\n }\n }\n\n start = end;\n ++end;\n uninitializedIndex = outIndex + 1;\n outIndex = nextIndex;\n if (end > numIndices) {\n break;\n }\n }\n\n // Fill the gap at the end with the default value.\n if (uninitializedIndex < outputRows) {\n output.fill(defaultValue, uninitializedIndex * numCol, outputRows * numCol);\n }\n\n return [output, outputShape];\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, SparseSegmentMean, SparseSegmentMeanInputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nimport {sparseSegmentReductionImpl} from './SparseSegmentReduction_impl';\n\nexport function sparseSegmentMean(\n args: {inputs: SparseSegmentMeanInputs, backend: MathBackendCPU}):\n TensorInfo {\n const {inputs, backend} = args;\n const {data, indices, segmentIds} = inputs;\n if (data.shape.length < 1) {\n throw new Error(\n `Data should be at least 1 dimensional but received scalar`);\n }\n if (indices.shape.length !== 1) {\n throw new Error(`Indices should be a vector but received shape\n ${indices.shape}`);\n }\n if (segmentIds.shape.length !== 1) {\n throw new Error(`Segment ids should be a vector but received shape\n ${segmentIds.shape}`);\n }\n if (indices.shape[0] !== segmentIds.shape[0]) {\n throw new Error(`segmentIds and indices should have same size.`);\n }\n\n const $data = backend.data.get(data.dataId).values as TypedArray;\n const $indices = backend.data.get(indices.dataId).values as TypedArray;\n const $segmentIds = backend.data.get(segmentIds.dataId).values as TypedArray;\n\n const [outputData, outputDataShape] = sparseSegmentReductionImpl(\n $data, data.shape, data.dtype, $indices, $segmentIds, true);\n return backend.makeTensorInfo(outputDataShape, data.dtype, outputData);\n}\n\nexport const sparseSegmentMeanConfig: KernelConfig = {\n 
kernelName: SparseSegmentMean,\n backendName: 'cpu',\n kernelFunc: sparseSegmentMean,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, SparseSegmentSum, SparseSegmentSumInputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nimport {sparseSegmentReductionImpl} from './SparseSegmentReduction_impl';\n\nexport function sparseSegmentSum(\n args: {inputs: SparseSegmentSumInputs, backend: MathBackendCPU}):\n TensorInfo {\n const {inputs, backend} = args;\n const {data, indices, segmentIds} = inputs;\n if (data.shape.length < 1) {\n throw new Error(\n `Data should be at least 1 dimensional but received scalar`);\n }\n if (indices.shape.length !== 1) {\n throw new Error(`Indices should be a vector but received shape\n ${indices.shape}`);\n }\n if (segmentIds.shape.length !== 1) {\n throw new Error(`Segment ids should be a vector but received shape\n ${segmentIds.shape}`);\n }\n if (indices.shape[0] !== segmentIds.shape[0]) {\n throw new Error(`segmentIds and indices should have same size.`);\n }\n\n const $data = backend.data.get(data.dataId).values as TypedArray;\n const $indices = backend.data.get(indices.dataId).values as TypedArray;\n const $segmentIds = backend.data.get(segmentIds.dataId).values as TypedArray;\n\n const [outputData, outputDataShape] = sparseSegmentReductionImpl(\n $data, data.shape, data.dtype, $indices, $segmentIds);\n return backend.makeTensorInfo(outputDataShape, data.dtype, outputData);\n}\n\nexport const sparseSegmentSumConfig: KernelConfig = {\n kernelName: SparseSegmentSum,\n backendName: 'cpu',\n kernelFunc: sparseSegmentSum,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
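A small worked example (illustrative, following the shared semantics of sparseSegmentReductionImpl used by both kernels above): rows of data gathered by indices are combined per segment id, either summed or averaged.

const segData = [[1, 2], [3, 4], [5, 6]];   // shape [3, 2]
const segIndices = [0, 2];                  // gather rows 0 and 2
const segmentIds = [0, 0];                  // both entries fall in segment 0

// sparseSegmentSum  -> [[6, 8]]   (elementwise sum of rows 0 and 2)
// sparseSegmentMean -> [[3, 4]]   (sum divided by the 2 entries in the segment)
console.log(segData, segIndices, segmentIds);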
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, Rank, SparseToDense, SparseToDenseAttrs, SparseToDenseInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {scatterImpl} from './Scatter_impl';\n\nexport function sparseToDense(args: {\n inputs: SparseToDenseInputs,\n backend: MathBackendCPU,\n attrs: SparseToDenseAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {sparseIndices, sparseValues, defaultValue} = inputs;\n const {outputShape} = attrs;\n\n const {sliceRank, numUpdates, sliceSize, strides, outputSize} =\n backend_util.calculateShapes(sparseValues, sparseIndices, outputShape);\n const sumDupeIndices = false;\n\n const indicesBuf = backend.bufferSync(sparseIndices);\n\n let outBuf;\n switch (sparseValues.dtype) {\n case 'bool': {\n const updatesBuf = backend.bufferSync(sparseValues);\n const $defaultValue =\n Boolean(backend.data.get(defaultValue.dataId).values[0]);\n outBuf = scatterImpl(\n indicesBuf, updatesBuf, outputShape, outputSize, sliceSize,\n numUpdates, sliceRank, strides, $defaultValue, sumDupeIndices);\n break;\n }\n case 'float32': {\n const updatesBuf = backend.bufferSync(sparseValues);\n const $defaultValue =\n backend.data.get(defaultValue.dataId).values[0] as number;\n outBuf = scatterImpl(\n indicesBuf, updatesBuf, outputShape, outputSize, sliceSize,\n numUpdates, sliceRank, strides, $defaultValue, sumDupeIndices);\n break;\n }\n case 'int32': {\n const updatesBuf = backend.bufferSync(sparseValues);\n const $defaultValue =\n backend.data.get(defaultValue.dataId).values[0] as number;\n outBuf = scatterImpl(\n indicesBuf, updatesBuf, outputShape, outputSize, sliceSize,\n numUpdates, sliceRank, strides, $defaultValue, sumDupeIndices);\n break;\n }\n case 'string': {\n const updatesBuf = backend.bufferSync(sparseValues);\n const $defaultValue = util.decodeString(\n backend.data.get(defaultValue.dataId).values[0] as Uint8Array);\n outBuf = scatterImpl(\n indicesBuf, updatesBuf, outputShape, outputSize, sliceSize,\n numUpdates, sliceRank, strides, $defaultValue, sumDupeIndices);\n break;\n }\n default:\n throw new Error(`Unsupported type ${sparseValues.dtype}`);\n }\n return backend.makeTensorInfo(outputShape, outBuf.dtype, outBuf.values);\n}\n\nexport const sparseToDenseConfig: KernelConfig = {\n kernelName: SparseToDense,\n backendName: 'cpu',\n kernelFunc: sparseToDense as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
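A hedged usage sketch of the op the sparseToDense kernel above backs, assuming the op-level tf.sparseToDense API in @tensorflow/tfjs: scatter two values into a length-4 vector and fill the rest with the default value.

import * as tf from '@tensorflow/tfjs';

// 2-D indices of shape [numUpdates, rank]; output shape [4]; default value 0.
const sparseIndices = tf.tensor2d([[0], [2]], [2, 1], 'int32');
const sparseValues = tf.tensor1d([10, 20], 'int32');
const dense = tf.sparseToDense(sparseIndices, sparseValues, [4], 0);
dense.print(); // [10, 0, 20, 0]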
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, SplitVAttrs, SplitVInputs} from '@tensorflow/tfjs-core';\nimport {KernelConfig, KernelFunc, SplitV, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {slice} from './Slice';\n\nexport function splitV(\n args: {inputs: SplitVInputs, backend: MathBackendCPU, attrs: SplitVAttrs}):\n TensorInfo[] {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {numOrSizeSplits, axis} = attrs;\n\n const $axis = util.parseAxisParam(axis, x.shape)[0];\n const splitSizes = backend_util.prepareSplitSize(x, numOrSizeSplits, $axis);\n\n const begin = new Array(x.shape.length).fill(0);\n const size = x.shape.slice();\n return splitSizes.map(s => {\n const sliceSize = [...size];\n sliceSize[$axis] = s;\n const sliceT =\n slice({inputs: {x}, backend, attrs: {begin, size: sliceSize}});\n begin[$axis] += s;\n return sliceT;\n });\n}\n\nexport const splitVConfig: KernelConfig = {\n kernelName: SplitV,\n backendName: 'cpu',\n kernelFunc: splitV as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Sqrt} from '@tensorflow/tfjs-core';\n\nimport {createSimpleUnaryImpl} from '../utils/unary_impl';\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const sqrtImpl = createSimpleUnaryImpl((xi) => Math.sqrt(xi));\nexport const sqrt = unaryKernelFunc(Sqrt, (xi) => Math.sqrt(xi));\n\nexport const sqrtConfig: KernelConfig = {\n kernelName: Sqrt,\n backendName: 'cpu',\n kernelFunc: sqrt,\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. 
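A hedged usage sketch of the op the splitV kernel above backs, assuming the op-level tf.split API in @tensorflow/tfjs: uneven split sizes along axis 0 produce slices whose begin offsets accumulate, mirroring the begin/size loop in the kernel.

import * as tf from '@tensorflow/tfjs';

const x = tf.tensor1d([1, 2, 3, 4, 5, 6]);
const [a, b, c] = tf.split(x, [2, 1, 3]);
a.print(); // [1, 2]
b.print(); // [3]
c.print(); // [4, 5, 6]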
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Square, SquareInputs} from '@tensorflow/tfjs-core';\nimport {KernelConfig} from '@tensorflow/tfjs-core';\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nexport const squareConfig: KernelConfig = {\n kernelName: Square,\n backendName: 'cpu',\n kernelFunc: ({inputs, backend}) => {\n const {x} = inputs as SquareInputs;\n const cpuBackend = backend as MathBackendCPU;\n assertNotComplex(x, 'square');\n\n const values = cpuBackend.data.get(x.dataId).values as Float32Array;\n const newValues = new Float32Array(values.length);\n for (let i = 0; i < values.length; ++i) {\n const value = values[i];\n newValues[i] = value * value;\n }\n const dataId = cpuBackend.write(newValues, x.shape, x.dtype);\n return {dataId, shape: x.shape, dtype: x.dtype};\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, SquaredDifference} from '@tensorflow/tfjs-core';\n\nimport {createSimpleBinaryKernelImpl} from '../utils/binary_impl';\nimport {binaryKernelFunc} from '../utils/binary_utils';\n\nexport const squaredDifferenceImpl =\n createSimpleBinaryKernelImpl(((a: number, b: number) => {\n const diff = a - b;\n return diff * diff;\n }));\nexport const squaredDifference =\n binaryKernelFunc(SquaredDifference, squaredDifferenceImpl);\n\nexport const squaredDifferenceConfig: KernelConfig = {\n kernelName: SquaredDifference,\n backendName: 'cpu',\n kernelFunc: squaredDifference\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Step, StepAttrs} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const step = unaryKernelFunc(Step, (xi, attrs) => {\n const stepAttrs = attrs as unknown as StepAttrs;\n if (isNaN(xi)) {\n return NaN;\n } else {\n return xi > 0 ? 1 : stepAttrs.alpha;\n }\n});\n\nexport const stepConfig: KernelConfig = {\n kernelName: Step,\n backendName: 'cpu',\n kernelFunc: step,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {buffer, Rank, TensorBuffer} from '@tensorflow/tfjs-core';\n\nexport function stridedSliceImpl(\n outShape: number[], xBuf: TensorBuffer, strides: number[],\n begin: number[]): TensorBuffer {\n const outBuf = buffer(outShape, xBuf.dtype);\n\n for (let i = 0; i < outBuf.size; i++) {\n const loc = outBuf.indexToLoc(i);\n\n const newLoc: number[] = new Array(loc.length);\n for (let j = 0; j < newLoc.length; j++) {\n newLoc[j] = loc[j] * strides[j] + begin[j];\n }\n outBuf.set(xBuf.get(...newLoc), ...loc);\n }\n\n return outBuf as TensorBuffer;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
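A standalone sketch of the index mapping in stridedSliceImpl above, reduced to the 1-D case: output position i reads input position begin + i * stride. Plain arrays only; the bundled version does this per dimension through TensorBuffer locations.

function stridedSlice1d(x: number[], begin: number, stride: number, outSize: number): number[] {
  const out: number[] = [];
  for (let i = 0; i < outSize; i++) {
    out.push(x[begin + i * stride]);
  }
  return out;
}

console.log(stridedSlice1d([0, 1, 2, 3, 4, 5, 6, 7], 1, 2, 3)); // [1, 3, 5]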
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Rank, slice_util, StridedSlice, StridedSliceAttrs, StridedSliceInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {reshape} from './Reshape';\nimport {slice} from './Slice';\nimport {stridedSliceImpl} from './StridedSlice_impl';\n\nexport function stridedSlice(args: {\n inputs: StridedSliceInputs,\n backend: MathBackendCPU,\n attrs: StridedSliceAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {\n begin,\n end,\n strides,\n beginMask,\n endMask,\n ellipsisMask,\n newAxisMask,\n shrinkAxisMask\n } = attrs;\n\n assertNotComplex(x, 'stridedSlice');\n\n const {\n finalShapeSparse,\n finalShape,\n isIdentity,\n sliceDim0,\n isSimpleSlice,\n begin: $begin,\n end: $end,\n strides: $strides\n } =\n slice_util.sliceInfo(\n x.shape, begin, end, strides, beginMask, endMask, ellipsisMask,\n newAxisMask, shrinkAxisMask);\n\n let result;\n\n // ref:\n // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/strided_slice_op.cc\n if (isIdentity) {\n // Optimization #1, slice is a no-op plus reshape\n result = reshape({inputs: {x}, backend, attrs: {shape: finalShape}});\n } else if (sliceDim0 || isSimpleSlice) {\n // Optimization #2, slice is memory contiguous (only occurs in dim 0)\n util.assert(\n x.shape.length >= 1,\n () => `Input must have rank at least 1, got: ${x.shape.length}`);\n\n const size = slice_util.computeOutShape($begin, $end, $strides);\n // To tolerate begin[0] > end[0] (a 0-output slice), we min(begin, end).\n const sliced = slice({inputs: {x}, backend, attrs: {begin: $begin, size}});\n result =\n reshape({inputs: {x: sliced}, backend, attrs: {shape: finalShape}});\n backend.disposeIntermediateTensorInfo(sliced);\n } else {\n const xBuf = backend.bufferSync(x);\n const outBuf = stridedSliceImpl(finalShapeSparse, xBuf, $strides, $begin);\n\n result = backend.makeTensorInfo(finalShape, outBuf.dtype, outBuf.values);\n }\n\n return result;\n}\n\nexport const stridedSliceConfig: KernelConfig = {\n kernelName: StridedSlice,\n backendName: 'cpu',\n kernelFunc: stridedSlice as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {util} from '@tensorflow/tfjs-core';\n\n/**\n * The StringNGramsOp class creates ngrams from ragged string data.\n * The constructor contains all attributes related to the operation such as\n * padding widths and strings, and the compute function can be used to\n * compute the ngrams for different ragged tensor inputs.\n */\nclass StringNGramsOp {\n private separator: Uint8Array;\n private nGramWidths: number[];\n private padWidth: number;\n private leftPad: Uint8Array;\n private rightPad: Uint8Array;\n private preserveShort: boolean;\n\n constructor(\n separator: string, nGramWidths: number[], leftPad: string,\n rightPad: string, padWidth: number, preserveShortSequences: boolean) {\n this.separator = util.encodeString(separator);\n this.nGramWidths = nGramWidths;\n this.leftPad = util.encodeString(leftPad);\n this.rightPad = util.encodeString(rightPad);\n this.padWidth = padWidth;\n this.preserveShort = preserveShortSequences;\n }\n\n private getPadWidth(nGramWidth: number) {\n // Ngrams can be padded with either a fixed pad width or a dynamic pad\n // width depending on the 'padWidth' arg, but in no case should the padding\n // ever be wider than 'nGramWidth' - 1.\n return Math.min(\n this.padWidth < 0 ? nGramWidth - 1 : this.padWidth, nGramWidth - 1);\n }\n\n private getNumNGrams(length: number, nGramWidth: number) {\n const padWidth = this.getPadWidth(nGramWidth);\n return Math.max(0, ((length + 2 * padWidth) - nGramWidth) + 1);\n }\n\n private createNGrams(\n data: Uint8Array[], splitIndex: number, output: Uint8Array[],\n outputStartIndex: number, numNGrams: number, nGramWidth: number) {\n for (let nGramIndex = 0; nGramIndex < numNGrams; ++nGramIndex) {\n const padWidth = this.getPadWidth(nGramWidth);\n const leftPadding = Math.max(0, padWidth - nGramIndex);\n const rightPadding =\n Math.max(0, padWidth - (numNGrams - (nGramIndex + 1)));\n const numTokens = nGramWidth - (leftPadding + rightPadding);\n const dataStartIndex =\n splitIndex + (leftPadding > 0 ? 
0 : nGramIndex - padWidth);\n\n // Calculate the total expected size of the nGram so we can reserve the\n // correct amount of space in the string.\n let nGramSize = 0;\n // Size of the left padding.\n nGramSize += leftPadding * this.leftPad.length;\n // Size of the tokens.\n for (let n = 0; n < numTokens; ++n) {\n nGramSize += data[dataStartIndex + n].length;\n }\n // Size of the right padding.\n nGramSize += rightPadding * this.rightPad.length;\n // Size of the separators.\n const numSeparators = leftPadding + rightPadding + numTokens - 1;\n nGramSize += numSeparators * this.separator.length;\n\n // Build the nGram.\n output[outputStartIndex + nGramIndex] = new Uint8Array(nGramSize);\n const nGram = output[outputStartIndex + nGramIndex];\n\n let nextNGramIndex = 0;\n const appendToNGram = (str: Uint8Array) =>\n str.forEach((value) => nGram[nextNGramIndex++] = value);\n\n for (let n = 0; n < leftPadding; ++n) {\n appendToNGram(this.leftPad);\n appendToNGram(this.separator);\n }\n // Only output first numTokens - 1 pairs of data and separator\n for (let n = 0; n < numTokens - 1; ++n) {\n appendToNGram(data[dataStartIndex + n]);\n appendToNGram(this.separator);\n }\n // Handle case when there are no tokens or no right padding as these\n // can result in consecutive separators.\n if (numTokens > 0) {\n // If we have tokens, then output last and then pair each separator\n // with the right padding that follows, to ensure nGram ends either with\n // the token or with the right pad.\n appendToNGram(data[dataStartIndex + numTokens - 1]);\n for (let n = 0; n < rightPadding; ++n) {\n appendToNGram(this.separator);\n appendToNGram(this.rightPad);\n }\n } else {\n // If we don't have tokens, then the last item inserted into the nGram\n // has been the separator from the left padding loop above. Hence,\n // output right pad and separator and make sure to finish with a\n // padding, not a separator.\n for (let n = 0; n < rightPadding - 1; ++n) {\n appendToNGram(this.rightPad);\n appendToNGram(this.separator);\n }\n appendToNGram(this.rightPad);\n }\n }\n }\n\n // Data and splits together form the definition of the ragged tensor,\n // where data is 1 dimensional and contains the values of the tensor\n // and splits denotes the indices at which each row starts.\n public compute(data: Uint8Array[], splits: Int32Array):\n [Uint8Array[], Int32Array] {\n // Validate that the splits are valid indices into data, only if there are\n // splits specified.\n const inputDataSize = data.length;\n const splitsSize = splits.length;\n if (splitsSize > 0) {\n let prevSplit = splits[0];\n if (prevSplit !== 0) {\n throw new Error(`First split value must be 0, got ${prevSplit}`);\n }\n for (let i = 1; i < splitsSize; ++i) {\n let validSplits = splits[i] >= prevSplit;\n validSplits = validSplits && (splits[i] <= inputDataSize);\n if (!validSplits) {\n throw new Error(`Invalid split value ${splits[i]}, must be in [${\n prevSplit}, ${inputDataSize}]`);\n }\n prevSplit = splits[i];\n }\n if (prevSplit !== inputDataSize) {\n throw new Error(`Last split value must be data size. 
Expected ${\n inputDataSize}, got ${prevSplit}`);\n }\n }\n\n const numBatchItems = splitsSize - 1;\n const nGramsSplits = util.getArrayFromDType('int32', splitsSize);\n // If there is no data or size, return an empty ragged tensor.\n if (inputDataSize === 0 || splitsSize === 0) {\n const empty: Uint8Array[] = new Array(inputDataSize);\n for (let i = 0; i <= numBatchItems; ++i) {\n nGramsSplits[i] = 0;\n }\n return [empty, nGramsSplits];\n }\n\n nGramsSplits[0] = 0;\n for (let i = 1; i <= numBatchItems; ++i) {\n const length = splits[i] - splits[i - 1];\n let numNGrams = 0;\n this.nGramWidths.forEach((nGramWidth) => {\n numNGrams += this.getNumNGrams(length, nGramWidth);\n });\n if (this.preserveShort && length > 0 && numNGrams === 0) {\n numNGrams = 1;\n }\n nGramsSplits[i] = nGramsSplits[i - 1] + numNGrams;\n }\n\n const nGrams: Uint8Array[] = new Array(nGramsSplits[numBatchItems]);\n\n for (let i = 0; i < numBatchItems; ++i) {\n const splitIndex = splits[i];\n let outputStartIdx = nGramsSplits[i];\n this.nGramWidths.forEach((nGramWidth) => {\n const length = splits[i + 1] - splits[i];\n const numNGrams = this.getNumNGrams(length, nGramWidth);\n this.createNGrams(\n data, splitIndex, nGrams, outputStartIdx, numNGrams, nGramWidth);\n outputStartIdx += numNGrams;\n });\n // If we're preserving short sequences, check to see if no sequence was\n // generated by comparing the current output start idx to the original\n // one (nGramSplitsdata). If no ngrams were generated, then they will\n // be equal (since we increment outputStartIdx by numNGrams every\n // time we create a set of ngrams.)\n if (this.preserveShort && outputStartIdx === nGramsSplits[i]) {\n const dataLength = splits[i + 1] - splits[i];\n // One legitimate reason to not have any ngrams when this.preserveShort\n // is true is if the sequence itself is empty. In that case, move on.\n if (dataLength === 0) {\n continue;\n }\n // We don't have to worry about dynamic padding sizes here: if padding\n // was dynamic, every sequence would have had sufficient padding to\n // generate at least one nGram.\n const nGramWidth = dataLength + 2 * this.padWidth;\n const numNGrams = 1;\n this.createNGrams(\n data, splitIndex, nGrams, outputStartIdx, numNGrams, nGramWidth);\n }\n }\n return [nGrams, nGramsSplits];\n }\n}\n\nexport function stringNGramsImpl(\n data: Uint8Array[], dataSplits: Int32Array, separator: string,\n nGramWidths: number[], leftPad: string, rightPad: string, padWidth: number,\n preserveShortSequences: boolean): [Uint8Array[], Int32Array] {\n return new StringNGramsOp(\n separator, nGramWidths, leftPad, rightPad, padWidth,\n preserveShortSequences)\n .compute(data, dataSplits);\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
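A small worked example (illustrative, derived from the StringNGramsOp logic above) for a single ragged row with nGramWidths = [2], separator = ' ', and padWidth = 0, so no left or right padding is emitted.

const ngramData = ['this', 'is', 'a', 'test'];  // one ragged row
const ngramSplits = [0, 4];                     // the row spans data[0..4)

// Expected ngrams       -> ['this is', 'is a', 'a test']
// Expected ngram splits -> [0, 3]
console.log(ngramData, ngramSplits);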
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, StringNGrams, StringNGramsAttrs, StringNGramsInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nimport {stringNGramsImpl} from './StringNGrams_impl';\n\nexport function stringNGrams(args: {\n inputs: StringNGramsInputs,\n backend: MathBackendCPU,\n attrs: StringNGramsAttrs\n}): [TensorInfo, TensorInfo] {\n const {inputs, backend, attrs} = args;\n const {\n separator,\n nGramWidths,\n leftPad,\n rightPad,\n padWidth,\n preserveShortSequences\n } = attrs;\n const {data, dataSplits} = inputs;\n const $data = backend.data.get(data.dataId).values as Uint8Array[];\n const $dataSplits = backend.data.get(dataSplits.dataId).values as Int32Array;\n\n const [nGrams, nGramsSplits] = stringNGramsImpl(\n $data, $dataSplits, separator, nGramWidths, leftPad, rightPad, padWidth,\n preserveShortSequences);\n return [\n backend.makeTensorInfo([nGrams.length], 'string', nGrams),\n backend.makeTensorInfo(dataSplits.shape, 'int32', nGramsSplits),\n ];\n}\n\nexport const stringNGramsConfig: KernelConfig = {\n kernelName: StringNGrams,\n backendName: 'cpu',\n kernelFunc: stringNGrams as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {TypedArray, util} from '@tensorflow/tfjs-core';\n\nfunction split(\n str: Uint8Array, delimiters: Uint8Array, skipEmpty: boolean,\n result: Uint8Array[]): void {\n if (!str.length) {\n return;\n }\n // When the delimiter is empty, the input is split into individual characters.\n if (delimiters.length === 0) {\n for (let i = 0; i < str.length; ++i) {\n result.push(str.subarray(i, i + 1));\n }\n return;\n }\n // When there is one delimiter, the input is split only at that delimiter.\n if (delimiters.length === 1) {\n const delimiter = delimiters[0];\n let f = str.indexOf(delimiter);\n while (f !== -1) {\n const token = str.subarray(0, f);\n if (!skipEmpty || token.length !== 0) {\n result.push(token);\n }\n str = str.subarray(f + 1);\n f = str.indexOf(delimiter);\n }\n if (!skipEmpty || str.length !== 0) {\n result.push(str);\n }\n return;\n }\n // When there are multiple delimiters, the input is split at every instance\n // one of the delimiters appears.\n let tokenStart = 0;\n for (let i = 0; i < str.length + 1; i++) {\n if ((i === str.length) || (delimiters.indexOf(str[i]) !== -1)) {\n const token = str.subarray(tokenStart, i);\n if (!skipEmpty || token.length !== 0) {\n result.push(token);\n }\n tokenStart = i + 1;\n }\n }\n}\n\nexport function stringSplitImpl(\n input: Uint8Array[], delimiter: Uint8Array,\n skipEmpty: boolean): [TypedArray, Uint8Array[], [number, number]] {\n const batchSize = input.length;\n\n // Empty delimiter means split the input character by character.\n const tokens: Uint8Array[] = [];\n\n let outputSize = 0;\n let maxNumEntries = 0;\n const numIndices: number[] = new Array(batchSize);\n for (let i = 0; i < batchSize; ++i) {\n const prevTokensLength = tokens.length;\n split(input[i], delimiter, skipEmpty, tokens);\n const nEntries = tokens.length - prevTokensLength;\n numIndices[i] = nEntries;\n outputSize += nEntries;\n maxNumEntries = Math.max(maxNumEntries, nEntries);\n }\n\n const indices = util.getArrayFromDType('int32', outputSize * 2) as TypedArray;\n const values: Uint8Array[] = new Array(outputSize);\n const shape: [number, number] = [batchSize, maxNumEntries];\n\n let c = 0;\n for (let i = 0; i < batchSize; ++i) {\n for (let j = 0; j < numIndices[i]; ++j) {\n // indices is a 2d tensor with shape of [outputSize, 2]\n indices[c * 2] = i;\n indices[c * 2 + 1] = j;\n values[c] = tokens[c];\n ++c;\n }\n }\n\n return [indices, values, shape];\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, StringSplit, StringSplitAttrs, StringSplitInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nimport {stringSplitImpl} from './StringSplit_impl';\n\nexport function stringSplit(args: {\n inputs: StringSplitInputs,\n backend: MathBackendCPU,\n attrs: StringSplitAttrs\n}): [TensorInfo, TensorInfo, TensorInfo] {\n const {inputs, backend, attrs} = args;\n const {skipEmpty} = attrs;\n const {input, delimiter} = inputs;\n\n if (input.dtype !== 'string') {\n throw new Error('Input must be of datatype string');\n }\n if (input.shape.length !== 1) {\n throw new Error(`Input must be a vector, got shape: ${input.shape}`);\n }\n if (delimiter.shape.length !== 0) {\n throw new Error(\n `Delimiter must be a scalar, got shape: ${delimiter.shape}`);\n }\n\n const $input = backend.data.get(input.dataId).values as Uint8Array[];\n const $delimiter = backend.data.get(delimiter.dataId).values[0] as Uint8Array;\n\n const [indices, values, shape] =\n stringSplitImpl($input, $delimiter, skipEmpty);\n const outputSize = values.length;\n return [\n backend.makeTensorInfo([outputSize, 2], 'int32', indices),\n backend.makeTensorInfo([outputSize], 'string', values),\n backend.makeTensorInfo([2], 'int32', new Int32Array(shape))\n ];\n}\n\nexport const stringSplitConfig: KernelConfig = {\n kernelName: StringSplit,\n backendName: 'cpu',\n kernelFunc: stringSplit as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {TypedArray, util} from '@tensorflow/tfjs-core';\n\nexport function stringToHashBucketFastImpl(\n input: Uint8Array[], numBuckets: number): TypedArray {\n const output = util.getArrayFromDType('int32', input.length) as TypedArray;\n\n for (let i = 0; i < input.length; ++i) {\n output[i] =\n util.fingerPrint64(input[i]).modulo(numBuckets).getLowBitsUnsigned();\n }\n\n return output;\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, StringToHashBucketFast, StringToHashBucketFastAttrs, StringToHashBucketFastInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nimport {stringToHashBucketFastImpl} from './StringToHashBucketFast_impl';\n\nexport function stringToHashBucketFast(args: {\n inputs: StringToHashBucketFastInputs,\n backend: MathBackendCPU,\n attrs: StringToHashBucketFastAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {numBuckets} = attrs;\n const {input} = inputs;\n\n if (input.dtype !== 'string') {\n throw new Error('Input must be of datatype string');\n }\n if (numBuckets <= 0) {\n throw new Error(`Number of buckets must be at least 1`);\n }\n\n const $input = backend.data.get(input.dataId).values as Uint8Array[];\n\n const output = stringToHashBucketFastImpl($input, numBuckets);\n return backend.makeTensorInfo(input.shape, 'int32', output);\n}\n\nexport const stringToHashBucketFastConfig: KernelConfig = {\n kernelName: StringToHashBucketFast,\n backendName: 'cpu',\n kernelFunc: stringToHashBucketFast as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Tan} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const tan = unaryKernelFunc(Tan, (xi) => Math.tan(xi));\n\nexport const tanConfig: KernelConfig = {\n kernelName: Tan,\n backendName: 'cpu',\n kernelFunc: tan,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Tanh} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../utils/unary_utils';\n\nexport const tanh = unaryKernelFunc(Tanh, (xi) => Math.tanh(xi));\n\nexport const tanhConfig: KernelConfig = {\n kernelName: Tanh,\n backendName: 'cpu',\n kernelFunc: tanh,\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {buffer, DataType, Rank, TensorBuffer} from '@tensorflow/tfjs-core';\n\n/**\n * An implementation of the tile kernel shared between webgl and cpu for string\n * tensors only.\n */\n\nexport function tileImpl(\n xBuf: TensorBuffer,\n reps: number[]): TensorBuffer {\n const newShape: number[] = new Array(xBuf.rank);\n for (let i = 0; i < newShape.length; i++) {\n newShape[i] = xBuf.shape[i] * reps[i];\n }\n const result = buffer(newShape, xBuf.dtype);\n for (let i = 0; i < result.values.length; ++i) {\n const newLoc = result.indexToLoc(i);\n\n const originalLoc: number[] = new Array(xBuf.rank);\n for (let j = 0; j < originalLoc.length; j++) {\n originalLoc[j] = newLoc[j] % xBuf.shape[j];\n }\n\n const originalIndex = xBuf.locToIndex(originalLoc);\n\n result.values[i] = xBuf.values[originalIndex];\n }\n return result as TensorBuffer;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, TensorInfo, Tile, TileAttrs, TileInputs} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {tileImpl} from './Tile_impl';\n\nexport function tile(\n args: {inputs: TileInputs, backend: MathBackendCPU, attrs: TileAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {reps} = attrs;\n\n assertNotComplex(x, 'tile');\n const outBuf = tileImpl(backend.bufferSync(x), reps);\n\n return backend.makeTensorInfo(outBuf.shape, outBuf.dtype, outBuf.values);\n}\n\nexport const tileConfig: KernelConfig = {\n kernelName: Tile,\n backendName: 'cpu',\n kernelFunc: tile as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n/** An implementation of the TopK kernel shared between webgl and cpu. */\n\nimport {buffer, NumericDataType, Rank, ShapeMap, Tensor, TensorBuffer, TypedArray, util} from '@tensorflow/tfjs-core';\n\ntype Pair = {\n value: number,\n index: number\n};\n\nconst comparePair = (a: Pair, b: Pair) => {\n const valueDiff = b.value - a.value;\n return valueDiff === 0 ? 
a.index - b.index : valueDiff;\n};\n\n/**\n * Partitions array where all elements smaller than the (k+1) smallest element\n * are found to the left of it, and all larger to the right of it.\n * Based on the Floyd-Rivest Algorithm, ref:\n * https://en.wikipedia.org/wiki/Floyd%E2%80%93Rivest_algorithm\n * @param array: Array to partition\n * @param left: Left index for the interval\n * @param right: Right index for the interval\n * @param k: Desired index value, where array[k] is the (k+1)th smallest element\n * when left = 0\n */\nfunction select(array: Pair[], k: number, left = 0, right = array.length - 1) {\n while (right > left) {\n // Use select recursively to sample a smaller set of size s\n // the arbitrary constants 600 and 0.5 are used in the original\n // version to minimize execution time.\n if (right - left > 600) {\n const n = right - left + 1;\n const i = k - left + 1;\n const z = Math.log(n);\n const s = 0.5 * Math.exp(2 * z / 3);\n const sd = 0.5 * Math.sqrt(z * s * (n - s) / n) * Math.sign(i - n / 2);\n const newLeft = Math.max(left, Math.floor(k - i * s / n + sd));\n const newRight = Math.min(right, Math.floor(k + (n - i) * s / n + sd));\n select(array, k, newLeft, newRight);\n }\n // partition the elements between left and right around t\n const t = array[k];\n let i = left;\n let j = right;\n\n util.swap(array, left, k);\n\n if (comparePair(array[right], t) > 0) {\n util.swap(array, left, right);\n }\n while (i < j) {\n util.swap(array, i, j);\n i++;\n j--;\n while (comparePair(array[i], t) < 0) {\n i = i + 1;\n }\n while (comparePair(array[j], t) > 0) {\n j = j - 1;\n }\n }\n if (comparePair(array[left], t) === 0) {\n util.swap(array, left, j);\n } else {\n j = j + 1;\n util.swap(array, j, right);\n }\n // Adjust left and right towards the boundaries of the subset\n // containing the (k - left + 1)th smallest element.\n if (j <= k) {\n left = j + 1;\n }\n if (k <= j) {\n right = j - 1;\n }\n }\n}\n\nexport function topKImpl(\n x: TypedArray, xShape: number[], xDtype: NumericDataType, k: number,\n sorted: boolean):\n [TensorBuffer, TensorBuffer] {\n // Reshape into a 2d tensor [batch, lastDim] and compute topk along lastDim.\n const lastDim = xShape[xShape.length - 1];\n const [batch, size] = [x.length / lastDim, lastDim];\n const allTopKVals = util.getTypedArrayFromDType(xDtype, batch * k);\n const allTopKIndices = util.getTypedArrayFromDType('int32', batch * k);\n\n for (let b = 0; b < batch; b++) {\n const offset = b * size;\n const vals = x.subarray(offset, offset + size);\n\n let valAndInd: Pair[] = new Array(vals.length);\n vals.forEach(\n (value: number, index: number) => valAndInd[index] = {value, index});\n\n if (k < valAndInd.length) {\n select(valAndInd, k);\n valAndInd = valAndInd.slice(0, k);\n }\n\n if (sorted) {\n valAndInd.sort(comparePair);\n }\n \n const outOffset = b * k;\n const topKVals = allTopKVals.subarray(outOffset, outOffset + k);\n const topKIndices = allTopKIndices.subarray(outOffset, outOffset + k);\n for (let i = 0; i < k; i++) {\n topKVals[i] = valAndInd[i].value;\n topKIndices[i] = valAndInd[i].index;\n }\n }\n // Reshape back to the original input shape, except that the last\n // dimension is k.\n const outputShape = xShape.slice();\n outputShape[outputShape.length - 1] = k;\n\n return [\n buffer(outputShape as ShapeMap[R], xDtype, allTopKVals),\n buffer(outputShape as ShapeMap[R], 'int32', allTopKIndices)\n ];\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, NumericDataType, TensorInfo, TopK, TopKAttrs, TopKInputs, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {topKImpl} from './TopK_impl';\n\nexport function topK(\n args: {inputs: TopKInputs, backend: MathBackendCPU, attrs: TopKAttrs}):\n [TensorInfo, TensorInfo] {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {k, sorted} = attrs;\n\n assertNotComplex(x, 'topk');\n\n const xVals = backend.data.get(x.dataId).values as TypedArray;\n const [allTopKVals, allTopKIndices] =\n topKImpl(xVals, x.shape, x.dtype as NumericDataType, k, sorted);\n\n return [\n backend.makeTensorInfo(\n allTopKVals.shape, allTopKVals.dtype, allTopKVals.values),\n backend.makeTensorInfo(\n allTopKIndices.shape, allTopKIndices.dtype, allTopKIndices.values)\n ];\n}\n\nexport const topKConfig: KernelConfig = {\n kernelName: TopK,\n backendName: 'cpu',\n kernelFunc: topK as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, NumericDataType, TensorInfo, Transform, TransformAttrs, TransformInputs, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\n\nexport function transform(args: {\n inputs: TransformInputs,\n attrs: TransformAttrs,\n backend: MathBackendCPU\n}): TensorInfo {\n const {inputs, attrs, backend} = args;\n const {image, transforms} = inputs;\n const {interpolation, fillMode, fillValue, outputShape} = attrs;\n\n const [batch, imageHeight, imageWidth, numChannels] = image.shape;\n const [outHeight, outWidth] =\n outputShape != null ? 
outputShape : [imageHeight, imageWidth];\n const outShape = [batch, outHeight, outWidth, numChannels];\n\n const inStrides = util.computeStrides(image.shape);\n const batchInStride = inStrides[0];\n const rowInStride = inStrides[1];\n const colInStride = inStrides[2];\n\n const outStrides = util.computeStrides(outShape);\n const batchOutStride = outStrides[0];\n const rowOutStride = outStrides[1];\n const colOutStride = outStrides[2];\n\n const outVals = util.getTypedArrayFromDType(\n image.dtype as NumericDataType, util.sizeFromShape(outShape));\n\n outVals.fill(fillValue);\n\n const imageVals = backend.data.get(image.dataId).values as TypedArray;\n const transformVals =\n backend.data.get(transforms.dataId).values as TypedArray;\n\n // Ref TF implementation:\n // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/image/image_ops.h\n for (let b = 0; b < batch; ++b) {\n const transform = transforms.shape[0] === 1 ?\n transformVals :\n transformVals.subarray(b * 8, b * 8 + 8);\n\n for (let outY = 0; outY < outHeight; ++outY) {\n for (let outX = 0; outX < outWidth; ++outX) {\n for (let channel = 0; channel < numChannels; ++channel) {\n let val;\n\n const projection = transform[6] * outX + transform[7] * outY + 1;\n\n if (projection === 0) {\n // Return the fill value for infinite coordinates,\n // which are outside the input image\n continue;\n }\n\n const inX =\n (transform[0] * outX + transform[1] * outY + transform[2]) /\n projection;\n const inY =\n (transform[3] * outX + transform[4] * outY + transform[5]) /\n projection;\n\n const x = mapCoord(inX, imageWidth, fillMode);\n const y = mapCoord(inY, imageHeight, fillMode);\n\n switch (interpolation) {\n case 'nearest':\n val = nearestInterpolation(\n imageVals, imageHeight, imageWidth, batchInStride,\n rowInStride, colInStride, b, y, x, channel, fillValue);\n break;\n case 'bilinear':\n val = bilinearInterpolation(\n imageVals, imageHeight, imageWidth, batchInStride,\n rowInStride, colInStride, b, y, x, channel, fillValue);\n break;\n default:\n throw new Error(\n `Error in Transform: Expect 'nearest' or ` +\n `'bilinear', but got ${interpolation}`);\n }\n\n const ind =\n b * batchOutStride + outY * rowOutStride +\n outX * colOutStride + channel;\n\n outVals[ind] = val;\n }\n }\n }\n\n return backend.makeTensorInfo(outShape, image.dtype, outVals);\n }\n\n const dataId = backend.write(outVals, outShape, image.dtype);\n return {dataId, shape: image.shape, dtype: image.dtype};\n}\n\nexport const transformConfig: KernelConfig = {\n kernelName: Transform,\n backendName: 'cpu',\n kernelFunc: transform as unknown as KernelFunc\n};\n\nfunction mapCoord(\n outCoord: number, len: number,\n mode: 'constant'|'reflect'|'wrap'|'nearest') {\n switch (mode) {\n case 'reflect':\n return mapCoordReflect(outCoord, len);\n case 'wrap':\n return mapCoordWrap(outCoord, len);\n case 'nearest':\n return mapCoordNearest(outCoord, len);\n case 'constant':\n default:\n return mapCoordConstant(outCoord, len);\n }\n}\n\nfunction mapCoordReflect(outCoord: number, len: number): number {\n // Reflect [abcd] to [dcba|abcd|dcba].\n let inCoord = outCoord;\n if (inCoord < 0) {\n if (len <= 1) {\n inCoord = 0;\n } else {\n const sz2 = 2 * len;\n if (inCoord < sz2) {\n inCoord = sz2 * Math.trunc(-inCoord / sz2) + inCoord;\n }\n inCoord = inCoord < -len ? 
inCoord + sz2 : -inCoord - 1;\n }\n } else if (inCoord > len - 1) {\n if (len <= 1) {\n inCoord = 0;\n } else {\n const sz2 = 2 * len;\n inCoord -= sz2 * Math.trunc(inCoord / sz2);\n if (inCoord >= len) {\n inCoord = sz2 - inCoord - 1;\n }\n }\n }\n // clamp is necessary because when outCoord = 3.5 and len = 4,\n // inCoord = 3.5 and will be rounded to 4 in nearest interpolation.\n return util.clamp(0, inCoord, len - 1);\n}\n\nfunction mapCoordWrap(outCoord: number, len: number): number {\n // Wrap [abcd] to [abcd|abcd|abcd].\n let inCoord = outCoord;\n if (inCoord < 0) {\n if (len <= 1) {\n inCoord = 0;\n } else {\n const sz = len - 1;\n inCoord += len * (Math.trunc(-inCoord / sz) + 1);\n }\n } else if (inCoord > len - 1) {\n if (len <= 1) {\n inCoord = 0;\n } else {\n const sz = len - 1;\n inCoord -= len * Math.trunc(inCoord / sz);\n }\n }\n // clamp is necessary because when outCoord = -0.5 and len = 4,\n // inCoord = 3.5 and will be rounded to 4 in nearest interpolation.\n return util.clamp(0, inCoord, len - 1);\n}\n\nfunction mapCoordConstant(outCoord: number, len: number): number {\n return outCoord;\n}\n\nfunction mapCoordNearest(outCoord: number, len: number): number {\n return util.clamp(0, outCoord, len - 1);\n}\n\nfunction readWithFillValue(\n imageVals: TypedArray, imageHeight: number, imageWidth: number,\n batchStride: number, rowStride: number, colStride: number, batch: number,\n y: number, x: number, channel: number, fillValue: number): number {\n const ind = batch * batchStride + y * rowStride + x * colStride + channel;\n if (0 <= y && y < imageHeight && 0 <= x && x < imageWidth) {\n return imageVals[ind];\n } else {\n return fillValue;\n }\n}\n\nfunction nearestInterpolation(\n imageVals: TypedArray, imageHeight: number, imageWidth: number,\n batchStride: number, rowStride: number, colStride: number, batch: number,\n y: number, x: number, channel: number, fillValue: number): number {\n const $y = Math.round(y);\n const $x = Math.round(x);\n\n return readWithFillValue(\n imageVals, imageHeight, imageWidth, batchStride, rowStride, colStride,\n batch, $y, $x, channel, fillValue);\n}\n\nfunction bilinearInterpolation(\n imageVals: TypedArray, imageHeight: number, imageWidth: number,\n batchStride: number, rowStride: number, colStride: number, batch: number,\n y: number, x: number, channel: number, fillValue: number) {\n const yFloor = Math.floor(y);\n const xFloor = Math.floor(x);\n const yCeil = yFloor + 1;\n const xCeil = xFloor + 1;\n // f(x, yFloor) = (xCeil - x) / (xCeil - xFloor) * f(xFloor, yFloor)\n // + (x - xFloor) / (xCeil - xFloor) * f(xCeil, yFloor)\n const valueYFloor =\n (xCeil - x) *\n readWithFillValue(\n imageVals, imageHeight, imageWidth, batchStride, rowStride,\n colStride, batch, yFloor, xFloor, channel, fillValue) +\n (x - xFloor) *\n readWithFillValue(\n imageVals, imageHeight, imageWidth, batchStride, rowStride,\n colStride, batch, yFloor, xCeil, channel, fillValue);\n // f(x, yCeil) = (xCeil - x) / (xCeil - xFloor) * f(xFloor, yCeil)\n // + (x - xFloor) / (xCeil - xFloor) * f(xCeil, yCeil)\n const valueYCeil =\n (xCeil - x) *\n readWithFillValue(\n imageVals, imageHeight, imageWidth, batchStride, rowStride,\n colStride, batch, yCeil, xFloor, channel, fillValue) +\n (x - xFloor) *\n readWithFillValue(\n imageVals, imageHeight, imageWidth, batchStride, rowStride,\n colStride, batch, yCeil, xCeil, channel, fillValue);\n // f(x, y) = (yCeil - y) / (yCeil - yFloor) * f(x, yFloor)\n // + (y - yFloor) / (yCeil - yFloor) * f(x, yCeil)\n return (yCeil - y) * 
valueYFloor + (y - yFloor) * valueYCeil;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {BackendValues, DataType, TensorBuffer, TypedArray, util} from '@tensorflow/tfjs-core';\n\nexport function uniqueImpl(\n values: BackendValues, axis: number, shape: number[], dtype: DataType): {\n outputValues: BackendValues,\n outputShape: number[],\n indices: BackendValues\n} {\n // Normalize and validate axis.\n const $axis = util.parseAxisParam(axis, shape)[0];\n\n // Calculate the new shape that is suitable for extracting data along the\n // given axis.\n //\n // The rank is 3.\n // The size of the 1st dimension is the size of all the axes < the given axis.\n // The size of the 2nd dimension is the same as the size of the given axis.\n // The size of the 3rd dimension is the size of all the axes > the given axis.\n //\n // For example, for a 4D tensor with shape=[2, 3, 5, 4] and axis=2, the\n // newShape would be: [2*3, 5, 4].\n //\n // Note that this is not the final output shape. This will be the shape for an\n // intermediate TensorBuffer (see inputBuffer below) to allow us to extract\n // values along the given axis. To demonstrate how it works, consider the\n // following example:\n //\n // Input: a 3D tensor, with shape [1, 2, 3]\n // [\n // [\n // [1,2,3],\n // [4,5,6]\n // ]\n // ]\n // Axis: 2 (the last axis).\n // Along axis 2, we expect to extract 3 tensors: [1,4], [2,5], [3,6].\n //\n // For this example, newShape would be: [2, 3, 1], where 2 is calculated from\n // 1*2. The re-shaped data would look like:\n //\n // [\n // [\n // [1], [2], [3]\n // ],\n // [\n // [4], [5], [6]\n // ]\n // ]\n //\n // Then, we can construct a 3-level nested loop by the following dimension\n // order to extract the values along the axis (dimension1):\n // i: dimension1 // 0,1,2 (newShape[1])\n // m: dimension0 // 0,1 (newShape[0])\n // n: dimension2 // 0 (newShape[2])\n //\n // m, i, n\n // ---------\n // Iteration 0: data at [0, 0, 0] => \"1\"\n // Iteration 1: data at [1, 0, 0] => \"4\"\n // We got [1,4].\n // Iteration 2: data at [0, 1, 0] => \"2\"\n // Iteration 3: data at [1, 1, 0] => \"5\"\n // We got [2,5].\n // Iteration 4: data at [0, 2, 0] => \"3\"\n // Iteration 5: data at [1, 2, 0] => \"6\"\n // We got [3,6].\n const newShape = [1, shape[0], 1];\n for (let i = 0; i < $axis; i++) {\n newShape[0] *= shape[i];\n }\n newShape[1] = shape[$axis];\n for (let i = $axis + 1; i < shape.length; i++) {\n newShape[2] *= shape[i];\n }\n\n // A map from unique elements (their string representations) to their values\n // in \"indices\" (below).\n const uniqueElements: {[key: string]: number} = {};\n // The indices of each unique element in the original tensor along the given\n // axis. 
It is 1D and has the same size as the given axis.\n const indices = new Int32Array(shape[$axis]);\n // Create a buffer so we can easily extract value at a given location.\n const inputBuffer = new TensorBuffer(newShape, dtype, values as TypedArray);\n // The indices along the given axis that have unique elements. This is a\n // de-duped version of \"indices\" above.\n const uniqueIndices: number[] = [];\n const is1DTensor = newShape[0] === 1 && newShape[2] === 1;\n for (let i = 0; i < shape[$axis]; i++) {\n // Extract values along the axis.\n let element: string;\n if (is1DTensor) {\n // Fast path for 1D tensor input.\n element = values[i].toString();\n } else {\n const axisValues = [];\n for (let m = 0; m < newShape[0]; m++) {\n for (let n = 0; n < newShape[2]; n++) {\n axisValues.push(inputBuffer.get(m, i, n));\n }\n }\n element = axisValues.join(',');\n }\n\n // Dedup and update various indices.\n if (uniqueElements[element] !== undefined) {\n indices[i] = uniqueElements[element];\n } else {\n const uniqueIndex = Object.keys(uniqueElements).length;\n uniqueElements[element] = uniqueIndex;\n indices[i] = uniqueIndex;\n uniqueIndices.push(i);\n }\n }\n\n // Now we know where each of the unique elements are located along the axis\n // (uniqueIndices). Extract them from input buffer and store them in the\n // output buffer.\n const outputTmpShape = newShape.slice();\n outputTmpShape[1] = Object.keys(uniqueElements).length;\n const outputBuffer = new TensorBuffer(outputTmpShape, dtype);\n uniqueIndices.forEach((uniqueElementIndex, i) => {\n for (let m = 0; m < newShape[0]; m++) {\n for (let n = 0; n < newShape[2]; n++) {\n outputBuffer.set(inputBuffer.get(m, uniqueElementIndex, n), m, i, n);\n }\n }\n });\n\n // The output shape can be calculated from the input shape with the size of\n // the given axis replaced by the number of unique elements along that axis.\n const outputShape = shape.slice();\n outputShape[$axis] = outputTmpShape[1];\n\n return {\n outputValues: outputBuffer.values as BackendValues,\n outputShape,\n indices,\n };\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, TensorInfo, Unique, UniqueAttrs, UniqueInputs} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\n\nimport {uniqueImpl} from './Unique_impl';\n\nexport function unique(\n args: {inputs: UniqueInputs, attrs: UniqueAttrs, backend: MathBackendCPU}):\n TensorInfo[] {\n const {inputs, attrs, backend} = args;\n const {axis} = attrs;\n const {x} = inputs;\n assertNotComplex(x, 'unique');\n\n const values = backend.data.get(x.dataId).values;\n const {outputValues, outputShape, indices} =\n uniqueImpl(values, axis, x.shape, x.dtype);\n return [\n backend.makeTensorInfo(outputShape, x.dtype, outputValues),\n backend.makeTensorInfo([indices.length], 'int32', indices),\n ];\n}\n\nexport const uniqueConfig: KernelConfig = {\n kernelName: Unique,\n backendName: 'cpu',\n kernelFunc: unique as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, TensorInfo, Unpack, UnpackAttrs, UnpackInputs} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {reshape} from './Reshape';\nimport {slice} from './Slice';\n\nexport function unpack(\n args: {inputs: UnpackInputs, backend: MathBackendCPU, attrs: UnpackAttrs}):\n TensorInfo[] {\n const {inputs, backend, attrs} = args;\n const {value} = inputs;\n let {axis} = attrs;\n\n if (axis < 0) {\n axis += value.shape.length;\n }\n\n const valueRank = value.shape.length;\n\n const num = value.shape[axis];\n const outShape: number[] = new Array(valueRank - 1);\n let outIndex = 0;\n for (let i = 0; i < valueRank; i++) {\n if (i !== axis) {\n outShape[outIndex++] = value.shape[i];\n }\n }\n\n const begin = new Array(valueRank).fill(0);\n const size = value.shape.slice();\n size[axis] = 1;\n const res = new Array(num);\n for (let i = 0; i < res.length; i++) {\n begin[axis] = i;\n const tempRes = slice({inputs: {x: value}, backend, attrs: {begin, size}});\n res[i] = reshape({inputs: {x: tempRes}, backend, attrs: {shape: outShape}});\n backend.disposeIntermediateTensorInfo(tempRes);\n }\n\n return res;\n}\n\nexport const unpackConfig: KernelConfig = {\n 
kernelName: Unpack,\n backendName: 'cpu',\n kernelFunc: unpack as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n// We explicitly import the modular kernels so they get registered in the\n// global registry when we compile the library. A modular build would replace\n// the contents of this file and import only the kernels that are needed.\nimport {KernelConfig, registerKernel} from '@tensorflow/tfjs-core';\n\nimport {_fusedMatMulConfig} from './kernels/_FusedMatMul';\nimport {absConfig} from './kernels/Abs';\nimport {acosConfig} from './kernels/Acos';\nimport {acoshConfig} from './kernels/Acosh';\nimport {addConfig} from './kernels/Add';\nimport {addNConfig} from './kernels/AddN';\nimport {allConfig} from './kernels/All';\nimport {anyConfig} from './kernels/Any';\nimport {argMaxConfig} from './kernels/ArgMax';\nimport {argMinConfig} from './kernels/ArgMin';\nimport {asinConfig} from './kernels/Asin';\nimport {asinhConfig} from './kernels/Asinh';\nimport {atanConfig} from './kernels/Atan';\nimport {atan2Config} from './kernels/Atan2';\nimport {atanhConfig} from './kernels/Atanh';\nimport {avgPoolConfig} from './kernels/AvgPool';\nimport {avgPool3DConfig} from './kernels/AvgPool3D';\nimport {avgPool3DGradConfig} from './kernels/AvgPool3DGrad';\nimport {avgPoolGradConfig} from './kernels/AvgPoolGrad';\nimport {batchMatMulConfig} from './kernels/BatchMatMul';\nimport {batchNormConfig} from './kernels/BatchNorm';\nimport {batchToSpaceNDConfig} from './kernels/BatchToSpaceND';\nimport {bincountConfig} from './kernels/Bincount';\nimport {broadcastArgsConfig} from './kernels/BroadcastArgs';\nimport {castConfig} from './kernels/Cast';\nimport {ceilConfig} from './kernels/Ceil';\nimport {clipByValueConfig} from './kernels/ClipByValue';\nimport {complexConfig} from './kernels/Complex';\nimport {complexAbsConfig} from './kernels/ComplexAbs';\nimport {concatConfig} from './kernels/Concat';\nimport {conv2DConfig} from './kernels/Conv2D';\nimport {conv2DBackpropFilterConfig} from './kernels/Conv2DBackpropFilter';\nimport {conv2DBackpropInputConfig} from './kernels/Conv2DBackpropInput';\nimport {conv3DConfig} from './kernels/Conv3D';\nimport {conv3DBackpropFilterV2Config} from './kernels/Conv3DBackpropFilterV2';\nimport {conv3DBackpropInputV2Config} from './kernels/Conv3DBackpropInputV2';\nimport {cosConfig} from './kernels/Cos';\nimport {coshConfig} from './kernels/Cosh';\nimport {cropAndResizeConfig} from './kernels/CropAndResize';\nimport {cumprodConfig} from './kernels/Cumprod';\nimport {cumsumConfig} from './kernels/Cumsum';\nimport {denseBincountConfig} from './kernels/DenseBincount';\nimport {depthToSpaceConfig} from './kernels/DepthToSpace';\nimport {depthwiseConv2dNativeConfig} from './kernels/DepthwiseConv2dNative';\nimport {depthwiseConv2dNativeBackpropFilterConfig} from 
'./kernels/DepthwiseConv2dNativeBackpropFilter';\nimport {depthwiseConv2dNativeBackpropInputConfig} from './kernels/DepthwiseConv2dNativeBackpropInput';\nimport {diagConfig} from './kernels/Diag';\nimport {dilation2DConfig} from './kernels/Dilation2D';\nimport {dilation2DBackpropFilterConfig} from './kernels/Dilation2DBackpropFilter';\nimport {dilation2DBackpropInputConfig} from './kernels/Dilation2DBackpropInput';\nimport {einsumConfig} from './kernels/Einsum';\nimport {eluConfig} from './kernels/Elu';\nimport {eluGradConfig} from './kernels/EluGrad';\nimport {equalConfig} from './kernels/Equal';\nimport {erfConfig} from './kernels/Erf';\nimport {expConfig} from './kernels/Exp';\nimport {expandDimsConfig} from './kernels/ExpandDims';\nimport {expm1Config} from './kernels/Expm1';\nimport {fftConfig} from './kernels/FFT';\nimport {fillConfig} from './kernels/Fill';\nimport {flipLeftRightConfig} from './kernels/FlipLeftRight';\nimport {floorConfig} from './kernels/Floor';\nimport {floorDivConfig} from './kernels/FloorDiv';\nimport {fusedConv2DConfig} from './kernels/FusedConv2D';\nimport {fusedDepthwiseConv2DConfig} from './kernels/FusedDepthwiseConv2D';\nimport {gatherNdConfig} from './kernels/GatherNd';\nimport {gatherV2Config} from './kernels/GatherV2';\nimport {greaterConfig} from './kernels/Greater';\nimport {greaterEqualConfig} from './kernels/GreaterEqual';\nimport {identityConfig} from './kernels/Identity';\nimport {ifftConfig} from './kernels/IFFT';\nimport {imagConfig} from './kernels/Imag';\nimport {isFiniteConfig} from './kernels/IsFinite';\nimport {isInfConfig} from './kernels/IsInf';\nimport {isNaNConfig} from './kernels/IsNaN';\nimport {leakyReluConfig} from './kernels/LeakyRelu';\nimport {lessConfig} from './kernels/Less';\nimport {lessEqualConfig} from './kernels/LessEqual';\nimport {linSpaceConfig} from './kernels/LinSpace';\nimport {logConfig} from './kernels/Log';\nimport {log1pConfig} from './kernels/Log1p';\nimport {logicalAndConfig} from './kernels/LogicalAnd';\nimport {logicalNotConfig} from './kernels/LogicalNot';\nimport {logicalOrConfig} from './kernels/LogicalOr';\nimport {LRNConfig} from './kernels/LRN';\nimport {LRNGradConfig} from './kernels/LRNGrad';\nimport {maxConfig} from './kernels/Max';\nimport {maximumConfig} from './kernels/Maximum';\nimport {maxPoolConfig} from './kernels/MaxPool';\nimport {maxPool3DConfig} from './kernels/MaxPool3D';\nimport {maxPool3DGradConfig} from './kernels/MaxPool3DGrad';\nimport {maxPoolGradConfig} from './kernels/MaxPoolGrad';\nimport {maxPoolWithArgmaxConfig} from './kernels/MaxPoolWithArgmax';\nimport {meanConfig} from './kernels/Mean';\nimport {minConfig} from './kernels/Min';\nimport {minimumConfig} from './kernels/Minimum';\nimport {mirrorPadConfig} from './kernels/MirrorPad';\nimport {modConfig} from './kernels/Mod';\nimport {multinomialConfig} from './kernels/Multinomial';\nimport {multiplyConfig} from './kernels/Multiply';\nimport {negConfig} from './kernels/Neg';\nimport {nonMaxSuppressionV3Config} from './kernels/NonMaxSuppressionV3';\nimport {nonMaxSuppressionV4Config} from './kernels/NonMaxSuppressionV4';\nimport {nonMaxSuppressionV5Config} from './kernels/NonMaxSuppressionV5';\nimport {notEqualConfig} from './kernels/NotEqual';\nimport {oneHotConfig} from './kernels/OneHot';\nimport {onesLikeConfig} from './kernels/OnesLike';\nimport {packConfig} from './kernels/Pack';\nimport {padV2Config} from './kernels/PadV2';\nimport {powConfig} from './kernels/Pow';\nimport {preluConfig} from './kernels/Prelu';\nimport 
{prodConfig} from './kernels/Prod';\nimport {raggedGatherConfig} from './kernels/RaggedGather';\nimport {raggedRangeConfig} from './kernels/RaggedRange';\nimport {raggedTensorToTensorConfig} from './kernels/RaggedTensorToTensor';\nimport {rangeConfig} from './kernels/Range';\nimport {realConfig} from './kernels/Real';\nimport {realDivConfig} from './kernels/RealDiv';\nimport {reciprocalConfig} from './kernels/Reciprocal';\nimport {reluConfig} from './kernels/Relu';\nimport {relu6Config} from './kernels/Relu6';\nimport {reshapeConfig} from './kernels/Reshape';\nimport {resizeBilinearConfig} from './kernels/ResizeBilinear';\nimport {resizeBilinearGradConfig} from './kernels/ResizeBilinearGrad';\nimport {resizeNearestNeighborConfig} from './kernels/ResizeNearestNeighbor';\nimport {resizeNearestNeighborGradConfig} from './kernels/ResizeNearestNeighborGrad';\nimport {reverseConfig} from './kernels/Reverse';\nimport {rotateWithOffsetConfig} from './kernels/RotateWithOffset';\nimport {roundConfig} from './kernels/Round';\nimport {rsqrtConfig} from './kernels/Rsqrt';\nimport {scatterNdConfig} from './kernels/ScatterNd';\nimport {searchSortedConfig} from './kernels/SearchSorted';\nimport {selectConfig} from './kernels/Select';\nimport {seluConfig} from './kernels/Selu';\nimport {sigmoidConfig} from './kernels/Sigmoid';\nimport {signConfig} from './kernels/Sign';\nimport {sinConfig} from './kernels/Sin';\nimport {sinhConfig} from './kernels/Sinh';\nimport {sliceConfig} from './kernels/Slice';\nimport {softmaxConfig} from './kernels/Softmax';\nimport {softplusConfig} from './kernels/Softplus';\nimport {spaceToBatchNDConfig} from './kernels/SpaceToBatchND';\nimport {sparseFillEmptyRowsConfig} from './kernels/SparseFillEmptyRows';\nimport {sparseReshapeConfig} from './kernels/SparseReshape';\nimport {sparseSegmentMeanConfig} from './kernels/SparseSegmentMean';\nimport {sparseSegmentSumConfig} from './kernels/SparseSegmentSum';\nimport {sparseToDenseConfig} from './kernels/SparseToDense';\nimport {splitVConfig} from './kernels/SplitV';\nimport {sqrtConfig} from './kernels/Sqrt';\nimport {squareConfig} from './kernels/Square';\nimport {squaredDifferenceConfig} from './kernels/SquaredDifference';\nimport {stepConfig} from './kernels/Step';\nimport {stridedSliceConfig} from './kernels/StridedSlice';\nimport {stringNGramsConfig} from './kernels/StringNGrams';\nimport {stringSplitConfig} from './kernels/StringSplit';\nimport {stringToHashBucketFastConfig} from './kernels/StringToHashBucketFast';\nimport {subConfig} from './kernels/Sub';\nimport {sumConfig} from './kernels/Sum';\nimport {tanConfig} from './kernels/Tan';\nimport {tanhConfig} from './kernels/Tanh';\nimport {tileConfig} from './kernels/Tile';\nimport {topKConfig} from './kernels/TopK';\nimport {transformConfig} from './kernels/Transform';\nimport {transposeConfig} from './kernels/Transpose';\nimport {uniqueConfig} from './kernels/Unique';\nimport {unpackConfig} from './kernels/Unpack';\nimport {unsortedSegmentSumConfig} from './kernels/UnsortedSegmentSum';\nimport {zerosLikeConfig} from './kernels/ZerosLike';\n\n// List all kernel configs here\nconst kernelConfigs: KernelConfig[] = [\n _fusedMatMulConfig,\n absConfig,\n acosConfig,\n acoshConfig,\n addConfig,\n addNConfig,\n allConfig,\n anyConfig,\n argMaxConfig,\n argMinConfig,\n asinConfig,\n asinhConfig,\n atanConfig,\n atan2Config,\n atanhConfig,\n avgPoolConfig,\n avgPool3DConfig,\n avgPool3DGradConfig,\n avgPoolGradConfig,\n batchMatMulConfig,\n batchNormConfig,\n batchToSpaceNDConfig,\n 
bincountConfig,\n broadcastArgsConfig,\n castConfig,\n ceilConfig,\n clipByValueConfig,\n complexConfig,\n complexAbsConfig,\n concatConfig,\n conv2DConfig,\n conv2DBackpropFilterConfig,\n conv2DBackpropInputConfig,\n conv3DConfig,\n conv3DBackpropFilterV2Config,\n conv3DBackpropInputV2Config,\n cosConfig,\n coshConfig,\n cropAndResizeConfig,\n cumprodConfig,\n cumsumConfig,\n denseBincountConfig,\n depthToSpaceConfig,\n depthwiseConv2dNativeConfig,\n depthwiseConv2dNativeBackpropFilterConfig,\n depthwiseConv2dNativeBackpropInputConfig,\n diagConfig,\n dilation2DConfig,\n dilation2DBackpropFilterConfig,\n dilation2DBackpropInputConfig,\n einsumConfig,\n eluConfig,\n eluGradConfig,\n equalConfig,\n erfConfig,\n expConfig,\n expandDimsConfig,\n expm1Config,\n fftConfig,\n fillConfig,\n flipLeftRightConfig,\n floorConfig,\n floorDivConfig,\n fusedConv2DConfig,\n fusedDepthwiseConv2DConfig,\n gatherNdConfig,\n gatherV2Config,\n greaterConfig,\n greaterEqualConfig,\n identityConfig,\n ifftConfig,\n imagConfig,\n isFiniteConfig,\n isInfConfig,\n isNaNConfig,\n leakyReluConfig,\n lessConfig,\n lessEqualConfig,\n linSpaceConfig,\n logConfig,\n log1pConfig,\n logicalAndConfig,\n logicalNotConfig,\n logicalOrConfig,\n LRNConfig,\n LRNGradConfig,\n maxConfig,\n maximumConfig,\n maxPoolConfig,\n maxPool3DConfig,\n maxPool3DGradConfig,\n maxPoolGradConfig,\n maxPoolWithArgmaxConfig,\n meanConfig,\n minConfig,\n minimumConfig,\n mirrorPadConfig,\n modConfig,\n multinomialConfig,\n multiplyConfig,\n negConfig,\n nonMaxSuppressionV3Config,\n nonMaxSuppressionV4Config,\n nonMaxSuppressionV5Config,\n notEqualConfig,\n oneHotConfig,\n onesLikeConfig,\n packConfig,\n padV2Config,\n powConfig,\n preluConfig,\n prodConfig,\n raggedGatherConfig,\n raggedRangeConfig,\n raggedTensorToTensorConfig,\n rangeConfig,\n realConfig,\n realDivConfig,\n reciprocalConfig,\n reluConfig,\n relu6Config,\n reshapeConfig,\n resizeBilinearConfig,\n resizeBilinearGradConfig,\n resizeNearestNeighborConfig,\n resizeNearestNeighborGradConfig,\n reverseConfig,\n rotateWithOffsetConfig,\n roundConfig,\n rsqrtConfig,\n scatterNdConfig,\n searchSortedConfig,\n selectConfig,\n seluConfig,\n sigmoidConfig,\n signConfig,\n sinConfig,\n sinhConfig,\n sliceConfig,\n softmaxConfig,\n softplusConfig,\n spaceToBatchNDConfig,\n sparseFillEmptyRowsConfig,\n sparseReshapeConfig,\n sparseSegmentMeanConfig,\n sparseSegmentSumConfig,\n sparseToDenseConfig,\n splitVConfig,\n sqrtConfig,\n squareConfig,\n squaredDifferenceConfig,\n stepConfig,\n stridedSliceConfig,\n stringNGramsConfig,\n stringSplitConfig,\n stringToHashBucketFastConfig,\n subConfig,\n sumConfig,\n tanConfig,\n tanhConfig,\n tileConfig,\n topKConfig,\n transformConfig,\n transposeConfig,\n uniqueConfig,\n unpackConfig,\n unsortedSegmentSumConfig,\n zerosLikeConfig\n];\n\nfor (const kernelConfig of kernelConfigs) {\n registerKernel(kernelConfig);\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, TensorInfo, UnsortedSegmentSum, UnsortedSegmentSumAttrs, UnsortedSegmentSumInputs, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendCPU} from '../backend_cpu';\nimport {assertNotComplex} from '../cpu_util';\nimport {cast} from './Cast';\nimport {equal} from './Equal';\nimport {expandDims} from './ExpandDims';\nimport {multiply} from './Multiply';\nimport {pack} from './Pack';\nimport {sum} from './Sum';\n\nexport function unsortedSegmentSum(args: {\n inputs: UnsortedSegmentSumInputs,\n backend: MathBackendCPU,\n attrs: UnsortedSegmentSumAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x, segmentIds} = inputs;\n const {numSegments} = attrs;\n\n assertNotComplex(x, 'unsortedSegmentSum');\n\n const xRank = x.shape.length;\n const segmentIdsRank = segmentIds.shape.length;\n const res = [];\n const intermediates: TensorInfo[] = [];\n\n // Reshape the segment id's so that they can be broadcast with\n // x. The new shape should be [segmentIds.shape, 1, ..., 1]\n const numIters = xRank - segmentIdsRank;\n let $segmentIds = segmentIds;\n\n for (let i = 0; i < numIters; ++i) {\n const expanded = expandDims(\n {inputs: {input: $segmentIds}, backend, attrs: {dim: i + 1}});\n $segmentIds = expanded;\n intermediates.push(expanded);\n }\n\n for (let i = 0; i < numSegments; ++i) {\n const scalarValue = util.createScalarValue(\n i as unknown as 'int32', 'int32');\n const segmentId = backend.makeTensorInfo([], 'int32', scalarValue);\n const mask =\n equal({inputs: {a: segmentId, b: $segmentIds}, backend}) as TensorInfo;\n const maskCasted =\n cast({inputs: {x: mask}, backend, attrs: {dtype: 'float32'}});\n const mul =\n multiply({inputs: {a: maskCasted, b: x}, backend}) as TensorInfo;\n const sumTensorInfo =\n sum({inputs: {x: mul}, backend, attrs: {axis: 0, keepDims: false}});\n res.push(sumTensorInfo);\n intermediates.push(segmentId);\n intermediates.push(mask);\n intermediates.push(maskCasted);\n intermediates.push(mul);\n intermediates.push(sumTensorInfo);\n }\n\n const result = pack({inputs: res, backend, attrs: {axis: 0}});\n\n intermediates.forEach(t => backend.disposeIntermediateTensorInfo(t));\n\n return result;\n}\n\nexport const unsortedSegmentSumConfig: KernelConfig = {\n kernelName: UnsortedSegmentSum,\n backendName: 'cpu',\n kernelFunc: unsortedSegmentSum as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env} from '@tensorflow/tfjs-core';\n\nconst contexts: {[key: string]: WebGLRenderingContext} = {};\n\nconst WEBGL_ATTRIBUTES: WebGLContextAttributes = {\n alpha: false,\n antialias: false,\n premultipliedAlpha: false,\n preserveDrawingBuffer: false,\n depth: false,\n stencil: false,\n failIfMajorPerformanceCaveat: true\n};\n\nexport function clearWebGLContext(webGLVersion: number) {\n delete contexts[webGLVersion];\n}\n\nexport function setWebGLContext(\n webGLVersion: number, gl: WebGLRenderingContext) {\n contexts[webGLVersion] = gl;\n}\n\nexport function getWebGLContext(\n webGLVersion: number,\n customCanvas?: HTMLCanvasElement|OffscreenCanvas): WebGLRenderingContext {\n if (!(webGLVersion in contexts) || customCanvas != null) {\n const newCtx = getWebGLRenderingContext(webGLVersion, customCanvas);\n if (newCtx !== null) {\n contexts[webGLVersion] = newCtx;\n } else {\n console.log('Could not get context for WebGL version', webGLVersion);\n return null;\n }\n }\n const gl = contexts[webGLVersion];\n if (gl == null || gl.isContextLost()) {\n delete contexts[webGLVersion];\n return getWebGLContext(webGLVersion);\n }\n\n gl.disable(gl.DEPTH_TEST);\n gl.disable(gl.STENCIL_TEST);\n gl.disable(gl.BLEND);\n gl.disable(gl.DITHER);\n gl.disable(gl.POLYGON_OFFSET_FILL);\n gl.disable(gl.SAMPLE_COVERAGE);\n gl.enable(gl.SCISSOR_TEST);\n gl.enable(gl.CULL_FACE);\n gl.cullFace(gl.BACK);\n\n return contexts[webGLVersion];\n}\n\nfunction createCanvas(webGLVersion: number) {\n if (typeof OffscreenCanvas !== 'undefined' && webGLVersion === 2) {\n return new OffscreenCanvas(300, 150);\n } else if (typeof document !== 'undefined') {\n return document.createElement('canvas');\n } else {\n throw new Error('Cannot create a canvas in this context');\n }\n}\n\nfunction getWebGLRenderingContext(\n webGLVersion: number,\n customCanvas?: HTMLCanvasElement|OffscreenCanvas): WebGLRenderingContext {\n if (webGLVersion !== 1 && webGLVersion !== 2) {\n throw new Error('Cannot get WebGL rendering context, WebGL is disabled.');\n }\n const canvas =\n customCanvas == null ? createCanvas(webGLVersion) : customCanvas;\n\n canvas.addEventListener('webglcontextlost', (ev: Event) => {\n ev.preventDefault();\n delete contexts[webGLVersion];\n }, false);\n\n if (env().getBool('SOFTWARE_WEBGL_ENABLED')) {\n WEBGL_ATTRIBUTES.failIfMajorPerformanceCaveat = false;\n }\n\n if (webGLVersion === 1) {\n return (canvas.getContext('webgl', WEBGL_ATTRIBUTES) ||\n (canvas as HTMLCanvasElement)\n .getContext('experimental-webgl', WEBGL_ATTRIBUTES));\n }\n return canvas.getContext('webgl2', WEBGL_ATTRIBUTES) as WebGLRenderingContext;\n}\n","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, DataId, DataType, env, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nexport enum PackingScheme {\n /**\n * All values in a single texel are densely packed without any constraints.\n *\n * This is how the shader encodes a tensor with shape = [2, 3, 4]\n * (indices are [batch, row, col]).\n *\n * 000|001 010|011 020|021\n * ------- ------- -------\n * 002|003 012|013 022|023\n *\n * 100|101 110|111 120|121\n * ------- ------- -------\n * 102|103 112|113 122|123\n *\n */\n DENSE,\n\n /**\n * Single texels contain only values from the same batch, and from adjacent\n * rows and columns.\n *\n * This is how the shader encodes a tensor with shape = [2, 3, 5]\n * (indices are [batch, row, col]).\n *\n * 000|001 002|003 004|xxx 020|021 022|023 024|xxx\n * ------- ------- ------- ------- ------- -------\n * 010|011 012|013 014|xxx xxx|xxx xxx|xxx xxx|xxx\n *\n * 100|101 102|103 104|xxx 120|121 122|123 124|xxx\n * ------- ------- ------- ------- ------- -------\n * 110|111 112|113 114|xxx xxx|xxx xxx|xxx xxx|xxx\n *\n */\n SHARED_BATCH\n}\n\nexport enum TextureUsage {\n RENDER,\n UPLOAD,\n PIXELS,\n DOWNLOAD\n}\n\nexport enum PhysicalTextureType {\n UNPACKED_FLOAT16,\n UNPACKED_FLOAT32,\n PACKED_4X1_UNSIGNED_BYTE,\n PACKED_2X2_FLOAT32,\n PACKED_2X2_FLOAT16\n}\n\nexport interface Texture {\n texture: WebGLTexture;\n texShape: [number, number];\n}\nexport interface TextureData {\n // Required.\n shape: number[];\n dtype: DataType;\n\n // Optional.\n values?: backend_util.BackendValues;\n texture?: Texture;\n // For complex numbers, the real and imaginary parts are stored as their own\n // individual tensorInfos, with a parent joining the two with the\n // complexTensors field. When this is defined, texture will be null.\n complexTensorInfos?: {real: TensorInfo, imag: TensorInfo};\n /** [rows, columns] shape of the texture. 
*/\n texShape?: [number, number];\n usage?: TextureUsage;\n isPacked?: boolean;\n\n refCount: number;\n\n // Available when the tensor has been sliced.\n slice?: {\n // Offset in the 'flat index' space.\n flatOffset: number;\n // Used for counting how many sliced tensors point to the same texture.\n origDataId: DataId;\n };\n}\n\nexport function getUnpackedMatrixTextureShapeWidthHeight(\n rows: number, columns: number): [number, number] {\n return [columns, rows];\n}\n\nexport function getUnpackedArraySizeFromMatrixSize(\n matrixSize: number, channelsPerTexture: number): number {\n return matrixSize * channelsPerTexture;\n}\n\nexport function getColorMatrixTextureShapeWidthHeight(\n rows: number, columns: number): [number, number] {\n return [columns * 4, rows];\n}\n\n/**\n * Get shape for densely packed RGBA texture.\n */\nexport function getDenseTexShape(shape: number[]): [number, number] {\n const size = util.sizeFromShape(shape);\n const texelsNeeded = Math.ceil(size / 4);\n return util.sizeToSquarishShape(texelsNeeded);\n}\n\nexport function getMatrixSizeFromUnpackedArraySize(\n unpackedSize: number, channelsPerTexture: number): number {\n if (unpackedSize % channelsPerTexture !== 0) {\n throw new Error(\n `unpackedSize (${unpackedSize}) must be a multiple of ` +\n `${channelsPerTexture}`);\n }\n return unpackedSize / channelsPerTexture;\n}\n\nexport function decodeMatrixFromUnpackedColorRGBAArray(\n unpackedArray: Float32Array, matrix: Float32Array, channels: number) {\n const requiredSize = unpackedArray.length * channels / 4;\n if (matrix.length < requiredSize) {\n throw new Error(\n `matrix length (${matrix.length}) must be >= ${requiredSize}`);\n }\n let dst = 0;\n for (let src = 0; src < unpackedArray.length; src += 4) {\n for (let c = 0; c < channels; c++) {\n matrix[dst++] = unpackedArray[src + c];\n }\n }\n}\n\nexport function getPackedMatrixTextureShapeWidthHeight(\n rows: number, columns: number): [number, number] {\n return [\n Math.max(1, Math.ceil(columns / 2)), Math.max(1, Math.ceil(rows / 2))\n ];\n}\n\nexport function getPackedRGBAArraySizeFromMatrixShape(\n rows: number, columns: number): number {\n const [w, h] = getPackedMatrixTextureShapeWidthHeight(rows, columns);\n return w * h * 4;\n}\n\nexport interface TextureConfig {\n internalFormatFloat: number;\n textureFormatFloat: number;\n internalFormatPackedHalfFloat: number;\n internalFormatHalfFloat: number;\n internalFormatPackedFloat: number;\n\n // The format to use during a gl.readPixels call.\n downloadTextureFormat: number;\n // How many channels need to be unpacked after a gl.readPixels call.\n downloadUnpackNumChannels: number;\n\n defaultNumChannels: number;\n textureTypeHalfFloat: number;\n textureTypeFloat: number;\n}\n\nexport function getTextureConfig(\n // tslint:disable-next-line:no-any\n gl: WebGLRenderingContext, textureHalfFloatExtension?: any): TextureConfig {\n // tslint:disable-next-line:no-any\n const glany = gl as any;\n\n let internalFormatFloat: number;\n let internalFormatHalfFloat: number;\n let internalFormatPackedHalfFloat: number;\n let internalFormatPackedFloat: number;\n let textureFormatFloat: number;\n\n let downloadTextureFormat: number;\n let downloadUnpackNumChannels: number;\n\n let defaultNumChannels: number;\n let textureTypeHalfFloat: number;\n let textureTypeFloat: number;\n\n if (env().getNumber('WEBGL_VERSION') === 2) {\n internalFormatFloat = glany.R32F;\n internalFormatHalfFloat = glany.R16F;\n internalFormatPackedHalfFloat = glany.RGBA16F;\n internalFormatPackedFloat = 
glany.RGBA32F;\n textureFormatFloat = glany.RED;\n downloadUnpackNumChannels = 4;\n defaultNumChannels = 1;\n textureTypeHalfFloat = glany.HALF_FLOAT;\n textureTypeFloat = glany.FLOAT;\n downloadTextureFormat = glany.RGBA8;\n } else {\n internalFormatFloat = gl.RGBA;\n internalFormatHalfFloat = gl.RGBA;\n internalFormatPackedHalfFloat = gl.RGBA;\n internalFormatPackedFloat = glany.RGBA;\n textureFormatFloat = gl.RGBA;\n downloadUnpackNumChannels = 4;\n defaultNumChannels = 4;\n textureTypeHalfFloat = textureHalfFloatExtension != null ?\n textureHalfFloatExtension.HALF_FLOAT_OES :\n null;\n textureTypeFloat = gl.FLOAT;\n downloadTextureFormat = gl.RGBA;\n }\n\n return {\n internalFormatFloat,\n internalFormatHalfFloat,\n internalFormatPackedHalfFloat,\n internalFormatPackedFloat,\n textureFormatFloat,\n downloadTextureFormat,\n downloadUnpackNumChannels,\n defaultNumChannels,\n textureTypeHalfFloat,\n textureTypeFloat\n };\n}\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {getWebGLContext} from './canvas_util';\nimport {getTextureConfig} from './tex_util';\n\nexport function callAndCheck(gl: WebGLRenderingContext, func: () => T): T {\n const returnValue = func();\n if (env().getBool('DEBUG')) {\n checkWebGLError(gl);\n }\n return returnValue;\n}\n\nfunction checkWebGLError(gl: WebGLRenderingContext) {\n const error = gl.getError();\n if (error !== gl.NO_ERROR) {\n throw new Error('WebGL Error: ' + getWebGLErrorMessage(gl, error));\n }\n}\n\n// https://en.wikipedia.org/wiki/Half-precision_floating-point_format\nconst MIN_FLOAT16 = 5.96e-8;\nconst MAX_FLOAT16 = 65504;\n\nexport function canBeRepresented(num: number): boolean {\n if (env().getBool('WEBGL_RENDER_FLOAT32_ENABLED') || num === 0 ||\n (MIN_FLOAT16 < Math.abs(num) && Math.abs(num) < MAX_FLOAT16)) {\n return true;\n }\n return false;\n}\n\nexport function getWebGLErrorMessage(\n gl: WebGLRenderingContext, status: number): string {\n switch (status) {\n case gl.NO_ERROR:\n return 'NO_ERROR';\n case gl.INVALID_ENUM:\n return 'INVALID_ENUM';\n case gl.INVALID_VALUE:\n return 'INVALID_VALUE';\n case gl.INVALID_OPERATION:\n return 'INVALID_OPERATION';\n case gl.INVALID_FRAMEBUFFER_OPERATION:\n return 'INVALID_FRAMEBUFFER_OPERATION';\n case gl.OUT_OF_MEMORY:\n return 'OUT_OF_MEMORY';\n case gl.CONTEXT_LOST_WEBGL:\n return 'CONTEXT_LOST_WEBGL';\n default:\n return `Unknown error code ${status}`;\n }\n}\n\nexport function getExtensionOrThrow(\n gl: WebGLRenderingContext, extensionName: string): {} {\n return throwIfNull<{}>(\n gl, () => gl.getExtension(extensionName),\n 'Extension \"' + extensionName + '\" not supported on this browser.');\n}\n\nexport function createVertexShader(\n gl: WebGLRenderingContext, vertexShaderSource: string): WebGLShader {\n const vertexShader: WebGLShader = throwIfNull(\n 
gl, () => gl.createShader(gl.VERTEX_SHADER),\n 'Unable to create vertex WebGLShader.');\n callAndCheck(gl, () => gl.shaderSource(vertexShader, vertexShaderSource));\n callAndCheck(gl, () => gl.compileShader(vertexShader));\n if (gl.getShaderParameter(vertexShader, gl.COMPILE_STATUS) === false) {\n console.log(gl.getShaderInfoLog(vertexShader));\n throw new Error('Failed to compile vertex shader.');\n }\n return vertexShader;\n}\n\nexport function createFragmentShader(\n gl: WebGLRenderingContext, fragmentShaderSource: string): WebGLShader {\n const fragmentShader: WebGLShader = throwIfNull(\n gl, () => gl.createShader(gl.FRAGMENT_SHADER),\n 'Unable to create fragment WebGLShader.');\n callAndCheck(gl, () => gl.shaderSource(fragmentShader, fragmentShaderSource));\n callAndCheck(gl, () => gl.compileShader(fragmentShader));\n if (env().get('ENGINE_COMPILE_ONLY')) {\n return fragmentShader;\n }\n if (gl.getShaderParameter(fragmentShader, gl.COMPILE_STATUS) === false) {\n logShaderSourceAndInfoLog(\n fragmentShaderSource, gl.getShaderInfoLog(fragmentShader));\n throw new Error('Failed to compile fragment shader.');\n }\n return fragmentShader;\n}\n\nconst lineNumberRegex = /ERROR: [0-9]+:([0-9]+):/g;\nexport function logShaderSourceAndInfoLog(\n shaderSource: string, shaderInfoLog: string) {\n const lineNumberRegexResult = lineNumberRegex.exec(shaderInfoLog);\n if (lineNumberRegexResult == null) {\n console.log(`Couldn't parse line number in error: ${shaderInfoLog}`);\n console.log(shaderSource);\n return;\n }\n\n const lineNumber = +lineNumberRegexResult[1];\n\n const shaderLines = shaderSource.split('\\n');\n const pad = shaderLines.length.toString().length + 2;\n const linesWithLineNumbers = shaderLines.map(\n (line, lineNumber) =>\n util.rightPad((lineNumber + 1).toString(), pad) + line);\n let maxLineLength = 0;\n for (let i = 0; i < linesWithLineNumbers.length; i++) {\n maxLineLength = Math.max(linesWithLineNumbers[i].length, maxLineLength);\n }\n\n const beforeErrorLines = linesWithLineNumbers.slice(0, lineNumber - 1);\n const errorLine = linesWithLineNumbers.slice(lineNumber - 1, lineNumber);\n const afterErrorLines = linesWithLineNumbers.slice(lineNumber);\n\n console.log(beforeErrorLines.join('\\n'));\n console.log(shaderInfoLog.split('\\n')[0]);\n console.log(\n `%c ${util.rightPad(errorLine[0], maxLineLength)}`,\n 'border:1px solid red; background-color:#e3d2d2; color:#a61717');\n console.log(afterErrorLines.join('\\n'));\n}\n\nexport function createProgram(gl: WebGLRenderingContext): WebGLProgram {\n return throwIfNull(\n gl, () => gl.createProgram(), 'Unable to create WebGLProgram.');\n}\n\nexport function linkProgram(gl: WebGLRenderingContext, program: WebGLProgram) {\n callAndCheck(gl, () => gl.linkProgram(program));\n if (env().get('ENGINE_COMPILE_ONLY')) {\n return;\n }\n if (gl.getProgramParameter(program, gl.LINK_STATUS) === false) {\n console.log(gl.getProgramInfoLog(program));\n throw new Error('Failed to link vertex and fragment shaders.');\n }\n}\n\n/// validateProgram is effectively \"If we `useProgram(program); drawArrays();`,\n/// give feedback in log about perf/correctness warnings or errors that would\n/// occur.\"\n/// So make sure we set up all vertex/texture/sampler/uniform data before\n/// calling validateProgram!\nexport function validateProgram(\n gl: WebGLRenderingContext, program: WebGLProgram) {\n callAndCheck(gl, () => gl.validateProgram(program));\n if (gl.getProgramParameter(program, gl.VALIDATE_STATUS) === false) {\n 
console.log(gl.getProgramInfoLog(program));\n throw new Error('Shader program validation failed.');\n }\n}\n\nexport function createStaticVertexBuffer(\n gl: WebGLRenderingContext, data: Float32Array): WebGLBuffer {\n const buffer: WebGLBuffer = throwIfNull(\n gl, () => gl.createBuffer(), 'Unable to create WebGLBuffer');\n callAndCheck(gl, () => gl.bindBuffer(gl.ARRAY_BUFFER, buffer));\n callAndCheck(gl, () => gl.bufferData(gl.ARRAY_BUFFER, data, gl.STATIC_DRAW));\n return buffer;\n}\n\nexport function createStaticIndexBuffer(\n gl: WebGLRenderingContext, data: Uint16Array): WebGLBuffer {\n const buffer: WebGLBuffer = throwIfNull(\n gl, () => gl.createBuffer(), 'Unable to create WebGLBuffer');\n callAndCheck(gl, () => gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, buffer));\n callAndCheck(\n gl, () => gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, data, gl.STATIC_DRAW));\n return buffer;\n}\n\nexport function getNumChannels(): number {\n if (env().getNumber('WEBGL_VERSION') === 2) {\n return 1;\n }\n return 4;\n}\n\nexport function createTexture(gl: WebGLRenderingContext): WebGLTexture {\n return throwIfNull(\n gl, () => gl.createTexture(), 'Unable to create WebGLTexture.');\n}\n\nexport function validateTextureSize(width: number, height: number) {\n const maxTextureSize = env().getNumber('WEBGL_MAX_TEXTURE_SIZE');\n if ((width <= 0) || (height <= 0)) {\n const requested = `[${width}x${height}]`;\n throw new Error('Requested texture size ' + requested + ' is invalid.');\n }\n if ((width > maxTextureSize) || (height > maxTextureSize)) {\n const requested = `[${width}x${height}]`;\n const max = `[${maxTextureSize}x${maxTextureSize}]`;\n throw new Error(\n 'Requested texture size ' + requested +\n ' greater than WebGL maximum on this browser / GPU ' + max + '.');\n }\n}\n\nexport function createFramebuffer(gl: WebGLRenderingContext): WebGLFramebuffer {\n return throwIfNull(\n gl, () => gl.createFramebuffer(), 'Unable to create WebGLFramebuffer.');\n}\n\nexport function bindVertexBufferToProgramAttribute(\n gl: WebGLRenderingContext, program: WebGLProgram, attribute: string,\n buffer: WebGLBuffer, arrayEntriesPerItem: number, itemStrideInBytes: number,\n itemOffsetInBytes: number): boolean {\n const loc = gl.getAttribLocation(program, attribute);\n if (loc === -1) {\n // The GPU compiler decided to strip out this attribute because it's unused,\n // thus no need to bind.\n return false;\n }\n callAndCheck(gl, () => gl.bindBuffer(gl.ARRAY_BUFFER, buffer));\n callAndCheck(\n gl,\n () => gl.vertexAttribPointer(\n loc, arrayEntriesPerItem, gl.FLOAT, false, itemStrideInBytes,\n itemOffsetInBytes));\n callAndCheck(gl, () => gl.enableVertexAttribArray(loc));\n return true;\n}\n\nexport function bindTextureUnit(\n gl: WebGLRenderingContext, texture: WebGLTexture, textureUnit: number) {\n validateTextureUnit(gl, textureUnit);\n callAndCheck(gl, () => gl.activeTexture(gl.TEXTURE0 + textureUnit));\n callAndCheck(gl, () => gl.bindTexture(gl.TEXTURE_2D, texture));\n}\n\nexport function unbindTextureUnit(\n gl: WebGLRenderingContext, textureUnit: number) {\n validateTextureUnit(gl, textureUnit);\n callAndCheck(gl, () => gl.activeTexture(gl.TEXTURE0 + textureUnit));\n callAndCheck(gl, () => gl.bindTexture(gl.TEXTURE_2D, null));\n}\n\nexport function getProgramUniformLocationOrThrow(\n gl: WebGLRenderingContext, program: WebGLProgram,\n uniformName: string): WebGLUniformLocation {\n return throwIfNull(\n gl, () => gl.getUniformLocation(program, uniformName),\n 'uniform \"' + uniformName + '\" not present in 
program.');\n}\n\nexport function getProgramUniformLocation(\n gl: WebGLRenderingContext, program: WebGLProgram,\n uniformName: string): WebGLUniformLocation {\n return gl.getUniformLocation(program, uniformName);\n}\n\nexport function bindTextureToProgramUniformSampler(\n gl: WebGLRenderingContext, texture: WebGLTexture,\n uniformSamplerLocation: WebGLUniformLocation, textureUnit: number) {\n callAndCheck(gl, () => bindTextureUnit(gl, texture, textureUnit));\n callAndCheck(gl, () => gl.uniform1i(uniformSamplerLocation, textureUnit));\n}\n\nexport function bindCanvasToFramebuffer(gl: WebGLRenderingContext) {\n callAndCheck(gl, () => gl.bindFramebuffer(gl.FRAMEBUFFER, null));\n callAndCheck(gl, () => gl.viewport(0, 0, gl.canvas.width, gl.canvas.height));\n callAndCheck(gl, () => gl.scissor(0, 0, gl.canvas.width, gl.canvas.height));\n}\n\nexport function bindColorTextureToFramebuffer(\n gl: WebGLRenderingContext, texture: WebGLTexture,\n framebuffer: WebGLFramebuffer) {\n callAndCheck(gl, () => gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer));\n callAndCheck(\n gl,\n () => gl.framebufferTexture2D(\n gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0));\n}\n\nexport function unbindColorTextureFromFramebuffer(\n gl: WebGLRenderingContext, framebuffer: WebGLFramebuffer) {\n callAndCheck(gl, () => gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer));\n callAndCheck(\n gl,\n () => gl.framebufferTexture2D(\n gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, null, 0));\n}\n\nexport function validateFramebuffer(gl: WebGLRenderingContext) {\n const status = gl.checkFramebufferStatus(gl.FRAMEBUFFER);\n if (status !== gl.FRAMEBUFFER_COMPLETE) {\n throw new Error(\n 'Error binding framebuffer: ' + getFramebufferErrorMessage(gl, status));\n }\n}\n\nexport function getFramebufferErrorMessage(\n gl: WebGLRenderingContext, status: number): string {\n switch (status) {\n case gl.FRAMEBUFFER_INCOMPLETE_ATTACHMENT:\n return 'FRAMEBUFFER_INCOMPLETE_ATTACHMENT';\n case gl.FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:\n return 'FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT';\n case gl.FRAMEBUFFER_INCOMPLETE_DIMENSIONS:\n return 'FRAMEBUFFER_INCOMPLETE_DIMENSIONS';\n case gl.FRAMEBUFFER_UNSUPPORTED:\n return 'FRAMEBUFFER_UNSUPPORTED';\n default:\n return `unknown error ${status}`;\n }\n}\n\nfunction throwIfNull(\n gl: WebGLRenderingContext, returnTOrNull: () => T | null,\n failureMessage: string): T {\n const tOrNull: T|null = callAndCheck(gl, () => returnTOrNull());\n if (tOrNull == null) {\n throw new Error(failureMessage);\n }\n return tOrNull;\n}\n\nfunction validateTextureUnit(gl: WebGLRenderingContext, textureUnit: number) {\n const maxTextureUnit = gl.MAX_COMBINED_TEXTURE_IMAGE_UNITS - 1;\n const glTextureUnit = textureUnit + gl.TEXTURE0;\n if (glTextureUnit < gl.TEXTURE0 || glTextureUnit > maxTextureUnit) {\n const textureUnitRange = `[gl.TEXTURE0, gl.TEXTURE${maxTextureUnit}]`;\n throw new Error(`textureUnit must be in ${textureUnitRange}.`);\n }\n}\n\nexport function getBatchDim(shape: number[], dimsToSkip = 2): number {\n return util.sizeFromShape(shape.slice(0, shape.length - dimsToSkip));\n}\n\nexport function getRowsCols(shape: number[]): [number, number] {\n if (shape.length === 0) {\n throw Error('Cannot get rows and columns of an empty shape array.');\n }\n\n return [\n shape.length > 1 ? 
shape[shape.length - 2] : 1, shape[shape.length - 1]\n ];\n}\n\nexport function getShapeAs3D(shape: number[]): [number, number, number] {\n let shapeAs3D: [number, number, number] = [1, 1, 1];\n const isScalar = shape.length === 0 || (shape.length === 1 && shape[0] === 1);\n if (!isScalar) {\n shapeAs3D =\n [getBatchDim(shape), ...getRowsCols(shape)] as [number, number, number];\n }\n return shapeAs3D;\n}\n\nexport function getTextureShapeFromLogicalShape(\n logShape: number[], isPacked = false): [number, number] {\n let maxTexSize = env().getNumber('WEBGL_MAX_TEXTURE_SIZE');\n let maxSizeForNarrowTex =\n env().getNumber('WEBGL_MAX_SIZE_FOR_NARROW_TEXTURE');\n if (maxSizeForNarrowTex === Infinity &&\n env().getBool('WEBGL_AUTO_SQUARIFY_NARROW_TEXTURE_SHAPE')) {\n maxSizeForNarrowTex = maxTexSize / 2;\n }\n\n if (isPacked) {\n maxTexSize = maxTexSize * 2;\n maxSizeForNarrowTex = maxSizeForNarrowTex * 2;\n\n // This logic ensures we accurately count the number of packed texels needed\n // to accommodate the tensor. We can only pack values in the same texel if\n // they are from adjacent pairs of rows/cols within the same batch. So if a\n // tensor has 3 rows, we pretend it has 4 rows in order to account for the\n // fact that the texels containing the third row are half empty.\n logShape = logShape.map(\n (d, i) => i >= logShape.length - 2 ?\n util.nearestLargerEven(logShape[i]) :\n logShape[i]);\n\n // Packed texture height is at least 2 (the channel height of a single\n // texel).\n if (logShape.length === 1) {\n logShape = [2, logShape[0]];\n }\n }\n\n // If logical shape is 2, we don't squeeze, since we want to match physical.\n if (logShape.length !== 2) {\n const squeezeResult = util.squeezeShape(logShape);\n logShape = squeezeResult.newShape;\n }\n\n let size = util.sizeFromShape(logShape);\n let textureShape: [number, number] = null;\n if (logShape.length <= 1 && size <= maxTexSize) {\n textureShape = [1, size];\n } else if (\n logShape.length === 2 && logShape[0] <= maxTexSize &&\n logShape[1] <= maxTexSize) {\n textureShape = logShape as [number, number];\n } else if (\n logShape.length === 3 && logShape[0] * logShape[1] <= maxTexSize &&\n logShape[2] <= maxTexSize) {\n textureShape = [logShape[0] * logShape[1], logShape[2]];\n } else if (\n logShape.length === 3 && logShape[0] <= maxTexSize &&\n logShape[1] * logShape[2] <= maxTexSize) {\n textureShape = [logShape[0], logShape[1] * logShape[2]];\n } else if (\n logShape.length === 4 &&\n logShape[0] * logShape[1] * logShape[2] <= maxTexSize &&\n logShape[3] <= maxTexSize) {\n textureShape = [logShape[0] * logShape[1] * logShape[2], logShape[3]];\n } else if (\n logShape.length === 4 && logShape[0] <= maxTexSize &&\n logShape[1] * logShape[2] * logShape[3] <= maxTexSize) {\n textureShape = [logShape[0], logShape[1] * logShape[2] * logShape[3]];\n }\n\n // true if one edge length is 1 (1 or 2, if packed), while another edge\n // length exceeds maxSizeForNarrowTex.\n const isLongNarrowTex = textureShape != null &&\n Math.max(...textureShape) > maxSizeForNarrowTex &&\n Math.min(...textureShape) <= (isPacked ? 2 : 1) &&\n Math.min(...textureShape) > 0;\n\n if (textureShape == null || isLongNarrowTex) {\n if (isPacked) {\n // For packed textures size equals the number of channels required to\n // accommodate the texture data. However in order to squarify such that\n // inner dimensions stay even, we rewrite size to equal the number of\n // texels. 
Then in the return statement we rehydrate the squarified\n // dimensions to channel units.\n\n const batchDim = getBatchDim(logShape);\n let rows = 2, cols = 2;\n if (logShape.length) {\n [rows, cols] = getRowsCols(logShape);\n }\n size = batchDim * (rows / 2) * (cols / 2);\n textureShape =\n util.sizeToSquarishShape(size).map(d => d * 2) as [number, number];\n } else {\n textureShape = util.sizeToSquarishShape(size);\n }\n }\n\n return textureShape;\n}\n\nfunction isEven(n: number): boolean {\n return n % 2 === 0;\n}\n\n/**\n * This determines whether reshaping a packed texture requires rearranging\n * the data within the texture, assuming 2x2 packing.\n */\nexport function isReshapeFree(shape1: number[], shape2: number[]): boolean {\n shape1 = shape1.slice(-2);\n shape2 = shape2.slice(-2);\n\n if (util.arraysEqual(shape1, shape2)) {\n return true;\n }\n\n if (!shape1.length || !shape2.length) { // One of the shapes is a scalar.\n return true;\n }\n\n if (shape1[0] === 0 || shape1[1] === 0 || shape2[0] === 0 ||\n shape2[1] === 0) {\n return true;\n }\n\n if (shape1.length !== shape2.length) { // One of the shapes is a vector.\n const shape1Cols = shape1.slice(-1)[0];\n const shape2Cols = shape2.slice(-1)[0];\n if (shape1Cols === shape2Cols) {\n return true;\n }\n\n if (isEven(shape1Cols) && isEven(shape2Cols) &&\n (shape1[0] === 1 || shape2[0] === 1)) {\n return true;\n }\n }\n return shape1[1] === shape2[1] && isEven(shape1[0]) && isEven(shape2[0]);\n}\n\n// We cache webgl params because the environment gets reset between\n// unit tests and we don't want to constantly query the WebGLContext for\n// MAX_TEXTURE_SIZE.\nlet MAX_TEXTURE_SIZE: number;\nlet MAX_TEXTURES_IN_SHADER: number;\n\nexport function getWebGLMaxTextureSize(webGLVersion: number): number {\n if (MAX_TEXTURE_SIZE == null) {\n const gl = getWebGLContext(webGLVersion);\n MAX_TEXTURE_SIZE = gl.getParameter(gl.MAX_TEXTURE_SIZE);\n }\n return MAX_TEXTURE_SIZE;\n}\n\nexport function resetMaxTextureSize() {\n MAX_TEXTURE_SIZE = null;\n}\nexport function resetMaxTexturesInShader() {\n MAX_TEXTURES_IN_SHADER = null;\n}\n\nexport function getMaxTexturesInShader(webGLVersion: number): number {\n if (MAX_TEXTURES_IN_SHADER == null) {\n const gl = getWebGLContext(webGLVersion);\n MAX_TEXTURES_IN_SHADER = gl.getParameter(gl.MAX_TEXTURE_IMAGE_UNITS);\n }\n // We cap at 16 to avoid spurious runtime \"memory exhausted\" error.\n return Math.min(16, MAX_TEXTURES_IN_SHADER);\n}\n\nexport function getWebGLDisjointQueryTimerVersion(webGLVersion: number):\n number {\n if (webGLVersion === 0) {\n return 0;\n }\n\n let queryTimerVersion: number;\n const gl = getWebGLContext(webGLVersion);\n\n if (hasExtension(gl, 'EXT_disjoint_timer_query_webgl2') &&\n webGLVersion === 2) {\n queryTimerVersion = 2;\n } else if (hasExtension(gl, 'EXT_disjoint_timer_query')) {\n queryTimerVersion = 1;\n } else {\n queryTimerVersion = 0;\n }\n return queryTimerVersion;\n}\n\nexport function hasExtension(gl: WebGLRenderingContext, extensionName: string) {\n const ext = gl.getExtension(extensionName);\n return ext != null;\n}\n\nexport function isWebGLVersionEnabled(webGLVersion: 1|2) {\n try {\n const gl = getWebGLContext(webGLVersion);\n if (gl != null) {\n return true;\n }\n } catch (e) {\n console.log('Error when getting WebGL context: ', e);\n return false;\n }\n return false;\n}\n\nexport function isCapableOfRenderingToFloatTexture(webGLVersion: number):\n boolean {\n if (webGLVersion === 0) {\n return false;\n }\n\n const gl = getWebGLContext(webGLVersion);\n\n 
if (webGLVersion === 1) {\n if (!hasExtension(gl, 'OES_texture_float')) {\n return false;\n }\n } else {\n if (!hasExtension(gl, 'EXT_color_buffer_float')) {\n return false;\n }\n }\n\n const isFrameBufferComplete = createFloatTextureAndBindToFramebuffer(gl);\n return isFrameBufferComplete;\n}\n\n/**\n * Check if we can download values from a float/half-float texture.\n *\n * Note that for performance reasons we use binding a texture to a framebuffer\n * as a proxy for ability to download float values later using readPixels. The\n * texture params of this texture will not match those in readPixels exactly\n * but if we are unable to bind some kind of float texture to the frameBuffer\n * then we definitely will not be able to read float values from it.\n */\nexport function isDownloadFloatTextureEnabled(webGLVersion: number): boolean {\n if (webGLVersion === 0) {\n return false;\n }\n\n const gl = getWebGLContext(webGLVersion);\n\n if (webGLVersion === 1) {\n if (!hasExtension(gl, 'OES_texture_float')) {\n return false;\n }\n if (!hasExtension(gl, 'WEBGL_color_buffer_float')) {\n return false;\n }\n } else {\n if (hasExtension(gl, 'EXT_color_buffer_float')) {\n return createFloatTextureAndBindToFramebuffer(gl);\n }\n\n const COLOR_BUFFER_HALF_FLOAT = 'EXT_color_buffer_half_float';\n if (hasExtension(gl, COLOR_BUFFER_HALF_FLOAT)) {\n const textureHalfFloatExtension =\n gl.getExtension(COLOR_BUFFER_HALF_FLOAT);\n return createHalfFloatTextureAndBindToFramebuffer(\n gl, textureHalfFloatExtension);\n }\n\n return false;\n }\n\n const isFrameBufferComplete = createFloatTextureAndBindToFramebuffer(gl);\n return isFrameBufferComplete;\n}\n\nfunction createFloatTextureAndBindToFramebuffer(gl: WebGLRenderingContext):\n boolean {\n const texConfig = getTextureConfig(gl);\n\n const texture = gl.createTexture();\n gl.bindTexture(gl.TEXTURE_2D, texture);\n\n const width = 1;\n const height = 1;\n gl.texImage2D(\n gl.TEXTURE_2D, 0, texConfig.internalFormatFloat, width, height, 0,\n texConfig.textureFormatFloat, texConfig.textureTypeFloat, null);\n\n const frameBuffer = gl.createFramebuffer();\n gl.bindFramebuffer(gl.FRAMEBUFFER, frameBuffer);\n gl.framebufferTexture2D(\n gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0);\n\n const isFrameBufferComplete =\n gl.checkFramebufferStatus(gl.FRAMEBUFFER) === gl.FRAMEBUFFER_COMPLETE;\n\n gl.bindTexture(gl.TEXTURE_2D, null);\n gl.bindFramebuffer(gl.FRAMEBUFFER, null);\n gl.deleteTexture(texture);\n gl.deleteFramebuffer(frameBuffer);\n\n return isFrameBufferComplete;\n}\n\nfunction createHalfFloatTextureAndBindToFramebuffer(\n // tslint:disable-next-line:no-any\n gl: WebGLRenderingContext, textureHalfFloatExtension: any): boolean {\n const texConfig = getTextureConfig(gl, textureHalfFloatExtension);\n const texture = gl.createTexture();\n gl.bindTexture(gl.TEXTURE_2D, texture);\n\n const width = 1;\n const height = 1;\n gl.texImage2D(\n gl.TEXTURE_2D, 0, texConfig.internalFormatHalfFloat, width, height, 0,\n texConfig.textureFormatFloat, texConfig.textureTypeHalfFloat, null);\n\n const frameBuffer = gl.createFramebuffer();\n gl.bindFramebuffer(gl.FRAMEBUFFER, frameBuffer);\n gl.framebufferTexture2D(\n gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0);\n\n const isFrameBufferComplete =\n gl.checkFramebufferStatus(gl.FRAMEBUFFER) === gl.FRAMEBUFFER_COMPLETE;\n\n gl.bindTexture(gl.TEXTURE_2D, null);\n gl.bindFramebuffer(gl.FRAMEBUFFER, null);\n gl.deleteTexture(texture);\n gl.deleteFramebuffer(frameBuffer);\n\n return 
isFrameBufferComplete;\n}\n\nexport function isWebGLFenceEnabled(webGLVersion: number) {\n if (webGLVersion !== 2) {\n return false;\n }\n const gl = getWebGLContext(webGLVersion);\n\n // tslint:disable-next-line:no-any\n const isEnabled = (gl as any).fenceSync != null;\n return isEnabled;\n}\n\nexport function assertNotComplex(\n tensor: TensorInfo|TensorInfo[], opName: string): void {\n if (!Array.isArray(tensor)) {\n tensor = [tensor];\n }\n tensor.forEach(t => {\n if (t != null) {\n util.assert(\n t.dtype !== 'complex64',\n () => `${opName} does not support complex64 tensors ` +\n 'in the WebGL backend.');\n }\n });\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {device_util, env} from '@tensorflow/tfjs-core';\n\nimport {getMaxTexturesInShader, getWebGLDisjointQueryTimerVersion, getWebGLMaxTextureSize, isCapableOfRenderingToFloatTexture, isDownloadFloatTextureEnabled, isWebGLFenceEnabled, isWebGLVersionEnabled} from './webgl_util';\n\nconst ENV = env();\n\n/**\n * This file contains WebGL-specific flag registrations.\n */\n\n/**\n * True if WebGL is supported.\n */\nENV.registerFlag('HAS_WEBGL', () => ENV.getNumber('WEBGL_VERSION') > 0);\n\n/** 0: No WebGL, 1: WebGL 1.0, 2: WebGL 2.0. */\nENV.registerFlag('WEBGL_VERSION', () => {\n if (isWebGLVersionEnabled(2)) {\n return 2;\n } else if (isWebGLVersionEnabled(1)) {\n return 1;\n }\n return 0;\n});\n\n/** Whether to check for numerical representation problems. */\nENV.registerFlag('WEBGL_CHECK_NUMERICAL_PROBLEMS', () => false);\n\nENV.registerFlag(\n 'WEBGL_BUFFER_SUPPORTED', () => ENV.get('WEBGL_VERSION') === 2);\n\n/** Whether the WebGL backend will sometimes forward ops to the CPU. */\nENV.registerFlag('WEBGL_CPU_FORWARD', () => true);\n\n/** Whether the WebGL backend will always use f16 textures for rendering. */\nENV.registerFlag('WEBGL_FORCE_F16_TEXTURES', () => false);\n\n/** Whether to turn all packing related flags on. */\nENV.registerFlag('WEBGL_PACK', () => ENV.getBool('HAS_WEBGL'));\n\n/** Whether we will pack the batchnormalization op. */\nENV.registerFlag('WEBGL_PACK_NORMALIZATION', () => ENV.getBool('WEBGL_PACK'));\n\n/** Whether we will pack the clip op. */\nENV.registerFlag('WEBGL_PACK_CLIP', () => ENV.getBool('WEBGL_PACK'));\n\n/** Whether we will pack the depthwise conv op. */\nENV.registerFlag('WEBGL_PACK_DEPTHWISECONV', () => ENV.getBool('WEBGL_PACK'));\n\n/** Whether we will pack binary ops. */\nENV.registerFlag(\n 'WEBGL_PACK_BINARY_OPERATIONS', () => ENV.getBool('WEBGL_PACK'));\n\n/** Whether we will pack unary ops. */\nENV.registerFlag(\n 'WEBGL_PACK_UNARY_OPERATIONS', () => ENV.getBool('WEBGL_PACK'));\n\n/** Whether we will pack array ops. */\nENV.registerFlag(\n 'WEBGL_PACK_ARRAY_OPERATIONS', () => ENV.getBool('WEBGL_PACK'));\n\n/** Whether we will pack image ops. 
*/\nENV.registerFlag(\n 'WEBGL_PACK_IMAGE_OPERATIONS', () => ENV.getBool('WEBGL_PACK'));\n\n/** Whether we will pack reduce ops. */\nENV.registerFlag('WEBGL_PACK_REDUCE', () => ENV.getBool('WEBGL_PACK'));\n\n/** Whether packed WebGL kernels lazily unpack their outputs. */\nENV.registerFlag('WEBGL_LAZILY_UNPACK', () => ENV.getBool('WEBGL_PACK'));\n\n/** Whether we will use the im2col algorithm to speed up convolutions. */\nENV.registerFlag('WEBGL_CONV_IM2COL', () => ENV.getBool('WEBGL_PACK'));\n\n/** The maximum texture dimension. */\nENV.registerFlag(\n 'WEBGL_MAX_TEXTURE_SIZE',\n () => getWebGLMaxTextureSize(ENV.getNumber('WEBGL_VERSION')));\n\n/** The maximum texture dimension. */\nENV.registerFlag(\n 'WEBGL_MAX_TEXTURES_IN_SHADER',\n () => getMaxTexturesInShader(ENV.getNumber('WEBGL_VERSION')));\n\n/**\n * The disjoint_query_timer extension version.\n * 0: disabled, 1: EXT_disjoint_timer_query, 2:\n * EXT_disjoint_timer_query_webgl2.\n * In Firefox with WebGL 2.0,\n * EXT_disjoint_timer_query_webgl2 is not available, so we must use the\n * WebGL 1.0 extension.\n */\nENV.registerFlag('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION', () => {\n const webGLVersion = ENV.getNumber('WEBGL_VERSION');\n\n if (webGLVersion === 0) {\n return 0;\n }\n return getWebGLDisjointQueryTimerVersion(webGLVersion);\n});\n\n/**\n * Whether the timer object from the disjoint_query_timer extension gives\n * timing information that is reliable.\n */\nENV.registerFlag(\n 'WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE',\n () => ENV.getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION') > 0 &&\n !device_util.isMobile());\n\n/**\n * Whether the device is physically capable of rendering to float32 textures.\n */\nENV.registerFlag(\n 'WEBGL_RENDER_FLOAT32_CAPABLE',\n () => isCapableOfRenderingToFloatTexture(ENV.getNumber('WEBGL_VERSION')));\n\n/**\n * Whether rendering to float32 textures is enabled. If disabled, renders to\n * float16 textures.\n */\nENV.registerFlag('WEBGL_RENDER_FLOAT32_ENABLED', () => {\n return ENV.getBool('WEBGL_FORCE_F16_TEXTURES') ?\n false :\n ENV.getBool('WEBGL_RENDER_FLOAT32_CAPABLE');\n});\n\n/**\n * Whether downloading float textures is enabled (16 or 32 bit). If disabled,\n * uses IEEE 754 encoding of the float32 values to 4 uint8 when downloading.\n */\nENV.registerFlag(\n 'WEBGL_DOWNLOAD_FLOAT_ENABLED',\n () => isDownloadFloatTextureEnabled(ENV.getNumber('WEBGL_VERSION')));\n\n/** Whether the fence API is available. */\nENV.registerFlag(\n 'WEBGL_FENCE_API_ENABLED',\n () => isWebGLFenceEnabled(ENV.getNumber('WEBGL_VERSION')));\n\n/**\n * Tensors with size <= than this will be uploaded as uniforms, not textures.\n */\nENV.registerFlag('WEBGL_SIZE_UPLOAD_UNIFORM', () => {\n // Use uniform uploads only when 32bit floats are supported. In\n // 16bit\n // environments there are problems with comparing a 16bit texture value\n // with a 32bit uniform value.\n const useUniforms = ENV.getBool('WEBGL_RENDER_FLOAT32_ENABLED');\n return useUniforms ? 
4 : 0;\n});\n\n/**\n * If the total number of bytes allocated on the GPU is greater than this\n * number, we will aggressively delete textures upon disposal with\n * gl.deleteMatrixTexture, rather than making them available for reuse.\n *\n * Default value -1 indicates that we will never aggressively delete textures.\n */\nENV.registerFlag(\n 'WEBGL_DELETE_TEXTURE_THRESHOLD',\n () => {\n return -1;\n },\n threshold => {\n if (threshold < 0 && threshold !== -1) {\n throw new Error(\n `WEBGL_DELETE_TEXTURE_THRESHOLD must be -1 (indicating never ` +\n `delete) or at least 0, but got ${threshold}.`);\n }\n });\n\n/**\n * Trigger a manual GL command flush if the threshold of time has passed since\n * previous Kernel execution. This can be useful for Andorid device where GL\n * command flush are delayed un til the end of javascript task. This value is\n * measured in millisecond. Typically you want to set this value to close to 1.\n *\n * Default value 1 for mobile chrome, and -1 for rest cases. -1 indicates that\n * we will not enforce manual flush and depend on system default flush schedule.\n */\nENV.registerFlag(\n 'WEBGL_FLUSH_THRESHOLD',\n () => {\n return device_util.isMobile() ? 1 : -1;\n },\n threshold => {\n if (threshold < 0 && threshold !== -1) {\n throw new Error(\n `WEBGL_FLUSH_THRESHOLD must be -1 (indicating never ` +\n `manual flush) or at least 0, but got ${threshold}.`);\n }\n });\n\n/**\n * Threshold for input tensor size that determines whether WebGL backend will\n * delegate computation to CPU.\n *\n * Default value is 128.\n */\nENV.registerFlag('CPU_HANDOFF_SIZE_THRESHOLD', () => 128);\n\n/** Whether we will use shapes uniforms. */\nENV.registerFlag('WEBGL_USE_SHAPES_UNIFORMS', () => false);\n\n/**\n * Threshold for last dimension of input tensor that determines whether\n * WebGL backend for the Top K op will delegate computation to CPU. If input\n * is smaller than threshold then CPU will be used\n *\n * Default value is 100000.\n */\nENV.registerFlag('TOPK_LAST_DIM_CPU_HANDOFF_SIZE_THRESHOLD', () => 100000);\n\n/**\n * Threshold for K that determines whether\n * WebGL backend for the Top K op will delegate computation to CPU. If k\n * is larger than threshold then CPU will be used\n *\n * Default value is 128.\n */\nENV.registerFlag('TOPK_K_CPU_HANDOFF_THRESHOLD', () => 128);\n\n/** Whether we will use the experimental conv op. */\nENV.registerFlag('WEBGL_EXP_CONV', () => false);\n\n/**\n * If the device performance is low or if no hardware GPU is available, whether\n * software WebGL will be used.\n */\nENV.registerFlag('SOFTWARE_WEBGL_ENABLED', () => ENV.getBool('IS_TEST'));\n\n/**\n * For narrow texture (physical height or physical width is 1), if the length of\n * any texture edges exceed the threshold, the texture will be reshaped to be\n * more squarish.\n *\n * This flag is used to help some GPUs that could not provide correct\n * interpolations for long skinny triangles. We found Mali GPU probably has this\n * problem: https://github.com/tensorflow/tfjs/issues/6775.\n */\nENV.registerFlag('WEBGL_MAX_SIZE_FOR_NARROW_TEXTURE', () => Infinity);\n\n/**\n * If the flag is set to true, the max size of the narrow texture will be auto\n * computed and it will be considerred as a threshold to reshape the narrow\n * texture to be more squarish.\n *\n * This flag is used to help some GPUs that could not provide correct\n * interpolations for long skinny triangles. 
We found Mali GPU probably has this\n * problem: https://github.com/tensorflow/tfjs/issues/6775.\n */\nENV.registerFlag('WEBGL_AUTO_SQUARIFY_NARROW_TEXTURE_SHAPE', () => false);\n\n/**\n * Whether to use the customized isnan. It's only useful for webgl2 since webgl1\n * doesn't have the builtin isnan.\n */\nENV.registerFlag('WEBGL2_ISNAN_CUSTOM', () => false);\n\n/** Experimental flag, whether enter compile only phase. */\nENV.registerFlag('ENGINE_COMPILE_ONLY', () => false);\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {env} from '@tensorflow/tfjs-core';\n\nexport type GLSL = {\n version: string,\n attribute: string,\n varyingVs: string,\n varyingFs: string,\n texture2D: string,\n output: string,\n defineOutput: string,\n defineSpecialNaN: string,\n defineSpecialInf: string,\n defineRound: string\n};\n\nexport function getGlslDifferences(): GLSL {\n let version: string;\n let attribute: string;\n let varyingVs: string;\n let varyingFs: string;\n let texture2D: string;\n let output: string;\n let defineOutput: string;\n let defineSpecialNaN: string;\n let defineSpecialInf: string;\n let defineRound: string;\n\n if (env().getNumber('WEBGL_VERSION') === 2) {\n version = '#version 300 es';\n attribute = 'in';\n varyingVs = 'out';\n varyingFs = 'in';\n texture2D = 'texture';\n output = 'outputColor';\n defineOutput = 'out vec4 outputColor;';\n\n // Use custom isnan definition to work across differences between\n // implementations on various platforms. While this should happen in ANGLE\n // we still see differences between android and windows (on chrome) when\n // using isnan directly. Since WebGL2 supports uint type and\n // floatBitsToUinT built-in function, we could implment isnan following\n // IEEE 754 rules.\n // NaN defination in IEEE 754-1985 is :\n // - sign = either 0 or 1.\n // - biased exponent = all 1 bits.\n // - fraction = anything except all 0 bits (since all 0 bits represents\n // infinity).\n // https://en.wikipedia.org/wiki/IEEE_754-1985#Representation_of_non-numbers\n defineSpecialNaN = env().getBool('WEBGL2_ISNAN_CUSTOM') ? 
`\n bool isnan_custom(float val) {\n uint floatToUint = floatBitsToUint(val);\n return (floatToUint & 0x7fffffffu) > 0x7f800000u;\n }\n\n bvec4 isnan_custom(vec4 val) {\n return bvec4(isnan_custom(val.x),\n isnan_custom(val.y), isnan_custom(val.z), isnan_custom(val.w));\n }\n\n #define isnan(value) isnan_custom(value)\n ` :\n '';\n // In webgl 2 we do not need to specify a custom isinf so there is no\n // need for a special INFINITY constant.\n defineSpecialInf = ``;\n defineRound = `\n #define round(value) newRound(value)\n int newRound(float value) {\n return int(floor(value + 0.5));\n }\n\n ivec4 newRound(vec4 value) {\n return ivec4(floor(value + vec4(0.5)));\n }\n `;\n } else {\n version = '';\n attribute = 'attribute';\n varyingVs = 'varying';\n varyingFs = 'varying';\n texture2D = 'texture2D';\n output = 'gl_FragColor';\n defineOutput = '';\n // WebGL1 has no built in isnan so we define one here.\n defineSpecialNaN = `\n #define isnan(value) isnan_custom(value)\n bool isnan_custom(float val) {\n return (val > 0. || val < 1. || val == 0.) ? false : true;\n }\n bvec4 isnan_custom(vec4 val) {\n return bvec4(isnan(val.x), isnan(val.y), isnan(val.z), isnan(val.w));\n }\n `;\n defineSpecialInf = `\n uniform float INFINITY;\n\n bool isinf(float val) {\n return abs(val) == INFINITY;\n }\n bvec4 isinf(vec4 val) {\n return equal(abs(val), vec4(INFINITY));\n }\n `;\n defineRound = `\n int round(float value) {\n return int(floor(value + 0.5));\n }\n\n ivec4 round(vec4 value) {\n return ivec4(floor(value + vec4(0.5)));\n }\n `;\n }\n\n return {\n version,\n attribute,\n varyingVs,\n varyingFs,\n texture2D,\n output,\n defineOutput,\n defineSpecialNaN,\n defineSpecialInf,\n defineRound\n };\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {util} from '@tensorflow/tfjs-core';\n\n/**\n * Produces GLSL code that derives logical coordinates from a flat\n * index. 
The code performs integer division with each stride and decrements\n * the index until the index equals the final dimension coordinate.\n */\nexport function getLogicalCoordinatesFromFlatIndex(\n coords: string[], shape: number[], index = 'index'): string {\n const strides = util.computeStrides(shape);\n return strides\n .map((stride, i) => {\n const line1 = `int ${coords[i]} = ${index} / ${stride}`;\n const line2 = i === strides.length - 1 ?\n `int ${coords[i + 1]} = ${index} - ${coords[i]} * ${stride}` :\n `index -= ${coords[i]} * ${stride}`;\n return `${line1}; ${line2};`;\n })\n .join('');\n}\n\nexport function getOutputLogicalCoordinatesFromFlatIndexByUniform(\n coords: string[], shape: number[], index = 'index'): string {\n const strides = util.computeStrides(shape);\n return strides\n .map((_, i) => {\n const line1 = `int ${coords[i]} = ${index} / outShapeStrides[${i}]`;\n const line2 = i === strides.length - 1 ?\n `int ${coords[i + 1]} = ${index} - ${coords[i]} * outShapeStrides[${\n i}]` :\n `index -= ${coords[i]} * outShapeStrides[${i}]`;\n return `${line1}; ${line2};`;\n })\n .join('');\n}\n\n// Produces GLSL code that computes strides.\nfunction symbolicallyComputeStrides(\n indicesArr: number[], variableName: string): string[] {\n const numCoords = indicesArr.length;\n const shape = indicesArr.map(d => `${variableName}[${d}]`);\n const strides = new Array(numCoords - 1);\n strides[numCoords - 2] = shape[numCoords - 1];\n for (let i = numCoords - 3; i >= 0; --i) {\n strides[i] = `(${strides[i + 1]} * ${shape[i + 1]})`;\n }\n\n return strides;\n}\n\nexport function getLogicalCoordinatesFromFlatIndexByUniform(\n coords: string[], variableName: string, index = 'index'): string {\n const indicesArray = coords.map((_, i) => i);\n const strides = symbolicallyComputeStrides(indicesArray, variableName);\n return strides\n .map((_, i) => {\n const line1 = `int ${coords[i]} = ${index} / ${strides[i]}`;\n const line2 = i === strides.length - 1 ?\n `int ${coords[i + 1]} = ${index} - ${coords[i]} * ${strides[i]}` :\n `index -= ${coords[i]} * ${strides[i]}`;\n return `${line1}; ${line2};`;\n })\n .join('');\n}\n\nfunction buildVec(x: string[]): string {\n if (x.length === 1) {\n return `${x[0]}`;\n }\n return `vec${x.length}(${x.join(',')})`;\n}\n\n/**\n * Produces GLSL code that computes the dot product of the input x and y\n * vectors. 
Handles splitting inputs into increments of vec4s when necessary.\n */\nexport function dotify(x: string[], y: string[]): string {\n if (x.length !== y.length) {\n throw new Error(\n `Vectors to be dotted must be of the same length -` +\n `got ${x.length} and ${y.length}`);\n }\n\n const slices: string[] = [];\n const nearestVec4 = Math.floor(x.length / 4);\n const nearestVec4Remainder = x.length % 4;\n\n for (let i = 0; i < nearestVec4; i++) {\n const xSlice = x.slice(i * 4, i * 4 + 4);\n const ySlice = y.slice(i * 4, i * 4 + 4);\n slices.push(`${buildVec(xSlice)}, ${buildVec(ySlice)}`);\n }\n\n if (nearestVec4Remainder !== 0) {\n let xSlice = x.slice(nearestVec4 * 4);\n let ySlice = y.slice(nearestVec4 * 4);\n if (xSlice.length === 1) {\n xSlice = xSlice.map(d => `float(${d})`);\n ySlice = ySlice.map(d => `float(${d})`);\n }\n slices.push(`${buildVec(xSlice)}, ${buildVec(ySlice)}`);\n }\n\n return slices.map((d, i) => `dot(${d})`).join('+');\n}\n\n/**\n * Produces GLSL that computes the flat index from 3D coordinates.\n */\nexport function getFlatIndexFrom3D(shape: [number, number, number]): string {\n const strides = util.computeStrides(shape).map(d => d.toString());\n\n return `\n int getFlatIndex(ivec3 coords) {\n return coords.x * ${strides[0]} + coords.y * ${strides[1]} + coords.z;\n }\n`;\n}\n\nexport function getFlatIndexFrom3DOutput(): string {\n return `\n int getFlatIndex(ivec3 coords) {\n return coords.x * outShapeStrides[0] + coords.y * outShapeStrides[1] + coords.z;\n }\n`;\n}\n\nexport const ENCODE_FLOAT_SNIPPET = `\n const float FLOAT_MAX = 1.70141184e38;\n const float FLOAT_MIN = 1.17549435e-38;\n\n lowp vec4 encode_float(highp float v) {\n if (isnan(v)) {\n return vec4(255, 255, 255, 255);\n }\n\n highp float av = abs(v);\n\n if(av < FLOAT_MIN) {\n return vec4(0.0, 0.0, 0.0, 0.0);\n } else if(v > FLOAT_MAX) {\n return vec4(0.0, 0.0, 128.0, 127.0) / 255.0;\n } else if(v < -FLOAT_MAX) {\n return vec4(0.0, 0.0, 128.0, 255.0) / 255.0;\n }\n\n highp vec4 c = vec4(0,0,0,0);\n\n highp float e = floor(log2(av));\n highp float m = exp2(fract(log2(av))) - 1.0;\n\n c[2] = floor(128.0 * m);\n m -= c[2] / 128.0;\n c[1] = floor(32768.0 * m);\n m -= c[1] / 32768.0;\n c[0] = floor(8388608.0 * m);\n\n highp float ebias = e + 127.0;\n c[3] = floor(ebias / 2.0);\n ebias -= c[3] * 2.0;\n c[2] += floor(ebias) * 128.0;\n\n c[3] += 128.0 * step(0.0, -v);\n\n return c / 255.0;\n }\n`;\n","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// Please make sure the shaker key in makeShaderKey in gpgpu_math.ts is well\n// mapped if any shader source code is changed in this file.\n\nimport {backend_util, util} from '@tensorflow/tfjs-core';\nconst {getBroadcastDims} = backend_util;\nimport {getGlslDifferences, GLSL} from './glsl_version';\nimport * as shader_util from './shader_compiler_util';\n\nexport type ShapeInfo = {\n logicalShape: number[],\n texShape: [number, number],\n isUniform: boolean,\n isPacked: boolean,\n flatOffset: number\n};\n\nexport type InputInfo = {\n name: string,\n shapeInfo: ShapeInfo\n};\n\nexport type UniformType =\n 'float'|'vec2'|'vec3'|'vec4'|'int'|'ivec2'|'ivec3'|'ivec4';\n\ninterface ProgramParams {\n userCode: string;\n enableShapeUniforms?: boolean;\n packedInputs?: boolean;\n customUniforms?:\n Array<{name: string; arrayIndex?: number; type: UniformType;}>;\n}\n\nexport function makeShader(\n inputsInfo: InputInfo[], outputShape: ShapeInfo,\n program: ProgramParams): string {\n const prefixSnippets: string[] = [];\n inputsInfo.forEach(x => {\n const size = util.sizeFromShape(x.shapeInfo.logicalShape);\n\n // Snippet when we decided to upload the values as uniform.\n if (x.shapeInfo.isUniform) {\n prefixSnippets.push(\n `uniform float ${x.name}${size > 1 ? `[${size}]` : ''};`);\n } else {\n prefixSnippets.push(`uniform sampler2D ${x.name};`);\n prefixSnippets.push(`uniform int offset${x.name};`);\n }\n\n if (program.enableShapeUniforms) {\n const {uniformShape} = getUniformInfoFromShape(\n program.packedInputs, x.shapeInfo.logicalShape, x.shapeInfo.texShape);\n switch (uniformShape.length) {\n case 1:\n prefixSnippets.push(`uniform int ${x.name}Shape;`);\n break;\n case 2:\n prefixSnippets.push(`uniform ivec2 ${x.name}Shape;`);\n break;\n case 3:\n prefixSnippets.push(`uniform ivec3 ${x.name}Shape;`);\n break;\n case 4:\n prefixSnippets.push(`uniform ivec4 ${x.name}Shape;`);\n break;\n default:\n break;\n }\n prefixSnippets.push(`uniform ivec2 ${x.name}TexShape;`);\n }\n });\n\n if (program.enableShapeUniforms) {\n switch (outputShape.logicalShape.length) {\n case 1:\n prefixSnippets.push(`uniform int outShape;`);\n break;\n case 2:\n prefixSnippets.push(`uniform ivec2 outShape;`);\n prefixSnippets.push(`uniform int outShapeStrides;`);\n break;\n case 3:\n prefixSnippets.push(`uniform ivec3 outShape;`);\n prefixSnippets.push(`uniform ivec2 outShapeStrides;`);\n break;\n case 4:\n prefixSnippets.push(`uniform ivec4 outShape;`);\n prefixSnippets.push(`uniform ivec3 outShapeStrides;`);\n break;\n default:\n break;\n }\n prefixSnippets.push(`uniform ivec2 outTexShape;`);\n }\n if (program.customUniforms) {\n program.customUniforms.forEach((d) => {\n prefixSnippets.push(`uniform ${d.type} ${d.name}${\n d.arrayIndex ? 
`[${d.arrayIndex}]` : ''};`);\n });\n }\n const inputPrefixSnippet = prefixSnippets.join('\\n');\n\n const inputSamplingSnippet = inputsInfo\n .map(\n x => getInputSamplingSnippet(\n x, outputShape, program.packedInputs,\n program.enableShapeUniforms))\n .join('\\n');\n const outTexShape = outputShape.texShape;\n const glsl = getGlslDifferences();\n const floatTextureSampleSnippet = getFloatTextureSampleSnippet(glsl);\n let outputSamplingSnippet: string;\n let floatTextureSetOutputSnippet: string;\n let shaderPrefix = getShaderPrefix(glsl);\n\n if (outputShape.isPacked) {\n outputSamplingSnippet = getPackedOutputSamplingSnippet(\n outputShape.logicalShape, outTexShape, program.enableShapeUniforms);\n floatTextureSetOutputSnippet = getFloatTextureSetRGBASnippet(glsl);\n } else {\n outputSamplingSnippet = getOutputSamplingSnippet(\n outputShape.logicalShape, outTexShape, program.enableShapeUniforms);\n floatTextureSetOutputSnippet = getFloatTextureSetRSnippet(glsl);\n }\n\n if (program.packedInputs) {\n shaderPrefix += SHADER_PACKED_PREFIX;\n }\n\n const source = [\n shaderPrefix, floatTextureSampleSnippet, floatTextureSetOutputSnippet,\n inputPrefixSnippet, outputSamplingSnippet, inputSamplingSnippet,\n program.userCode\n ].join('\\n');\n return source;\n}\n\nfunction getSamplerFromInInfo(\n inInfo: InputInfo, enableShapeUniforms = false): string {\n const shape = inInfo.shapeInfo.logicalShape;\n switch (shape.length) {\n case 0:\n return getSamplerScalar(inInfo, enableShapeUniforms);\n case 1:\n return getSampler1D(inInfo, enableShapeUniforms);\n case 2:\n return getSampler2D(inInfo, enableShapeUniforms);\n case 3:\n return getSampler3D(inInfo, enableShapeUniforms);\n case 4:\n return getSampler4D(inInfo, enableShapeUniforms);\n case 5:\n return getSampler5D(inInfo);\n case 6:\n return getSampler6D(inInfo);\n default:\n throw new Error(\n `${shape.length}-D input sampling` +\n ` is not yet supported`);\n }\n}\n\nfunction getPackedSamplerFromInInfo(\n inInfo: InputInfo, enableShapeUniforms: boolean): string {\n const shape = inInfo.shapeInfo.logicalShape;\n switch (shape.length) {\n case 0:\n return getPackedSamplerScalar(inInfo);\n case 1:\n return getPackedSampler1D(inInfo, enableShapeUniforms);\n case 2:\n return getPackedSampler2D(inInfo, enableShapeUniforms);\n case 3:\n return getPackedSampler3D(inInfo, enableShapeUniforms);\n default:\n return getPackedSamplerND(inInfo, enableShapeUniforms);\n }\n}\n\nfunction getInputSamplingSnippet(\n inInfo: InputInfo, outShapeInfo: ShapeInfo, usesPackedTextures = false,\n enableShapeUniforms: boolean): string {\n let res = '';\n if (usesPackedTextures) {\n res += getPackedSamplerFromInInfo(inInfo, enableShapeUniforms);\n } else {\n res += getSamplerFromInInfo(inInfo, enableShapeUniforms);\n }\n\n const inShape = inInfo.shapeInfo.logicalShape;\n const outShape = outShapeInfo.logicalShape;\n if (inShape.length <= outShape.length) {\n if (usesPackedTextures) {\n res += getPackedSamplerAtOutputCoords(inInfo, outShapeInfo);\n } else {\n res += getSamplerAtOutputCoords(inInfo, outShapeInfo);\n }\n }\n return res;\n}\n\nfunction getPackedOutputSamplingSnippet(\n outShape: number[], outTexShape: [number, number],\n enableShapeUniforms: boolean): string {\n switch (outShape.length) {\n case 0:\n return getOutputScalarCoords();\n case 1:\n return getOutputPacked1DCoords(\n outShape as [number], outTexShape, enableShapeUniforms);\n case 2:\n return getOutputPacked2DCoords(\n outShape as [number, number], outTexShape, enableShapeUniforms);\n case 3:\n return 
getOutputPacked3DCoords(\n outShape as [number, number, number], outTexShape,\n enableShapeUniforms);\n default:\n return getOutputPackedNDCoords(\n outShape, outTexShape, enableShapeUniforms);\n }\n}\n\nfunction getOutputSamplingSnippet(\n outShape: number[], outTexShape: [number, number],\n enableShapeUniforms: boolean): string {\n switch (outShape.length) {\n case 0:\n return getOutputScalarCoords();\n case 1:\n return getOutput1DCoords(\n outShape as [number], outTexShape, enableShapeUniforms);\n case 2:\n return getOutput2DCoords(\n outShape as [number, number], outTexShape, enableShapeUniforms);\n case 3:\n return getOutput3DCoords(\n outShape as [number, number, number], outTexShape,\n enableShapeUniforms);\n case 4:\n return getOutput4DCoords(\n outShape as [number, number, number, number], outTexShape,\n enableShapeUniforms);\n case 5:\n return getOutput5DCoords(\n outShape as [number, number, number, number, number], outTexShape);\n case 6:\n return getOutput6DCoords(\n outShape as [number, number, number, number, number, number],\n outTexShape);\n default:\n throw new Error(\n `${outShape.length}-D output sampling is not yet supported`);\n }\n}\n\nfunction getFloatTextureSampleSnippet(glsl: GLSL): string {\n return `\n float sampleTexture(sampler2D textureSampler, vec2 uv) {\n return ${glsl.texture2D}(textureSampler, uv).r;\n }\n `;\n}\n\nfunction getFloatTextureSetRSnippet(glsl: GLSL): string {\n return `\n void setOutput(float val) {\n ${glsl.output} = vec4(val, 0, 0, 0);\n }\n `;\n}\n\nfunction getFloatTextureSetRGBASnippet(glsl: GLSL): string {\n return `\n void setOutput(vec4 val) {\n ${glsl.output} = val;\n }\n `;\n}\n\nfunction getShaderPrefix(glsl: GLSL): string {\n const SHADER_PREFIX = `${glsl.version}\n precision highp float;\n precision highp int;\n precision highp sampler2D;\n ${glsl.varyingFs} vec2 resultUV;\n ${glsl.defineOutput}\n const vec2 halfCR = vec2(0.5, 0.5);\n\n struct ivec5\n {\n int x;\n int y;\n int z;\n int w;\n int u;\n };\n\n struct ivec6\n {\n int x;\n int y;\n int z;\n int w;\n int u;\n int v;\n };\n\n uniform float NAN;\n ${glsl.defineSpecialNaN}\n ${glsl.defineSpecialInf}\n ${glsl.defineRound}\n\n int imod(int x, int y) {\n return x - y * (x / y);\n }\n\n int idiv(int a, int b, float sign) {\n int res = a / b;\n int mod = imod(a, b);\n if (sign < 0. 
&& mod != 0) {\n res -= 1;\n }\n return res;\n }\n\n //Based on the work of Dave Hoskins\n //https://www.shadertoy.com/view/4djSRW\n #define HASHSCALE1 443.8975\n float random(float seed){\n vec2 p = resultUV * seed;\n vec3 p3 = fract(vec3(p.xyx) * HASHSCALE1);\n p3 += dot(p3, p3.yzx + 19.19);\n return fract((p3.x + p3.y) * p3.z);\n }\n\n ${SAMPLE_1D_SNIPPET}\n ${SAMPLE_2D_SNIPPET}\n ${SAMPLE_3D_SNIPPET}\n `;\n\n return SHADER_PREFIX;\n}\n\nconst SAMPLE_1D_SNIPPET = `\nvec2 uvFromFlat(int texNumR, int texNumC, int index) {\n int texR = index / texNumC;\n int texC = index - texR * texNumC;\n return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR);\n}\nvec2 packedUVfrom1D(int texNumR, int texNumC, int index) {\n int texelIndex = index / 2;\n int texR = texelIndex / texNumC;\n int texC = texelIndex - texR * texNumC;\n return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR);\n}\n`;\n\nconst SAMPLE_2D_SNIPPET = `\nvec2 packedUVfrom2D(int texelsInLogicalRow, int texNumR,\n int texNumC, int row, int col) {\n int texelIndex = (row / 2) * texelsInLogicalRow + (col / 2);\n int texR = texelIndex / texNumC;\n int texC = texelIndex - texR * texNumC;\n return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR);\n}\n`;\n\nconst SAMPLE_3D_SNIPPET = `\nvec2 packedUVfrom3D(int texNumR, int texNumC,\n int texelsInBatch, int texelsInLogicalRow, int b,\n int row, int col) {\n int index = b * texelsInBatch + (row / 2) * texelsInLogicalRow + (col / 2);\n int texR = index / texNumC;\n int texC = index - texR * texNumC;\n return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR);\n}\n`;\n\nconst SHADER_PACKED_PREFIX = `\n float getChannel(vec4 frag, vec2 innerDims) {\n vec2 modCoord = mod(innerDims, 2.);\n return modCoord.x == 0. ?\n (modCoord.y == 0. ? frag.r : frag.g) :\n (modCoord.y == 0. ? frag.b : frag.a);\n }\n float getChannel(vec4 frag, int dim) {\n float modCoord = mod(float(dim), 2.);\n return modCoord == 0. ? 
frag.r : frag.g;\n }\n`;\n\nfunction getOutputScalarCoords() {\n return `\n int getOutputCoords() {\n return 0;\n }\n `;\n}\n\nfunction getOutputPacked1DCoords(\n shape: [number], texShape: [number, number],\n enableShapeUniforms: boolean): string {\n const packedTexShape =\n [Math.ceil(texShape[0] / 2), Math.ceil(texShape[1] / 2)];\n if (packedTexShape[0] === 1) {\n if (enableShapeUniforms) {\n return `\n int getOutputCoords() {\n return 2 * int(resultUV.x * ceil(float(outTexShape[1]) / 2.0));\n }\n `;\n }\n\n return `\n int getOutputCoords() {\n return 2 * int(resultUV.x * ${packedTexShape[1]}.0);\n }\n `;\n }\n\n if (packedTexShape[1] === 1) {\n if (enableShapeUniforms) {\n return `\n int getOutputCoords() {\n return 2 * int(resultUV.y * ceil(float(outTexShape[0]) / 2.0));\n }\n `;\n }\n\n return `\n int getOutputCoords() {\n return 2 * int(resultUV.y * ${packedTexShape[0]}.0);\n }\n `;\n }\n\n if (enableShapeUniforms) {\n return `\n int getOutputCoords() {\n ivec2 packedTexShape = ivec2(ceil(float(outTexShape[0]) / 2.0), ceil(float(outTexShape[1]) / 2.0));\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(packedTexShape[0], packedTexShape[1]));\n return 2 * (resTexRC.x * packedTexShape[1] + resTexRC.y);\n }\n `;\n }\n\n return `\n int getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${packedTexShape[0]}, ${packedTexShape[1]}));\n return 2 * (resTexRC.x * ${packedTexShape[1]} + resTexRC.y);\n }\n `;\n}\n\nfunction getOutput1DCoords(\n shape: [number], texShape: [number, number],\n enableShapeUniforms: boolean): string {\n if (texShape[0] === 1) {\n if (enableShapeUniforms) {\n return `\n int getOutputCoords() {\n return int(resultUV.x * float(outTexShape[1]));\n }\n `;\n }\n return `\n int getOutputCoords() {\n return int(resultUV.x * ${texShape[1]}.0);\n }\n `;\n }\n if (texShape[1] === 1) {\n if (enableShapeUniforms) {\n return `\n int getOutputCoords() {\n return int(resultUV.y * float(outTexShape[0]));\n }\n `;\n }\n return `\n int getOutputCoords() {\n return int(resultUV.y * ${texShape[0]}.0);\n }\n `;\n }\n if (enableShapeUniforms) {\n return `\n int getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(outTexShape[0], outTexShape[1]));\n return resTexRC.x * outTexShape[1] + resTexRC.y;\n }\n `;\n }\n return `\n int getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${texShape[0]}, ${texShape[1]}));\n return resTexRC.x * ${texShape[1]} + resTexRC.y;\n }\n `;\n}\n\nfunction getOutputPacked3DCoords(\n shape: [number, number, number], texShape: [number, number],\n enableShapeUniforms: boolean): string {\n if (enableShapeUniforms) {\n return `\n ivec3 getOutputCoords() {\n ivec2 packedTexShape = ivec2(ceil(float(outTexShape[0]) / 2.0), ceil(float(outTexShape[1]) / 2.0));\n int texelsInLogicalRow = int(ceil(float(outShape[2]) / 2.0));\n int texelsInBatch = texelsInLogicalRow * int(ceil(float(outShape[1]) / 2.0));\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(packedTexShape[0], packedTexShape[1]));\n int index = resTexRC.x * packedTexShape[1] + resTexRC.y;\n\n int b = index / texelsInBatch;\n index -= b * texelsInBatch;\n\n int r = 2 * (index / texelsInLogicalRow);\n int c = imod(index, texelsInLogicalRow) * 2;\n\n return ivec3(b, r, c);\n }\n `;\n }\n\n const packedTexShape =\n [Math.ceil(texShape[0] / 2), Math.ceil(texShape[1] / 2)];\n const texelsInLogicalRow = Math.ceil(shape[2] / 2);\n const texelsInBatch = texelsInLogicalRow * Math.ceil(shape[1] / 2);\n\n return `\n ivec3 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n 
vec2(${packedTexShape[0]}, ${packedTexShape[1]}));\n int index = resTexRC.x * ${packedTexShape[1]} + resTexRC.y;\n\n int b = index / ${texelsInBatch};\n index -= b * ${texelsInBatch};\n\n int r = 2 * (index / ${texelsInLogicalRow});\n int c = imod(index, ${texelsInLogicalRow}) * 2;\n\n return ivec3(b, r, c);\n }\n `;\n}\n\nfunction getOutput3DCoords(\n shape: [number, number, number], texShape: [number, number],\n enableShapeUniforms: boolean): string {\n if (enableShapeUniforms) {\n const coordsFromIndexSnippet =\n shader_util.getOutputLogicalCoordinatesFromFlatIndexByUniform(\n ['r', 'c', 'd'], shape);\n\n return `\n ivec3 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(outTexShape[0], outTexShape[1]));\n int index = resTexRC.x * outTexShape[1] + resTexRC.y;\n ${coordsFromIndexSnippet}\n return ivec3(r, c, d);\n }\n`;\n }\n const coordsFromIndexSnippet =\n shader_util.getLogicalCoordinatesFromFlatIndex(['r', 'c', 'd'], shape);\n\n return `\n ivec3 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${texShape[0]}, ${texShape[1]}));\n int index = resTexRC.x * ${texShape[1]} + resTexRC.y;\n ${coordsFromIndexSnippet}\n return ivec3(r, c, d);\n }\n `;\n}\n\nfunction getOutputPackedNDCoords(\n shape: number[], texShape: [number, number],\n enableShapeUniforms: boolean): string {\n if (enableShapeUniforms) {\n // TODO: support 5d and 6d\n return `\n ivec4 getOutputCoords() {\n ivec2 packedTexShape = ivec2(ceil(float(outTexShape[0]) / 2.0), ceil(float(outTexShape[1]) / 2.0));\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(packedTexShape[0], packedTexShape[1]));\n int index = resTexRC.x * packedTexShape[1] + resTexRC.y;\n\n int texelsInLogicalRow = int(ceil(float(outShape[3]) / 2.0));\n int texelsInBatch = texelsInLogicalRow * int(ceil(float(outShape[2]) / 2.0));\n int texelsInBatchN = texelsInBatch * outShape[1];\n\n int b2 = index / texelsInBatchN;\n index -= b2 * texelsInBatchN;\n\n int b = index / texelsInBatch;\n index -= b * texelsInBatch;\n\n int r = 2 * (index / texelsInLogicalRow);\n int c = imod(index, texelsInLogicalRow) * 2;\n\n return ivec4(b2, b, r, c);\n }\n `;\n }\n const packedTexShape =\n [Math.ceil(texShape[0] / 2), Math.ceil(texShape[1] / 2)];\n\n const texelsInLogicalRow = Math.ceil(shape[shape.length - 1] / 2);\n const texelsInBatch =\n texelsInLogicalRow * Math.ceil(shape[shape.length - 2] / 2);\n let texelsInBatchN = texelsInBatch;\n let batches = ``;\n let coords = 'b, r, c';\n\n for (let b = 2; b < shape.length - 1; b++) {\n texelsInBatchN *= shape[shape.length - b - 1];\n batches = `\n int b${b} = index / ${texelsInBatchN};\n index -= b${b} * ${texelsInBatchN};\n ` + batches;\n coords = `b${b}, ` + coords;\n }\n\n return `\n ivec${shape.length} getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${packedTexShape[0]}, ${packedTexShape[1]}));\n int index = resTexRC.x * ${packedTexShape[1]} + resTexRC.y;\n\n ${batches}\n\n int b = index / ${texelsInBatch};\n index -= b * ${texelsInBatch};\n\n int r = 2 * (index / ${texelsInLogicalRow});\n int c = imod(index, ${texelsInLogicalRow}) * 2;\n\n return ivec${shape.length}(${coords});\n }\n `;\n}\n\nfunction getOutput4DCoords(\n shape: [number, number, number, number], texShape: [number, number],\n enableShapeUniforms: boolean): string {\n if (enableShapeUniforms) {\n const coordsFromIndexSnippet =\n shader_util.getOutputLogicalCoordinatesFromFlatIndexByUniform(\n ['r', 'c', 'd', 'd2'], shape);\n\n return `\n ivec4 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n 
vec2(outTexShape[0], outTexShape[1]));\n int index = resTexRC.x * outTexShape[1] + resTexRC.y;\n ${coordsFromIndexSnippet}\n return ivec4(r, c, d, d2);\n }\n `;\n }\n const coordsFromIndexSnippet = shader_util.getLogicalCoordinatesFromFlatIndex(\n ['r', 'c', 'd', 'd2'], shape);\n\n return `\n ivec4 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${texShape[0]}, ${texShape[1]}));\n int index = resTexRC.x * ${texShape[1]} + resTexRC.y;\n ${coordsFromIndexSnippet}\n return ivec4(r, c, d, d2);\n }\n `;\n}\n\nfunction getOutput5DCoords(\n shape: [number, number, number, number, number],\n texShape: [number, number]): string {\n const coordsFromIndexSnippet = shader_util.getLogicalCoordinatesFromFlatIndex(\n ['r', 'c', 'd', 'd2', 'd3'], shape);\n\n return `\n ivec5 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx * vec2(${texShape[0]},\n ${texShape[1]}));\n\n int index = resTexRC.x * ${texShape[1]} + resTexRC.y;\n\n ${coordsFromIndexSnippet}\n\n ivec5 outShape = ivec5(r, c, d, d2, d3);\n return outShape;\n }\n `;\n}\n\nfunction getOutput6DCoords(\n shape: [number, number, number, number, number, number],\n texShape: [number, number]): string {\n const coordsFromIndexSnippet = shader_util.getLogicalCoordinatesFromFlatIndex(\n ['r', 'c', 'd', 'd2', 'd3', 'd4'], shape);\n\n return `\n ivec6 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${texShape[0]}, ${texShape[1]}));\n int index = resTexRC.x * ${texShape[1]} + resTexRC.y;\n\n ${coordsFromIndexSnippet}\n\n ivec6 result = ivec6(r, c, d, d2, d3, d4);\n return result;\n }\n `;\n}\n\nfunction getOutputPacked2DCoords(\n shape: [number, number], texShape: [number, number],\n enableShapeUniforms: boolean): string {\n const packedTexShape =\n [Math.ceil(texShape[0] / 2), Math.ceil(texShape[1] / 2)];\n if (util.arraysEqual(shape, texShape)) {\n if (enableShapeUniforms) {\n return `\n ivec2 getOutputCoords() {\n ivec2 packedTexShape = ivec2(ceil(float(outTexShape[0]) / 2.0), ceil(float(outTexShape[1]) / 2.0));\n return 2 * ivec2(resultUV.yx * vec2(packedTexShape[0], packedTexShape[1]));\n }\n `;\n }\n\n return `\n ivec2 getOutputCoords() {\n return 2 * ivec2(resultUV.yx * vec2(${packedTexShape[0]}, ${\n packedTexShape[1]}));\n }\n `;\n }\n\n // texels needed to accommodate a logical row\n const texelsInLogicalRow = Math.ceil(shape[1] / 2);\n\n /**\n * getOutputCoords\n *\n * resTexRC: The rows and columns of the texels. 
If you move over one\n * texel to the right in the packed texture, you are moving over one column\n * (not two).\n *\n * index: The texel index\n */\n if (enableShapeUniforms) {\n return `\n ivec2 getOutputCoords() {\n ivec2 packedTexShape = ivec2(ceil(float(outTexShape[0]) / 2.0), ceil(float(outTexShape[1]) / 2.0));\n int texelsInLogicalRow = int(ceil(float(outShape[1]) / 2.0));\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(packedTexShape[0], packedTexShape[1]));\n\n int index = resTexRC.x * packedTexShape[1] + resTexRC.y;\n int r = 2 * (index / texelsInLogicalRow);\n int c = imod(index, texelsInLogicalRow) * 2;\n\n return ivec2(r, c);\n }\n `;\n }\n\n return `\n ivec2 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${packedTexShape[0]}, ${packedTexShape[1]}));\n\n int index = resTexRC.x * ${packedTexShape[1]} + resTexRC.y;\n int r = 2 * (index / ${texelsInLogicalRow});\n int c = imod(index, ${texelsInLogicalRow}) * 2;\n\n return ivec2(r, c);\n }\n `;\n}\n\nfunction getOutput2DCoords(\n shape: [number, number], texShape: [number, number],\n enableShapeUniforms: boolean): string {\n if (util.arraysEqual(shape, texShape)) {\n if (enableShapeUniforms) {\n return `\n ivec2 getOutputCoords() {\n return ivec2(resultUV.yx * vec2(outTexShape[0], outTexShape[1]));\n }\n `;\n }\n return `\n ivec2 getOutputCoords() {\n return ivec2(resultUV.yx * vec2(${texShape[0]}, ${texShape[1]}));\n }\n `;\n }\n if (shape[1] === 1) {\n if (enableShapeUniforms) {\n return `\n ivec2 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(outTexShape[0], outTexShape[1]));\n int index = resTexRC.x * outTexShape[1] + resTexRC.y;\n return ivec2(index, 0);\n }\n `;\n }\n return `\n ivec2 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${texShape[0]}, ${texShape[1]}));\n int index = resTexRC.x * ${texShape[1]} + resTexRC.y;\n return ivec2(index, 0);\n }\n `;\n }\n if (shape[0] === 1) {\n if (enableShapeUniforms) {\n return `\n ivec2 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(outTexShape[0], outTexShape[1]));\n int index = resTexRC.x * outTexShape[1] + resTexRC.y;\n return ivec2(0, index);\n }\n `;\n }\n return `\n ivec2 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${texShape[0]}, ${texShape[1]}));\n int index = resTexRC.x * ${texShape[1]} + resTexRC.y;\n return ivec2(0, index);\n }\n `;\n }\n if (enableShapeUniforms) {\n return `\n ivec2 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(outTexShape[0], outTexShape[1]));\n int index = resTexRC.x * outTexShape[1] + resTexRC.y;\n int r = index / outShape[1];\n int c = index - r * outShape[1];\n return ivec2(r, c);\n }\n `;\n }\n return `\n ivec2 getOutputCoords() {\n ivec2 resTexRC = ivec2(resultUV.yx *\n vec2(${texShape[0]}, ${texShape[1]}));\n int index = resTexRC.x * ${texShape[1]} + resTexRC.y;\n int r = index / ${shape[1]};\n int c = index - r * ${shape[1]};\n return ivec2(r, c);\n }\n `;\n}\n\nfunction getFlatOffsetUniformName(texName: string): string {\n return `offset${texName}`;\n}\n\nfunction getPackedSamplerScalar(inputInfo: InputInfo): string {\n const texName = inputInfo.name;\n const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1);\n const glsl = getGlslDifferences();\n return `\n vec4 ${funcName}() {\n return ${glsl.texture2D}(${texName}, halfCR);\n }\n `;\n}\n\nfunction getSamplerScalar(\n inputInfo: InputInfo, enableShapeUniforms: boolean): string {\n const texName = inputInfo.name;\n const funcName = 'get' + 
texName.charAt(0).toUpperCase() + texName.slice(1);\n if (inputInfo.shapeInfo.isUniform) {\n return `float ${funcName}() {return ${texName};}`;\n }\n const [texNumR, texNumC] = inputInfo.shapeInfo.texShape;\n if (texNumR === 1 && texNumC === 1) {\n return `\n float ${funcName}() {\n return sampleTexture(${texName}, halfCR);\n }\n `;\n }\n\n const offset = getFlatOffsetUniformName(texName);\n if (enableShapeUniforms) {\n return `\n float ${funcName}() {\n vec2 uv = uvFromFlat(${texName}TexShape[0], ${texName}TexShape[1], ${\n offset});\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n\n const [tNumR, tNumC] = inputInfo.shapeInfo.texShape;\n return `\n float ${funcName}() {\n vec2 uv = uvFromFlat(${tNumR}, ${tNumC}, ${offset});\n return sampleTexture(${texName}, uv);\n }\n `;\n}\n\nfunction getPackedSampler1D(\n inputInfo: InputInfo, enableShapeUniforms: boolean): string {\n const texName = inputInfo.name;\n const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1);\n const texShape = inputInfo.shapeInfo.texShape;\n const glsl = getGlslDifferences();\n if (enableShapeUniforms) {\n return `\n vec4 ${funcName}(int index) {\n ivec2 packedTexShape = ivec2(ceil(float(${\n texName}TexShape[0]) / 2.0), ceil(float(${texName}TexShape[1]) / 2.0));\n vec2 uv = packedUVfrom1D(\n packedTexShape[0], packedTexShape[1], index);\n return ${glsl.texture2D}(${texName}, uv);\n }\n `;\n }\n const packedTexShape =\n [Math.ceil(texShape[0] / 2), Math.ceil(texShape[1] / 2)];\n return `\n vec4 ${funcName}(int index) {\n vec2 uv = packedUVfrom1D(\n ${packedTexShape[0]}, ${packedTexShape[1]}, index);\n return ${glsl.texture2D}(${texName}, uv);\n }\n `;\n}\n\nfunction getSampler1D(\n inputInfo: InputInfo, enableShapeUniforms: boolean): string {\n const texName = inputInfo.name;\n const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1);\n\n if (inputInfo.shapeInfo.isUniform) {\n // Uniform arrays will be less than 65505 (no risk of float16 overflow).\n return `\n float ${funcName}(int index) {\n ${getUniformSampler(inputInfo)}\n }\n `;\n }\n\n const texShape = inputInfo.shapeInfo.texShape;\n const tNumR = texShape[0];\n const tNumC = texShape[1];\n\n if (tNumC === 1 && tNumR === 1) {\n return `\n float ${funcName}(int index) {\n return sampleTexture(${texName}, halfCR);\n }\n `;\n }\n const offset = getFlatOffsetUniformName(texName);\n if (tNumC === 1) {\n if (enableShapeUniforms) {\n return `\n float ${funcName}(int index) {\n vec2 uv = vec2(0.5, (float(index + ${offset}) + 0.5) / float(${\n texName}TexShape[0]));\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n\n return `\n float ${funcName}(int index) {\n vec2 uv = vec2(0.5, (float(index + ${offset}) + 0.5) / ${tNumR}.0);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n if (tNumR === 1) {\n if (enableShapeUniforms) {\n return `\n float ${funcName}(int index) {\n vec2 uv = vec2((float(index + ${offset}) + 0.5) / float(${\n texName}TexShape[1]), 0.5);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n\n return `\n float ${funcName}(int index) {\n vec2 uv = vec2((float(index + ${offset}) + 0.5) / ${tNumC}.0, 0.5);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n\n if (enableShapeUniforms) {\n return `\n float ${funcName}(int index) {\n vec2 uv = uvFromFlat(${texName}TexShape[0], ${\n texName}TexShape[1], index + ${offset});\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n\n return `\n float ${funcName}(int index) {\n vec2 uv = uvFromFlat(${tNumR}, ${tNumC}, index + ${offset});\n return 
sampleTexture(${texName}, uv);\n }\n `;\n}\n\nfunction getPackedSampler2D(\n inputInfo: InputInfo, enableShapeUniforms: boolean): string {\n const shape = inputInfo.shapeInfo.logicalShape;\n const texName = inputInfo.name;\n const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1);\n const texShape = inputInfo.shapeInfo.texShape;\n\n const texNumR = texShape[0];\n const texNumC = texShape[1];\n const glsl = getGlslDifferences();\n if (texShape != null && util.arraysEqual(shape, texShape)) {\n if (enableShapeUniforms) {\n return `\n vec4 ${funcName}(int row, int col) {\n vec2 uv = (vec2(col, row) + halfCR) / vec2(${texName}TexShape[1], ${\n texName}TexShape[0]);\n\n return ${glsl.texture2D}(${texName}, uv);\n }\n `;\n }\n return `\n vec4 ${funcName}(int row, int col) {\n vec2 uv = (vec2(col, row) + halfCR) / vec2(${texNumC}.0, ${texNumR}.0);\n\n return ${glsl.texture2D}(${texName}, uv);\n }\n `;\n }\n\n if (enableShapeUniforms) {\n return `\n vec4 ${funcName}(int row, int col) {\n ivec2 packedTexShape = ivec2(ceil(float(${\n texName}TexShape[0]) / 2.0), ceil(float(${texName}TexShape[1]) / 2.0));\n int valuesPerRow = int(ceil(float(${texName}Shape[1]) / 2.0));\n vec2 uv = packedUVfrom2D(valuesPerRow, packedTexShape[0], packedTexShape[1], row, col);\n return ${glsl.texture2D}(${texName}, uv);\n }\n `;\n }\n const packedTexShape =\n [Math.ceil(texShape[0] / 2), Math.ceil(texShape[1] / 2)];\n const valuesPerRow = Math.ceil(shape[1] / 2);\n\n return `\n vec4 ${funcName}(int row, int col) {\n vec2 uv = packedUVfrom2D(${valuesPerRow}, ${packedTexShape[0]}, ${\n packedTexShape[1]}, row, col);\n return ${glsl.texture2D}(${texName}, uv);\n }\n `;\n}\n\nfunction getSampler2D(\n inputInfo: InputInfo, enableShapeUniforms: boolean): string {\n const shape = inputInfo.shapeInfo.logicalShape;\n const texName = inputInfo.name;\n const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1);\n const texShape = inputInfo.shapeInfo.texShape;\n\n if (texShape != null && util.arraysEqual(shape, texShape)) {\n if (enableShapeUniforms) {\n return `\n float ${funcName}(int row, int col) {\n vec2 uv = (vec2(col, row) + halfCR) / vec2(${texName}TexShape[1], ${\n texName}TexShape[0]);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n\n const texNumR = texShape[0];\n const texNumC = texShape[1];\n return `\n float ${funcName}(int row, int col) {\n vec2 uv = (vec2(col, row) + halfCR) / vec2(${texNumC}.0, ${texNumR}.0);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n\n const {newShape, keptDims} = util.squeezeShape(shape);\n const squeezedShape = newShape;\n if (squeezedShape.length < shape.length) {\n const newInputInfo = squeezeInputInfo(inputInfo, squeezedShape);\n const params = ['row', 'col'];\n return `\n ${getSamplerFromInInfo(newInputInfo, enableShapeUniforms)}\n float ${funcName}(int row, int col) {\n return ${funcName}(${getSqueezedParams(params, keptDims)});\n }\n `;\n }\n\n if (inputInfo.shapeInfo.isUniform) {\n // Uniform arrays will be less than 65505 (no risk of float16 overflow).\n return `\n float ${funcName}(int row, int col) {\n int index = round(dot(vec2(row, col), vec2(${shape[1]}, 1)));\n ${getUniformSampler(inputInfo)}\n }\n `;\n }\n\n const texNumR = texShape[0];\n const texNumC = texShape[1];\n const offset = getFlatOffsetUniformName(texName);\n if (texNumC === 1) {\n // index is used directly as physical (no risk of float16 overflow).\n if (enableShapeUniforms) {\n return `\n float ${funcName}(int row, int col) {\n float index = dot(vec3(row, col, 
${offset}), vec3(${\n texName}Shape[1], 1, 1));\n vec2 uv = vec2(0.5, (index + 0.5) / float(${texName}TexShape[0]));\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n return `\n float ${funcName}(int row, int col) {\n float index = dot(vec3(row, col, ${offset}), vec3(${shape[1]}, 1, 1));\n vec2 uv = vec2(0.5, (index + 0.5) / ${texNumR}.0);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n if (texNumR === 1) {\n // index is used directly as physical (no risk of float16 overflow).\n if (enableShapeUniforms) {\n return `\n float ${funcName}(int row, int col) {\n float index = dot(vec3(row, col, ${offset}), vec3(${\n texName}Shape[1], 1, 1));\n vec2 uv = vec2((index + 0.5) / float(${texName}TexShape[1]), 0.5);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n return `\n float ${funcName}(int row, int col) {\n float index = dot(vec3(row, col, ${offset}), vec3(${shape[1]}, 1, 1));\n vec2 uv = vec2((index + 0.5) / ${texNumC}.0, 0.5);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n\n if (enableShapeUniforms) {\n return `\n float ${funcName}(int row, int col) {\n // Explicitly use integer operations as dot() only works on floats.\n int index = row * ${texName}Shape[1] + col + ${offset};\n vec2 uv = uvFromFlat(${texName}TexShape[0], ${\n texName}TexShape[1], index);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n return `\n float ${funcName}(int row, int col) {\n // Explicitly use integer operations as dot() only works on floats.\n int index = row * ${shape[1]} + col + ${offset};\n vec2 uv = uvFromFlat(${texNumR}, ${texNumC}, index);\n return sampleTexture(${texName}, uv);\n }\n`;\n}\n\nfunction getPackedSampler3D(\n inputInfo: InputInfo, enableShapeUniforms: boolean): string {\n const shape = inputInfo.shapeInfo.logicalShape;\n const texName = inputInfo.name;\n const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1);\n const texShape = inputInfo.shapeInfo.texShape;\n const packedTexShape =\n [Math.ceil(texShape[0] / 2), Math.ceil(texShape[1] / 2)];\n\n if (shape[0] === 1) {\n const squeezedShape = shape.slice(1);\n const keptDims = [1, 2];\n const newInputInfo = squeezeInputInfo(inputInfo, squeezedShape);\n const params = ['b', 'row', 'col'];\n return `\n ${getPackedSamplerFromInInfo(newInputInfo, enableShapeUniforms)}\n vec4 ${funcName}(int b, int row, int col) {\n return ${funcName}(${getSqueezedParams(params, keptDims)});\n }\n `;\n }\n\n const glsl = getGlslDifferences();\n if (enableShapeUniforms) {\n return `\n vec4 ${funcName}(int b, int row, int col) {\n ivec2 packedTexShape = ivec2(ceil(float(${\n texName}TexShape[0]) / 2.0), ceil(float(${texName}TexShape[1]) / 2.0));\n int valuesPerRow = int(ceil(float(${texName}Shape[2]) / 2.0));\n int texelsInBatch = valuesPerRow * int(ceil(float(${\n texName}Shape[1]) / 2.0));\n vec2 uv = packedUVfrom3D(\n packedTexShape[0], packedTexShape[1], texelsInBatch, valuesPerRow, b, row, col);\n return ${glsl.texture2D}(${texName}, uv);\n }\n `;\n }\n\n const texNumR = packedTexShape[0];\n const texNumC = packedTexShape[1];\n\n const valuesPerRow = Math.ceil(shape[2] / 2);\n const texelsInBatch = valuesPerRow * Math.ceil(shape[1] / 2);\n\n return `\n vec4 ${funcName}(int b, int row, int col) {\n vec2 uv = packedUVfrom3D(\n ${texNumR}, ${texNumC}, ${texelsInBatch}, ${valuesPerRow}, b, row, col);\n return ${glsl.texture2D}(${texName}, uv);\n }\n `;\n}\n\nfunction getSampler3D(\n inputInfo: InputInfo, enableShapeUniforms: boolean): string {\n const shape = inputInfo.shapeInfo.logicalShape;\n const texName = 
inputInfo.name;\n const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1);\n const stride0 = shape[1] * shape[2];\n const stride1 = shape[2];\n\n const {newShape, keptDims} = util.squeezeShape(shape);\n const squeezedShape = newShape;\n if (squeezedShape.length < shape.length) {\n const newInputInfo = squeezeInputInfo(inputInfo, squeezedShape);\n const params = ['row', 'col', 'depth'];\n return `\n ${getSamplerFromInInfo(newInputInfo, enableShapeUniforms)}\n float ${funcName}(int row, int col, int depth) {\n return ${funcName}(${getSqueezedParams(params, keptDims)});\n }\n `;\n }\n\n if (inputInfo.shapeInfo.isUniform) {\n // Uniform arrays will be less than 65505 (no risk of float16 overflow).\n return `\n float ${funcName}(int row, int col, int depth) {\n int index = round(dot(vec3(row, col, depth),\n vec3(${stride0}, ${stride1}, 1)));\n ${getUniformSampler(inputInfo)}\n }\n `;\n }\n\n const texShape = inputInfo.shapeInfo.texShape;\n const texNumR = texShape[0];\n const texNumC = texShape[1];\n const flatOffset = inputInfo.shapeInfo.flatOffset;\n if (texNumC === stride0 && flatOffset == null) {\n // texC is used directly as physical (no risk of float16 overflow).\n if (enableShapeUniforms) {\n return `\n float ${funcName}(int row, int col, int depth) {\n int stride1 = ${texName}Shape[2];\n float texR = float(row);\n float texC = dot(vec2(col, depth), vec2(stride1, 1));\n vec2 uv = (vec2(texC, texR) + halfCR) /\n vec2(${texName}TexShape[1], ${texName}TexShape[0]);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n return `\n float ${funcName}(int row, int col, int depth) {\n float texR = float(row);\n float texC = dot(vec2(col, depth), vec2(${stride1}, 1));\n vec2 uv = (vec2(texC, texR) + halfCR) /\n vec2(${texNumC}.0, ${texNumR}.0);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n\n if (texNumC === stride1 && flatOffset == null) {\n // texR is used directly as physical (no risk of float16 overflow).\n if (enableShapeUniforms) {\n return `\n float ${funcName}(int row, int col, int depth) {\n float texR = dot(vec2(row, col), vec2(${texName}Shape[1], 1));\n float texC = float(depth);\n vec2 uv = (vec2(texC, texR) + halfCR) / vec2(${texName}TexShape[1], ${\n texName}TexShape[0]);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n return `\n float ${funcName}(int row, int col, int depth) {\n float texR = dot(vec2(row, col), vec2(${shape[1]}, 1));\n float texC = float(depth);\n vec2 uv = (vec2(texC, texR) + halfCR) / vec2(${texNumC}.0, ${texNumR}.0);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n\n const offset = getFlatOffsetUniformName(texName);\n if (enableShapeUniforms) {\n return `\n float ${funcName}(int row, int col, int depth) {\n // Explicitly use integer operations as dot() only works on floats.\n int stride0 = ${texName}Shape[1] * ${texName}Shape[2];\n int stride1 = ${texName}Shape[2];\n int index = row * stride0 + col * stride1 + depth + ${offset};\n vec2 uv = uvFromFlat(${texName}TexShape[0], ${texName}TexShape[1], index);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n return `\n float ${funcName}(int row, int col, int depth) {\n // Explicitly use integer operations as dot() only works on floats.\n int index = row * ${stride0} + col * ${stride1} + depth + ${offset};\n vec2 uv = uvFromFlat(${texNumR}, ${texNumC}, index);\n return sampleTexture(${texName}, uv);\n }\n `;\n}\n\nfunction getPackedSamplerND(\n inputInfo: InputInfo, enableShapeUniforms: boolean): string {\n const texName = inputInfo.name;\n const funcName = 'get' + 
texName.charAt(0).toUpperCase() + texName.slice(1);\n const glsl = getGlslDifferences();\n if (enableShapeUniforms) {\n // TODO: support 5d and 6d\n return `\n vec4 ${funcName}(int b2, int b, int row, int col) {\n int valuesPerRow = int(ceil(float(${texName}Shape[3]) / 2.0));\n int texelsInBatch = valuesPerRow * int(ceil(float(${\n texName}Shape[2]) / 2.0));\n int index = b * texelsInBatch + (row / 2) * valuesPerRow + (col / 2);\n texelsInBatch *= ${texName}Shape[1];\n index = b2 * texelsInBatch + index;\n ivec2 packedTexShape = ivec2(ceil(float(${\n texName}TexShape[0]) / 2.0), ceil(float(${texName}TexShape[1]) / 2.0));\n int texR = index / packedTexShape[1];\n int texC = index - texR * packedTexShape[1];\n vec2 uv = (vec2(texC, texR) + halfCR) / vec2(packedTexShape[1], packedTexShape[0]); return ${\n glsl.texture2D}(${texName}, uv);\n }\n `;\n }\n const shape = inputInfo.shapeInfo.logicalShape;\n const rank = shape.length;\n const texShape = inputInfo.shapeInfo.texShape;\n const packedTexShape =\n [Math.ceil(texShape[0] / 2), Math.ceil(texShape[1] / 2)];\n const texNumR = packedTexShape[0];\n const texNumC = packedTexShape[1];\n\n const valuesPerRow = Math.ceil(shape[rank - 1] / 2);\n let texelsInBatch = valuesPerRow * Math.ceil(shape[rank - 2] / 2);\n let params = `int b, int row, int col`;\n let index = `b * ${texelsInBatch} + (row / 2) * ${valuesPerRow} + (col / 2)`;\n for (let b = 2; b < rank - 1; b++) {\n params = `int b${b}, ` + params;\n texelsInBatch *= shape[rank - b - 1];\n index = `b${b} * ${texelsInBatch} + ` + index;\n }\n return `\n vec4 ${funcName}(${params}) {\n int index = ${index};\n int texR = index / ${texNumC};\n int texC = index - texR * ${texNumC};\n vec2 uv = (vec2(texC, texR) + halfCR) / vec2(${texNumC}, ${texNumR});\n return ${glsl.texture2D}(${texName}, uv);\n }\n `;\n}\n\nfunction getSampler4D(\n inputInfo: InputInfo, enableShapeUniforms: boolean): string {\n const shape = inputInfo.shapeInfo.logicalShape;\n const texName = inputInfo.name;\n const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1);\n const stride2 = shape[3];\n const stride1 = shape[2] * stride2;\n const stride0 = shape[1] * stride1;\n\n const {newShape, keptDims} = util.squeezeShape(shape);\n if (newShape.length < shape.length) {\n const newInputInfo = squeezeInputInfo(inputInfo, newShape);\n const params = ['row', 'col', 'depth', 'depth2'];\n return `\n ${getSamplerFromInInfo(newInputInfo, enableShapeUniforms)}\n float ${funcName}(int row, int col, int depth, int depth2) {\n return ${funcName}(${getSqueezedParams(params, keptDims)});\n }\n `;\n }\n\n if (inputInfo.shapeInfo.isUniform) {\n // Uniform arrays will be less than 65505 (no risk of float16 overflow).\n return `\n float ${funcName}(int row, int col, int depth, int depth2) {\n int index = round(dot(vec4(row, col, depth, depth2),\n vec4(${stride0}, ${stride1}, ${stride2}, 1)));\n ${getUniformSampler(inputInfo)}\n }\n `;\n }\n\n const flatOffset = inputInfo.shapeInfo.flatOffset;\n const texShape = inputInfo.shapeInfo.texShape;\n const texNumR = texShape[0];\n const texNumC = texShape[1];\n\n const stride2Str = `int stride2 = ${texName}Shape[3];`;\n const stride1Str = `int stride1 = ${texName}Shape[2] * stride2;`;\n const stride0Str = `int stride0 = ${texName}Shape[1] * stride1;`;\n if (texNumC === stride0 && flatOffset == null) {\n // texC is used directly as physical (no risk of float16 overflow).\n if (enableShapeUniforms) {\n return `\n float ${funcName}(int row, int col, int depth, int depth2) {\n ${stride2Str}\n 
${stride1Str}\n float texR = float(row);\n float texC =\n dot(vec3(col, depth, depth2),\n vec3(stride1, stride2, 1));\n vec2 uv = (vec2(texC, texR) + halfCR) /\n vec2(${texName}TexShape[1], ${texName}TexShape[0]);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n return `\n float ${funcName}(int row, int col, int depth, int depth2) {\n float texR = float(row);\n float texC =\n dot(vec3(col, depth, depth2),\n vec3(${stride1}, ${stride2}, 1));\n vec2 uv = (vec2(texC, texR) + halfCR) /\n vec2(${texNumC}.0, ${texNumR}.0);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n if (texNumC === stride2 && flatOffset == null) {\n // texR is used directly as physical (no risk of float16 overflow).\n if (enableShapeUniforms) {\n return `\n float ${funcName}(int row, int col, int depth, int depth2) {\n float texR = dot(vec3(row, col, depth),\n vec3(${texName}Shape[1] * ${texName}Shape[2], ${\n texName}Shape[2], 1));\n float texC = float(depth2);\n vec2 uv = (vec2(texC, texR) + halfCR) /\n vec2(${texName}TexShape[1], ${texName}TexShape[0]);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n return `\n float ${funcName}(int row, int col, int depth, int depth2) {\n float texR = dot(vec3(row, col, depth),\n vec3(${shape[1] * shape[2]}, ${shape[2]}, 1));\n float texC = float(depth2);\n vec2 uv = (vec2(texC, texR) + halfCR) /\n vec2(${texNumC}.0, ${texNumR}.0);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n\n const offset = getFlatOffsetUniformName(texName);\n if (enableShapeUniforms) {\n return `\n float ${funcName}(int row, int col, int depth, int depth2) {\n // Explicitly use integer operations as dot() only works on floats.\n ${stride2Str}\n ${stride1Str}\n ${stride0Str}\n int index = row * stride0 + col * stride1 +\n depth * stride2 + depth2;\n vec2 uv = uvFromFlat(${texName}TexShape[0], ${\n texName}TexShape[1], index + ${offset});\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n return `\n float ${funcName}(int row, int col, int depth, int depth2) {\n // Explicitly use integer operations as dot() only works on floats.\n int index = row * ${stride0} + col * ${stride1} +\n depth * ${stride2} + depth2;\n vec2 uv = uvFromFlat(${texNumR}, ${texNumC}, index + ${offset});\n return sampleTexture(${texName}, uv);\n }\n `;\n}\n\nfunction getSampler5D(inputInfo: InputInfo): string {\n const shape = inputInfo.shapeInfo.logicalShape;\n const texName = inputInfo.name;\n const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1);\n const stride3 = shape[4];\n const stride2 = shape[3] * stride3;\n const stride1 = shape[2] * stride2;\n const stride0 = shape[1] * stride1;\n\n const {newShape, keptDims} = util.squeezeShape(shape);\n if (newShape.length < shape.length) {\n const newInputInfo = squeezeInputInfo(inputInfo, newShape);\n const params = ['row', 'col', 'depth', 'depth2', 'depth3'];\n return `\n ${getSamplerFromInInfo(newInputInfo)}\n float ${funcName}(int row, int col, int depth, int depth2, int depth3) {\n return ${funcName}(${getSqueezedParams(params, keptDims)});\n }\n `;\n }\n\n if (inputInfo.shapeInfo.isUniform) {\n // Uniform arrays will be less than 65505 (no risk of float16 overflow).\n return `\n float ${funcName}(int row, int col, int depth, int depth2, int depth3) {\n float index = dot(\n vec4(row, col, depth, depth2),\n vec4(${stride0}, ${stride1}, ${stride2}, ${stride3})) +\n depth3;\n ${getUniformSampler(inputInfo)}\n }\n `;\n }\n\n const flatOffset = inputInfo.shapeInfo.flatOffset;\n const texShape = inputInfo.shapeInfo.texShape;\n const texNumR = 
texShape[0];\n const texNumC = texShape[1];\n\n if (texNumC === stride0 && flatOffset == null) {\n // texC is used directly as physical (no risk of float16 overflow).\n return `\n float ${funcName}(int row, int col, int depth, int depth2, int depth3) {\n int texR = row;\n float texC = dot(vec4(col, depth, depth2, depth3),\n vec4(${stride1}, ${stride2}, ${stride3}, 1));\n vec2 uv = (vec2(texC, texR) + halfCR) /\n vec2(${texNumC}.0, ${texNumR}.0);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n\n if (texNumC === stride3 && flatOffset == null) {\n // texR is used directly as physical (no risk of float16 overflow).\n return `\n float ${funcName}(int row, int col, int depth, int depth2, int depth3) {\n float texR = dot(\n vec4(row, col, depth, depth2),\n vec4(${shape[1] * shape[2] * shape[3]},\n ${shape[2] * shape[3]}, ${shape[3]}, 1));\n int texC = depth3;\n vec2 uv = (vec2(texC, texR) + halfCR) /\n vec2(${texNumC}.0, ${texNumR}.0);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n\n const offset = getFlatOffsetUniformName(texName);\n return `\n float ${funcName}(int row, int col, int depth, int depth2, int depth3) {\n // Explicitly use integer operations as dot() only works on floats.\n int index = row * ${stride0} + col * ${stride1} + depth * ${stride2} +\n depth2 * ${stride3} + depth3 + ${offset};\n vec2 uv = uvFromFlat(${texNumR}, ${texNumC}, index);\n return sampleTexture(${texName}, uv);\n }\n `;\n}\n\nfunction getSampler6D(inputInfo: InputInfo): string {\n const shape = inputInfo.shapeInfo.logicalShape;\n const texName = inputInfo.name;\n const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1);\n\n const {newShape, keptDims} = util.squeezeShape(shape);\n if (newShape.length < shape.length) {\n const newInputInfo = squeezeInputInfo(inputInfo, newShape);\n const params = ['row', 'col', 'depth', 'depth2', 'depth3', 'depth4'];\n return `\n ${getSamplerFromInInfo(newInputInfo)}\n float ${funcName}(int row, int col, int depth,\n int depth2, int depth3, int depth4) {\n return ${funcName}(${getSqueezedParams(params, keptDims)});\n }\n `;\n }\n\n const stride4 = shape[5];\n const stride3 = shape[4] * stride4;\n const stride2 = shape[3] * stride3;\n const stride1 = shape[2] * stride2;\n const stride0 = shape[1] * stride1;\n\n if (inputInfo.shapeInfo.isUniform) {\n // Uniform arrays will be less than 65505 (no risk of float16 overflow).\n return `\n float ${funcName}(int row, int col, int depth,\n int depth2, int depth3, int depth4) {\n int index = round(dot(\n vec4(row, col, depth, depth2),\n vec4(${stride0}, ${stride1}, ${stride2}, ${stride3})) +\n dot(\n vec2(depth3, depth4),\n vec2(${stride4}, 1)));\n ${getUniformSampler(inputInfo)}\n }\n `;\n }\n\n const flatOffset = inputInfo.shapeInfo.flatOffset;\n const texShape = inputInfo.shapeInfo.texShape;\n const texNumR = texShape[0];\n const texNumC = texShape[1];\n if (texNumC === stride0 && flatOffset == null) {\n // texC is used directly as physical (no risk of float16 overflow).\n return `\n float ${funcName}(int row, int col, int depth,\n int depth2, int depth3, int depth4) {\n int texR = row;\n float texC = dot(vec4(col, depth, depth2, depth3),\n vec4(${stride1}, ${stride2}, ${stride3}, ${stride4})) +\n float(depth4);\n vec2 uv = (vec2(texC, texR) + halfCR) /\n vec2(${texNumC}.0, ${texNumR}.0);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n if (texNumC === stride4 && flatOffset == null) {\n // texR is used directly as physical (no risk of float16 overflow).\n return `\n float ${funcName}(int row, int 
col, int depth,\n int depth2, int depth3, int depth4) {\n float texR = dot(vec4(row, col, depth, depth2),\n vec4(${shape[1] * shape[2] * shape[3] * shape[4]},\n ${shape[2] * shape[3] * shape[4]},\n ${shape[3] * shape[4]},\n ${shape[4]})) + float(depth3);\n int texC = depth4;\n vec2 uv = (vec2(texC, texR) + halfCR) /\n vec2(${texNumC}.0, ${texNumR}.0);\n return sampleTexture(${texName}, uv);\n }\n `;\n }\n const offset = getFlatOffsetUniformName(texName);\n return `\n float ${funcName}(int row, int col, int depth,\n int depth2, int depth3, int depth4) {\n // Explicitly use integer operations as dot() only works on floats.\n int index = row * ${stride0} + col * ${stride1} + depth * ${stride2} +\n depth2 * ${stride3} + depth3 * ${stride4} + depth4 + ${offset};\n vec2 uv = uvFromFlat(${texNumR}, ${texNumC}, index);\n return sampleTexture(${texName}, uv);\n }\n `;\n}\n\nfunction getUniformSampler(inputInfo: InputInfo): string {\n const texName = inputInfo.name;\n const inSize = util.sizeFromShape(inputInfo.shapeInfo.logicalShape);\n\n if (inSize < 2) {\n return `return ${texName};`;\n }\n\n return `\n for (int i = 0; i < ${inSize}; i++) {\n if (i == index) {\n return ${texName}[i];\n }\n }\n `;\n}\n\nfunction getPackedSamplerAtOutputCoords(\n inputInfo: InputInfo, outShapeInfo: ShapeInfo) {\n const texName = inputInfo.name;\n const texFuncSnippet = texName.charAt(0).toUpperCase() + texName.slice(1);\n const funcName = 'get' + texFuncSnippet + 'AtOutCoords';\n const inRank = inputInfo.shapeInfo.logicalShape.length;\n const outRank = outShapeInfo.logicalShape.length;\n\n const broadcastDims = getBroadcastDims(\n inputInfo.shapeInfo.logicalShape, outShapeInfo.logicalShape);\n\n const type = getCoordsDataType(outRank);\n const rankDiff = outRank - inRank;\n let coordsSnippet: string;\n const fields = ['x', 'y', 'z', 'w', 'u', 'v'];\n\n if (inRank === 0) {\n coordsSnippet = '';\n } else if (outRank < 2 && broadcastDims.length >= 1) {\n coordsSnippet = 'coords = 0;';\n } else {\n coordsSnippet =\n broadcastDims.map(d => `coords.${fields[d + rankDiff]} = 0;`)\n .join('\\n');\n }\n let unpackedCoordsSnippet = '';\n if (outRank < 2 && inRank > 0) {\n unpackedCoordsSnippet = 'coords';\n } else {\n unpackedCoordsSnippet = inputInfo.shapeInfo.logicalShape\n .map((s, i) => `coords.${fields[i + rankDiff]}`)\n .join(', ');\n }\n\n let output = `return outputValue;`;\n const inSize = util.sizeFromShape(inputInfo.shapeInfo.logicalShape);\n const isInputScalar = inSize === 1;\n const outSize = util.sizeFromShape(outShapeInfo.logicalShape);\n const isOutputScalar = outSize === 1;\n\n if (inRank === 1 && !isInputScalar && !isOutputScalar) {\n output = `\n return vec4(outputValue.xy, outputValue.xy);\n `;\n } else if (isInputScalar && !isOutputScalar) {\n if (outRank === 1) {\n output = `\n return vec4(outputValue.x, outputValue.x, 0., 0.);\n `;\n } else {\n output = `\n return vec4(outputValue.x);\n `;\n }\n } else if (broadcastDims.length) {\n const rows = inRank - 2;\n const cols = inRank - 1;\n\n if (broadcastDims.indexOf(rows) > -1 && broadcastDims.indexOf(cols) > -1) {\n output = `return vec4(outputValue.x);`;\n } else if (broadcastDims.indexOf(rows) > -1) {\n output = `return vec4(outputValue.x, outputValue.y, ` +\n `outputValue.x, outputValue.y);`;\n } else if (broadcastDims.indexOf(cols) > -1) {\n output = `return vec4(outputValue.xx, outputValue.zz);`;\n }\n }\n\n return `\n vec4 ${funcName}() {\n ${type} coords = getOutputCoords();\n ${coordsSnippet}\n vec4 outputValue = 
get${texFuncSnippet}(${unpackedCoordsSnippet});\n ${output}\n }\n `;\n}\n\nfunction getSamplerAtOutputCoords(\n inputInfo: InputInfo, outShapeInfo: ShapeInfo) {\n const texName = inputInfo.name;\n const texFuncSnippet = texName.charAt(0).toUpperCase() + texName.slice(1);\n const funcName = 'get' + texFuncSnippet + 'AtOutCoords';\n const outTexShape = outShapeInfo.texShape;\n const inTexShape = inputInfo.shapeInfo.texShape;\n const inRank = inputInfo.shapeInfo.logicalShape.length;\n const outRank = outShapeInfo.logicalShape.length;\n\n if (!inputInfo.shapeInfo.isUniform && inRank === outRank &&\n inputInfo.shapeInfo.flatOffset == null &&\n util.arraysEqual(inTexShape, outTexShape)) {\n return `\n float ${funcName}() {\n return sampleTexture(${texName}, resultUV);\n }\n `;\n }\n\n const type = getCoordsDataType(outRank);\n const broadcastDims = getBroadcastDims(\n inputInfo.shapeInfo.logicalShape, outShapeInfo.logicalShape);\n const rankDiff = outRank - inRank;\n let coordsSnippet: string;\n const fields = ['x', 'y', 'z', 'w', 'u', 'v'];\n\n if (inRank === 0) {\n coordsSnippet = '';\n } else if (outRank < 2 && broadcastDims.length >= 1) {\n coordsSnippet = 'coords = 0;';\n } else {\n coordsSnippet =\n broadcastDims.map(d => `coords.${fields[d + rankDiff]} = 0;`)\n .join('\\n');\n }\n let unpackedCoordsSnippet = '';\n if (outRank < 2 && inRank > 0) {\n unpackedCoordsSnippet = 'coords';\n } else {\n unpackedCoordsSnippet = inputInfo.shapeInfo.logicalShape\n .map((s, i) => `coords.${fields[i + rankDiff]}`)\n .join(', ');\n }\n\n return `\n float ${funcName}() {\n ${type} coords = getOutputCoords();\n ${coordsSnippet}\n return get${texFuncSnippet}(${unpackedCoordsSnippet});\n }\n `;\n}\n\nexport function getCoordsDataType(rank: number): string {\n if (rank <= 1) {\n return 'int';\n } else if (rank === 2) {\n return 'ivec2';\n } else if (rank === 3) {\n return 'ivec3';\n } else if (rank === 4) {\n return 'ivec4';\n } else if (rank === 5) {\n return 'ivec5';\n } else if (rank === 6) {\n return 'ivec6';\n } else {\n throw Error(`GPU for rank ${rank} is not yet supported`);\n }\n}\n\nexport function getUniformInfoFromShape(\n isPacked: boolean, shape: number[], texShape: number[]) {\n const {newShape, keptDims} = util.squeezeShape(shape);\n const rank = shape.length;\n const useSqueezePackedShape = isPacked && rank === 3 && shape[0] === 1;\n const squeezeShape = useSqueezePackedShape ? shape.slice(1) : newShape;\n const useSqueezeShape =\n (!isPacked && rank > 1 && !util.arraysEqual(shape, texShape) &&\n newShape.length < rank) ||\n useSqueezePackedShape;\n const uniformShape = useSqueezeShape ? squeezeShape : shape;\n return {useSqueezeShape, uniformShape, keptDims};\n}\n\n/** Returns a new input info (a copy) that has a squeezed logical shape. */\nexport function squeezeInputInfo(\n inInfo: InputInfo, squeezedShape: number[]): InputInfo {\n // Deep copy.\n const newInputInfo: InputInfo = JSON.parse(JSON.stringify(inInfo));\n newInputInfo.shapeInfo.logicalShape = squeezedShape;\n return newInputInfo;\n}\n\nfunction getSqueezedParams(params: string[], keptDims: number[]): string {\n return keptDims.map(d => params[d]).join(', ');\n}\n","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, env, Tensor, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {GPGPUContext, GPGPUContextProgram} from './gpgpu_context';\nimport * as shader_compiler from './shader_compiler';\nimport {InputInfo, ShapeInfo, UniformType} from './shader_compiler';\nimport {PackingScheme, TextureData, TextureUsage} from './tex_util';\nimport {createFragmentShader} from './webgl_util';\n\nexport interface GPGPUProgram {\n variableNames: string[];\n outputShape: number[];\n userCode: string;\n enableShapeUniforms?: boolean;\n /** If true, this program expects packed input textures. Defaults to false. */\n packedInputs?: boolean;\n /** If true, this program produces a packed texture. Defaults to false. */\n packedOutput?: boolean;\n /**\n * Affects what type of texture we allocate for the output. Defaults to\n * `TextureUsage.RENDER`.\n */\n outTexUsage?: TextureUsage;\n /**\n * The type of scheme to use when packing texels for the output values.\n * See `PackingScheme` for details. Defaults to `PackingScheme.SHARED_BATCH`.\n */\n outPackingScheme?: PackingScheme;\n customUniforms?:\n Array<{name: string; arrayIndex?: number; type: UniformType;}>;\n}\n\nexport interface GPGPUBinary {\n webGLProgram: GPGPUContextProgram;\n program: GPGPUProgram;\n uniformLocations: {[name: string]: WebGLUniformLocation};\n customUniformLocations?: WebGLUniformLocation[];\n source: string;\n fragmentShader: WebGLShader;\n inShapeInfos: ShapeInfo[];\n outShapeInfo: ShapeInfo;\n infLoc: WebGLUniformLocation;\n nanLoc: WebGLUniformLocation;\n inShapesLocations?: {[name: string]: WebGLUniformLocation};\n inTexShapesLocations?: {[name: string]: WebGLUniformLocation};\n outShapeLocation?: WebGLUniformLocation;\n outShapeStridesLocation?: WebGLUniformLocation;\n outTexShapeLocation?: WebGLUniformLocation;\n}\n\nexport interface GPGPUBinaryLocations {\n uniformLocations: {[name: string]: WebGLUniformLocation};\n customUniformLocations?: WebGLUniformLocation[];\n infLoc: WebGLUniformLocation;\n nanLoc: WebGLUniformLocation;\n inShapesLocations?: {[name: string]: WebGLUniformLocation};\n inTexShapesLocations?: {[name: string]: WebGLUniformLocation};\n outShapeLocation?: WebGLUniformLocation;\n outShapeStridesLocation?: WebGLUniformLocation;\n outTexShapeLocation?: WebGLUniformLocation;\n}\n\nexport interface TensorData {\n shape: number[];\n texData: TextureData;\n isUniform: boolean;\n // Available when we decide to upload as uniform instead of texture.\n uniformValues?: TypedArray;\n}\n\nexport function compileProgram(\n gpgpu: GPGPUContext, program: GPGPUProgram, inputs: TensorData[],\n output: TensorData): GPGPUBinary {\n const inputInfos: InputInfo[] = inputs.map((input, i) => {\n const shapeInfo: ShapeInfo = {\n logicalShape: input.shape,\n texShape: input.isUniform ? 
null : input.texData.texShape,\n isUniform: input.isUniform,\n isPacked: input.isUniform ? false : input.texData.isPacked,\n flatOffset: null\n };\n if (input.texData != null && input.texData.slice != null &&\n input.texData.slice.flatOffset > 0) {\n shapeInfo.flatOffset = input.texData.slice.flatOffset;\n }\n return {name: program.variableNames[i], shapeInfo};\n });\n const inShapeInfos = inputInfos.map(x => x.shapeInfo);\n const outShapeInfo: ShapeInfo = {\n logicalShape: output.shape,\n texShape: output.texData.texShape,\n isUniform: false,\n isPacked: output.texData.isPacked,\n flatOffset: null\n };\n const source = shader_compiler.makeShader(inputInfos, outShapeInfo, program);\n const fragmentShader = createFragmentShader(gpgpu.gl, source);\n const webGLProgram = gpgpu.createProgram(fragmentShader);\n\n if (!env().get('ENGINE_COMPILE_ONLY')) {\n return {\n program,\n fragmentShader,\n source,\n webGLProgram,\n inShapeInfos,\n outShapeInfo,\n ...getUniformLocations(gpgpu, program, webGLProgram)\n };\n } else {\n return {\n program,\n fragmentShader,\n source,\n webGLProgram,\n inShapeInfos,\n outShapeInfo,\n uniformLocations: null,\n customUniformLocations: null,\n infLoc: null,\n nanLoc: null,\n inShapesLocations: null,\n inTexShapesLocations: null,\n outShapeLocation: null,\n outShapeStridesLocation: null,\n outTexShapeLocation: null\n };\n }\n}\n\nexport function getUniformLocations(\n gpgpu: GPGPUContext, program: GPGPUProgram,\n webGLProgram: WebGLProgram): GPGPUBinaryLocations {\n const uniformLocations: {[name: string]: WebGLUniformLocation} = {};\n const inShapesLocations: {[name: string]: WebGLUniformLocation} = {};\n const inTexShapesLocations: {[name: string]: WebGLUniformLocation} = {};\n const customUniformLocations: WebGLUniformLocation[] = [];\n let outShapeLocation: WebGLUniformLocation;\n let outTexShapeLocation: WebGLUniformLocation;\n let outShapeStridesLocation: WebGLUniformLocation;\n let infLoc: WebGLUniformLocation = null;\n let nanLoc: WebGLUniformLocation = null;\n\n // Add special uniforms (NAN, INFINITY)\n nanLoc = gpgpu.getUniformLocation(webGLProgram, 'NAN', false);\n if (env().getNumber('WEBGL_VERSION') === 1) {\n infLoc = gpgpu.getUniformLocation(webGLProgram, 'INFINITY', false);\n }\n\n // Add user-defined uniforms\n const shouldThrow = false;\n for (let i = 0; i < program.variableNames.length; i++) {\n const varName = program.variableNames[i];\n uniformLocations[varName] =\n gpgpu.getUniformLocation(webGLProgram, varName, shouldThrow);\n uniformLocations[`offset${varName}`] =\n gpgpu.getUniformLocation(webGLProgram, `offset${varName}`, shouldThrow);\n if (program.enableShapeUniforms) {\n inShapesLocations[`${varName}Shape`] = gpgpu.getUniformLocation(\n webGLProgram, `${varName}Shape`, shouldThrow);\n inTexShapesLocations[`${varName}TexShape`] = gpgpu.getUniformLocation(\n webGLProgram, `${varName}TexShape`, shouldThrow);\n }\n }\n\n if (program.enableShapeUniforms) {\n outShapeLocation =\n gpgpu.getUniformLocation(webGLProgram, 'outShape', shouldThrow);\n outShapeStridesLocation =\n gpgpu.getUniformLocation(webGLProgram, 'outShapeStrides', shouldThrow);\n outTexShapeLocation =\n gpgpu.getUniformLocation(webGLProgram, 'outTexShape', shouldThrow);\n }\n\n if (program.customUniforms) {\n program.customUniforms.forEach((d, i) => {\n customUniformLocations[i] =\n gpgpu.getUniformLocation(webGLProgram, d.name, shouldThrow);\n });\n }\n\n return {\n uniformLocations,\n customUniformLocations,\n infLoc,\n nanLoc,\n inShapesLocations,\n inTexShapesLocations,\n 
outShapeLocation,\n outShapeStridesLocation,\n outTexShapeLocation\n };\n}\n\nfunction validateBinaryAndProgram(\n shapeInfos: ShapeInfo[], inputs: TensorData[]) {\n if (shapeInfos.length !== inputs.length) {\n throw Error(\n `Binary was compiled with ${shapeInfos.length} inputs, but ` +\n `was executed with ${inputs.length} inputs`);\n }\n\n shapeInfos.forEach((s, i) => {\n const shapeA = s.logicalShape;\n const input = inputs[i];\n const shapeB = input.shape;\n\n if (!util.arraysEqual(shapeA, shapeB)) {\n throw Error(\n `Binary was compiled with different shapes than ` +\n `the current args. Shapes ${shapeA} and ${shapeB} must match`);\n }\n // The input is uploaded as uniform.\n if (s.isUniform && input.isUniform) {\n return;\n }\n\n const texShapeA = s.texShape;\n const texShapeB = input.isUniform ? null : input.texData.texShape;\n if (!util.arraysEqual(texShapeA, texShapeB)) {\n throw Error(\n `Binary was compiled with different texture shapes than the` +\n ` current args. Shape ${texShapeA} and ${texShapeB} must match`);\n }\n });\n}\n\nexport function runProgram(\n gpgpu: GPGPUContext, binary: GPGPUBinary, inputs: TensorData[],\n output: TensorData, customUniformValues?: number[][]): void {\n if (!binary.program.enableShapeUniforms) {\n validateBinaryAndProgram(binary.inShapeInfos, inputs);\n validateBinaryAndProgram([binary.outShapeInfo], [output]);\n }\n\n const outTex = output.texData.texture;\n const outTexShape = output.texData.texShape;\n if (output.texData.isPacked) {\n gpgpu.setOutputPackedMatrixTexture(\n outTex.texture, outTexShape[0], outTexShape[1]);\n } else {\n gpgpu.setOutputMatrixTexture(\n outTex.texture, outTexShape[0], outTexShape[1]);\n }\n gpgpu.setProgram(binary.webGLProgram);\n\n // Set special uniforms (NAN, INFINITY)\n if (env().getNumber('WEBGL_VERSION') === 1) {\n if (binary.infLoc !== null) {\n gpgpu.gl.uniform1f(binary.infLoc, Infinity);\n }\n }\n if (binary.nanLoc !== null) {\n gpgpu.gl.uniform1f(binary.nanLoc, NaN);\n }\n\n // Set user-defined inputs\n inputs.forEach((input, i) => {\n const varName = binary.program.variableNames[i];\n const varLoc = binary.uniformLocations[varName];\n const varOffsetLoc = binary.uniformLocations[`offset${varName}`];\n const varShapeLoc = binary.inShapesLocations[`${varName}Shape`];\n const varTexShapeLoc = binary.inTexShapesLocations[`${varName}TexShape`];\n\n if (varShapeLoc) {\n const {uniformShape} = shader_compiler.getUniformInfoFromShape(\n binary.program.packedInputs, input.shape, input.texData.texShape);\n switch (uniformShape.length) {\n case 1:\n gpgpu.gl.uniform1iv(varShapeLoc, new Int32Array(uniformShape));\n break;\n case 2:\n gpgpu.gl.uniform2iv(varShapeLoc, new Int32Array(uniformShape));\n break;\n case 3:\n gpgpu.gl.uniform3iv(varShapeLoc, new Int32Array(uniformShape));\n break;\n case 4:\n gpgpu.gl.uniform4iv(varShapeLoc, new Int32Array(uniformShape));\n break;\n default:\n break;\n }\n }\n if (varTexShapeLoc) {\n gpgpu.gl.uniform2i(\n varTexShapeLoc, input.texData.texShape[0], input.texData.texShape[1]);\n }\n\n if (varLoc == null) {\n // The compiler inferred that this variable is not used in this shader.\n return;\n }\n\n if (input.isUniform) {\n // Upload the values of the tensor as uniform.\n if (util.sizeFromShape(input.shape) < 2) {\n gpgpu.gl.uniform1f(varLoc, input.uniformValues[0]);\n } else {\n let vals = input.uniformValues;\n if (!(vals instanceof Float32Array)) {\n vals = new Float32Array(vals);\n }\n gpgpu.gl.uniform1fv(varLoc, vals);\n }\n return;\n }\n\n // If the input was sliced, 
upload the flat offset index.\n if (input.texData.slice != null && varOffsetLoc != null) {\n gpgpu.gl.uniform1i(varOffsetLoc, input.texData.slice.flatOffset);\n }\n\n gpgpu.setInputMatrixTexture(input.texData.texture.texture, varLoc, i);\n });\n\n const outShapeLoc = binary.outShapeLocation;\n if (outShapeLoc) {\n switch (output.shape.length) {\n case 1:\n gpgpu.gl.uniform1iv(outShapeLoc, new Int32Array(output.shape));\n break;\n case 2:\n gpgpu.gl.uniform2iv(outShapeLoc, new Int32Array(output.shape));\n break;\n case 3:\n gpgpu.gl.uniform3iv(outShapeLoc, new Int32Array(output.shape));\n break;\n case 4:\n gpgpu.gl.uniform4iv(outShapeLoc, new Int32Array(output.shape));\n break;\n default:\n break;\n }\n }\n if (binary.outShapeStridesLocation) {\n const strides = util.computeStrides(output.shape);\n switch (output.shape.length) {\n case 2:\n gpgpu.gl.uniform1iv(\n binary.outShapeStridesLocation, new Int32Array(strides));\n break;\n case 3:\n gpgpu.gl.uniform2iv(\n binary.outShapeStridesLocation, new Int32Array(strides));\n break;\n case 4:\n gpgpu.gl.uniform3iv(\n binary.outShapeStridesLocation, new Int32Array(strides));\n break;\n default:\n break;\n }\n }\n if (binary.outTexShapeLocation) {\n gpgpu.gl.uniform2i(\n binary.outTexShapeLocation, output.texData.texShape[0],\n output.texData.texShape[1]);\n }\n\n if (binary.program.customUniforms && customUniformValues) {\n binary.program.customUniforms.forEach((d, i) => {\n const customLoc = binary.customUniformLocations[i];\n const customValue = customUniformValues[i];\n if (d.type === 'float') {\n gpgpu.gl.uniform1fv(customLoc, customValue);\n } else if (d.type === 'vec2') {\n gpgpu.gl.uniform2fv(customLoc, customValue);\n } else if (d.type === 'vec3') {\n gpgpu.gl.uniform3fv(customLoc, customValue);\n } else if (d.type === 'vec4') {\n gpgpu.gl.uniform4fv(customLoc, customValue);\n } else if (d.type === 'int') {\n gpgpu.gl.uniform1iv(customLoc, customValue);\n } else if (d.type === 'ivec2') {\n gpgpu.gl.uniform2iv(customLoc, customValue);\n } else if (d.type === 'ivec3') {\n gpgpu.gl.uniform3iv(customLoc, customValue);\n } else if (d.type === 'ivec4') {\n gpgpu.gl.uniform4iv(customLoc, customValue);\n } else {\n throw Error(`uniform type ${d.type} is not supported yet.`);\n }\n });\n }\n gpgpu.executeProgram();\n}\n\nexport function makeShaderKey(\n program: GPGPUProgram, inputs: TensorData[], output: TensorData): string {\n let keyInputs = '';\n inputs.concat(output).forEach(x => {\n const hasOffset = x.texData != null && x.texData.slice != null &&\n x.texData.slice.flatOffset > 0;\n // TODO: Remove the condition of !x.isUniform.\n if (program.enableShapeUniforms && !x.isUniform) {\n const xTexShape = x.texData.texShape;\n const {useSqueezeShape, uniformShape, keptDims} =\n shader_compiler.getUniformInfoFromShape(\n program.packedInputs, x.shape, xTexShape);\n let rank1 = '', rank2 = '', rank34 = '';\n if (uniformShape.length === 1 && program.packedInputs) {\n const packedTexShape =\n [Math.ceil(xTexShape[0] / 2), Math.ceil(xTexShape[1] / 2)];\n rank1 = `${packedTexShape[0] > 1}_${packedTexShape[1] > 1}`;\n } else if (uniformShape.length === 2 && !program.packedInputs) {\n rank2 = `${uniformShape[0] > 1}_${uniformShape[1] > 1}`;\n } else if (uniformShape.length > 2 && !program.packedInputs) {\n const strides = util.computeStrides(uniformShape);\n rank34 = `${strides[0] === xTexShape[1]}_${\n strides[strides.length - 1] === xTexShape[1]}`;\n }\n const xRank = x.shape.length;\n const isLogicalShapTexShapeEqual =\n uniformShape.length === 2 && 
util.arraysEqual(x.shape, xTexShape);\n const isScalar = util.sizeFromShape(x.shape) === 1;\n const broadcastDims =\n backend_util.getBroadcastDims(x.shape, output.shape);\n const isInOutTexShapeEqual = !program.packedInputs &&\n xRank === output.shape.length &&\n util.arraysEqual(xTexShape, output.texData.texShape);\n const isTexShapeGreaterThanOne =\n program.packedInputs || uniformShape.length > 2 ?\n '' :\n `${xTexShape[0] > 1}_${xTexShape[1] > 1}`;\n // These key components are needed due to shader_compiler is embedding\n // them in the shader.\n // |xRank| is used to determine the coords length. See\n // get[Packed]SamplerAtOutputCoords.\n // |isInOutTexShapeEqual| is used to determine whether going to an\n // optimization path in getSamplerAtOutputCoords.\n // |useSqueezeShape| is extracted from squeezeInputInfo of\n // getSampler[2|3|4]D/getPackedSampler3D.\n // |isScalar| is extracted from isInputScalar/isOutputScalar in\n // getPackedSamplerAtOutputCoords.\n // |broadcastDims| is extracted from get[Packed]SamplerAtOutputCoords.\n // |isLogicalShapTexShapeEqual| is used in\n // getOutput[Packed]2DCoords/get[Packed]Sampler2D.\n // |rank1| is used in getOutputPacked1DCoords.\n // |rank2| is used in getOutput2DCoords.\n // |rank34| is used in getSampler3D/getSampler4D.\n // |isTexShapeGreaterThanOne| are used in\n // getSampler[Scalar|1D|2D]/getOutput1DCoords.\n keyInputs += `${xRank}_${isInOutTexShapeEqual}_${\n useSqueezeShape ? keptDims : ''}_${uniformShape.length}_${isScalar}_${\n broadcastDims}_${isLogicalShapTexShapeEqual}_${rank1}_${rank2}_${\n rank34}_${isTexShapeGreaterThanOne}_${hasOffset}`;\n } else {\n const texShape = x.isUniform ? 'uniform' : x.texData.texShape;\n keyInputs += `${x.shape}_${texShape}_${hasOffset}`;\n }\n });\n const keyUserCode = program.userCode;\n let key = program.constructor.name;\n // Fast string concat. See https://jsperf.com/string-concatenation/14.\n key += '_' + keyInputs + '_' + keyUserCode +\n `${env().getNumber('WEBGL_VERSION')}`;\n return key;\n}\n\nexport function useShapeUniforms(rank: number) {\n // TODO: Remove the limitaion of rank <= 4.\n return env().getBool('WEBGL_USE_SHAPES_UNIFORMS') && rank <= 4;\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getGlslDifferences} from './glsl_version';\nimport {GPGPUProgram, useShapeUniforms} from './gpgpu_math';\nimport * as shader_util from './shader_compiler_util';\nimport {PackingScheme} from './tex_util';\n\nexport class DecodeMatrixProgram implements GPGPUProgram {\n variableNames = ['A'];\n userCode: string;\n outputShape: [number, number, number];\n packedInputs = false;\n packedOutput = true;\n outPackingScheme = PackingScheme.DENSE;\n enableShapeUniforms: boolean;\n customUniforms = [{name: 'texShape', type: 'ivec2' as const }];\n\n constructor(outputShape: [number, number, number]) {\n const glsl = getGlslDifferences();\n this.outputShape = outputShape;\n this.enableShapeUniforms = useShapeUniforms(this.outputShape.length);\n\n this.userCode = `\n ivec3 outCoordsFromFlatIndex(int index) {\n ${\n this.enableShapeUniforms ?\n shader_util.getOutputLogicalCoordinatesFromFlatIndexByUniform(\n ['r', 'c', 'd'], outputShape) :\n shader_util.getLogicalCoordinatesFromFlatIndex(\n ['r', 'c', 'd'], outputShape)}\n return ivec3(r, c, d);\n }\n\n void main() {\n ivec2 resTexRC = ivec2(resultUV.yx * vec2(texShape[0], texShape[1]));\n int index = 4 * (resTexRC.x * texShape[1] + resTexRC.y);\n\n vec4 result = vec4(0.);\n\n for (int i=0; i<4; i++) {\n int flatIndex = index + i;\n ivec3 rc = outCoordsFromFlatIndex(flatIndex);\n result[i] = getA(rc.x, rc.y, rc.z);\n }\n\n ${glsl.output} = result;\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getGlslDifferences} from './glsl_version';\nimport {GPGPUProgram, useShapeUniforms} from './gpgpu_math';\nimport * as shader_util from './shader_compiler_util';\nimport {PackingScheme} from './tex_util';\n\nexport class DecodeMatrixPackedProgram implements GPGPUProgram {\n variableNames = ['A'];\n userCode: string;\n packedInputs = true;\n packedOutput = true;\n outputShape: [number, number, number];\n outPackingScheme = PackingScheme.DENSE;\n enableShapeUniforms: boolean;\n customUniforms = [{name: 'texShape', type: 'ivec2' as const }];\n\n constructor(outputShape: [number, number, number]) {\n const glsl = getGlslDifferences();\n this.outputShape = outputShape;\n this.enableShapeUniforms = useShapeUniforms(this.outputShape.length);\n\n this.userCode = `\n ivec3 outCoordsFromFlatIndex(int index) {\n ${\n this.enableShapeUniforms ?\n shader_util.getOutputLogicalCoordinatesFromFlatIndexByUniform(\n ['r', 'c', 'd'], outputShape) :\n shader_util.getLogicalCoordinatesFromFlatIndex(\n ['r', 'c', 'd'], outputShape)}\n return ivec3(r, c, d);\n }\n\n void main() {\n ivec2 resTexRC = ivec2(resultUV.yx * vec2(texShape[0], texShape[1]));\n int index = 4 * (resTexRC.x * texShape[1] + resTexRC.y);\n\n vec4 result = vec4(0.);\n\n for (int i=0; i<4; i++) {\n int flatIndex = index + i;\n ivec3 rc = outCoordsFromFlatIndex(flatIndex);\n result[i] = getChannel(getA(rc.x, rc.y, rc.z), vec2(rc.y, rc.z));\n }\n\n ${glsl.output} = result;\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getGlslDifferences} from './glsl_version';\nimport {GPGPUProgram} from './gpgpu_math';\nimport {ENCODE_FLOAT_SNIPPET} from './shader_compiler_util';\nimport {TextureUsage} from './tex_util';\n\nexport class EncodeFloatProgram implements GPGPUProgram {\n variableNames = ['A'];\n userCode: string;\n outputShape: number[];\n outTexUsage = TextureUsage.DOWNLOAD;\n\n constructor(outputShape: number[]) {\n const glsl = getGlslDifferences();\n this.outputShape = outputShape;\n this.userCode = `\n ${ENCODE_FLOAT_SNIPPET}\n\n void main() {\n float x = getAAtOutCoords();\n ${glsl.output} = encode_float(x);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getGlslDifferences} from './glsl_version';\nimport {GPGPUProgram} from './gpgpu_math';\nimport {ENCODE_FLOAT_SNIPPET} from './shader_compiler_util';\nimport {TextureUsage} from './tex_util';\n\nexport class EncodeFloatPackedProgram implements GPGPUProgram {\n variableNames = ['A'];\n userCode: string;\n outputShape: number[];\n packedInputs = true;\n packedOutput = false;\n outTexUsage = TextureUsage.DOWNLOAD;\n\n constructor(outputShape: [number, number, number]) {\n const glsl = getGlslDifferences();\n this.outputShape = outputShape;\n this.userCode = `\n ${ENCODE_FLOAT_SNIPPET}\n\n void main() {\n ivec3 coords = getOutputCoords();\n float x = getChannel(getAAtOutCoords(), vec2(coords.y, coords.z));\n ${glsl.output} = encode_float(x);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getGlslDifferences} from './glsl_version';\nimport {GPGPUProgram, useShapeUniforms} from './gpgpu_math';\nimport * as shader_util from './shader_compiler_util';\n\nconst CHANNEL_CHAR_TO_INDEX_MAP: Record = {\n 'R': 0,\n 'G': 1,\n 'B': 2,\n 'A': 3\n};\n\nexport class EncodeMatrixProgram implements GPGPUProgram {\n variableNames = ['A'];\n userCode: string;\n outputShape: number[];\n enableShapeUniforms: boolean;\n customUniforms = [{name: 'texShape', type: 'ivec2' as const }];\n\n constructor(\n outputShape: [number, number, number], inputIsUnsignedByte = false,\n usedChannels = 'RGBA') {\n const glsl = getGlslDifferences();\n this.outputShape = outputShape;\n this.enableShapeUniforms = useShapeUniforms(this.outputShape.length);\n\n let output = `result`;\n if (inputIsUnsignedByte) {\n output = `floor(result * 255. + 0.5)`;\n }\n\n let mainLoop = '';\n for (let usedChannelIndex = 0; usedChannelIndex < usedChannels.length;\n usedChannelIndex++) {\n const curChannel = usedChannels[usedChannelIndex];\n mainLoop += `\n if(offset == ${usedChannelIndex}) {\n result = values[${CHANNEL_CHAR_TO_INDEX_MAP[curChannel]}];\n }`;\n }\n\n this.userCode = `\n ${\n this.enableShapeUniforms ? 
shader_util.getFlatIndexFrom3DOutput() :\n shader_util.getFlatIndexFrom3D(outputShape)}\n\n void main() {\n ivec3 coords = getOutputCoords();\n int flatIndex = getFlatIndex(coords);\n float result = 0.;\n int offset = imod(flatIndex, ${usedChannels.length});\n\n flatIndex = idiv(flatIndex, ${usedChannels.length}, 1.);\n\n int r = flatIndex / texShape[1];\n if (r < texShape[0]) {\n int c = imod(flatIndex, texShape[1]);\n vec2 uv = (vec2(c, r) + halfCR) / vec2(texShape[1], texShape[0]);\n vec4 values = ${glsl.texture2D}(A, uv);\n ${mainLoop}\n }\n ${glsl.output} = vec4(${output}, 0., 0., 0.);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getGlslDifferences} from './glsl_version';\nimport {GPGPUProgram, useShapeUniforms} from './gpgpu_math';\nimport * as shader_util from './shader_compiler_util';\n\n/*\nThis is how the shader encodes a tensor with shape = [2, 3, 5]\n(indices are [batch, row, col]).\n\n000|001 002|003 004|xxx 020|021 022|023 024|xxx\n------- ------- ------- ------- ------- -------\n010|011 012|013 014|xxx xxx|xxx xxx|xxx xxx|xxx\n\n100|101 102|103 104|xxx 120|121 122|123 124|xxx\n------- ------- ------- ------- ------- -------\n110|111 112|113 114|xxx xxx|xxx xxx|xxx xxx|xxx\n\nSingle texels contain only values from the same batch, and from adjacent rows\nand columns.\n */\n\nexport class EncodeMatrixPackedProgram implements GPGPUProgram {\n variableNames = ['A'];\n userCode: string;\n outputShape: number[];\n packedInputs = false;\n packedOutput = true;\n enableShapeUniforms: boolean;\n customUniforms = [{name: 'texShape', type: 'ivec2' as const }];\n\n constructor(\n outputShape: [number, number, number], inputIsUnsignedByte = false) {\n const glsl = getGlslDifferences();\n this.outputShape = outputShape;\n this.enableShapeUniforms = useShapeUniforms(this.outputShape.length);\n\n let mainLoop = '';\n let output = 'result';\n if (inputIsUnsignedByte) {\n output = 'floor(result * 255. + 0.5)';\n }\n\n for (let row = 0; row <= 1; row++) {\n for (let col = 0; col <= 1; col++) {\n const channel = row * 2 + col;\n\n mainLoop += `\n localCoords = coords;\n if(localCoords[2] + ${col} < ${\n this.enableShapeUniforms ? 'outShape[2]' : `${outputShape[2]}`}) {\n localCoords[2] += ${col};\n if (localCoords[1] + ${row} < ${\n this.enableShapeUniforms ? 
'outShape[1]' : `${outputShape[1]}`}) {\n localCoords[1] += ${row};\n\n flatIndex = getFlatIndex(localCoords);\n offset = imod(flatIndex, 4);\n\n flatIndex = idiv(flatIndex, 4, 1.);\n\n int r = flatIndex / texShape[1];\n int c = imod(flatIndex, texShape[1]);\n vec2 uv = (vec2(c, r) + halfCR) / vec2(texShape[1], texShape[0]);\n values = ${glsl.texture2D}(A, uv);\n\n if (offset == 0) {\n result[${channel}] = values[0];\n } else if (offset == 1) {\n result[${channel}] = values[1];\n } else if (offset == 2) {\n result[${channel}] = values[2];\n } else {\n result[${channel}] = values[3];\n }\n }\n }\n `;\n }\n }\n\n this.userCode = `\n ${\n this.enableShapeUniforms ? shader_util.getFlatIndexFrom3DOutput() :\n shader_util.getFlatIndexFrom3D(outputShape)}\n\n void main() {\n ivec3 coords = getOutputCoords();\n\n vec4 result = vec4(0.);\n int flatIndex, r, c, offset;\n ivec3 localCoords;\n vec2 uv;\n vec4 values;\n\n ${mainLoop}\n\n ${glsl.output} = ${output};\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env, PixelData, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {getGlslDifferences} from './glsl_version';\nimport * as tex_util from './tex_util';\nimport {Texture, TextureConfig} from './tex_util';\nimport * as webgl_util from './webgl_util';\n\nexport function createVertexShader(gl: WebGLRenderingContext): WebGLShader {\n const glsl = getGlslDifferences();\n const vertexShaderSource = `${glsl.version}\n precision highp float;\n ${glsl.attribute} vec3 clipSpacePos;\n ${glsl.attribute} vec2 uv;\n ${glsl.varyingVs} vec2 resultUV;\n\n void main() {\n gl_Position = vec4(clipSpacePos, 1);\n resultUV = uv;\n }`;\n return webgl_util.createVertexShader(gl, vertexShaderSource);\n}\n\nexport function createVertexBuffer(gl: WebGLRenderingContext): WebGLBuffer {\n // [x y z u v] * [upper-left, lower-left, upper-right, lower-right]\n const vertexArray = new Float32Array(\n [-1, 1, 0, 0, 1, -1, -1, 0, 0, 0, 1, 1, 0, 1, 1, 1, -1, 0, 1, 0]);\n return webgl_util.createStaticVertexBuffer(gl, vertexArray);\n}\n\nexport function createIndexBuffer(gl: WebGLRenderingContext): WebGLBuffer {\n // OpenGL (and WebGL) have \"CCW == front\" winding\n const triangleVertexIndices = new Uint16Array([0, 1, 2, 2, 1, 3]);\n return webgl_util.createStaticIndexBuffer(gl, triangleVertexIndices);\n}\n\nfunction createAndConfigureTexture(\n gl: WebGLRenderingContext, width: number, height: number,\n internalFormat: number, textureFormat: number,\n textureType: number): Texture {\n webgl_util.validateTextureSize(width, height);\n const texture = webgl_util.createTexture(gl);\n\n const tex2d = gl.TEXTURE_2D;\n webgl_util.callAndCheck(gl, () => gl.bindTexture(tex2d, texture));\n webgl_util.callAndCheck(\n gl, () => gl.texParameteri(tex2d, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE));\n webgl_util.callAndCheck(\n gl, () => 
gl.texParameteri(tex2d, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE));\n webgl_util.callAndCheck(\n gl, () => gl.texParameteri(tex2d, gl.TEXTURE_MIN_FILTER, gl.NEAREST));\n webgl_util.callAndCheck(\n gl, () => gl.texParameteri(tex2d, gl.TEXTURE_MAG_FILTER, gl.NEAREST));\n if (env().getNumber('WEBGL_VERSION') === 1) {\n webgl_util.callAndCheck(\n gl,\n () => gl.texImage2D(\n tex2d, 0, internalFormat, width, height, 0, textureFormat,\n textureType, null));\n } else {\n webgl_util.callAndCheck(\n gl,\n () => (gl as WebGL2RenderingContext)\n .texStorage2D(tex2d, 1, internalFormat, width, height));\n }\n webgl_util.callAndCheck(gl, () => gl.bindTexture(gl.TEXTURE_2D, null));\n\n return {texture, texShape: [height, width]};\n}\n\nexport function getInternalFormatForFloat32MatrixTexture(\n textureConfig: TextureConfig) {\n return textureConfig.internalFormatFloat;\n}\n\nexport function createFloat32MatrixTexture(\n gl: WebGLRenderingContext, rows: number, columns: number,\n textureConfig: TextureConfig): Texture {\n const [width, height] =\n tex_util.getUnpackedMatrixTextureShapeWidthHeight(rows, columns);\n return createAndConfigureTexture(\n gl, width, height,\n getInternalFormatForFloat32MatrixTexture(textureConfig),\n textureConfig.textureFormatFloat, gl.FLOAT);\n}\n\nexport function getInternalFormatForFloat16MatrixTexture(\n textureConfig: TextureConfig) {\n return textureConfig.internalFormatHalfFloat;\n}\n\nexport function createFloat16MatrixTexture(\n gl: WebGLRenderingContext, rows: number, columns: number,\n textureConfig: TextureConfig): Texture {\n const [width, height] =\n tex_util.getUnpackedMatrixTextureShapeWidthHeight(rows, columns);\n return createAndConfigureTexture(\n gl, width, height,\n getInternalFormatForFloat16MatrixTexture(textureConfig),\n textureConfig.textureFormatFloat, textureConfig.textureTypeHalfFloat);\n}\n\nexport function getInternalFormatForUnsignedBytesMatrixTexture(\n textureConfig: TextureConfig) {\n return textureConfig.downloadTextureFormat;\n}\n\nexport function createUnsignedBytesMatrixTexture(\n gl: WebGLRenderingContext, rows: number, columns: number,\n textureConfig: TextureConfig): Texture {\n const [width, height] =\n tex_util.getUnpackedMatrixTextureShapeWidthHeight(rows, columns);\n return createAndConfigureTexture(\n gl, width, height,\n getInternalFormatForUnsignedBytesMatrixTexture(textureConfig), gl.RGBA,\n gl.UNSIGNED_BYTE);\n}\n\nexport function getInternalFormatForPackedMatrixTexture(\n textureConfig: TextureConfig) {\n return textureConfig.internalFormatPackedFloat;\n}\n\nexport function createPackedMatrixTexture(\n gl: WebGLRenderingContext, rows: number, columns: number,\n textureConfig: TextureConfig): Texture {\n const [width, height] =\n tex_util.getPackedMatrixTextureShapeWidthHeight(rows, columns);\n return createAndConfigureTexture(\n gl, width, height, getInternalFormatForPackedMatrixTexture(textureConfig),\n gl.RGBA, gl.FLOAT);\n}\n\nexport function getInternalFormatForFloat16PackedMatrixTexture(\n textureConfig: TextureConfig) {\n return textureConfig.internalFormatPackedHalfFloat;\n}\n\nexport function createFloat16PackedMatrixTexture(\n gl: WebGLRenderingContext, rows: number, columns: number,\n textureConfig: TextureConfig): Texture {\n const [width, height] =\n tex_util.getPackedMatrixTextureShapeWidthHeight(rows, columns);\n return createAndConfigureTexture(\n gl, width, height,\n getInternalFormatForFloat16PackedMatrixTexture(textureConfig), gl.RGBA,\n textureConfig.textureTypeHalfFloat);\n}\n\nexport function 
bindVertexProgramAttributeStreams(\n gl: WebGLRenderingContext, program: WebGLProgram,\n vertexBuffer: WebGLBuffer): boolean {\n const posOffset = 0; // x is the first buffer element\n const uvOffset = 3 * 4; // uv comes after [x y z]\n const stride = (3 * 4) + (2 * 4); // xyz + uv, each entry is 4-byte float.\n webgl_util.callAndCheck(\n gl, () => gl.bindBuffer(gl.ARRAY_BUFFER, vertexBuffer));\n const success = webgl_util.bindVertexBufferToProgramAttribute(\n gl, program, 'clipSpacePos', vertexBuffer, 3, stride, posOffset);\n return success &&\n webgl_util.bindVertexBufferToProgramAttribute(\n gl, program, 'uv', vertexBuffer, 2, stride, uvOffset);\n}\n\nexport function uploadDenseMatrixToTexture(\n gl: WebGLRenderingContext, texture: WebGLTexture, width: number,\n height: number, data: TypedArray, textureConfig: TextureConfig) {\n webgl_util.callAndCheck(gl, () => gl.bindTexture(gl.TEXTURE_2D, texture));\n\n let dataForUpload: TypedArray, texelDataType: number, internalFormat: number;\n if (data instanceof Uint8Array) {\n dataForUpload = new Uint8Array(width * height * 4);\n texelDataType = gl.UNSIGNED_BYTE;\n internalFormat = gl.RGBA;\n } else {\n dataForUpload = new Float32Array(width * height * 4);\n texelDataType = gl.FLOAT;\n internalFormat = textureConfig.internalFormatPackedFloat;\n }\n\n dataForUpload.set(data);\n if (env().getNumber('WEBGL_VERSION') === 2) {\n webgl_util.callAndCheck(\n gl,\n () => gl.texSubImage2D(\n gl.TEXTURE_2D, 0, 0, 0, width, height, gl.RGBA, texelDataType,\n dataForUpload));\n } else {\n webgl_util.callAndCheck(\n gl,\n () => gl.texImage2D(\n gl.TEXTURE_2D, 0, internalFormat, width, height, 0, gl.RGBA,\n texelDataType, dataForUpload));\n }\n\n webgl_util.callAndCheck(gl, () => gl.bindTexture(gl.TEXTURE_2D, null));\n}\n\nexport function uploadPixelDataToTexture(\n gl: WebGLRenderingContext, texture: WebGLTexture,\n pixels: PixelData|ImageData|HTMLImageElement|HTMLCanvasElement|\n HTMLVideoElement|ImageBitmap) {\n webgl_util.callAndCheck(gl, () => gl.bindTexture(gl.TEXTURE_2D, texture));\n if ((pixels as PixelData).data instanceof Uint8Array) {\n if (env().getNumber('WEBGL_VERSION') === 2) {\n webgl_util.callAndCheck(\n gl,\n () => gl.texSubImage2D(\n gl.TEXTURE_2D, 0, 0, 0, pixels.width, pixels.height, gl.RGBA,\n gl.UNSIGNED_BYTE, (pixels as PixelData).data));\n } else {\n webgl_util.callAndCheck(\n gl,\n () => gl.texImage2D(\n gl.TEXTURE_2D, 0, gl.RGBA, pixels.width, pixels.height, 0,\n gl.RGBA, gl.UNSIGNED_BYTE, (pixels as PixelData).data));\n }\n } else {\n if (env().getNumber('WEBGL_VERSION') === 2) {\n webgl_util.callAndCheck(\n gl,\n () => gl.texSubImage2D(\n gl.TEXTURE_2D, 0, 0, 0, gl.RGBA, gl.UNSIGNED_BYTE,\n (pixels as ImageData | HTMLImageElement | HTMLCanvasElement |\n HTMLVideoElement | ImageBitmap)));\n } else {\n webgl_util.callAndCheck(\n gl,\n () => gl.texImage2D(\n gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE,\n pixels as ImageData | HTMLImageElement | HTMLCanvasElement |\n HTMLVideoElement | ImageBitmap));\n }\n }\n\n webgl_util.callAndCheck(gl, () => gl.bindTexture(gl.TEXTURE_2D, null));\n}\n\nexport function createBufferFromOutputTexture(\n gl2: WebGL2RenderingContext, rows: number, columns: number,\n textureConfig: TextureConfig): WebGLBuffer {\n // Create and bind the buffer.\n const buffer = gl2.createBuffer();\n webgl_util.callAndCheck(\n gl2, () => gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, buffer));\n\n // Initialize the buffer to the size of the texture in bytes.\n const bytesPerFloat = 4;\n const valuesPerTexel = 4;\n const 
bufferSizeBytes = bytesPerFloat * valuesPerTexel * rows * columns;\n\n webgl_util.callAndCheck(\n gl2,\n () => gl2.bufferData(\n gl2.PIXEL_PACK_BUFFER, bufferSizeBytes, gl2.STREAM_READ));\n\n // Enqueue a command on the GPU command queue to copy of texture into the\n // buffer.\n webgl_util.callAndCheck(\n gl2, () => gl2.readPixels(0, 0, columns, rows, gl2.RGBA, gl2.FLOAT, 0));\n\n webgl_util.callAndCheck(\n gl2, () => gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, null));\n\n return buffer;\n}\n\nexport function downloadFloat32MatrixFromBuffer(\n gl: WebGLRenderingContext, buffer: WebGLBuffer,\n size: number): Float32Array {\n const gl2 = gl as WebGL2RenderingContext;\n\n const downloadTarget = new Float32Array(size);\n\n gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, buffer);\n gl2.getBufferSubData(gl2.PIXEL_PACK_BUFFER, 0, downloadTarget);\n gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, null);\n\n return downloadTarget;\n}\n\nexport function downloadByteEncodedFloatMatrixFromOutputTexture(\n gl: WebGLRenderingContext, rows: number, columns: number,\n textureConfig: TextureConfig) {\n const [w, h] =\n tex_util.getUnpackedMatrixTextureShapeWidthHeight(rows, columns);\n\n const numChannels = 4;\n const downloadTarget = new Uint8Array(\n tex_util.getUnpackedArraySizeFromMatrixSize(rows * columns, numChannels));\n\n webgl_util.callAndCheck(\n gl,\n () => gl.readPixels(\n 0, 0, w, h, textureConfig.downloadTextureFormat, gl.UNSIGNED_BYTE,\n downloadTarget));\n\n // By wrapping the buffer in a Float32Array, we use native browser IEEE 754\n // decoding of the 4 bytes that back each 32 bit float.\n return new Float32Array(downloadTarget.buffer);\n}\n\nexport function downloadPackedMatrixFromBuffer(\n gl: WebGLRenderingContext, buffer: WebGLBuffer, batch: number, rows: number,\n cols: number, physicalRows: number, physicalCols: number,\n textureConfig: TextureConfig): Float32Array {\n const gl2 = gl as WebGL2RenderingContext;\n\n const downloadTarget =\n new Float32Array(tex_util.getPackedRGBAArraySizeFromMatrixShape(\n physicalRows, physicalCols));\n\n gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, buffer);\n gl2.getBufferSubData(gl2.PIXEL_PACK_BUFFER, 0, downloadTarget);\n gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, null);\n\n return downloadTarget;\n}\n\nexport function downloadMatrixFromPackedOutputTexture(\n gl: WebGLRenderingContext, physicalRows: number,\n physicalCols: number): Float32Array {\n const packedRGBA = new Float32Array(physicalRows * physicalCols * 4);\n webgl_util.callAndCheck(\n gl,\n () => gl.readPixels(\n 0, 0, physicalCols, physicalRows, gl.RGBA, gl.FLOAT, packedRGBA));\n\n return packedRGBA;\n}\n","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env, PixelData, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {getWebGLContext, setWebGLContext} from './canvas_util';\nimport * as gpgpu_util from './gpgpu_util';\nimport * as tex_util from './tex_util';\nimport {Texture, TextureConfig} from './tex_util';\nimport {WebGL1DisjointQueryTimerExtension, WebGL2DisjointQueryTimerExtension, WebGLParallelCompilationExtension} from './webgl_types';\nimport * as webgl_util from './webgl_util';\n\nexport interface FenceContext {\n query: WebGLQuery|WebGLSync;\n isFencePassed(): boolean;\n}\n\ntype WebGLVao = WebGLVertexArrayObject | WebGLVertexArrayObjectOES;\n\nexport interface GPGPUContextProgram extends WebGLProgram {\n vao: WebGLVao;\n}\n\nexport class GPGPUContext {\n gl: WebGLRenderingContext;\n textureFloatExtension: {};\n textureHalfFloatExtension: {};\n colorBufferFloatExtension: {};\n colorBufferHalfFloatExtension: {};\n disjointQueryTimerExtension: WebGL2DisjointQueryTimerExtension|\n WebGL1DisjointQueryTimerExtension;\n parallelCompilationExtension: WebGLParallelCompilationExtension;\n vertexBuffer: WebGLBuffer;\n indexBuffer: WebGLBuffer;\n framebuffer: WebGLFramebuffer;\n outputTexture: WebGLTexture|null = null;\n program: GPGPUContextProgram|null = null;\n private disposed = false;\n private disjoint: boolean;\n private vertexShader: WebGLShader;\n textureConfig: TextureConfig;\n\n createVertexArray: () => WebGLVao | null;\n bindVertexArray: (vao: WebGLVao | null) => void;\n deleteVertexArray: (vao: WebGLVao | null) => void;\n getVertexArray: () => WebGLVao | null;\n\n constructor(gl?: WebGLRenderingContext) {\n const glVersion = env().getNumber('WEBGL_VERSION');\n if (gl != null) {\n this.gl = gl;\n setWebGLContext(glVersion, gl);\n } else {\n this.gl = getWebGLContext(glVersion);\n }\n gl = this.gl;\n\n if (env().getNumber('WEBGL_VERSION') === 2) {\n const gl2 = gl as WebGL2RenderingContext;\n this.createVertexArray = () => {\n return webgl_util.callAndCheck(gl2,\n () => gl2.createVertexArray());\n };\n this.bindVertexArray = (vao: WebGLVao|null) => {\n return webgl_util.callAndCheck(gl2,\n () => gl2.bindVertexArray(vao as WebGLVertexArrayObject));\n };\n this.deleteVertexArray = (vao: WebGLVao|null) => {\n return webgl_util.callAndCheck(gl2,\n () => gl2.deleteVertexArray(vao as WebGLVertexArrayObject));\n };\n this.getVertexArray = () => {\n return webgl_util.callAndCheck(gl2,\n () => gl2.getParameter(gl2.VERTEX_ARRAY_BINDING));\n };\n } else if (gl != null) {\n const ext = gl.getExtension('OES_vertex_array_object');\n if (ext == null) {\n throw new Error(\n 'All WebGL1 implementations are expected to offer' +\n ' OES_vertex_array_object.');\n }\n this.createVertexArray = () => {\n return webgl_util.callAndCheck(gl,\n () => ext.createVertexArrayOES());\n };\n this.bindVertexArray = (vao: WebGLVao|null) => {\n return webgl_util.callAndCheck(gl,\n () 
=> ext.bindVertexArrayOES(vao as WebGLVertexArrayObjectOES));\n };\n this.deleteVertexArray = (vao: WebGLVao|null) => {\n return webgl_util.callAndCheck(gl,\n () => ext.deleteVertexArrayOES(vao as WebGLVertexArrayObjectOES));\n };\n this.getVertexArray = () => {\n return webgl_util.callAndCheck(gl,\n () => gl.getParameter(ext.VERTEX_ARRAY_BINDING_OES));\n };\n }\n\n // WebGL 2.0 enables texture floats without an extension.\n let COLOR_BUFFER_FLOAT = 'WEBGL_color_buffer_float';\n const COLOR_BUFFER_HALF_FLOAT = 'EXT_color_buffer_half_float';\n this.parallelCompilationExtension =\n this.gl.getExtension('KHR_parallel_shader_compile');\n if (env().getNumber('WEBGL_VERSION') === 1) {\n const TEXTURE_FLOAT = 'OES_texture_float';\n const TEXTURE_HALF_FLOAT = 'OES_texture_half_float';\n\n this.textureFloatExtension =\n webgl_util.getExtensionOrThrow(this.gl, TEXTURE_FLOAT);\n if (webgl_util.hasExtension(this.gl, TEXTURE_HALF_FLOAT)) {\n this.textureHalfFloatExtension =\n webgl_util.getExtensionOrThrow(this.gl, TEXTURE_HALF_FLOAT);\n } else if (env().get('WEBGL_FORCE_F16_TEXTURES')) {\n throw new Error(\n 'GL context does not support half float textures, yet the ' +\n 'environment flag WEBGL_FORCE_F16_TEXTURES is set to true.');\n }\n\n this.colorBufferFloatExtension = this.gl.getExtension(COLOR_BUFFER_FLOAT);\n if (webgl_util.hasExtension(this.gl, COLOR_BUFFER_HALF_FLOAT)) {\n this.colorBufferHalfFloatExtension =\n webgl_util.getExtensionOrThrow(this.gl, COLOR_BUFFER_HALF_FLOAT);\n } else if (env().get('WEBGL_FORCE_F16_TEXTURES')) {\n throw new Error(\n 'GL context does not support color renderable half floats, yet ' +\n 'the environment flag WEBGL_FORCE_F16_TEXTURES is set to true.');\n }\n } else {\n COLOR_BUFFER_FLOAT = 'EXT_color_buffer_float';\n if (webgl_util.hasExtension(this.gl, COLOR_BUFFER_FLOAT)) {\n this.colorBufferFloatExtension =\n this.gl.getExtension(COLOR_BUFFER_FLOAT);\n } else if (webgl_util.hasExtension(this.gl, COLOR_BUFFER_HALF_FLOAT)) {\n this.colorBufferHalfFloatExtension =\n this.gl.getExtension(COLOR_BUFFER_HALF_FLOAT);\n } else {\n throw new Error('GL context does not support color renderable floats');\n }\n }\n\n this.vertexBuffer = gpgpu_util.createVertexBuffer(this.gl);\n this.indexBuffer = gpgpu_util.createIndexBuffer(this.gl);\n this.framebuffer = webgl_util.createFramebuffer(this.gl);\n\n this.textureConfig =\n tex_util.getTextureConfig(this.gl, this.textureHalfFloatExtension);\n }\n\n private get debug(): boolean {\n return env().getBool('DEBUG');\n }\n\n public dispose() {\n if (this.disposed) {\n return;\n }\n if (this.program != null) {\n console.warn(\n 'Disposing a GPGPUContext that still has a bound WebGLProgram.' +\n ' This is probably a resource leak, delete the program with ' +\n 'GPGPUContext.deleteProgram before disposing.');\n }\n if (this.outputTexture != null) {\n console.warn(\n 'Disposing a GPGPUContext that still has a bound output matrix ' +\n 'texture. 
This is probably a resource leak, delete the output ' +\n 'matrix texture with GPGPUContext.deleteMatrixTexture before ' +\n 'disposing.');\n }\n const gl = this.gl;\n webgl_util.callAndCheck(gl, () => gl.finish());\n webgl_util.callAndCheck(gl, () => gl.bindFramebuffer(gl.FRAMEBUFFER, null));\n webgl_util.callAndCheck(gl, () => gl.deleteFramebuffer(this.framebuffer));\n webgl_util.callAndCheck(gl, () => gl.bindBuffer(gl.ARRAY_BUFFER, null));\n webgl_util.callAndCheck(\n gl, () => gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, null));\n webgl_util.callAndCheck(gl, () => gl.deleteBuffer(this.indexBuffer));\n this.disposed = true;\n }\n\n public createFloat32MatrixTexture(rows: number, columns: number): Texture {\n this.throwIfDisposed();\n return gpgpu_util.createFloat32MatrixTexture(\n this.gl, rows, columns, this.textureConfig);\n }\n\n public createFloat16MatrixTexture(rows: number, columns: number): Texture {\n this.throwIfDisposed();\n return gpgpu_util.createFloat16MatrixTexture(\n this.gl, rows, columns, this.textureConfig);\n }\n\n public createUnsignedBytesMatrixTexture(rows: number, columns: number):\n Texture {\n this.throwIfDisposed();\n return gpgpu_util.createUnsignedBytesMatrixTexture(\n this.gl, rows, columns, this.textureConfig);\n }\n\n public uploadPixelDataToTexture(\n texture: WebGLTexture,\n pixels: PixelData|ImageData|HTMLImageElement|HTMLCanvasElement|\n ImageBitmap) {\n this.throwIfDisposed();\n gpgpu_util.uploadPixelDataToTexture(this.gl, texture, pixels);\n }\n\n public uploadDenseMatrixToTexture(\n texture: WebGLTexture, width: number, height: number, data: TypedArray) {\n this.throwIfDisposed();\n gpgpu_util.uploadDenseMatrixToTexture(\n this.gl, texture, width, height, data, this.textureConfig);\n }\n\n public createFloat16PackedMatrixTexture(rows: number, columns: number):\n Texture {\n this.throwIfDisposed();\n return gpgpu_util.createFloat16PackedMatrixTexture(\n this.gl, rows, columns, this.textureConfig);\n }\n\n public createPackedMatrixTexture(rows: number, columns: number): Texture {\n this.throwIfDisposed();\n return gpgpu_util.createPackedMatrixTexture(\n this.gl, rows, columns, this.textureConfig);\n }\n\n public deleteMatrixTexture(texture: WebGLTexture) {\n this.throwIfDisposed();\n if (this.outputTexture === texture) {\n webgl_util.unbindColorTextureFromFramebuffer(this.gl, this.framebuffer);\n this.outputTexture = null;\n }\n webgl_util.callAndCheck(this.gl, () => this.gl.deleteTexture(texture));\n }\n\n public downloadByteEncodedFloatMatrixFromOutputTexture(\n texture: WebGLTexture, rows: number, columns: number): Float32Array {\n return this.downloadMatrixDriver(\n texture,\n () => gpgpu_util.downloadByteEncodedFloatMatrixFromOutputTexture(\n this.gl, rows, columns, this.textureConfig));\n }\n\n public downloadPackedMatrixFromBuffer(\n buffer: WebGLBuffer, batch: number, rows: number, columns: number,\n physicalRows: number, physicalCols: number): Float32Array {\n return gpgpu_util.downloadPackedMatrixFromBuffer(\n this.gl, buffer, batch, rows, columns, physicalRows, physicalCols,\n this.textureConfig);\n }\n\n public downloadFloat32MatrixFromBuffer(buffer: WebGLBuffer, size: number):\n Float32Array {\n return gpgpu_util.downloadFloat32MatrixFromBuffer(this.gl, buffer, size);\n }\n\n public createBufferFromTexture(\n texture: WebGLTexture, rows: number, columns: number): WebGLBuffer {\n this.bindTextureToFrameBuffer(texture);\n const result = gpgpu_util.createBufferFromOutputTexture(\n this.gl as WebGL2RenderingContext, rows, columns, 
this.textureConfig);\n this.unbindTextureToFrameBuffer();\n return result;\n }\n\n public createAndWaitForFence(): Promise {\n const fenceContext = this.createFence(this.gl);\n return this.pollFence(fenceContext);\n }\n\n private createFence(gl: WebGLRenderingContext): FenceContext {\n let query: WebGLQuery|WebGLSync;\n let isFencePassed: () => boolean;\n\n if (env().getBool('WEBGL_FENCE_API_ENABLED')) {\n const gl2 = gl as WebGL2RenderingContext;\n\n const sync = gl2.fenceSync(gl2.SYNC_GPU_COMMANDS_COMPLETE, 0);\n gl.flush();\n\n isFencePassed = () => {\n const status = gl2.clientWaitSync(sync, 0, 0);\n return status === gl2.ALREADY_SIGNALED ||\n status === gl2.CONDITION_SATISFIED;\n };\n\n query = sync;\n } else if (\n env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION') > 0) {\n query = this.beginQuery();\n this.endQuery();\n isFencePassed = () => this.isQueryAvailable(\n query,\n env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION'));\n } else {\n // If we have no way to fence, return true immediately. This will fire in\n // WebGL 1.0 when there is no disjoint query timer. In this case, because\n // the fence passes immediately, we'll immediately ask for a download of\n // the texture, which will cause the UI thread to hang.\n isFencePassed = () => true;\n }\n\n return {query, isFencePassed};\n }\n\n public downloadMatrixFromPackedTexture(\n texture: WebGLTexture, physicalRows: number,\n physicalCols: number): Float32Array {\n return this.downloadMatrixDriver(\n texture,\n () => gpgpu_util.downloadMatrixFromPackedOutputTexture(\n this.gl, physicalRows, physicalCols));\n }\n\n public createProgram(fragmentShader: WebGLShader): GPGPUContextProgram {\n this.throwIfDisposed();\n const gl = this.gl;\n if (this.vertexShader == null) {\n this.vertexShader = gpgpu_util.createVertexShader(gl);\n }\n const program: WebGLProgram = webgl_util.createProgram(gl);\n webgl_util.callAndCheck(\n gl, () => gl.attachShader(program, this.vertexShader));\n webgl_util.callAndCheck(gl, () => gl.attachShader(program, fragmentShader));\n webgl_util.linkProgram(gl, program);\n\n let program2: GPGPUContextProgram;\n {\n program2 = Object.assign(program, {\n vao: this.createVertexArray(),\n });\n this.bindVertexArray(program2.vao);\n // Bind index buffer, and vertex buffers based on program attrib\n // locations.\n webgl_util.callAndCheck(\n gl, () => gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, this.indexBuffer));\n console.assert(\n gpgpu_util.bindVertexProgramAttributeStreams(gl, program2,\n this.vertexBuffer),\n 'gpgpu_util.bindVertexProgramAttributeStreams not fully successful.');\n\n if (this.debug) {\n webgl_util.validateProgram(gl, program2);\n }\n }\n this.setProgram(program2);\n\n return program2;\n }\n\n public deleteProgram(program: GPGPUContextProgram) {\n this.throwIfDisposed();\n if (program === this.program) {\n this.program = null;\n }\n if (program != null) {\n webgl_util.callAndCheck(this.gl, () => this.gl.deleteProgram(program));\n this.deleteVertexArray(program.vao);\n }\n }\n\n public setProgram(program: GPGPUContextProgram|null) {\n this.throwIfDisposed();\n this.program = program;\n\n if (this.program != null) {\n this.bindVertexArray(this.program.vao);\n\n if (this.debug) {\n webgl_util.validateProgram(this.gl, this.program);\n }\n }\n webgl_util.callAndCheck(this.gl, () => this.gl.useProgram(program));\n }\n\n public getUniformLocation(\n program: WebGLProgram, uniformName: string,\n shouldThrow = true): WebGLUniformLocation {\n this.throwIfDisposed();\n if (shouldThrow) 
{\n return webgl_util.getProgramUniformLocationOrThrow(\n this.gl, program, uniformName);\n } else {\n return webgl_util.getProgramUniformLocation(\n this.gl, program, uniformName);\n }\n }\n\n public getAttributeLocation(program: WebGLProgram, attribute: string):\n number {\n this.throwIfDisposed();\n return webgl_util.callAndCheck(\n this.gl, () => this.gl.getAttribLocation(program, attribute));\n }\n\n public getUniformLocationNoThrow(program: WebGLProgram, uniformName: string):\n WebGLUniformLocation {\n this.throwIfDisposed();\n return this.gl.getUniformLocation(program, uniformName);\n }\n\n public setInputMatrixTexture(\n inputMatrixTexture: WebGLTexture, uniformLocation: WebGLUniformLocation,\n textureUnit: number) {\n this.throwIfDisposed();\n this.throwIfNoProgram();\n webgl_util.bindTextureToProgramUniformSampler(\n this.gl, inputMatrixTexture, uniformLocation, textureUnit);\n }\n\n public setOutputMatrixTexture(\n outputMatrixTexture: WebGLTexture, rows: number, columns: number) {\n this.setOutputMatrixTextureDriver(outputMatrixTexture, columns, rows);\n }\n\n public setOutputPackedMatrixTexture(\n outputPackedMatrixTexture: WebGLTexture, rows: number, columns: number) {\n this.throwIfDisposed();\n const [width, height] =\n tex_util.getPackedMatrixTextureShapeWidthHeight(rows, columns);\n this.setOutputMatrixTextureDriver(outputPackedMatrixTexture, width, height);\n }\n\n public setOutputMatrixWriteRegion(\n startRow: number, numRows: number, startColumn: number,\n numColumns: number) {\n this.setOutputMatrixWriteRegionDriver(\n startColumn, startRow, numColumns, numRows);\n }\n\n public setOutputPackedMatrixWriteRegion(\n startRow: number, numRows: number, startColumn: number,\n numColumns: number) {\n throw new Error('setOutputPackedMatrixWriteRegion not implemented.');\n }\n\n public debugValidate() {\n if (this.program != null) {\n webgl_util.validateProgram(this.gl, this.program);\n }\n webgl_util.validateFramebuffer(this.gl);\n }\n\n public executeProgram() {\n this.throwIfDisposed();\n this.throwIfNoProgram();\n const gl = this.gl;\n if (this.debug) {\n const boundVao = this.getVertexArray();\n console.assert(boundVao === this.program.vao,\n 'VAO changed between setProgram and executeProgram!');\n\n this.debugValidate();\n }\n webgl_util.callAndCheck(\n gl, () => gl.drawElements(gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, 0));\n }\n\n public blockUntilAllProgramsCompleted() {\n this.throwIfDisposed();\n webgl_util.callAndCheck(this.gl, () => this.gl.finish());\n }\n\n private getQueryTimerExtension(): WebGL1DisjointQueryTimerExtension\n |WebGL2DisjointQueryTimerExtension {\n if (this.disjointQueryTimerExtension == null) {\n this.disjointQueryTimerExtension =\n webgl_util.getExtensionOrThrow(\n this.gl,\n env().getNumber(\n 'WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION') === 2 ?\n 'EXT_disjoint_timer_query_webgl2' :\n 'EXT_disjoint_timer_query') as\n WebGL1DisjointQueryTimerExtension |\n WebGL2DisjointQueryTimerExtension;\n }\n return this.disjointQueryTimerExtension;\n }\n\n private getQueryTimerExtensionWebGL2(): WebGL2DisjointQueryTimerExtension {\n return this.getQueryTimerExtension();\n }\n\n private getQueryTimerExtensionWebGL1(): WebGL1DisjointQueryTimerExtension {\n return this.getQueryTimerExtension() as WebGL1DisjointQueryTimerExtension;\n }\n\n beginQuery(): WebGLQuery {\n if (env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION') === 2) {\n const gl2 = this.gl as WebGL2RenderingContext;\n const ext = this.getQueryTimerExtensionWebGL2();\n\n const query = 
gl2.createQuery();\n gl2.beginQuery(ext.TIME_ELAPSED_EXT, query);\n return query;\n }\n const ext = this.getQueryTimerExtensionWebGL1();\n const query = ext.createQueryEXT() as WebGLQuery;\n ext.beginQueryEXT(ext.TIME_ELAPSED_EXT, query);\n return query;\n }\n\n endQuery() {\n if (env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION') === 2) {\n const gl2 = this.gl as WebGL2RenderingContext;\n const ext = this.getQueryTimerExtensionWebGL2();\n gl2.endQuery(ext.TIME_ELAPSED_EXT);\n return;\n }\n const ext = this.getQueryTimerExtensionWebGL1();\n ext.endQueryEXT(ext.TIME_ELAPSED_EXT);\n }\n\n public async waitForQueryAndGetTime(query: WebGLQuery): Promise {\n await util.repeatedTry(\n () => this.disposed || // while testing contexts are created / disposed\n // in rapid succession, so without this check we\n // may poll for the query timer indefinitely\n this.isQueryAvailable(\n query,\n env().getNumber(\n 'WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION')));\n return this.getQueryTime(\n query, env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION'));\n }\n\n private getQueryTime(query: WebGLQuery, queryTimerVersion: number): number {\n if (queryTimerVersion === 0) {\n return null;\n }\n\n if (queryTimerVersion === 2) {\n const gl2 = this.gl as WebGL2RenderingContext;\n\n const timeElapsedNanos = gl2.getQueryParameter(query, gl2.QUERY_RESULT);\n // Return milliseconds.\n return timeElapsedNanos / 1000000;\n } else {\n const ext = this.getQueryTimerExtensionWebGL1();\n\n const timeElapsedNanos =\n ext.getQueryObjectEXT(query, ext.QUERY_RESULT_EXT);\n // Return milliseconds.\n return timeElapsedNanos / 1000000;\n }\n }\n\n private isQueryAvailable(query: WebGLQuery, queryTimerVersion: number):\n boolean {\n if (queryTimerVersion === 0) {\n return true;\n }\n\n if (queryTimerVersion === 2) {\n const gl2 = this.gl as WebGL2RenderingContext;\n const ext = this.getQueryTimerExtensionWebGL2();\n\n const available =\n gl2.getQueryParameter(query, gl2.QUERY_RESULT_AVAILABLE);\n if (this.disjoint == null) {\n this.disjoint = this.gl.getParameter(ext.GPU_DISJOINT_EXT);\n }\n\n return available && !this.disjoint;\n } else {\n const ext = this.getQueryTimerExtensionWebGL1();\n\n const available =\n ext.getQueryObjectEXT(query, ext.QUERY_RESULT_AVAILABLE_EXT);\n if (this.disjoint == null) {\n this.disjoint = this.gl.getParameter(ext.GPU_DISJOINT_EXT);\n }\n\n return available && !this.disjoint;\n }\n }\n\n pollFence(fenceContext: FenceContext) {\n return new Promise(resolve => {\n this.addItemToPoll(() => fenceContext.isFencePassed(), () => resolve());\n });\n }\n\n private itemsToPoll: PollItem[] = [];\n\n pollItems(): void {\n // Find the last query that has finished.\n const index = linearSearchLastTrue(this.itemsToPoll.map(x => x.isDoneFn));\n for (let i = 0; i <= index; ++i) {\n const {resolveFn} = this.itemsToPoll[i];\n resolveFn();\n }\n this.itemsToPoll = this.itemsToPoll.slice(index + 1);\n }\n\n private addItemToPoll(isDoneFn: () => boolean, resolveFn: () => void) {\n this.itemsToPoll.push({isDoneFn, resolveFn});\n if (this.itemsToPoll.length > 1) {\n // We already have a running loop that polls.\n return;\n }\n // Start a new loop that polls.\n let scheduleFn = undefined;\n if ('setTimeoutCustom' in env().platform) {\n scheduleFn = env().platform.setTimeoutCustom.bind(env().platform);\n }\n util.repeatedTry(() => {\n this.pollItems();\n // End the loop if no more items to poll.\n return this.itemsToPoll.length === 0;\n }, () => 0, null, scheduleFn);\n }\n\n private 
bindTextureToFrameBuffer(texture: WebGLTexture) {\n this.throwIfDisposed();\n webgl_util.bindColorTextureToFramebuffer(\n this.gl, texture, this.framebuffer);\n if (this.debug) {\n webgl_util.validateFramebuffer(this.gl);\n }\n }\n\n private unbindTextureToFrameBuffer() {\n if (this.outputTexture != null) {\n webgl_util.bindColorTextureToFramebuffer(\n this.gl, this.outputTexture, this.framebuffer);\n if (this.debug) {\n webgl_util.validateFramebuffer(this.gl);\n }\n } else {\n webgl_util.unbindColorTextureFromFramebuffer(this.gl, this.framebuffer);\n }\n }\n\n private downloadMatrixDriver(\n texture: WebGLTexture,\n downloadAndDecode: () => Float32Array): Float32Array {\n this.bindTextureToFrameBuffer(texture);\n const result = downloadAndDecode();\n this.unbindTextureToFrameBuffer();\n\n return result;\n }\n\n private setOutputMatrixTextureDriver(\n outputMatrixTextureMaybePacked: WebGLTexture, width: number,\n height: number) {\n this.throwIfDisposed();\n const gl = this.gl;\n webgl_util.bindColorTextureToFramebuffer(\n gl, outputMatrixTextureMaybePacked, this.framebuffer);\n if (this.debug) {\n webgl_util.validateFramebuffer(gl);\n }\n this.outputTexture = outputMatrixTextureMaybePacked;\n webgl_util.callAndCheck(gl, () => gl.viewport(0, 0, width, height));\n webgl_util.callAndCheck(gl, () => gl.scissor(0, 0, width, height));\n }\n\n private setOutputMatrixWriteRegionDriver(\n x: number, y: number, width: number, height: number) {\n this.throwIfDisposed();\n webgl_util.callAndCheck(\n this.gl, () => this.gl.scissor(x, y, width, height));\n }\n\n private throwIfDisposed() {\n if (this.disposed) {\n throw new Error('Attempted to use disposed GPGPUContext.');\n }\n }\n\n private throwIfNoProgram() {\n if (this.program == null) {\n throw new Error('No GPU program is currently set.');\n }\n }\n}\n\ntype PollItem = {\n isDoneFn: () => boolean,\n resolveFn: () => void\n};\n\n/**\n * Finds the index of the last true element using linear search.\n * Note: We can't do binary search because Chrome expects us to explicitly\n * test all fences before download:\n * https://github.com/tensorflow/tfjs/issues/1145\n */\nexport function linearSearchLastTrue(arr: Array<() => boolean>): number {\n let i = 0;\n for (; i < arr.length; ++i) {\n const isDone = arr[i]();\n if (!isDone) {\n break;\n }\n }\n return i - 1;\n}\n","\n/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// Import shared functionality from tfjs-backend-cpu without triggering\n// side effects.\n// tslint:disable-next-line: no-imports-from-dist\nimport * as shared from '@tensorflow/tfjs-backend-cpu/dist/shared';\n// tslint:disable-next-line: no-imports-from-dist\nimport {SimpleBinaryKernelImpl} from '@tensorflow/tfjs-backend-cpu/dist/shared';\n// tslint:disable-next-line: no-imports-from-dist\nimport {SimpleUnaryImpl} from '@tensorflow/tfjs-backend-cpu/dist/utils/unary_types';\n\nexport type SimpleBinaryKernelImplCPU = SimpleBinaryKernelImpl;\nexport type SimpleUnaryKernelImplCPU = SimpleUnaryImpl;\nconst {\n addImpl: addImplCPU,\n bincountImpl: bincountImplCPU,\n bincountReduceImpl: bincountReduceImplCPU,\n castImpl: castImplCPU,\n ceilImpl: ceilImplCPU,\n concatImpl: concatImplCPU,\n equalImpl: equalImplCPU,\n expImpl: expImplCPU,\n expm1Impl: expm1ImplCPU,\n floorImpl: floorImplCPU,\n gatherNdImpl: gatherNdImplCPU,\n gatherV2Impl: gatherV2ImplCPU,\n greaterImpl: greaterImplCPU,\n greaterEqualImpl: greaterEqualImplCPU,\n lessImpl: lessImplCPU,\n lessEqualImpl: lessEqualImplCPU,\n linSpaceImpl: linSpaceImplCPU,\n logImpl: logImplCPU,\n maxImpl: maxImplCPU,\n maximumImpl: maximumImplCPU,\n minimumImpl: minimumImplCPU,\n multiplyImpl: multiplyImplCPU,\n negImpl: negImplCPU,\n notEqualImpl: notEqualImplCPU,\n prodImpl: prodImplCPU,\n raggedGatherImpl: raggedGatherImplCPU,\n raggedRangeImpl: raggedRangeImplCPU,\n raggedTensorToTensorImpl: raggedTensorToTensorImplCPU,\n rangeImpl: rangeImplCPU,\n rsqrtImpl: rsqrtImplCPU,\n scatterImpl: scatterImplCPU,\n sigmoidImpl: sigmoidImplCPU,\n simpleAbsImpl: simpleAbsImplCPU,\n sliceImpl: sliceImplCPU,\n sparseFillEmptyRowsImpl: sparseFillEmptyRowsImplCPU,\n sparseReshapeImpl: sparseReshapeImplCPU,\n sparseSegmentReductionImpl: sparseSegmentReductionImplCPU,\n sqrtImpl: sqrtImplCPU,\n stridedSliceImpl: stridedSliceImplCPU,\n stringNGramsImpl: stringNGramsImplCPU,\n stringSplitImpl: stringSplitImplCPU,\n stringToHashBucketFastImpl: stringToHashBucketFastImplCPU,\n subImpl: subImplCPU,\n tileImpl: tileImplCPU,\n topKImpl: topKImplCPU,\n transposeImpl: transposeImplCPU,\n uniqueImpl: uniqueImplCPU,\n} = shared;\n\nexport {\n addImplCPU,\n bincountImplCPU,\n bincountReduceImplCPU,\n castImplCPU,\n ceilImplCPU,\n concatImplCPU,\n equalImplCPU,\n expImplCPU,\n expm1ImplCPU,\n floorImplCPU,\n gatherNdImplCPU,\n gatherV2ImplCPU,\n greaterEqualImplCPU,\n greaterImplCPU,\n lessEqualImplCPU,\n lessImplCPU,\n linSpaceImplCPU,\n logImplCPU,\n maxImplCPU,\n maximumImplCPU,\n minimumImplCPU,\n multiplyImplCPU,\n negImplCPU,\n notEqualImplCPU,\n prodImplCPU,\n raggedGatherImplCPU,\n raggedRangeImplCPU,\n raggedTensorToTensorImplCPU,\n scatterImplCPU,\n sigmoidImplCPU,\n simpleAbsImplCPU,\n sliceImplCPU,\n sparseFillEmptyRowsImplCPU,\n sparseReshapeImplCPU,\n sparseSegmentReductionImplCPU,\n sqrtImplCPU,\n 
stridedSliceImplCPU,\n stringNGramsImplCPU,\n stringSplitImplCPU,\n stringToHashBucketFastImplCPU,\n subImplCPU,\n rangeImplCPU,\n rsqrtImplCPU,\n tileImplCPU,\n topKImplCPU,\n transposeImplCPU,\n uniqueImplCPU,\n};\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nexport function getVecChannels(name: string, rank: number): string[] {\n return ['x', 'y', 'z', 'w', 'u', 'v'].slice(0, rank).map(d => `${name}.${d}`);\n}\n\nexport function getChannels(name: string, rank: number): string[] {\n if (rank === 1) {\n return [name];\n }\n return getVecChannels(name, rank);\n}\n\nexport function getSourceCoords(rank: number, dims: string[]): string {\n if (rank === 1) {\n return 'rc';\n }\n\n let coords = '';\n for (let i = 0; i < rank; i++) {\n coords += dims[i];\n if (i < rank - 1) {\n coords += ',';\n }\n }\n return coords;\n}","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram, useShapeUniforms} from './gpgpu_math';\nimport {getChannels} from './packing_util';\nimport {getCoordsDataType} from './shader_compiler';\n\nexport class PackProgram implements GPGPUProgram {\n variableNames = ['A'];\n outputShape: number[];\n userCode: string;\n packedInputs = false;\n packedOutput = true;\n enableShapeUniforms: boolean;\n rank: number;\n\n constructor(\n outputShape:\n number[]) { // TODO(https://github.com/tensorflow/tfjs/issues/893):\n // Only input / output 3D tensors.\n this.outputShape = outputShape;\n this.rank = outputShape.length;\n this.enableShapeUniforms = useShapeUniforms(this.outputShape.length);\n\n if (this.rank === 0) {\n this.userCode = `\n void main() {\n setOutput(vec4(getA(), 0., 0., 0.));\n }\n `;\n } else {\n const channels = getChannels('rc', this.rank);\n const dtype = getCoordsDataType(this.rank);\n const outOfBoundsCondition = this.getOutOfBoundsCondition(channels);\n const setup = this.getSetup(channels);\n const output = this.getOutput(channels);\n\n this.userCode = `\n void main() {\n ${dtype} rc = getOutputCoords();\n\n if(${outOfBoundsCondition}) {\n setOutput(vec4(0));\n } else {\n ${setup}\n\n setOutput(vec4(${output}));\n }\n }\n `;\n }\n }\n\n private getSourceCoordsArr(dims: string[]): string[] {\n const coords = [];\n\n 
for (let row = 0; row <= 1; row++) {\n for (let col = 0; col <= 1; col++) {\n let coord = `${row === 0 ? 'r' : 'rp1'}, ${col === 0 ? 'c' : 'cp1'}`;\n\n for (let d = 2; d < this.rank; d++) {\n coord = `${dims[dims.length - 1 - d]},` + coord;\n }\n\n coords.push(coord);\n }\n }\n return coords;\n }\n\n private getOutOfBoundsCondition(dims: string[]): string {\n if (this.rank === 1) {\n return `rc > ${\n this.enableShapeUniforms ? 'outShape' : this.outputShape[0]}`;\n }\n\n let cond = '';\n for (let i = this.rank - 2; i < this.rank; i++) {\n cond += `${dims[i]} >= ${\n this.enableShapeUniforms ? `outShape[${i}]` : this.outputShape[i]}`;\n if (i < this.rank - 1) {\n cond += '||';\n }\n }\n\n return cond;\n }\n\n private getSetup(dims: string[]): string {\n if (this.rank === 1) {\n return '';\n }\n\n const innerDims = dims.slice(-2);\n const col = this.enableShapeUniforms ? `outShape[${this.rank} - 1]` :\n this.outputShape[this.rank - 1];\n const row = this.enableShapeUniforms ? `outShape[${this.rank} - 2]` :\n this.outputShape[this.rank - 2];\n\n return `\n int r = ${innerDims[0]};\n int c = ${innerDims[1]};\n int rp1 = r + 1;\n int cp1 = c + 1;\n\n bool cEdge = cp1 >= ${col};\n bool rEdge = rp1 >= ${row};\n `;\n }\n\n private getOutput(dims: string[]): string {\n const sourceCoords = this.getSourceCoordsArr(dims);\n if (this.rank === 1) {\n const outShape =\n this.enableShapeUniforms ? 'outShape' : this.outputShape[0];\n return `getA(rc), (rc + 1 >= ${outShape} ? 0. : getA(rc + 1)), 0, 0`;\n }\n\n return `getA(${sourceCoords[0]}),\n cEdge ? 0. : getA(${sourceCoords[1]}),\n rEdge ? 0. : getA(${sourceCoords[2]}),\n rEdge || cEdge ? 0. : getA(${sourceCoords[3]})`;\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram, useShapeUniforms} from './gpgpu_math';\nimport * as shader_util from './shader_compiler_util';\n\nexport class ReshapePackedProgram implements GPGPUProgram {\n variableNames = ['A'];\n packedInputs = true;\n packedOutput = true;\n outputShape: number[];\n userCode: string;\n enableShapeUniforms: boolean;\n customUniforms = [{name: 'inputShape', type: 'ivec3' as const }];\n\n constructor(outputShape: [number, number, number], inputShape: [\n number, number, number\n ]) {\n this.outputShape = outputShape;\n this.enableShapeUniforms = useShapeUniforms(this.outputShape.length);\n\n let mainLoop = ``;\n for (let i = 0; i < 4; i++) {\n let thisRC = `thisRC = rc;`;\n if (i % 2 === 1) {\n thisRC += `thisRC.z += 1;`;\n }\n if (i > 1) {\n thisRC += `thisRC.y += 1;`;\n }\n\n mainLoop += `\n ${thisRC}\n ${i > 0 ? 
`if(thisRC.y < rows && thisRC.z < cols){` : ''}\n int flatIndex = getFlatIndex(thisRC);\n\n ivec3 inputRC = inputCoordsFromReshapedOutCoords(flatIndex);\n vec2 inputRCInnerDims = vec2(float(inputRC.y),float(inputRC.z));\n\n result[${i}] =\n getChannel(getA(inputRC.x, inputRC.y, inputRC.z), inputRCInnerDims);\n ${i > 0 ? '}' : ''}\n `;\n }\n\n this.userCode = `\n ${getReshapedInputCoords(inputShape, this.enableShapeUniforms)}\n ${\n this.enableShapeUniforms ? shader_util.getFlatIndexFrom3DOutput() :\n shader_util.getFlatIndexFrom3D(outputShape)}\n\n void main() {\n ivec3 rc = getOutputCoords();\n\n vec4 result = vec4(0.);\n\n ivec3 thisRC;\n int rows = ${this.enableShapeUniforms ? 'outShape[1]' : outputShape[1]};\n int cols = ${this.enableShapeUniforms ? 'outShape[2]' : outputShape[2]};\n\n ${mainLoop}\n\n setOutput(result);\n }\n `;\n }\n}\n\nfunction getReshapedInputCoords(\n shape: [number, number, number], enableShapeUniforms: boolean): string {\n const coordsFromIndexSnippet = enableShapeUniforms ?\n shader_util.getLogicalCoordinatesFromFlatIndexByUniform(\n ['r', 'c', 'd'], 'inputShape') :\n shader_util.getLogicalCoordinatesFromFlatIndex(['r', 'c', 'd'], shape);\n\n return `\n ivec3 inputCoordsFromReshapedOutCoords(int index) {\n ${coordsFromIndexSnippet}\n return ivec3(r, c, d);\n }\n `;\n}\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env} from '@tensorflow/tfjs-core';\n\nimport {GPGPUContext} from './gpgpu_context';\nimport {getInternalFormatForFloat16MatrixTexture, getInternalFormatForFloat16PackedMatrixTexture, getInternalFormatForFloat32MatrixTexture, getInternalFormatForPackedMatrixTexture, getInternalFormatForUnsignedBytesMatrixTexture} from './gpgpu_util';\nimport {getPackedMatrixTextureShapeWidthHeight, getUnpackedMatrixTextureShapeWidthHeight, PhysicalTextureType, Texture, TextureConfig, TextureUsage} from './tex_util';\n\nexport class TextureManager {\n private numUsedTextures = 0;\n private numFreeTextures = 0;\n private _numBytesAllocated = 0;\n private _numBytesFree = 0; // How many bytes that have been allocated\n // are available for reuse.\n private freeTextures: {[shape: string]: Texture[]} = {};\n private logEnabled = false;\n private usedTextures: {[shape: string]: Texture[]} = {};\n\n constructor(private gpgpu: GPGPUContext) {}\n\n acquireTexture(\n shapeRC: [number, number], usage: TextureUsage,\n isPacked: boolean): Texture {\n const physicalTexType = getPhysicalFromLogicalTextureType(usage, isPacked);\n\n const shapeKey = getKeyFromTextureShape(shapeRC, physicalTexType, isPacked);\n if (!(shapeKey in this.freeTextures)) {\n this.freeTextures[shapeKey] = [];\n }\n if (!(shapeKey in this.usedTextures)) {\n this.usedTextures[shapeKey] = [];\n }\n\n const texBytes = computeBytes(\n shapeRC, physicalTexType, this.gpgpu.gl, this.gpgpu.textureConfig,\n isPacked);\n\n if 
(this.freeTextures[shapeKey].length > 0) {\n this.numFreeTextures--;\n this.numUsedTextures++;\n this._numBytesFree -= texBytes;\n this.log();\n const newTexture = this.freeTextures[shapeKey].shift();\n this.usedTextures[shapeKey].push(newTexture);\n return newTexture;\n }\n\n let newTexture: Texture;\n if (physicalTexType === PhysicalTextureType.PACKED_2X2_FLOAT32) {\n newTexture = this.gpgpu.createPackedMatrixTexture(shapeRC[0], shapeRC[1]);\n } else if (physicalTexType === PhysicalTextureType.PACKED_2X2_FLOAT16) {\n newTexture =\n this.gpgpu.createFloat16PackedMatrixTexture(shapeRC[0], shapeRC[1]);\n } else if (physicalTexType === PhysicalTextureType.UNPACKED_FLOAT32) {\n newTexture =\n this.gpgpu.createFloat32MatrixTexture(shapeRC[0], shapeRC[1]);\n } else if (physicalTexType === PhysicalTextureType.UNPACKED_FLOAT16) {\n newTexture =\n this.gpgpu.createFloat16MatrixTexture(shapeRC[0], shapeRC[1]);\n } else if (\n physicalTexType === PhysicalTextureType.PACKED_4X1_UNSIGNED_BYTE) {\n newTexture =\n this.gpgpu.createUnsignedBytesMatrixTexture(shapeRC[0], shapeRC[1]);\n }\n this.usedTextures[shapeKey].push(newTexture);\n\n this.numUsedTextures++;\n this._numBytesAllocated += texBytes;\n this.log();\n\n return newTexture;\n }\n\n releaseTexture(\n texture: Texture, shape: [number, number], logicalTexType: TextureUsage,\n isPacked: boolean): void {\n if (this.freeTextures == null) {\n // Already disposed.\n return;\n }\n const physicalTexType =\n getPhysicalFromLogicalTextureType(logicalTexType, isPacked);\n const shapeKey = getKeyFromTextureShape(shape, physicalTexType, isPacked);\n if (!(shapeKey in this.freeTextures)) {\n this.freeTextures[shapeKey] = [];\n }\n\n const texBytes = computeBytes(\n shape, physicalTexType, this.gpgpu.gl, this.gpgpu.textureConfig,\n isPacked);\n const deleteTexThreshold = env().get('WEBGL_DELETE_TEXTURE_THRESHOLD');\n if (deleteTexThreshold !== -1 &&\n this._numBytesAllocated > deleteTexThreshold) {\n this.gpgpu.deleteMatrixTexture(texture.texture);\n this._numBytesAllocated -= texBytes;\n } else {\n this.freeTextures[shapeKey].push(texture);\n this.numFreeTextures++;\n this._numBytesFree += texBytes;\n }\n\n this.numUsedTextures--;\n\n const texList = this.usedTextures[shapeKey];\n const texIndex = texList.indexOf(texture);\n if (texIndex < 0) {\n throw new Error(\n 'Cannot release a texture that was never provided by this ' +\n 'texture manager');\n }\n texList.splice(texIndex, 1);\n this.log();\n }\n\n private log() {\n if (!this.logEnabled) {\n return;\n }\n const total = this.numFreeTextures + this.numUsedTextures;\n console.log(\n 'Free/Used', `${this.numFreeTextures} / ${this.numUsedTextures}`,\n `(${total})`);\n const freeRatio = this._numBytesFree / this._numBytesAllocated;\n console.log(`Bytes allocated: ${this._numBytesAllocated}`);\n console.log(`Bytes unused: ${this._numBytesFree} (${\n Math.round(100 * freeRatio)}%)`);\n }\n\n get numBytesAllocated(): number {\n return this._numBytesAllocated;\n }\n\n get numBytesFree(): number {\n return this._numBytesFree;\n }\n\n getNumUsedTextures(): number {\n return this.numUsedTextures;\n }\n\n getNumFreeTextures(): number {\n return this.numFreeTextures;\n }\n\n dispose() {\n if (this.freeTextures == null) {\n // Already disposed.\n return;\n }\n for (const texShape in this.freeTextures) {\n this.freeTextures[texShape].forEach(tex => {\n this.gpgpu.deleteMatrixTexture(tex.texture);\n });\n }\n for (const texShape in this.usedTextures) {\n this.usedTextures[texShape].forEach(tex => {\n 
this.gpgpu.deleteMatrixTexture(tex.texture);\n });\n }\n this.freeTextures = null;\n this.usedTextures = null;\n this.numUsedTextures = 0;\n this.numFreeTextures = 0;\n this._numBytesAllocated = 0;\n this._numBytesFree = 0;\n }\n}\n\nfunction numBytesForInternalFormat(\n gl: WebGLRenderingContext, internalFormat: number): number {\n // tslint:disable-next-line:no-any\n const glany = gl as any;\n if (internalFormat === glany.R32F) {\n return 4;\n } else if (internalFormat === glany.R16F) {\n return 2;\n } else if (internalFormat === glany.RGBA32F) {\n return 16;\n } else if (internalFormat === gl.RGBA) {\n return 16;\n } else if (internalFormat === glany.RGBA16F) {\n return 8;\n } else if (internalFormat === glany.RGBA8) {\n return 4;\n }\n throw new Error(`Unknown internal format ${internalFormat}`);\n}\n\nexport function computeBytes(\n shape: [number, number], physicalTexType: PhysicalTextureType,\n gl: WebGLRenderingContext, textureConfig: TextureConfig,\n isPacked: boolean): number {\n // It is not possible to infer packed status from the texture type because\n // depending on the textureConfig, different texture types may resolve to the\n // same internal format (e.g. in WebGL1, the internal format for\n // UNPACKED_FLOAT16 textures is gl.RGBA). Therefore we pass in `isPacked`\n // explicitly.\n const internalFormat =\n internalFormatForPhysicalTexType(physicalTexType, textureConfig);\n\n let numElements: number;\n if (isPacked) {\n const [packedWidth, packedHeight] =\n getPackedMatrixTextureShapeWidthHeight(shape[0], shape[1]);\n numElements = packedWidth * packedHeight;\n\n } else {\n const [width, height] =\n getUnpackedMatrixTextureShapeWidthHeight(shape[0], shape[1]);\n numElements = width * height;\n }\n\n const bytesPerElement = numBytesForInternalFormat(gl, internalFormat);\n return numElements * bytesPerElement;\n}\n\nfunction internalFormatForPhysicalTexType(\n physicalTexType: PhysicalTextureType,\n textureConfig: TextureConfig): number {\n switch (physicalTexType) {\n case PhysicalTextureType.PACKED_2X2_FLOAT32:\n return getInternalFormatForPackedMatrixTexture(textureConfig);\n case PhysicalTextureType.PACKED_2X2_FLOAT16:\n return getInternalFormatForFloat16PackedMatrixTexture(textureConfig);\n case PhysicalTextureType.UNPACKED_FLOAT32:\n return getInternalFormatForFloat32MatrixTexture(textureConfig);\n case PhysicalTextureType.UNPACKED_FLOAT16:\n return getInternalFormatForFloat16MatrixTexture(textureConfig);\n case PhysicalTextureType.PACKED_4X1_UNSIGNED_BYTE:\n return getInternalFormatForUnsignedBytesMatrixTexture(textureConfig);\n default:\n throw new Error(`Unknown physical texture type ${physicalTexType}`);\n }\n}\n\nfunction getPhysicalTextureForRendering(isPacked: boolean):\n PhysicalTextureType {\n if (env().getBool('WEBGL_RENDER_FLOAT32_ENABLED')) {\n if (isPacked) {\n return PhysicalTextureType.PACKED_2X2_FLOAT32;\n }\n return PhysicalTextureType.UNPACKED_FLOAT32;\n }\n\n if (isPacked) {\n return PhysicalTextureType.PACKED_2X2_FLOAT16;\n }\n return PhysicalTextureType.UNPACKED_FLOAT16;\n}\n\nfunction getPhysicalFromLogicalTextureType(\n logicalTexType: TextureUsage, isPacked: boolean): PhysicalTextureType {\n if (logicalTexType === TextureUsage.UPLOAD) {\n return PhysicalTextureType.PACKED_2X2_FLOAT32;\n } else if (logicalTexType === TextureUsage.RENDER || logicalTexType == null) {\n return getPhysicalTextureForRendering(isPacked);\n } else if (\n logicalTexType === TextureUsage.DOWNLOAD ||\n logicalTexType === TextureUsage.PIXELS) {\n return 
PhysicalTextureType.PACKED_4X1_UNSIGNED_BYTE;\n }\n throw new Error(`Unknown logical texture type ${logicalTexType}`);\n}\n\nfunction getKeyFromTextureShape(\n shapeRowsCol: [number, number], physicalTexType: PhysicalTextureType,\n isPacked: boolean): string {\n return `${shapeRowsCol[0]}_${shapeRowsCol[1]}_${physicalTexType}_${isPacked}`;\n}\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram, useShapeUniforms} from './gpgpu_math';\n\nexport class UnaryOpProgram implements GPGPUProgram {\n variableNames = ['A'];\n userCode: string;\n outputShape: number[];\n enableShapeUniforms: boolean;\n\n constructor(aShape: number[], opSnippet: string) {\n this.outputShape = aShape;\n this.enableShapeUniforms = useShapeUniforms(this.outputShape.length);\n this.userCode = `\n float unaryOperation(float x) {\n ${opSnippet}\n }\n\n void main() {\n float x = getAAtOutCoords();\n float y = unaryOperation(x);\n\n setOutput(y);\n }\n `;\n }\n}\n\nexport const CHECK_NAN_SNIPPET = `if (isnan(x)) return x;`;\n\nexport const LINEAR = `return x;`;\n\nexport const ABS = `return abs(x);`;\n\nexport function STEP(alpha = 0.0) {\n return CHECK_NAN_SNIPPET + `\n return x > 0.0 ? 1.0 : float(${alpha});\n `;\n}\n\nexport const ELU = `return (x >= 0.0) ? x : (exp(x) - 1.0);`;\nexport const RELU = CHECK_NAN_SNIPPET + `\n return (x < 0.0) ? 0.0 : x;\n`;\n\nexport const RELU6 = CHECK_NAN_SNIPPET + `\n return (x < 0.0) ? 0.0 : min(6.0, x);\n`;\n\nexport const CLONE = 'return x;';\n\nexport const SIGMOID = `return 1.0 / (1.0 + exp(-1.0 * x));`;\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram, useShapeUniforms} from './gpgpu_math';\n\nexport const LINEAR = `return x;`;\n\nexport const ELU = `\n vec4 result;\n\n result.r = (x.r >= 0.0) ? x.r : (exp(x.r) - 1.0);\n result.g = (x.g >= 0.0) ? x.g : (exp(x.g) - 1.0);\n result.b = (x.b >= 0.0) ? x.b : (exp(x.b) - 1.0);\n result.a = (x.a >= 0.0) ? x.a : (exp(x.a) - 1.0);\n\n return result;\n`;\n\nexport const RELU = `\n vec4 result = x * vec4(greaterThanEqual(x, vec4(0.0)));\n bvec4 isNaN = isnan(x);\n\n result.r = isNaN.r ? x.r : result.r;\n result.g = isNaN.g ? x.g : result.g;\n result.b = isNaN.b ? 
x.b : result.b;\n result.a = isNaN.a ? x.a : result.a;\n\n return result;\n`;\n\nexport const RELU6 = `\n vec4 result = min(x, vec4(6.)) * vec4(greaterThanEqual(x, vec4(0.0)));\n bvec4 isNaN = isnan(x);\n\n result.r = isNaN.r ? x.r : result.r;\n result.g = isNaN.g ? x.g : result.g;\n result.b = isNaN.b ? x.b : result.b;\n result.a = isNaN.a ? x.a : result.a;\n\n return result;\n`;\n\nexport const SIGMOID = `return 1.0 / (1.0 + exp(-1.0 * x));`;\n\nexport class UnaryOpPackedProgram implements GPGPUProgram {\n variableNames = ['A'];\n userCode: string;\n enableShapeUniforms: boolean;\n outputShape: number[];\n packedInputs = true;\n packedOutput = true;\n\n constructor(aShape: number[], opSnippet: string) {\n this.outputShape = aShape;\n this.enableShapeUniforms = useShapeUniforms(this.outputShape.length);\n this.userCode = `\n vec4 unaryOperation(vec4 x) {\n ${opSnippet}\n }\n\n void main() {\n vec4 x = getAAtOutCoords();\n vec4 y = unaryOperation(x);\n\n setOutput(y);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram, useShapeUniforms} from './gpgpu_math';\nimport {getChannels, getSourceCoords} from './packing_util';\nimport {getCoordsDataType} from './shader_compiler';\n\nexport class UnpackProgram implements GPGPUProgram {\n variableNames = ['A'];\n packedInputs = true;\n packedOutput = false;\n outputShape: number[];\n userCode: string;\n enableShapeUniforms: boolean;\n\n constructor(outputShape: number[]) {\n this.outputShape = outputShape;\n this.enableShapeUniforms = useShapeUniforms(this.outputShape.length);\n const rank = outputShape.length;\n\n const channels = getChannels('rc', rank);\n const dtype = getCoordsDataType(rank);\n const sourceCoords = getSourceCoords(rank, channels);\n const innerDims = channels.slice(-2);\n const coords = rank <= 1 ? 'rc' : `vec2(${innerDims.join(',')})`;\n\n this.userCode = `\n void main() {\n ${dtype} rc = getOutputCoords();\n vec4 packedInput = getA(${sourceCoords});\n\n setOutput(getChannel(packedInput, ${coords}));\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// Import webgl flags.\nimport './flags_webgl';\n\nimport * as tf from '@tensorflow/tfjs-core';\nimport {backend_util, BackendValues, buffer, DataId, DataStorage, DataToGPUWebGLOption, DataType, engine, env, GPUData, kernel_impls, KernelBackend, MemoryInfo, nextFrame, NumericDataType, Rank, RecursiveArray, scalar, ShapeMap, Tensor, Tensor2D, TensorBuffer, TensorInfo, tidy, TimingInfo, TypedArray, util, WebGLData} from '@tensorflow/tfjs-core';\nimport {getWebGLContext} from './canvas_util';\nimport {DecodeMatrixProgram} from './decode_matrix_gpu';\nimport {DecodeMatrixPackedProgram} from './decode_matrix_packed_gpu';\nimport {EncodeFloatProgram} from './encode_float_gpu';\nimport {EncodeFloatPackedProgram} from './encode_float_packed_gpu';\nimport {EncodeMatrixProgram} from './encode_matrix_gpu';\nimport {EncodeMatrixPackedProgram} from './encode_matrix_packed_gpu';\nimport {GPGPUContext} from './gpgpu_context';\nimport * as gpgpu_math from './gpgpu_math';\nimport {getUniformLocations, GPGPUBinary, GPGPUProgram, TensorData} from './gpgpu_math';\nimport {simpleAbsImplCPU} from './kernel_utils/shared';\nimport {PackProgram} from './pack_gpu';\nimport {ReshapePackedProgram} from './reshape_packed_gpu';\nimport * as tex_util from './tex_util';\nimport {Texture, TextureData, TextureUsage} from './tex_util';\nimport {TextureManager} from './texture_manager';\nimport * as unary_op from './unaryop_gpu';\nimport {UnaryOpProgram} from './unaryop_gpu';\nimport {UnaryOpPackedProgram} from './unaryop_packed_gpu';\nimport {UnpackProgram} from './unpack_gpu';\nimport * as webgl_util from './webgl_util';\n\nconst whereImpl = kernel_impls.whereImpl;\n\nexport const EPSILON_FLOAT32 = 1e-7;\nexport const EPSILON_FLOAT16 = 1e-4;\n\ntype KernelInfo = {\n name: string; query: Promise;\n};\n\nexport type TimerNode = RecursiveArray|KernelInfo;\nexport interface CPUTimerQuery {\n startMs: number;\n endMs?: number;\n}\n\nexport interface WebGLMemoryInfo extends MemoryInfo {\n numBytesInGPU: number;\n // Tracks the total number of bytes allocated on the GPU, accounting for the\n // physical texture type.\n numBytesInGPUAllocated: number;\n // Tracks byte size of textures that were created and then made available for\n // reuse (disposed).\n numBytesInGPUFree: number;\n unreliable: boolean;\n}\n\nexport interface WebGLTimingInfo extends TimingInfo {\n uploadWaitMs: number;\n downloadWaitMs: number;\n}\n\nconst binaryCaches: {[webGLVersion: string]: {[key: string]: GPGPUBinary}} = {};\n\nexport function getBinaryCache(webGLVersion: number) {\n if (webGLVersion in binaryCaches) {\n return binaryCaches[webGLVersion];\n }\n binaryCaches[webGLVersion] = {};\n return binaryCaches[webGLVersion];\n}\n\n// Empirically determined constant used to determine size threshold for handing\n// off execution to the CPU.\nconst CPU_HANDOFF_SIZE_THRESHOLD =\n 
env().getNumber('CPU_HANDOFF_SIZE_THRESHOLD');\n\n// Empirically determined constant used to decide the number of MB on GPU\n// before we warn about high memory use. The MB are this constant * screen area\n// * dpi / 1024 / 1024.\nconst BEFORE_PAGING_CONSTANT = 600;\nfunction numMBBeforeWarning(): number {\n if (env().global.screen == null) {\n return 1024; // 1 GB.\n }\n return (env().global.screen.height * env().global.screen.width *\n window.devicePixelRatio) *\n BEFORE_PAGING_CONSTANT / 1024 / 1024;\n}\n\nexport class MathBackendWebGL extends KernelBackend {\n texData: DataStorage;\n gpgpu: GPGPUContext;\n\n private static nextDataId = 0;\n private nextDataId(): number {\n return MathBackendWebGL.nextDataId++;\n }\n // Maps data ids that have a pending read operation, to list of subscribers.\n private pendingRead = new WeakMap void>>();\n // List of data ids that are scheduled for disposal, but are waiting on a\n // pending read operation.\n private pendingDisposal = new WeakSet();\n\n // Used to count the number of 'shallow' sliced tensors that point to the\n // same data id.\n dataRefCount = new WeakMap();\n private numBytesInGPU = 0;\n\n private canvas: HTMLCanvasElement|OffscreenCanvas;\n\n private programTimersStack: TimerNode[];\n private activeTimers: TimerNode[];\n // Accumulated time spent (including blocking) in uploading data to webgl.\n private uploadWaitMs = 0;\n // Accumulated time spent (including blocking in downloading data from webgl.\n private downloadWaitMs = 0;\n\n // record the last manual GL Flush time.\n private lastGlFlushTime = 0;\n\n // Number of bits of precision of this backend.\n private floatPrecisionValue: 32|16;\n\n private textureManager: TextureManager;\n private binaryCache: {[key: string]: GPGPUBinary};\n private gpgpuCreatedLocally: boolean;\n private numMBBeforeWarning: number;\n private warnedAboutMemory = false;\n\n constructor(gpuResource?: GPGPUContext|HTMLCanvasElement|OffscreenCanvas) {\n super();\n if (!env().getBool('HAS_WEBGL')) {\n throw new Error('WebGL is not supported on this device');\n }\n\n let newGPGPU;\n if (gpuResource != null) {\n if (gpuResource instanceof GPGPUContext) {\n newGPGPU = gpuResource;\n } else {\n const gl =\n getWebGLContext(env().getNumber('WEBGL_VERSION'), gpuResource);\n newGPGPU = new GPGPUContext(gl);\n }\n this.binaryCache = {};\n this.gpgpuCreatedLocally = false;\n } else {\n const gl = getWebGLContext(env().getNumber('WEBGL_VERSION'));\n newGPGPU = new GPGPUContext(gl);\n this.binaryCache = getBinaryCache(env().getNumber('WEBGL_VERSION'));\n this.gpgpuCreatedLocally = true;\n }\n\n this.gpgpu = newGPGPU;\n this.canvas = this.gpgpu.gl.canvas;\n this.textureManager = new TextureManager(this.gpgpu);\n this.numMBBeforeWarning = numMBBeforeWarning();\n this.texData = new DataStorage(this, engine());\n }\n\n override numDataIds() {\n return this.texData.numDataIds() - this.pendingDeletes;\n }\n\n // Writes a new entry to the data store with a WebGL texture, and registers it\n // to the texture manager.\n writeTexture(\n texture: WebGLTexture, shape: number[], dtype: DataType,\n texHeight: number, texWidth: number, channels: string): DataId {\n // Temporarily create an tensor info to make the texture compatible with\n // the runWebGLProgram's input.\n const input = this.makeTensorInfo(shape, dtype);\n const inData = this.texData.get(input.dataId);\n // Even though the input texture could be unpacked or dense packed, it is\n // always considered as unpacked for EncodeMatrixProgram.\n inData.isPacked = false;\n\n 
// Bind texture to the input tensor.\n inData.texture = {texture, texShape: [texHeight, texWidth]};\n inData.texShape = [texHeight, texWidth];\n\n const shapeAs3D = webgl_util.getShapeAs3D(shape);\n const program =\n new EncodeMatrixProgram(shapeAs3D, false /* isByteArray */, channels);\n const output =\n this.runWebGLProgram(program, [input], dtype, [[texHeight, texWidth]]);\n output.shape = shape;\n\n // Unbind the texture from the input tensor to avoid the texture being\n // released.\n inData.texture = null;\n this.disposeIntermediateTensorInfo(input);\n\n return output.dataId;\n }\n\n override write(values: BackendValues, shape: number[], dtype: DataType):\n DataId {\n if (env().getBool('WEBGL_CHECK_NUMERICAL_PROBLEMS') ||\n env().getBool('DEBUG')) {\n this.checkNumericalProblems(values);\n }\n if (dtype === 'complex64' && values != null) {\n throw new Error(\n `Cannot write to a complex64 dtype. ` +\n `Please use tf.complex(real, imag).`);\n }\n const dataId = {id: this.nextDataId()};\n this.texData.set(\n dataId,\n {shape, dtype, values, usage: TextureUsage.UPLOAD, refCount: 1});\n return dataId;\n }\n\n /** Return refCount of a `TensorData`. */\n override refCount(dataId: DataId): number {\n if (this.texData.has(dataId)) {\n const tensorData = this.texData.get(dataId);\n return tensorData.refCount;\n }\n return 0;\n }\n\n /** Increase refCount of a `TextureData`. */\n override incRef(dataId: DataId): void {\n const texData = this.texData.get(dataId);\n texData.refCount++;\n }\n\n /** Decrease refCount of a `TextureData`. */\n decRef(dataId: DataId): void {\n if (this.texData.has(dataId)) {\n const texData = this.texData.get(dataId);\n texData.refCount--;\n }\n }\n\n override move(\n dataId: DataId, values: BackendValues, shape: number[], dtype: DataType,\n refCount: number): void {\n if (env().getBool('DEBUG')) {\n this.checkNumericalProblems(values);\n }\n if (dtype === 'complex64') {\n throw new Error(\n `Cannot write to a complex64 dtype. ` +\n `Please use tf.complex(real, imag).`);\n }\n this.texData.set(\n dataId, {shape, dtype, values, usage: TextureUsage.UPLOAD, refCount});\n }\n\n disposeIntermediateTensorInfo(tensorInfo: TensorInfo): void {\n this.disposeData(tensorInfo.dataId);\n }\n\n override readSync(dataId: DataId): BackendValues {\n const texData = this.texData.get(dataId);\n const {values, dtype, complexTensorInfos, slice, shape, isPacked} = texData;\n\n // The presence of `slice` indicates this tensor is a shallow slice of a\n // different tensor, and is using that original tensor's texture. 
Run\n // `clone` in order to copy that texture and read from it.\n if (slice != null) {\n let program;\n if (isPacked) {\n program = new UnaryOpPackedProgram(shape, unary_op.CLONE);\n } else {\n program = new UnaryOpProgram(shape, unary_op.CLONE);\n }\n const res =\n this.runWebGLProgram(program, [{dataId, shape, dtype}], dtype);\n const data = this.readSync(res.dataId);\n this.disposeIntermediateTensorInfo(res);\n return data;\n }\n if (values != null) {\n return this.convertAndCacheOnCPU(dataId);\n }\n if (dtype === 'string') {\n return values;\n }\n const shouldTimeProgram = this.activeTimers != null;\n let start: number;\n if (shouldTimeProgram) {\n start = util.now();\n }\n\n let result: Float32Array;\n if (dtype === 'complex64') {\n const realValues =\n this.readSync(complexTensorInfos.real.dataId) as Float32Array;\n const imagValues =\n this.readSync(complexTensorInfos.imag.dataId) as Float32Array;\n result = backend_util.mergeRealAndImagArrays(realValues, imagValues);\n } else {\n result = this.getValuesFromTexture(dataId);\n }\n\n if (shouldTimeProgram) {\n this.downloadWaitMs += util.now() - start;\n }\n return this.convertAndCacheOnCPU(dataId, result);\n }\n\n override async read(dataId: DataId): Promise {\n if (this.pendingRead.has(dataId)) {\n const subscribers = this.pendingRead.get(dataId);\n return new Promise(resolve => subscribers.push(resolve));\n }\n const texData = this.texData.get(dataId);\n const {values, shape, slice, dtype, complexTensorInfos, isPacked} = texData;\n\n // The presence of `slice` indicates this tensor is a shallow slice of a\n // different tensor, and is using that original tensor's texture. Run\n // `clone` in order to copy that texture and read from it.\n if (slice != null) {\n let program;\n if (isPacked) {\n program = new UnaryOpPackedProgram(shape, unary_op.CLONE);\n } else {\n program = new UnaryOpProgram(shape, unary_op.CLONE);\n }\n const res =\n this.runWebGLProgram(program, [{dataId, shape, dtype}], dtype);\n const data = this.read(res.dataId);\n this.disposeIntermediateTensorInfo(res);\n return data;\n }\n\n if (values != null) {\n return this.convertAndCacheOnCPU(dataId);\n }\n\n if (env().getBool('DEBUG')) {\n // getBool('WEBGL_DOWNLOAD_FLOAT_ENABLED') caused a blocking GPU call.\n // For performance reason, only check it for debugging. 
In production,\n // it doesn't handle this use case anyway, so behavior is not changed.\n if (!env().getBool('WEBGL_DOWNLOAD_FLOAT_ENABLED') &&\n env().getNumber('WEBGL_VERSION') === 2) {\n throw new Error(\n `tensor.data() with WEBGL_DOWNLOAD_FLOAT_ENABLED=false and ` +\n `WEBGL_VERSION=2 not yet supported.`);\n }\n }\n\n let buffer: WebGLBuffer = null;\n let tmpDownloadTarget: TensorInfo;\n\n if (dtype !== 'complex64' && env().get('WEBGL_BUFFER_SUPPORTED')) {\n // Possibly copy the texture into a buffer before inserting a fence.\n tmpDownloadTarget = this.decode(dataId);\n const tmpData = this.texData.get(tmpDownloadTarget.dataId);\n\n buffer = this.gpgpu.createBufferFromTexture(\n tmpData.texture.texture, ...tex_util.getDenseTexShape(shape));\n }\n\n this.pendingRead.set(dataId, []);\n\n if (dtype !== 'complex64') {\n // Create a fence and wait for it to resolve.\n await this.gpgpu.createAndWaitForFence();\n }\n\n // Download the values from the GPU.\n let vals: Float32Array;\n if (dtype === 'complex64') {\n const ps = await Promise.all([\n this.read(complexTensorInfos.real.dataId),\n this.read(complexTensorInfos.imag.dataId)\n ]);\n\n const realValues = ps[0];\n const imagValues = ps[1];\n vals = backend_util.mergeRealAndImagArrays(\n realValues as Float32Array, imagValues as Float32Array);\n } else if (buffer == null) {\n vals = this.getValuesFromTexture(dataId);\n } else {\n const size = util.sizeFromShape(shape);\n vals = this.gpgpu.downloadFloat32MatrixFromBuffer(buffer, size);\n }\n if (tmpDownloadTarget != null) {\n this.disposeIntermediateTensorInfo(tmpDownloadTarget);\n }\n if (buffer != null) {\n const gl = this.gpgpu.gl;\n webgl_util.callAndCheck(gl, () => gl.deleteBuffer(buffer));\n }\n const dTypeVals = this.convertAndCacheOnCPU(dataId, vals);\n\n const subscribers = this.pendingRead.get(dataId);\n this.pendingRead.delete(dataId);\n\n // Notify all pending reads.\n subscribers.forEach(resolve => resolve(dTypeVals));\n if (this.pendingDisposal.has(dataId)) {\n this.pendingDisposal.delete(dataId);\n if (this.disposeData(dataId)) {\n engine().removeDataId(dataId, this);\n }\n this.pendingDeletes--;\n }\n return dTypeVals;\n }\n\n /**\n * Read tensor to a new texture that is densely packed for ease of use.\n * @param dataId The source tensor.\n * @param options\n * customTexShape: Optional. If set, will use the user defined texture\n * shape to create the texture.\n */\n override readToGPU(dataId: DataId, options: DataToGPUWebGLOption = {}):\n GPUData {\n const texData = this.texData.get(dataId);\n const {values, shape, slice, dtype, isPacked, texture} = texData;\n\n if (dtype === 'complex64') {\n throw new Error('Does not support reading texture for complex64 dtype.');\n }\n\n // The presence of `slice` indicates this tensor is a shallow slice of a\n // different tensor, and is using that original tensor's texture. 
Run\n // `clone` in order to copy that texture and read from it.\n if (slice != null) {\n let program;\n if (isPacked) {\n program = new UnaryOpPackedProgram(shape, unary_op.CLONE);\n } else {\n program = new UnaryOpProgram(shape, unary_op.CLONE);\n }\n const res =\n this.runWebGLProgram(program, [{dataId, shape, dtype}], dtype);\n const gpuResouorce = this.readToGPU(res, options);\n this.disposeIntermediateTensorInfo(res);\n return gpuResouorce;\n }\n\n if (texture == null) {\n if (values != null) {\n throw new Error('Data is not on GPU but on CPU.');\n } else {\n throw new Error('There is no data on GPU or CPU.');\n }\n }\n\n // Decode the texture so that it is stored densely (using four channels).\n const tmpTarget = this.decode(dataId, options.customTexShape);\n\n // Make engine track this tensor, so that we can dispose it later.\n const tensorRef = engine().makeTensorFromTensorInfo(tmpTarget);\n\n const tmpData = this.texData.get(tmpTarget.dataId);\n return {tensorRef, ...tmpData.texture};\n }\n\n bufferSync(t: TensorInfo):\n TensorBuffer {\n const data = this.readSync(t.dataId);\n if (t.dtype === 'string') {\n try {\n // Decode the bytes into string.\n const strings = (data as Uint8Array[]).map(d => util.decodeString(d));\n return buffer(t.shape as ShapeMap[R], t.dtype, strings) as\n TensorBuffer;\n } catch {\n throw new Error('Failed to decode encoded string bytes into utf-8');\n }\n }\n return buffer(t.shape as ShapeMap[R], t.dtype, data as TypedArray) as\n TensorBuffer;\n }\n\n private checkNumericalProblems(values: BackendValues): void {\n if (values == null) {\n return;\n }\n for (let i = 0; i < values.length; i++) {\n const num = values[i] as number;\n if (!webgl_util.canBeRepresented(num)) {\n if (env().getBool('WEBGL_RENDER_FLOAT32_CAPABLE')) {\n throw Error(\n `The value ${num} cannot be represented with your ` +\n `current settings. Consider enabling float32 rendering: ` +\n `'tf.env().set('WEBGL_RENDER_FLOAT32_ENABLED', true);'`);\n }\n throw Error(`The value ${num} cannot be represented on this device.`);\n }\n }\n }\n\n private getValuesFromTexture(dataId: DataId): Float32Array {\n const {shape, dtype, isPacked} = this.texData.get(dataId);\n const size = util.sizeFromShape(shape);\n if (env().getBool('WEBGL_DOWNLOAD_FLOAT_ENABLED')) {\n const tmpTarget = this.decode(dataId);\n const tmpData = this.texData.get(tmpTarget.dataId);\n const vals =\n this.gpgpu\n .downloadMatrixFromPackedTexture(\n tmpData.texture.texture, ...tex_util.getDenseTexShape(shape))\n .subarray(0, size);\n\n this.disposeIntermediateTensorInfo(tmpTarget);\n\n return vals;\n }\n\n const shouldUsePackedProgram =\n env().getBool('WEBGL_PACK') && isPacked === true;\n const outputShape =\n shouldUsePackedProgram ? 
webgl_util.getShapeAs3D(shape) : shape;\n const program = shouldUsePackedProgram ?\n new EncodeFloatPackedProgram(outputShape as [number, number, number]) :\n new EncodeFloatProgram(outputShape);\n const output = this.runWebGLProgram(\n program, [{shape: outputShape, dtype, dataId}], 'float32');\n const tmpData = this.texData.get(output.dataId);\n const vals = this.gpgpu\n .downloadByteEncodedFloatMatrixFromOutputTexture(\n tmpData.texture.texture, tmpData.texShape[0],\n tmpData.texShape[1])\n .subarray(0, size);\n this.disposeIntermediateTensorInfo(output);\n\n return vals;\n }\n\n override timerAvailable(): boolean {\n return env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE') > 0;\n }\n\n override time(f: () => void): Promise {\n const oldActiveTimers = this.activeTimers;\n const newActiveTimers: TimerNode[] = [];\n\n let outerMostTime = false;\n if (this.programTimersStack == null) {\n this.programTimersStack = newActiveTimers;\n outerMostTime = true;\n } else {\n this.activeTimers.push(newActiveTimers);\n }\n this.activeTimers = newActiveTimers;\n\n f();\n\n // needing to split these up because util.flatten only accepts certain types\n const flattenedActiveTimerQueries =\n util.flatten(this.activeTimers.map((d: KernelInfo) => d.query))\n .filter(d => d != null);\n const flattenedActiveTimerNames =\n util.flatten(this.activeTimers.map((d: KernelInfo) => d.name))\n .filter(d => d != null);\n\n this.activeTimers = oldActiveTimers;\n\n if (outerMostTime) {\n this.programTimersStack = null;\n }\n\n const res: WebGLTimingInfo = {\n uploadWaitMs: this.uploadWaitMs,\n downloadWaitMs: this.downloadWaitMs,\n kernelMs: null,\n wallMs: null // will be filled by the engine\n };\n\n return (async () => {\n if (env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE') >\n 0) {\n const kernelMs = await Promise.all(flattenedActiveTimerQueries);\n\n res['kernelMs'] = util.sum(kernelMs);\n res['getExtraProfileInfo'] = () =>\n kernelMs\n .map((d, i) => ({name: flattenedActiveTimerNames[i], ms: d}))\n .map(d => `${d.name}: ${d.ms}`)\n .join(', ');\n } else {\n res['kernelMs'] = {\n error: 'WebGL query timers are not supported in this environment.'\n };\n }\n\n this.uploadWaitMs = 0;\n this.downloadWaitMs = 0;\n return res;\n })();\n }\n override memory(): WebGLMemoryInfo {\n return {\n unreliable: false,\n numBytesInGPU: this.numBytesInGPU,\n numBytesInGPUAllocated: this.textureManager.numBytesAllocated,\n numBytesInGPUFree: this.textureManager.numBytesFree\n } as WebGLMemoryInfo;\n }\n\n private startTimer(): WebGLQuery|CPUTimerQuery {\n if (env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE') > 0) {\n return this.gpgpu.beginQuery();\n }\n return {startMs: util.now(), endMs: null};\n }\n\n private endTimer(query: WebGLQuery|CPUTimerQuery): WebGLQuery|CPUTimerQuery {\n if (env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE') > 0) {\n this.gpgpu.endQuery();\n return query;\n }\n (query as CPUTimerQuery).endMs = util.now();\n return query;\n }\n\n private async getQueryTime(query: WebGLQuery|CPUTimerQuery): Promise {\n if (env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE') > 0) {\n return this.gpgpu.waitForQueryAndGetTime(query as WebGLQuery);\n }\n const timerQuery = query as CPUTimerQuery;\n return timerQuery.endMs - timerQuery.startMs;\n }\n\n private pendingDeletes = 0;\n\n /**\n * Decrease the RefCount on the dataId and dispose the memory if the dataId\n * has 0 refCount. 
If there are pending read on the data, the disposal would\n * added to the pending delete queue. Return true if the dataId is removed\n * from backend or the backend does not contain the dataId, false if the\n * dataId is not removed. Memory may or may not be released even when dataId\n * is removed, which also depends on dataRefCount, see `releaseGPU`.\n * @param dataId\n * @oaram force Optional, remove the data regardless of refCount\n */\n override disposeData(dataId: DataId, force = false): boolean {\n if (this.pendingDisposal.has(dataId)) {\n return false;\n }\n\n // No-op if already disposed.\n if (!this.texData.has(dataId)) {\n return true;\n }\n\n // if force flag is set, change refCount to 0, this would ensure disposal\n // when added to the pendingDisposal queue. Memory may or may not be\n // released, which also depends on dataRefCount, see `releaseGPU`.\n if (force) {\n this.texData.get(dataId).refCount = 0;\n } else {\n this.texData.get(dataId).refCount--;\n }\n\n if (!force && this.texData.get(dataId).refCount > 0) {\n return false;\n }\n\n if (this.pendingRead.has(dataId)) {\n this.pendingDisposal.add(dataId);\n this.pendingDeletes++;\n return false;\n }\n\n this.releaseGPUData(dataId);\n const {complexTensorInfos} = this.texData.get(dataId);\n if (complexTensorInfos != null) {\n this.disposeData(complexTensorInfos.real.dataId, force);\n this.disposeData(complexTensorInfos.imag.dataId, force);\n }\n\n this.texData.delete(dataId);\n\n return true;\n }\n\n private releaseGPUData(dataId: DataId): void {\n const {texture, dtype, texShape, usage, isPacked, slice} =\n this.texData.get(dataId);\n const key = slice && slice.origDataId || dataId;\n const refCount = this.dataRefCount.get(key);\n\n if (refCount > 1) {\n this.dataRefCount.set(key, refCount - 1);\n } else {\n this.dataRefCount.delete(key);\n if (texture != null) {\n this.numBytesInGPU -= this.computeBytes(texShape, dtype);\n this.textureManager.releaseTexture(texture, texShape, usage, isPacked);\n }\n }\n\n const texData = this.texData.get(dataId);\n texData.texture = null;\n texData.texShape = null;\n texData.isPacked = false;\n texData.slice = null;\n }\n\n getTexture(dataId: DataId): WebGLTexture {\n this.uploadToGPU(dataId);\n return this.texData.get(dataId).texture.texture;\n }\n\n /**\n * Returns internal information for the specific data bucket. Used in unit\n * tests.\n */\n getDataInfo(dataId: DataId): TextureData {\n return this.texData.get(dataId);\n }\n\n /*\n Tests whether all the inputs to an op are small and on the CPU. This heuristic\n determines when it would be faster to execute a kernel on the CPU. WebGL\n kernels opt into running this check and forwarding when appropriate.\n TODO(https://github.com/tensorflow/tfjs/issues/872): Develop a more\n sustainable strategy for optimizing backend execution of ops.\n */\n shouldExecuteOnCPU(\n inputs: TensorInfo[],\n sizeThreshold = CPU_HANDOFF_SIZE_THRESHOLD): boolean {\n return env().getBool('WEBGL_CPU_FORWARD') &&\n inputs.every(\n input => this.texData.get(input.dataId).texture == null &&\n util.sizeFromShape(input.shape) < sizeThreshold);\n }\n\n getGPGPUContext(): GPGPUContext {\n return this.gpgpu;\n }\n\n where(condition: Tensor): Tensor2D {\n backend_util.warn(\n 'tf.where() in webgl locks the UI thread. 
' +\n 'Call tf.whereAsync() instead');\n const condVals = condition.dataSync();\n return whereImpl(condition.shape, condVals);\n }\n\n private packedUnaryOp(x: TensorInfo, op: string, dtype: DataType) {\n const program = new UnaryOpPackedProgram(x.shape, op);\n const outInfo = this.compileAndRun(program, [x], dtype);\n return engine().makeTensorFromTensorInfo(outInfo);\n }\n\n // TODO(msoulanille) remove this once the backend has been modularized\n // a copy is needed here to break a circular dependency.\n // Also remove the op from unary_op.\n abs(x: T): T {\n // TODO: handle cases when x is complex.\n if (this.shouldExecuteOnCPU([x]) && x.dtype !== 'complex64') {\n const outValues =\n simpleAbsImplCPU(this.texData.get(x.dataId).values as TypedArray);\n return this.makeOutput(x.shape, x.dtype, outValues);\n }\n\n if (env().getBool('WEBGL_PACK_UNARY_OPERATIONS')) {\n return this.packedUnaryOp(x, unary_op.ABS, x.dtype) as T;\n }\n\n const program = new UnaryOpProgram(x.shape, unary_op.ABS);\n const outInfo = this.compileAndRun(program, [x]);\n return engine().makeTensorFromTensorInfo(outInfo) as T;\n }\n\n makeTensorInfo(\n shape: number[], dtype: DataType,\n values?: BackendValues|string[]): TensorInfo {\n let dataId;\n if (dtype === 'string' && values != null && values.length > 0 &&\n util.isString(values[0])) {\n const encodedValues =\n (values as unknown as string[]).map(d => util.encodeString(d));\n\n dataId = this.write(encodedValues, shape, dtype);\n } else {\n dataId = this.write(values as TypedArray, shape, dtype);\n }\n\n this.texData.get(dataId).usage = null;\n return {dataId, shape, dtype};\n }\n\n private makeOutput(\n shape: number[], dtype: DataType, values?: BackendValues): T {\n return engine().makeTensorFromTensorInfo(\n this.makeTensorInfo(shape, dtype, values), this) as T;\n }\n\n unpackTensor(input: TensorInfo): TensorInfo {\n const program = new UnpackProgram(input.shape);\n return this.runWebGLProgram(program, [input], input.dtype);\n }\n\n packTensor(input: TensorInfo): TensorInfo {\n const program = new PackProgram(input.shape);\n const preventEagerUnpackingOutput = true;\n return this.runWebGLProgram(\n program, [input], input.dtype, null /* customUniformValues */,\n preventEagerUnpackingOutput);\n }\n\n private packedReshape(input: TensorInfo, afterShape: number[]): TensorInfo {\n const input3DShape = [\n webgl_util.getBatchDim(input.shape),\n ...webgl_util.getRowsCols(input.shape)\n ] as [number, number, number];\n const input3D: TensorInfo = {\n dtype: input.dtype,\n shape: input3DShape,\n dataId: input.dataId\n };\n const afterShapeAs3D = [\n webgl_util.getBatchDim(afterShape), ...webgl_util.getRowsCols(afterShape)\n ] as [number, number, number];\n\n const program = new ReshapePackedProgram(afterShapeAs3D, input3DShape);\n const preventEagerUnpackingOfOutput = true;\n const customValues = [input3DShape];\n const output = this.runWebGLProgram(\n program, [input3D], input.dtype, customValues,\n preventEagerUnpackingOfOutput);\n return {dataId: output.dataId, shape: afterShape, dtype: output.dtype};\n }\n\n private decode(dataId: DataId, customTexShape?: [number, number]):\n TensorInfo {\n const texData = this.texData.get(dataId);\n const {isPacked, shape, dtype} = texData;\n if (customTexShape != null) {\n const size = util.sizeFromShape(shape);\n const texSize = customTexShape[0] * customTexShape[1] * 4;\n util.assert(\n size <= texSize,\n () => 'customTexShape is too small. 
' +\n 'Row * Column * 4 should be equal or larger than the ' +\n 'size of the tensor data.');\n }\n const shapeAs3D =\n webgl_util.getShapeAs3D(shape) as [number, number, number];\n let program;\n if (isPacked) {\n program = new DecodeMatrixPackedProgram(shapeAs3D);\n } else {\n program = new DecodeMatrixProgram(shapeAs3D);\n }\n const preventEagerUnpackingOfOutput = true;\n const customValues =\n [customTexShape != null ? customTexShape :\n tex_util.getDenseTexShape(shapeAs3D)];\n const out = this.runWebGLProgram(\n program, [{shape: shapeAs3D, dtype, dataId}], dtype, customValues,\n preventEagerUnpackingOfOutput, customTexShape);\n return {dtype, shape, dataId: out.dataId};\n }\n\n runWebGLProgram(\n program: GPGPUProgram, inputs: TensorInfo[], outputDtype: DataType,\n customUniformValues?: number[][], preventEagerUnpackingOfOutput = false,\n customTexShape?: [number, number]): TensorInfo {\n const output = this.makeTensorInfo(program.outputShape, outputDtype);\n const outData = this.texData.get(output.dataId);\n if (program.packedOutput) {\n outData.isPacked = true;\n }\n if (program.outPackingScheme === tex_util.PackingScheme.DENSE) {\n const texelShape = customTexShape != null ?\n customTexShape :\n tex_util.getDenseTexShape(program.outputShape);\n // For a densely packed output, we explicitly set texShape\n // so it doesn't get assigned later according to our typical packing\n // scheme wherein a single texel can only contain values from adjacent\n // rows/cols.\n outData.texShape = texelShape.map(d => d * 2) as [number, number];\n }\n if (program.outTexUsage != null) {\n outData.usage = program.outTexUsage;\n }\n\n if (util.sizeFromShape(output.shape) === 0) {\n // Short-circuit the computation since the result is empty (has 0 in its\n // shape).\n outData.values =\n util.getTypedArrayFromDType(output.dtype as 'float32', 0);\n return output;\n }\n\n const dataToDispose: TensorInfo[] = [];\n const inputsData: TensorData[] = inputs.map(input => {\n if (input.dtype === 'complex64') {\n throw new Error(\n `GPGPUProgram does not support complex64 input. For complex64 ` +\n `dtypes, please separate the program into real and imaginary ` +\n `parts.`);\n }\n\n let texData = this.texData.get(input.dataId);\n\n if (texData.texture == null) {\n if (!program.packedInputs &&\n util.sizeFromShape(input.shape) <=\n env().getNumber('WEBGL_SIZE_UPLOAD_UNIFORM')) {\n // Upload small tensors that live on the CPU as uniforms, not as\n // textures. Do this only when the environment supports 32bit floats\n // due to problems when comparing 16bit floats with 32bit floats.\n // TODO(https://github.com/tensorflow/tfjs/issues/821): Make it\n // possible for packed shaders to sample from uniforms.\n return {\n shape: input.shape,\n texData: null,\n isUniform: true,\n uniformValues: texData.values as TypedArray\n };\n }\n\n // This ensures that if a packed program's inputs have not yet been\n // uploaded to the GPU, they get uploaded as packed right off the bat.\n if (program.packedInputs) {\n texData.isPacked = true;\n texData.shape = input.shape;\n }\n }\n\n this.uploadToGPU(input.dataId);\n if (!!texData.isPacked !== !!program.packedInputs) {\n input = texData.isPacked ? 
this.unpackTensor(input) :\n this.packTensor(input);\n dataToDispose.push(input);\n texData = this.texData.get(input.dataId);\n } else if (\n texData.isPacked &&\n !webgl_util.isReshapeFree(texData.shape, input.shape)) {\n // This is a special case where a texture exists for a tensor\n // but the shapes are incompatible (due to packing constraints) because\n // the tensor did not have a chance to go through the packed reshape\n // shader. This only happens when we reshape the *same* tensor to form\n // *distinct* inputs to an op, e.g. dotting a vector with itself. This\n // case will disappear once packed uploading is the default.\n\n const savedInput = input;\n const targetShape = input.shape;\n\n input.shape = texData.shape;\n input = this.packedReshape(input as Tensor, targetShape);\n dataToDispose.push(input);\n texData = this.texData.get(input.dataId);\n\n savedInput.shape = targetShape;\n }\n\n return {shape: input.shape, texData, isUniform: false};\n });\n\n this.uploadToGPU(output.dataId);\n const outputData:\n TensorData = {shape: output.shape, texData: outData, isUniform: false};\n const key = gpgpu_math.makeShaderKey(program, inputsData, outputData);\n const binary = this.getAndSaveBinary(key, () => {\n return gpgpu_math.compileProgram(\n this.gpgpu, program, inputsData, outputData);\n });\n const shouldTimeProgram = this.activeTimers != null;\n let query: WebGLQuery|CPUTimerQuery;\n if (shouldTimeProgram) {\n query = this.startTimer();\n }\n\n if (!env().get('ENGINE_COMPILE_ONLY')) {\n gpgpu_math.runProgram(\n this.gpgpu, binary, inputsData, outputData, customUniformValues);\n }\n\n dataToDispose.forEach(info => this.disposeIntermediateTensorInfo(info));\n\n if (shouldTimeProgram) {\n query = this.endTimer(query);\n this.activeTimers.push(\n {name: program.constructor.name, query: this.getQueryTime(query)});\n }\n\n const glFlushThreshold = env().get('WEBGL_FLUSH_THRESHOLD');\n // Manually GL flush requested\n if (glFlushThreshold > 0) {\n const time = util.now();\n if ((time - this.lastGlFlushTime) > glFlushThreshold) {\n this.gpgpu.gl.flush();\n this.lastGlFlushTime = time;\n }\n }\n\n if (!env().getBool('WEBGL_LAZILY_UNPACK') && outData.isPacked &&\n preventEagerUnpackingOfOutput === false) {\n const unpacked = this.unpackTensor(output);\n this.disposeIntermediateTensorInfo(output);\n return unpacked;\n }\n return output;\n }\n\n compileAndRun(\n program: GPGPUProgram, inputs: TensorInfo[], outputDtype?: DataType,\n customUniformValues?: number[][],\n preventEagerUnpackingOfOutput = false): TensorInfo {\n outputDtype = outputDtype || inputs[0].dtype;\n const outInfo = this.runWebGLProgram(\n program, inputs, outputDtype, customUniformValues,\n preventEagerUnpackingOfOutput);\n return outInfo;\n }\n\n private getAndSaveBinary(key: string, getBinary: () => GPGPUBinary):\n GPGPUBinary {\n if (!(key in this.binaryCache)) {\n this.binaryCache[key] = getBinary();\n }\n return this.binaryCache[key];\n }\n\n getTextureManager(): TextureManager {\n return this.textureManager;\n }\n\n private disposed = false;\n\n override dispose() {\n if (this.disposed) {\n return;\n }\n // Avoid disposing the compiled webgl programs during unit testing because\n // it slows down test execution.\n if (!env().getBool('IS_TEST')) {\n const allKeys = Object.keys(this.binaryCache);\n allKeys.forEach(key => {\n this.gpgpu.deleteProgram(this.binaryCache[key].webGLProgram);\n delete this.binaryCache[key];\n });\n }\n this.textureManager.dispose();\n if (this.canvas != null &&\n (typeof (HTMLCanvasElement) 
!== 'undefined' &&\n this.canvas instanceof HTMLCanvasElement)) {\n this.canvas.remove();\n } else {\n this.canvas = null;\n }\n if (this.gpgpuCreatedLocally) {\n this.gpgpu.program = null;\n this.gpgpu.dispose();\n }\n this.disposed = true;\n }\n\n override floatPrecision(): 16|32 {\n if (this.floatPrecisionValue == null) {\n this.floatPrecisionValue = tidy(() => {\n if (!env().get('WEBGL_RENDER_FLOAT32_ENABLED')) {\n // Momentarily switching DEBUG flag to false so we don't throw an\n // error trying to upload a small value.\n const debugFlag = env().getBool('DEBUG');\n env().set('DEBUG', false);\n const underflowCheckValue = this.abs(scalar(1e-8)).dataSync()[0];\n env().set('DEBUG', debugFlag);\n\n if (underflowCheckValue > 0) {\n return 32;\n }\n }\n return 16;\n });\n }\n return this.floatPrecisionValue;\n }\n\n /** Returns the smallest representable number. */\n override epsilon(): number {\n return this.floatPrecision() === 32 ? EPSILON_FLOAT32 : EPSILON_FLOAT16;\n }\n\n uploadToGPU(dataId: DataId): void {\n const texData = this.texData.get(dataId);\n const {shape, dtype, values, texture, usage, isPacked} = texData;\n\n if (texture != null) {\n // Array is already on GPU. No-op.\n return;\n }\n const shouldTimeProgram = this.activeTimers != null;\n let start: number;\n if (shouldTimeProgram) {\n start = util.now();\n }\n\n let texShape = texData.texShape;\n if (texShape == null) {\n // This texShape may not be the final texture shape. For packed or dense\n // textures, the texShape will be changed when textures are created.\n texShape = webgl_util.getTextureShapeFromLogicalShape(shape, isPacked);\n texData.texShape = texShape;\n }\n\n if (values != null) {\n const shapeAs3D = webgl_util.getShapeAs3D(shape);\n\n let program;\n let width = texShape[1], height = texShape[0];\n const isByteArray =\n values instanceof Uint8Array || values instanceof Uint8ClampedArray;\n\n // texture for float array is PhysicalTextureType.PACKED_2X2_FLOAT32, we\n // need to make sure the upload uses the same packed size\n if (isPacked || !isByteArray) {\n [width, height] = tex_util.getPackedMatrixTextureShapeWidthHeight(\n texShape[0], texShape[1]);\n }\n\n if (isPacked) {\n program = new EncodeMatrixPackedProgram(shapeAs3D, isByteArray);\n } else {\n program = new EncodeMatrixProgram(shapeAs3D, isByteArray);\n }\n\n // TexShape for float array needs to be the original shape, which byte\n // array needs to be packed size. This allow the data upload shape to be\n // matched with texture creation logic.\n const tempDenseInputTexShape: [number, number] =\n isByteArray ? 
[height, width] : texShape;\n const tempDenseInputHandle =\n this.makeTensorInfo(tempDenseInputTexShape, dtype);\n const tempDenseInputTexData =\n this.texData.get(tempDenseInputHandle.dataId);\n if (isByteArray) {\n tempDenseInputTexData.usage = TextureUsage.PIXELS;\n } else {\n tempDenseInputTexData.usage = TextureUsage.UPLOAD;\n }\n tempDenseInputTexData.texShape = tempDenseInputTexShape;\n this.gpgpu.uploadDenseMatrixToTexture(\n this.getTexture(tempDenseInputHandle.dataId), width, height,\n values as TypedArray);\n\n const customValues = [[height, width]];\n // We want the output to remain packed regardless of the value of\n // WEBGL_PACK.\n const preventEagerUnpacking = true;\n const encodedOutputTarget = this.runWebGLProgram(\n program, [tempDenseInputHandle], dtype, customValues,\n preventEagerUnpacking);\n\n // Have the original texture assume the identity of the encoded output.\n const outputTexData = this.texData.get(encodedOutputTarget.dataId);\n texData.texShape = outputTexData.texShape;\n texData.isPacked = outputTexData.isPacked;\n texData.usage = outputTexData.usage;\n\n if (!env().get('ENGINE_COMPILE_ONLY')) {\n texData.texture = outputTexData.texture;\n // Once uploaded, don't store the values on cpu.\n texData.values = null;\n this.texData.delete(encodedOutputTarget.dataId);\n } else {\n this.disposeData(encodedOutputTarget.dataId);\n }\n\n this.disposeIntermediateTensorInfo(tempDenseInputHandle);\n\n if (shouldTimeProgram) {\n this.uploadWaitMs += util.now() - start;\n }\n } else {\n const newTexture = this.acquireTexture(texShape, usage, dtype, isPacked);\n texData.texture = newTexture;\n }\n }\n\n private convertAndCacheOnCPU(dataId: DataId, float32Values?: Float32Array):\n TypedArray {\n const texData = this.texData.get(dataId);\n const {dtype} = texData;\n\n if (float32Values != null) {\n texData.values = float32ToTypedArray(float32Values, dtype as 'float32');\n }\n return texData.values as TypedArray;\n }\n\n private acquireTexture(\n texShape: [number, number], texType: TextureUsage, dtype: DataType,\n isPacked: boolean): Texture {\n this.numBytesInGPU += this.computeBytes(texShape, dtype);\n if (!this.warnedAboutMemory &&\n this.numBytesInGPU > this.numMBBeforeWarning * 1024 * 1024) {\n const mb = (this.numBytesInGPU / 1024 / 1024).toFixed(2);\n this.warnedAboutMemory = true;\n console.warn(\n `High memory usage in GPU: ${mb} MB, ` +\n `most likely due to a memory leak`);\n }\n return this.textureManager.acquireTexture(texShape, texType, isPacked);\n }\n\n private computeBytes(shape: [number, number], dtype: DataType) {\n return shape[0] * shape[1] * util.bytesPerElement(dtype);\n }\n\n checkCompileCompletion() {\n for (const [, binary] of Object.entries(this.binaryCache)) {\n this.checkCompletion_(binary);\n }\n }\n\n async checkCompileCompletionAsync(): Promise {\n const ps = [];\n if (this.gpgpu.parallelCompilationExtension) {\n for (const [, binary] of Object.entries(this.binaryCache)) {\n ps.push(this.checkCompletionAsync_(binary));\n }\n return Promise.all(ps);\n } else {\n for (const [, binary] of Object.entries(this.binaryCache)) {\n const p: Promise = new Promise((resolve) => {\n try {\n this.checkCompletion_(binary);\n resolve(true);\n } catch (error) {\n throw error;\n }\n });\n ps.push(p);\n }\n return Promise.all(ps);\n }\n }\n\n private async checkCompletionAsync_(binary: GPGPUBinary): Promise {\n if (this.gpgpu.gl.getProgramParameter(\n binary.webGLProgram,\n this.gpgpu.parallelCompilationExtension.COMPLETION_STATUS_KHR)) {\n return 
this.checkCompletion_(binary);\n } else {\n await nextFrame();\n return this.checkCompletionAsync_(binary);\n }\n }\n\n private checkCompletion_(binary: GPGPUBinary): boolean {\n if (this.gpgpu.gl.getProgramParameter(\n binary.webGLProgram, this.gpgpu.gl.LINK_STATUS) === false) {\n console.log(this.gpgpu.gl.getProgramInfoLog(binary.webGLProgram));\n if (this.gpgpu.gl.getShaderParameter(\n binary.fragmentShader, this.gpgpu.gl.COMPILE_STATUS) === false) {\n webgl_util.logShaderSourceAndInfoLog(\n binary.source,\n this.gpgpu.gl.getShaderInfoLog(binary.fragmentShader));\n throw new Error('Failed to compile fragment shader.');\n }\n throw new Error('Failed to link vertex and fragment shaders.');\n }\n return true;\n }\n\n getUniformLocations() {\n for (const [, binary] of Object.entries(this.binaryCache)) {\n const {\n uniformLocations,\n customUniformLocations,\n infLoc,\n nanLoc,\n inShapesLocations,\n inTexShapesLocations,\n outShapeLocation,\n outShapeStridesLocation,\n outTexShapeLocation\n } = getUniformLocations(this.gpgpu, binary.program, binary.webGLProgram);\n binary.uniformLocations = uniformLocations;\n binary.customUniformLocations = customUniformLocations;\n binary.infLoc = infLoc;\n binary.nanLoc = nanLoc;\n binary.inShapesLocations = inShapesLocations;\n binary.inTexShapesLocations = inTexShapesLocations;\n binary.outShapeLocation = outShapeLocation;\n binary.outShapeStridesLocation = outShapeStridesLocation;\n binary.outTexShapeLocation = outTexShapeLocation;\n }\n }\n\n /**\n * Create a TF.js tensor out of an existing WebGL texture. A new texture will\n * be created.\n */\n override createTensorFromGPUData(\n values: WebGLData, shape: number[], dtype: DataType): Tensor {\n values.channels = values.channels || 'RGBA';\n const {texture, height, width, channels} = values;\n const backend = engine().backend as MathBackendWebGL;\n\n // Have to throw an error, otherwise WebGL just warns and returns wrong\n // values.\n if (!backend.gpgpu.gl.isTexture(texture)) {\n throw new Error(\n `The texture is invalid. Also, please make sure the texture and ` +\n `the TFJS WebGL backend are using the same canvas. If you want to ` +\n `use your own custom canvas, you have to create and use the custom ` +\n `TFJS WebGL backend created from the canvas through ` +\n `'new tf.MathBackendWebGL(customCanvas)'.`);\n }\n\n const dataId =\n backend.writeTexture(texture, shape, dtype, height, width, channels);\n return engine().makeTensorFromDataId(dataId, shape, dtype, backend);\n }\n}\n\nfunction float32ToTypedArray(\n a: Float32Array, dtype: D): tf.DataTypeMap[D] {\n if (dtype === 'float32' || dtype === 'complex64') {\n return a as tf.DataTypeMap[D];\n } else if (dtype === 'int32' || dtype === 'bool') {\n const result = (dtype === 'int32') ? new Int32Array(a.length) :\n new Uint8Array(a.length);\n for (let i = 0; i < result.length; ++i) {\n result[i] = Math.round(a[i]);\n }\n return result as tf.DataTypeMap[D];\n } else {\n throw new Error(`Unknown dtype ${dtype}`);\n }\n}\n","/**\n * @license\n * Copyright 2020 Google Inc. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n// base.ts is the webgl backend without auto kernel registration.\n\nimport {device_util, registerBackend} from '@tensorflow/tfjs-core';\nimport {MathBackendWebGL} from './backend_webgl';\nexport {version as version_webgl} from './version';\n\nif (device_util.isBrowser()) {\n registerBackend('webgl', () => new MathBackendWebGL(), 2 /* priority */);\n}\n\n// Export webgl utilities\nexport * from './webgl';\n\n// Export forceHalfFlost under webgl namespace for the union bundle.\nimport {forceHalfFloat} from './webgl';\nexport const webgl = {forceHalfFloat};\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\n\nimport {GPGPUProgram, useShapeUniforms} from './gpgpu_math';\n\nexport const CHECK_NAN_SNIPPET = `\n if (isnan(a)) return a;\n if (isnan(b)) return b;\n`;\n\nexport const SQUARED_DIFFERENCE = 'return (a - b) * (a - b);';\nexport class BinaryOpProgram implements GPGPUProgram {\n variableNames = ['A', 'B'];\n outputShape: number[];\n userCode: string;\n enableShapeUniforms: boolean;\n\n constructor(op: string, aShape: number[], bShape: number[]) {\n this.outputShape = backend_util.assertAndGetBroadcastShape(aShape, bShape);\n this.enableShapeUniforms = useShapeUniforms(this.outputShape.length);\n this.userCode = `\n float binaryOperation(float a, float b) {\n ${op}\n }\n\n void main() {\n float a = getAAtOutCoords();\n float b = getBAtOutCoords();\n setOutput(binaryOperation(a, b));\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, util} from '@tensorflow/tfjs-core';\n\nimport {GPGPUProgram, useShapeUniforms} from './gpgpu_math';\nimport {getChannels} from './packing_util';\nimport {getCoordsDataType} from './shader_compiler';\n\nexport const CHECK_NAN_SNIPPET_PACKED = `\n result.r = isNaN.r ? NAN : result.r;\n result.g = isNaN.g ? NAN : result.g;\n result.b = isNaN.b ? NAN : result.b;\n result.a = isNaN.a ? NAN : result.a;\n`;\n\nexport const ELU_DER = `\n vec4 bGTEZero = vec4(greaterThanEqual(b, vec4(0.)));\n return (bGTEZero * a) + ((vec4(1.0) - bGTEZero) * (a * (b + vec4(1.0))));\n`;\n\nexport const NOT_EQUAL = `\n return vec4(notEqual(a, b));\n`;\n\nexport class BinaryOpPackedProgram implements GPGPUProgram {\n variableNames = ['A', 'B'];\n outputShape: number[];\n userCode: string;\n supportsBroadcasting = true;\n packedInputs = true;\n packedOutput = true;\n enableShapeUniforms: boolean;\n\n constructor(\n op: string, aShape: number[], bShape: number[],\n checkOutOfBounds = false) {\n this.outputShape = backend_util.assertAndGetBroadcastShape(aShape, bShape);\n const rank = this.outputShape.length;\n this.enableShapeUniforms = useShapeUniforms(rank);\n let checkOutOfBoundsString = '';\n if (checkOutOfBounds) {\n if (rank === 0 || util.sizeFromShape(this.outputShape) === 1) {\n checkOutOfBoundsString = `\n result.y = 0.;\n result.z = 0.;\n result.w = 0.;\n `;\n } else {\n const dtype = getCoordsDataType(rank);\n checkOutOfBoundsString = `\n ${dtype} coords = getOutputCoords();\n `;\n if (rank === 1) {\n if (this.enableShapeUniforms) {\n checkOutOfBoundsString += `\n result.y = (coords + 1) >= outShape ? 0. : result.y;\n result.z = 0.;\n result.w = 0.;\n `;\n } else {\n checkOutOfBoundsString += `\n result.y = (coords + 1) >= ${this.outputShape[0]} ? 0. : result.y;\n result.z = 0.;\n result.w = 0.;\n `;\n }\n } else {\n const channels = getChannels('coords', rank);\n if (this.enableShapeUniforms) {\n checkOutOfBoundsString += `\n bool nextRowOutOfBounds =\n (${channels[rank - 2]} + 1) >= outShape[${rank} - 2];\n bool nextColOutOfBounds =\n (${channels[rank - 1]} + 1) >= outShape[${rank} - 1];\n result.y = nextColOutOfBounds ? 0. : result.y;\n result.z = nextRowOutOfBounds ? 0. : result.z;\n result.w = nextColOutOfBounds || nextRowOutOfBounds ? 0. : result.w;\n `;\n } else {\n checkOutOfBoundsString += `\n bool nextRowOutOfBounds =\n (${channels[rank - 2]} + 1) >= ${this.outputShape[rank - 2]};\n bool nextColOutOfBounds =\n (${channels[rank - 1]} + 1) >= ${this.outputShape[rank - 1]};\n result.y = nextColOutOfBounds ? 0. : result.y;\n result.z = nextRowOutOfBounds ? 0. : result.z;\n result.w = nextColOutOfBounds || nextRowOutOfBounds ? 0. 
: result.w;\n `;\n }\n }\n }\n }\n\n this.userCode = `\n vec4 binaryOperation(vec4 a, vec4 b) {\n ${op}\n }\n\n void main() {\n vec4 a = getAAtOutCoords();\n vec4 b = getBAtOutCoords();\n\n vec4 result = binaryOperation(a, b);\n ${checkOutOfBoundsString}\n\n setOutput(result);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Identity, IdentityInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\n\nexport function identity(\n args: {inputs: IdentityInputs, backend: MathBackendWebGL}): TensorInfo {\n const {inputs, backend} = args;\n const {x} = inputs;\n\n backend.incRef(x.dataId);\n\n return {dataId: x.dataId, shape: x.shape, dtype: x.dtype};\n}\n\nexport const identityConfig: KernelConfig = {\n kernelName: Identity,\n backendName: 'webgl',\n kernelFunc: identity as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Complex, ComplexInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {identity} from './Identity';\n\n/**\n * In WebGL data is stored in GPU textures which can't be efficiently copied, so\n * complex tensors share data with their real and imaginary components. Complex\n * tensors' reference to the components is tracked by refCount on the individual\n * component. 
The refCounts are increased by the identity call.\n *\n * When a complex tensor is disposed, it will reduce the refCount on the\n * components by calling disposeData on each.\n */\nexport function complex(\n args: {inputs: ComplexInputs, backend: MathBackendWebGL}): TensorInfo {\n const {inputs, backend} = args;\n const {real, imag} = inputs;\n\n const complexInfo = backend.makeTensorInfo(real.shape, 'complex64');\n const complex = backend.texData.get(complexInfo.dataId);\n\n const realTensorInfo = identity({inputs: {x: real}, backend});\n\n const imagTensorInfo = identity({inputs: {x: imag}, backend});\n\n complex.complexTensorInfos = {real: realTensorInfo, imag: imagTensorInfo};\n\n return complexInfo;\n}\n\nexport const complexConfig: KernelConfig = {\n kernelName: Complex,\n backendName: 'webgl',\n kernelFunc: complex as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env, KernelConfig, KernelFunc, LeakyRelu, LeakyReluAttrs, LeakyReluInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {BinaryOpProgram} from '../binaryop_gpu';\nimport {BinaryOpPackedProgram} from '../binaryop_packed_gpu';\n\nexport const LEAKYRELU = `return (a < 0.) ? b * a : a;`;\nexport const LEAKYRELU_PACKED = `\n vec4 aLessThanZero = vec4(lessThan(a, vec4(0.)));\n return (aLessThanZero * (b * a)) + ((vec4(1.0) - aLessThanZero) * a);\n`;\n\nexport function leakyRelu(args: {\n inputs: LeakyReluInputs,\n backend: MathBackendWebGL,\n attrs: LeakyReluAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {alpha} = attrs;\n\n const $alpha = backend.makeTensorInfo(\n [], 'float32',\n util.createScalarValue(alpha as unknown as 'float32', 'float32'));\n\n const program = env().getBool('WEBGL_PACK_BINARY_OPERATIONS') ?\n new BinaryOpPackedProgram(LEAKYRELU_PACKED, x.shape, $alpha.shape) :\n new BinaryOpProgram(LEAKYRELU, x.shape, $alpha.shape);\n const result = backend.runWebGLProgram(program, [x, $alpha], 'float32');\n\n backend.disposeIntermediateTensorInfo($alpha);\n\n return result;\n}\n\nexport const leakyReluConfig: KernelConfig = {\n kernelName: LeakyRelu,\n backendName: 'webgl',\n kernelFunc: leakyRelu as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env, KernelConfig, KernelFunc, Prelu, PreluInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {BinaryOpProgram} from '../binaryop_gpu';\nimport {BinaryOpPackedProgram} from '../binaryop_packed_gpu';\n\nexport const PRELU = `return (a < 0.) ? b * a : a;`;\nexport const PRELU_PACKED = `\n vec4 aLessThanZero = vec4(lessThan(a, vec4(0.)));\n return (aLessThanZero * (b * a)) + ((vec4(1.0) - aLessThanZero) * a);\n`;\n\nexport function prelu(args: {inputs: PreluInputs, backend: MathBackendWebGL}):\n TensorInfo {\n const {inputs, backend} = args;\n const {x, alpha} = inputs;\n\n const program = env().getBool('WEBGL_PACK_BINARY_OPERATIONS') ?\n new BinaryOpPackedProgram(PRELU_PACKED, x.shape, alpha.shape) :\n new BinaryOpProgram(PRELU, x.shape, alpha.shape);\n return backend.runWebGLProgram(program, [x, alpha], 'float32');\n}\n\nexport const preluConfig: KernelConfig = {\n kernelName: Prelu,\n backendName: 'webgl',\n kernelFunc: prelu as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, BinaryInputs, DataType, env, KernelFunc, TypedArray, UnaryInputs, upcastType} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {BinaryOpProgram} from '../binaryop_gpu';\nimport {BinaryOpPackedProgram} from '../binaryop_packed_gpu';\nimport {complex} from '../kernels/Complex';\nimport {LEAKYRELU, LEAKYRELU_PACKED} from '../kernels/LeakyRelu';\nimport {PRELU, PRELU_PACKED} from '../kernels/Prelu';\nimport * as unary_op from '../unaryop_gpu';\nimport {UnaryOpProgram} from '../unaryop_gpu';\nimport * as unary_packed_op from '../unaryop_packed_gpu';\nimport {UnaryOpPackedProgram} from '../unaryop_packed_gpu';\n\nimport {SimpleBinaryKernelImplCPU, SimpleUnaryKernelImplCPU} from './shared';\n\nexport const CHECK_NAN_SNIPPET_UNARY = `if (isnan(x)) return x;`;\n\ntype UnaryKernelFuncConfig = {\n opSnippet: string,\n packedOpSnippet?: string,\n cpuKernelImpl?: SimpleUnaryKernelImplCPU,\n dtype?: DataType\n};\n\n/**\n * Template that creates a `KernelFunc` for unary ops.\n * @param opSnippet Op snippet to create `UnaryOpProgram`.\n * @param 
packedOpSnippet Op snippet to create `UnaryOpPackedProgram`.\n * @param dtype Optional. If set, the result has this dtype. Otherwise, the\n * result has the same dtype as the first input. This is mainly used in\n * comparison kernels, such as Equal, Less, Greater, etc.\n */\nexport function unaryKernelFunc(\n {opSnippet, packedOpSnippet, cpuKernelImpl, dtype}: UnaryKernelFuncConfig):\n KernelFunc {\n return ({inputs, backend}) => {\n const {x} = inputs as UnaryInputs;\n const webglBackend = backend as MathBackendWebGL;\n\n const $dtype = dtype || x.dtype;\n if (webglBackend.shouldExecuteOnCPU([x]) && cpuKernelImpl != null) {\n const xData = webglBackend.texData.get(x.dataId);\n const outValues = cpuKernelImpl(xData.values as TypedArray, $dtype);\n return webglBackend.makeTensorInfo(x.shape, $dtype, outValues);\n }\n\n const shouldUsePackedProgram =\n env().getBool('WEBGL_PACK_UNARY_OPERATIONS') && packedOpSnippet != null;\n let program: UnaryOpProgram|UnaryOpPackedProgram;\n if (shouldUsePackedProgram) {\n program = new UnaryOpPackedProgram(x.shape, packedOpSnippet);\n } else {\n program = new UnaryOpProgram(x.shape, opSnippet);\n }\n\n return webglBackend.runWebGLProgram(program, [x], $dtype);\n };\n}\n\ntype BinaryKernelFuncConfig = {\n opSnippet: string,\n packedOpSnippet?: string,\n checkOutOfBounds?: boolean,\n supportsComplex?: boolean,\n cpuKernelImpl?: SimpleBinaryKernelImplCPU,\n dtype?: DataType\n};\n\n/**\n * Template that creates a `KernelFunc` for binary ops.\n * @param opSnippet Op snippet to create `BinaryOpProgram`.\n * @param packedOpSnippet Op snippet to create `BinaryOpPackedProgram`.\n * @param checkOutOfBoundsForPackedProgram Whether to set checkOutOfBounds=true\n * when creating BinaryOpPackedProgram.\n * @param dtype Optional. If set, the result has this dtype. Otherwise, the\n * result has the same dtype as the first input. 
This is mainly used in\n * comparison kernels, such as Equal, Less, Greater, etc.\n */\nexport function binaryKernelFunc({\n opSnippet,\n packedOpSnippet,\n checkOutOfBounds = false,\n supportsComplex = false,\n cpuKernelImpl,\n dtype\n}: BinaryKernelFuncConfig): KernelFunc {\n return ({inputs, backend}) => {\n const {a, b} = inputs as BinaryInputs;\n const webglBackend = backend as MathBackendWebGL;\n\n if (supportsComplex && a.dtype === 'complex64') {\n const aData = webglBackend.texData.get(a.dataId);\n const bData = webglBackend.texData.get(b.dataId);\n\n const [real, imag] = [\n [aData.complexTensorInfos.real, bData.complexTensorInfos.real],\n [aData.complexTensorInfos.imag, bData.complexTensorInfos.imag]\n ].map(complexParts => {\n const [aPart, bPart] = complexParts;\n\n const aHandle = {\n dataId: aPart.dataId,\n dtype: aPart.dtype,\n shape: a.shape\n };\n const bHandle = {\n dataId: bPart.dataId,\n dtype: bPart.dtype,\n shape: b.shape\n };\n\n const program = new BinaryOpProgram(opSnippet, a.shape, b.shape);\n return webglBackend.runWebGLProgram(\n program, [aHandle, bHandle], upcastType(aPart.dtype, bPart.dtype));\n });\n\n const complexOutput =\n complex({inputs: {real, imag}, backend: webglBackend});\n\n webglBackend.disposeIntermediateTensorInfo(real);\n webglBackend.disposeIntermediateTensorInfo(imag);\n\n // TODO(annxingyuan): Implement CPU forwarding for complex inputs.\n\n return complexOutput;\n }\n\n const $dtype = dtype || upcastType(a.dtype, b.dtype);\n if ((a.dtype === 'string' || b.dtype === 'string' ||\n webglBackend.shouldExecuteOnCPU([a, b])) &&\n cpuKernelImpl != null) {\n const aVals = webglBackend.texData.get(a.dataId).values as TypedArray;\n const bVals = webglBackend.texData.get(b.dataId).values as TypedArray;\n\n const decodedAVals = a.dtype === 'string' ?\n // tslint:disable-next-line: no-any\n backend_util.fromUint8ToStringArray(aVals as any as Uint8Array[]) :\n aVals;\n const decodedBVals = a.dtype === 'string' ?\n // tslint:disable-next-line: no-any\n backend_util.fromUint8ToStringArray(bVals as any as Uint8Array[]) :\n bVals;\n const [outValues, outShape] =\n cpuKernelImpl(a.shape, b.shape, decodedAVals, decodedBVals, $dtype);\n\n const out = webglBackend.makeTensorInfo(outShape, $dtype);\n const outData = webglBackend.texData.get(out.dataId);\n outData.values = outValues;\n return out;\n }\n\n const shouldUsePackedProgram =\n env().getBool('WEBGL_PACK_BINARY_OPERATIONS') &&\n packedOpSnippet != null;\n let program: BinaryOpProgram|BinaryOpPackedProgram;\n if (shouldUsePackedProgram) {\n program = new BinaryOpPackedProgram(\n packedOpSnippet, a.shape, b.shape, checkOutOfBounds);\n } else {\n program = new BinaryOpProgram(opSnippet, a.shape, b.shape);\n }\n\n return webglBackend.runWebGLProgram(program, [a, b], $dtype);\n };\n}\n\nexport function mapActivationToShaderProgram(\n activation: backend_util.Activation, packed = false): string {\n if (activation === 'linear') {\n if (packed) {\n return unary_packed_op.LINEAR;\n }\n return unary_op.LINEAR;\n } else if (activation === 'relu') {\n if (packed) {\n return unary_packed_op.RELU;\n }\n return unary_op.RELU;\n } else if (activation === 'elu') {\n if (packed) {\n return unary_packed_op.ELU;\n }\n return unary_op.ELU;\n } else if (activation === 'relu6') {\n if (packed) {\n return unary_packed_op.RELU6;\n }\n return unary_op.RELU6;\n } else if (activation === 'prelu') {\n if (packed) {\n return PRELU_PACKED;\n }\n return PRELU;\n } else if (activation === 'leakyrelu') {\n if (packed) {\n return 
LEAKYRELU_PACKED;\n }\n return LEAKYRELU;\n } else if (activation === 'sigmoid') {\n if (packed) {\n return unary_packed_op.SIGMOID;\n }\n return unary_op.SIGMOID;\n }\n throw new Error(`Activation ${\n activation} has not been implemented for the WebGL backend.`);\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram, useShapeUniforms} from './gpgpu_math';\n\nexport class MatMulPackedProgram implements GPGPUProgram {\n variableNames = ['matrixA', 'matrixB'];\n packedInputs = true;\n packedOutput = true;\n outputShape: number[];\n userCode: string;\n enableShapeUniforms: boolean;\n\n constructor(\n aShape: [number, number, number], bShape: [number, number, number],\n outputShape: [number, number, number], transposeA = false,\n transposeB = false, addBias = false, activation: string = null,\n hasPreluActivation = false, hasLeakyreluActivation = false) {\n this.outputShape = outputShape;\n this.enableShapeUniforms = useShapeUniforms(this.outputShape.length);\n\n const sharedDim = transposeA ? aShape[1] : aShape[2];\n const sharedDimensionPacked = Math.ceil(sharedDim / 2);\n\n const aSample = transposeA ? 'i * 2, rc.y' : 'rc.y, i * 2';\n const bSample = transposeB ? 'rc.z, i * 2' : 'i * 2, rc.z';\n const aSwizzle = transposeA ? ['a.xxyy', 'a.zzww'] : ['a.xxzz', 'a.yyww'];\n const bSwizzle = transposeB ? ['b.xzxz', 'b.ywyw'] : ['b.xyxy', 'b.zwzw'];\n\n let activationSnippet = '', applyActivationSnippet = '';\n if (activation) {\n if (hasPreluActivation) {\n activationSnippet = `vec4 activation(vec4 a) {\n vec4 b = getPreluActivationWeightsAtOutCoords();\n ${activation}\n }`;\n } else if (hasLeakyreluActivation) {\n activationSnippet = `vec4 activation(vec4 a) {\n vec4 b = getLeakyreluAlphaAtOutCoords();\n ${activation}\n }`;\n } else {\n activationSnippet = `vec4 activation(vec4 x) {\n ${activation}\n }`;\n }\n\n applyActivationSnippet = `result = activation(result);`;\n }\n\n const addBiasSnippet = addBias ? 
'result += getBiasAtOutCoords();' : '';\n if (addBias) {\n this.variableNames.push('bias');\n }\n\n if (hasPreluActivation) {\n this.variableNames.push('preluActivationWeights');\n }\n\n if (hasLeakyreluActivation) {\n this.variableNames.push('leakyreluAlpha');\n }\n\n let batchASnippet = 'rc.x';\n let batchBSnippet = 'rc.x';\n if (aShape[0] < bShape[0]) {\n batchASnippet = `imod(rc.x, ${aShape[0]})`;\n } else if (bShape[0] < aShape[0]) {\n batchBSnippet = `imod(rc.x, ${bShape[0]})`;\n }\n\n this.userCode = `\n ${activationSnippet}\n // Don't use uniform for sharedDimensionPacked for performance.\n const float sharedDimension = ${sharedDimensionPacked}.0;\n\n vec4 dot2x2ARowBCol(ivec3 rc) {\n vec4 result = vec4(0);\n int batchA = ${batchASnippet};\n int batchB = ${batchBSnippet};\n for (int i = 0; i < ${sharedDimensionPacked}; i++) {\n vec4 a = getMatrixA(batchA, ${aSample});\n vec4 b = getMatrixB(batchB, ${bSample});\n\n // These swizzled products need to be separately added.\n // See: https://github.com/tensorflow/tfjs/issues/1735\n result += (${aSwizzle[0]} * ${bSwizzle[0]});\n result += (${aSwizzle[1]} * ${bSwizzle[1]});\n }\n return result;\n }\n\n void main() {\n ivec3 rc = getOutputCoords();\n vec4 result = dot2x2ARowBCol(rc);\n\n ${addBiasSnippet}\n\n ${applyActivationSnippet}\n\n setOutput(result);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\nimport {GPGPUProgram} from './gpgpu_math';\n\n// (Ar + Ai)(Br + Bi) =\n// ArBr + ArBi + AiBr + AiBi = ArBr - AB + ArBi + AiBr\n// Yr = ArBr - AB\n// Yi = ArBi + AiBr\nexport const COMPLEX_MULTIPLY = {\n REAL: 'return areal * breal - aimag * bimag;',\n IMAG: 'return areal * bimag + aimag * breal;'\n};\n\nexport class BinaryOpComplexProgram implements GPGPUProgram {\n variableNames = ['AReal', 'AImag', 'BReal', 'BImag'];\n userCode: string;\n outputShape: number[];\n\n constructor(op: string, aShape: number[], bShape: number[]) {\n this.outputShape = backend_util.assertAndGetBroadcastShape(aShape, bShape);\n\n this.userCode = `\n float binaryOpComplex(\n float areal, float aimag, float breal, float bimag) {\n ${op}\n }\n\n void main() {\n float areal = getARealAtOutCoords();\n float aimag = getAImagAtOutCoords();\n float breal = getBRealAtOutCoords();\n float bimag = getBImagAtOutCoords();\n setOutput(binaryOpComplex(areal, aimag, breal, bimag));\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, BinaryInputs, env, KernelConfig, Multiply, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport * as binaryop_complex_gpu from '../binaryop_complex_gpu';\nimport {BinaryOpComplexProgram} from '../binaryop_complex_gpu';\nimport {BinaryOpProgram} from '../binaryop_gpu';\nimport {BinaryOpPackedProgram} from '../binaryop_packed_gpu';\nimport {multiplyImplCPU as cpuMultiply} from '../kernel_utils/shared';\n\nimport {complex} from './Complex';\n\nconst MUL = 'return a * b;';\n\nexport function multiply(\n args: {inputs: BinaryInputs, backend: MathBackendWebGL}): TensorInfo {\n const {inputs, backend} = args;\n const {a, b} = inputs;\n const dtype = backend_util.upcastType(a.dtype, b.dtype);\n\n if (a.dtype === 'complex64') {\n const aData = backend.texData.get(a.dataId);\n const bData = backend.texData.get(b.dataId);\n\n const realProgram = new BinaryOpComplexProgram(\n binaryop_complex_gpu.COMPLEX_MULTIPLY.REAL, a.shape, b.shape);\n const imagProgram = new BinaryOpComplexProgram(\n binaryop_complex_gpu.COMPLEX_MULTIPLY.IMAG, a.shape, b.shape);\n\n const inputs = [\n {\n dataId: aData.complexTensorInfos.real.dataId,\n dtype: aData.complexTensorInfos.real.dtype,\n shape: a.shape\n },\n {\n dataId: aData.complexTensorInfos.imag.dataId,\n dtype: aData.complexTensorInfos.imag.dtype,\n shape: a.shape\n },\n {\n dataId: bData.complexTensorInfos.real.dataId,\n dtype: bData.complexTensorInfos.real.dtype,\n shape: b.shape\n },\n {\n dataId: bData.complexTensorInfos.imag.dataId,\n dtype: bData.complexTensorInfos.imag.dtype,\n shape: b.shape\n }\n ];\n\n const realPart = backend.runWebGLProgram(realProgram, inputs, 'float32');\n const imagPart = backend.runWebGLProgram(imagProgram, inputs, 'float32');\n\n const complexOutput =\n complex({inputs: {real: realPart, imag: imagPart}, backend});\n\n backend.disposeIntermediateTensorInfo(realPart);\n backend.disposeIntermediateTensorInfo(imagPart);\n\n // TODO(annxingyuan): CPU forwarding for complex inputs.\n return complexOutput;\n }\n\n if (backend.shouldExecuteOnCPU([a, b])) {\n const aData = backend.texData.get(a.dataId);\n const bData = backend.texData.get(b.dataId);\n const [outValues, outShape] = cpuMultiply(\n a.shape, b.shape, aData.values as TypedArray,\n bData.values as TypedArray, dtype);\n\n const out = backend.makeTensorInfo(outShape, dtype);\n const outData = backend.texData.get(out.dataId);\n outData.values = outValues;\n return out;\n }\n\n let program: BinaryOpProgram|BinaryOpPackedProgram;\n if (env().getBool('WEBGL_PACK_BINARY_OPERATIONS')) {\n program = new BinaryOpPackedProgram(MUL, a.shape, b.shape);\n } else {\n program = new BinaryOpProgram(MUL, a.shape, b.shape);\n }\n\n return backend.runWebGLProgram(program, [a, b], dtype);\n}\n\nexport const multiplyConfig: KernelConfig = {\n 
kernelName: Multiply,\n backendName: 'webgl',\n kernelFunc: multiply\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Reshape, ReshapeAttrs, ReshapeInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {packedReshape} from '../kernel_utils/reshape';\nimport {isReshapeFree} from '../webgl_util';\n\nexport function reshape(args: {\n inputs: ReshapeInputs,\n backend: MathBackendWebGL,\n attrs: ReshapeAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {shape} = attrs;\n const webglBackend = backend;\n\n const xSize = util.sizeFromShape(x.shape);\n const $shape = util.inferFromImplicitShape(shape, xSize);\n const $xSize = util.sizeFromShape($shape);\n\n util.assert(\n xSize === $xSize,\n () => `The new shape (${$shape}) has ${$xSize} elements and the old ` +\n `shape (${x.shape}) has ${xSize} elements. The new shape and old ` +\n `shape must have the same number of elements.`);\n\n const xTexData = webglBackend.texData.get(x.dataId);\n if (xTexData.isPacked && !isReshapeFree(x.shape, $shape) &&\n !(xTexData.texture !== null && isReshapeFree(xTexData.shape, $shape))) {\n return packedReshape(x, $shape, webglBackend);\n }\n\n webglBackend.incRef(x.dataId);\n\n return {dataId: x.dataId, shape: $shape, dtype: x.dtype};\n}\n\nexport const reshapeConfig: KernelConfig = {\n kernelName: Reshape,\n backendName: 'webgl',\n kernelFunc: reshape as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {ReshapePackedProgram} from '../reshape_packed_gpu';\nimport {getBatchDim, getRowsCols} from '../webgl_util';\n\nexport function packedReshape(\n input: TensorInfo, afterShape: number[],\n backend: MathBackendWebGL): TensorInfo {\n const input3DShape =\n [getBatchDim(input.shape),\n ...getRowsCols(input.shape)] as [number, number, number];\n const input3D: TensorInfo = {\n dtype: input.dtype,\n shape: input3DShape,\n dataId: input.dataId\n };\n const afterShapeAs3D =\n [getBatchDim(afterShape),\n ...getRowsCols(afterShape)] as [number, number, number];\n\n const program = new ReshapePackedProgram(afterShapeAs3D, input3DShape);\n const preventEagerUnpackingOfOutput = true;\n const customValues = [input3DShape];\n const output = backend.runWebGLProgram(\n program, [input3D], input.dtype, customValues,\n preventEagerUnpackingOfOutput);\n return {dataId: output.dataId, shape: afterShape, dtype: output.dtype};\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, util} from '@tensorflow/tfjs-core';\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class MeanProgram implements GPGPUProgram {\n variableNames = ['x'];\n outputShape: number[];\n userCode: string;\n\n constructor(reduceInfo: backend_util.ReduceInfo, divisor?: number) {\n const {windowSize, batchSize, inSize, outSize} = reduceInfo;\n this.outputShape = [batchSize, outSize];\n\n const windowSizeNearestVec4 = Math.floor(windowSize / 4) * 4;\n const windowSizeVec4Remainder = windowSize % 4;\n\n let updateSnippet = `sumValue += dot(values, ones);`;\n if (divisor != null) {\n const denominator = 1 / divisor;\n updateSnippet = `sumValue += dot(values * ${\n util.isInt(denominator) ? 
denominator.toPrecision(2) :\n denominator}, ones);`;\n }\n\n let checkOutOfBounds = '';\n if (inSize % windowSize > 0) {\n checkOutOfBounds = `\n if (inIdx < 0 || inIdx >= ${inSize}) {\n return 0.0;\n }\n `;\n }\n\n this.userCode = `\n const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0);\n\n float getValue(int batch, int inIdx) {\n ${checkOutOfBounds}\n return getX(batch, inIdx);\n }\n\n void main() {\n ivec2 coords = getOutputCoords();\n int batch = coords[0];\n int outIdx = coords[1];\n int inOffset = outIdx * ${windowSize};\n\n float sumValue = 0.0;\n\n for (int i = 0; i < ${windowSizeNearestVec4}; i += 4) {\n int inIdx = inOffset + i;\n vec4 values = vec4(\n getValue(batch, inIdx),\n getValue(batch, inIdx + 1),\n getValue(batch, inIdx + 2),\n getValue(batch, inIdx + 3)\n );\n\n ${updateSnippet}\n }\n\n int inIdx = inOffset + ${windowSizeNearestVec4};\n if (${windowSizeVec4Remainder === 1}) {\n vec4 values = vec4(getValue(batch, inIdx), 0.0, 0.0, 0.0);\n\n ${updateSnippet}\n } else if (${windowSizeVec4Remainder === 2}) {\n vec4 values = vec4(\n getValue(batch, inIdx),\n getValue(batch, inIdx + 1), 0.0, 0.0);\n\n ${updateSnippet}\n } else if (${windowSizeVec4Remainder === 3}) {\n vec4 values = vec4(\n getValue(batch, inIdx),\n getValue(batch, inIdx + 1),\n getValue(batch, inIdx + 2), 0.0);\n\n ${updateSnippet}\n }\n setOutput(sumValue);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class ReduceProgram implements GPGPUProgram {\n variableNames = ['x'];\n outputShape: number[];\n userCode: string;\n\n constructor(\n reduceInfo: backend_util.ReduceInfo,\n reduceType: 'all'|'any'|'max'|'min'|'sum'|'prod') {\n const {windowSize, batchSize, inSize, outSize} = reduceInfo;\n this.outputShape = [batchSize, outSize];\n\n let initializationValue = '0.0';\n let compareOp = ``;\n\n if (reduceType === 'prod') {\n initializationValue = '1.0';\n } else if (reduceType === 'min') {\n // WebGL on Firefox Linux can't compile 1/0 so we do 1/eps.\n initializationValue = '1.0 / 1e-20';\n compareOp = `min`;\n } else if (reduceType === 'max') {\n // WebGL on Firefox Linux can't compile 1/0 so we do 1/eps.\n initializationValue = '-1.0 / 1e-20';\n compareOp = `max`;\n }\n\n let returnValue = `${reduceType}(${reduceType}(${reduceType}(` +\n 'minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])';\n\n if (reduceType === 'sum') {\n returnValue = `sumValue`;\n } else if (reduceType === 'prod') {\n returnValue = `prodValue`;\n } else if (reduceType === 'all') {\n returnValue = `allValue`;\n } else if (reduceType === 'any') {\n returnValue = `anyValue`;\n }\n\n const windowSizeNearestVec4 = Math.floor(windowSize / 4) * 4;\n const windowSizeVec4Remainder = windowSize % 4;\n\n let updateSnippet = `\n if (${reduceType === 
'sum'}) {\n sumValue += dot(values, ones);\n } else if (${reduceType === 'prod'}) {\n vec2 tmp = vec2(values[0], values[1]) * vec2(values[2], values[3]);\n prodValue *= tmp[0] * tmp[1];\n } else {\n minMaxValue = ${compareOp}(values, minMaxValue);\n if (${reduceType === 'min'} || ${reduceType === 'max'}) {\n minMaxValue = ${compareOp}(values, minMaxValue);\n bvec4 isNaN = isnan(values);\n if (isNaN.r || isNaN.g || isNaN.b || isNaN.a) {\n minMaxValue = vec4(NAN);\n }\n }\n }\n `;\n\n let vecType = `vec4`;\n\n if (reduceType === 'all') {\n initializationValue = '1.0';\n updateSnippet = `\n bool reducedAllValue = all(values);\n float floatedReducedAllValue = float(reducedAllValue);\n allValue = float(allValue >= 1.0 && floatedReducedAllValue >= 1.0);\n `;\n vecType = `bvec4`;\n } else if (reduceType === 'any') {\n initializationValue = '0.0';\n updateSnippet = `\n bool reducedAnyValue = any(values);\n float floatedReducedAnyValue = float(reducedAnyValue);\n anyValue = float(anyValue >= 1.0 || floatedReducedAnyValue >= 1.0);\n `;\n vecType = `bvec4`;\n }\n\n let checkOutOfBounds = '';\n if (inSize % windowSize > 0) {\n checkOutOfBounds = `\n if (inIdx < 0 || inIdx >= ${inSize}) {\n return initializationValue;\n }\n `;\n }\n this.userCode = `\n const float initializationValue = ${initializationValue};\n const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0);\n\n float getValue(int batch, int inIdx) {\n ${checkOutOfBounds}\n return getX(batch, inIdx);\n }\n\n void main() {\n ivec2 coords = getOutputCoords();\n int batch = coords[0];\n int outIdx = coords[1];\n int inOffset = outIdx * ${windowSize};\n\n vec4 minMaxValue = vec4(${initializationValue});\n float prodValue = 1.0;\n float sumValue = 0.0;\n float allValue = 1.0;\n float anyValue = 0.0;\n\n for (int i = 0; i < ${windowSizeNearestVec4}; i += 4) {\n int inIdx = inOffset + i;\n ${vecType} values = ${vecType}(\n getValue(batch, inIdx),\n getValue(batch, inIdx + 1),\n getValue(batch, inIdx + 2),\n getValue(batch, inIdx + 3)\n );\n\n ${updateSnippet}\n }\n\n int inIdx = inOffset + ${windowSizeNearestVec4};\n if (${windowSizeVec4Remainder === 1}) {\n ${vecType} values = ${vecType}(\n getValue(batch, inIdx),\n initializationValue,\n initializationValue,\n initializationValue\n );\n\n ${updateSnippet}\n } else if (${windowSizeVec4Remainder === 2}) {\n ${vecType} values = ${vecType}(\n getValue(batch, inIdx),\n getValue(batch, inIdx + 1),\n initializationValue,\n initializationValue\n );\n\n ${updateSnippet}\n } else if (${windowSizeVec4Remainder === 3}) {\n ${vecType} values = ${vecType}(\n getValue(batch, inIdx),\n getValue(batch, inIdx + 1),\n getValue(batch, inIdx + 2),\n initializationValue\n );\n\n ${updateSnippet}\n }\n setOutput(${returnValue});\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, DataType, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {MeanProgram} from '../mean_gpu';\nimport {ReduceProgram} from '../reduce_gpu';\n\ntype ReduceTypes = 'all'|'any'|'max'|'min'|'sum'|'prod'|'mean';\n\n// Returns an array of configuration objects that describe each stage of the\n// reduction.\nfunction getReductionStages(inShape: number[]):\n Array<{inSize: number, windowSize: number, outSize: number}> {\n const stages = [];\n\n while (stages.length === 0 || stages[stages.length - 1].outSize !== 1) {\n const outSize: number =\n stages.length ? stages[stages.length - 1].outSize : inShape[1];\n const windowSize = backend_util.computeOptimalWindowSize(outSize);\n stages.push({\n inSize: outSize,\n windowSize,\n outSize: Math.ceil(outSize / windowSize)\n });\n }\n\n return stages;\n}\n\nexport function reduce(\n x: TensorInfo, dtype: DataType, reductionType: ReduceTypes,\n backend: MathBackendWebGL): TensorInfo {\n const reductionStages = getReductionStages(x.shape);\n\n let result = x;\n for (let i = 0; i < reductionStages.length; i++) {\n const {inSize, windowSize, outSize} = reductionStages[i];\n\n let program: ReduceProgram|MeanProgram;\n let previousResult: TensorInfo;\n if (reductionType === 'mean') {\n program = i === 0 ?\n new MeanProgram(\n {windowSize, inSize, batchSize: x.shape[0], outSize}, inSize) :\n new MeanProgram({windowSize, inSize, batchSize: x.shape[0], outSize});\n } else {\n program = new ReduceProgram(\n {windowSize, inSize, batchSize: x.shape[0], outSize}, reductionType);\n }\n\n previousResult = result;\n result = backend.runWebGLProgram(program, [result], dtype);\n\n if (previousResult.dataId !== x.dataId) {\n backend.disposeIntermediateTensorInfo(previousResult);\n }\n }\n\n return result;\n}\n","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {getCoordsDataType} from './shader_compiler';\n\nexport class TransposeProgram implements GPGPUProgram {\n variableNames = ['A'];\n outputShape: number[];\n userCode: string;\n rank: number;\n\n constructor(aShape: number[], newDim: number[]) {\n const outputShape: number[] = new Array(aShape.length);\n for (let i = 0; i < outputShape.length; i++) {\n outputShape[i] = aShape[newDim[i]];\n }\n this.outputShape = outputShape;\n this.rank = outputShape.length;\n const dtype = getCoordsDataType(this.rank);\n const switched = getSwitchedCoords(newDim);\n\n this.userCode = `\n void main() {\n ${dtype} resRC = getOutputCoords();\n setOutput(getA(${switched}));\n }\n `;\n }\n}\n\nfunction getSwitchedCoords(newDim: number[]): string {\n const rank = newDim.length;\n if (rank > 6) {\n throw Error(`Transpose for rank ${rank} is not yet supported`);\n }\n const originalOrder =\n ['resRC.x', 'resRC.y', 'resRC.z', 'resRC.w', 'resRC.u', 'resRC.v'];\n const switchedCoords = new Array(rank);\n for (let i = 0; i < newDim.length; i++) {\n switchedCoords[newDim[i]] = originalOrder[i];\n }\n return switchedCoords.join();\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {getVecChannels} from './packing_util';\nimport {getCoordsDataType} from './shader_compiler';\n\nexport class TransposePackedProgram implements GPGPUProgram {\n variableNames = ['A'];\n outputShape: number[];\n userCode: string;\n rank: number;\n packedInputs = true;\n packedOutput = true;\n\n constructor(aShape: number[], newDim: number[]) {\n const outputShape: number[] = new Array(aShape.length);\n for (let i = 0; i < outputShape.length; i++) {\n outputShape[i] = aShape[newDim[i]];\n }\n this.outputShape = outputShape;\n this.rank = outputShape.length;\n if (this.rank > 6) {\n throw Error(\n `Packed transpose for rank ${this.rank} is not yet supported.`);\n }\n const dtype = getCoordsDataType(this.rank);\n\n const outputOrder = getVecChannels('rc', this.rank);\n const switchedOrder = new Array(this.rank);\n for (let i = 0; i < newDim.length; i++) {\n switchedOrder[newDim[i]] = outputOrder[i];\n }\n const innerDims = `vec2(${switchedOrder.slice(-2).join()})`;\n const nextColumn =\n `++${outputOrder[this.rank - 1]} < ${outputShape[this.rank - 1]}`;\n const getc = `getChannel(getA(${switchedOrder.join()}), ${innerDims})`;\n\n this.userCode = `\n void main() {\n ${dtype} rc = getOutputCoords();\n vec4 result = vec4(0.);\n result[0] = ${getc};\n if(${nextColumn}) {\n result[1] = ${getc};\n }\n --${outputOrder[this.rank - 1]};\n if(++${outputOrder[this.rank - 2]} < ${outputShape[this.rank - 2]}) {\n result[2] = ${getc};\n if(${nextColumn}) {\n result[3] = ${getc};\n }\n }\n setOutput(result);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {transposeImplCPU} from '../kernel_utils/shared';\nimport {TransposeProgram} from '../transpose_gpu';\nimport {TransposePackedProgram} from '../transpose_packed_gpu';\n\nexport function transposeImpl(\n x: TensorInfo, perm: number[], backend: MathBackendWebGL): TensorInfo {\n const program = env().getBool('WEBGL_PACK_ARRAY_OPERATIONS') ?\n new TransposePackedProgram(x.shape, perm) :\n new TransposeProgram(x.shape, perm);\n return backend.runWebGLProgram(program, [x], x.dtype);\n}\n\nexport {transposeImplCPU};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Sum, SumAttrs, SumInputs} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\n\nimport {sumImpl} from './Sum_impl';\n\nexport function sum(\n args: {inputs: SumInputs, attrs: SumAttrs, backend: MathBackendWebGL}) {\n const {inputs, backend, attrs} = args;\n\n const {x} = inputs;\n const {axis, keepDims} = attrs;\n\n return sumImpl(x, axis, keepDims, backend);\n}\n\nexport const sumConfig: KernelConfig = {\n kernelName: Sum,\n backendName: 'webgl',\n kernelFunc: sum as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, sumOutType, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {reduce} from '../kernel_utils/reduce';\nimport {reshape} from './Reshape';\n\nimport {transposeImpl} from './Transpose_impl';\n\nexport function sumImpl(\n x: TensorInfo, axis: number|number[], keepDims: boolean,\n backend: MathBackendWebGL): TensorInfo {\n const reductionIndices = axis;\n\n const xRank = x.shape.length;\n\n const origAxes = util.parseAxisParam(reductionIndices, x.shape);\n let axes = origAxes;\n const permutedAxes = backend_util.getAxesPermutation(axes, xRank);\n const sumInputIsTransposed = permutedAxes != null;\n\n let sumInput = x;\n if (sumInputIsTransposed) {\n sumInput = transposeImpl(x, permutedAxes, backend);\n\n axes = backend_util.getInnerMostAxes(axes.length, xRank);\n }\n\n backend_util.assertAxesAreInnerMostDims('sum', axes, xRank);\n const [sumOutShape, reduceShape] =\n backend_util.computeOutAndReduceShapes(sumInput.shape, axes);\n\n let outShape = sumOutShape;\n if (keepDims) {\n // rather than reshape at the end, set the target shape here.\n outShape = backend_util.expandShapeToKeepDim(sumOutShape, origAxes);\n }\n\n const inSize = util.sizeFromShape(reduceShape);\n const xSize = util.sizeFromShape(x.shape);\n const batchSize = xSize / inSize;\n const reshapedInput = reshape(\n {inputs: {x: sumInput}, attrs: {shape: [batchSize, inSize]}, backend});\n\n const outType = sumOutType(x.dtype);\n\n const reduced = reduce(reshapedInput, outType, 'sum', backend);\n const out =\n reshape({inputs: {x: reduced}, attrs: {shape: outShape}, backend});\n\n backend.disposeIntermediateTensorInfo(reshapedInput);\n backend.disposeIntermediateTensorInfo(reduced);\n if (sumInputIsTransposed) {\n backend.disposeIntermediateTensorInfo(sumInput);\n }\n\n return out;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, TensorInfo, Transpose, TransposeAttrs, TransposeInputs, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\n\nimport {transposeImpl} from './Transpose_impl';\nimport {transposeImplCPU as cpuTranspose} from './Transpose_impl';\n\nexport function transpose(args: {\n inputs: TransposeInputs,\n attrs: TransposeAttrs,\n backend: MathBackendWebGL\n}) {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {perm} = attrs;\n const webglBackend = backend;\n\n const xRank = x.shape.length;\n\n const newShape: number[] = new Array(xRank);\n for (let i = 0; i < newShape.length; i++) {\n newShape[i] = x.shape[perm[i]];\n }\n\n let out: TensorInfo;\n if (webglBackend.shouldExecuteOnCPU([x])) {\n const xTexData = webglBackend.texData.get(x.dataId);\n const values = xTexData.values as TypedArray;\n const outValues = cpuTranspose(values, x.shape, x.dtype, perm, newShape);\n\n out = webglBackend.makeTensorInfo(newShape, x.dtype);\n const outData = webglBackend.texData.get(out.dataId);\n outData.values = outValues;\n } else {\n out = transposeImpl(x, perm, webglBackend);\n }\n return out;\n}\n\nexport const transposeConfig: KernelConfig = {\n kernelName: Transpose,\n backendName: 'webgl',\n kernelFunc: transpose as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, broadcast_util, TensorInfo, upcastType, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {mapActivationToShaderProgram} from '../kernel_utils/kernel_funcs_utils';\nimport {MatMulPackedProgram} from '../mulmat_packed_gpu';\n\nimport {multiply} from './Multiply';\nimport {reshape} from './Reshape';\nimport {sum} from './Sum';\nimport {transpose} from './Transpose';\n\n// Empirically determined minimal shared dimension in matmul before we forward\n// to a.mul(b).sum() in order to take advantage of GPU parallelism. 
See\n// https://github.com/tensorflow/tfjs-core/pull/1379 for benchmarks.\nexport const MATMUL_SHARED_DIM_THRESHOLD = 1000;\n\ntype BatchMatMulConfig = {\n a: TensorInfo,\n b: TensorInfo,\n transposeA: boolean,\n transposeB: boolean,\n backend: MathBackendWebGL,\n bias?: TensorInfo,\n preluActivationWeights?: TensorInfo,\n leakyreluAlpha?: number,\n activation?: backend_util.Activation\n};\n\nexport function batchMatMulImpl({\n a,\n b,\n transposeA,\n transposeB,\n backend,\n bias = null,\n preluActivationWeights = null,\n leakyreluAlpha = 0,\n activation = null\n}: BatchMatMulConfig): TensorInfo {\n const aRank = a.shape.length;\n const bRank = b.shape.length;\n\n const innerShapeA = transposeA ? a.shape[aRank - 2] : a.shape[aRank - 1];\n const innerShapeB = transposeB ? b.shape[bRank - 1] : b.shape[bRank - 2];\n\n const outerShapeA = transposeA ? a.shape[aRank - 1] : a.shape[aRank - 2];\n const outerShapeB = transposeB ? b.shape[bRank - 2] : b.shape[bRank - 1];\n\n const outerDimsA = a.shape.slice(0, -2);\n const outerDimsB = b.shape.slice(0, -2);\n\n const batchDimA = util.sizeFromShape(outerDimsA);\n const batchDimB = util.sizeFromShape(outerDimsB);\n\n const outShapeOuterDims = broadcast_util.assertAndGetBroadcastShape(\n a.shape.slice(0, -2), b.shape.slice(0, -2));\n const outShape = outShapeOuterDims.concat([outerShapeA, outerShapeB]);\n\n util.assert(\n innerShapeA === innerShapeB,\n () => `Error in matMul: inner shapes (${innerShapeA}) and (` +\n `${innerShapeB}) of Tensors with shapes ${a.shape} and ` +\n `${b.shape} and transposeA=${transposeA}` +\n ` and transposeB=${transposeB} must match.`);\n\n const a3dShape: [number, number, number] = transposeA ?\n [batchDimA, innerShapeA, outerShapeA] :\n [batchDimA, outerShapeA, innerShapeA];\n const b3dShape: [number, number, number] = transposeB ?\n [batchDimB, outerShapeB, innerShapeB] :\n [batchDimB, innerShapeB, outerShapeB];\n\n // The rest of the implementation is designed to operate on rank-3 tensors\n const a3d = reshape({inputs: {x: a}, backend, attrs: {shape: a3dShape}});\n const b3d = reshape({inputs: {x: b}, backend, attrs: {shape: b3dShape}});\n\n const intermediates: TensorInfo[] = [a3d, b3d];\n\n const batchDim = Math.max(batchDimA, batchDimB);\n const sharedDim = transposeA ? 
a3d.shape[1] : a3d.shape[2];\n\n const hasBias = bias != null;\n const hasPreluActivationWeights = preluActivationWeights != null;\n const hasLeakyreluAlpha = activation === 'leakyrelu';\n const fusedActivation = activation != null ?\n mapActivationToShaderProgram(activation, true) :\n null;\n const containsFusedOps = hasBias || hasPreluActivationWeights ||\n hasLeakyreluAlpha || fusedActivation != null;\n let out: TensorInfo;\n\n // Since the matrices are vectors, it is faster to call mul().sum()\n // because sum() is O(sqrt(N)) due to divide-and-conquer.\n if ((outerShapeA === 1 || outerShapeB === 1) &&\n sharedDim > MATMUL_SHARED_DIM_THRESHOLD && containsFusedOps === false) {\n let aVec = a3d;\n let bVec = b3d;\n if (transposeA) {\n aVec = transpose({inputs: {x: a3d}, backend, attrs: {perm: [0, 2, 1]}});\n intermediates.push(aVec);\n }\n if (transposeB) {\n bVec = transpose({inputs: {x: b3d}, backend, attrs: {perm: [0, 2, 1]}});\n intermediates.push(bVec);\n }\n\n const shouldReshapeA = outerShapeB !== 1;\n const shouldReshapeB = outerShapeB === 1;\n\n let aVec3d = aVec;\n if (shouldReshapeA) {\n aVec3d = reshape({\n inputs: {x: aVec},\n backend,\n attrs: {shape: [batchDim, sharedDim, 1]}\n });\n\n intermediates.push(aVec3d);\n }\n\n const axis = outerShapeB === 1 ? 2 : 1;\n\n let bVec3d = bVec;\n if (shouldReshapeB) {\n bVec3d = reshape({\n inputs: {x: bVec},\n backend,\n attrs: {shape: [batchDim, 1, sharedDim]}\n });\n\n intermediates.push(bVec3d);\n }\n\n const product = multiply({inputs: {a: aVec3d, b: bVec3d}, backend});\n out = sum({inputs: {x: product}, backend, attrs: {axis, keepDims: true}});\n intermediates.push(product);\n } else {\n const dtype = upcastType(a.dtype, b.dtype);\n\n const program = new MatMulPackedProgram(\n a3dShape, b3dShape, [batchDim, outerShapeA, outerShapeB], transposeA,\n transposeB, hasBias, fusedActivation, hasPreluActivationWeights,\n hasLeakyreluAlpha);\n\n const inputs: TensorInfo[] = [a3d, b3d];\n if (bias != null) {\n inputs.push(bias);\n }\n if (hasPreluActivationWeights) {\n inputs.push(preluActivationWeights);\n }\n if (hasLeakyreluAlpha) {\n const $leakyreluAlpha = backend.makeTensorInfo(\n [], 'float32',\n util.createScalarValue(leakyreluAlpha as unknown as 'float32', 'float32'));\n inputs.push($leakyreluAlpha);\n intermediates.push($leakyreluAlpha);\n }\n\n out = backend.runWebGLProgram(program, inputs, dtype);\n }\n\n const outReshaped =\n reshape({inputs: {x: out}, backend, attrs: {shape: outShape}});\n intermediates.push(out);\n for (const i of intermediates) {\n backend.disposeIntermediateTensorInfo(i);\n }\n return outReshaped;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {_FusedMatMul, _FusedMatMulAttrs, _FusedMatMulInputs, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {batchMatMulImpl} from './BatchMatMul_impl';\n\nexport function _fusedMatMul(args: {\n inputs: _FusedMatMulInputs,\n attrs: _FusedMatMulAttrs,\n backend: MathBackendWebGL\n}) {\n const {inputs, backend, attrs} = args;\n const {a, b, bias, preluActivationWeights} = inputs;\n const {transposeA, transposeB, activation, leakyreluAlpha} = attrs;\n\n return batchMatMulImpl({\n a,\n b,\n transposeA,\n transposeB,\n backend,\n bias,\n preluActivationWeights,\n leakyreluAlpha,\n activation\n });\n}\n\nexport const _fusedMatMulConfig: KernelConfig = {\n kernelName: _FusedMatMul,\n backendName: 'webgl',\n kernelFunc: _fusedMatMul as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Abs, AbsInputs, env, KernelConfig, KernelFunc, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {simpleAbsImplCPU} from '../kernel_utils/shared';\nimport {UnaryOpProgram} from '../unaryop_gpu';\nimport {UnaryOpPackedProgram} from '../unaryop_packed_gpu';\n\nconst ABS = `return abs(x);`;\n\nexport function abs(args: {inputs: AbsInputs, backend: MathBackendWebGL}):\n TensorInfo {\n const {inputs, backend} = args;\n const {x} = inputs;\n\n // TODO: handle cases when x is complex. 
Once the cpu implementation\n // can handle complex values, refactor to use unaryKernelFunc.\n if (backend.shouldExecuteOnCPU([x]) && x.dtype !== 'complex64') {\n const xData = backend.texData.get(x.dataId);\n const outValues = simpleAbsImplCPU(xData.values as TypedArray);\n return backend.makeTensorInfo(x.shape, x.dtype, outValues);\n }\n\n let program: UnaryOpProgram|UnaryOpPackedProgram;\n if (env().getBool('WEBGL_PACK_UNARY_OPERATIONS')) {\n program = new UnaryOpPackedProgram(x.shape, ABS);\n } else {\n program = new UnaryOpProgram(x.shape, ABS);\n }\n return backend.runWebGLProgram(program, [x], x.dtype);\n}\n\nexport const absConfig: KernelConfig = {\n kernelName: Abs,\n backendName: 'webgl',\n kernelFunc: abs as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Acos, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {CHECK_NAN_SNIPPET} from '../unaryop_gpu';\n\nconst ACOS = CHECK_NAN_SNIPPET + `\n if (abs(x) > 1.) {\n return NAN;\n }\n return acos(x);\n`;\n\nexport const acos = unaryKernelFunc({opSnippet: ACOS});\n\nexport const acosConfig: KernelConfig = {\n kernelName: Acos,\n backendName: 'webgl',\n kernelFunc: acos,\n};\n","\n/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Acosh, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {CHECK_NAN_SNIPPET} from '../unaryop_gpu';\n\nconst ACOSH = CHECK_NAN_SNIPPET + `\n if (x < 1.0) return NAN;\nreturn log(x + sqrt(x * x - 1.0));`;\n\nexport const acosh = unaryKernelFunc({opSnippet: ACOSH});\n\nexport const acoshConfig: KernelConfig = {\n kernelName: Acosh,\n backendName: 'webgl',\n kernelFunc: acosh,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Add, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {addImplCPU as cpuAdd} from '../kernel_utils/shared';\n\nconst ADD = 'return a + b;';\n\nexport const addKernelFunc = binaryKernelFunc({\n opSnippet: ADD,\n packedOpSnippet: ADD,\n supportsComplex: true,\n cpuKernelImpl: cpuAdd\n});\n\nexport const addConfig: KernelConfig = {\n kernelName: Add,\n backendName: 'webgl',\n kernelFunc: addKernelFunc\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class AddNProgram implements GPGPUProgram {\n variableNames: string[];\n outputShape: number[] = [];\n userCode: string;\n\n constructor(outputShape: number[], shapes: number[][]) {\n this.outputShape = outputShape;\n this.variableNames = shapes.map((_, i) => `T${i}`);\n\n const snippets: string[] = [];\n // Get target elements from every input tensor.\n this.variableNames.forEach(variable => {\n snippets.push(`float v${variable} = get${variable}AtOutCoords();`);\n });\n\n // Calculate the sum of all elements.\n const operation = this.variableNames\n .map(variable => {\n return `v${variable}`;\n })\n .join(' + ');\n\n this.userCode = `\n void main() {\n ${snippets.join('\\n ')}\n\n float result = ${operation};\n setOutput(result);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class AddNPackedProgram implements GPGPUProgram {\n variableNames: string[];\n outputShape: number[] = [];\n userCode: string;\n packedInputs = true;\n packedOutput = true;\n\n constructor(outputShape: number[], shapes: number[][]) {\n this.outputShape = outputShape;\n this.variableNames = shapes.map((_, i) => `T${i}`);\n\n const snippets: string[] = [];\n // Get target elements from every input tensor.\n this.variableNames.forEach(variable => {\n snippets.push(`vec4 v${variable} = get${variable}AtOutCoords();`);\n });\n\n // Calculate the sum of all elements.\n const operation = this.variableNames\n .map(variable => {\n return `v${variable}`;\n })\n .join(' + ');\n\n this.userCode = `\n void main() {\n ${snippets.join('\\n ')}\n\n vec4 result = ${operation};\n setOutput(result);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {AddN, AddNInputs, env, KernelConfig, KernelFunc, TensorInfo, upcastType} from '@tensorflow/tfjs-core';\n\nimport {AddNProgram} from '../addn_gpu';\nimport {AddNPackedProgram} from '../addn_packed_gpu';\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {identity} from './Identity';\n\nexport function addN(args: {inputs: AddNInputs, backend: MathBackendWebGL}):\n TensorInfo {\n const {inputs, backend} = args;\n\n const tensors = inputs;\n if (tensors.length === 1) {\n return identity({inputs: {x: tensors[0]}, backend});\n }\n\n // Limit the number of uploaded textures for optimization.\n if (tensors.length > env().get('WEBGL_MAX_TEXTURES_IN_SHADER')) {\n const midIndex = Math.floor(tensors.length / 2);\n const leftSide = addN({inputs: tensors.slice(0, midIndex), backend});\n const rightSide = addN({inputs: tensors.slice(midIndex), backend});\n return addN({inputs: [leftSide, rightSide], backend});\n }\n\n const dtype =\n tensors.map(t => t.dtype).reduce((d1, d2) => upcastType(d1, d2));\n const shapes = tensors.map(t => t.shape);\n // We can make sure shapes are identical in op level.\n const usePackedOp = env().getBool('WEBGL_PACK');\n const program = usePackedOp ?\n new AddNPackedProgram(tensors[0].shape, shapes) :\n new AddNProgram(tensors[0].shape, 
shapes);\n return backend.runWebGLProgram(program, tensors, dtype);\n}\n\nexport const addNConfig: KernelConfig = {\n kernelName: AddN,\n backendName: 'webgl',\n kernelFunc: addN as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {All, AllAttrs, AllInputs, backend_util, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {reduce} from '../kernel_utils/reduce';\n\nimport {reshape} from './Reshape';\nimport {transpose} from './Transpose';\n\nexport function all(\n args: {inputs: AllInputs, backend: MathBackendWebGL, attrs: AllAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis, keepDims} = attrs;\n\n const xRank = x.shape.length;\n\n const origAxes = util.parseAxisParam(axis, x.shape);\n let axes = origAxes;\n const permutedAxes = backend_util.getAxesPermutation(axes, xRank);\n let permutedX = x;\n if (permutedAxes != null) {\n permutedX = transpose({inputs: {x}, backend, attrs: {perm: permutedAxes}});\n axes = backend_util.getInnerMostAxes(axes.length, xRank);\n }\n\n backend_util.assertAxesAreInnerMostDims('all', axes, xRank);\n const [outShape, reduceShape] =\n backend_util.computeOutAndReduceShapes(permutedX.shape, axes);\n const inSize = util.sizeFromShape(reduceShape);\n\n const a2D =\n reshape({inputs: {x: permutedX}, backend, attrs: {shape: [-1, inSize]}});\n const reduced = reduce(a2D, a2D.dtype, 'all', backend);\n\n let res;\n if (keepDims) {\n const newShape = backend_util.expandShapeToKeepDim(outShape, origAxes);\n res = reshape({inputs: {x: reduced}, backend, attrs: {shape: newShape}});\n } else {\n res = reshape({inputs: {x: reduced}, backend, attrs: {shape: outShape}});\n }\n\n backend.disposeIntermediateTensorInfo(a2D);\n backend.disposeIntermediateTensorInfo(reduced);\n\n if (permutedAxes != null) {\n backend.disposeIntermediateTensorInfo(permutedX);\n }\n\n return res;\n}\n\nexport const allConfig: KernelConfig = {\n kernelName: All,\n backendName: 'webgl',\n kernelFunc: all as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Any, AnyAttrs, AnyInputs, backend_util, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {reduce} from '../kernel_utils/reduce';\nimport {reshape} from './Reshape';\nimport {transpose} from './Transpose';\n\nexport function any(\n args: {inputs: AnyInputs, backend: MathBackendWebGL, attrs: AnyAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis, keepDims} = attrs;\n\n const xRank = x.shape.length;\n\n const origAxes = util.parseAxisParam(axis, x.shape);\n let axes = origAxes;\n const permutedAxes = backend_util.getAxesPermutation(axes, xRank);\n let permutedX = x;\n if (permutedAxes != null) {\n permutedX = transpose({inputs: {x}, backend, attrs: {perm: permutedAxes}});\n axes = backend_util.getInnerMostAxes(axes.length, xRank);\n }\n\n backend_util.assertAxesAreInnerMostDims('any', axes, xRank);\n const [outShape, reduceShape] =\n backend_util.computeOutAndReduceShapes(permutedX.shape, axes);\n const inSize = util.sizeFromShape(reduceShape);\n\n const a2D =\n reshape({inputs: {x: permutedX}, backend, attrs: {shape: [-1, inSize]}});\n const reduced = reduce(a2D, a2D.dtype, 'any', backend);\n\n let res;\n if (keepDims) {\n const newShape = backend_util.expandShapeToKeepDim(outShape, origAxes);\n res = reshape({inputs: {x: reduced}, backend, attrs: {shape: newShape}});\n } else {\n res = reshape({inputs: {x: reduced}, backend, attrs: {shape: outShape}});\n }\n\n backend.disposeIntermediateTensorInfo(a2D);\n backend.disposeIntermediateTensorInfo(reduced);\n\n if (permutedAxes != null) {\n backend.disposeIntermediateTensorInfo(permutedX);\n }\n\n return res;\n}\n\nexport const anyConfig: KernelConfig = {\n kernelName: Any,\n backendName: 'webgl',\n kernelFunc: any as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class ArgMinMaxProgram implements GPGPUProgram {\n variableNames = ['A'];\n outputShape: number[];\n userCode: string;\n\n constructor(\n reduceInfo: backend_util.ReduceInfo, op: 'max'|'min',\n firstPass: boolean) {\n const {windowSize, batchSize, outSize} = reduceInfo;\n if (!firstPass) {\n this.variableNames.push('bestIndicesA');\n }\n this.outputShape = [batchSize, outSize];\n const compOp = (op === 'max') ? '>' : '<';\n const indexSnippet = firstPass ?\n 'inOffset + i;' :\n 'round(getBestIndicesA(batch, inOffset + i));';\n\n this.userCode = `\n void main() {\n ivec2 coords = getOutputCoords();\n int batch = coords[0];\n int outIdx = coords[1];\n int inOffset = outIdx * ${windowSize};\n\n int bestIndex = inOffset;\n float bestValue = getA(batch, bestIndex);\n\n for (int i = 0; i < ${windowSize}; i++) {\n int inIdx = ${indexSnippet};\n float candidate = getA(batch, inIdx);\n if (candidate ${compOp} bestValue) {\n bestValue = candidate;\n bestIndex = inIdx;\n }\n }\n setOutput(float(bestIndex));\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {util} from '@tensorflow/tfjs-core';\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {getChannels} from './packing_util';\nimport {getCoordsDataType} from './shader_compiler';\n\nexport class ArgMinMaxPackedProgram implements GPGPUProgram {\n variableNames = ['A'];\n outputShape: number[];\n userCode: string;\n packedInputs = true;\n packedOutput = true;\n\n constructor(\n shape: number[], windowSize: number, op: 'max'|'min',\n firstPass: boolean) {\n util.assert(\n shape.length > 2,\n () => `Packed arg${\n op.charAt(0).toUpperCase() +\n op.slice(1)} supports only inputs with rank above 2.`);\n const inSize = shape[shape.length - 1];\n const outSize = Math.ceil(inSize / windowSize);\n this.outputShape = shape.slice(0, -1);\n if (outSize > 1) {\n this.outputShape.push(outSize);\n }\n if (!firstPass) {\n this.variableNames.push('bestIndicesA');\n }\n const outShape = this.outputShape;\n const rank = outShape.length;\n const dtype = getCoordsDataType(rank);\n const coords = getChannels('coords', rank);\n\n let sourceLocSetup;\n let sourceRank;\n if (outSize === 1) {\n sourceRank = rank + 1;\n const sourceLocDType = getCoordsDataType(sourceRank);\n sourceLocSetup = `\n ${sourceLocDType} sourceLocR = ${sourceLocDType}(${coords.join()}, 0);\n ++${coords[rank - 1]};\n ${sourceLocDType} sourceLocG = ${sourceLocDType}(${coords.join()}, 0);\n ++${coords[rank - 2]};\n ${sourceLocDType} sourceLocA = ${sourceLocDType}(${coords.join()}, 0);\n --${coords[rank - 1]};\n ${sourceLocDType} sourceLocB = ${sourceLocDType}(${coords.join()}, 0);\n --${coords[rank - 2]};`;\n } else {\n sourceRank = rank;\n sourceLocSetup = `\n ${dtype} sourceLocR = coords;\n ++${coords[rank - 1]};\n ${dtype} sourceLocG = coords;\n ++${coords[rank - 2]};\n ${dtype} sourceLocA = coords;\n --${coords[rank - 1]};\n ${dtype} sourceLocB = coords;\n --${coords[rank - 2]};`;\n }\n const channels = ['x', 'y', 'z', 'w', 'u', 'v'].slice(0, sourceRank);\n const inChannel = '.' + channels[sourceRank - 1]; // e.g. \".b\" for rank 3.\n const intChannels = channels.map(x => 'int ' + x);\n const srcRCoords =\n getChannels('sourceLocR', sourceRank - 1).concat('inIdx.r');\n const srcGCoords =\n getChannels('sourceLocG', sourceRank - 1).concat('inIdx.g');\n const srcBCoords =\n getChannels('sourceLocB', sourceRank - 1).concat('inIdx.b');\n const srcACoords =\n getChannels('sourceLocA', sourceRank - 1).concat('inIdx.a');\n\n const compOp = (op === 'max') ? 'greaterThan' : 'lessThan';\n const fetchCandidateIdx = firstPass ? '' : `\n inIdx = round(vec4(getBestIndicesAChannel(${srcRCoords.join()}),\n getBestIndicesAChannel(${srcGCoords.join()}),\n getBestIndicesAChannel(${srcBCoords.join()}),\n getBestIndicesAChannel(${srcACoords.join()})));`;\n\n const fetchValue = `vec4(\n getAChannel(${srcRCoords.join()}),\n hasNextCol ? 
getAChannel(${srcGCoords.join()}) : 0.,\n hasNextRow ? getAChannel(${srcBCoords.join()}) : 0.,\n hasNextRow && hasNextCol ? getAChannel(${srcACoords.join()}) : 0.)`;\n\n const getBestIndicesAChannelSnippet = firstPass ? '' : `\n float getBestIndicesAChannel(${intChannels.join()}) {\n return getChannel(getBestIndicesA(${channels.join()}),\n vec2(${channels.slice(-2).join()}));\n }`;\n\n this.userCode = `\n float getAChannel(${intChannels.join()}) {\n return getChannel(getA(${channels.join()}),\n vec2(${channels.slice(-2).join()}));\n }\n ${getBestIndicesAChannelSnippet}\n void main() {\n ${dtype} coords = getOutputCoords();\n bool hasNextCol = ${coords[rank - 1]} < ${outShape[rank - 1] - 1};\n bool hasNextRow = ${coords[rank - 2]} < ${outShape[rank - 2] - 1};\n ${sourceLocSetup}\n ivec4 srcIdx = ivec4(sourceLocR${inChannel}, sourceLocG${inChannel},\n sourceLocB${inChannel}, sourceLocA${inChannel}) * ${windowSize};\n ivec4 inIdx = srcIdx;\n vec4 bestIndex = vec4(inIdx);\n vec4 bestValue = ${fetchValue};\n\n for (int i = 0; i < ${windowSize}; i++) {\n inIdx = srcIdx;\n ${fetchCandidateIdx}\n vec4 candidate = ${fetchValue};\n bvec4 nan = isnan(candidate);\n bvec4 replace = bvec4(\n vec4(${compOp}(candidate, bestValue)) * (vec4(1.0) - vec4(nan)));\n\n bestValue = vec4(replace.x ? candidate.x : bestValue.x,\n replace.y ? candidate.y : bestValue.y,\n replace.z ? candidate.z : bestValue.z,\n replace.w ? candidate.w : bestValue.w);\n bestIndex = mix(bestIndex, vec4(inIdx), vec4(replace));\n srcIdx++;\n }\n setOutput(bestIndex);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, env, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {ArgMinMaxProgram} from '../argminmax_gpu';\nimport {ArgMinMaxPackedProgram} from '../argminmax_packed_gpu';\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {reshape} from '../kernels/Reshape';\n\nfunction argReduce(\n backend: MathBackendWebGL, x: TensorInfo, reduceType: 'max'|'min',\n bestIndicesA: TensorInfo = null): TensorInfo {\n let batchSize = x.shape[0];\n let inSize = x.shape[1];\n if (bestIndicesA != null) {\n batchSize = bestIndicesA.shape[0];\n inSize = bestIndicesA.shape[1];\n }\n const windowSize = backend_util.computeOptimalWindowSize(inSize);\n const reduceInfo =\n {windowSize, inSize, batchSize, outSize: Math.ceil(inSize / windowSize)};\n const program =\n new ArgMinMaxProgram(reduceInfo, reduceType, bestIndicesA == null);\n const inputs = [x];\n if (bestIndicesA != null) {\n inputs.push(bestIndicesA);\n }\n const output = backend.runWebGLProgram(program, inputs, 'int32');\n // No need to run another GPGPU program.\n if (output.shape[1] === 1) {\n return output;\n }\n const result = argReduce(backend, x, reduceType, output);\n backend.disposeIntermediateTensorInfo(output);\n return result;\n}\n\nfunction argReducePacked(\n backend: 
MathBackendWebGL, x: TensorInfo, reduceType: 'max'|'min',\n bestIndicesA: TensorInfo = null): TensorInfo {\n const inShape = bestIndicesA != null ? bestIndicesA.shape : x.shape;\n const inSize = inShape[inShape.length - 1];\n const windowSize = backend_util.computeOptimalWindowSize(inSize);\n const program = new ArgMinMaxPackedProgram(\n inShape, windowSize, reduceType, bestIndicesA == null);\n const inputs = bestIndicesA == null ? [x] : [x, bestIndicesA];\n const output = backend.runWebGLProgram(program, inputs, 'int32');\n if (output.shape.length === x.shape.length) {\n const result = argReducePacked(backend, x, reduceType, output);\n backend.disposeIntermediateTensorInfo(output);\n return result;\n }\n return output;\n}\n\nexport function argMinMaxReduce(\n backend: MathBackendWebGL, x: TensorInfo, axis: number,\n reduceType: 'min'|'max'): TensorInfo {\n const axes = [axis];\n backend_util.assertAxesAreInnerMostDims(\n 'arg' + reduceType.charAt(0).toUpperCase() + reduceType.slice(1), axes,\n x.shape.length);\n if (!env().getBool('WEBGL_PACK_REDUCE') || x.shape.length <= 2) {\n const intermediateTensorInfos = [];\n // Eagerly unpack x input since it is passed in to all the shaders which\n // require unpacked inputs.\n const xtexData = backend.texData.get(x.dataId);\n const xIsPacked = xtexData !== null && xtexData.isPacked;\n let xUnPacked = x;\n if (xIsPacked) {\n xUnPacked = backend.unpackTensor(x);\n intermediateTensorInfos.push(xUnPacked);\n }\n\n const [outShape, reduceShape] =\n backend_util.computeOutAndReduceShapes(xUnPacked.shape, axes);\n const inSize = util.sizeFromShape(reduceShape);\n const a2D = reshape(\n {inputs: {x: xUnPacked}, backend, attrs: {shape: [-1, inSize]}});\n intermediateTensorInfos.push(a2D);\n\n const reduced = argReduce(backend, a2D, reduceType);\n intermediateTensorInfos.push(reduced);\n const reshaped =\n reshape({inputs: {x: reduced}, backend, attrs: {shape: outShape}});\n\n intermediateTensorInfos.forEach(\n t => backend.disposeIntermediateTensorInfo(t));\n return reshaped;\n }\n return argReducePacked(backend, x, reduceType);\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ArgMax, ArgMaxAttrs, ArgMaxInputs, backend_util, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {argMinMaxReduce} from '../kernel_utils/arg_min_max';\n\nimport {transpose} from './Transpose';\n\nexport function argMax(\n args:\n {inputs: ArgMaxInputs, backend: MathBackendWebGL, attrs: ArgMaxAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis} = attrs;\n\n let axes = util.parseAxisParam(axis, x.shape);\n const permutedAxes = backend_util.getAxesPermutation(axes, x.shape.length);\n let $x = x;\n const intermediateTensorInfos = [];\n if (permutedAxes != null) {\n $x = transpose({inputs: {x}, backend, attrs: {perm: permutedAxes}});\n intermediateTensorInfos.push($x);\n axes = backend_util.getInnerMostAxes(axes.length, $x.shape.length);\n }\n\n backend_util.assertAxesAreInnerMostDims('argMax', [axes[0]], $x.shape.length);\n const out = argMinMaxReduce(backend, $x, axes[0], 'max');\n\n intermediateTensorInfos.forEach(\n t => backend.disposeIntermediateTensorInfo(t));\n return out;\n}\n\nexport const argMaxConfig: KernelConfig = {\n kernelName: ArgMax,\n backendName: 'webgl',\n kernelFunc: argMax as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ArgMin, ArgMinAttrs, ArgMinInputs, backend_util, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {argMinMaxReduce} from '../kernel_utils/arg_min_max';\nimport {transpose} from './Transpose';\n\nexport function argMin(\n args:\n {inputs: ArgMinInputs, backend: MathBackendWebGL, attrs: ArgMinAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis} = attrs;\n\n let axes = util.parseAxisParam(axis, x.shape);\n const permutedAxes = backend_util.getAxesPermutation(axes, x.shape.length);\n let $x = x;\n const intermediateTensorInfos = [];\n if (permutedAxes != null) {\n $x = transpose({inputs: {x}, backend, attrs: {perm: permutedAxes}});\n intermediateTensorInfos.push($x);\n axes = backend_util.getInnerMostAxes(axes.length, $x.shape.length);\n }\n\n backend_util.assertAxesAreInnerMostDims('argMin', [axes[0]], $x.shape.length);\n\n const out = argMinMaxReduce(backend, $x, axes[0], 'min');\n\n intermediateTensorInfos.forEach(\n t => backend.disposeIntermediateTensorInfo(t));\n return out;\n}\n\nexport const argMinConfig: KernelConfig = {\n kernelName: ArgMin,\n backendName: 'webgl',\n kernelFunc: argMin as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Asin, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {CHECK_NAN_SNIPPET} from '../unaryop_gpu';\n\nconst ASIN = CHECK_NAN_SNIPPET + `\n if (abs(x) > 1.) {\n return NAN;\n }\n return asin(x);\n`;\n\nexport const asin = unaryKernelFunc({opSnippet: ASIN});\n\nexport const asinConfig: KernelConfig = {\n kernelName: Asin,\n backendName: 'webgl',\n kernelFunc: asin,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Asinh, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {CHECK_NAN_SNIPPET} from '../unaryop_gpu';\n\nconst ASINH = CHECK_NAN_SNIPPET + `return log(x + sqrt(x * x + 1.0));`;\n\nexport const asinh = unaryKernelFunc({opSnippet: ASINH});\n\nexport const asinhConfig: KernelConfig = {\n kernelName: Asinh,\n backendName: 'webgl',\n kernelFunc: asinh,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Atan, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {CHECK_NAN_SNIPPET} from '../unaryop_gpu';\n\nconst ATAN = CHECK_NAN_SNIPPET + `\n return atan(x);\n`;\n\nexport const atan = unaryKernelFunc({opSnippet: ATAN});\n\nexport const atanConfig: KernelConfig = {\n kernelName: Atan,\n backendName: 'webgl',\n kernelFunc: atan,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Atan2} from '@tensorflow/tfjs-core';\nimport {KernelConfig} from '@tensorflow/tfjs-core';\nimport {CHECK_NAN_SNIPPET} from '../binaryop_gpu';\nimport {CHECK_NAN_SNIPPET_PACKED} from '../binaryop_packed_gpu';\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst ATAN2 = CHECK_NAN_SNIPPET + `\n return atan(a, b);\n`;\n\nconst ATAN2_PACKED = `\n vec4 result = atan(a, b);\n bvec4 isNaNA = isnan(a);\n bvec4 isNaNB = isnan(b);\n bvec4 isNaN = bvec4(isNaNA.x || isNaNB.x, isNaNA.y || isNaNB.y, isNaNA.z || isNaNB.z, isNaNA.w || isNaNB.w);\n ` +\n CHECK_NAN_SNIPPET_PACKED + `\n return result;\n`;\n\nexport const atan2 =\n binaryKernelFunc({opSnippet: ATAN2, packedOpSnippet: ATAN2_PACKED});\n\nexport const atan2Config: KernelConfig = {\n kernelName: Atan2,\n backendName: 'webgl',\n kernelFunc: atan2,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Atanh, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {CHECK_NAN_SNIPPET} from '../unaryop_gpu';\n\nconst ATANH = CHECK_NAN_SNIPPET + `\n if ((x < -1.0) || (x > 1.0)) return NAN;\nreturn (log(1.0 + x) - log(1.0 - x)) / 2.0;`;\n\nexport const atanh = unaryKernelFunc({opSnippet: ATANH});\n\nexport const atanhConfig: KernelConfig = {\n kernelName: Atanh,\n backendName: 'webgl',\n kernelFunc: atanh,\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class Pool2DProgram implements GPGPUProgram {\n variableNames = ['x'];\n outputShape: number[];\n userCode: string;\n\n constructor(\n convInfo: backend_util.Conv2DInfo, poolType: 'max'|'avg',\n computePositions: boolean, flattenPositions = false,\n includeBatchInIndex = false) {\n if (poolType === 'avg' && computePositions) {\n throw new Error('Cannot compute positions for average pool.');\n }\n\n const filterWidth = convInfo.filterWidth;\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const dilationHeight = convInfo.dilationHeight;\n const dilationWidth = convInfo.dilationWidth;\n const effectiveFilterHeight = convInfo.effectiveFilterHeight;\n const effectiveFilterWidth = convInfo.effectiveFilterWidth;\n\n const padTop = convInfo.padInfo.top;\n const padLeft = convInfo.padInfo.left;\n this.outputShape = convInfo.outShape;\n\n const isAvgPool = poolType === 'avg';\n const batchFlattenPositionStr = `((batch * ${convInfo.inHeight} + xR) * ${\n convInfo.inWidth} + xC) * ${convInfo.inChannels} + d`;\n const flattenPositionStr =\n `(xR * ${convInfo.inWidth} + xC) * ${convInfo.inChannels} + d`;\n\n let initializationValue = '0.0';\n if (!isAvgPool) {\n // WebGL on Firefox Linux can't compile 1/0 so we do 1/eps.\n initializationValue = '-1.0 / 1e-20';\n }\n\n if (computePositions) {\n const compareOp = '>=';\n\n this.userCode = `\n const ivec2 strides = ivec2(${strideHeight}, ${strideWidth});\n const ivec2 pads = ivec2(${padTop}, ${padLeft});\n\n void main() {\n ivec4 coords = getOutputCoords();\n int batch = coords[0];\n int d = coords[3];\n\n ivec2 xRCCorner = coords.yz * strides - pads;\n int xRCorner = xRCCorner.x;\n int xCCorner = xRCCorner.y;\n\n // max/min x(?, ?, d) to get y(yR, yC, d).\n // ? = to be determined\n float minMaxValue = 0.0;\n float minMaxValueFound = 0.0;\n int minMaxPosition = 0;\n float avgValue = 0.0;\n\n for (int wR = 0; wR < ${effectiveFilterHeight};\n wR += ${dilationHeight}) {\n int xR = xRCorner + wR;\n\n if (xR < 0 || xR >= ${convInfo.inHeight}) {\n continue;\n }\n\n for (int wC = 0; wC < ${effectiveFilterWidth};\n wC += ${dilationWidth}) {\n int xC = xCCorner + wC;\n\n if (xC < 0 || xC >= ${convInfo.inWidth}) {\n continue;\n }\n\n float value = getX(batch, xR, xC, d);\n\n // If a min / max value has already been found, use it. If not,\n // use the current value.\n float currMinMaxValue = mix(\n value, minMaxValue, minMaxValueFound);\n if (value ${compareOp} currMinMaxValue) {\n minMaxValue = value;\n minMaxValueFound = 1.0;\n minMaxPosition = ${\n flattenPositions ? (includeBatchInIndex ? 
batchFlattenPositionStr :\n flattenPositionStr) :\n `wR * ${effectiveFilterWidth} + wC`};\n }\n }\n }\n setOutput(float(minMaxPosition));\n }\n `;\n return;\n }\n\n const compareOp = 'max';\n\n let returnValue = `${poolType}(${poolType}(${poolType}(` +\n 'minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])';\n if (poolType === 'avg') {\n returnValue = `avgValue / max(count, 1.0)`;\n }\n\n const filterWidthNearestVec4 = Math.floor(filterWidth / 4) * 4;\n const filterWidthVec4Remainder = filterWidth % 4;\n\n const updateSnippet = `\n if (${isAvgPool}) {\n avgValue += dot(values, ones);\n } else {\n minMaxValue = ${compareOp}(values, minMaxValue);\n }\n `;\n\n this.userCode = `\n const ivec2 strides = ivec2(${strideHeight}, ${strideWidth});\n const ivec2 pads = ivec2(${padTop}, ${padLeft});\n const float initializationValue = ${initializationValue};\n const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0);\n\n float count = 0.0;\n\n float getValue(int batch, int xR, int xC, int d) {\n if (xC < 0 || xC >= ${convInfo.inWidth}) {\n return initializationValue;\n }\n count += 1.0;\n return getX(batch, xR, xC, d);\n }\n\n void main() {\n ivec4 coords = getOutputCoords();\n int batch = coords[0];\n int d = coords[3];\n\n ivec2 xRCCorner = coords.yz * strides - pads;\n int xRCorner = xRCCorner.x;\n int xCCorner = xRCCorner.y;\n\n // max/min x(?, ?, d) to get y(yR, yC, d).\n // ? = to be determined\n vec4 minMaxValue = vec4(${initializationValue});\n float avgValue = 0.0;\n count = 0.0;\n\n for (int wR = 0; wR < ${effectiveFilterHeight};\n wR += ${dilationHeight}) {\n int xR = xRCorner + wR;\n\n if (xR < 0 || xR >= ${convInfo.inHeight}) {\n continue;\n }\n\n for (int wC = 0; wC < ${filterWidthNearestVec4}; wC += 4) {\n int xC = xCCorner + wC * ${dilationWidth};\n\n vec4 values = vec4(\n getValue(batch, xR, xC, d),\n getValue(batch, xR, xC + ${dilationWidth}, d),\n getValue(batch, xR, xC + 2 * ${dilationWidth}, d),\n getValue(batch, xR, xC + 3 * ${dilationWidth}, d)\n );\n\n ${updateSnippet}\n }\n\n int xC = xCCorner + ${filterWidthNearestVec4};\n if (${filterWidthVec4Remainder === 1}) {\n vec4 values = vec4(\n getValue(batch, xR, xC, d),\n initializationValue,\n initializationValue,\n initializationValue\n );\n\n ${updateSnippet}\n } else if (${filterWidthVec4Remainder === 2}) {\n vec4 values = vec4(\n getValue(batch, xR, xC, d),\n getValue(batch, xR, xC + ${dilationWidth}, d),\n initializationValue,\n initializationValue\n );\n\n ${updateSnippet}\n } else if (${filterWidthVec4Remainder === 3}) {\n vec4 values = vec4(\n getValue(batch, xR, xC, d),\n getValue(batch, xR, xC + ${dilationWidth}, d),\n getValue(batch, xR, xC + 2 * ${dilationWidth}, d),\n initializationValue\n );\n\n ${updateSnippet}\n }\n }\n setOutput(${returnValue});\n }\n `;\n }\n}\n\nexport class Pool3DProgram implements GPGPUProgram {\n variableNames = ['x'];\n outputShape: number[];\n userCode: string;\n\n constructor(\n convInfo: backend_util.Conv3DInfo, poolType: 'max'|'avg',\n computePositions: boolean, flattenPositions = false,\n includeBatchInIndex = false) {\n if (poolType === 'avg' && computePositions) {\n throw new Error('Cannot compute positions for average pool.');\n }\n\n const filterWidth = convInfo.filterWidth;\n const strideDepth = convInfo.strideDepth;\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const dilationDepth = convInfo.dilationDepth;\n const dilationHeight = convInfo.dilationHeight;\n const dilationWidth = convInfo.dilationWidth;\n const effectiveFilterDepth = 
convInfo.effectiveFilterDepth;\n const effectiveFilterHeight = convInfo.effectiveFilterHeight;\n const effectiveFilterWidth = convInfo.effectiveFilterWidth;\n\n const padFront = convInfo.padInfo.front;\n const padTop = convInfo.padInfo.top;\n const padLeft = convInfo.padInfo.left;\n this.outputShape = convInfo.outShape;\n\n const isAvgPool = poolType === 'avg';\n\n let initializationValue = '0.0';\n if (!isAvgPool) {\n // WebGL on Firefox Linux can't compile 1/0 so we do 1/eps.\n initializationValue = '-1.0 / 1e-20';\n }\n\n if (computePositions) {\n const compareOp = '>=';\n\n this.userCode = `\n const ivec3 strides =\n ivec3(${strideDepth}, ${strideHeight}, ${strideWidth});\n const ivec3 pads = ivec3(${padFront}, ${padTop}, ${padLeft});\n\n void main() {\n ivec5 coords = getOutputCoords();\n int batch = coords.x;\n int ch = coords.u;\n\n ivec3 xCorner = ivec3(coords.y, coords.z, coords.w) * strides - pads;\n int xDCorner = xCorner.x;\n int xRCorner = xCorner.y;\n int xCCorner = xCorner.z;\n\n // max/min x(?, ?, ?, ch) to get y(yD, yR, yC, ch).\n // ? = to be determined\n float minMaxValue = 0.0;\n float minMaxValueFound = 0.0;\n int minMaxPosition = 0;\n\n for (int wD = 0; wD < ${effectiveFilterDepth};\n wD += ${dilationDepth}) {\n int xD = xDCorner + wD;\n\n if (xD < 0 || xD >= ${convInfo.inDepth}) {\n continue;\n }\n\n for (int wR = 0; wR < ${effectiveFilterHeight};\n wR += ${dilationHeight}) {\n int xR = xRCorner + wR;\n\n if (xR < 0 || xR >= ${convInfo.inHeight}) {\n continue;\n }\n\n for (int wC = 0; wC < ${effectiveFilterWidth};\n wC += ${dilationWidth}) {\n int xC = xCCorner + wC;\n\n if (xC < 0 || xC >= ${convInfo.inWidth}) {\n continue;\n }\n\n float value = getX(batch, xD, xR, xC, ch);\n\n // If a min / max value has already been found, use it. 
If not,\n // use the current value.\n float currMinMaxValue = mix(\n value, minMaxValue, minMaxValueFound);\n if (value ${compareOp} currMinMaxValue) {\n minMaxValue = value;\n minMaxValueFound = 1.0;\n minMaxPosition = ${\n flattenPositions ?\n (includeBatchInIndex ?\n `(((batch * ${convInfo.inDepth} + xD) * ${\n convInfo.inHeight} + xR) * ${convInfo.inWidth} + xC) * ${\n convInfo.inChannels} + ch` :\n `((xD * ${convInfo.inHeight} + xR) * ${\n convInfo.inWidth} + xC) * ${convInfo.inChannels} + ch`) :\n `wD * ${effectiveFilterHeight} * ${effectiveFilterWidth} +\n wR * ${effectiveFilterWidth} + wC`};\n }\n }\n }\n }\n setOutput(float(minMaxPosition));\n }\n `;\n return;\n }\n\n const compareOp = 'max';\n\n let returnValue = `${poolType}(${poolType}(${poolType}(` +\n 'minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])';\n if (poolType === 'avg') {\n // Use `max(count, 1.0)` instead of `count` in case count === 0.0.\n // If count === 0.0, `avgValue` is always 0.0 and we change `count`'s\n // value to avoid dividing zero.\n returnValue = `avgValue / max(count, 1.0)`;\n }\n\n const filterWidthNearestVec4 = Math.floor(filterWidth / 4) * 4;\n const filterWidthVec4Remainder = filterWidth % 4;\n\n const updateSnippet = `\n if (${isAvgPool}) {\n avgValue += dot(values, ones);\n } else {\n minMaxValue = ${compareOp}(values, minMaxValue);\n }\n `;\n\n this.userCode = `\n const ivec3 strides =\n ivec3(${strideDepth}, ${strideHeight}, ${strideWidth});\n const ivec3 pads = ivec3(${padFront}, ${padTop}, ${padLeft});\n const float initializationValue = ${initializationValue};\n const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0);\n\n float count = 0.0;\n\n float getValue(int batch, int xD, int xR, int xC, int ch) {\n if (xC < 0 || xC >= ${convInfo.inWidth}) {\n return initializationValue;\n }\n count += 1.0;\n return getX(batch, xD, xR, xC, ch);\n }\n\n void main() {\n ivec5 coords = getOutputCoords();\n int batch = coords.x;\n int ch = coords.u;\n\n ivec3 xCorner = ivec3(coords.y, coords.z, coords.w) * strides - pads;\n int xDCorner = xCorner.x;\n int xRCorner = xCorner.y;\n int xCCorner = xCorner.z;\n\n // max/min x(?, ?, ?, d) to get y(yD, yR, yC, ch).\n // ? 
= to be determined\n vec4 minMaxValue = vec4(${initializationValue});\n float avgValue = 0.0;\n count = 0.0;\n\n for (int wD = 0; wD < ${effectiveFilterDepth};\n wD += ${dilationDepth}) {\n int xD = xDCorner + wD;\n\n if (xD < 0 || xD >= ${convInfo.inDepth}) {\n continue;\n }\n\n for (int wR = 0; wR < ${effectiveFilterHeight};\n wR += ${dilationHeight}) {\n int xR = xRCorner + wR;\n\n if (xR < 0 || xR >= ${convInfo.inHeight}) {\n continue;\n }\n\n for (int wC = 0; wC < ${filterWidthNearestVec4}; wC += 4) {\n int xC = xCCorner + wC * ${dilationWidth};\n\n vec4 values = vec4(\n getValue(batch, xD, xR, xC, ch),\n getValue(batch, xD, xR, xC + ${dilationWidth}, ch),\n getValue(batch, xD, xR, xC + 2 * ${dilationWidth}, ch),\n getValue(batch, xD, xR, xC + 3 * ${dilationWidth}, ch)\n );\n\n ${updateSnippet}\n }\n\n int xC = xCCorner + ${filterWidthNearestVec4};\n if (${filterWidthVec4Remainder === 1}) {\n vec4 values = vec4(\n getValue(batch, xD, xR, xC, ch),\n initializationValue,\n initializationValue,\n initializationValue\n );\n\n ${updateSnippet}\n } else if (${filterWidthVec4Remainder === 2}) {\n vec4 values = vec4(\n getValue(batch, xD, xR, xC, ch),\n getValue(batch, xD, xR, xC + ${dilationWidth}, ch),\n initializationValue,\n initializationValue\n );\n\n ${updateSnippet}\n } else if (${filterWidthVec4Remainder === 3}) {\n vec4 values = vec4(\n getValue(batch, xD, xR, xC, ch),\n getValue(batch, xD, xR, xC + ${dilationWidth}, ch),\n getValue(batch, xD, xR, xC + 2 * ${dilationWidth}, ch),\n initializationValue\n );\n\n ${updateSnippet}\n }\n }\n }\n setOutput(${returnValue});\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {AvgPool, AvgPoolAttrs, AvgPoolInputs, backend_util, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {Pool2DProgram} from '../pool_gpu';\nimport {assertNotComplex} from '../webgl_util';\nimport {identity} from './Identity';\n\nexport function avgPool(args: {\n inputs: AvgPoolInputs,\n backend: MathBackendWebGL,\n attrs: AvgPoolAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n assertNotComplex(x, 'avgPool');\n const {filterSize, strides, pad, dimRoundingMode} = attrs;\n const dilations = 1;\n\n util.assert(\n backend_util.eitherStridesOrDilationsAreOne(strides, dilations),\n () => 'Error in avgPool: Either strides or dilations must be 1. 
' +\n `Got strides ${strides} and dilations '${dilations}'`);\n\n const convInfo = backend_util.computePool2DInfo(\n x.shape as [number, number, number, number], filterSize, strides,\n dilations, pad, dimRoundingMode);\n if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 &&\n util.arraysEqual(convInfo.inShape, convInfo.outShape)) {\n return identity({inputs: {x}, backend});\n }\n const avgPoolProgram = new Pool2DProgram(convInfo, 'avg', false);\n return backend.runWebGLProgram(avgPoolProgram, [x], 'float32');\n}\n\nexport const avgPoolConfig: KernelConfig = {\n kernelName: AvgPool,\n backendName: 'webgl',\n kernelFunc: avgPool as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {AvgPool3D, AvgPool3DAttrs, AvgPool3DInputs, backend_util, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {Pool3DProgram} from '../pool_gpu';\n\nexport function avgPool3D(args: {\n inputs: AvgPool3DInputs,\n backend: MathBackendWebGL,\n attrs: AvgPool3DAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {filterSize, strides, pad, dimRoundingMode, dataFormat} = attrs;\n const dilations: [number, number, number] = [1, 1, 1];\n\n const convInfo = backend_util.computePool3DInfo(\n x.shape as [number, number, number, number, number], filterSize, strides,\n dilations, pad, dimRoundingMode, dataFormat);\n const avgPoolProgram = new Pool3DProgram(convInfo, 'avg', false);\n return backend.runWebGLProgram(avgPoolProgram, [x], 'float32');\n}\n\nexport const avgPool3DConfig: KernelConfig = {\n kernelName: AvgPool3D,\n backendName: 'webgl',\n kernelFunc: avgPool3D as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class AvgPool2DBackpropProgram implements GPGPUProgram {\n variableNames = ['dy'];\n outputShape: number[];\n userCode: string;\n\n constructor(convInfo: backend_util.Conv2DInfo) {\n this.outputShape = convInfo.inShape;\n const filterHeight = convInfo.filterHeight;\n const filterWidth = convInfo.filterWidth;\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const dilationHeight = convInfo.dilationHeight;\n const dilationWidth = convInfo.dilationWidth;\n const effectiveFilterHeight = convInfo.effectiveFilterHeight;\n const effectiveFilterWidth = convInfo.effectiveFilterWidth;\n\n const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top;\n const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left;\n\n const avgMultiplier = 1 / (filterHeight * filterWidth);\n\n this.userCode = `\n const ivec2 pads = ivec2(${padTop}, ${padLeft});\n const float avgMultiplier = float(${avgMultiplier});\n\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int d = coords[3];\n\n ivec2 dyRCCorner = coords.yz - pads;\n int dyRCorner = dyRCCorner.x;\n int dyCCorner = dyRCCorner.y;\n\n // Convolve dy(?, ?, d) with pos mask(:, :, d) to get dx(xR, xC, d).\n // ? = to be determined. 
: = across all values in that axis.\n float dotProd = 0.0;\n for (int wR = 0; wR < ${effectiveFilterHeight};\n wR += ${dilationHeight}) {\n float dyR = float(dyRCorner + wR) / ${strideHeight}.0;\n\n if (dyR < 0.0 || dyR >= ${convInfo.outHeight}.0 || fract(dyR) > 0.0) {\n continue;\n }\n int idyR = int(dyR);\n\n for (int wC = 0; wC < ${effectiveFilterWidth};\n wC+= ${dilationWidth}) {\n float dyC = float(dyCCorner + wC) / ${strideWidth}.0;\n\n if (dyC < 0.0 || dyC >= ${convInfo.outWidth}.0 ||\n fract(dyC) > 0.0) {\n continue;\n }\n int idyC = int(dyC);\n\n float dyValue = getDy(b, idyR, idyC, d);\n\n dotProd += dyValue * avgMultiplier;\n }\n }\n setOutput(dotProd);\n }\n `;\n }\n}\n\nexport class AvgPool3DBackpropProgram implements GPGPUProgram {\n variableNames = ['dy'];\n outputShape: number[];\n userCode: string;\n\n constructor(convInfo: backend_util.Conv3DInfo) {\n this.outputShape = convInfo.inShape;\n const filterDepth = convInfo.filterDepth;\n const filterHeight = convInfo.filterHeight;\n const filterWidth = convInfo.filterWidth;\n const strideDepth = convInfo.strideDepth;\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const dilationDepth = convInfo.dilationDepth;\n const dilationHeight = convInfo.dilationHeight;\n const dilationWidth = convInfo.dilationWidth;\n const effectiveFilterDepth = convInfo.effectiveFilterDepth;\n const effectiveFilterHeight = convInfo.effectiveFilterHeight;\n const effectiveFilterWidth = convInfo.effectiveFilterWidth;\n\n const padFront = effectiveFilterDepth - 1 - convInfo.padInfo.front;\n const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top;\n const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left;\n\n const avgMultiplier = 1 / (filterDepth * filterHeight * filterWidth);\n\n this.userCode = `\n const ivec3 pads = ivec3(${padFront}, ${padTop}, ${padLeft});\n const float avgMultiplier = float(${avgMultiplier});\n\n void main() {\n ivec5 coords = getOutputCoords();\n int batch = coords.x;\n int ch = coords.u;\n\n ivec3 dyCorner = ivec3(coords.y, coords.z, coords.w) - pads;\n int dyDCorner = dyCorner.x;\n int dyRCorner = dyCorner.y;\n int dyCCorner = dyCorner.z;\n\n // Convolve dy(?, ?, ?, d) with pos mask(:, :, :, ch) to get\n // dx(xD, xR, xC, ch).\n // ? = to be determined. : = across all values in that axis.\n float dotProd = 0.0;\n\n for (int wD = 0; wD < ${effectiveFilterDepth};\n wD += ${dilationDepth}) {\n float dyD = float(dyDCorner + wD) / ${strideDepth}.0;\n\n if (dyD < 0.0 || dyD >= ${convInfo.outDepth}.0 || fract(dyD) > 0.0) {\n continue;\n }\n int idyD = int(dyD);\n\n for (int wR = 0; wR < ${effectiveFilterHeight};\n wR += ${dilationHeight}) {\n float dyR = float(dyRCorner + wR) / ${strideHeight}.0;\n\n if (dyR < 0.0 || dyR >= ${convInfo.outHeight}.0 ||\n fract(dyR) > 0.0) {\n continue;\n }\n int idyR = int(dyR);\n\n for (int wC = 0; wC < ${effectiveFilterWidth};\n wC += ${dilationWidth}) {\n float dyC = float(dyCCorner + wC) / ${strideWidth}.0;\n\n if (dyC < 0.0 || dyC >= ${convInfo.outWidth}.0 ||\n fract(dyC) > 0.0) {\n continue;\n }\n int idyC = int(dyC);\n\n float dyValue = getDy(batch, idyD, idyR, idyC, ch);\n\n dotProd += dyValue * avgMultiplier;\n }\n }\n }\n setOutput(dotProd);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {AvgPool3DGrad, AvgPool3DGradAttrs, AvgPool3DGradInputs, backend_util, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {AvgPool3DBackpropProgram} from '../avg_pool_backprop_gpu';\nimport {MathBackendWebGL} from '../backend_webgl';\n\nexport function avgPool3DGrad(args: {\n inputs: AvgPool3DGradInputs,\n backend: MathBackendWebGL,\n attrs: AvgPool3DGradAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {dy, input} = inputs;\n const x = input;\n const {filterSize, strides, pad, dimRoundingMode} = attrs;\n const dilations: [number, number, number] = [1, 1, 1];\n\n const convInfo = backend_util.computePool3DInfo(\n x.shape as [number, number, number, number, number], filterSize, strides,\n dilations, pad, dimRoundingMode);\n const avgPoolBackpropProgram = new AvgPool3DBackpropProgram(convInfo);\n return backend.runWebGLProgram(avgPoolBackpropProgram, [dy], x.dtype);\n}\n\nexport const avgPool3DGradConfig: KernelConfig = {\n kernelName: AvgPool3DGrad,\n backendName: 'webgl',\n kernelFunc: avgPool3DGrad as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {AvgPoolGrad, AvgPoolGradAttrs, AvgPoolGradInputs, backend_util, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {AvgPool2DBackpropProgram} from '../avg_pool_backprop_gpu';\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {assertNotComplex} from '../webgl_util';\n\nexport function avgPoolGrad(args: {\n inputs: AvgPoolGradInputs,\n backend: MathBackendWebGL,\n attrs: AvgPoolGradAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {dy, input} = inputs;\n const x = input;\n assertNotComplex([dy, input], 'avgPoolGrad');\n const {filterSize, strides, pad} = attrs;\n\n const convInfo = backend_util.computePool2DInfo(\n x.shape as [number, number, number, number], filterSize, strides,\n 1 /* dilations */, pad);\n const avgPoolBackpropProgram = new AvgPool2DBackpropProgram(convInfo);\n return backend.runWebGLProgram(avgPoolBackpropProgram, [dy], x.dtype);\n}\n\nexport const avgPoolGradConfig: KernelConfig = {\n kernelName: AvgPoolGrad,\n backendName: 'webgl',\n kernelFunc: avgPoolGrad as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {BatchMatMul, BatchMatMulAttrs, BatchMatMulInputs, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {batchMatMulImpl} from './BatchMatMul_impl';\n\nexport function batchMatMul(args: {\n inputs: BatchMatMulInputs,\n attrs: BatchMatMulAttrs,\n backend: MathBackendWebGL\n}) {\n const {inputs, backend, attrs} = args;\n const {a, b} = inputs;\n const {transposeA, transposeB} = attrs;\n\n return batchMatMulImpl({a, b, transposeA, transposeB, backend});\n}\n\nexport const batchMatMulConfig: KernelConfig = {\n kernelName: BatchMatMul,\n backendName: 'webgl',\n kernelFunc: batchMatMul as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class BatchNormProgram implements GPGPUProgram {\n variableNames: string[];\n outputShape: number[] = [];\n userCode: string;\n\n constructor(\n xShape: number[], meanShape: number[], varianceShape: number[],\n offsetShape: number[]|null, scaleShape: number[]|null,\n varianceEpsilon: number) {\n this.variableNames = ['x', 'mean', 'variance'];\n backend_util.assertAndGetBroadcastShape(xShape, meanShape);\n backend_util.assertAndGetBroadcastShape(xShape, varianceShape);\n\n let offsetSnippet = '0.0';\n if (offsetShape != null) {\n backend_util.assertAndGetBroadcastShape(xShape, offsetShape);\n this.variableNames.push('offset');\n offsetSnippet = 'getOffsetAtOutCoords()';\n }\n\n let scaleSnippet = '1.0';\n if (scaleShape != null) {\n backend_util.assertAndGetBroadcastShape(xShape, scaleShape);\n this.variableNames.push('scale');\n scaleSnippet = 'getScaleAtOutCoords()';\n }\n\n this.outputShape = xShape;\n this.userCode = `\n void main() {\n float x = getXAtOutCoords();\n float mean = getMeanAtOutCoords();\n float variance = getVarianceAtOutCoords();\n float offset = ${offsetSnippet};\n float scale = ${scaleSnippet};\n float inv = scale * inversesqrt(variance + float(${varianceEpsilon}));\n setOutput(dot(vec3(x, -mean, offset), vec3(inv, inv, 1)));\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class BatchNormPackedProgram implements GPGPUProgram {\n variableNames: string[];\n outputShape: number[];\n userCode: string;\n packedInputs = true;\n packedOutput = true;\n\n constructor(\n xShape: number[], meanShape: number[], varianceShape: number[],\n offsetShape: number[]|null, scaleShape: number[]|null,\n varianceEpsilon: number) {\n this.variableNames = ['x', 'mean', 'variance'];\n backend_util.assertAndGetBroadcastShape(xShape, meanShape);\n backend_util.assertAndGetBroadcastShape(xShape, varianceShape);\n\n let offsetSnippet = 'vec4(0.0)';\n if (offsetShape != null) {\n backend_util.assertAndGetBroadcastShape(xShape, offsetShape);\n this.variableNames.push('offset');\n offsetSnippet = 'getOffsetAtOutCoords()';\n }\n\n let scaleSnippet = 'vec4(1.0)';\n if (scaleShape != null) {\n backend_util.assertAndGetBroadcastShape(xShape, scaleShape);\n this.variableNames.push('scale');\n scaleSnippet = 'getScaleAtOutCoords()';\n }\n\n this.outputShape = xShape;\n this.userCode = `\n void main() {\n vec4 offset = ${offsetSnippet};\n vec4 scale = ${scaleSnippet};\n\n vec4 x = getXAtOutCoords();\n vec4 mean = getMeanAtOutCoords();\n vec4 variance = getVarianceAtOutCoords();\n\n vec4 inv = scale * inversesqrt(variance + vec4(${varianceEpsilon}));\n\n setOutput((x - mean) * inv + offset);\n }\n `;\n }\n}\n","\n/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env, FusedBatchNorm, FusedBatchNormAttrs, FusedBatchNormInputs, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {BatchNormProgram} from '../batchnorm_gpu';\nimport {BatchNormPackedProgram} from '../batchnorm_packed_gpu';\n\nexport const batchNorm: (params: {\n inputs: FusedBatchNormInputs,\n backend: MathBackendWebGL,\n attrs: FusedBatchNormAttrs\n}) => TensorInfo = ({inputs, backend, attrs}) => {\n const {x, mean, variance, offset, scale} = inputs;\n\n util.assert(\n mean.shape.length === variance.shape.length,\n () => 'Batch normalization gradient requires mean and variance to have ' +\n 'equal ranks.');\n util.assert(\n offset == null || mean.shape.length === offset.shape.length,\n () => 'Batch normalization gradient requires mean and offset to have ' +\n 'equal ranks.');\n util.assert(\n scale == null || mean.shape.length === scale.shape.length,\n () => 'Batch normalization gradient requires mean and scale to have ' +\n 'equal ranks.');\n\n let {varianceEpsilon} = attrs;\n if (varianceEpsilon == null) {\n varianceEpsilon = 0.001;\n }\n\n const finalInputs = [x, mean, variance];\n\n let offsetShape = null;\n if (offset != null) {\n offsetShape = offset.shape;\n finalInputs.push(offset);\n }\n\n let scaleShape = null;\n if (scale != null) {\n scaleShape = scale.shape;\n finalInputs.push(scale);\n }\n\n const program = env().getBool('WEBGL_PACK_NORMALIZATION') ?\n new BatchNormPackedProgram(\n x.shape, mean.shape, variance.shape, offsetShape, scaleShape,\n varianceEpsilon) :\n new BatchNormProgram(\n x.shape, mean.shape, variance.shape, offsetShape, scaleShape,\n varianceEpsilon);\n const output =\n backend.runWebGLProgram(program, finalInputs, finalInputs[0].dtype);\n\n return output;\n};\n\nexport const batchNormConfig: KernelConfig = {\n kernelName: FusedBatchNorm,\n backendName: 'webgl',\n kernelFunc: batchNorm as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {getCoordsDataType, UniformType} from './shader_compiler';\n\nexport class SliceProgram implements GPGPUProgram {\n variableNames = ['source'];\n outputShape: number[];\n userCode: string;\n rank: number;\n customUniforms: Array<{name: string; arrayIndex: number; type: UniformType;}>;\n\n constructor(destSize: number[]) {\n this.outputShape = destSize;\n this.rank = destSize.length;\n\n const dtype = getCoordsDataType(this.rank);\n this.customUniforms = [{name: 'start', arrayIndex: this.rank, type: 'int'}];\n const sourceCoords = getCoords(this.rank);\n\n let body: string;\n const coordSum = destSize.map((_, i) => {\n return `sourceLoc.${coords[i]} = start[${i}] + coords.${coords[i]};`;\n });\n body = `\n ${dtype} sourceLoc;\n ${dtype} coords = getOutputCoords();\n ${coordSum.join('\\n')}\n `;\n this.userCode = `\n void main() {\n ${body}\n setOutput(getSource(${sourceCoords}));\n }\n `;\n }\n}\n\nconst coords = ['x', 'y', 'z', 'w', 'u', 'v'];\n\nfunction getCoords(rank: number): string {\n if (rank === 1) {\n return 'sourceLoc';\n } else if (rank <= 6) {\n return coords.slice(0, rank).map(x => 'sourceLoc.' + x).join(',');\n } else {\n throw Error(`Slicing for rank ${rank} is not yet supported`);\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {getChannels} from './packing_util';\nimport {getCoordsDataType, UniformType} from './shader_compiler';\n\nexport class SlicePackedProgram implements GPGPUProgram {\n variableNames = ['source'];\n packedInputs = true;\n packedOutput = true;\n outputShape: number[];\n userCode: string;\n rank: number;\n customUniforms: Array<{name: string; arrayIndex: number; type: UniformType;}>;\n\n constructor(destSize: number[]) {\n this.outputShape = destSize;\n this.rank = destSize.length;\n this.customUniforms = [{name: 'start', arrayIndex: this.rank, type: 'int'}];\n const dtype = getCoordsDataType(this.rank);\n const coords = getChannels('coords', this.rank);\n const sourceLoc = getChannels('sourceLoc', this.rank);\n\n const innerDims =\n this.rank === 1 ? 
'sourceLoc' : `vec2(${sourceLoc.slice(-2).join()})`;\n const getChannel =\n `getChannel(getSource(${sourceLoc.join()}), ${innerDims})`;\n const upperRow = `\n result.x = ${getChannel};\n if (++${coords[this.rank - 1]} < ${destSize[this.rank - 1]}) {\n ++${sourceLoc[this.rank - 1]};\n result.y = ${getChannel};\n --${sourceLoc[this.rank - 1]};\n }\n `;\n const lowerRow = this.rank === 1 ? '' : `\n --${coords[this.rank - 1]};\n if (++${coords[this.rank - 2]} < ${destSize[this.rank - 2]}) {\n ++${sourceLoc[this.rank - 2]};\n result.z = ${getChannel};\n if (++${coords[this.rank - 1]} < ${destSize[this.rank - 1]}) {\n ++${sourceLoc[this.rank - 1]};\n result.w = ${getChannel};\n }\n }\n `;\n\n const sourceLocSetup = this.rank <= 4 ?\n `sourceLoc = coords +\n ${dtype}(${destSize.map((_, i) => `start[${i}]`).join()});` :\n destSize.map((_, i) => `${sourceLoc[i]} = ${coords[i]} + start[${i}];`)\n .join('\\n');\n this.userCode = `\n void main() {\n ${dtype} coords = getOutputCoords();\n ${dtype} sourceLoc;\n ${sourceLocSetup}\n vec4 result = vec4(0.);\n ${upperRow}\n ${lowerRow}\n setOutput(result);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env, KernelConfig, KernelFunc, Slice, slice_util, SliceAttrs, SliceInputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {sliceImplCPU} from '../kernel_utils/shared';\nimport {SliceProgram} from '../slice_gpu';\nimport {SlicePackedProgram} from '../slice_packed_gpu';\n\nfunction shallowSlice(\n x: TensorInfo, begin: number[], size: number[], backend: MathBackendWebGL) {\n const xTexData = backend.texData.get(x.dataId);\n const t = backend.makeTensorInfo(size, x.dtype);\n const newTexData = backend.texData.get(t.dataId);\n // Copy texture data from the original tensor.\n Object.assign(newTexData, xTexData);\n newTexData.refCount = 1;\n newTexData.shape = size;\n newTexData.dtype = x.dtype;\n let flatOffset =\n slice_util.computeFlatOffset(begin, util.computeStrides(x.shape));\n if (xTexData.slice) {\n // We are slicing an already sliced tensor, so we have to accumulate\n // the offset.\n flatOffset += xTexData.slice.flatOffset;\n }\n newTexData.slice = {\n flatOffset,\n // Point to the original dataId, which is used to do ref counting.\n origDataId: xTexData.slice && xTexData.slice.origDataId || x.dataId\n };\n\n // Increase the ref count for that data bucket.\n const refCount = backend.dataRefCount.get(newTexData.slice.origDataId) || 1;\n backend.dataRefCount.set(newTexData.slice.origDataId, refCount + 1);\n return t;\n}\n\nexport function slice(\n args: {inputs: SliceInputs, backend: MathBackendWebGL, attrs: SliceAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {begin, size} = attrs;\n\n const [$begin, $size] = slice_util.parseSliceParams(x, 
begin, size);\n slice_util.assertParamsValid(x, $begin, $size);\n\n if (util.sizeFromShape($size) === 0) {\n return backend.makeTensorInfo($size, x.dtype, []);\n }\n\n // Run on cpu if dtype is string. For string, the backend represents it\n // as Uint8Array[], where each Uint8Array is a character. Given that the\n // computation is only on the outer array, uploading the whole data onto\n // gpu is wasteful. Also, currently webgl doesn't have a design to\n // upload and retrieve Uint8Array[] between cpu and gpu. Therefore, we\n // just run the kernel on cpu if dtype is string.\n if (backend.shouldExecuteOnCPU([x]) || x.dtype === 'string') {\n const xTexData = backend.texData.get(x.dataId);\n const outValues = sliceImplCPU(\n xTexData.values as TypedArray, $begin, $size, x.shape, x.dtype);\n return backend.makeTensorInfo($size, x.dtype, outValues);\n }\n\n const {isPacked} = backend.texData.get(x.dataId);\n const isContinous = slice_util.isSliceContinous(x.shape, $begin, $size);\n if (isPacked || !isContinous) {\n const program = env().getBool('WEBGL_PACK_ARRAY_OPERATIONS') ?\n new SlicePackedProgram($size) :\n new SliceProgram($size);\n const customValues = [$begin];\n return backend.runWebGLProgram(program, [x], x.dtype, customValues);\n }\n backend.uploadToGPU(x.dataId);\n return shallowSlice(x, $begin, $size, backend);\n}\n\nexport const sliceConfig: KernelConfig = {\n kernelName: Slice,\n backendName: 'webgl',\n kernelFunc: slice as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, BatchToSpaceND, BatchToSpaceNDAttrs, BatchToSpaceNDInputs, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\n\nimport {reshape} from './Reshape';\nimport {slice} from './Slice';\nimport {transpose} from './Transpose';\n\nexport const batchToSpaceND = (args: {\n inputs: BatchToSpaceNDInputs,\n backend: MathBackendWebGL,\n attrs: BatchToSpaceNDAttrs\n}): TensorInfo => {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {blockShape, crops} = attrs;\n\n util.assert(\n x.shape.length <= 4,\n () => 'batchToSpaceND for rank > 4 with a WebGL backend not ' +\n 'implemented yet');\n const prod = blockShape.reduce((a, b) => a * b);\n\n const reshaped = backend_util.getReshaped(x.shape, blockShape, prod);\n const permuted = backend_util.getPermuted(reshaped.length, blockShape.length);\n const reshapedPermuted =\n backend_util.getReshapedPermuted(x.shape, blockShape, prod);\n const sliceBeginCoords =\n backend_util.getSliceBeginCoords(crops, blockShape.length);\n const sliceSize =\n backend_util.getSliceSize(reshapedPermuted, crops, blockShape.length);\n\n const toDispose = [];\n\n const reshapedIntermediate =\n reshape({inputs: {x}, backend, attrs: {shape: reshaped}});\n const transposedIntermediate = 
transpose(\n {inputs: {x: reshapedIntermediate}, backend, attrs: {perm: permuted}});\n const reshapedIntermediate2 = reshape({\n inputs: {x: transposedIntermediate},\n backend,\n attrs: {shape: reshapedPermuted}\n });\n const sliced = slice({\n inputs: {x: reshapedIntermediate2},\n backend,\n attrs: {begin: sliceBeginCoords, size: sliceSize}\n });\n\n toDispose.push(reshapedIntermediate);\n toDispose.push(transposedIntermediate);\n toDispose.push(reshapedIntermediate2);\n\n toDispose.forEach(t => backend.disposeIntermediateTensorInfo(t));\n\n return sliced;\n};\n\nexport const batchToSpaceNDConfig: KernelConfig = {\n kernelName: BatchToSpaceND,\n backendName: 'webgl',\n kernelFunc: batchToSpaceND as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Bincount, BincountAttrs, BincountInputs, KernelConfig, KernelFunc, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {bincountImplCPU} from '../kernel_utils/shared';\n\nexport function bincount(args: {\n inputs: BincountInputs,\n backend: MathBackendWebGL,\n attrs: BincountAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x, weights} = inputs;\n const {size} = attrs;\n\n const xVals = backend.readSync(x.dataId) as TypedArray;\n const weightsVals = backend.readSync(weights.dataId) as TypedArray;\n\n const outVals =\n bincountImplCPU(xVals, weightsVals, weights.dtype, weights.shape, size);\n\n return backend.makeTensorInfo([size], weights.dtype, outVals);\n}\n\nexport const bincountConfig: KernelConfig = {\n kernelName: Bincount,\n backendName: 'webgl',\n kernelFunc: bincount as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, BroadcastArgs, BroadcastArgsInputs, KernelConfig, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\nimport {MathBackendWebGL} from '../backend_webgl';\n\nexport function broadcastArgs(args: {\n inputs: BroadcastArgsInputs,\n backend: MathBackendWebGL,\n}): TensorInfo {\n const {inputs, backend} = args;\n const {s0, s1} = inputs;\n\n const s0Vals = backend.readSync(s0.dataId) as TypedArray;\n const s1Vals = backend.readSync(s1.dataId) as TypedArray;\n\n const broadcastShape = backend_util.assertAndGetBroadcastShape(\n Array.from(s0Vals), Array.from(s1Vals));\n\n return backend.makeTensorInfo(\n [broadcastShape.length], 'int32', Int32Array.from(broadcastShape));\n}\n\nexport const broadcastArgsConfig: KernelConfig = {\n kernelName: BroadcastArgs,\n backendName: 'webgl',\n kernelFunc: broadcastArgs\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {NotEqual} from '@tensorflow/tfjs-core';\nimport {KernelConfig} from '@tensorflow/tfjs-core';\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {notEqualImplCPU} from '../kernel_utils/shared';\n\nconst NOT_EQUAL = `return float(a != b);`;\n\nexport const notEqual = binaryKernelFunc(\n {opSnippet: NOT_EQUAL, cpuKernelImpl: notEqualImplCPU, dtype: 'bool'});\n\nexport const notEqualConfig: KernelConfig = {\n kernelName: NotEqual,\n backendName: 'webgl',\n kernelFunc: notEqual,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Real, RealInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {identity} from './Identity';\n\nexport function real(args: {inputs: RealInputs, backend: MathBackendWebGL}):\n TensorInfo {\n const {inputs, backend} = args;\n const {input} = inputs;\n const inputData = backend.texData.get(input.dataId);\n\n return identity({inputs: {x: inputData.complexTensorInfos.real}, backend});\n}\n\nexport const realConfig: KernelConfig = {\n kernelName: Real,\n backendName: 'webgl',\n kernelFunc: real as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {UnaryOpProgram} from '../unaryop_gpu';\n\nconst TO_INT = `return float(int(x));`;\n\nexport function int(input: TensorInfo, backend: MathBackendWebGL): TensorInfo {\n const program = new UnaryOpProgram(input.shape, TO_INT);\n const output = backend.runWebGLProgram(program, [input], 'int32');\n return {dataId: output.dataId, shape: output.shape, dtype: output.dtype};\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport * as tf from '@tensorflow/tfjs-core';\nimport {BinaryInputs, Cast, CastAttrs, CastInputs, KernelConfig, KernelFunc, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {castImplCPU} from '../kernel_utils/shared';\nimport {complex} from './Complex';\nimport {identity} from './Identity';\nimport {notEqual} from './NotEqual';\nimport {real} from './Real';\n\nimport {int} from '../kernel_utils/int';\n\nexport function cast(\n args: {inputs: CastInputs, backend: MathBackendWebGL, attrs: CastAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {dtype} = attrs;\n\n // Casting to complex64.\n if (dtype === 'complex64') {\n if (x.dtype === 'complex64') {\n return identity({inputs: {x}, backend});\n }\n\n // TODO(annxingyuan): Import kernel function once zeros is modularized.\n const zerosTensor = tf.zeros(x.shape);\n const floatX = cast({inputs: {x}, backend, attrs: {dtype: 'float32'}});\n\n const result =\n complex({inputs: {real: floatX, imag: zerosTensor}, backend});\n\n zerosTensor.dispose();\n backend.disposeIntermediateTensorInfo(floatX);\n\n return result;\n }\n\n // Casting from complex64\n if (x.dtype === 'complex64') {\n const realPart = real({inputs: {input: x}, backend});\n const result = cast({inputs: {x: realPart}, backend, attrs: {dtype}});\n backend.disposeIntermediateTensorInfo(realPart);\n return result;\n }\n\n if (!util.hasEncodingLoss(x.dtype, dtype)) {\n // We don't change the underlying data, since we cast to higher\n // precision.\n const result = identity({inputs: {x}, backend});\n return {dataId: result.dataId, shape: result.shape, dtype};\n }\n\n if (backend.shouldExecuteOnCPU([x])) {\n const values = backend.texData.get(x.dataId).values as TypedArray;\n const [resultShape, resultType, resultData] =\n castImplCPU(values, x.shape, x.dtype, dtype);\n return backend.makeTensorInfo(resultShape, resultType, resultData);\n }\n\n if (dtype === 'int32') {\n return int(x, backend);\n }\n\n if (dtype === 'bool') {\n const zerosTensorInfo = backend.makeTensorInfo(\n [], 'bool', util.getTypedArrayFromDType('bool', 1));\n\n const binaryInputs: BinaryInputs = {a: x, b: zerosTensorInfo};\n\n const result = notEqual({inputs: binaryInputs, backend}) as TensorInfo;\n backend.disposeIntermediateTensorInfo(zerosTensorInfo);\n return result;\n }\n\n throw new Error(`Error in Cast: failed to cast ${x.dtype} to ${dtype}`);\n}\n\nexport const castConfig: KernelConfig = {\n kernelName: Cast,\n backendName: 'webgl',\n kernelFunc: cast as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Ceil, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {ceilImplCPU} from '../kernel_utils/shared';\n\nconst CEIL = `return ceil(x);`;\n\nexport const ceil = unaryKernelFunc(\n {opSnippet: CEIL, packedOpSnippet: CEIL, cpuKernelImpl: ceilImplCPU});\n\nexport const ceilConfig: KernelConfig = {\n kernelName: Ceil,\n backendName: 'webgl',\n kernelFunc: ceil as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {UniformType} from './shader_compiler';\n\nexport class ClipProgram implements GPGPUProgram {\n variableNames = ['A'];\n userCode: string;\n outputShape: number[];\n customUniforms = [\n {name: 'minVal', type: 'float' as UniformType},\n {name: 'maxVal', type: 'float' as UniformType}\n ];\n\n constructor(aShape: number[]) {\n this.outputShape = aShape;\n this.userCode = `\n\n void main() {\n float value = getAAtOutCoords();\n if (isnan(value)) {\n setOutput(value);\n return;\n }\n\n setOutput(clamp(value, minVal, maxVal));\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {UniformType} from './shader_compiler';\n\nexport class ClipPackedProgram implements GPGPUProgram {\n variableNames = ['A'];\n packedInputs = true;\n packedOutput = true;\n userCode: string;\n outputShape: number[];\n customUniforms = [\n {name: 'minVal', type: 'float' as UniformType},\n {name: 'maxVal', type: 'float' as UniformType}\n ];\n\n constructor(aShape: number[]) {\n this.outputShape = aShape;\n this.userCode = `\n void main() {\n vec4 value = getAAtOutCoords();\n\n if (any(isnan(value))) {\n setOutput(value);\n return;\n }\n\n setOutput(clamp(value, vec4(minVal), vec4(maxVal)));\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ClipByValue, ClipByValueAttrs, ClipByValueInputs, env, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {ClipProgram} from '../clip_gpu';\nimport {ClipPackedProgram} from '../clip_packed_gpu';\n\nexport function clipByValue(args: {\n inputs: ClipByValueInputs,\n backend: MathBackendWebGL,\n attrs: ClipByValueAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {clipValueMin, clipValueMax} = attrs;\n\n let program;\n if (env().getBool('WEBGL_PACK_CLIP')) {\n program = new ClipPackedProgram(x.shape);\n } else {\n program = new ClipProgram(x.shape);\n }\n const customValues = [[clipValueMin], [clipValueMax]];\n return backend.runWebGLProgram(program, [x], x.dtype, customValues);\n}\n\nexport const clipByValueConfig: KernelConfig = {\n kernelName: ClipByValue,\n backendName: 'webgl',\n kernelFunc: clipByValue as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class ComplexAbsProgram implements GPGPUProgram {\n variableNames = ['real', 'imag'];\n userCode: string;\n outputShape: number[];\n\n constructor(shape: number[]) {\n this.outputShape = shape;\n this.userCode = `\n void main() {\n float re = abs(getRealAtOutCoords());\n float im = abs(getImagAtOutCoords());\n float mx = max(re, im);\n\n // sadly the length function in glsl is not underflow-safe\n // (at least not on Intel GPUs). So the safe solution is\n // to ensure underflow-safety in all cases.\n setOutput(\n mx == 0.0 ? 0.0 : mx * length(vec2(1, min(re, im)/mx))\n );\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ComplexAbs, ComplexAbsInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {ComplexAbsProgram} from '../complex_abs_gpu';\n\n// Returns a TensorInfo with the complex shape and the dataId of the\n// underlying part. We need to do this because a reshaped complex tensor is\n// not reflected in its parts.\nfunction makeComplexComponentTensorInfo(\n complexTensor: TensorInfo, complexPart: TensorInfo): TensorInfo {\n return {\n dataId: complexPart.dataId,\n dtype: complexPart.dtype,\n shape: complexTensor.shape\n };\n}\n\nexport function complexAbs(\n args: {inputs: ComplexAbsInputs, backend: MathBackendWebGL}): TensorInfo {\n const {inputs, backend} = args;\n const {x} = inputs;\n\n const xData = backend.texData.get(x.dataId);\n\n const program = new ComplexAbsProgram(x.shape);\n const programInputs = [\n makeComplexComponentTensorInfo(x, xData.complexTensorInfos.real),\n makeComplexComponentTensorInfo(x, xData.complexTensorInfos.imag),\n ];\n\n return backend.runWebGLProgram(\n program, programInputs, programInputs[0].dtype);\n}\n\nexport const complexAbsConfig: KernelConfig = {\n kernelName: ComplexAbs,\n backendName: 'webgl',\n kernelFunc: complexAbs as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class ConcatProgram implements GPGPUProgram {\n variableNames: string[];\n outputShape: number[] = [];\n userCode: string;\n\n // Concats 2d tensors along axis=1. See comments in MathBackendWebGL.concat().\n constructor(shapes: Array<[number, number]>) {\n this.outputShape = backend_util.computeOutShape(shapes, 1 /* axis */);\n this.variableNames = shapes.map((_, i) => `T${i}`);\n\n const offsets: number[] = new Array(shapes.length - 1);\n offsets[0] = shapes[0][1];\n for (let i = 1; i < offsets.length; i++) {\n offsets[i] = offsets[i - 1] + shapes[i][1];\n }\n\n const snippets = [`if (yC < ${offsets[0]}) setOutput(getT0(yR, yC));`];\n for (let i = 1; i < offsets.length; i++) {\n const shift = offsets[i - 1];\n snippets.push(\n `else if (yC < ${offsets[i]}) ` +\n `setOutput(getT${i}(yR, yC-${shift}));`);\n }\n const lastIndex = offsets.length;\n const lastShift = offsets[offsets.length - 1];\n snippets.push(`else setOutput(getT${lastIndex}(yR, yC-${lastShift}));`);\n\n this.userCode = `\n void main() {\n ivec2 coords = getOutputCoords();\n int yR = coords.x;\n int yC = coords.y;\n\n ${snippets.join('\\n ')}\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {getChannels} from './packing_util';\nimport {getCoordsDataType} from './shader_compiler';\n\nexport class ConcatPackedProgram implements GPGPUProgram {\n variableNames: string[];\n packedInputs = true;\n packedOutput = true;\n outputShape: number[] = [];\n userCode: string;\n\n constructor(shapes: number[][], axis: number) {\n this.outputShape = backend_util.computeOutShape(shapes, axis);\n const shape = this.outputShape;\n const rank = shape.length;\n const dtype = getCoordsDataType(rank);\n const coords = getChannels('coords', rank);\n const channels = ['x', 'y', 'z', 'w', 'u', 'v'].slice(0, rank);\n this.variableNames = shapes.map((_, i) => `T${i}`);\n\n const offsets: number[] = new Array(shapes.length - 1);\n offsets[0] = shapes[0][axis];\n for (let i = 1; i < offsets.length; i++) {\n offsets[i] = offsets[i - 1] + shapes[i][axis];\n }\n\n const channel = channels[axis];\n const lastChannels = channels.slice(-2);\n const allChannels = channels.join();\n\n let getValueSnippet = `if (${channel} < ${offsets[0]}) {\n return getChannel(\n getT0(${allChannels}), vec2(${lastChannels.join()}));\n }`;\n for (let i = 1; i < offsets.length; i++) {\n const shift = offsets[i - 1];\n // Note: the >= comparison below may seem unnecessary given the check\n // above but is needed to workaround branch execution issues on some\n // devices. 
It makes all the conditions exclusive without relying on\n // execution order.\n getValueSnippet += `\n if (${channel} < ${offsets[i]} && ${channel} >= ${offsets[i - 1]}) {\n return getChannel(\n getT${i}(${shiftedChannels(channels, channel, shift)}),\n vec2(${shiftedChannels(lastChannels, channel, shift)}));\n }`;\n }\n const lastIndex = offsets.length;\n const shift = offsets[offsets.length - 1];\n getValueSnippet += `\n return getChannel(\n getT${lastIndex}(${shiftedChannels(channels, channel, shift)}),\n vec2(${shiftedChannels(lastChannels, channel, shift)}));`;\n\n this.userCode = `\n float getValue(${channels.map(x => 'int ' + x)}) {\n ${getValueSnippet}\n }\n\n void main() {\n ${dtype} coords = getOutputCoords();\n vec4 result = vec4(getValue(${coords}), 0., 0., 0.);\n\n ${coords[rank - 1]} = ${coords[rank - 1]} + 1;\n if (${coords[rank - 1]} < ${shape[rank - 1]}) {\n result.g = getValue(${coords});\n }\n\n ${coords[rank - 2]} = ${coords[rank - 2]} + 1;\n if (${coords[rank - 2]} < ${shape[rank - 2]}) {\n result.a = getValue(${coords});\n }\n\n ${coords[rank - 1]} = ${coords[rank - 1]} - 1;\n if (${coords[rank - 2]} < ${shape[rank - 2]} &&\n ${coords[rank - 1]} < ${shape[rank - 1]}) {\n result.b = getValue(${coords});\n }\n setOutput(result);\n }\n `;\n }\n}\n\n/**\n * Return an expression for coordinates into a vector where a given channel\n * will be offset by [shift].\n *\n * @param channels the channels to consider\n * @param channel the channel we want shifted\n * @param shift the amount to subtract from the channel.\n *\n * @returns a string of the form 'x, y-[shift], z' where any one channel can\n * have the shift applied.\n */\nfunction shiftedChannels(channels: string[], channel: string, shift: number) {\n const channelIdx = channels.indexOf(channel);\n const res = channels.map((c, idx) => {\n if (idx === channelIdx) {\n return `${c} - ${shift}`;\n } else {\n return c;\n }\n });\n return res.join();\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Imag, ImagInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {identity} from './Identity';\n\nexport function imag(args: {inputs: ImagInputs, backend: MathBackendWebGL}):\n TensorInfo {\n const {inputs, backend} = args;\n const {input} = inputs;\n const inputData = backend.texData.get(input.dataId);\n\n return identity({inputs: {x: inputData.complexTensorInfos.imag}, backend});\n}\n\nexport const imagConfig: KernelConfig = {\n kernelName: Imag,\n backendName: 'webgl',\n kernelFunc: imag as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, ConcatInputs, env, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {ConcatProgram} from '../concat_gpu';\nimport {ConcatPackedProgram} from '../concat_packed_gpu';\nimport {concatImplCPU} from '../kernel_utils/shared';\nimport {CLONE, UnaryOpProgram} from '../unaryop_gpu';\nimport {UnaryOpPackedProgram} from '../unaryop_packed_gpu';\n\nimport {complex} from './Complex';\nimport {imag} from './Imag';\nimport {real} from './Real';\nimport {reshape} from './Reshape';\n\nexport function concatImpl(\n inputs: ConcatInputs, axis: number, backend: MathBackendWebGL): TensorInfo {\n const dtype = inputs[0].dtype;\n if (dtype === 'complex64') {\n const reals = inputs.map((t) => real({inputs: {input: t}, backend}));\n const imags = inputs.map((t) => imag({inputs: {input: t}, backend}));\n\n const realConcated = concatImpl(reals, axis, backend);\n const imagConcated = concatImpl(imags, axis, backend);\n\n const result =\n complex({inputs: {real: realConcated, imag: imagConcated}, backend});\n\n reals.forEach(r => backend.disposeIntermediateTensorInfo(r));\n imags.forEach(i => backend.disposeIntermediateTensorInfo(i));\n backend.disposeIntermediateTensorInfo(realConcated);\n backend.disposeIntermediateTensorInfo(imagConcated);\n\n return result;\n }\n\n let runOnCpu = backend.shouldExecuteOnCPU(inputs);\n\n // Run on cpu if dtype is string. For string, the backend represents it\n // as Uint8Array[], where each Uint8Array is a character. Given that the\n // computation is only on the outer array, uploading the whole data onto\n // gpu is wasteful. Also, currently webgl doesn't have a design to\n // upload and retrieve Uint8Array[] between cpu and gpu. Therefore, we\n // just run the kernel on cpu if dtype is string.\n if (dtype === 'string') {\n runOnCpu = true;\n }\n\n if (runOnCpu) {\n // Any concat of n-dimensional tensors across any axis can be reduced to\n // a concatenation of two-dimensional tensors across the axis 1 by first\n // partitioning the axes of the original tensors into those less than the\n // axis to be concatenated and the rest. 
Then reshape the tensors\n // into a two-dimensional tensor by collapsing these two sets of axes and\n // concatenate the resulting matrices across the axis 1, finally reshaping\n // the result to have the proper shape.\n const tensors2D = inputs.map(t => {\n const innerSize = util.sizeFromShape(t.shape.slice(axis));\n const shape = [-1, innerSize];\n return reshape({inputs: {x: t}, backend, attrs: {shape}});\n });\n\n const inputsValShapes = tensors2D.map(t => {\n return {vals: backend.readSync(t.dataId), shape: t.shape};\n });\n\n // Concats 2d tensors along axis=1.\n const outShape =\n backend_util.computeOutShape(tensors2D.map(t => t.shape), 1 /* axis */);\n const simplyConcat = tensors2D[0].shape[0] === 1;\n const outVals =\n concatImplCPU(inputsValShapes, outShape, dtype, simplyConcat);\n\n const finalOutShape =\n backend_util.computeOutShape(inputs.map(t => t.shape), axis);\n\n const outInfo = backend.makeTensorInfo(finalOutShape, dtype, outVals);\n\n tensors2D.forEach(t => backend.disposeIntermediateTensorInfo(t));\n\n return outInfo;\n }\n\n // Keep only non-empty tensors (ignore tensors with 0 in their shape).\n const $inputs = inputs.filter(t => util.sizeFromShape(t.shape) > 0);\n\n const shouldPack: boolean = env().getBool('WEBGL_PACK_ARRAY_OPERATIONS') &&\n $inputs[0].shape.length > 1;\n\n if ($inputs.length === 1) {\n // Clone tensor.\n const program = shouldPack ?\n new UnaryOpProgram(inputs[0].shape, CLONE) :\n new UnaryOpPackedProgram(inputs[0].shape, CLONE);\n return backend.runWebGLProgram(program, inputs, dtype);\n }\n\n const maxTexturesInShader = env().getNumber('WEBGL_MAX_TEXTURES_IN_SHADER');\n if ($inputs.length > maxTexturesInShader) {\n const reducedInputs = [];\n for (let i = 0; i < $inputs.length; i += maxTexturesInShader) {\n const subArray = $inputs.slice(i, i + maxTexturesInShader);\n reducedInputs.push(concatImpl(subArray, axis, backend));\n }\n const result = concatImpl(reducedInputs, axis, backend);\n\n for (const i of reducedInputs) {\n backend.disposeIntermediateTensorInfo(i);\n }\n\n return result;\n }\n\n if (shouldPack) {\n const program = new ConcatPackedProgram($inputs.map(t => t.shape), axis);\n return backend.runWebGLProgram(program, $inputs, dtype);\n }\n\n const {tensors2D, outShape} = computeTensors2D($inputs, axis, backend);\n const program =\n new ConcatProgram(tensors2D.map(t => t.shape as [number, number]));\n const result = backend.runWebGLProgram(program, tensors2D, dtype);\n\n tensors2D.forEach(r => backend.disposeIntermediateTensorInfo(r));\n const reshapedResult =\n reshape({inputs: {x: result}, attrs: {shape: outShape}, backend});\n backend.disposeIntermediateTensorInfo(result);\n\n return reshapedResult;\n}\n\nfunction computeTensors2D(\n inputs: ConcatInputs, axis: number, backend: MathBackendWebGL) {\n // Any concat of n-dimensional tensors across any axis can be reduced to\n // a concatenation of two-dimensional tensors across the axis 1 by first\n // partitioning the axes of the original tensors into those less than the\n // axis to be concatenated and the rest. 
Then reshape the tensors\n // into a two-dimensional tensor by collapsing these two sets of axes and\n // concatenate the resulting matrices across the axis 1, finally reshaping\n // the result to have the proper shape.\n const outShape = backend_util.computeOutShape(inputs.map(t => t.shape), axis);\n const tensors2D = inputs.map(\n x => reshape({\n inputs: {x},\n attrs: {shape: [-1, util.sizeFromShape(x.shape.slice(axis))]},\n backend\n }));\n\n return {tensors2D, outShape};\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Concat, ConcatAttrs, ConcatInputs, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {concatImpl} from './Concat_impl';\nimport {identity} from './Identity';\n\nexport function concat(\n args:\n {inputs: ConcatInputs, attrs: ConcatAttrs, backend: MathBackendWebGL}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {axis} = attrs;\n\n const $axis = util.parseAxisParam(axis, inputs[0].shape)[0];\n\n const shapes = inputs.map(t => t.shape);\n backend_util.assertParamsConsistent(shapes, $axis);\n\n const outShape =\n backend_util.computeOutShape(inputs.map(t => t.shape), $axis);\n\n if (util.sizeFromShape(outShape) === 0) {\n return backend.makeTensorInfo(outShape, inputs[0].dtype, []);\n }\n\n // Keep only non-empty tensors (ignore tensors with 0 in their shape).\n const $inputs = inputs.filter(t => util.sizeFromShape(t.shape) > 0);\n if ($inputs.length === 1) {\n return identity({inputs: {x: $inputs[0]}, backend});\n }\n\n return concatImpl($inputs, $axis, backend);\n}\n\nexport const concatConfig: KernelConfig = {\n kernelName: Concat,\n backendName: 'webgl',\n kernelFunc: concat as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. 
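The concatImpl / computeTensors2D code above reduces any N-D concat to a 2-D concat along axis 1 by collapsing the dimensions from the concat axis onward. A minimal sketch of that reduction using the public tfjs API (tf.reshape / tf.concat) rather than the WebGL kernels; the helper name is ours and assumes the inputs already agree on all dimensions except the concat axis:

import * as tf from '@tensorflow/tfjs';

// Sketch only: reduce an N-D concat along `axis` to a 2-D concat along axis 1.
function concatViaReshape(inputs: tf.Tensor[], axis: number): tf.Tensor {
  // Collapse the dimensions from `axis` onward into a single inner dimension.
  const tensors2D = inputs.map(t => {
    const innerSize = t.shape.slice(axis).reduce((a, b) => a * b, 1);
    return tf.reshape(t, [-1, innerSize]);
  });
  // Concatenate the 2-D views column-wise, then restore the full output shape.
  const flat = tf.concat(tensors2D, 1);
  const outShape = inputs[0].shape.slice();
  outShape[axis] = inputs.reduce((sum, t) => sum + t.shape[axis], 0);
  return tf.reshape(flat, outShape);
}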
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class Conv2DProgram implements GPGPUProgram {\n variableNames = ['x', 'W'];\n outputShape: number[];\n userCode: string;\n\n constructor(\n convInfo: backend_util.Conv2DInfo, addBias = false,\n activation: string = null, hasPreluActivationWeights = false,\n hasLeakyreluAlpha = false) {\n this.outputShape = convInfo.outShape;\n const padTop = convInfo.padInfo.top;\n const padLeft = convInfo.padInfo.left;\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const dilationHeight = convInfo.dilationHeight;\n const dilationWidth = convInfo.dilationWidth;\n const filterHeight = convInfo.filterHeight;\n const filterWidth = convInfo.filterWidth;\n\n const inputDepthNearestVec4 = Math.floor(convInfo.inChannels / 4) * 4;\n const inputDepthVec4Remainder = convInfo.inChannels % 4;\n const isChannelsLast = convInfo.dataFormat === 'channelsLast';\n\n const rowDim = isChannelsLast ? 1 : 2;\n const colDim = isChannelsLast ? 2 : 3;\n const channelDim = isChannelsLast ? 3 : 1;\n\n let activationSnippet = '', applyActivationSnippet = '';\n if (activation) {\n if (hasPreluActivationWeights) {\n activationSnippet = `float activation(float a) {\n float b = getPreluActivationWeightsAtOutCoords();\n ${activation}\n }`;\n } else if (hasLeakyreluAlpha) {\n activationSnippet = `float activation(float a) {\n float b = getLeakyreluAlphaAtOutCoords();\n ${activation}\n }`;\n } else {\n activationSnippet = `\n float activation(float x) {\n ${activation}\n }\n `;\n }\n\n applyActivationSnippet = `result = activation(result);`;\n }\n\n const addBiasSnippet = addBias ? 'result += getBiasAtOutCoords();' : '';\n if (addBias) {\n this.variableNames.push('bias');\n }\n\n if (hasPreluActivationWeights) {\n this.variableNames.push('preluActivationWeights');\n }\n\n if (hasLeakyreluAlpha) {\n this.variableNames.push('leakyreluAlpha');\n }\n\n this.userCode = `\n ${activationSnippet}\n\n const ivec2 strides = ivec2(${strideHeight}, ${strideWidth});\n const ivec2 pads = ivec2(${padTop}, ${padLeft});\n\n void main() {\n ivec4 coords = getOutputCoords();\n int batch = coords[0];\n int d2 = coords[${channelDim}];\n\n ivec2 xRCCorner =\n ivec2(coords[${rowDim}], coords[${colDim}]) * strides - pads;\n int xRCorner = xRCCorner.x;\n int xCCorner = xRCCorner.y;\n\n // Convolve x(?, ?, d1) with w(:, :, d1, d2) to get y(yR, yC, d2).\n // ? = to be determined. 
: = across all values in that axis.\n float dotProd = 0.0;\n for (int wR = 0; wR < ${filterHeight}; wR++) {\n int xR = xRCorner + wR * ${dilationHeight};\n\n if (xR < 0 || xR >= ${convInfo.inHeight}) {\n continue;\n }\n\n for (int wC = 0; wC < ${filterWidth}; wC++) {\n int xC = xCCorner + wC * ${dilationWidth};\n\n if (xC < 0 || xC >= ${convInfo.inWidth}) {\n continue;\n }\n\n for (int d1 = 0; d1 < ${inputDepthNearestVec4}; d1 += 4) {\n vec4 wValues = vec4(\n getW(wR, wC, d1, d2),\n getW(wR, wC, d1 + 1, d2),\n getW(wR, wC, d1 + 2, d2),\n getW(wR, wC, d1 + 3, d2)\n );\n\n if (${isChannelsLast}) {\n vec4 xValues = vec4(\n getX(batch, xR, xC, d1),\n getX(batch, xR, xC, d1 + 1),\n getX(batch, xR, xC, d1 + 2),\n getX(batch, xR, xC, d1 + 3)\n );\n dotProd += dot(xValues, wValues);\n } else {\n vec4 xValues = vec4(\n getX(batch, d1, xR, xC),\n getX(batch, d1 + 1, xR, xC),\n getX(batch, d1 + 2, xR, xC),\n getX(batch, d1 + 3, xR, xC)\n );\n dotProd += dot(xValues, wValues);\n }\n }\n\n if (${inputDepthVec4Remainder === 1}) {\n\n if (${isChannelsLast}) {\n dotProd +=\n getX(batch, xR, xC, ${inputDepthNearestVec4}) *\n getW(wR, wC, ${inputDepthNearestVec4}, d2);\n } else {\n dotProd +=\n getX(batch, ${inputDepthNearestVec4}, xR, xC) *\n getW(wR, wC, ${inputDepthNearestVec4}, d2);\n }\n\n } else if (${inputDepthVec4Remainder === 2}) {\n vec2 wValues = vec2(\n getW(wR, wC, ${inputDepthNearestVec4}, d2),\n getW(wR, wC, ${inputDepthNearestVec4} + 1, d2)\n );\n\n if (${isChannelsLast}) {\n vec2 xValues = vec2(\n getX(batch, xR, xC, ${inputDepthNearestVec4}),\n getX(batch, xR, xC, ${inputDepthNearestVec4} + 1)\n );\n dotProd += dot(xValues, wValues);\n } else {\n vec2 xValues = vec2(\n getX(batch, ${inputDepthNearestVec4}, xR, xC),\n getX(batch, ${inputDepthNearestVec4} + 1, xR, xC)\n );\n dotProd += dot(xValues, wValues);\n }\n\n } else if (${inputDepthVec4Remainder === 3}) {\n vec3 wValues = vec3(\n getW(wR, wC, ${inputDepthNearestVec4}, d2),\n getW(wR, wC, ${inputDepthNearestVec4} + 1, d2),\n getW(wR, wC, ${inputDepthNearestVec4} + 2, d2)\n );\n\n if (${isChannelsLast}) {\n vec3 xValues = vec3(\n getX(batch, xR, xC, ${inputDepthNearestVec4}),\n getX(batch, xR, xC, ${inputDepthNearestVec4} + 1),\n getX(batch, xR, xC, ${inputDepthNearestVec4} + 2)\n );\n dotProd += dot(xValues, wValues);\n } else {\n vec3 xValues = vec3(\n getX(batch, ${inputDepthNearestVec4}, xR, xC),\n getX(batch, ${inputDepthNearestVec4} + 1, xR, xC),\n getX(batch, ${inputDepthNearestVec4} + 2, xR, xC)\n );\n dotProd += dot(xValues, wValues);\n }\n\n }\n }\n }\n\n float result = dotProd;\n ${addBiasSnippet}\n ${applyActivationSnippet}\n setOutput(result);\n }\n `;\n }\n}\n\nexport class Conv3DProgram implements GPGPUProgram {\n variableNames = ['x', 'W'];\n outputShape: number[];\n userCode: string;\n\n constructor(convInfo: backend_util.Conv3DInfo) {\n this.outputShape = convInfo.outShape;\n const padFront = convInfo.padInfo.front;\n const padTop = convInfo.padInfo.top;\n const padLeft = convInfo.padInfo.left;\n const strideDepth = convInfo.strideDepth;\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const dilationDepth = convInfo.dilationDepth;\n const dilationHeight = convInfo.dilationHeight;\n const dilationWidth = convInfo.dilationWidth;\n const filterDepth = convInfo.filterDepth;\n const filterHeight = convInfo.filterHeight;\n const filterWidth = convInfo.filterWidth;\n\n const inputDepthNearestVec4 = Math.floor(convInfo.inChannels / 4) * 4;\n const inputDepthVec4Remainder = 
convInfo.inChannels % 4;\n\n this.userCode = `\n const ivec3 strides = ivec3(${strideDepth}, ${strideHeight}, ${\n strideWidth});\n const ivec3 pads = ivec3(${padFront}, ${padTop}, ${padLeft});\n\n void main() {\n ivec5 coords = getOutputCoords();\n int batch = coords.x;\n int d2 = coords.u;\n\n ivec3 xFRCCorner = ivec3(coords.y, coords.z, coords.w) * strides - pads;\n int xFCorner = xFRCCorner.x;\n int xRCorner = xFRCCorner.y;\n int xCCorner = xFRCCorner.z;\n\n // Convolve x(?, ?, ?, d1) with w(:, :, :, d1, d2) to get\n // y(yF, yR, yC, d2). ? = to be determined. : = across all\n // values in that axis.\n float dotProd = 0.0;\n for (int wF = 0; wF < ${filterDepth}; wF++) {\n int xF = xFCorner + wF * ${dilationDepth};\n\n if (xF < 0 || xF >= ${convInfo.inDepth}) {\n continue;\n }\n\n for (int wR = 0; wR < ${filterHeight}; wR++) {\n int xR = xRCorner + wR * ${dilationHeight};\n\n if (xR < 0 || xR >= ${convInfo.inHeight}) {\n continue;\n }\n\n for (int wC = 0; wC < ${filterWidth}; wC++) {\n int xC = xCCorner + wC * ${dilationWidth};\n\n if (xC < 0 || xC >= ${convInfo.inWidth}) {\n continue;\n }\n\n for (int d1 = 0; d1 < ${inputDepthNearestVec4}; d1 += 4) {\n vec4 xValues = vec4(\n getX(batch, xF, xR, xC, d1),\n getX(batch, xF, xR, xC, d1 + 1),\n getX(batch, xF, xR, xC, d1 + 2),\n getX(batch, xF, xR, xC, d1 + 3)\n );\n vec4 wValues = vec4(\n getW(wF, wR, wC, d1, d2),\n getW(wF, wR, wC, d1 + 1, d2),\n getW(wF, wR, wC, d1 + 2, d2),\n getW(wF, wR, wC, d1 + 3, d2)\n );\n\n dotProd += dot(xValues, wValues);\n }\n\n if (${inputDepthVec4Remainder === 1}) {\n dotProd +=\n getX(batch, xF, xR, xC, ${inputDepthNearestVec4}) *\n getW(wF, wR, wC, ${inputDepthNearestVec4}, d2);\n } else if (${inputDepthVec4Remainder === 2}) {\n vec2 xValues = vec2(\n getX(batch, xF, xR, xC, ${inputDepthNearestVec4}),\n getX(batch, xF, xR, xC, ${inputDepthNearestVec4} + 1)\n );\n vec2 wValues = vec2(\n getW(wF, wR, wC, ${inputDepthNearestVec4}, d2),\n getW(wF, wR, wC, ${inputDepthNearestVec4} + 1, d2)\n );\n dotProd += dot(xValues, wValues);\n } else if (${inputDepthVec4Remainder === 3}) {\n vec3 xValues = vec3(\n getX(batch, xF, xR, xC, ${inputDepthNearestVec4}),\n getX(batch, xF, xR, xC, ${inputDepthNearestVec4} + 1),\n getX(batch, xF, xR, xC, ${inputDepthNearestVec4} + 2)\n );\n vec3 wValues = vec3(\n getW(wF, wR, wC, ${inputDepthNearestVec4}, d2),\n getW(wF, wR, wC, ${inputDepthNearestVec4} + 1, d2),\n getW(wF, wR, wC, ${inputDepthNearestVec4} + 2, d2)\n );\n dotProd += dot(xValues, wValues);\n }\n }\n }\n }\n setOutput(dotProd);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2022 Google LLC. 
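Conv2DProgram and Conv3DProgram above both accumulate each output's dot product over input channels in groups of four (inputDepthNearestVec4) and then handle the one to three leftover channels separately. A plain-TypeScript sketch of that accumulation pattern, for illustration only:

// Sketch only: dot product over channels, processed in groups of 4 plus a
// remainder, mirroring the vec4 loop and remainder branches in the shaders.
function dotOverChannels(x: Float32Array, w: Float32Array): number {
  const inChannels = x.length;
  const nearestVec4 = Math.floor(inChannels / 4) * 4;
  let dotProd = 0;
  for (let d = 0; d < nearestVec4; d += 4) {
    dotProd += x[d] * w[d] + x[d + 1] * w[d + 1] +
               x[d + 2] * w[d + 2] + x[d + 3] * w[d + 3];
  }
  for (let d = nearestVec4; d < inChannels; d++) {
    dotProd += x[d] * w[d];  // 1 to 3 remaining channels
  }
  return dotProd;
}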
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\n import {backend_util, util} from '@tensorflow/tfjs-core';\n\n import {GPGPUProgram, useShapeUniforms} from './gpgpu_math';\n\n export class Conv2DPackedProgram implements GPGPUProgram {\n variableNames = ['x', 'W'];\n packedInputs = true;\n packedOutput = true;\n outputShape: number[];\n userCode: string;\n enableShapeUniforms: boolean;\n customUniforms = [\n {name: 'pads', type: 'ivec2' as const },\n {name: 'strides', type: 'ivec2' as const },\n {name: 'dilations', type: 'ivec2' as const },\n {name: 'inDims', type: 'ivec2' as const },\n ];\n\n constructor(\n convInfo: backend_util.Conv2DInfo, addBias = false,\n activation: string = null, hasPreluActivation = false,\n hasLeakyReluAlpha = false) {\n this.outputShape = convInfo.outShape;\n this.enableShapeUniforms = useShapeUniforms(this.outputShape.length);\n const padLeft = convInfo.padInfo.left;\n const strideWidth = convInfo.strideWidth;\n const dilationWidth = convInfo.dilationWidth;\n const filterHeight = convInfo.filterHeight;\n const filterWidth = convInfo.filterWidth;\n const texelsAcross = filterWidth;\n\n let mainLoop = `\n int xR; int xC; int xCOffset;\n vec4 wTexel; vec4 previous; vec4 final;`;\n\n for (let c = 0; c < filterWidth; c++) {\n mainLoop += `\n vec4 xTexelC${c * 2};\n int xTexelC${c * 2}Ready;\n vec4 xTexelC${c * 2 + 1};\n int xTexelC${c * 2 + 1}Ready;\n vec4 xC${c};`;\n }\n\n /**\n * This vectorized implementation works by gathering the values needed for\n * each output channel's dot product into vec4's and then multiplying them\n * all together (this happens in the final double for-loop below). 
Most of\n * the main loop consists of constructing these vec4's with the minimum\n * number of texture2D calls, which means making use of all four returned\n * values from a texture2D call at once.\n */\n mainLoop += `\n for (int r = 0; r < ${filterHeight}; r++) {\n for (int d1 = 0; d1 < ${convInfo.inChannels}; d1 += 2) {\n `;\n for (let c = 0; c < filterWidth; c++) {\n mainLoop += `\n xTexelC${c * 2} = vec4(0.0);\n xTexelC${c * 2}Ready = 0;\n xTexelC${c * 2 + 1} = vec4(0.0);\n xTexelC${c * 2 + 1}Ready = 0;\n xC${c} = vec4(0.0);`;\n }\n mainLoop += `\n xR = xRCorner + r * dilations[0];\n if (xR >=0 && xR < inDims[0]) {\n `;\n\n for (let texelC = 0; texelC < (texelsAcross + 1) / 2; texelC++) {\n const colIndex = texelC * 2;\n\n mainLoop += `\n xC = xCCorner + ${colIndex * dilationWidth};\n `;\n\n if (strideWidth === 1) {\n if (colIndex < filterWidth) {\n // If padding is odd, the outer texels have to be composed.\n if (padLeft % 2 === 1) {\n // TODO: Ensure vec4 previous does not result in redundant sample,\n // and avoid setting xTexelRC's that exceed the boundary in the\n // first place rather than resetting them to vec4(0)).\n\n // To compute xCOffset:\n // - If padding is odd, we must add 1 to ensure we ask for an\n // even-numbered row.\n // - We subtract 2 to access the previous texel.\n\n mainLoop += `\n xCOffset = xC + 1;\n if (xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${\n colIndex}Ready == 0) {\n xTexelC${colIndex} = getX(batch, xR, xCOffset, d1);\n\n // Need to manually clear unused channels in case\n // we're reading from recycled texture.\n if (xCOffset + 1 >= inDims[1]) {\n xTexelC${colIndex}.zw = vec2(0.0);\n }\n xTexelC${colIndex}Ready = 1;\n }\n `;\n // This texel has been read in previous iteration if the dilation\n // is 1.\n if (dilationWidth === 1 && colIndex > 0) {\n mainLoop += `\n xC${colIndex} = vec4(xTexelC${colIndex - 2}.zw, xTexelC${\n colIndex}.xy);\n `;\n } else {\n mainLoop += `\n xCOffset = xC + 1 - 2;\n\n if (xCOffset >= 0 && xCOffset < inDims[1]) {\n previous = getX(batch, xR, xCOffset, d1);\n\n // Need to manually clear unused channels in case\n // we're reading from recycled texture.\n if (xCOffset + 1 >= inDims[1]) {\n previous.zw = vec2(0.0);\n }\n\n xC${colIndex} = vec4(previous.zw, xTexelC${colIndex}.xy);\n } else {\n xC${colIndex} = vec4(0.0, 0.0, xTexelC${colIndex}.xy);\n }\n `;\n }\n } else {\n // Padding is even, so xRC corresponds to a single texel.\n mainLoop += `\n if (xC >= 0 && xC < inDims[1] && xTexelC${colIndex}Ready == 0) {\n xTexelC${colIndex} = getX(batch, xR, xC, d1);\n if (xC + 1 >= inDims[1]) {\n xTexelC${colIndex}.zw = vec2(0.0);\n }\n xTexelC${colIndex}Ready = 1;\n }\n\n xC${colIndex} = xTexelC${colIndex};\n `;\n }\n\n if (colIndex + 1 < filterWidth) {\n // If dilation is even, the second entry should match the first\n // (either both are composed or both are single samples). 
But if\n // dilation is odd, then the second entry should be the opposite\n // of the first (if the first is composed, the second is a single\n // sample, and vice versa.)\n\n const nextTexelOffset = padLeft % 2 === 0 ?\n util.nearestLargerEven(dilationWidth) :\n dilationWidth;\n\n if ((dilationWidth % 2 === 0 && padLeft % 2 === 1) ||\n (dilationWidth % 2 !== 0 && padLeft % 2 !== 1)) {\n mainLoop += `\n xCOffset = xC + imod(pads[1], 2) + ${nextTexelOffset};\n\n if (xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${\n colIndex + 1}Ready == 0) {\n xTexelC${colIndex + 1} = getX(batch, xR, xCOffset, d1);\n\n // Need to manually clear unused channels in case\n // we're reading from recycled texture.\n if (xCOffset + 1 >= inDims[1]) {\n xTexelC${colIndex + 1}.zw = vec2(0.0);\n }\n xTexelC${colIndex + 1}Ready = 1;\n }\n `;\n\n // If dilation > 1 then the xRC's will not be able to share any\n // values, so each xRC will require two unique calls to getX.\n if (dilationWidth > 1) {\n mainLoop += `\n xCOffset -= 2;\n if (xCOffset >= 0 && xCOffset < inDims[1]) {\n previous = getX(batch, xR, xCOffset, d1);\n xC${colIndex + 1} = vec4(previous.zw, xTexelC${\n colIndex + 1}.xy);\n } else {\n xC${colIndex + 1} = vec4(0.0, 0.0, xTexelC${\n colIndex + 1}.xy);\n }\n `;\n } else {\n mainLoop += `\n xC${colIndex + 1} = vec4(xTexelC${colIndex}.zw, xTexelC${\n colIndex + 1}.xy);\n `;\n }\n\n } else {\n // If dilation is 1 and padding is odd, we have already read the\n // texel when constructing the previous x value. Here we can\n // simply skip the texture read.\n if (nextTexelOffset === 1) {\n mainLoop += `\n xC${colIndex + 1} = xTexelC${colIndex};\n `;\n } else {\n mainLoop += `\n xCOffset = xC + ${nextTexelOffset};\n\n if (xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${\n colIndex + 1}Ready == 0) {\n xTexelC${colIndex + 1} = getX(batch, xR, xCOffset, d1);\n if (xCOffset + 1 >= inDims[1]) {\n xTexelC${colIndex + 1}.zw = vec2(0.0);\n }\n xTexelC${colIndex + 1}Ready = 1;\n }\n\n xC${colIndex + 1} = xTexelC${colIndex + 1};\n `;\n }\n }\n }\n }\n } else { // stride === 2\n if (colIndex < filterWidth) {\n // Depending on whether padLeft is even or odd, we want either the\n // xy or zw channels from X texels for xC${colIndex}. If padLeft is\n // even, xC${colIndex +1} is simply the zw channels of texels we've\n // already sampled. 
But if padLeft is odd, xC{$c + 1}.zw will\n // need to come from the xy channels of a new texel, hence the `\n // vec4\n // final` initialized below.\n if (padLeft % 2 === 1) {\n mainLoop += `\n xCOffset = xC + 1 - strides[1];\n if(xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${\n colIndex}Ready == 0) {\n xTexelC${colIndex} = getX(batch, xR, xCOffset, d1);\n // Need to manually clear unused channels in case\n // we're reading from recycled texture.\n if (xCOffset + 1 >= inDims[1]) {\n xTexelC${colIndex}.zw = vec2(0.0);\n }\n xTexelC${colIndex}Ready = 1;\n }\n\n if(xC + 1 >= 0 && xC + 1 < inDims[1] && xTexelC${\n colIndex + 1}Ready == 0) {\n xTexelC${colIndex + 1} = getX(batch, xR, xC + 1, d1);\n // Need to manually clear unused channels in case\n // we're reading from recycled texture.\n if (xC + 2 >= inDims[1]) {\n xTexelC${colIndex + 1}.zw = vec2(0.0);\n }\n xTexelC${colIndex + 1}Ready = 1;\n }\n\n xC${colIndex} = vec4(xTexelC${colIndex}.zw, xTexelC${\n colIndex + 1}.zw);\n `;\n\n if (colIndex + 1 < filterWidth) {\n mainLoop += `\n final = vec4(0.0);\n xCOffset = xC + 1 + strides[1];\n if(xCOffset >= 0 && xCOffset < inDims[1]) {\n final = getX(batch, xR, xCOffset, d1);\n }\n xC${colIndex + 1} = vec4(xTexelC${colIndex + 1}.xy, final.xy);\n `;\n }\n } else {\n mainLoop += `\n if(xC >= 0 && xC < inDims[1] && xTexelC${colIndex}Ready == 0) {\n xTexelC${colIndex} = getX(batch, xR, xC, d1);\n if (xC + 1 >= inDims[1]) {\n xTexelC${colIndex}.zw = vec2(0.0);\n }\n xTexelC${colIndex}Ready = 1;\n }\n\n xCOffset = xC + strides[1];\n if(xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${\n colIndex + 1}Ready == 0) {\n xTexelC${colIndex + 1} = getX(batch, xR, xCOffset, d1);\n if (xCOffset + 1 >= inDims[1]) {\n xTexelC${colIndex + 1}.zw = vec2(0.);\n }\n xTexelC${colIndex + 1}Ready = 1;\n }\n\n xC${colIndex} = vec4(\n xTexelC${colIndex}.xy, xTexelC${colIndex + 1}.xy);\n `;\n\n if (colIndex + 1 < filterWidth) {\n mainLoop += `\n xC${colIndex + 1} = vec4(xTexelC${colIndex}.zw, xTexelC${\n colIndex + 1}.zw);\n `;\n }\n }\n }\n }\n\n // localize the dotProd accumulation within the loop, the theory is for\n // GPU with limited cache, accumulate sum across large amount of\n // veriables will cause lots of cache misses. (i.e. 5x5 filter will have\n // 50 variables)\n if (colIndex < filterWidth) {\n mainLoop += `\n wTexel = getW(r, ${colIndex}, d1, d2);\n dotProd += xC${colIndex}.xxzz * vec4(wTexel.xy, wTexel.xy);\n if(d1 + 1 < ${convInfo.inChannels}) {\n dotProd += xC${colIndex}.yyww * vec4(wTexel.zw, wTexel.zw);\n }\n `;\n\n if (colIndex + 1 < filterWidth) {\n mainLoop += `\n wTexel = getW(r, ${colIndex + 1}, d1, d2);\n dotProd += xC${colIndex + 1}.xxzz * vec4(wTexel.xy, wTexel.xy);\n if(d1 + 1 < ${convInfo.inChannels}) {\n dotProd += xC${colIndex + 1}.yyww * vec4(wTexel.zw, wTexel.zw);\n }\n `;\n }\n }\n }\n mainLoop += `\n }\n `;\n mainLoop += `\n }\n `;\n mainLoop += `\n }\n `;\n\n let activationSnippet = '', applyActivationSnippet = '';\n if (activation) {\n if (hasPreluActivation) {\n activationSnippet = `vec4 activation(vec4 a) {\n vec4 b = getPreluActivationWeightsAtOutCoords();\n ${activation}\n }`;\n } else if (hasLeakyReluAlpha) {\n activationSnippet = `vec4 activation(vec4 a) {\n vec4 b = getLeakyreluAlphaAtOutCoords();\n ${activation}\n }`;\n } else {\n activationSnippet = `vec4 activation(vec4 x) {\n ${activation}\n }`;\n }\n\n applyActivationSnippet = `result = activation(result);`;\n }\n\n const addBiasSnippet = addBias ? 
'result += getBiasAtOutCoords();' : '';\n if (addBias) {\n this.variableNames.push('bias');\n }\n\n if (hasPreluActivation) {\n this.variableNames.push('preluActivationWeights');\n }\n if (hasLeakyReluAlpha) {\n this.variableNames.push('leakyreluAlpha');\n }\n\n this.userCode = `\n ${activationSnippet}\n\n void main() {\n ivec4 coords = getOutputCoords();\n int batch = coords.x;\n ivec2 xRCCorner = coords.yz * strides - pads;\n int d2 = coords.w;\n int xRCorner = xRCCorner.x;\n int xCCorner = xRCCorner.y;\n\n //intialize dotProd with a small epsilon seems to reduce GPU accuracy loss.\n vec4 dotProd = vec4(0.000000000000001);\n\n ${mainLoop}\n\n vec4 result = dotProd - vec4(0.000000000000001);\n ${addBiasSnippet}\n ${applyActivationSnippet}\n setOutput(result);\n }\n `;\n }\n }\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\nimport {getGlslDifferences} from './glsl_version';\nimport {GPGPUProgram, useShapeUniforms} from './gpgpu_math';\n\nexport class Im2ColPackedProgram implements GPGPUProgram {\n variableNames = ['A'];\n packedInputs = true;\n packedOutput = true;\n outputShape: number[];\n userCode: string;\n enableShapeUniforms: boolean;\n customUniforms = [\n {name: 'inputShape', type: 'ivec4' as const },\n {name: 'pad', type: 'ivec2' as const },\n {name: 'stride', type: 'ivec2' as const },\n {name: 'dilation', type: 'ivec2' as const },\n {name: 'inChannels', type: 'int' as const },\n {name: 'itemsPerBlockRow', type: 'int' as const },\n {name: 'outWidth', type: 'int' as const },\n ];\n\n constructor(outputShape: number[], convInfo: backend_util.Conv2DInfo) {\n this.outputShape = outputShape;\n this.enableShapeUniforms = useShapeUniforms(this.outputShape.length);\n const {dataFormat} = convInfo;\n const glsl = getGlslDifferences();\n const isChannelsLast = dataFormat === 'channelsLast';\n const rowDim = isChannelsLast ? 1 : 2;\n const colDim = isChannelsLast ? 2 : 3;\n\n const boundsCheckingSnippet = this.enableShapeUniforms ?\n 'if(blockIndex < outShape[2] && pos < outShape[1]) {' :\n `if(blockIndex < ${outputShape[2]} && pos < ${outputShape[1]}) {`;\n let unrolled = ``;\n\n for (let row = 0; row <= 1; row++) {\n for (let col = 0; col <= 1; col++) {\n unrolled += `\n blockIndex = rc.z + ${col};\n pos = rc.y + ${row};\n\n ${boundsCheckingSnippet}\n offsetY = int(blockIndex / outWidth) * stride[0] - pad[0];\n d0 = offsetY + dilation[0] * (pos / itemsPerBlockRow);\n\n if(d0 < inputShape[${rowDim}] && d0 >= 0) {\n // Use custom imod instead mod. 
On Intel GPU, mod may generate\n // unexpected value.\n // https://github.com/tensorflow/tfjs/issues/5447\n offsetX = imod(blockIndex, outWidth) * stride[1] - pad[1];\n d1 = offsetX + dilation[1] * (imod(pos, itemsPerBlockRow) /\n inChannels);\n\n if(d1 < inputShape[${colDim}] && d1 >= 0) {\n\n ch = imod(pos, inChannels);\n\n if (${isChannelsLast}) {\n innerDims = vec2(d1, ch);\n result[${row * 2 + col}] = getChannel(\n getA(rc.x, d0, int(innerDims.x),\n int(innerDims.y)), innerDims);\n } else {\n innerDims = vec2(d0, d1);\n result[${row * 2 + col}] = getChannel(\n getA(rc.x, ch, int(innerDims.x),\n int(innerDims.y)), innerDims);\n }\n }\n }\n }\n `;\n }\n }\n\n this.userCode = `\n void main() {\n ivec3 rc = getOutputCoords();\n\n vec4 result = vec4(0);\n\n int blockIndex, pos, offsetY, d0, offsetX, d1, ch;\n vec2 innerDims;\n\n ${unrolled}\n\n ${glsl.output} = result;\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, TensorInfo, util} from '@tensorflow/tfjs-core';\n\n// import {assertAndGetBroadcastShape} from\n// '../../../tfjs-core/src/ops/broadcast_util';\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {Im2ColPackedProgram} from '../im2col_packed_gpu';\nimport {mapActivationToShaderProgram} from '../kernel_utils/kernel_funcs_utils';\nimport {MatMulPackedProgram} from '../mulmat_packed_gpu';\nimport * as webgl_util from '../webgl_util';\n\nimport {batchMatMulImpl, MATMUL_SHARED_DIM_THRESHOLD} from './BatchMatMul_impl';\nimport {identity} from './Identity';\nimport {reshape} from './Reshape';\n\ntype Conv2DConfig = {\n x: TensorInfo,\n filter: TensorInfo,\n convInfo: backend_util.Conv2DInfo,\n backend: MathBackendWebGL,\n bias?: TensorInfo,\n preluActivationWeights?: TensorInfo,\n leakyreluAlpha?: number,\n activation?: backend_util.Activation\n};\n\n// Both conv2dByMatMul and conv2dWithIm2Row fuse height and width into one\n// dimension to compute batchMatMul, so bias and activation weights are also\n// supposed to fuse the two dimensions into one.\n//\n// This function computes the target shape for fusing height and width\n// dimensions. 
Returning null means the shape is already compatible.\n//\n// Even though the bias is not supposed to be a 3-D or a 4-D (including\n// batch) tensor and PReLU activiation weights is not supposed to be a 4-D\n// tensor, we still need to support them, because we haven't disabled\n// them for NHWC format.\n// https://github.com/tensorflow/tfjs/blob/b53bd47e880367ae57493f0ea628abaf08db2d5d/tfjs-core/src/ops/fused/conv2d.ts#L181-L196\nfunction getShapeForBatchMatMul(\n shape: number[], isChannelsLast: boolean): number[] {\n const length = shape.length;\n if (length >= 3) {\n return isChannelsLast ?\n [\n ...shape.slice(0, -3) /* batch */,\n shape[length - 3] * shape[length - 2] /* height * width */,\n shape[length - 1] /* channel */\n ] :\n [\n ...shape.slice(0, -3) /* batch */, shape[length - 3] /* channel */,\n shape[length - 2] * shape[length - 1] /* height * width */\n ];\n } else if (!isChannelsLast && length === 1 && shape[0] > 1) {\n return [shape[0], 1];\n } else {\n return null;\n }\n}\n\n// For 1x1 kernels that iterate through every point in the input, convolution\n// can be expressed as matrix multiplication (without need for memory\n// remapping).\nexport function conv2dByMatMul({\n x,\n filter,\n convInfo,\n backend,\n bias = null,\n preluActivationWeights = null,\n leakyreluAlpha = 0,\n activation = null\n}: Conv2DConfig) {\n // Reshapes conv2D input to 2D tensors, uses matMul and then reshape the\n // result from 2D to 4D.\n const xShape = x.shape;\n const xTexData = backend.texData.get(x.dataId);\n const sharedMatMulDim = convInfo.inChannels;\n const outerShapeX = xShape[0] * xShape[1] * xShape[2];\n const outerShapeFilter = convInfo.outChannels;\n const isChannelsLast = convInfo.dataFormat === 'channelsLast';\n const transposeA = false;\n const transposeB = false;\n\n let out: TensorInfo;\n const intermediates: TensorInfo[] = [];\n\n if (preluActivationWeights != null) {\n const targetShape =\n getShapeForBatchMatMul(preluActivationWeights.shape, isChannelsLast);\n if (targetShape != null) {\n preluActivationWeights = reshape({\n inputs: {x: preluActivationWeights},\n backend,\n attrs: {shape: targetShape}\n });\n intermediates.push(preluActivationWeights);\n }\n }\n\n if (bias != null) {\n const targetShape = getShapeForBatchMatMul(bias.shape, isChannelsLast);\n if (targetShape != null) {\n bias = reshape({inputs: {x: bias}, backend, attrs: {shape: targetShape}});\n intermediates.push(bias);\n }\n }\n\n // TODO: Once reduction ops are packed, batchMatMul will always be packed\n // and we can remove this condition.\n const batchMatMulWillBeUnpacked =\n (outerShapeX === 1 || outerShapeFilter === 1) &&\n sharedMatMulDim > MATMUL_SHARED_DIM_THRESHOLD;\n\n // The algorithm in the if condition assumes (1) the output will be packed,\n // (2) x is packed, (3) x isChannelsLast, (4) x's packed texture is already\n // on GPU, (5) col is odd, (6) the width, height and inChannels are the same\n // for xTexData.shape and xShape.\n const canOptimize = !batchMatMulWillBeUnpacked && xTexData.isPacked &&\n isChannelsLast && xTexData.texture != null && xShape[2] % 2 !== 0 &&\n util.arraysEqual(xTexData.shape.slice(-3), xShape.slice(-3));\n\n if (canOptimize) {\n // We avoid expensive packed 2x2 reshape by padding col count to next,\n // even number. When col is odd, the result of packed batchMatMul is\n // the same (has the same texture layout and and values in the texture) as\n // it is for next even col. 
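conv2dByMatMul above relies on the fact that a 1x1, stride-1, dilation-1 convolution treats every spatial position independently, so it reduces to a single matrix multiply over the channel dimension after flattening batch, height and width. A minimal NHWC sketch of that equivalence using the public tfjs API; the names below are ours and this is not the kernel's own code path:

import * as tf from '@tensorflow/tfjs';

// Sketch only: a 1x1 / stride-1 convolution expressed as reshape + matMul.
function conv1x1AsMatMul(x: tf.Tensor4D, filter: tf.Tensor4D): tf.Tensor {
  const [batch, height, width, inChannels] = x.shape;  // NHWC input
  const outChannels = filter.shape[3];                 // filter: [1, 1, inCh, outCh]
  const x2d = tf.reshape(x, [batch * height * width, inChannels]);
  const w2d = tf.reshape(filter, [inChannels, outChannels]);
  const y2d = tf.matMul(x2d, w2d);                     // [B*H*W, outCh]
  return tf.reshape(y2d, [batch, height, width, outChannels]);
}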
We make the odd-cols tensor to look like\n // even-cols tensor before the operation and, after the batchMatMul,\n // fix the even-cols result to have odd number of cols.\n const targetShape = xShape[0] * xShape[1] * (xShape[2] + 1);\n const xReshaped: TensorInfo = {\n dataId: x.dataId,\n shape: [1, targetShape, convInfo.inChannels],\n dtype: x.dtype\n };\n // xTexData.shape gets referenced from GPGPUBinary.inShapeInfos.\n // Decrementing col count, after batchMatMul->...->compileProgram leads to\n // invalid col count within the reference in GPGPUBinary.inShapeInfos.\n // Alternative fix would be to provide a copy to GPGPUBinary.inShapeInfos\n // in compileProgram method, but that would affect compilation of all\n // programs - instead, provide a copy here, with even col count, before\n // calling batchMatMul->...->compileProgram and after that, the original\n // xTexData.shape is restored.\n const originalXTexDataShape = xTexData.shape;\n xTexData.shape = xTexData.shape.slice();\n xTexData.shape[xTexData.shape.length - 2]++;\n util.assert(\n webgl_util.isReshapeFree(xTexData.shape, xReshaped.shape),\n () => `packed reshape ${xTexData.shape} to ${\n xReshaped.shape} isn't free`);\n const filterReshaped = reshape({\n inputs: {x: filter},\n backend,\n attrs: {shape: [1, convInfo.inChannels, convInfo.outChannels]}\n });\n intermediates.push(filterReshaped);\n const pointwiseConv = batchMatMulImpl({\n a: xReshaped,\n b: filterReshaped,\n backend,\n transposeA,\n transposeB,\n bias,\n activation,\n preluActivationWeights,\n leakyreluAlpha\n });\n\n const pointwiseConvTexData = backend.texData.get(pointwiseConv.dataId);\n util.assert(\n pointwiseConvTexData.isPacked,\n () => 'batchMatMul result is expected to be packed');\n // Restore the input shape to original.\n xTexData.shape = originalXTexDataShape;\n // Set the output shape - there is no need for expensive reshape as data\n // layout is already correct.\n pointwiseConvTexData.shape = convInfo.outShape;\n\n out = identity({inputs: {x: pointwiseConv}, backend});\n out.shape = convInfo.outShape;\n\n intermediates.push(pointwiseConv);\n } else {\n const numCols = convInfo.outHeight * convInfo.outWidth;\n const xReshaped = reshape({\n inputs: {x},\n backend,\n attrs: {\n shape: isChannelsLast ?\n [convInfo.batchSize, numCols, convInfo.inChannels] :\n [convInfo.batchSize, convInfo.inChannels, numCols]\n }\n });\n const filterReshaped = reshape({\n inputs: {x: filter},\n backend,\n attrs: {shape: [1, convInfo.inChannels, convInfo.outChannels]}\n });\n const result = batchMatMulImpl({\n a: isChannelsLast ? xReshaped : filterReshaped,\n b: isChannelsLast ? 
filterReshaped : xReshaped,\n transposeA: !isChannelsLast,\n transposeB,\n backend,\n bias,\n activation,\n preluActivationWeights,\n leakyreluAlpha\n });\n\n out = reshape(\n {inputs: {x: result}, backend, attrs: {shape: convInfo.outShape}});\n\n intermediates.push(xReshaped);\n intermediates.push(filterReshaped);\n intermediates.push(result);\n }\n\n for (const i of intermediates) {\n backend.disposeIntermediateTensorInfo(i);\n }\n\n return out;\n}\n\n// Implements the im2row algorithm as outlined in \"High Performance\n// Convolutional Neural Networks for Document Processing\" (Suvisoft, 2006)\nexport function conv2dWithIm2Row({\n x,\n filter,\n convInfo,\n backend,\n bias = null,\n preluActivationWeights = null,\n leakyreluAlpha = 0,\n activation = null\n}: Conv2DConfig) {\n // Rearranges conv2d input so each block to be convolved over forms the\n // column of a new matrix with shape [filterWidth * filterHeight *\n // inChannels, outHeight * outWidth]. The filter is also rearranged so each\n // output channel forms a row of a new matrix with shape [outChannels,\n // filterWidth * filterHeight * inChannels]. The convolution is then\n // computed by multiplying these matrices and reshaping the result.\n const {\n filterWidth,\n filterHeight,\n inChannels,\n outWidth,\n outHeight,\n dataFormat\n } = convInfo;\n\n const isChannelsLast = dataFormat === 'channelsLast';\n\n const sharedDim = filterWidth * filterHeight * inChannels;\n const numCols = outHeight * outWidth;\n const x2ColShape = [convInfo.batchSize, sharedDim, numCols];\n const transposeA = true;\n const transposeB = false;\n\n const intermediates: TensorInfo[] = [];\n\n if (preluActivationWeights != null) {\n const targetShape =\n getShapeForBatchMatMul(preluActivationWeights.shape, isChannelsLast);\n if (targetShape != null) {\n preluActivationWeights = reshape({\n inputs: {x: preluActivationWeights},\n backend,\n attrs: {shape: targetShape}\n });\n intermediates.push(preluActivationWeights);\n }\n }\n\n if (bias != null) {\n const targetShape = getShapeForBatchMatMul(bias.shape, isChannelsLast);\n if (targetShape != null) {\n bias = reshape({inputs: {x: bias}, backend, attrs: {shape: targetShape}});\n intermediates.push(bias);\n }\n }\n\n const w2Row = reshape({\n inputs: {x: filter},\n backend,\n attrs: {shape: [1, sharedDim, util.sizeFromShape(filter.shape) / sharedDim]}\n });\n intermediates.push(w2Row);\n\n const im2ColProgram = new Im2ColPackedProgram(x2ColShape, convInfo);\n const customValues = [\n x.shape, [convInfo.padInfo.top, convInfo.padInfo.left],\n [convInfo.strideHeight, convInfo.strideWidth],\n [convInfo.dilationHeight, convInfo.dilationWidth], [convInfo.inChannels],\n [convInfo.filterWidth * convInfo.inChannels], [convInfo.outWidth]\n ];\n const im2Col =\n backend.runWebGLProgram(im2ColProgram, [x], 'float32', customValues);\n const im2ColReshaped =\n reshape({inputs: {x: im2Col}, backend, attrs: {shape: x2ColShape}});\n\n intermediates.push(im2Col);\n intermediates.push(im2ColReshaped);\n\n const hasBias = bias != null;\n const hasPreluActivationWeights = preluActivationWeights != null;\n const hasLeakyreluAlpha = activation === 'leakyrelu';\n const fusedActivation =\n activation ? mapActivationToShaderProgram(activation, true) : null;\n const matmulProgram = new MatMulPackedProgram(\n isChannelsLast ? im2ColReshaped.shape as [number, number, number] :\n w2Row.shape as [number, number, number],\n isChannelsLast ? 
w2Row.shape as [number, number, number] :\n im2ColReshaped.shape as [number, number, number],\n isChannelsLast ? [convInfo.batchSize, numCols, convInfo.outChannels] :\n [convInfo.batchSize, convInfo.outChannels, numCols],\n transposeA, transposeB, hasBias, fusedActivation,\n hasPreluActivationWeights, hasLeakyreluAlpha);\n const inputs: TensorInfo[] =\n isChannelsLast ? [im2ColReshaped, w2Row] : [w2Row, im2ColReshaped];\n if (bias) {\n inputs.push(bias);\n }\n if (hasPreluActivationWeights) {\n inputs.push(preluActivationWeights);\n }\n if (hasLeakyreluAlpha) {\n const $leakyreluAlpha = backend.makeTensorInfo(\n [], 'float32',\n util.createScalarValue(leakyreluAlpha as unknown as 'float32',\n 'float32'));\n inputs.push($leakyreluAlpha);\n intermediates.push($leakyreluAlpha);\n }\n const product = backend.runWebGLProgram(matmulProgram, inputs, 'float32');\n const out = reshape(\n {inputs: {x: product}, backend, attrs: {shape: convInfo.outShape}});\n\n intermediates.push(product);\n for (const i of intermediates) {\n backend.disposeIntermediateTensorInfo(i);\n }\n\n return out;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Conv2D, Conv2DAttrs, Conv2DInputs, env, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {Conv2DProgram} from '../conv_gpu';\nimport {Conv2DPackedProgram} from '../conv_packed_gpu';\nimport {conv2dByMatMul, conv2dWithIm2Row} from './Conv2D_impl';\nimport {reshape} from './Reshape';\n\nexport function conv2d(\n args:\n {inputs: Conv2DInputs, attrs: Conv2DAttrs, backend: MathBackendWebGL}) {\n const {inputs, backend, attrs} = args;\n const {x, filter} = inputs;\n const {strides, pad, dataFormat, dilations, dimRoundingMode} = attrs;\n\n const $dataFormat = backend_util.convertConv2DDataFormat(dataFormat);\n const convInfo = backend_util.computeConv2DInfo(\n x.shape as [number, number, number, number],\n filter.shape as [number, number, number, number], strides, dilations, pad,\n dimRoundingMode, false /* depthwise */, $dataFormat);\n let out: TensorInfo;\n\n if (convInfo.filterHeight === 1 && convInfo.filterWidth === 1 &&\n convInfo.dilationHeight === 1 && convInfo.dilationWidth === 1 &&\n convInfo.strideHeight === 1 && convInfo.strideWidth === 1 &&\n (convInfo.padInfo.type === 'SAME' || convInfo.padInfo.type === 'VALID')) {\n out = conv2dByMatMul({x, filter, convInfo, backend});\n } else if (convInfo.strideWidth <= 2 && $dataFormat === 'channelsLast'\n && env().getBool('WEBGL_EXP_CONV')\n ) {\n const program = new Conv2DPackedProgram(convInfo);\n const customValues = [\n [convInfo.padInfo.top, convInfo.padInfo.left],\n [convInfo.strideHeight, convInfo.strideWidth],\n [convInfo.dilationHeight, convInfo.dilationWidth],\n [convInfo.inHeight, convInfo.inWidth]\n ];\n out =\n 
backend.runWebGLProgram(program, [x, filter], 'float32', customValues);\n } else if (env().getBool('WEBGL_CONV_IM2COL')) {\n out = conv2dWithIm2Row({x, filter, convInfo, backend});\n } else {\n const program = new Conv2DProgram(convInfo);\n out = backend.runWebGLProgram(program, [x, filter], 'float32');\n }\n\n const outReshaped =\n reshape({inputs: {x: out}, backend, attrs: {shape: convInfo.outShape}});\n backend.disposeIntermediateTensorInfo(out);\n\n return outReshaped;\n}\n\nexport const conv2DConfig: KernelConfig = {\n kernelName: Conv2D,\n backendName: 'webgl',\n kernelFunc: conv2d as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class Conv2DDerFilterProgram implements GPGPUProgram {\n variableNames = ['x', 'dy'];\n outputShape: number[];\n userCode: string;\n\n constructor(convInfo: backend_util.Conv2DInfo) {\n this.outputShape = convInfo.filterShape;\n\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const padTop = convInfo.padInfo.top;\n const padLeft = convInfo.padInfo.left;\n const isChannelsLast = convInfo.dataFormat === 'channelsLast';\n\n this.userCode = `\n void main() {\n ivec4 coords = getOutputCoords();\n int wR = coords.x;\n int wC = coords.y;\n int d1 = coords.z;\n int d2 = coords.w;\n\n // Convolve x(?, ?, d1) with dy(:, :, d2) to get dw(wR, wC, d1, d2).\n // ? = to be determined. 
: = across all values in that axis.\n float dotProd = 0.0;\n\n for (int b = 0; b < ${convInfo.batchSize}; b++) {\n for (int yR = 0; yR < ${convInfo.outHeight}; yR++) {\n int xR = wR + yR * ${strideHeight} - ${padTop};\n\n if (xR < 0 || xR >= ${convInfo.inHeight}) {\n continue;\n }\n\n for (int yC = 0; yC < ${convInfo.outWidth}; yC++) {\n int xC = wC + yC * ${strideWidth} - ${padLeft};\n\n if (xC < 0 || xC >= ${convInfo.inWidth}) {\n continue;\n }\n\n if (${isChannelsLast}) {\n float dyValue = getDy(b, yR, yC, d2);\n float xValue = getX(b, xR, xC, d1);\n dotProd += (xValue * dyValue);\n } else {\n float dyValue = getDy(b, d2, yR, yC);\n float xValue = getX(b, d1, xR, xC);\n dotProd += (xValue * dyValue);\n }\n\n }\n }\n }\n setOutput(dotProd);\n }\n `;\n }\n}\n\nexport class Conv2DDerInputProgram implements GPGPUProgram {\n variableNames = ['dy', 'W'];\n outputShape: number[];\n userCode: string;\n\n constructor(convInfo: backend_util.Conv2DInfo) {\n this.outputShape = convInfo.inShape;\n\n const filterHeight = convInfo.filterHeight;\n const filterWidth = convInfo.filterWidth;\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const isChannelsLast = convInfo.dataFormat === 'channelsLast';\n\n const padTop = filterHeight - 1 - convInfo.padInfo.top;\n const padLeft = filterWidth - 1 - convInfo.padInfo.left;\n\n const rowDim = isChannelsLast ? 1 : 2;\n const colDim = isChannelsLast ? 2 : 3;\n const channelDim = isChannelsLast ? 3 : 1;\n\n this.userCode = `\n const ivec2 pads = ivec2(${padTop}, ${padLeft});\n\n void main() {\n ivec4 coords = getOutputCoords();\n int batch = coords[0];\n int d1 = coords[${channelDim}];\n\n ivec2 dyCorner = ivec2(coords[${rowDim}], coords[${colDim}]) - pads;\n int dyRCorner = dyCorner.x;\n int dyCCorner = dyCorner.y;\n\n // Convolve dy(?, ?, d2) with w(:, :, d1, d2) to compute dx(xR, xC, d1).\n // ? = to be determined. 
: = across all values in that axis.\n float dotProd = 0.0;\n for (int wR = 0; wR < ${filterHeight}; wR++) {\n float dyR = float(dyRCorner + wR) / ${strideHeight}.0;\n\n if (dyR < 0.0 || dyR >= ${convInfo.outHeight}.0 || fract(dyR) > 0.0) {\n continue;\n }\n int idyR = int(dyR);\n\n int wRPerm = ${filterHeight} - 1 - wR;\n\n for (int wC = 0; wC < ${filterWidth}; wC++) {\n float dyC = float(dyCCorner + wC) / ${strideWidth}.0;\n\n if (dyC < 0.0 || dyC >= ${convInfo.outWidth}.0 ||\n fract(dyC) > 0.0) {\n continue;\n }\n int idyC = int(dyC);\n\n int wCPerm = ${filterWidth} - 1 - wC;\n\n for (int d2 = 0; d2 < ${convInfo.outChannels}; d2++) {\n\n if (${isChannelsLast}) {\n float xValue = getDy(batch, idyR, idyC, d2);\n float wValue = getW(wRPerm, wCPerm, d1, d2);\n dotProd += xValue * wValue;\n } else {\n float xValue = getDy(batch, d2, idyR, idyC);\n float wValue = getW(wRPerm, wCPerm, d1, d2);\n dotProd += xValue * wValue;\n }\n\n }\n }\n }\n setOutput(dotProd);\n }\n `;\n }\n}\n\nexport class Conv3DDerFilterProgram implements GPGPUProgram {\n variableNames = ['x', 'dy'];\n outputShape: number[];\n userCode: string;\n\n constructor(convInfo: backend_util.Conv3DInfo) {\n this.outputShape = convInfo.filterShape;\n\n const strideDepth = convInfo.strideDepth;\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const padFront = convInfo.padInfo.front;\n const padTop = convInfo.padInfo.top;\n const padLeft = convInfo.padInfo.left;\n\n this.userCode = `\n void main() {\n ivec5 coords = getOutputCoords();\n int wF = coords.x;\n int wR = coords.y;\n int wC = coords.z;\n int d1 = coords.w;\n int d2 = coords.u;\n\n float dotProd = 0.0;\n\n for (int b = 0; b < ${convInfo.batchSize}; b++) {\n for (int yF = 0; yF < ${convInfo.outDepth}; yF++) {\n int xF = wF + yF * ${strideDepth} - ${padFront};\n\n if (xF < 0 || xF >= ${convInfo.inDepth}) {\n continue;\n }\n\n for (int yR = 0; yR < ${convInfo.outHeight}; yR++) {\n int xR = wR + yR * ${strideHeight} - ${padTop};\n\n if (xR < 0 || xR >= ${convInfo.inHeight}) {\n continue;\n }\n\n for (int yC = 0; yC < ${convInfo.outWidth}; yC++) {\n int xC = wC + yC * ${strideWidth} - ${padLeft};\n\n if (xC < 0 || xC >= ${convInfo.inWidth}) {\n continue;\n }\n\n float dyValue = getDy(b, yF, yR, yC, d2);\n float xValue = getX(b, xF, xR, xC, d1);\n dotProd += (xValue * dyValue);\n }\n }\n }\n }\n setOutput(dotProd);\n }\n `;\n }\n}\n\nexport class Conv3DDerInputProgram implements GPGPUProgram {\n variableNames = ['dy', 'W'];\n outputShape: number[];\n userCode: string;\n\n constructor(convInfo: backend_util.Conv3DInfo) {\n this.outputShape = convInfo.inShape;\n\n const filterDepth = convInfo.filterDepth;\n const filterHeight = convInfo.filterHeight;\n const filterWidth = convInfo.filterWidth;\n const strideDepth = convInfo.strideDepth;\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n\n const padFront = filterDepth - 1 - convInfo.padInfo.front;\n const padTop = filterHeight - 1 - convInfo.padInfo.top;\n const padLeft = filterWidth - 1 - convInfo.padInfo.left;\n\n this.userCode = `\n const ivec3 pads = ivec3(${padFront}, ${padTop}, ${padLeft});\n\n void main() {\n ivec5 coords = getOutputCoords();\n int batch = coords.x;\n int d1 = coords.u;\n\n\n ivec3 dyCorner = ivec3(coords.y, coords.z, coords.w) - pads;\n int dyFCorner = dyCorner.x;\n int dyRCorner = dyCorner.y;\n int dyCCorner = dyCorner.z;\n\n float dotProd = 0.0;\n for (int wF = 0; wF < ${filterDepth}; wF++) {\n float dyF = float(dyFCorner + 
wF) / ${strideDepth}.0;\n\n if (dyF < 0.0 || dyF >= ${convInfo.outDepth}.0 || fract(dyF) > 0.0) {\n continue;\n }\n int idyF = int(dyF);\n\n int wFPerm = ${filterDepth} - 1 - wF;\n\n for (int wR = 0; wR < ${filterHeight}; wR++) {\n float dyR = float(dyRCorner + wR) / ${strideHeight}.0;\n\n if (dyR < 0.0 || dyR >= ${convInfo.outHeight}.0 ||\n fract(dyR) > 0.0) {\n continue;\n }\n int idyR = int(dyR);\n\n int wRPerm = ${filterHeight} - 1 - wR;\n\n for (int wC = 0; wC < ${filterWidth}; wC++) {\n float dyC = float(dyCCorner + wC) / ${strideWidth}.0;\n\n if (dyC < 0.0 || dyC >= ${convInfo.outWidth}.0 ||\n fract(dyC) > 0.0) {\n continue;\n }\n int idyC = int(dyC);\n\n int wCPerm = ${filterWidth} - 1 - wC;\n\n for (int d2 = 0; d2 < ${convInfo.outChannels}; d2++) {\n float xValue = getDy(batch, idyF, idyR, idyC, d2);\n float wValue = getW(wFPerm, wRPerm, wCPerm, d1, d2);\n dotProd += xValue * wValue;\n }\n }\n }\n }\n setOutput(dotProd);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Conv2DBackpropFilter, Conv2DBackpropFilterAttrs, Conv2DBackpropFilterInputs, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {Conv2DDerFilterProgram} from '../conv_backprop_gpu';\n\nexport function conv2DBackpropFilter(args: {\n inputs: Conv2DBackpropFilterInputs,\n attrs: Conv2DBackpropFilterAttrs,\n backend: MathBackendWebGL\n}) {\n const {inputs, backend, attrs} = args;\n const {x, dy} = inputs;\n const {strides, pad, dataFormat, dimRoundingMode, filterShape} = attrs;\n\n const $dataFormat = backend_util.convertConv2DDataFormat(dataFormat);\n const convInfo = backend_util.computeConv2DInfo(\n x.shape as [number, number, number, number], filterShape, strides,\n 1 /* dilations */, pad, dimRoundingMode, false /* depthwise */,\n $dataFormat);\n\n const program = new Conv2DDerFilterProgram(convInfo);\n return backend.runWebGLProgram(program, [x, dy], 'float32');\n}\n\nexport const conv2DBackpropFilterConfig: KernelConfig = {\n kernelName: Conv2DBackpropFilter,\n backendName: 'webgl',\n kernelFunc: conv2DBackpropFilter as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Conv2DBackpropInput, Conv2DBackpropInputAttrs, Conv2DBackpropInputInputs, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {Conv2DDerInputProgram} from '../conv_backprop_gpu';\n\nexport function conv2DBackpropInput(args: {\n inputs: Conv2DBackpropInputInputs,\n attrs: Conv2DBackpropInputAttrs,\n backend: MathBackendWebGL\n}) {\n const {inputs, backend, attrs} = args;\n const {dy, filter} = inputs;\n const {inputShape, strides, pad, dataFormat, dimRoundingMode} = attrs;\n\n const $dataFormat = backend_util.convertConv2DDataFormat(dataFormat);\n const convInfo = backend_util.computeConv2DInfo(\n inputShape, filter.shape as [number, number, number, number], strides,\n 1 /* dilations */, pad, dimRoundingMode, false, $dataFormat);\n\n const program = new Conv2DDerInputProgram(convInfo);\n return backend.runWebGLProgram(program, [dy, filter], 'float32');\n}\n\nexport const conv2DBackpropInputConfig: KernelConfig = {\n kernelName: Conv2DBackpropInput,\n backendName: 'webgl',\n kernelFunc: conv2DBackpropInput as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Conv3D, Conv3DAttrs, Conv3DInputs, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {Conv3DProgram} from '../conv_gpu';\n\nexport function conv3D(\n args:\n {inputs: Conv3DInputs, attrs: Conv3DAttrs, backend: MathBackendWebGL}) {\n const {inputs, backend, attrs} = args;\n const {x, filter} = inputs;\n const {strides, pad, dilations} = attrs;\n\n const convInfo = backend_util.computeConv3DInfo(\n x.shape as [number, number, number, number, number],\n filter.shape as [number, number, number, number, number], strides,\n dilations, pad);\n\n const program = new Conv3DProgram(convInfo);\n return backend.runWebGLProgram(program, [x, filter], 'float32');\n}\n\nexport const conv3DConfig: KernelConfig = {\n kernelName: Conv3D,\n backendName: 'webgl',\n kernelFunc: conv3D as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Conv3DBackpropFilterV2, Conv3DBackpropFilterV2Attrs, Conv3DBackpropFilterV2Inputs, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {Conv3DDerFilterProgram} from '../conv_backprop_gpu';\n\nexport function conv3DBackpropFilterV2(args: {\n inputs: Conv3DBackpropFilterV2Inputs,\n attrs: Conv3DBackpropFilterV2Attrs,\n backend: MathBackendWebGL\n}) {\n const {inputs, backend, attrs} = args;\n const {x, dy} = inputs;\n const {strides, pad, filterShape} = attrs;\n\n const convInfo = backend_util.computeConv3DInfo(\n x.shape as [number, number, number, number, number], filterShape, strides,\n 1 /* dilations */, pad);\n\n const program = new Conv3DDerFilterProgram(convInfo);\n return backend.runWebGLProgram(program, [x, dy], 'float32');\n}\n\nexport const conv3DBackpropFilterV2Config: KernelConfig = {\n kernelName: Conv3DBackpropFilterV2,\n backendName: 'webgl',\n kernelFunc: conv3DBackpropFilterV2 as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Conv3DBackpropInputV2, Conv3DBackpropInputV2Attrs, Conv3DBackpropInputV2Inputs, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {Conv3DDerInputProgram} from '../conv_backprop_gpu';\n\nexport function conv3DBackpropInput(args: {\n inputs: Conv3DBackpropInputV2Inputs,\n attrs: Conv3DBackpropInputV2Attrs,\n backend: MathBackendWebGL\n}) {\n const {inputs, backend, attrs} = args;\n const {dy, filter} = inputs;\n const {pad, strides, inputShape} = attrs;\n\n const convInfo = backend_util.computeConv3DInfo(\n inputShape, filter.shape as [number, number, number, number, number],\n strides, 1 /* dilations */, pad);\n\n const program = new Conv3DDerInputProgram(convInfo);\n return backend.runWebGLProgram(program, [dy, filter], 'float32');\n}\n\nexport const conv3DBackpropInputConfig: KernelConfig = {\n kernelName: Conv3DBackpropInputV2,\n backendName: 'webgl',\n kernelFunc: conv3DBackpropInput as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2022 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {GPGPUProgram} from './gpgpu_math';\nimport {getCoordsDataType, UniformType} from './shader_compiler';\n\nexport enum CumOpType {\n Prod = '*',\n Sum = '+',\n}\n\nexport class CumProgram implements GPGPUProgram {\n variableNames = ['x'];\n userCode: string;\n customUniforms = [{name: 'index', type: 'float' as UniformType}];\n\n constructor(\n public op: CumOpType, public outputShape: number[], exclusive: boolean,\n reverse: boolean) {\n const rank = this.outputShape.length;\n const initVal = this.op === CumOpType.Prod ? '1.0' : '0.0';\n const val =\n exclusive ? initVal : `getX(${getCoords(rank, 'coords', this.op)})`;\n const length = this.outputShape[this.outputShape.length - 1];\n let condition = '';\n let idxString = '';\n // When exclusive is set, the cum op becomes roll op that copies the\n // value from the previous index based on the direction specified by the\n // reverse flag.\n if (exclusive) {\n condition = reverse ? `end != ${length - 1}` : 'end != 0';\n idxString = reverse ? 'end + 1' : 'end - 1';\n } else {\n condition = reverse ? `end + pow2 < ${length}` : 'end >= pow2';\n idxString = (reverse ? 'end + pow2' : 'end - pow2');\n }\n\n this.userCode = `\n void main() {\n ${getCoordsDataType(rank)} coords = getOutputCoords();\n int end = ${getFinalCoord(rank, 'coords', this.op)};\n float val = ${val};\n int pow2 = int(pow(2.0, index));\n if (${condition}) {\n int idx = ${idxString};\n ${getFinalCoord(rank, 'coords', this.op)} = idx;\n val ${this.op}= getX(${getCoords(rank, 'coords', this.op)});\n }\n setOutput(val);\n }\n `;\n }\n}\n\nfunction getCoords(rank: number, name: string, op: CumOpType): string {\n if (rank === 1) {\n return `${name}`;\n } else if (rank === 2) {\n return `${name}.x, ${name}.y`;\n } else if (rank === 3) {\n return `${name}.x, ${name}.y, ${name}.z`;\n } else if (rank === 4) {\n return `${name}.x, ${name}.y, ${name}.z, ${name}.w`;\n } else {\n throw new Error(`Cumulative ${op} for rank ${rank} is not yet supported`);\n }\n}\n\nfunction getFinalCoord(rank: number, name: string, op: CumOpType): string {\n if (rank === 1) {\n return `${name}`;\n } else if (rank === 2) {\n return `${name}.y`;\n } else if (rank === 3) {\n return `${name}.z`;\n } else if (rank === 4) {\n return `${name}.w`;\n } else {\n throw new Error(`Cumulative ${op} for rank ${rank} is not yet supported`);\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Cos, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {CHECK_NAN_SNIPPET_UNARY, unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst COS = CHECK_NAN_SNIPPET_UNARY + `\n return cos(x);\n`;\n\nexport const cos = unaryKernelFunc({opSnippet: COS});\n\nexport const cosConfig: KernelConfig = {\n kernelName: Cos,\n backendName: 'webgl',\n kernelFunc: cos,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Cosh, KernelConfig} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst COSH = `\n float e2x = exp(-x);\n return (e2x + 1.0 / e2x) / 2.0;\n`;\n\nexport const cosh = unaryKernelFunc({opSnippet: COSH});\n\nexport const coshConfig: KernelConfig = {\n kernelName: Cosh,\n backendName: 'webgl',\n kernelFunc: cosh,\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class CropAndResizeProgram implements GPGPUProgram {\n variableNames = ['Image', 'Boxes', 'BoxInd'];\n outputShape: number[] = [];\n userCode: string;\n\n constructor(\n imageShape: [number, number, number, number], boxShape: [number, number],\n cropSize: [number, number], method: 'bilinear'|'nearest',\n extrapolationValue: number) {\n const [batch, imageHeight, imageWidth, depth] = imageShape;\n const [numBoxes, ] = boxShape;\n const [cropHeight, cropWidth] = cropSize;\n this.outputShape = [numBoxes, cropHeight, cropWidth, depth];\n const methodId = method === 'bilinear' ? 
1 : 0;\n\n const [inputHeightFloat, inputWidthFloat] =\n [`${imageHeight - 1}.0`, `${imageWidth - 1}.0`];\n\n const [heightRatio, heightScale, inY] = cropHeight > 1 ?\n [\n `${(imageHeight - 1) / (cropHeight - 1)}`,\n '(y2-y1) * height_ratio',\n `y1*${inputHeightFloat} + float(y)*(height_scale)`,\n ] :\n [\n '0.0',\n '0.0',\n `0.5 * (y1+y2) * ${inputHeightFloat}`,\n ];\n const [widthRatio, widthScale, inX] = cropWidth > 1 ?\n [\n `${(imageWidth - 1) / (cropWidth - 1)}`,\n '(x2-x1) * width_ratio',\n `x1*${inputWidthFloat} + float(x)*(width_scale)`,\n ] :\n [\n '0.0',\n '0.0',\n `0.5 * (x1+x2) * ${inputWidthFloat}`,\n ];\n\n // Reference implementation\n // tslint:disable-next-line:max-line-length\n // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/crop_and_resize_op_gpu.cu.cc\n this.userCode = `\n const float height_ratio = float(${heightRatio});\n const float width_ratio = float(${widthRatio});\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int y = coords[1];\n int x = coords[2];\n int d = coords[3];\n\n // get box vals\n float y1 = getBoxes(b,0);\n float x1 = getBoxes(b,1);\n float y2 = getBoxes(b,2);\n float x2 = getBoxes(b,3);\n\n // get image in batch index\n int bInd = round(getBoxInd(b));\n if(bInd < 0 || bInd >= ${batch}) {\n return;\n }\n\n float height_scale = ${heightScale};\n float width_scale = ${widthScale};\n\n float in_y = ${inY};\n if( in_y < 0.0 || in_y > ${inputHeightFloat} ) {\n setOutput(float(${extrapolationValue}));\n return;\n }\n float in_x = ${inX};\n if( in_x < 0.0 || in_x > ${inputWidthFloat} ) {\n setOutput(float(${extrapolationValue}));\n return;\n }\n\n vec2 sourceFracIndexCR = vec2(in_x,in_y);\n if(${methodId} == 1) {\n // Compute the four integer indices.\n ivec2 sourceFloorCR = ivec2(sourceFracIndexCR);\n ivec2 sourceCeilCR = ivec2(ceil(sourceFracIndexCR));\n\n float topLeft = getImage(b, sourceFloorCR.y, sourceFloorCR.x, d);\n float bottomLeft = getImage(b, sourceCeilCR.y, sourceFloorCR.x, d);\n float topRight = getImage(b, sourceFloorCR.y, sourceCeilCR.x, d);\n float bottomRight = getImage(b, sourceCeilCR.y, sourceCeilCR.x, d);\n\n vec2 fracCR = sourceFracIndexCR - vec2(sourceFloorCR);\n\n float top = topLeft + (topRight - topLeft) * fracCR.x;\n float bottom = bottomLeft + (bottomRight - bottomLeft) * fracCR.x;\n float newValue = top + (bottom - top) * fracCR.y;\n setOutput(newValue);\n } else {\n // Compute the coordinators of nearest neighbor point.\n ivec2 sourceNearestCR = ivec2(floor(\n sourceFracIndexCR + vec2(0.5,0.5)));\n float newValue = getImage(b, sourceNearestCR.y, sourceNearestCR.x, d);\n setOutput(newValue);\n }\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {CropAndResize, CropAndResizeAttrs, CropAndResizeInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {CropAndResizeProgram} from '../crop_and_resize_gpu';\n\nexport const cropAndResize = (args: {\n inputs: CropAndResizeInputs,\n backend: MathBackendWebGL,\n attrs: CropAndResizeAttrs\n}): TensorInfo => {\n const {inputs, backend, attrs} = args;\n const {image, boxes, boxInd} = inputs;\n const {cropSize, method, extrapolationValue} = attrs;\n\n const program = new CropAndResizeProgram(\n image.shape as [number, number, number, number],\n boxes.shape as [number, number], cropSize, method, extrapolationValue);\n return backend.runWebGLProgram(program, [image, boxes, boxInd], 'float32');\n};\n\nexport const cropAndResizeConfig: KernelConfig = {\n kernelName: CropAndResize,\n backendName: 'webgl',\n kernelFunc: cropAndResize as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2022 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {CumOpType, CumProgram} from '../cum_gpu';\n\nimport {identity} from './Identity';\nimport {transpose} from './Transpose';\n\nexport function cumImpl(\n op: CumOpType, x: TensorInfo, backend: MathBackendWebGL, axis: number,\n exclusive: boolean, reverse: boolean): TensorInfo {\n const xRank = x.shape.length;\n const permutation = backend_util.getAxesPermutation([axis], xRank);\n let permutedX = x;\n if (permutation != null) {\n permutedX = transpose({inputs: {x}, backend, attrs: {perm: permutation}});\n }\n const permutedAxis = backend_util.getInnerMostAxes(1, xRank)[0];\n\n if (permutedAxis !== xRank - 1) {\n throw new Error(\n `WebGL cumprod shader expects an inner-most axis=${\n x.shape.length - 1} ` +\n `but got axis=${axis}`);\n }\n const size = permutedX.shape[permutedAxis];\n let result = identity({inputs: {x: permutedX}, backend});\n // Use cum parallel algorithm, inspired by:\n // https://developer.nvidia.com/gpugems/gpugems3/part-vi-gpu-computing/chapter-39-parallel-prefix-sum-scan-cuda\n // Note: although the algorithm is called sum, it works 
for any associtative\n // operator with an identity.\n\n for (let i = 0; i <= Math.ceil(Math.log2(size)) - 1; i++) {\n const program = new CumProgram(op, permutedX.shape, false, reverse);\n const customValues = [[i]];\n const prevResult = result;\n result =\n backend.runWebGLProgram(program, [result], result.dtype, customValues);\n backend.disposeIntermediateTensorInfo(prevResult);\n }\n // For exclusive cum, shift the end result in the direction of product or sum\n // and add 1 for product or 0 for sum to the front index.\n if (exclusive) {\n const program = new CumProgram(op, permutedX.shape, exclusive, reverse);\n const prevResult = result;\n result = backend.runWebGLProgram(program, [result], result.dtype);\n backend.disposeIntermediateTensorInfo(prevResult);\n }\n\n if (permutation != null) {\n const reversePermutation = backend_util.getUndoAxesPermutation(permutation);\n const reverseTransposedResult = transpose(\n {inputs: {x: result}, backend, attrs: {perm: reversePermutation}});\n\n backend.disposeIntermediateTensorInfo(result);\n backend.disposeIntermediateTensorInfo(permutedX);\n\n return reverseTransposedResult;\n }\n\n return result;\n}\n","/**\n * @license\n * Copyright 2022 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Cumprod, CumprodAttrs, CumprodInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {CumOpType} from '../cum_gpu';\nimport {cumImpl} from './Cum_impl';\n\nexport function cumprod(args: {\n inputs: CumprodInputs,\n backend: MathBackendWebGL,\n attrs: CumprodAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis, exclusive, reverse} = attrs;\n\n return cumImpl(CumOpType.Prod, x, backend, axis, exclusive, reverse);\n}\n\nexport const cumprodConfig: KernelConfig = {\n kernelName: Cumprod,\n backendName: 'webgl',\n kernelFunc: cumprod as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2022 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Cumsum, CumsumAttrs, CumsumInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {CumOpType} from '../cum_gpu';\nimport {cumImpl} from './Cum_impl';\n\nexport function cumsum(\n args:\n {inputs: CumsumInputs, backend: MathBackendWebGL, attrs: CumsumAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis, exclusive, reverse} = attrs;\n return cumImpl(CumOpType.Sum, x, backend, axis, exclusive, reverse);\n}\n\nexport const cumsumConfig: KernelConfig = {\n kernelName: Cumsum,\n backendName: 'webgl',\n kernelFunc: cumsum as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DenseBincount, DenseBincountAttrs, DenseBincountInputs, KernelConfig, KernelFunc, Rank, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {bincountImplCPU, bincountReduceImplCPU} from '../kernel_utils/shared';\n\nexport function denseBincount(args: {\n inputs: DenseBincountInputs,\n backend: MathBackendWebGL,\n attrs: DenseBincountAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x, weights} = inputs;\n const {size, binaryOutput} = attrs;\n\n if (x.shape.length === 1) {\n const xVals = backend.readSync(x.dataId) as TypedArray;\n const weightsVals = backend.readSync(weights.dataId) as TypedArray;\n\n const outVals =\n bincountImplCPU(xVals, weightsVals, weights.dtype, weights.shape, size);\n\n return backend.makeTensorInfo([size], weights.dtype, outVals);\n } else if (x.shape.length === 2) {\n const xBuf = backend.bufferSync(x);\n const weightsBuf = backend.bufferSync(weights);\n\n const outBuf = bincountReduceImplCPU(xBuf, weightsBuf, size, binaryOutput);\n\n return backend.makeTensorInfo(outBuf.shape, weights.dtype, outBuf.values);\n }\n\n throw new Error(\n `Error in denseBincount: input must be at most rank 2, but got rank` +\n `${x.shape.length}.`);\n}\n\nexport const denseBincountConfig: KernelConfig = {\n kernelName: DenseBincount,\n backendName: 'webgl',\n kernelFunc: denseBincount as unknown as KernelFunc\n};\n","/**\n * @license\n 
* Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class DepthToSpaceProgram implements GPGPUProgram {\n variableNames = ['x'];\n outputShape: number[] = [];\n userCode: string;\n blockSize: number;\n dataFormat: string;\n\n constructor(\n outputShape: number[], blockSize: number, dataFormat: 'NHWC'|'NCHW') {\n this.outputShape = outputShape;\n this.blockSize = blockSize;\n this.dataFormat = dataFormat;\n this.userCode = `\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int h = ${this.getHeightCoordString()};\n int w = ${this.getWidthCoordString()};\n int d = ${this.getDepthCoordString()};\n\n int in_h = h / ${blockSize};\n int offset_h = imod(h, ${blockSize});\n int in_w = w / ${blockSize};\n int offset_w = imod(w, ${blockSize});\n int offset_d = (offset_h * ${blockSize} + offset_w) *\n ${this.getOutputDepthSize()};\n int in_d = d + offset_d;\n\n float result = ${this.getInputSamplingString()};\n setOutput(result);\n }\n `;\n }\n\n private getHeightCoordString(): string {\n if (this.dataFormat === 'NHWC') {\n return `coords[1]`;\n } else {\n return `coords[2]`;\n }\n }\n\n private getWidthCoordString(): string {\n if (this.dataFormat === 'NHWC') {\n return `coords[2]`;\n } else {\n return `coords[3]`;\n }\n }\n\n private getDepthCoordString(): string {\n if (this.dataFormat === 'NHWC') {\n return `coords[3]`;\n } else {\n return `coords[1]`;\n }\n }\n\n private getOutputDepthSize(): number {\n if (this.dataFormat === 'NHWC') {\n return this.outputShape[3];\n } else {\n return this.outputShape[1];\n }\n }\n\n private getInputSamplingString(): string {\n if (this.dataFormat === 'NHWC') {\n return `getX(b, in_h, in_w, in_d)`;\n } else {\n return `getX(b, in_d, in_h, in_w)`;\n }\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {DepthToSpace, DepthToSpaceAttrs, DepthToSpaceInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {DepthToSpaceProgram} from '../depth_to_space_gpu';\n\nexport function depthToSpace(args: {\n inputs: DepthToSpaceInputs,\n backend: MathBackendWebGL,\n attrs: DepthToSpaceAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {blockSize, dataFormat} = attrs;\n\n const batchSize = x.shape[0];\n const inputHeight = (dataFormat === 'NHWC') ? x.shape[1] : x.shape[2];\n const inputWidth = (dataFormat === 'NHWC') ? x.shape[2] : x.shape[3];\n const inputDepth = (dataFormat === 'NHWC') ? x.shape[3] : x.shape[1];\n\n const outputHeight = inputHeight * blockSize;\n const outputWidth = inputWidth * blockSize;\n const outputDepth = inputDepth / (blockSize * blockSize);\n\n const outputShape = (dataFormat === 'NHWC') ?\n [batchSize, outputHeight, outputWidth, outputDepth] :\n [batchSize, outputDepth, outputHeight, outputWidth];\n\n const program = new DepthToSpaceProgram(outputShape, blockSize, dataFormat);\n return backend.runWebGLProgram(program, [x], x.dtype);\n}\n\nexport const depthToSpaceConfig: KernelConfig = {\n kernelName: DepthToSpace,\n backendName: 'webgl',\n kernelFunc: depthToSpace as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\nimport {GPGPUProgram, useShapeUniforms} from './gpgpu_math';\n\nexport class DepthwiseConv2DProgram implements GPGPUProgram {\n variableNames = ['x', 'W'];\n outputShape: number[];\n userCode: string;\n enableShapeUniforms: boolean;\n customUniforms = [\n {name: 'pads', type: 'ivec2' as const },\n {name: 'strides', type: 'ivec2' as const },\n {name: 'dilations', type: 'ivec2' as const },\n {name: 'inDims', type: 'ivec2' as const },\n ];\n\n constructor(\n convInfo: backend_util.Conv2DInfo, addBias = false,\n activation: string = null, hasPreluActivation = false,\n hasLeakyReluAlpha = false) {\n this.outputShape = convInfo.outShape;\n this.enableShapeUniforms = useShapeUniforms(this.outputShape.length);\n\n const filterHeight = convInfo.filterHeight;\n const filterWidth = convInfo.filterWidth;\n const channelMul = convInfo.outChannels / convInfo.inChannels;\n\n let activationSnippet = '', applyActivationSnippet = '';\n if (activation) {\n if (hasPreluActivation) {\n activationSnippet = `float activation(float a) {\n float b = getPreluActivationWeightsAtOutCoords();\n ${activation}\n }`;\n } else if (hasLeakyReluAlpha) {\n activationSnippet = `float activation(float a) {\n float b = getLeakyreluAlphaAtOutCoords();\n ${activation}\n }`;\n } else {\n activationSnippet = `\n float activation(float x) {\n ${activation}\n }\n `;\n }\n\n applyActivationSnippet = `result = activation(result);`;\n }\n\n const addBiasSnippet = addBias ? 'result += getBiasAtOutCoords();' : '';\n if (addBias) {\n this.variableNames.push('bias');\n }\n\n if (hasPreluActivation) {\n this.variableNames.push('preluActivationWeights');\n }\n if (hasLeakyReluAlpha) {\n this.variableNames.push('leakyreluAlpha');\n }\n\n this.userCode = `\n ${activationSnippet}\n\n void main() {\n ivec4 coords = getOutputCoords();\n int batch = coords.x;\n ivec2 xRCCorner = coords.yz * strides - pads;\n int d2 = coords.w;\n int d1 = d2 / ${channelMul};\n int q = d2 - d1 * ${channelMul};\n\n int xRCorner = xRCCorner.x;\n int xCCorner = xRCCorner.y;\n\n // Convolve x(?, ?, d1) with w(:, :, d1, q) to get y(yR, yC, d2).\n // ? = to be determined. : = across all values in that axis.\n float dotProd = 0.0;\n // TO DO(dsmilkov): Flatten the two for loops and vec4 the operations.\n for (int wR = 0; wR < ${filterHeight}; wR++) {\n int xR = xRCorner + wR * dilations[0];\n\n if (xR < 0 || xR >= inDims[0]) {\n continue;\n }\n\n for (int wC = 0; wC < ${filterWidth}; wC++) {\n int xC = xCCorner + wC * dilations[1];\n\n if (xC < 0 || xC >= inDims[1]) {\n continue;\n }\n\n float xVal = getX(batch, xR, xC, d1);\n float wVal = getW(wR, wC, d1, q);\n dotProd += xVal * wVal;\n }\n }\n\n float result = dotProd;\n ${addBiasSnippet}\n ${applyActivationSnippet}\n setOutput(result);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, util} from '@tensorflow/tfjs-core';\n\nimport {GPGPUProgram, useShapeUniforms} from './gpgpu_math';\n\nexport class DepthwiseConvPacked2DProgram implements GPGPUProgram {\n variableNames = ['x', 'W'];\n packedInputs = true;\n packedOutput = true;\n outputShape: number[];\n userCode: string;\n enableShapeUniforms: boolean;\n customUniforms = [\n {name: 'pads', type: 'ivec2' as const },\n {name: 'strides', type: 'ivec2' as const },\n {name: 'dilations', type: 'ivec2' as const },\n {name: 'inDims', type: 'ivec2' as const },\n ];\n\n constructor(\n convInfo: backend_util.Conv2DInfo, addBias = false,\n activation: string = null, hasPreluActivation = false,\n hasLeakyReluAlpha = false) {\n this.outputShape = convInfo.outShape;\n this.enableShapeUniforms = useShapeUniforms(this.outputShape.length);\n const channelMul = convInfo.outChannels / convInfo.inChannels;\n const padLeft = convInfo.padInfo.left;\n const strideWidth = convInfo.strideWidth;\n const dilationWidth = convInfo.dilationWidth;\n const filterHeight = convInfo.filterHeight;\n const filterWidth = convInfo.filterWidth;\n const texelsAcross = filterWidth;\n\n let mainLoop = `\n int xR; int xC; int xCOffset;\n vec4 wTexel; vec4 previous; vec4 final;`;\n\n for (let c = 0; c < filterWidth; c++) {\n mainLoop += `\n vec4 xTexelC${c * 2};\n int xTexelC${c * 2}Ready;\n vec4 xTexelC${c * 2 + 1};\n int xTexelC${c * 2 + 1}Ready;\n vec4 xC${c};`;\n }\n\n /**\n * This vectorized implementation works by gathering the values needed for\n * each output channel's dot product into vec4's and then multiplying them\n * all together (this happens in the final double for-loop below). 
Most of\n * the main loop consists of constructing these vec4's with the minimum\n * number of texture2D calls, which means making use of all four returned\n * values from a texture2D call at once.\n */\n mainLoop += `\n for (int r = 0; r < ${filterHeight}; r++) {\n `;\n for (let c = 0; c < filterWidth; c++) {\n mainLoop += `\n xTexelC${c * 2} = vec4(0.0);\n xTexelC${c * 2}Ready = 0;\n xTexelC${c * 2 + 1} = vec4(0.0);\n xTexelC${c * 2 + 1}Ready = 0;\n xC${c} = vec4(0.0);`;\n }\n mainLoop += `\n xR = xRCorner + r * dilations[0];\n if (xR >=0 && xR < inDims[0]) {\n `;\n\n for (let texelC = 0; texelC < (texelsAcross + 1) / 2; texelC++) {\n const colIndex = texelC * 2;\n\n mainLoop += `\n xC = xCCorner + ${colIndex * dilationWidth};\n `;\n\n if (strideWidth === 1) {\n if (colIndex < filterWidth) {\n // If padding is odd, the outer texels have to be composed.\n if (padLeft % 2 === 1) {\n // TODO: Ensure vec4 previous does not result in redundant sample,\n // and avoid setting xTexelRC's that exceed the boundary in the\n // first place rather than resetting them to vec4(0)).\n\n // To compute xCOffset:\n // - If padding is odd, we must add 1 to ensure we ask for an\n // even-numbered row.\n // - We subtract 2 to access the previous texel.\n\n mainLoop += `\n xCOffset = xC + 1;\n if (xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${\n colIndex}Ready == 0) {\n xTexelC${colIndex} = getX(batch, xR, xCOffset, d1);\n\n // Need to manually clear unused channels in case\n // we're reading from recycled texture.\n if (xCOffset + 1 >= inDims[1]) {\n xTexelC${colIndex}.zw = vec2(0.0);\n }\n xTexelC${colIndex}Ready = 1;\n }\n `;\n // This texel has been read in previous iteration if the dilation\n // is 1.\n if (dilationWidth === 1 && colIndex > 0) {\n mainLoop += `\n xC${colIndex} = vec4(xTexelC${colIndex - 2}.zw, xTexelC${\n colIndex}.xy);\n `;\n } else {\n mainLoop += `\n xCOffset = xC + 1 - 2;\n\n if (xCOffset >= 0 && xCOffset < inDims[1]) {\n previous = getX(batch, xR, xCOffset, d1);\n\n // Need to manually clear unused channels in case\n // we're reading from recycled texture.\n if (xCOffset + 1 >= inDims[1]) {\n previous.zw = vec2(0.0);\n }\n\n xC${colIndex} = vec4(previous.zw, xTexelC${colIndex}.xy);\n } else {\n xC${colIndex} = vec4(0.0, 0.0, xTexelC${colIndex}.xy);\n }\n `;\n }\n } else {\n // Padding is even, so xRC corresponds to a single texel.\n mainLoop += `\n if (xC >= 0 && xC < inDims[1] && xTexelC${colIndex}Ready == 0) {\n xTexelC${colIndex} = getX(batch, xR, xC, d1);\n if (xC + 1 >= inDims[1]) {\n xTexelC${colIndex}.zw = vec2(0.0);\n }\n xTexelC${colIndex}Ready = 1;\n }\n\n xC${colIndex} = xTexelC${colIndex};\n `;\n }\n\n if (colIndex + 1 < filterWidth) {\n // If dilation is even, the second entry should match the first\n // (either both are composed or both are single samples). 
But if\n // dilation is odd, then the second entry should be the opposite\n // of the first (if the first is composed, the second is a single\n // sample, and vice versa.)\n\n const nextTexelOffset = padLeft % 2 === 0 ?\n util.nearestLargerEven(dilationWidth) :\n dilationWidth;\n\n if ((dilationWidth % 2 === 0 && padLeft % 2 === 1) ||\n (dilationWidth % 2 !== 0 && padLeft % 2 !== 1)) {\n mainLoop += `\n xCOffset = xC + imod(pads[1], 2) + ${nextTexelOffset};\n\n if (xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${\n colIndex + 1}Ready == 0) {\n xTexelC${colIndex + 1} = getX(batch, xR, xCOffset, d1);\n\n // Need to manually clear unused channels in case\n // we're reading from recycled texture.\n if (xCOffset + 1 >= inDims[1]) {\n xTexelC${colIndex + 1}.zw = vec2(0.0);\n }\n xTexelC${colIndex + 1}Ready = 1;\n }\n `;\n\n // If dilation > 1 then the xRC's will not be able to share any\n // values, so each xRC will require two unique calls to getX.\n if (dilationWidth > 1) {\n mainLoop += `\n xCOffset -= 2;\n if (xCOffset >= 0 && xCOffset < inDims[1]) {\n previous = getX(batch, xR, xCOffset, d1);\n xC${colIndex + 1} = vec4(previous.zw, xTexelC${\n colIndex + 1}.xy);\n } else {\n xC${colIndex + 1} = vec4(0.0, 0.0, xTexelC${\n colIndex + 1}.xy);\n }\n `;\n } else {\n mainLoop += `\n xC${colIndex + 1} = vec4(xTexelC${colIndex}.zw, xTexelC${\n colIndex + 1}.xy);\n `;\n }\n } else {\n // If dilation is 1 and padding is odd, we have already read the\n // texel when constructing the previous x value. Here we can\n // simply skip the texture read.\n if (nextTexelOffset === 1) {\n mainLoop += `\n xC${colIndex + 1} = xTexelC${colIndex};\n `;\n } else {\n mainLoop += `\n xCOffset = xC + ${nextTexelOffset};\n\n if (xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${\n colIndex + 1}Ready == 0) {\n xTexelC${colIndex + 1} = getX(batch, xR, xCOffset, d1);\n if (xCOffset + 1 >= inDims[1]) {\n xTexelC${colIndex + 1}.zw = vec2(0.0);\n }\n xTexelC${colIndex + 1}Ready = 1;\n }\n\n xC${colIndex + 1} = xTexelC${colIndex + 1};\n `;\n }\n }\n }\n }\n } else { // stride === 2\n if (colIndex < filterWidth) {\n // Depending on whether padLeft is even or odd, we want either the\n // xy or zw channels from X texels for xC${colIndex}. If padLeft is\n // even, xC${colIndex +1} is simply the zw channels of texels we've\n // already sampled. 
But if padLeft is odd, xC{$c + 1}.zw will\n // need to come from the xy channels of a new texel, hence the `\n // vec4\n // final` initialized below.\n if (padLeft % 2 === 1) {\n mainLoop += `\n xCOffset = xC + 1 - strides[1];\n if(xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${\n colIndex}Ready == 0) {\n xTexelC${colIndex} = getX(batch, xR, xCOffset, d1);\n // Need to manually clear unused channels in case\n // we're reading from recycled texture.\n if (xCOffset + 1 >= inDims[1]) {\n xTexelC${colIndex}.zw = vec2(0.0);\n }\n xTexelC${colIndex}Ready = 1;\n }\n\n if(xC + 1 >= 0 && xC + 1 < inDims[1] && xTexelC${\n colIndex + 1}Ready == 0) {\n xTexelC${colIndex + 1} = getX(batch, xR, xC + 1, d1);\n // Need to manually clear unused channels in case\n // we're reading from recycled texture.\n if (xC + 2 >= inDims[1]) {\n xTexelC${colIndex + 1}.zw = vec2(0.0);\n }\n xTexelC${colIndex + 1}Ready = 1;\n }\n\n xC${colIndex} = vec4(xTexelC${colIndex}.zw, xTexelC${\n colIndex + 1}.zw);\n `;\n\n if (colIndex + 1 < filterWidth) {\n mainLoop += `\n final = vec4(0.0);\n xCOffset = xC + 1 + strides[1];\n if(xCOffset >= 0 && xCOffset < inDims[1]) {\n final = getX(batch, xR, xCOffset, d1);\n }\n xC${colIndex + 1} = vec4(xTexelC${colIndex + 1}.xy, final.xy);\n `;\n }\n } else {\n mainLoop += `\n if(xC >= 0 && xC < inDims[1] && xTexelC${colIndex}Ready == 0) {\n xTexelC${colIndex} = getX(batch, xR, xC, d1);\n if (xC + 1 >= inDims[1]) {\n xTexelC${colIndex}.zw = vec2(0.0);\n }\n xTexelC${colIndex}Ready = 1;\n }\n\n xCOffset = xC + strides[1];\n if(xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${\n colIndex + 1}Ready == 0) {\n xTexelC${colIndex + 1} = getX(batch, xR, xCOffset, d1);\n if (xCOffset + 1 >= inDims[1]) {\n xTexelC${colIndex + 1}.zw = vec2(0.);\n }\n xTexelC${colIndex + 1}Ready = 1;\n }\n\n xC${colIndex} = vec4(\n xTexelC${colIndex}.xy, xTexelC${colIndex + 1}.xy);\n `;\n\n if (colIndex + 1 < filterWidth) {\n mainLoop += `\n xC${colIndex + 1} = vec4(xTexelC${colIndex}.zw, xTexelC${\n colIndex + 1}.zw);\n `;\n }\n }\n }\n }\n\n // localize the dotProd accumulation within the loop, the theory is for\n // GPU with limited cache, accumulate sum across large amount of\n // veriables will cause lots of cache misses. (i.e. 5x5 filter will have\n // 50 variables)\n if (colIndex < filterWidth) {\n mainLoop += `\n wTexel = getW(r, ${colIndex}, d1, q);\n dotProd += xC${colIndex} * vec4(wTexel.xz, wTexel.xz);\n `;\n\n if (colIndex + 1 < filterWidth) {\n mainLoop += `\n wTexel = getW(r, ${colIndex + 1}, d1, q);\n dotProd += xC${colIndex + 1} * vec4(wTexel.xz, wTexel.xz);\n `;\n }\n }\n }\n mainLoop += `\n }\n `;\n mainLoop += `\n }\n `;\n\n let activationSnippet = '', applyActivationSnippet = '';\n if (activation) {\n if (hasPreluActivation) {\n activationSnippet = `vec4 activation(vec4 a) {\n vec4 b = getPreluActivationWeightsAtOutCoords();\n ${activation}\n }`;\n } else if (hasLeakyReluAlpha) {\n activationSnippet = `vec4 activation(vec4 a) {\n vec4 b = getLeakyreluAlphaAtOutCoords();\n ${activation}\n }`;\n } else {\n activationSnippet = `vec4 activation(vec4 x) {\n ${activation}\n }`;\n }\n\n applyActivationSnippet = `result = activation(result);`;\n }\n\n const addBiasSnippet = addBias ? 
'result += getBiasAtOutCoords();' : '';\n if (addBias) {\n this.variableNames.push('bias');\n }\n\n if (hasPreluActivation) {\n this.variableNames.push('preluActivationWeights');\n }\n if (hasLeakyReluAlpha) {\n this.variableNames.push('leakyreluAlpha');\n }\n\n this.userCode = `\n ${activationSnippet}\n\n void main() {\n ivec4 coords = getOutputCoords();\n int batch = coords.x;\n ivec2 xRCCorner = coords.yz * strides - pads;\n int d2 = coords.w;\n int d1 = d2 / ${channelMul};\n int q = d2 - d1 * ${channelMul};\n int xRCorner = xRCCorner.x;\n int xCCorner = xRCCorner.y;\n\n //intialize dotProd with a small epsilon seems to reduce GPU accuracy loss.\n vec4 dotProd = vec4(0.000000000000001);\n\n ${mainLoop}\n\n vec4 result = dotProd - vec4(0.000000000000001);\n ${addBiasSnippet}\n ${applyActivationSnippet}\n setOutput(result);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, DepthwiseConv2dNative, DepthwiseConv2dNativeAttrs, DepthwiseConv2dNativeInputs, env, KernelConfig, KernelFunc, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {DepthwiseConv2DProgram} from '../conv_gpu_depthwise';\nimport {DepthwiseConvPacked2DProgram} from '../conv_packed_gpu_depthwise';\n\nexport function depthwiseConv2dNative(args: {\n inputs: DepthwiseConv2dNativeInputs,\n attrs: DepthwiseConv2dNativeAttrs,\n backend: MathBackendWebGL\n}) {\n const {inputs, backend, attrs} = args;\n const {x, filter} = inputs;\n const {strides, pad, dilations, dimRoundingMode} = attrs;\n\n let $dilations = dilations;\n if ($dilations == null) {\n $dilations = [1, 1];\n }\n\n util.assert(\n backend_util.eitherStridesOrDilationsAreOne(strides, $dilations),\n () => 'Error in depthwiseConv2d: Either strides or dilations must be ' +\n `1. 
Got strides ${strides} and dilations '${$dilations}'`);\n\n const convInfo = backend_util.computeConv2DInfo(\n x.shape as [number, number, number, number],\n filter.shape as [number, number, number, number], strides, $dilations,\n pad, dimRoundingMode, true /* depthwise */);\n\n let program: DepthwiseConv2DProgram|DepthwiseConvPacked2DProgram;\n if (env().getBool('WEBGL_PACK_DEPTHWISECONV') && convInfo.strideWidth <= 2 &&\n convInfo.outChannels / convInfo.inChannels === 1) {\n program = new DepthwiseConvPacked2DProgram(convInfo);\n } else {\n program = new DepthwiseConv2DProgram(convInfo);\n }\n const customValues = [\n [convInfo.padInfo.top, convInfo.padInfo.left],\n [convInfo.strideHeight, convInfo.strideWidth],\n [convInfo.dilationHeight, convInfo.dilationWidth],\n [convInfo.inHeight, convInfo.inWidth]\n ];\n return backend.runWebGLProgram(program, [x, filter], 'float32', customValues);\n}\n\nexport const depthwiseConv2dNativeConfig: KernelConfig = {\n kernelName: DepthwiseConv2dNative,\n backendName: 'webgl',\n kernelFunc: depthwiseConv2dNative as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class DepthwiseConv2DDerFilterProgram implements GPGPUProgram {\n variableNames = ['x', 'dy'];\n outputShape: number[];\n userCode: string;\n\n constructor(convInfo: backend_util.Conv2DInfo) {\n this.outputShape = convInfo.filterShape;\n\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const padTop = convInfo.padInfo.top;\n const padLeft = convInfo.padInfo.left;\n const channelMul = convInfo.outChannels / convInfo.inChannels;\n\n this.userCode = `\n void main() {\n ivec4 coords = getOutputCoords();\n int wR = coords.x;\n int wC = coords.y;\n int d1 = coords.z;\n int dm = coords.w;\n int d2 = d1 * ${channelMul} + dm;\n\n float dotProd = 0.0;\n\n // TO DO: Vec4 over the batch size\n for (int b = 0; b < ${convInfo.batchSize}; b++) {\n for (int yR = 0; yR < ${convInfo.outHeight}; yR++) {\n int xR = wR + yR * ${strideHeight} - ${padTop};\n\n if (xR < 0 || xR >= ${convInfo.inHeight}) {\n continue;\n }\n\n for (int yC = 0; yC < ${convInfo.outWidth}; yC++) {\n int xC = wC + yC * ${strideWidth} - ${padLeft};\n\n if (xC < 0 || xC >= ${convInfo.inWidth}) {\n continue;\n }\n\n float dyValue = getDy(b, yR, yC, d2);\n float xValue = getX(b, xR, xC, d1);\n dotProd += (xValue * dyValue);\n }\n }\n }\n setOutput(dotProd);\n }\n `;\n }\n}\n\nexport class DepthwiseConv2DDerInputProgram implements GPGPUProgram {\n variableNames = ['dy', 'W'];\n outputShape: number[];\n userCode: string;\n\n constructor(convInfo: backend_util.Conv2DInfo) {\n this.outputShape = convInfo.inShape;\n\n const filterHeight = convInfo.filterHeight;\n const filterWidth = convInfo.filterWidth;\n const 
strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n\n const padTop = filterHeight - 1 - convInfo.padInfo.top;\n const padLeft = filterWidth - 1 - convInfo.padInfo.left;\n const channelMul = convInfo.outChannels / convInfo.inChannels;\n\n this.userCode = `\n const ivec2 pads = ivec2(${padTop}, ${padLeft});\n\n void main() {\n ivec4 coords = getOutputCoords();\n int batch = coords[0];\n int d1 = coords[3];\n ivec2 dyCorner = coords.yz - pads;\n int dyRCorner = dyCorner.x;\n int dyCCorner = dyCorner.y;\n\n float dotProd = 0.0;\n\n for (int wR = 0; wR < ${filterHeight}; wR++) {\n float dyR = float(dyRCorner + wR) / ${strideHeight}.0;\n\n if (dyR < 0.0 || dyR >= ${convInfo.outHeight}.0 || fract(dyR) > 0.0) {\n continue;\n }\n int idyR = int(dyR);\n\n int wRPerm = ${filterHeight} - 1 - wR;\n\n for (int wC = 0; wC < ${filterWidth}; wC++) {\n float dyC = float(dyCCorner + wC) / ${strideWidth}.0;\n\n if (dyC < 0.0 || dyC >= ${convInfo.outWidth}.0 ||\n fract(dyC) > 0.0) {\n continue;\n }\n int idyC = int(dyC);\n\n int wCPerm = ${filterWidth} - 1 - wC;\n\n // TO DO: Vec4 over the channelMul\n for (int dm = 0; dm < ${channelMul}; dm++) {\n int d2 = d1 * ${channelMul} + dm;\n float xValue = getDy(batch, idyR, idyC, d2);\n float wValue = getW(wRPerm, wCPerm, d1, dm);\n dotProd += xValue * wValue;\n }\n }\n }\n setOutput(dotProd);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, DepthwiseConv2dNativeBackpropFilter, DepthwiseConv2dNativeBackpropFilterAttrs, DepthwiseConv2dNativeBackpropFilterInputs, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {DepthwiseConv2DDerFilterProgram} from '../conv_backprop_gpu_depthwise';\n\nexport function depthwiseConv2dNativeBackpropFilter(args: {\n inputs: DepthwiseConv2dNativeBackpropFilterInputs,\n attrs: DepthwiseConv2dNativeBackpropFilterAttrs,\n backend: MathBackendWebGL\n}) {\n const {inputs, backend, attrs} = args;\n const {x, dy} = inputs;\n const {strides, dilations, pad, dimRoundingMode, filterShape} = attrs;\n\n const convInfo = backend_util.computeConv2DInfo(\n x.shape as [number, number, number, number], filterShape, strides,\n dilations, pad, dimRoundingMode, true /* depthwise */);\n\n const program = new DepthwiseConv2DDerFilterProgram(convInfo);\n return backend.runWebGLProgram(program, [x, dy], 'float32');\n}\n\nexport const depthwiseConv2dNativeBackpropFilterConfig: KernelConfig = {\n kernelName: DepthwiseConv2dNativeBackpropFilter,\n backendName: 'webgl',\n kernelFunc: depthwiseConv2dNativeBackpropFilter as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, DepthwiseConv2dNativeBackpropInput, DepthwiseConv2dNativeBackpropInputAttrs, DepthwiseConv2dNativeBackpropInputInputs, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {DepthwiseConv2DDerInputProgram} from '../conv_backprop_gpu_depthwise';\n\nexport function depthwiseConv2dNativeBackpropInput(args: {\n inputs: DepthwiseConv2dNativeBackpropInputInputs,\n attrs: DepthwiseConv2dNativeBackpropInputAttrs,\n backend: MathBackendWebGL\n}) {\n const {inputs, backend, attrs} = args;\n const {dy, filter} = inputs;\n const {strides, dilations, pad, dimRoundingMode, inputShape} = attrs;\n\n const convInfo = backend_util.computeConv2DInfo(\n inputShape, filter.shape as [number, number, number, number], strides,\n dilations, pad, dimRoundingMode, true /* depthwise */);\n\n const program = new DepthwiseConv2DDerInputProgram(convInfo);\n return backend.runWebGLProgram(program, [dy, filter], 'float32');\n}\n\nexport const depthwiseConv2dNativeBackpropInputConfig: KernelConfig = {\n kernelName: DepthwiseConv2dNativeBackpropInput,\n backendName: 'webgl',\n kernelFunc: depthwiseConv2dNativeBackpropInput as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class DiagProgram implements GPGPUProgram {\n variableNames = ['X'];\n outputShape: number[];\n userCode: string;\n\n constructor(size: number) {\n this.outputShape = [size, size];\n this.userCode = `\n void main() {\n ivec2 coords = getOutputCoords();\n float val = coords[0] == coords[1] ? getX(coords[0]) : 0.0;\n setOutput(val);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Diag, DiagInputs, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {DiagProgram} from '../diag_gpu';\nimport {reshape} from './Reshape';\n\nexport function diag(args: {inputs: DiagInputs, backend: MathBackendWebGL}):\n TensorInfo {\n const {inputs, backend} = args;\n const {x} = inputs;\n\n const outShape = [...x.shape, ...x.shape];\n const xSize = util.sizeFromShape(x.shape);\n\n const flat = reshape({inputs: {x}, backend, attrs: {shape: [xSize]}});\n\n const program = new DiagProgram(xSize);\n const res = backend.runWebGLProgram(program, [flat], flat.dtype);\n\n const out = reshape({inputs: {x: res}, backend, attrs: {shape: outShape}});\n\n backend.disposeIntermediateTensorInfo(flat);\n backend.disposeIntermediateTensorInfo(res);\n\n return out;\n}\n\nexport const diagConfig: KernelConfig = {\n kernelName: Diag,\n backendName: 'webgl',\n kernelFunc: diag as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class Dilation2DProgram implements GPGPUProgram {\n variableNames = ['x', 'W'];\n outputShape: number[];\n userCode: string;\n\n constructor(convInfo: backend_util.Conv2DInfo) {\n this.outputShape = convInfo.outShape;\n\n const {\n inHeight,\n inWidth,\n padInfo,\n strideHeight,\n strideWidth,\n filterHeight,\n filterWidth,\n dilationHeight,\n dilationWidth\n } = convInfo;\n\n const {top: padTop, left: padLeft} = padInfo;\n\n this.userCode = `\n const ivec2 strides = ivec2(${strideHeight}, ${strideWidth});\n const ivec2 pads = ivec2(${padTop}, ${padLeft});\n const float neg_infinity = -3.4e38;\n\n void main() {\n ivec4 coords = getOutputCoords();\n int batch = coords.x;\n int d1 = coords.w;\n ivec2 outTopLeftCorner =\n coords.yz * strides - pads;\n int hBeg = outTopLeftCorner.x;\n int wBeg = outTopLeftCorner.y;\n\n float curVal = neg_infinity;\n for (int h = 0; h < ${filterHeight}; h++) {\n int hIn = hBeg + h * ${dilationHeight};\n\n if (hIn >= 0 && hIn < ${inHeight}) {\n for (int w = 0; w < ${filterWidth}; w++) {\n int wIn = wBeg + 
w * ${dilationWidth};\n\n if (wIn >= 0 && wIn < ${inWidth}) {\n float xVal = getX(batch, hIn, wIn, d1);\n float wVal = getW(h, w, d1);\n\n float val = xVal + wVal;\n if (val > curVal) {\n curVal = val;\n }\n }\n }\n }\n }\n\n float result = curVal;\n setOutput(result);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Dilation2D, Dilation2DAttrs, Dilation2DInputs, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {Dilation2DProgram} from '../dilation_gpu';\nimport {reshape} from './Reshape';\n\nexport function dilation2D(args: {\n inputs: Dilation2DInputs,\n attrs: Dilation2DAttrs,\n backend: MathBackendWebGL\n}) {\n const {inputs, backend, attrs} = args;\n const {x, filter} = inputs;\n const {strides, pad, dilations} = attrs;\n\n const convInfo = backend_util.computeDilation2DInfo(\n x.shape as [number, number, number, number],\n filter.shape as [number, number, number], strides, pad,\n 'NHWC' /* dataFormat */, dilations);\n let out: TensorInfo;\n\n const program = new Dilation2DProgram(convInfo);\n out = backend.runWebGLProgram(program, [x, filter], 'float32');\n\n const outReshaped =\n reshape({inputs: {x: out}, backend, attrs: {shape: convInfo.outShape}});\n backend.disposeIntermediateTensorInfo(out);\n\n return outReshaped;\n}\n\nexport const dilation2DConfig: KernelConfig = {\n kernelName: Dilation2D,\n backendName: 'webgl',\n kernelFunc: dilation2D as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Einsum, EinsumAttrs, EinsumInputs, KernelConfig, KernelFunc, Tensor, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\n\nimport {multiply} from './Multiply';\nimport {reshape} from './Reshape';\nimport {sum} from './Sum';\nimport {transpose} from './Transpose';\n\nexport function einsum(\n args:\n {inputs: EinsumInputs, backend: MathBackendWebGL, attrs: EinsumAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {equation} = attrs;\n const tensors = inputs as Tensor[];\n\n const {allDims, summedDims, idDims} =\n backend_util.decodeEinsumEquation(equation, tensors.length);\n backend_util.checkEinsumDimSizes(allDims.length, idDims, tensors);\n const {path, steps} = backend_util.getEinsumComputePath(summedDims, idDims);\n\n const nSteps = steps.length;\n let out: TensorInfo|null = null;\n let numDimsRemaining = allDims.length;\n const tensorsToDispose: TensorInfo[] = [];\n for (let i = 0; i < nSteps; ++i) {\n for (const idTerm of steps[i]) {\n const {permutationIndices: perm, expandDims: dimsToExpand} =\n backend_util.getEinsumPermutation(numDimsRemaining, idDims[idTerm]);\n let x: TensorInfo;\n if (backend_util.isIdentityPermutation(perm)) {\n x = tensors[idTerm];\n } else {\n x = transpose({inputs: {x: tensors[idTerm]}, backend, attrs: {perm}});\n tensorsToDispose.push(x);\n }\n const targetShape: number[] = x.shape.slice();\n for (let k = 0; k < dimsToExpand.length; ++k) {\n targetShape.splice(dimsToExpand[k], 0, 1);\n }\n\n if (!util.arraysEqual(x.shape, targetShape)) {\n x = reshape({inputs: {x}, backend, attrs: {shape: targetShape}});\n tensorsToDispose.push(x);\n }\n if (out === null) {\n out = x;\n } else {\n // tslint:disable-next-line: no-unnecessary-type-assertion\n out = multiply({inputs: {a: x, b: out}, backend}) as TensorInfo;\n tensorsToDispose.push(out);\n }\n }\n if (i < nSteps - 1) {\n if (path[i] >= 0) {\n out = sum({\n inputs: {x: out},\n backend,\n attrs: {\n axis: path[i] - (allDims.length - numDimsRemaining),\n keepDims: false\n }\n });\n tensorsToDispose.push(out);\n }\n numDimsRemaining--;\n }\n }\n\n // Clean up intermediate tensors.\n for (const tensorInfo of tensorsToDispose) {\n if (tensorInfo === out) {\n continue;\n }\n backend.disposeIntermediateTensorInfo(tensorInfo);\n }\n\n return out;\n}\n\nexport const einsumConfig: KernelConfig = {\n kernelName: Einsum,\n backendName: 'webgl',\n kernelFunc: einsum as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Elu, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst ELU = `return (x >= 0.0) ? x : (exp(x) - 1.0);`;\n\nconst ELU_PACKED = `\n vec4 result;\n\n result.r = (x.r >= 0.0) ? x.r : (exp(x.r) - 1.0);\n result.g = (x.g >= 0.0) ? x.g : (exp(x.g) - 1.0);\n result.b = (x.b >= 0.0) ? x.b : (exp(x.b) - 1.0);\n result.a = (x.a >= 0.0) ? x.a : (exp(x.a) - 1.0);\n\n return result;\n`;\n\nconst elu = unaryKernelFunc({opSnippet: ELU, packedOpSnippet: ELU_PACKED});\n\nexport const eluConfig: KernelConfig = {\n kernelName: Elu,\n backendName: 'webgl',\n kernelFunc: elu as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {EluGrad, EluGradInputs, env, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {BinaryOpProgram} from '../binaryop_gpu';\nimport {BinaryOpPackedProgram} from '../binaryop_packed_gpu';\n\nconst ELU_DER = `return (b >= 1.0) ? a : a * (b + 1.0);`;\nconst ELU_DER_PACKED = `\n vec4 bGTEZero = vec4(greaterThanEqual(b, vec4(0.)));\n return (bGTEZero * a) + ((vec4(1.0) - bGTEZero) * (a * (b + vec4(1.0))));\n`;\n\nexport const eluGrad =\n (args: {inputs: EluGradInputs, backend: MathBackendWebGL}): TensorInfo => {\n const {inputs, backend} = args;\n const {dy, y} = inputs;\n\n const program = env().getBool('WEBGL_PACK_BINARY_OPERATIONS') ?\n new BinaryOpPackedProgram(ELU_DER_PACKED, dy.shape, y.shape) :\n new BinaryOpProgram(ELU_DER, dy.shape, y.shape);\n return backend.runWebGLProgram(program, [dy, y], dy.dtype);\n };\n\nexport const eluGradConfig: KernelConfig = {\n kernelName: EluGrad,\n backendName: 'webgl',\n kernelFunc: eluGrad as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Equal, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {equalImplCPU} from '../kernel_utils/shared';\nconst PACKED_EQUAL = `\n return vec4(equal(a, b));\n`;\n\nconst EQUAL = `return float(a == b);`;\n\nexport const equal = binaryKernelFunc({\n opSnippet: EQUAL,\n packedOpSnippet: PACKED_EQUAL,\n dtype: 'bool',\n cpuKernelImpl: equalImplCPU,\n});\n\nexport const equalConfig: KernelConfig = {\n kernelName: Equal,\n backendName: 'webgl',\n kernelFunc: equal as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, Erf, KernelConfig} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst ERF = `\n // Error function is calculated approximately with elementary function.\n // See \"Handbook of Mathematical Functions with Formulas,\n // Graphs, and Mathematical Tables\", Abramowitz and Stegun.\n float p = ${backend_util.ERF_P};\n float a1 = ${backend_util.ERF_A1};\n float a2 = ${backend_util.ERF_A2};\n float a3 = ${backend_util.ERF_A3};\n float a4 = ${backend_util.ERF_A4};\n float a5 = ${backend_util.ERF_A5};\n\n float sign = sign(x);\n x = abs(x);\n float t = 1.0 / (1.0 + p * x);\n return sign * (1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*exp(-x*x));\n`;\n\nexport const erf = unaryKernelFunc({opSnippet: ERF});\n\nexport const erfConfig: KernelConfig = {\n kernelName: Erf,\n backendName: 'webgl',\n kernelFunc: erf,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Exp, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\n\nimport {CHECK_NAN_SNIPPET_UNARY, unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {expImplCPU} from '../kernel_utils/shared';\n\nexport const EXP = CHECK_NAN_SNIPPET_UNARY + `\n return exp(x);\n`;\n\nconst EXP_PACKED = `\n vec4 result = exp(x);\n bvec4 isNaN = isnan(x);\n result.r = isNaN.r ? x.r : result.r;\n result.g = isNaN.g ? x.g : result.g;\n result.b = isNaN.b ? x.b : result.b;\n result.a = isNaN.a ? x.a : result.a;\n\n return result;\n`;\n\nexport const exp = unaryKernelFunc({\n opSnippet: EXP,\n packedOpSnippet: EXP_PACKED,\n cpuKernelImpl: expImplCPU,\n dtype: 'float32',\n});\n\nexport const expConfig: KernelConfig = {\n kernelName: Exp,\n backendName: 'webgl',\n kernelFunc: exp as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {ExpandDims, ExpandDimsAttrs, ExpandDimsInputs, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {reshape} from './Reshape';\n\nexport function expandDims(args: {\n inputs: ExpandDimsInputs,\n attrs: ExpandDimsAttrs,\n backend: MathBackendWebGL\n}): TensorInfo {\n const {inputs, attrs, backend} = args;\n const {dim} = attrs;\n const {input} = inputs;\n\n const inputRank = input.shape.length;\n const newShape = input.shape.slice();\n let $dim = dim;\n if (dim < 0) {\n // Negative value is counted from the tail of rank.\n util.assert(\n -(inputRank + 1) <= dim,\n () => `Axis must be in the interval [${- (inputRank + 1)}, ${\n inputRank}]`);\n $dim = inputRank + dim + 1;\n }\n newShape.splice($dim, 0, 1);\n\n return reshape({inputs: {x: input}, backend, attrs: {shape: newShape}});\n}\n\nexport const expandDimsConfig: KernelConfig = {\n kernelName: ExpandDims,\n backendName: 'webgl',\n kernelFunc: expandDims as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Expm1, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {expm1ImplCPU} from '../kernel_utils/shared';\n\nconst EXPM1 = `return exp(x) - 1.0;`;\n\nexport const expm1 = unaryKernelFunc(\n {opSnippet: EXPM1, packedOpSnippet: EXPM1, cpuKernelImpl: expm1ImplCPU});\n\nexport const expm1Config: KernelConfig = {\n kernelName: Expm1,\n backendName: 'webgl',\n kernelFunc: expm1 as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class FFTProgram implements GPGPUProgram {\n variableNames = ['real', 'imag'];\n outputShape: number[];\n userCode: string;\n\n constructor(\n component: 'real'|'imag', inputShape: [number, number],\n inverse: boolean) {\n const innerDim = inputShape[1];\n this.outputShape = inputShape;\n\n const exponentMultiplierSnippet =\n inverse ? `2.0 * ${Math.PI}` : `-2.0 * ${Math.PI}`;\n const resultDenominator = inverse ? 
`${innerDim}.0` : '1.0';\n\n let opString: string;\n if (component === 'real') {\n opString = 'return real * expR - imag * expI;';\n } else if (component === 'imag') {\n opString = 'return real * expI + imag * expR;';\n } else {\n throw new Error(\n `FFT component must be either \"real\" or \"imag\", got ${component}.`);\n }\n\n this.userCode = `\n const float exponentMultiplier = ${exponentMultiplierSnippet};\n\n float unaryOpComplex(float real, float expR, float imag, float expI) {\n ${opString}\n }\n\n float mulMatDFT(int batch, int index) {\n float indexRatio = float(index) / float(${innerDim});\n float exponentMultiplierTimesIndexRatio =\n exponentMultiplier * indexRatio;\n\n float result = 0.0;\n\n for (int i = 0; i < ${innerDim}; i++) {\n // x = (-2|2 * PI / N) * index * i;\n float x = exponentMultiplierTimesIndexRatio * float(i);\n float expR = cos(x);\n float expI = sin(x);\n float real = getReal(batch, i);\n float imag = getImag(batch, i);\n\n result +=\n unaryOpComplex(real, expR, imag, expI) / ${resultDenominator};\n }\n\n return result;\n }\n\n void main() {\n ivec2 coords = getOutputCoords();\n setOutput(mulMatDFT(coords[0], coords[1]));\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {FFTProgram} from '../fft_gpu';\n\nimport {complex} from './Complex';\nimport {reshape} from './Reshape';\n\nexport function fftImpl(\n x: TensorInfo, inverse: boolean, backend: MathBackendWebGL): TensorInfo {\n const xData = backend.texData.get(x.dataId);\n\n const inputSize = util.sizeFromShape(x.shape);\n // Collapse all outer dimensions to a single batch dimension.\n const innerDimensionSize = x.shape[x.shape.length - 1];\n const batch = inputSize / innerDimensionSize;\n\n const input2D = reshape(\n {inputs: {x}, backend, attrs: {shape: [batch, innerDimensionSize]}});\n\n const xShape = input2D.shape as [number, number];\n const realProgram = new FFTProgram('real', xShape, inverse);\n const imagProgram = new FFTProgram('imag', xShape, inverse);\n\n const inputs = [\n {\n dataId: xData.complexTensorInfos.real.dataId,\n dtype: xData.complexTensorInfos.real.dtype,\n shape: xShape\n },\n {\n dataId: xData.complexTensorInfos.imag.dataId,\n dtype: xData.complexTensorInfos.imag.dtype,\n shape: xShape\n }\n ];\n\n const realPart = backend.runWebGLProgram(realProgram, inputs, 'float32');\n const imagPart = backend.runWebGLProgram(imagProgram, inputs, 'float32');\n\n const complexOutput =\n complex({inputs: {real: realPart, imag: imagPart}, backend});\n\n backend.disposeIntermediateTensorInfo(realPart);\n backend.disposeIntermediateTensorInfo(imagPart);\n\n const complexOutputReshaped =\n reshape({inputs: {x: complexOutput}, backend, attrs: {shape: x.shape}});\n\n 
backend.disposeIntermediateTensorInfo(input2D);\n backend.disposeIntermediateTensorInfo(complexOutput);\n return complexOutputReshaped;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {FFT, FFTInputs, KernelConfig, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\n\nimport {fftImpl} from './FFT_impl';\n\nexport function fft(args: {inputs: FFTInputs, backend: MathBackendWebGL}):\n TensorInfo {\n const {inputs, backend} = args;\n const {input} = inputs;\n\n return fftImpl(input, false /* inverse */, backend);\n}\n\nexport const fftConfig: KernelConfig = {\n kernelName: FFT,\n backendName: 'webgl',\n kernelFunc: fft\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {UniformType} from './shader_compiler';\n\nexport class FillProgram implements GPGPUProgram {\n variableNames: string[];\n outputShape: number[] = [];\n userCode: string;\n customUniforms = [{name: 'value', type: 'float' as UniformType}];\n\n constructor(shape: number[], value: number) {\n this.variableNames = ['x'];\n this.outputShape = shape;\n\n this.userCode = `\n void main() {\n // Input can be obtained from uniform value.\n setOutput(value);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Fill, FillAttrs, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {FillProgram} from '../fill_gpu';\n\nexport function fill(args: {backend: MathBackendWebGL, attrs: FillAttrs}):\n TensorInfo {\n const {backend, attrs} = args;\n const {shape, value} = attrs;\n let {dtype} = attrs;\n\n dtype = dtype || util.inferDtype(value);\n\n if (dtype === 'string') {\n // String type should be handled in CPU memory.\n const values = util.getArrayFromDType(dtype, util.sizeFromShape(shape));\n values.fill(value as string);\n return backend.makeTensorInfo(shape, dtype, values);\n } else {\n const program = new FillProgram(shape, value as number);\n const customValues = [[value as number]];\n return backend.runWebGLProgram(program, [], dtype, customValues);\n }\n}\n\nexport const fillConfig: KernelConfig = {\n kernelName: Fill,\n backendName: 'webgl',\n kernelFunc: fill as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env, KernelConfig, KernelFunc, TensorInfo} from '@tensorflow/tfjs-core';\nimport {FromPixels, FromPixelsAttrs, FromPixelsInputs} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {TextureUsage} from '../tex_util';\n\nimport {FromPixelsProgram} from './FromPixels_utils/from_pixels_gpu';\nimport {FromPixelsPackedProgram} from './FromPixels_utils/from_pixels_packed_gpu';\n\nexport const fromPixelsConfig: KernelConfig = {\n kernelName: FromPixels,\n backendName: 'webgl',\n kernelFunc: fromPixels as unknown as KernelFunc,\n};\n\nlet fromPixels2DContext: CanvasRenderingContext2D;\nlet willReadFrequently = env().getBool('CANVAS2D_WILL_READ_FREQUENTLY_FOR_GPU');\n\nfunction fromPixels(args: {\n inputs: FromPixelsInputs,\n backend: MathBackendWebGL,\n attrs: FromPixelsAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n let {pixels} = inputs;\n const {numChannels} = attrs;\n\n const isVideo = typeof (HTMLVideoElement) !== 'undefined' &&\n pixels instanceof HTMLVideoElement;\n const isImage = typeof (HTMLImageElement) !== 'undefined' &&\n pixels instanceof HTMLImageElement;\n const 
[width, height] = isVideo ?\n [\n (pixels as HTMLVideoElement).videoWidth,\n (pixels as HTMLVideoElement).videoHeight\n ] :\n [pixels.width, pixels.height];\n\n const texShape: [number, number] = [height, width];\n const outShape = [height, width, numChannels];\n\n if (isImage || isVideo) {\n const newWillReadFrequently =\n env().getBool('CANVAS2D_WILL_READ_FREQUENTLY_FOR_GPU');\n if (fromPixels2DContext == null ||\n newWillReadFrequently !== willReadFrequently) {\n willReadFrequently = newWillReadFrequently;\n fromPixels2DContext =\n document.createElement('canvas').getContext(\n '2d', {willReadFrequently});\n }\n\n fromPixels2DContext.canvas.width = width;\n fromPixels2DContext.canvas.height = height;\n fromPixels2DContext.drawImage(\n pixels as HTMLVideoElement | HTMLImageElement | ImageBitmap, 0, 0,\n width, height);\n pixels = fromPixels2DContext.canvas;\n }\n\n const tempPixelHandle = backend.makeTensorInfo(texShape, 'int32');\n // This is a byte texture with pixels.\n backend.texData.get(tempPixelHandle.dataId).usage = TextureUsage.PIXELS;\n backend.gpgpu.uploadPixelDataToTexture(\n backend.getTexture(tempPixelHandle.dataId), pixels as ImageData);\n const program = env().getBool('WEBGL_PACK') ?\n new FromPixelsPackedProgram(outShape) :\n new FromPixelsProgram(outShape);\n const res = backend.runWebGLProgram(program, [tempPixelHandle], 'int32');\n backend.disposeData(tempPixelHandle.dataId);\n return res;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class FlipLeftRightProgram implements GPGPUProgram {\n variableNames = ['Image'];\n outputShape: number[] = [];\n userCode: string;\n\n constructor(imageShape: [number, number, number, number]) {\n const imageWidth = imageShape[2];\n this.outputShape = imageShape;\n\n this.userCode = `\n void main() {\n ivec4 coords = getOutputCoords();\n int x = coords[2];\n\n int coordX = ${imageWidth} - x - 1;\n float outputValue;\n if(coordX >= 0 && coordX < ${imageWidth}) {\n outputValue = getImage(coords[0], coords[1], coordX, coords[3]);\n } else {\n outputValue = getImage(coords[0], coords[1], coords[2], coords[3]);\n }\n setOutput(outputValue);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Tensor4D} from '@tensorflow/tfjs-core';\nimport {FlipLeftRight, FlipLeftRightInputs} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {FlipLeftRightProgram} from '../flip_left_right_gpu';\n\nexport const flipLeftRightConfig: KernelConfig = {\n kernelName: FlipLeftRight,\n backendName: 'webgl',\n kernelFunc: ({inputs, backend}) => {\n const {image} = inputs as FlipLeftRightInputs;\n const webglBackend = backend as MathBackendWebGL;\n\n const program = new FlipLeftRightProgram((image as Tensor4D).shape);\n const output = webglBackend.runWebGLProgram(program, [image], image.dtype);\n return output;\n }\n};\n","\n/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Floor, KernelConfig} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {floorImplCPU} from '../kernel_utils/shared';\n\nconst FLOOR = `return floor(x);`;\n\nexport const floor = unaryKernelFunc(\n {opSnippet: FLOOR, packedOpSnippet: FLOOR, cpuKernelImpl: floorImplCPU});\n\nexport const floorConfig: KernelConfig = {\n kernelName: Floor,\n backendName: 'webgl',\n kernelFunc: floor,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {FloorDiv, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\n\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\n// We use native integer division to deal with floating point imprecision. 
Since\n// we implement floor division and glsl implements truncated division, we\n// correct for this by subtracting 1 from result when the result is negative and\n// there is a remainder.\nconst INT_DIV = `\n float s = sign(a) * sign(b);\n int ia = round(a);\n int ib = round(b);\n if (ib != 0) {\n // Windows (D3D) wants guaranteed non-zero int division at compile-time.\n return float(idiv(ia, ib, s));\n } else {\n return NAN;\n }\n`;\n\nconst INT_DIV_PACKED = `\n ivec4 ia = round(a);\n ivec4 ib = round(b);\n bvec4 cond = notEqual(ib, ivec4(0));\n ivec4 result = ivec4(0);\n vec4 s = sign(a) * sign(b);\n\n // Windows (D3D) wants guaranteed non-zero int division at compile-time.\n if (cond[0]) {\n result[0] = idiv(ia[0], ib[0], s[0]);\n }\n if (cond[1]) {\n result[1] = idiv(ia[1], ib[1], s[1]);\n }\n if (cond[2]) {\n result[2] = idiv(ia[2], ib[2], s[2]);\n }\n if (cond[3]) {\n result[3] = idiv(ia[3], ib[3], s[3]);\n }\n return vec4(result);\n`;\n\nexport const floorDiv = binaryKernelFunc(\n {opSnippet: INT_DIV, packedOpSnippet: INT_DIV_PACKED, dtype: 'int32'});\n\nexport const floorDivConfig: KernelConfig = {\n kernelName: FloorDiv,\n backendName: 'webgl',\n kernelFunc: floorDiv as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getGlslDifferences} from '../../glsl_version';\nimport {GPGPUProgram} from '../../gpgpu_math';\n\nexport class FromPixelsProgram implements GPGPUProgram {\n variableNames = ['A'];\n userCode: string;\n outputShape: number[];\n\n constructor(outputShape: number[]) {\n const glsl = getGlslDifferences();\n const [height, width, ] = outputShape;\n this.outputShape = outputShape;\n this.userCode = `\n void main() {\n ivec3 coords = getOutputCoords();\n int texR = coords[0];\n int texC = coords[1];\n int depth = coords[2];\n vec2 uv = (vec2(texC, texR) + halfCR) / vec2(${width}.0, ${height}.0);\n\n vec4 values = ${glsl.texture2D}(A, uv);\n float value;\n if (depth == 0) {\n value = values.r;\n } else if (depth == 1) {\n value = values.g;\n } else if (depth == 2) {\n value = values.b;\n } else if (depth == 3) {\n value = values.a;\n }\n\n setOutput(floor(value * 255.0 + 0.5));\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {getGlslDifferences} from '../../glsl_version';\nimport {GPGPUProgram} from '../../gpgpu_math';\n\nexport class FromPixelsPackedProgram implements GPGPUProgram {\n variableNames = ['A'];\n userCode: string;\n outputShape: number[];\n packedInputs = false;\n packedOutput = true;\n\n constructor(outputShape: number[]) {\n const glsl = getGlslDifferences();\n const [height, width, ] = outputShape;\n this.outputShape = outputShape;\n this.userCode = `\n void main() {\n ivec3 coords = getOutputCoords();\n int texR = coords[0];\n int texC = coords[1];\n int depth = coords[2];\n\n vec4 result = vec4(0.);\n\n for(int row=0; row<=1; row++) {\n for(int col=0; col<=1; col++) {\n texC = coords[1] + row;\n depth = coords[2] + col;\n\n vec2 uv = (vec2(texC, texR) + halfCR) /\n vec2(${width}.0, ${height}.0);\n vec4 values = ${glsl.texture2D}(A, uv);\n float value;\n if (depth == 0) {\n value = values.r;\n } else if (depth == 1) {\n value = values.g;\n } else if (depth == 2) {\n value = values.b;\n } else if (depth == 3) {\n value = values.a;\n }\n\n result[row * 2 + col] = floor(value * 255.0 + 0.5);\n }\n }\n\n ${glsl.output} = result;\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, env, FusedConv2D, FusedConv2DAttrs, FusedConv2DInputs, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {Conv2DProgram} from '../conv_gpu';\nimport {Conv2DPackedProgram} from '../conv_packed_gpu';\nimport {mapActivationToShaderProgram} from '../kernel_utils/kernel_funcs_utils';\n\nimport {conv2dByMatMul, conv2dWithIm2Row} from './Conv2D_impl';\nimport {reshape} from './Reshape';\n\nexport function fusedConv2d(args: {\n inputs: FusedConv2DInputs,\n attrs: FusedConv2DAttrs,\n backend: MathBackendWebGL\n}) {\n const {inputs, backend, attrs} = args;\n const {x, filter, bias, preluActivationWeights} = inputs;\n const {\n strides,\n pad,\n dataFormat,\n dilations,\n dimRoundingMode,\n activation,\n leakyreluAlpha\n } = attrs;\n\n const $dataFormat = backend_util.convertConv2DDataFormat(dataFormat);\n const convInfo = backend_util.computeConv2DInfo(\n x.shape as [number, number, number, number],\n filter.shape as [number, number, number, number], strides, dilations, pad,\n dimRoundingMode, false /* depthwise */, $dataFormat);\n let out: TensorInfo;\n const intermediates: TensorInfo[] = [];\n\n const hasBias = bias != null;\n const hasPreluActivationWeights = preluActivationWeights != null;\n const hasLeakyreluAlpha = activation === 'leakyrelu';\n\n const prepareInputs = (): TensorInfo[] => {\n const inputs: TensorInfo[] = [x, filter];\n\n // If the input is a 1-D tensor, align it with the channels.\n //\n // For fusedConv2d, the inputs (x, W, bias, preluActivationWeights) are\n // supposed to be aligned with the dataFormat. 
The 4-D tensor inputs or\n // scalar inputs are originally aligned, but the 1-D tensor inputs are\n // supposed to be aligned with the channels (only bias and PReLU activation\n // weights could be a 1-D tensor).\n const alignInputWithDataFormat =\n (input: TensorInfo, dataFormat: 'NHWC'|'NCHW'): TensorInfo => {\n if (dataFormat === 'NCHW' && input.shape.length === 1 &&\n input.shape[0] !== 1) {\n const alignedInput = reshape({\n inputs: {x: input},\n backend,\n attrs: {shape: [input.shape[0], 1, 1]}\n });\n intermediates.push(alignedInput);\n return alignedInput;\n }\n return input;\n };\n\n if (hasBias) {\n inputs.push(alignInputWithDataFormat(bias, dataFormat));\n }\n\n if (hasPreluActivationWeights) {\n inputs.push(alignInputWithDataFormat(preluActivationWeights, dataFormat));\n }\n\n if (hasLeakyreluAlpha) {\n const $leakyreluAlpha = backend.makeTensorInfo(\n [], 'float32',\n util.createScalarValue(leakyreluAlpha as unknown as 'float32', 'float32'));\n inputs.push($leakyreluAlpha);\n intermediates.push($leakyreluAlpha);\n }\n return inputs;\n };\n\n if (convInfo.filterHeight === 1 && convInfo.filterWidth === 1 &&\n convInfo.dilationHeight === 1 && convInfo.dilationWidth === 1 &&\n convInfo.strideHeight === 1 && convInfo.strideWidth === 1 &&\n (convInfo.padInfo.type === 'SAME' || convInfo.padInfo.type === 'VALID')) {\n out = conv2dByMatMul({\n x,\n filter,\n convInfo,\n backend,\n bias,\n activation,\n preluActivationWeights,\n leakyreluAlpha\n });\n } else if (convInfo.strideWidth <= 2 && $dataFormat === 'channelsLast'\n && env().getBool('WEBGL_EXP_CONV')\n ) {\n const fusedActivation =\n activation ? mapActivationToShaderProgram(activation, true) : null;\n const program = new Conv2DPackedProgram(\n convInfo, hasBias, fusedActivation, hasPreluActivationWeights,\n hasLeakyreluAlpha);\n const customValues = [\n [convInfo.padInfo.top, convInfo.padInfo.left],\n [convInfo.strideHeight, convInfo.strideWidth],\n [convInfo.dilationHeight, convInfo.dilationWidth],\n [convInfo.inHeight, convInfo.inWidth]\n ];\n const inputs = prepareInputs();\n out = backend.runWebGLProgram(program, inputs, 'float32', customValues);\n } else if (env().getBool('WEBGL_CONV_IM2COL')) {\n out = conv2dWithIm2Row({\n x,\n filter,\n convInfo,\n backend,\n bias,\n activation,\n preluActivationWeights,\n leakyreluAlpha\n });\n } else {\n const fusedActivation =\n activation ? mapActivationToShaderProgram(activation, false) : null;\n const program = new Conv2DProgram(\n convInfo, hasBias, fusedActivation, hasPreluActivationWeights,\n hasLeakyreluAlpha);\n\n const inputs = prepareInputs();\n out = backend.runWebGLProgram(program, inputs, 'float32');\n }\n\n const outReshaped =\n reshape({inputs: {x: out}, backend, attrs: {shape: convInfo.outShape}});\n\n intermediates.push(out);\n intermediates.forEach(t => backend.disposeIntermediateTensorInfo(t));\n\n return outReshaped;\n}\n\nexport const fusedConv2DConfig: KernelConfig = {\n kernelName: FusedConv2D,\n backendName: 'webgl',\n kernelFunc: fusedConv2d as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, env, FusedDepthwiseConv2D, FusedDepthwiseConv2DAttrs, FusedDepthwiseConv2DInputs, KernelConfig, KernelFunc, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {DepthwiseConv2DProgram} from '../conv_gpu_depthwise';\nimport {DepthwiseConvPacked2DProgram} from '../conv_packed_gpu_depthwise';\nimport {mapActivationToShaderProgram} from '../kernel_utils/kernel_funcs_utils';\n\nexport function fusedDepthwiseConv2D(args: {\n inputs: FusedDepthwiseConv2DInputs,\n attrs: FusedDepthwiseConv2DAttrs,\n backend: MathBackendWebGL\n}) {\n const {inputs, backend, attrs} = args;\n const {x, filter, bias, preluActivationWeights} = inputs;\n const {strides, pad, dilations, dimRoundingMode, activation, leakyreluAlpha} =\n attrs;\n\n const intermediates: TensorInfo[] = [];\n\n let $dilations = dilations;\n if ($dilations == null) {\n $dilations = [1, 1];\n }\n\n util.assert(\n backend_util.eitherStridesOrDilationsAreOne(strides, $dilations),\n () => 'Error in depthwiseConv2d: Either strides or dilations must be ' +\n `1. Got strides ${strides} and dilations '${$dilations}'`);\n\n const convInfo = backend_util.computeConv2DInfo(\n x.shape as [number, number, number, number],\n filter.shape as [number, number, number, number], strides, $dilations,\n pad, dimRoundingMode, true /* depthwise */);\n\n const shouldPackDepthwiseConv = env().getBool('WEBGL_PACK_DEPTHWISECONV') &&\n convInfo.strideWidth <= 2 &&\n convInfo.outChannels / convInfo.inChannels === 1;\n const fusedActivation = activation ?\n mapActivationToShaderProgram(activation, shouldPackDepthwiseConv) :\n null;\n const programInputs: TensorInfo[] = [x, filter];\n\n const hasBias = bias != null;\n const hasPreluActivationWeights = preluActivationWeights != null;\n const hasLeakyreluAlpha = activation === 'leakyrelu';\n\n if (hasBias) {\n programInputs.push(bias);\n }\n if (hasPreluActivationWeights) {\n programInputs.push(preluActivationWeights);\n }\n if (hasLeakyreluAlpha) {\n const $leakyreluAlpha = backend.makeTensorInfo(\n [], 'float32',\n util.createScalarValue(leakyreluAlpha as unknown as 'float32',\n 'float32'));\n programInputs.push($leakyreluAlpha);\n intermediates.push($leakyreluAlpha);\n }\n\n let program: DepthwiseConv2DProgram|DepthwiseConvPacked2DProgram;\n if (shouldPackDepthwiseConv) {\n program = new DepthwiseConvPacked2DProgram(\n convInfo, hasBias, fusedActivation, hasPreluActivationWeights,\n hasLeakyreluAlpha);\n } else {\n program = new DepthwiseConv2DProgram(\n convInfo, hasBias, fusedActivation, hasPreluActivationWeights,\n hasLeakyreluAlpha);\n }\n const customValues = [\n [convInfo.padInfo.top, convInfo.padInfo.left],\n [convInfo.strideHeight, convInfo.strideWidth],\n [convInfo.dilationHeight, convInfo.dilationWidth],\n [convInfo.inHeight, convInfo.inWidth]\n ];\n const result =\n 
backend.runWebGLProgram(program, programInputs, 'float32', customValues);\n\n intermediates.forEach(t => backend.disposeIntermediateTensorInfo(t));\n\n return result;\n}\n\nexport const fusedDepthwiseConv2DConfig: KernelConfig = {\n kernelName: FusedDepthwiseConv2D,\n backendName: 'webgl',\n kernelFunc: fusedDepthwiseConv2D as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {GPGPUProgram} from './gpgpu_math';\nimport {getCoordsDataType} from './shader_compiler';\n\nexport class GatherNDProgram implements GPGPUProgram {\n variableNames = ['x', 'indices'];\n outputShape: number[];\n userCode: string;\n constructor(\n private sliceDim: number, private strides: number[], shape: number[],\n private paramsShape: number[]) {\n this.outputShape = shape;\n const dtype = getCoordsDataType(shape.length);\n\n let mainLoop = `\n int index;`;\n for (let j = 0; j < this.sliceDim; j++) {\n mainLoop += `\n index = round(getIndices(coords[0], ${j}));\n out_of_bounds = out_of_bounds || index < 0;\n out_of_bounds = out_of_bounds || index >= ${this.paramsShape[j]};\n flattenIndex += index * ${this.strides[j]};`;\n }\n\n this.userCode = `\n void main() {\n ${dtype} coords = getOutputCoords();\n int flattenIndex = 0;\n bool out_of_bounds = false;\n\n ${mainLoop}\n\n setOutput(out_of_bounds ? 0.0 : getX(flattenIndex, coords[1]));\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, GatherNd, GatherNdInputs, KernelConfig, KernelFunc, Rank, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {GatherNDProgram} from '../gather_nd_gpu';\nimport {gatherNdImplCPU} from '../kernel_utils/shared';\n\nimport {reshape} from './Reshape';\n\nexport function gatherNd(\n args: {inputs: GatherNdInputs, backend: MathBackendWebGL}): TensorInfo {\n const {inputs, backend} = args;\n const {params, indices} = inputs;\n\n const indicesShape = indices.shape;\n const sliceRank = indicesShape[indicesShape.length - 1];\n const paramsSize = util.sizeFromShape(params.shape);\n\n const [resultShape, numSlices, sliceSize, strides] =\n backend_util.prepareAndValidate(params, indices);\n\n const flattenIndices = reshape(\n {inputs: {x: indices}, backend, attrs: {shape: [numSlices, sliceRank]}});\n const flattenX = reshape({\n inputs: {x: params},\n backend,\n attrs: {shape: [(util.sizeFromShape(params.shape) / sliceSize), sliceSize]}\n });\n\n if (backend.shouldExecuteOnCPU([params, indices]) ||\n params.dtype === 'string') {\n const indicesData = backend.readSync(indices.dataId) as TypedArray;\n const paramsBuf = backend.bufferSync(params);\n const outValue = gatherNdImplCPU(\n indicesData, paramsBuf, params.dtype, numSlices, sliceRank, sliceSize,\n strides, params.shape, paramsSize);\n\n return backend.makeTensorInfo(resultShape, params.dtype, outValue.values);\n }\n const program =\n new GatherNDProgram(sliceRank, strides, [numSlices, sliceSize], \n params.shape);\n const res = backend.runWebGLProgram(\n program, [flattenX, flattenIndices], flattenX.dtype);\n\n const reshaped =\n reshape({inputs: {x: res}, backend, attrs: {shape: resultShape}});\n\n backend.disposeIntermediateTensorInfo(flattenIndices);\n backend.disposeIntermediateTensorInfo(flattenX);\n backend.disposeIntermediateTensorInfo(res);\n\n return reshaped;\n}\n\nexport const gatherNdConfig: KernelConfig = {\n kernelName: GatherNd,\n backendName: 'webgl',\n kernelFunc: gatherNd as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {getCoordsDataType} from './shader_compiler';\n\nexport type GatherShape = [number, number, number, number];\n\nexport class GatherProgram implements GPGPUProgram {\n variableNames = ['A', 'indices'];\n outputShape: number[];\n userCode: string;\n rank: number;\n\n constructor(aShape: GatherShape, outputShape: GatherShape) {\n this.outputShape = outputShape;\n this.rank = outputShape.length;\n const dtype = getCoordsDataType(this.rank);\n const sourceCoords = getSourceCoords(aShape, 2);\n\n this.userCode = `\n void main() {\n ${dtype} resRC = getOutputCoords();\n int index = int(getIndices(resRC.x, resRC.z));\n float inBounds = (index >= 0) && (index < ${aShape[2]}) ? 1.0 : 0.0;\n setOutput(inBounds * getA(${sourceCoords}));\n }\n `;\n }\n}\n\n// The input and output are always flattened into rank 4 tensors.\nfunction getSourceCoords(aShape: GatherShape, axis: number): string {\n const currentCoords = ['resRC.x', 'resRC.y', 'resRC.z', 'resRC.w'];\n\n const sourceCoords = [];\n for (let i = 0; i < aShape.length; i++) {\n if (i === 2) {\n sourceCoords.push('index');\n } else {\n sourceCoords.push(`${currentCoords[i]}`);\n }\n }\n return sourceCoords.join();\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, GatherV2, GatherV2Attrs, GatherV2Inputs, KernelConfig, KernelFunc, TensorInfo, TypedArray, util, env} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {GatherProgram, GatherShape} from '../gather_gpu';\nimport {gatherV2ImplCPU} from '../kernel_utils/shared';\n\nimport {reshape} from './Reshape';\n\nexport function gatherV2(args: {\n inputs: GatherV2Inputs,\n backend: MathBackendWebGL,\n attrs: GatherV2Attrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x, indices} = inputs;\n const {axis, batchDims} = attrs;\n\n const parsedAxis = util.parseAxisParam(axis, x.shape)[0];\n if (env().get('DEBUG')) {\n // In debug mode, throw error when any index is out of bound.\n // Otherwise, just fill out of bounds with zeroes.\n const indicesVals = backend.readSync(indices.dataId) as TypedArray;\n const axisDim = x.shape[parsedAxis];\n for (let i = 0; i < indicesVals.length; ++i) {\n const index = indicesVals[i];\n util.assert(\n index <= axisDim - 1 && index >= 0,\n () =>\n `GatherV2: the index value ${index} is not in [0, ${axisDim - 1}]`);\n }\n }\n\n const shapeInfo = backend_util.segment_util.collectGatherOpShapeInfo(\n x, indices, parsedAxis, batchDims);\n\n const indicesSize = util.sizeFromShape(indices.shape);\n\n const toDispose = [];\n\n const flattenX = reshape({\n inputs: {x},\n backend,\n attrs: {\n shape: [\n shapeInfo.batchSize, shapeInfo.outerSize, shapeInfo.dimSize,\n shapeInfo.sliceSize\n ]\n }\n });\n\n const flattenIndex = reshape({\n inputs: {x: indices},\n backend,\n attrs: {shape: [shapeInfo.batchSize, indicesSize / shapeInfo.batchSize]}\n });\n\n toDispose.push(flattenX);\n toDispose.push(flattenIndex);\n\n const flattenOutputShape = [\n shapeInfo.batchSize, shapeInfo.outerSize, indicesSize / shapeInfo.batchSize,\n shapeInfo.sliceSize\n ];\n\n if (backend.shouldExecuteOnCPU([x, indices]) || x.dtype === 'string') {\n const indicesBuf = backend.bufferSync(flattenIndex);\n const xBuf = backend.bufferSync(flattenX);\n const outBuf = gatherV2ImplCPU(xBuf, indicesBuf, flattenOutputShape);\n\n toDispose.forEach(t => backend.disposeIntermediateTensorInfo(t));\n\n return backend.makeTensorInfo(\n shapeInfo.outputShape, outBuf.dtype, outBuf.values as TypedArray);\n }\n\n const program = new GatherProgram(flattenX.shape as GatherShape,\n flattenOutputShape as GatherShape);\n const res = backend.runWebGLProgram(\n program, [flattenX, flattenIndex], flattenX.dtype);\n toDispose.push(res);\n\n const reshaped = reshape(\n {inputs: {x: res}, backend, attrs: {shape: shapeInfo.outputShape}});\n toDispose.forEach(t => backend.disposeIntermediateTensorInfo(t));\n return reshaped;\n}\n\nexport const gatherV2Config: KernelConfig = {\n kernelName: GatherV2,\n backendName: 'webgl',\n kernelFunc: gatherV2 as unknown as KernelFunc\n};\n","/**\n * @license\n 
* Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {Greater, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\n\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {greaterImplCPU} from '../kernel_utils/shared';\n\nconst GREATER = `return float(a > b);`;\nconst GREATER_PACKED = `\n return vec4(greaterThan(a, b));\n`;\n\nexport const greater = binaryKernelFunc({\n opSnippet: GREATER,\n packedOpSnippet: GREATER_PACKED,\n cpuKernelImpl: greaterImplCPU,\n dtype: 'bool'\n});\n\nexport const greaterConfig: KernelConfig = {\n kernelName: Greater,\n backendName: 'webgl',\n kernelFunc: greater as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GreaterEqual, KernelConfig, KernelFunc} from '@tensorflow/tfjs-core';\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {greaterEqualImplCPU} from '../kernel_utils/shared';\n\nconst GREATER_EQUAL = `return float(a >= b);`;\nconst GREATER_EQUAL_PACKED = `\n return vec4(greaterThanEqual(a, b));\n`;\n\nexport const greaterEqual = binaryKernelFunc({\n opSnippet: GREATER_EQUAL,\n packedOpSnippet: GREATER_EQUAL_PACKED,\n dtype: 'bool',\n cpuKernelImpl: greaterEqualImplCPU\n});\n\nexport const greaterEqualConfig: KernelConfig = {\n kernelName: GreaterEqual,\n backendName: 'webgl',\n kernelFunc: greaterEqual as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {IFFT, IFFTInputs, KernelConfig, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\n\nimport {fftImpl} from './FFT_impl';\n\nexport function ifft(args: {inputs: IFFTInputs, backend: MathBackendWebGL}):\n TensorInfo {\n const {inputs, backend} = args;\n const {input} = inputs;\n\n return fftImpl(input, true /* inverse */, backend);\n}\n\nexport const ifftConfig: KernelConfig = {\n kernelName: IFFT,\n backendName: 'webgl',\n kernelFunc: ifft\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {IsFinite, KernelConfig} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst IS_FINITE = `return float(!isnan(x) && !isinf(x));`;\n\nexport const isFinite = unaryKernelFunc({opSnippet: IS_FINITE, dtype: 'bool'});\n\nexport const isFiniteConfig: KernelConfig = {\n kernelName: IsFinite,\n backendName: 'webgl',\n kernelFunc: isFinite,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {IsInf, KernelConfig} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst IS_INF = `return float(isinf(x));`;\n\nexport const isInf = unaryKernelFunc({opSnippet: IS_INF, dtype: 'bool'});\n\nexport const isInfConfig: KernelConfig = {\n kernelName: IsInf,\n backendName: 'webgl',\n kernelFunc: isInf,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {IsNan, KernelConfig} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst IS_NAN = `return float(isnan(x));`;\n\nexport const isNaN = unaryKernelFunc({opSnippet: IS_NAN, dtype: 'bool'});\n\nexport const isNaNConfig: KernelConfig = {\n kernelName: IsNan,\n backendName: 'webgl',\n kernelFunc: isNaN,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Less} from '@tensorflow/tfjs-core';\n\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {lessImplCPU} from '../kernel_utils/shared';\n\nconst LESS = `return float(a < b);`;\nconst LESS_PACKED = `\n return vec4(lessThan(a, b));\n`;\n\nexport const less = binaryKernelFunc({\n opSnippet: LESS,\n packedOpSnippet: LESS_PACKED,\n cpuKernelImpl: lessImplCPU,\n dtype: 'bool'\n});\n\nexport const lessConfig: KernelConfig = {\n kernelName: Less,\n backendName: 'webgl',\n kernelFunc: less as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, LessEqual} from '@tensorflow/tfjs-core';\n\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {lessEqualImplCPU} from '../kernel_utils/shared';\n\nexport const LESS_EQUAL = `return float(a <= b);`;\nexport const LESS_EQUAL_PACKED = `\n return vec4(lessThanEqual(a, b));\n`;\n\nexport const lessEqual = binaryKernelFunc({\n opSnippet: LESS_EQUAL,\n packedOpSnippet: LESS_EQUAL_PACKED,\n cpuKernelImpl: lessEqualImplCPU,\n dtype: 'bool'\n});\n\nexport const lessEqualConfig: KernelConfig = {\n kernelName: LessEqual,\n backendName: 'webgl',\n kernelFunc: lessEqual as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, LinSpace, LinSpaceAttrs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {linSpaceImplCPU} from '../kernel_utils/shared';\n\nexport function linSpace(\n args: {backend: MathBackendWebGL, attrs: LinSpaceAttrs}): TensorInfo {\n const {backend, attrs} = args;\n const {start, stop, num} = attrs;\n\n // TODO: Use CPU implementation due to the precision problem in Safari.\n const outVals = linSpaceImplCPU(start, stop, num);\n return backend.makeTensorInfo([outVals.length], 'float32', outVals);\n}\n\nexport const linSpaceConfig: KernelConfig = {\n kernelName: LinSpace,\n backendName: 'webgl',\n kernelFunc: linSpace as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Log} from '@tensorflow/tfjs-core';\n\nimport {CHECK_NAN_SNIPPET_UNARY, unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {logImplCPU} from '../kernel_utils/shared';\n\n// Windows chrome return 0 if the input is negative value. We will specifically\n// return NaN if the input is 0 to solve compatiblity issue.\nconst LOG = CHECK_NAN_SNIPPET_UNARY + `\n return x < 0.0 ? 0./0. : log(x);\n`;\n\nconst LOG_PACKED = `\n vec4 result = log(x);\n bvec4 isNaN = isnan(x);\n result.r = isNaN.r ? x.r : (x.r < 0.0 ? 0./0. : result.r);\n result.g = isNaN.g ? x.g : (x.g < 0.0 ? 0./0. : result.g);\n result.b = isNaN.b ? x.b : (x.b < 0.0 ? 0./0. : result.b);\n result.a = isNaN.a ? x.a : (x.a < 0.0 ? 0./0. : result.a);\n return result;\n`;\n\nexport const log = unaryKernelFunc(\n {opSnippet: LOG, packedOpSnippet: LOG_PACKED, cpuKernelImpl: logImplCPU});\n\nexport const logConfig: KernelConfig = {\n kernelName: Log,\n backendName: 'webgl',\n kernelFunc: log as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Log1p} from '@tensorflow/tfjs-core';\n\nimport {CHECK_NAN_SNIPPET_UNARY, unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst LOG1P = CHECK_NAN_SNIPPET_UNARY + `\n return log(1.0 + x);\n`;\n\nexport const log1p = unaryKernelFunc({opSnippet: LOG1P});\n\nexport const log1pConfig: KernelConfig = {\n kernelName: Log1p,\n backendName: 'webgl',\n kernelFunc: log1p,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, LogicalAnd} from '@tensorflow/tfjs-core';\n\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst LOGICAL_AND = `return float(a >= 1.0 && b >= 1.0);`;\nconst LOGICAL_AND_PACKED = `\n return vec4(\n vec4(greaterThanEqual(a, vec4(1.0))) *\n vec4(greaterThanEqual(b, vec4(1.0))));\n`;\n\nexport const logicalAnd = binaryKernelFunc({\n opSnippet: LOGICAL_AND,\n packedOpSnippet: LOGICAL_AND_PACKED,\n dtype: 'bool'\n});\n\nexport const logicalAndConfig: KernelConfig = {\n kernelName: LogicalAnd,\n backendName: 'webgl',\n kernelFunc: logicalAnd as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, LogicalNot} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst LOGICAL_NOT = `return float(!(x >= 1.0));`;\n\nexport const logicalNot = unaryKernelFunc({opSnippet: LOGICAL_NOT});\n\nexport const logicalNotConfig: KernelConfig = {\n kernelName: LogicalNot,\n backendName: 'webgl',\n kernelFunc: logicalNot,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, LogicalOr} from '@tensorflow/tfjs-core';\n\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst LOGICAL_OR = `return float(a >= 1.0 || b >= 1.0);`;\nconst LOGICAL_OR_PACKED = `\n return min(\n vec4(greaterThanEqual(a, vec4(1.0))) +\n vec4(greaterThanEqual(b, vec4(1.0))),\n vec4(1.0));\n`;\n\nexport const logicalOr = binaryKernelFunc(\n {opSnippet: LOGICAL_OR, packedOpSnippet: LOGICAL_OR_PACKED, dtype: 'bool'});\n\nexport const logicalOrConfig: KernelConfig = {\n kernelName: LogicalOr,\n backendName: 'webgl',\n kernelFunc: logicalOr as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class LRNProgram implements GPGPUProgram {\n variableNames = ['x'];\n outputShape: number[] = [];\n userCode: string;\n\n constructor(\n xShape: number[], radius: number, bias: number, alpha: number,\n beta: number) {\n const rad = radius;\n const maxD = xShape[3] - 1;\n this.outputShape = xShape;\n\n // optimize pow(bias + alpha * sum, -beta)\n // src: https://github.com/tensorflow/tensorflow/..\n // blob/26033a1644a9c4a5fbe3170ab2e864b6a4ccd4ca/..\n // tensorflow/core/kernels/mkl_lrn_op.cc#L320\n let powOperator;\n const basis = `float(${bias}) + float(${alpha}) * sum`;\n if (beta === 0.5) {\n powOperator = `inversesqrt(${basis})`;\n } else if (beta === 1.0) {\n powOperator = `1.0/(${basis})`;\n } else {\n powOperator = `exp(log(${basis}) * float(-${beta}));`;\n }\n\n this.userCode = `\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int r = coords[1];\n int c = coords[2];\n int d = coords[3];\n float x = getX(b, r, c, d);\n float sum = 0.0;\n for (int j = -${rad}; j <= ${rad}; j++) {\n int idx = d + j;\n if (idx >= 0 && idx <= ${maxD}) {\n float z = getX(b, r, c, idx);\n sum += z * z;\n }\n }\n float val = x * ${powOperator};\n setOutput(val);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class LRNPackedProgram implements GPGPUProgram {\n variableNames = ['x'];\n outputShape: number[] = [];\n userCode: string;\n packedInputs = true;\n packedOutput = true;\n\n constructor(\n xShape: number[], radius: number, bias: number, alpha: number,\n beta: number) {\n const rad = radius;\n const maxD = xShape[3] - 1;\n this.outputShape = xShape;\n\n // optimize pow(bias + alpha * sum, -beta)\n // src: https://github.com/tensorflow/tensorflow/..\n // blob/26033a1644a9c4a5fbe3170ab2e864b6a4ccd4ca/..\n // tensorflow/core/kernels/mkl_lrn_op.cc#L320\n let powOperator;\n const basis = `float(${bias}) + float(${alpha}) * sum`;\n if (beta === 0.5) {\n powOperator = `inversesqrt(${basis})`;\n } else if (beta === 1.0) {\n powOperator = `1.0/(${basis})`;\n } else {\n powOperator = `exp(log(${basis}) * float(-${beta}));`;\n }\n\n this.userCode = `\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords.x;\n int r = coords.y;\n int c = coords.z;\n int d = coords.w;\n\n bool hasNextCol = d < ${this.outputShape[3]};\n bool hasNextRow = c < ${this.outputShape[2]};\n\n vec4 sum = vec4(0.);\n vec4 xFragAtOutputCoords = getX(b, r, c, d);\n\n vec4 xAtOutputCoords = vec4(\n getChannel(xFragAtOutputCoords, vec2(c, d)),\n hasNextCol ?\n getChannel(xFragAtOutputCoords, vec2(c, d + 1)) : 0.0,\n hasNextRow ?\n getChannel(xFragAtOutputCoords , vec2(c + 1, d)) : 0.0,\n (hasNextRow && hasNextCol) ?\n getChannel(xFragAtOutputCoords, vec2(c + 1, d + 1)) : 0.0\n );\n\n int firstChannel = d - ${rad};\n vec2 cache = vec2(0.);\n if(firstChannel >= 0){\n vec4 firstChannelFrag = getX(b, r, c, firstChannel);\n cache.x = getChannel(firstChannelFrag, vec2(c, firstChannel));\n if(hasNextRow){\n cache.y = getChannel(firstChannelFrag, vec2(c + 1, firstChannel));\n }\n }\n\n ivec2 depth = ivec2(d, d + 1);\n for (int j = - ${rad}; j <= ${rad}; j++) {\n ivec2 idx = depth + j;\n bvec2 aboveLowerBound = greaterThanEqual(idx, ivec2(0));\n bvec2 belowUpperBound = lessThanEqual(idx, ivec2(${maxD}));\n\n bool depthInRange = aboveLowerBound.x && belowUpperBound.x;\n bool depthPlusOneInRange = aboveLowerBound.y && belowUpperBound.y;\n\n if(depthInRange || depthPlusOneInRange){\n vec4 z = vec4(0.);\n vec4 xFragAtCurrentDepth;\n z.xz = cache.xy;\n if(depthPlusOneInRange && hasNextCol){\n xFragAtCurrentDepth = idx.y != d ?\n getX(b, r, c, idx.y) : xFragAtOutputCoords;\n z.y = getChannel(xFragAtCurrentDepth, vec2(c, idx.y));\n if(hasNextRow){\n z.w = getChannel(xFragAtCurrentDepth, vec2(c + 1, idx.y));\n }\n }\n cache.xy = z.yw;\n sum += z * z;\n }\n }\n vec4 result = xAtOutputCoords * ${powOperator};\n setOutput(result);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env, KernelConfig, KernelFunc, LRN, LRNAttrs, LRNInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {LRNProgram} from '../lrn_gpu';\nimport {LRNPackedProgram} from '../lrn_packed_gpu';\n\nexport const lrn =\n (args: {inputs: LRNInputs, backend: MathBackendWebGL, attrs: LRNAttrs}):\n TensorInfo => {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {depthRadius, bias, alpha, beta} = attrs;\n\n const program = env().getBool('WEBGL_PACK_NORMALIZATION') ?\n new LRNPackedProgram(x.shape, depthRadius, bias, alpha, beta) :\n new LRNProgram(x.shape, depthRadius, bias, alpha, beta);\n return backend.runWebGLProgram(program, [x], x.dtype);\n };\n\n// tslint:disable-next-line: variable-name\nexport const LRNConfig: KernelConfig = {\n kernelName: LRN,\n backendName: 'webgl',\n kernelFunc: lrn as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class LRNGradProgram implements GPGPUProgram {\n variableNames = ['inputImage', 'outputImage', 'dy'];\n outputShape: number[] = [];\n userCode: string;\n depthRadius: number;\n bias: number;\n alpha: number;\n beta: number;\n depth: number;\n\n constructor(\n inputShape: number[], depthRadius: number, bias: number, alpha: number,\n beta: number) {\n this.outputShape = inputShape;\n this.depth = inputShape[3];\n this.depthRadius = depthRadius;\n this.bias = bias;\n this.alpha = alpha;\n this.beta = beta;\n this.userCode = `\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int r = coords[1];\n int c = coords[2];\n\n float result = 0.0;\n for (int d = 0; d < ${this.depth}; ++d) {\n int depthBegin = int(max(0.0, float(d - ${depthRadius})));\n int depthEnd = int(min(float(${this.depth}),\n float(d + ${depthRadius} + 1)));\n\n const int MIN_DEPTH_BEGIN = 0;\n const int MAX_DEPTH_END = ${this.depth};\n\n float norm = 0.0;\n for (int k = MIN_DEPTH_BEGIN; k < MAX_DEPTH_END; ++k) {\n if (k < depthBegin){\n continue;\n }\n else if (k >= depthBegin && k < depthEnd) {\n norm += getInputImage(b, r, c, k) * getInputImage(b, r, c, k);\n }\n else {\n 
break;\n }\n }\n\n norm = float(${alpha}) * norm + float(${bias});\n\n for(int k = MIN_DEPTH_BEGIN; k < MAX_DEPTH_END; ++k){\n if (k < depthBegin){\n continue;\n }\n else if (k >= depthBegin && k < depthEnd){\n float dyi = -2.0 * float(${alpha})\n * float(${beta})\n * getInputImage(b, r, c, k) * getOutputImage(b, r, c, d)\n / norm;\n if (k == d) {\n dyi += pow(norm, -1.0 * ${beta});\n }\n if (k == coords[3]) {\n dyi *= getDy(b, r, c, d);\n result += dyi;\n }\n }\n else {\n break;\n }\n }\n }\n setOutput(result);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, LRNGrad, LRNGradAttrs, LRNGradInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {LRNGradProgram} from '../lrn_grad_gpu';\n\nexport const lrnGrad = (args: {\n inputs: LRNGradInputs,\n backend: MathBackendWebGL,\n attrs: LRNGradAttrs\n}): TensorInfo => {\n const {inputs, backend, attrs} = args;\n const {x, y, dy} = inputs;\n const {depthRadius, bias, alpha, beta} = attrs;\n\n const program = new LRNGradProgram(x.shape, depthRadius, bias, alpha, beta);\n return backend.runWebGLProgram(program, [x, y, dy], x.dtype);\n};\n\n// tslint:disable-next-line: variable-name\nexport const LRNGradConfig: KernelConfig = {\n kernelName: LRNGrad,\n backendName: 'webgl',\n kernelFunc: lrnGrad as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelFunc, Max, MaxAttrs, MaxInputs, TensorInfo} from '@tensorflow/tfjs-core';\nimport {backend_util, KernelConfig, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {maxImplCPU} from '../kernel_utils/shared';\n\nimport {maxImpl} from './Max_impl';\nimport {transposeImpl, transposeImplCPU} from './Transpose_impl';\n\nexport function max(\n args: {inputs: MaxInputs, backend: MathBackendWebGL, attrs: MaxAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {reductionIndices, keepDims} = attrs;\n\n const xRank = x.shape.length;\n\n const origAxes = util.parseAxisParam(reductionIndices, x.shape);\n let axes = origAxes;\n const permutedAxes = backend_util.getAxesPermutation(axes, xRank);\n const maxInputIsTransposed = permutedAxes != null;\n const shouldExecuteOnCPU = backend.shouldExecuteOnCPU([x]);\n\n let maxInput = x;\n if (maxInputIsTransposed) {\n if (shouldExecuteOnCPU) {\n const xTexData = backend.texData.get(maxInput.dataId);\n const values = xTexData.values as TypedArray;\n\n const newShape: number[] = new Array(xRank);\n for (let i = 0; i < newShape.length; i++) {\n newShape[i] = x.shape[permutedAxes[i]];\n }\n const maxInputValues =\n transposeImplCPU(values, x.shape, x.dtype, permutedAxes, newShape);\n\n maxInput = backend.makeTensorInfo(newShape, x.dtype);\n const maxInputData = backend.texData.get(maxInput.dataId);\n maxInputData.values = maxInputValues;\n } else {\n maxInput = transposeImpl(x, permutedAxes, backend);\n }\n\n axes = backend_util.getInnerMostAxes(axes.length, xRank);\n }\n\n backend_util.assertAxesAreInnerMostDims('max', axes, xRank);\n const [maxOutShape, reduceShape] =\n backend_util.computeOutAndReduceShapes(maxInput.shape, axes);\n\n let outShape = maxOutShape;\n if (keepDims) {\n // rather than reshape at the end, set the target shape here.\n outShape = backend_util.expandShapeToKeepDim(maxOutShape, origAxes);\n }\n\n let out;\n if (shouldExecuteOnCPU) {\n const xTexData = backend.texData.get(maxInput.dataId);\n const values = xTexData.values as TypedArray;\n\n const outValues =\n maxImplCPU(values, util.sizeFromShape(reduceShape), outShape, x.dtype);\n\n out = backend.makeTensorInfo(outShape, x.dtype);\n const outData = backend.texData.get(out.dataId);\n outData.values = outValues;\n } else {\n out = maxImpl(maxInput, reduceShape, outShape, backend);\n }\n\n if (maxInputIsTransposed) {\n backend.disposeIntermediateTensorInfo(maxInput);\n }\n\n return out;\n}\n\nexport const maxConfig: KernelConfig = {\n kernelName: Max,\n backendName: 'webgl',\n kernelFunc: max as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {reduce} from '../kernel_utils/reduce';\nimport {reshape} from '../kernels/Reshape';\n\nexport function maxImpl(\n x: TensorInfo, reduceShape: number[], outShape: number[],\n backend: MathBackendWebGL): TensorInfo {\n const inSize = util.sizeFromShape(reduceShape);\n const xSize = util.sizeFromShape(x.shape);\n const batchSize = xSize / inSize;\n const reshapedInput =\n reshape({inputs: {x}, attrs: {shape: [batchSize, inSize]}, backend});\n\n const reduced = reduce(reshapedInput, x.dtype, 'max', backend);\n const reshapedOutput =\n reshape({inputs: {x: reduced}, attrs: {shape: outShape}, backend});\n\n backend.disposeIntermediateTensorInfo(reshapedInput);\n backend.disposeIntermediateTensorInfo(reduced);\n\n return reshapedOutput;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Maximum} from '@tensorflow/tfjs-core';\n\nimport {CHECK_NAN_SNIPPET} from '../binaryop_gpu';\nimport {CHECK_NAN_SNIPPET_PACKED} from '../binaryop_packed_gpu';\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {maximumImplCPU} from '../kernel_utils/shared';\n\nconst MAXIMUM = CHECK_NAN_SNIPPET + `\n return max(a, b);\n`;\n\nconst MAXIMUM_PACKED = `\n vec4 result = vec4(max(a, b));\n bvec4 isNaNA = isnan(a);\n bvec4 isNaNB = isnan(b);\n bvec4 isNaN = bvec4(isNaNA.x || isNaNB.x, isNaNA.y || isNaNB.y, isNaNA.z || isNaNB.z, isNaNA.w || isNaNB.w);\n ` +\n CHECK_NAN_SNIPPET_PACKED + `\n return result;\n`;\n\nexport const maximum = binaryKernelFunc({\n opSnippet: MAXIMUM,\n packedOpSnippet: MAXIMUM_PACKED,\n cpuKernelImpl: maximumImplCPU\n});\n\nexport const maximumConfig: KernelConfig = {\n kernelName: Maximum,\n backendName: 'webgl',\n kernelFunc: maximum as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {backend_util, KernelConfig, KernelFunc, MaxPool, MaxPoolAttrs, MaxPoolInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {Pool2DProgram} from '../pool_gpu';\nimport {assertNotComplex} from '../webgl_util';\nimport {identity} from './Identity';\n\nexport function maxPool(args: {\n inputs: MaxPoolInputs,\n backend: MathBackendWebGL,\n attrs: MaxPoolAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n assertNotComplex(x, 'maxPool');\n const {filterSize, strides, pad, dimRoundingMode} = attrs;\n const dilations = 1;\n\n util.assert(\n backend_util.eitherStridesOrDilationsAreOne(strides, dilations),\n () => 'Error in maxPool: Either strides or dilations must be 1. ' +\n `Got strides ${strides} and dilations '${dilations}'`);\n\n const convInfo = backend_util.computePool2DInfo(\n x.shape as [number, number, number, number], filterSize, strides,\n dilations, pad, dimRoundingMode);\n if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 &&\n util.arraysEqual(convInfo.inShape, convInfo.outShape)) {\n return identity({inputs: {x}, backend});\n }\n const maxPoolProgram = new Pool2DProgram(convInfo, 'max', false);\n return backend.runWebGLProgram(maxPoolProgram, [x], x.dtype);\n}\n\nexport const maxPoolConfig: KernelConfig = {\n kernelName: MaxPool,\n backendName: 'webgl',\n kernelFunc: maxPool as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {backend_util, KernelConfig, KernelFunc, MaxPool3D, MaxPool3DAttrs, MaxPool3DInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {Pool3DProgram} from '../pool_gpu';\n\nexport function maxPool3d(args: {\n inputs: MaxPool3DInputs,\n backend: MathBackendWebGL,\n attrs: MaxPool3DAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {filterSize, strides, pad, dataFormat, dimRoundingMode} = attrs;\n const dilations: [number, number, number] = [1, 1, 1];\n\n const convInfo = backend_util.computePool3DInfo(\n x.shape as [number, number, number, number, number], filterSize, strides,\n dilations, pad, dimRoundingMode, dataFormat);\n const maxPoolProgram = new Pool3DProgram(convInfo, 'max', false);\n return backend.runWebGLProgram(maxPoolProgram, [x], x.dtype);\n}\n\nexport const maxPool3DConfig: KernelConfig = {\n kernelName: MaxPool3D,\n backendName: 'webgl',\n kernelFunc: maxPool3d as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class MaxPool2DBackpropProgram implements GPGPUProgram {\n variableNames = ['dy', 'maxPos'];\n outputShape: number[];\n userCode: string;\n\n constructor(convInfo: backend_util.Conv2DInfo) {\n this.outputShape = convInfo.inShape;\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const dilationHeight = convInfo.dilationHeight;\n const effectiveFilterHeight = convInfo.effectiveFilterHeight;\n const effectiveFilterWidth = convInfo.effectiveFilterWidth;\n\n const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top;\n const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left;\n\n const lastIndex = effectiveFilterHeight * effectiveFilterWidth - 1;\n this.userCode = `\n const ivec2 pads = ivec2(${padTop}, ${padLeft});\n\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int d = coords[3];\n\n ivec2 dyRCCorner = coords.yz - pads;\n int dyRCorner = dyRCCorner.x;\n int dyCCorner = dyRCCorner.y;\n\n // Convolve dy(?, ?, d) with pos 
mask(:, :, d) to get dx(xR, xC, d).\n // ? = to be determined. : = across all values in that axis.\n float dotProd = 0.0;\n for (int wR = 0; wR < ${effectiveFilterHeight};\n wR += ${dilationHeight}) {\n float dyR = float(dyRCorner + wR) / ${strideHeight}.0;\n\n if (dyR < 0.0 || dyR >= ${convInfo.outHeight}.0 || fract(dyR) > 0.0) {\n continue;\n }\n int idyR = int(dyR);\n\n for (int wC = 0; wC < ${effectiveFilterWidth}; wC++) {\n float dyC = float(dyCCorner + wC) / ${strideWidth}.0;\n\n if (dyC < 0.0 || dyC >= ${convInfo.outWidth}.0 ||\n fract(dyC) > 0.0) {\n continue;\n }\n int idyC = int(dyC);\n\n float dyValue = getDy(b, idyR, idyC, d);\n int maxPosValue = ${lastIndex} - int(getMaxPos(b, idyR, idyC, d));\n\n // Get the current value, check it against the value from the\n // position matrix.\n int curPosValue = wR * ${effectiveFilterWidth} + wC;\n float mask = float(maxPosValue == curPosValue ? 1.0 : 0.0);\n\n dotProd += dyValue * mask;\n }\n }\n setOutput(dotProd);\n }\n `;\n }\n}\n\nexport class MaxPool3DBackpropProgram implements GPGPUProgram {\n variableNames = ['dy', 'maxPos'];\n outputShape: number[];\n userCode: string;\n\n constructor(convInfo: backend_util.Conv3DInfo) {\n this.outputShape = convInfo.inShape;\n const strideDepth = convInfo.strideDepth;\n const strideHeight = convInfo.strideHeight;\n const strideWidth = convInfo.strideWidth;\n const dilationDepth = convInfo.dilationDepth;\n const dilationHeight = convInfo.dilationHeight;\n const dilationWidth = convInfo.dilationWidth;\n const effectiveFilterDepth = convInfo.effectiveFilterDepth;\n const effectiveFilterHeight = convInfo.effectiveFilterHeight;\n const effectiveFilterWidth = convInfo.effectiveFilterWidth;\n\n const padFront = effectiveFilterDepth - 1 - convInfo.padInfo.front;\n const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top;\n const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left;\n\n const lastIndex =\n effectiveFilterDepth * effectiveFilterHeight * effectiveFilterWidth - 1;\n this.userCode = `\n const ivec3 pads = ivec3(${padFront}, ${padTop}, ${padLeft});\n\n void main() {\n ivec5 coords = getOutputCoords();\n int batch = coords.x;\n int ch = coords.u;\n\n ivec3 dyCorner = ivec3(coords.y, coords.z, coords.w) - pads;\n int dyDCorner = dyCorner.x;\n int dyRCorner = dyCorner.y;\n int dyCCorner = dyCorner.z;\n\n // Convolve dy(?, ?, ?, ch) with pos mask(:, :, :, d) to get\n // dx(xD, xR, xC, ch).\n // ? = to be determined. 
: = across all values in that axis.\n float dotProd = 0.0;\n\n for (int wD = 0; wD < ${effectiveFilterDepth};\n wD += ${dilationDepth}) {\n float dyD = float(dyDCorner + wD) / ${strideDepth}.0;\n\n if (dyD < 0.0 || dyD >= ${convInfo.outDepth}.0 || fract(dyD) > 0.0) {\n continue;\n }\n int idyD = int(dyD);\n\n for (int wR = 0; wR < ${effectiveFilterHeight};\n wR += ${dilationHeight}) {\n float dyR = float(dyRCorner + wR) / ${strideHeight}.0;\n\n if (dyR < 0.0 || dyR >= ${convInfo.outHeight}.0 ||\n fract(dyR) > 0.0) {\n continue;\n }\n int idyR = int(dyR);\n\n for (int wC = 0; wC < ${effectiveFilterWidth};\n wC += ${dilationWidth}) {\n float dyC = float(dyCCorner + wC) / ${strideWidth}.0;\n\n if (dyC < 0.0 || dyC >= ${convInfo.outWidth}.0 ||\n fract(dyC) > 0.0) {\n continue;\n }\n int idyC = int(dyC);\n\n float dyValue = getDy(batch, idyD, idyR, idyC, ch);\n int maxPosValue = ${lastIndex} -\n int(getMaxPos(batch, idyD, idyR, idyC, ch));\n\n // Get the current value, check it against the value from the\n // position matrix.\n int curPosValue =\n wD * ${effectiveFilterHeight} * ${effectiveFilterWidth} +\n wR * ${effectiveFilterWidth} + wC;\n float mask = float(maxPosValue == curPosValue ? 1.0 : 0.0);\n\n dotProd += dyValue * mask;\n }\n }\n }\n setOutput(dotProd);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {backend_util, KernelConfig, KernelFunc, MaxPool3DGrad, MaxPool3DGradAttrs, MaxPool3DGradInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {MaxPool3DBackpropProgram} from '../max_pool_backprop_gpu';\nimport {Pool3DProgram} from '../pool_gpu';\n\nexport function maxPool3DGrad(args: {\n inputs: MaxPool3DGradInputs,\n backend: MathBackendWebGL,\n attrs: MaxPool3DGradAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {dy, input} = inputs;\n const x = input;\n const {filterSize, strides, pad, dimRoundingMode} = attrs;\n const dilations: [number, number, number] = [1, 1, 1];\n\n const convInfo = backend_util.computePool3DInfo(\n x.shape as [number, number, number, number, number], filterSize, strides,\n dilations, pad, dimRoundingMode);\n\n const maxPool3dPositionsProgram =\n new Pool3DProgram(convInfo, 'max', true /* get positions */);\n const maxPool3dPositions =\n backend.runWebGLProgram(maxPool3dPositionsProgram, [x], x.dtype);\n const maxPoolBackpropProgram = new MaxPool3DBackpropProgram(convInfo);\n const result = backend.runWebGLProgram(\n maxPoolBackpropProgram, [dy, maxPool3dPositions], x.dtype);\n backend.disposeIntermediateTensorInfo(maxPool3dPositions);\n return result;\n}\n\nexport const maxPool3DGradConfig: KernelConfig = {\n kernelName: MaxPool3DGrad,\n backendName: 'webgl',\n kernelFunc: maxPool3DGrad as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {backend_util, KernelConfig, KernelFunc, MaxPoolGrad, MaxPoolGradAttrs, MaxPoolGradInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {MaxPool2DBackpropProgram} from '../max_pool_backprop_gpu';\nimport {Pool2DProgram} from '../pool_gpu';\nimport {assertNotComplex} from '../webgl_util';\n\nexport function maxPoolGrad(args: {\n inputs: MaxPoolGradInputs,\n backend: MathBackendWebGL,\n attrs: MaxPoolGradAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {dy, input, output} = inputs;\n const x = input;\n assertNotComplex([input, output], 'maxPoolGrad');\n const {filterSize, strides, pad, dimRoundingMode} = attrs;\n\n const convInfo = backend_util.computePool2DInfo(\n x.shape as [number, number, number, number], filterSize, strides,\n 1 /* dilations */, pad, dimRoundingMode);\n const getPositions = true;\n const maxPoolPositionsProgram =\n new Pool2DProgram(convInfo, 'max', getPositions);\n const maxPoolPositions: TensorInfo =\n backend.runWebGLProgram(maxPoolPositionsProgram, [x], x.dtype);\n\n const maxPoolBackPropProgram = new MaxPool2DBackpropProgram(convInfo);\n const result = backend.runWebGLProgram(\n maxPoolBackPropProgram, [dy, maxPoolPositions], x.dtype);\n backend.disposeIntermediateTensorInfo(maxPoolPositions);\n return result;\n}\n\nexport const maxPoolGradConfig: KernelConfig = {\n kernelName: MaxPoolGrad,\n backendName: 'webgl',\n kernelFunc: maxPoolGrad as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {MaxPoolWithArgmax, MaxPoolWithArgmaxAttrs, MaxPoolWithArgmaxInputs} from '@tensorflow/tfjs-core';\nimport {backend_util, KernelConfig, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\n\nimport {maxPoolWithArgmaxImpl} from './MaxPoolWithArgmax_impl';\n\nexport const maxPoolWithArgmaxConfig: KernelConfig = {\n kernelName: MaxPoolWithArgmax,\n backendName: 'webgl',\n kernelFunc: ({inputs, attrs, backend}) => {\n const {x} = inputs as MaxPoolWithArgmaxInputs;\n const {filterSize, strides, pad, includeBatchInIndex} =\n attrs as unknown as MaxPoolWithArgmaxAttrs;\n const webglBackend = backend as MathBackendWebGL;\n\n util.assert(\n x.shape.length === 4,\n () => `Error in maxPool: input must be rank 4 but got rank ${\n x.shape.length}.`);\n const dilations: [number, number] = [1, 1];\n util.assert(\n backend_util.eitherStridesOrDilationsAreOne(strides, dilations),\n () => 'Error in maxPool: Either strides or dilations must be 1. ' +\n `Got strides ${strides} and dilations '${dilations}'`);\n\n const convInfo = backend_util.computePool2DInfo(\n x.shape as [number, number, number, number], filterSize, strides,\n dilations, pad);\n\n const [result, indexes] =\n maxPoolWithArgmaxImpl(x, includeBatchInIndex, convInfo, webglBackend);\n return [result, indexes];\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {Pool2DProgram} from '../pool_gpu';\n\nexport function maxPoolWithArgmaxImpl(\n x: TensorInfo, includeBatchInIndex: boolean,\n convInfo: backend_util.Conv2DInfo,\n backend: MathBackendWebGL): TensorInfo[] {\n let program = new Pool2DProgram(convInfo, 'max', false);\n const poolOutput = backend.runWebGLProgram(program, [x], 'float32');\n\n program = new Pool2DProgram(convInfo, 'max', true, true, includeBatchInIndex);\n const indexOutput = backend.runWebGLProgram(program, [x], 'float32');\n return [poolOutput, indexOutput];\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, Mean, MeanAttrs, MeanInputs, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\n\nimport {meanImpl} from './Mean_impl';\nimport {transposeImpl, transposeImplCPU} from './Transpose_impl';\n\nexport const meanConfig: KernelConfig = {\n kernelName: Mean,\n backendName: 'webgl',\n kernelFunc: ({inputs, attrs, backend}) => {\n const {x} = inputs as MeanInputs;\n const {keepDims, axis} = attrs as unknown as MeanAttrs;\n const webglBackend = backend as MathBackendWebGL;\n\n const xRank = x.shape.length;\n const origAxes = util.parseAxisParam(axis, x.shape);\n\n let axes = origAxes;\n const permutedAxes = backend_util.getAxesPermutation(axes, xRank);\n const meanInputIsTransposed = permutedAxes != null;\n const shouldExecuteOnCPU = webglBackend.shouldExecuteOnCPU([x]);\n\n const intermediates: TensorInfo[] = [];\n\n let meanInput = x;\n if (meanInputIsTransposed) {\n if (shouldExecuteOnCPU) {\n const xTexData = webglBackend.texData.get(meanInput.dataId);\n const values = xTexData.values as TypedArray;\n\n const newShape: number[] = new Array(xRank);\n for (let i = 0; i < newShape.length; i++) {\n newShape[i] = x.shape[permutedAxes[i]];\n }\n const meanInputValues =\n transposeImplCPU(values, x.shape, x.dtype, permutedAxes, newShape);\n\n meanInput = webglBackend.makeTensorInfo(newShape, x.dtype);\n const meanInputData = webglBackend.texData.get(meanInput.dataId);\n meanInputData.values = meanInputValues;\n } else {\n meanInput = transposeImpl(x, permutedAxes, webglBackend);\n }\n\n intermediates.push(meanInput);\n axes = backend_util.getInnerMostAxes(axes.length, xRank);\n }\n\n backend_util.assertAxesAreInnerMostDims('sum', axes, xRank);\n const [meanOutShape, reduceShape] =\n backend_util.computeOutAndReduceShapes(meanInput.shape, axes);\n\n let outShape = meanOutShape;\n if (keepDims) {\n // rather than reshape at the end, set the target shape here.\n outShape = backend_util.expandShapeToKeepDim(meanOutShape, origAxes);\n }\n\n const out = meanImpl(meanInput, reduceShape, outShape, webglBackend);\n for (const i of intermediates) {\n webglBackend.disposeIntermediateTensorInfo(i);\n }\n\n return out;\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {reduce} from '../kernel_utils/reduce';\nimport {reshape} from '../kernels/Reshape';\n\nexport function meanImpl(\n x: TensorInfo, reduceShape: number[], outShape: number[],\n backend: MathBackendWebGL): TensorInfo {\n const inSize = util.sizeFromShape(reduceShape);\n const xSize = util.sizeFromShape(x.shape);\n const batchSize = xSize / inSize;\n const reshapedInput =\n reshape({inputs: {x}, attrs: {shape: [batchSize, inSize]}, backend});\n\n const reduced = reduce(reshapedInput, 'float32', 'mean', backend);\n const reshapedOutput =\n reshape({inputs: {x: reduced}, attrs: {shape: outShape}, backend});\n\n backend.disposeIntermediateTensorInfo(reshapedInput);\n backend.disposeIntermediateTensorInfo(reduced);\n\n return reshapedOutput;\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, Min, MinAttrs, MinInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {reduce} from '../kernel_utils/reduce';\n\nimport {reshape} from './Reshape';\nimport {transpose} from './Transpose';\n\nexport function min(\n args: {inputs: MinInputs, backend: MathBackendWebGL, attrs: MinAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis, keepDims} = attrs;\n\n const xRank = x.shape.length;\n\n const origAxes = util.parseAxisParam(axis, x.shape);\n let axes = origAxes;\n const permutedAxes = backend_util.getAxesPermutation(axes, xRank);\n let permutedX = x;\n if (permutedAxes != null) {\n permutedX = transpose({inputs: {x}, backend, attrs: {perm: permutedAxes}});\n axes = backend_util.getInnerMostAxes(axes.length, x.shape.length);\n }\n\n backend_util.assertAxesAreInnerMostDims('min', axes, xRank);\n const [outShape, reduceShape] =\n backend_util.computeOutAndReduceShapes(permutedX.shape, axes);\n const inSize = util.sizeFromShape(reduceShape);\n const a2D =\n reshape({inputs: {x: permutedX}, backend, attrs: {shape: [-1, inSize]}});\n const reduced = reduce(a2D, a2D.dtype, 'min', backend);\n\n let res;\n if 
(keepDims) {\n const newShape = backend_util.expandShapeToKeepDim(outShape, origAxes);\n res = reshape({inputs: {x: reduced}, backend, attrs: {shape: newShape}});\n } else {\n res = reshape({inputs: {x: reduced}, backend, attrs: {shape: outShape}});\n }\n\n backend.disposeIntermediateTensorInfo(a2D);\n backend.disposeIntermediateTensorInfo(reduced);\n\n if (permutedAxes != null) {\n backend.disposeIntermediateTensorInfo(permutedX);\n }\n\n return res;\n}\n\nexport const minConfig: KernelConfig = {\n kernelName: Min,\n backendName: 'webgl',\n kernelFunc: min as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Minimum} from '@tensorflow/tfjs-core';\n\nimport {CHECK_NAN_SNIPPET} from '../binaryop_gpu';\nimport {CHECK_NAN_SNIPPET_PACKED} from '../binaryop_packed_gpu';\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {minimumImplCPU} from '../kernel_utils/shared';\n\nconst MINIMUM = CHECK_NAN_SNIPPET + `\n return min(a, b);\n`;\n\nconst MINIMUM_PACKED = `\n vec4 result = vec4(min(a, b));\n bvec4 isNaNA = isnan(a);\n bvec4 isNaNB = isnan(b);\n bvec4 isNaN = bvec4(isNaNA.x || isNaNB.x, isNaNA.y || isNaNB.y, isNaNA.z || isNaNB.z, isNaNA.w || isNaNB.w);\n ` +\n CHECK_NAN_SNIPPET_PACKED + `\n return result;\n`;\n\nexport const minimum = binaryKernelFunc({\n opSnippet: MINIMUM,\n packedOpSnippet: MINIMUM_PACKED,\n cpuKernelImpl: minimumImplCPU\n});\n\nexport const minimumConfig: KernelConfig = {\n kernelName: Minimum,\n backendName: 'webgl',\n kernelFunc: minimum as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {getCoordsDataType} from './shader_compiler';\n\nexport class MirrorPadProgram implements GPGPUProgram {\n variableNames = ['x'];\n outputShape: number[];\n userCode: string;\n\n constructor(\n xShape: number[], paddings: Array<[number, number]>,\n mode: 'reflect'|'symmetric') {\n this.outputShape = paddings.map(\n (p, i) => p[0] /* beforePad */ + xShape[i] + p[1] /* afterPad */);\n const rank = xShape.length;\n const dtype = getCoordsDataType(rank);\n\n const start = paddings.map(p => p[0]).join(',');\n const end = paddings.map((p, i) => p[0] + xShape[i]).join(',');\n const unpackedCoords =\n ['coords[0]', 'coords[1]', 'coords[2]', 'coords[3]'].slice(0, rank);\n const offset = mode === 'reflect' ? 0 : 1;\n\n if (rank === 1) {\n this.userCode = `\n int start = ${start};\n int end = ${end};\n\n void main() {\n int outC = getOutputCoords();\n if (outC < start) {\n outC = start * 2 - outC - ${offset};\n } else if(outC >= end) {\n outC = (end - 1) * 2 - outC + ${offset};\n }\n setOutput(getX(outC - start));\n }\n `;\n return;\n }\n this.userCode = `\n ${dtype} start = ${dtype}(${start});\n ${dtype} end = ${dtype}(${end});\n\n void main() {\n ${dtype} outC = getOutputCoords();\n for (int i = 0; i < ${rank}; i++) {\n if (outC[i] < start[i]) {\n outC[i] = start[i] * 2 - outC[i] - ${offset};\n } else if(outC[i] >= end[i]) {\n outC[i] = (end[i] - 1) * 2 - outC[i] + ${offset};\n }\n }\n ${dtype} coords = outC - start;\n setOutput(getX(${unpackedCoords}));\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {getChannels} from './packing_util';\nimport {getCoordsDataType} from './shader_compiler';\n\n/**\n * Example shader code for\n * `mirrorPad(tf.tensor1d([1, 2, 3], 'int32'), [[2, 2]], 'reflect')`\n * ```\n * const int start = int(2);\n * const int end = int(5);\n *\n * void main() {\n * int outputLoc = getOutputCoords();\n * vec4 result = vec4(0.);\n *\n * int rc = outputLoc;\n *\n * int source = rc;\n * if (source < start) {\n * source = start * 2 - source - 0;\n * } else if (source >= end) {\n * source = (end - 1) * 2 - source + 0;\n * }\n * source -= start;\n *\n * result[0] = getChannel(getX(source), source);\n * rc += 1;\n * if(rc < 6) {\n * int source = rc;\n * if (source < start) {\n * source = start * 2 - source - 0;\n * } else if (source >= end) {\n * source = (end - 1) * 2 - source + 0;\n * }\n * source -= start;\n *\n * result[1] = getChannel(getX(source), source);\n * }\n *\n * setOutput(result);\n * }\n * ```\n */\nexport class MirrorPadPackedProgram implements GPGPUProgram {\n variableNames = ['x'];\n packedInputs = true;\n packedOutput = true;\n outputShape: number[];\n userCode: string;\n\n constructor(\n xShape: number[], paddings: Array<[number, number]>,\n mode: 'reflect'|'symmetric') {\n this.outputShape = paddings.map(\n (p, i) => p[0] /* beforePad */ + xShape[i] + p[1] /* afterPad */);\n const rank = xShape.length;\n const dtype = getCoordsDataType(rank);\n\n const start = paddings.map(p => p[0]).join(',');\n const end = paddings.map((p, i) => p[0] + xShape[i]).join(',');\n const coords = getChannels('rc', rank);\n const source = getChannels('source', rank);\n const cLimit = `${coords[rank - 1]} < ${this.outputShape[rank - 1]}`;\n const innerDims =\n rank === 1 ? 'source' : `vec2(${source.slice(-2).join()})`;\n const offset = mode === 'reflect' ? 
0 : 1;\n\n let mainLoop = '';\n if (rank === 1) {\n const padSetup = `\n ${dtype} source = rc;\n if (source < start) {\n source = start * 2 - source - ${offset};\n } else if (source >= end) {\n source = (end - 1) * 2 - source + ${offset};\n }\n source -= start;\n `;\n mainLoop = `\n ${dtype} rc = outputLoc;\n ${padSetup}\n result[0] = getChannel(getX(${source.join()}), ${innerDims});\n ${coords[rank - 1]} += 1;\n if(${cLimit}) {\n ${padSetup}\n result[1] = getChannel(getX(${source.join()}), ${innerDims});\n }\n `;\n } else {\n const padSetup = `\n ${dtype} source = rc;\n ${dtype} lt = ${dtype}(lessThan(source, start));\n ${dtype} gte = ${dtype}(greaterThanEqual(source, end));\n ${dtype} orig = 1 - (lt + gte);\n source = orig * source +\n lt * (start * 2 - source - ${offset}) +\n gte * ((end - 1) * 2 - source + ${offset});\n source -= start;\n `;\n\n mainLoop = `\n ${dtype} rc = outputLoc;\n ${padSetup}\n result[0] = getChannel(getX(${source.join()}), ${innerDims});\n ${coords[rank - 1]} += 1;\n if(${cLimit}) {\n ${padSetup}\n result[1] = getChannel(getX(${source.join()}), ${innerDims});\n }\n rc = outputLoc;\n ${coords[rank - 2]} += 1;\n if(${coords[rank - 2]} < ${this.outputShape[rank - 2]}) {\n ${padSetup}\n result[2] = getChannel(getX(${source.join()}), ${innerDims});\n ${coords[rank - 1]} += 1;\n if(${cLimit}) {\n ${padSetup}\n result[3] = getChannel(getX(${source.join()}), ${innerDims});\n }\n }\n `;\n }\n\n this.userCode = `\n const ${dtype} start = ${dtype}(${start});\n const ${dtype} end = ${dtype}(${end});\n\n void main() {\n ${dtype} outputLoc = getOutputCoords();\n vec4 result = vec4(0.);\n ${mainLoop}\n setOutput(result);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env, KernelConfig, KernelFunc, MirrorPad, MirrorPadAttrs, MirrorPadInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {MirrorPadProgram} from '../mirror_pad_gpu';\nimport {MirrorPadPackedProgram} from '../mirror_pad_packed_gpu';\n\nexport const mirrorPadKernelFunc: (params: {\n inputs: MirrorPadInputs,\n backend: MathBackendWebGL,\n attrs: MirrorPadAttrs\n}) => TensorInfo = ({inputs, backend, attrs}) => {\n const {x} = inputs;\n const {paddings, mode} = attrs;\n\n const program = env().getBool('WEBGL_PACK_ARRAY_OPERATIONS') ?\n new MirrorPadPackedProgram(x.shape, paddings, mode) :\n new MirrorPadProgram(x.shape, paddings, mode);\n\n const output = backend.runWebGLProgram(program, [x], x.dtype);\n\n return output;\n};\n\nexport const mirrorPadConfig: KernelConfig = {\n kernelName: MirrorPad,\n backendName: 'webgl',\n kernelFunc: mirrorPadKernelFunc as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Mod} from '@tensorflow/tfjs-core';\n\nimport {CHECK_NAN_SNIPPET_PACKED} from '../binaryop_packed_gpu';\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst MOD = `if (b == 0.0) return NAN;\n return mod(a, b);`;\n\nconst MOD_PACKED = `\n vec4 result = mod(a, b);\n bvec4 isNaN = equal(b, vec4(0.0));\n ` +\n CHECK_NAN_SNIPPET_PACKED + `\n return result;\n`;\n\nexport const mod = binaryKernelFunc({\n opSnippet: MOD,\n packedOpSnippet: MOD_PACKED,\n});\n\nexport const modConfig: KernelConfig = {\n kernelName: Mod,\n backendName: 'webgl',\n kernelFunc: mod as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {UniformType} from './shader_compiler';\n\nexport class MultinomialProgram implements GPGPUProgram {\n variableNames = ['probs'];\n outputShape: number[];\n userCode: string;\n customUniforms = [{name: 'seed', type: 'float' as UniformType}];\n\n constructor(batchSize: number, numOutcomes: number, numSamples: number) {\n this.outputShape = [batchSize, numSamples];\n\n this.userCode = `\n void main() {\n ivec2 coords = getOutputCoords();\n int batch = coords[0];\n\n float r = random(seed);\n float cdf = 0.0;\n\n for (int i = 0; i < ${numOutcomes - 1}; i++) {\n cdf += getProbs(batch, i);\n\n if (r < cdf) {\n setOutput(float(i));\n return;\n }\n }\n\n // If no other event happened, last event happened.\n setOutput(float(${numOutcomes - 1}));\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {RealDiv} from '@tensorflow/tfjs-core';\nimport {KernelConfig} from '@tensorflow/tfjs-core';\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\n// Without the equality check div produces 0.9999 for a = b, which when\n// floored can cause errors.\nconst DIV = `\nif (a == b) {\n return 1.0;\n};\nreturn a / b;`;\n\n// We do the same as in ./binaryop_gpu, with vec4 and ivec4.\n// On Linux, the vectorized implementation produces NaNs when a and b are 0.\nconst DIV_PACKED = `\n // vec4 one = vec4(equal(a, b));\n // return one + (vec4(1.0) - one) * a / b;\n vec4 result = a / b;\n if(a.x == b.x) {\n result.x = 1.;\n }\n if(a.y == b.y) {\n result.y = 1.;\n }\n if(a.z == b.z) {\n result.z = 1.;\n }\n if(a.w == b.w) {\n result.w = 1.;\n }\n\n return result;\n`;\n\nexport const realDiv = binaryKernelFunc(\n {opSnippet: DIV, packedOpSnippet: DIV_PACKED, checkOutOfBounds: true});\n\nexport const realDivConfig: KernelConfig = {\n kernelName: RealDiv,\n backendName: 'webgl',\n kernelFunc: realDiv,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Sub} from '@tensorflow/tfjs-core';\n\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {subImplCPU as cpuSub} from '../kernel_utils/shared';\n\nconst SUB = 'return a - b;';\n\nexport const sub = binaryKernelFunc({\n opSnippet: SUB,\n packedOpSnippet: SUB,\n supportsComplex: true,\n cpuKernelImpl: cpuSub\n});\n\nexport const subConfig: KernelConfig = {\n kernelName: Sub,\n backendName: 'webgl',\n kernelFunc: sub\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, Softmax, SoftmaxAttrs, SoftmaxInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\n\nimport {exp} from './Exp';\nimport {max} from './Max';\nimport {realDiv} from './RealDiv';\nimport {reshape} from './Reshape';\nimport {sub} from './Sub';\nimport {sum} from './Sum';\n\nexport function softmax(args: {\n inputs: SoftmaxInputs,\n backend: MathBackendWebGL,\n attrs: SoftmaxAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {logits} = inputs;\n const {dim} = attrs;\n\n const axes = util.parseAxisParam([dim], logits.shape);\n\n const maxLogit = max({\n inputs: {x: logits},\n backend,\n attrs: {reductionIndices: axes, keepDims: false}\n });\n\n const expandedShape = backend_util.expandShapeToKeepDim(maxLogit.shape, axes);\n\n const maxLogitsReshaped =\n reshape({inputs: {x: maxLogit}, backend, attrs: {shape: expandedShape}});\n const a =\n sub({inputs: {a: logits, b: maxLogitsReshaped}, backend}) as TensorInfo;\n const b = exp({inputs: {x: a}, backend}) as TensorInfo;\n const sumExp =\n sum({inputs: {x: b}, backend, attrs: {axis: axes, keepDims: false}});\n const sumExpReshaped =\n reshape({inputs: {x: sumExp}, backend, attrs: {shape: expandedShape}});\n\n const res =\n realDiv({inputs: {a: b, b: sumExpReshaped}, backend}) as TensorInfo;\n\n backend.disposeIntermediateTensorInfo(maxLogit);\n backend.disposeIntermediateTensorInfo(maxLogitsReshaped);\n backend.disposeIntermediateTensorInfo(a);\n backend.disposeIntermediateTensorInfo(b);\n backend.disposeIntermediateTensorInfo(sumExp);\n backend.disposeIntermediateTensorInfo(sumExpReshaped);\n\n return res;\n}\n\nexport const softmaxConfig: KernelConfig = {\n kernelName: Softmax,\n backendName: 'webgl',\n kernelFunc: softmax as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Multinomial, MultinomialAttrs, MultinomialInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {MultinomialProgram} from '../multinomial_gpu';\n\nimport {softmax} from './Softmax';\n\nexport function multinomial(args: {\n inputs: MultinomialInputs,\n backend: MathBackendWebGL,\n attrs: MultinomialAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {logits} = inputs;\n const {numSamples, seed, normalized} = attrs;\n\n const probs = normalized ?\n logits :\n softmax(\n {inputs: {logits}, backend, attrs: {dim: logits.shape.length - 1}});\n const batchSize = probs.shape[0];\n const numOutcomes = probs.shape[1];\n const program = new MultinomialProgram(batchSize, numOutcomes, numSamples);\n const customValues = [[seed]];\n const res = backend.runWebGLProgram(program, [probs], 'int32', customValues);\n if (!normalized) {\n backend.disposeIntermediateTensorInfo(probs);\n }\n return res;\n}\n\nexport const multinomialConfig: KernelConfig = {\n kernelName: Multinomial,\n backendName: 'webgl',\n kernelFunc: multinomial as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env, KernelConfig, KernelFunc, Neg, NegInputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {negImplCPU} from '../kernel_utils/shared';\nimport {CHECK_NAN_SNIPPET, UnaryOpProgram} from '../unaryop_gpu';\nimport {UnaryOpPackedProgram} from '../unaryop_packed_gpu';\n\nconst NEG = CHECK_NAN_SNIPPET + `\n return -x;\n`;\n\nconst NEG_PACKED = `\n vec4 result = -x;\n bvec4 isNaN = isnan(x);\n\n result.r = isNaN.r ? x.r : result.r;\n result.g = isNaN.g ? x.g : result.g;\n result.b = isNaN.b ? x.b : result.b;\n result.a = isNaN.a ? 
x.a : result.a;\n\n return result;\n`;\n\n// This doesn't use unaryKernelFunc because negImplCPU is not of type\n// SimpleUnaryKernelImplCPU.\nexport function neg(args: {inputs: NegInputs, backend: MathBackendWebGL}):\n TensorInfo {\n const {inputs, backend} = args;\n const {x} = inputs;\n\n if (backend.shouldExecuteOnCPU([x])) {\n const xData = backend.texData.get(x.dataId);\n const [outValues, newShape] =\n negImplCPU(xData.values as TypedArray, x.shape, x.dtype);\n return backend.makeTensorInfo(newShape, x.dtype, outValues);\n }\n\n let program: UnaryOpProgram|UnaryOpPackedProgram;\n if (env().getBool('WEBGL_PACK_UNARY_OPERATIONS')) {\n program = new UnaryOpPackedProgram(x.shape, NEG_PACKED);\n } else {\n program = new UnaryOpProgram(x.shape, NEG);\n }\n\n return backend.runWebGLProgram(program, [x], x.dtype);\n}\n\nexport const negConfig: KernelConfig = {\n kernelName: Neg,\n backendName: 'webgl',\n kernelFunc: neg as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, kernel_impls, KernelConfig, KernelFunc, NonMaxSuppressionV3, NonMaxSuppressionV3Attrs, NonMaxSuppressionV3Inputs, TypedArray} from '@tensorflow/tfjs-core';\n\nconst nonMaxSuppressionV3Impl = kernel_impls.nonMaxSuppressionV3Impl;\nimport {MathBackendWebGL} from '../backend_webgl';\n\nexport function nonMaxSuppressionV3(args: {\n inputs: NonMaxSuppressionV3Inputs,\n backend: MathBackendWebGL,\n attrs: NonMaxSuppressionV3Attrs\n}) {\n backend_util.warn(\n 'tf.nonMaxSuppression() in webgl locks the UI thread. ' +\n 'Call tf.nonMaxSuppressionAsync() instead');\n\n const {inputs, backend, attrs} = args;\n const {boxes, scores} = inputs;\n const {maxOutputSize, iouThreshold, scoreThreshold} = attrs;\n\n const boxesVals = backend.readSync(boxes.dataId) as TypedArray;\n const scoresVals = backend.readSync(scores.dataId) as TypedArray;\n\n const {selectedIndices} = nonMaxSuppressionV3Impl(\n boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold);\n\n return backend.makeTensorInfo(\n [selectedIndices.length], 'int32', new Int32Array(selectedIndices));\n}\n\nexport const nonMaxSuppressionV3Config: KernelConfig = {\n kernelName: NonMaxSuppressionV3,\n backendName: 'webgl',\n kernelFunc: nonMaxSuppressionV3 as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, kernel_impls, KernelConfig, KernelFunc, NonMaxSuppressionV4, NonMaxSuppressionV4Attrs, NonMaxSuppressionV4Inputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\nconst nonMaxSuppressionV4Impl = kernel_impls.nonMaxSuppressionV4Impl;\n\nimport {MathBackendWebGL} from '../backend_webgl';\n\nexport function nonMaxSuppressionV4(args: {\n inputs: NonMaxSuppressionV4Inputs,\n backend: MathBackendWebGL,\n attrs: NonMaxSuppressionV4Attrs\n}): [TensorInfo, TensorInfo] {\n backend_util.warn(\n 'tf.nonMaxSuppression() in webgl locks the UI thread. ' +\n 'Call tf.nonMaxSuppressionAsync() instead');\n\n const {inputs, backend, attrs} = args;\n const {boxes, scores} = inputs;\n const {maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize} =\n attrs;\n\n const boxesVals = backend.readSync(boxes.dataId) as TypedArray;\n const scoresVals = backend.readSync(scores.dataId) as TypedArray;\n\n const {selectedIndices, validOutputs} = nonMaxSuppressionV4Impl(\n boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold,\n padToMaxOutputSize);\n\n return [\n backend.makeTensorInfo(\n [selectedIndices.length], 'int32', new Int32Array(selectedIndices)),\n backend.makeTensorInfo([], 'int32', new Int32Array([validOutputs]))\n ];\n}\n\nexport const nonMaxSuppressionV4Config: KernelConfig = {\n kernelName: NonMaxSuppressionV4,\n backendName: 'webgl',\n kernelFunc: nonMaxSuppressionV4 as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, kernel_impls, KernelConfig, KernelFunc, NonMaxSuppressionV5, NonMaxSuppressionV5Attrs, NonMaxSuppressionV5Inputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nconst nonMaxSuppressionV5Impl = kernel_impls.nonMaxSuppressionV5Impl;\nimport {MathBackendWebGL} from '../backend_webgl';\n\nexport function nonMaxSuppressionV5(args: {\n inputs: NonMaxSuppressionV5Inputs,\n backend: MathBackendWebGL,\n attrs: NonMaxSuppressionV5Attrs\n}): [TensorInfo, TensorInfo] {\n backend_util.warn(\n 'tf.nonMaxSuppression() in webgl locks the UI thread. 
' +\n 'Call tf.nonMaxSuppressionAsync() instead');\n\n const {inputs, backend, attrs} = args;\n const {boxes, scores} = inputs;\n const {maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma} = attrs;\n\n const boxesVals = backend.readSync(boxes.dataId) as TypedArray;\n const scoresVals = backend.readSync(scores.dataId) as TypedArray;\n\n const maxOutputSizeVal = maxOutputSize;\n const iouThresholdVal = iouThreshold;\n const scoreThresholdVal = scoreThreshold;\n const softNmsSigmaVal = softNmsSigma;\n\n const {selectedIndices, selectedScores} = nonMaxSuppressionV5Impl(\n boxesVals, scoresVals, maxOutputSizeVal, iouThresholdVal,\n scoreThresholdVal, softNmsSigmaVal);\n\n return [\n backend.makeTensorInfo(\n [selectedIndices.length], 'int32', new Int32Array(selectedIndices)),\n backend.makeTensorInfo(\n [selectedScores.length], 'float32', new Float32Array(selectedScores))\n ];\n}\n\nexport const nonMaxSuppressionV5Config: KernelConfig = {\n kernelName: NonMaxSuppressionV5,\n backendName: 'webgl',\n kernelFunc: nonMaxSuppressionV5 as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class OneHotProgram implements GPGPUProgram {\n variableNames = ['indices'];\n outputShape: number[];\n userCode: string;\n\n // Caching uniform location for speed.\n seedLoc: WebGLUniformLocation;\n\n constructor(\n numIndices: number, depth: number, onValue: number, offValue: number) {\n this.outputShape = [numIndices, depth];\n\n this.userCode = `\n void main() {\n ivec2 coords = getOutputCoords();\n int index = round(getIndices(coords.x));\n setOutput(mix(float(${offValue}), float(${onValue}),\n float(index == coords.y)));\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, OneHot, OneHotAttrs, OneHotInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {OneHotProgram} from '../onehot_gpu';\nimport {reshape} from './Reshape';\n\nexport const oneHot = (args: {\n inputs: OneHotInputs,\n backend: MathBackendWebGL,\n attrs: OneHotAttrs\n}): TensorInfo => {\n const {inputs, backend, attrs} = args;\n const {indices} = inputs;\n const {dtype, depth, onValue, offValue} = attrs;\n\n const indicesSize = util.sizeFromShape(indices.shape);\n const program = new OneHotProgram(indicesSize, depth, onValue, offValue);\n const reshaped =\n reshape({inputs: {x: indices}, backend, attrs: {shape: [indicesSize]}});\n const result = backend.runWebGLProgram(program, [reshaped], dtype);\n backend.disposeIntermediateTensorInfo(reshaped);\n\n const outShape = [...indices.shape, depth];\n const out = reshape({inputs: {x: result}, backend, attrs: {shape: outShape}});\n backend.disposeIntermediateTensorInfo(result);\n return out;\n};\n\nexport const oneHotConfig: KernelConfig = {\n kernelName: OneHot,\n backendName: 'webgl',\n kernelFunc: oneHot as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, TensorInfo, ZerosLike, ZerosLikeInputs} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\n\nimport {complex} from './Complex';\nimport {fill} from './Fill';\nimport {imag} from './Imag';\nimport {real} from './Real';\n\nexport function zerosLike(\n args: {inputs: ZerosLikeInputs, backend: MathBackendWebGL}): TensorInfo {\n const {inputs, backend} = args;\n const {x} = inputs;\n\n if (x.dtype === 'complex64') {\n const realPart = real({inputs: {input: x}, backend});\n const r = zerosLike({inputs: {x: realPart}, backend});\n const imagPart = imag({inputs: {input: x}, backend});\n const i = zerosLike({inputs: {x: imagPart}, backend});\n\n const result = complex({inputs: {real: r, imag: i}, backend});\n\n backend.disposeIntermediateTensorInfo(realPart);\n backend.disposeIntermediateTensorInfo(r);\n backend.disposeIntermediateTensorInfo(imagPart);\n backend.disposeIntermediateTensorInfo(i);\n\n return result;\n } else {\n return fill({\n attrs: {\n shape: x.shape,\n dtype: x.dtype,\n value: x.dtype === 'string' ? '' : 0\n },\n backend\n });\n }\n}\n\nexport const zerosLikeConfig: KernelConfig = {\n kernelName: ZerosLike,\n backendName: 'webgl',\n kernelFunc: zerosLike as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, OnesLike, OnesLikeInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\n\nimport {complex} from './Complex';\nimport {fill} from './Fill';\nimport {imag} from './Imag';\nimport {real} from './Real';\nimport {zerosLike} from './ZerosLike';\n\nexport function onesLike(\n args: {inputs: OnesLikeInputs, backend: MathBackendWebGL}): TensorInfo {\n const {inputs, backend} = args;\n const {x} = inputs;\n\n if (x.dtype === 'string') {\n throw new Error('onesLike is not supported under string dtype');\n } else if (x.dtype === 'complex64') {\n const realPart = real({inputs: {input: x}, backend});\n const r = onesLike({inputs: {x: realPart}, backend});\n const imagPart = imag({inputs: {input: x}, backend});\n const i = zerosLike({inputs: {x: imagPart}, backend});\n\n const result = complex({inputs: {real: r, imag: i}, backend});\n\n backend.disposeIntermediateTensorInfo(realPart);\n backend.disposeIntermediateTensorInfo(r);\n backend.disposeIntermediateTensorInfo(imagPart);\n backend.disposeIntermediateTensorInfo(i);\n\n return result;\n } else {\n // TODO(cais, smilkov): Add WebGL shader for onesLike:\n // https://github.com/tensorflow/tfjs/issues/1293\n return fill({attrs: {shape: x.shape, dtype: x.dtype, value: 1}, backend});\n }\n}\n\nexport const onesLikeConfig: KernelConfig = {\n kernelName: OnesLike,\n backendName: 'webgl',\n kernelFunc: onesLike as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Pack, PackAttrs, PackInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {concat} from './Concat';\nimport {expandDims} from './ExpandDims';\n\nexport function pack(\n args: {inputs: PackInputs, backend: MathBackendWebGL, attrs: PackAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {axis} = attrs;\n\n if (inputs.length === 1) {\n return expandDims(\n {inputs: {input: inputs[0]}, backend, attrs: {dim: axis}});\n }\n\n const shape = inputs[0].shape;\n const dtype = inputs[0].dtype;\n\n inputs.forEach(t => {\n util.assertShapesMatch(\n shape, t.shape,\n 'All tensors passed to stack must have matching shapes');\n util.assert(\n dtype === t.dtype,\n () => 'All tensors passed to stack must have matching dtypes');\n });\n\n const intermediateTensorInfos: TensorInfo[] = [];\n const expandedTensors = inputs.map(t => {\n const expandedT =\n expandDims({inputs: {input: t}, backend, attrs: {dim: axis}});\n intermediateTensorInfos.push(expandedT);\n return expandedT;\n });\n\n const result = concat({inputs: expandedTensors, backend, attrs: {axis}});\n\n intermediateTensorInfos.forEach(\n t => backend.disposeIntermediateTensorInfo(t));\n\n return result;\n}\n\nexport const packConfig: KernelConfig = {\n kernelName: Pack,\n backendName: 'webgl',\n kernelFunc: pack as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {getCoordsDataType, UniformType} from './shader_compiler';\n\nexport class PadProgram implements GPGPUProgram {\n variableNames = ['x'];\n outputShape: number[];\n userCode: string;\n customUniforms = [{name: 'value', type: 'float' as UniformType}];\n\n constructor(\n xShape: number[], paddings: Array<[number, number]>,\n constantValue: number) {\n this.outputShape = paddings.map(\n (p, i) => p[0] /* beforePad */ + xShape[i] + p[1] /* afterPad */);\n const rank = xShape.length;\n const type = getCoordsDataType(rank);\n\n const start = paddings.map(p => p[0]).join(',');\n const end = paddings.map((p, i) => p[0] + xShape[i]).join(',');\n const unpackedCoords =\n ['coords[0]', 'coords[1]', 'coords[2]', 'coords[3]'].slice(0, rank);\n\n if (rank === 1) {\n this.userCode = `\n int start = ${start};\n int end = ${end};\n\n void main() {\n int outC = getOutputCoords();\n if (outC < start || outC >= end) {\n setOutput(value);\n } else {\n setOutput(getX(outC - start));\n }\n }\n `;\n return;\n }\n this.userCode = `\n ${type} start = ${type}(${start});\n ${type} end = ${type}(${end});\n\n void main() {\n ${type} outC = getOutputCoords();\n if (any(lessThan(outC, start)) || any(greaterThanEqual(outC, end))) {\n setOutput(value);\n } else {\n ${type} coords = outC - start;\n setOutput(getX(${unpackedCoords}));\n }\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {getChannels} from './packing_util';\nimport {getCoordsDataType, UniformType} from './shader_compiler';\n\nexport class PadPackedProgram implements GPGPUProgram {\n variableNames = ['x'];\n packedInputs = true;\n packedOutput = true;\n outputShape: number[];\n userCode: string;\n customUniforms = [{name: 'value', type: 'float' as UniformType}];\n\n constructor(\n xShape: number[], paddings: Array<[number, number]>,\n constantValue: number) {\n this.outputShape = paddings.map(\n (p, i) => p[0] /* beforePad */ + xShape[i] + p[1] /* afterPad */);\n const rank = xShape.length;\n const dtype = getCoordsDataType(rank);\n\n const start = paddings.map(p => p[0]).join(',');\n const end = paddings.map((p, i) => p[0] + xShape[i]).join(',');\n const coords = getChannels('rc', rank);\n const source = getChannels('source', rank);\n const cLimit = `${coords[rank - 1]} < ${this.outputShape[rank - 1]}`;\n const innerDims =\n rank === 1 ? 'source' : `vec2(${source.slice(-2).join()})`;\n\n const componentSetup = [\n `${dtype} rc = outputLoc;`, `${coords[rank - 1]} += 1;\n if(${cLimit}) {\n `,\n rank === 1 ? '' : `}\n rc = outputLoc;\n ${coords[rank - 2]} += 1;\n if(${coords[rank - 2]} < ${this.outputShape[rank - 2]}) {`,\n rank === 1 ? '' : ` ${coords[rank - 1]} += 1;\n if(${cLimit}) {`\n ];\n\n const paddingArea = rank === 1 ?\n 'rc < start || rc >= end' :\n 'any(lessThan(rc, start)) || any(greaterThanEqual(rc, end))';\n let mainLoop = '';\n for (let i = 0, j = rank === 1 ? 2 : 4; i < j; i++) {\n mainLoop += `\n ${componentSetup[i]}\n if (${paddingArea}) {\n result[${i}] = float(value);\n } else {\n ${dtype} source = rc - start;\n result[${i}] = getChannel(getX(${source.join()}), ${innerDims});\n }\n `;\n }\n mainLoop += (rank === 1 ? `} ` : `}}`);\n\n this.userCode = `\n const ${dtype} start = ${dtype}(${start});\n const ${dtype} end = ${dtype}(${end});\n\n void main() {\n ${dtype} outputLoc = getOutputCoords();\n vec4 result = vec4(0.);\n ${mainLoop}\n setOutput(result);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env, KernelConfig, KernelFunc, PadV2, PadV2Attrs, PadV2Inputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {PadProgram} from '../pad_gpu';\nimport {PadPackedProgram} from '../pad_packed_gpu';\nimport {fill} from './Fill';\n\nexport const padV2 =\n (args: {inputs: PadV2Inputs, backend: MathBackendWebGL, attrs: PadV2Attrs}):\n TensorInfo => {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {paddings, constantValue} = attrs;\n\n if (util.sizeFromShape(x.shape) === 0) {\n // Short-circuit the computation, since x doesn't have value, only\n // the shape is used to compute output shape to pad.\n const outputShape = paddings.map(\n (p, i) =>\n p[0] /* beforePad */ + x.shape[i] + p[1] /* afterPad */);\n return fill({\n backend,\n attrs: {shape: outputShape, value: constantValue, dtype: x.dtype}\n });\n }\n\n const program = env().getBool('WEBGL_PACK_ARRAY_OPERATIONS') ?\n new PadPackedProgram(x.shape, paddings, constantValue) :\n new PadProgram(x.shape, paddings, constantValue);\n const customValues = [[constantValue]];\n return backend.runWebGLProgram(program, [x], x.dtype, customValues);\n };\n\nexport const padV2Config: KernelConfig = {\n kernelName: PadV2,\n backendName: 'webgl',\n kernelFunc: padV2 as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Pow} from '@tensorflow/tfjs-core';\n\nimport {CHECK_NAN_SNIPPET_PACKED} from '../binaryop_packed_gpu';\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst POW = `\n if(a < 0.0 && floor(b) < b){\n return NAN;\n }\n if (b == 0.0) {\n return 1.0;\n }\n return (round(mod(b, 2.0)) != 1) ?\n pow(abs(a), b) : sign(a) * pow(abs(a), b);\n`;\n\nconst POW_PACKED = `\n // isModRound1 has 1 for components with round(mod(b, 2.0)) == 1, 0 otherwise.\n vec4 isModRound1 = vec4(equal(round(mod(b, 2.0)), ivec4(1)));\n vec4 multiplier = sign(a) * isModRound1 + (vec4(1.0) - isModRound1);\n vec4 result = multiplier * pow(abs(a), b);\n\n // Ensure that a^0 = 1, including 0^0 = 1 as this correspond to TF and JS\n bvec4 isExpZero = equal(b, vec4(0.0));\n result.r = isExpZero.r ? 1.0 : result.r;\n result.g = isExpZero.g ? 1.0 : result.g;\n result.b = isExpZero.b ? 1.0 : result.b;\n result.a = isExpZero.a ? 1.0 : result.a;\n\n bvec4 isNaN1 = lessThan(a, vec4(0.0));\n bvec4 isNaN2 = lessThan(floor(b), b);\n bvec4 isNaN = bvec4(isNaN1.x && isNaN2.x, isNaN1.y && isNaN2.y, isNaN1.z && isNaN2.z, isNaN1.w && isNaN2.w);\n ` +\n CHECK_NAN_SNIPPET_PACKED + `\n return result;\n`;\n\nexport const pow =\n binaryKernelFunc({opSnippet: POW, packedOpSnippet: POW_PACKED});\n\nexport const powConfig: KernelConfig = {\n kernelName: Pow,\n backendName: 'webgl',\n kernelFunc: pow as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, Prod, ProdAttrs, ProdInputs, sumOutType, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {reduce} from '../kernel_utils/reduce';\nimport {prodImplCPU} from '../kernel_utils/shared';\n\nimport {reshape} from './Reshape';\nimport {transpose} from './Transpose';\n\nexport function prod(\n args: {inputs: ProdInputs, backend: MathBackendWebGL, attrs: ProdAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {axis, keepDims} = attrs;\n\n const xRank = x.shape.length;\n const toDispose = [];\n\n const origAxes = util.parseAxisParam(axis, x.shape);\n let axes = origAxes;\n const permutedAxes = backend_util.getAxesPermutation(axes, xRank);\n let permutedX = x;\n if (permutedAxes != null) {\n permutedX = transpose({inputs: {x}, backend, attrs: {perm: permutedAxes}});\n axes = backend_util.getInnerMostAxes(axes.length, xRank);\n toDispose.push(permutedX);\n }\n\n backend_util.assertAxesAreInnerMostDims('prod', axes, xRank);\n\n let res;\n if (backend.shouldExecuteOnCPU([permutedX])) {\n const xVals = backend.texData.get(permutedX.dataId).values as TypedArray;\n const {outVals, outShape, outDtype} =\n prodImplCPU(permutedX.shape, permutedX.dtype, xVals, axes);\n res = backend.makeTensorInfo(outShape, outDtype, outVals);\n } else {\n const [outShape, reduceShape] =\n backend_util.computeOutAndReduceShapes(permutedX.shape, axes);\n const inSize = util.sizeFromShape(reduceShape);\n const a2D = reshape(\n {inputs: {x: permutedX}, backend, attrs: {shape: [-1, inSize]}});\n const outputDType = sumOutType(x.dtype);\n const reduced = reduce(a2D, outputDType, 'prod', backend);\n res = reshape({inputs: {x: reduced}, backend, attrs: {shape: outShape}});\n\n toDispose.push(a2D);\n toDispose.push(reduced);\n }\n\n if (keepDims) {\n toDispose.push(res);\n const newShape = backend_util.expandShapeToKeepDim(res.shape, origAxes);\n res = reshape({inputs: {x: res}, backend, attrs: {shape: newShape}});\n }\n\n toDispose.forEach(t => backend.disposeIntermediateTensorInfo(t));\n\n return res;\n}\n\nexport const prodConfig: KernelConfig = {\n kernelName: Prod,\n backendName: 'webgl',\n kernelFunc: prod as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2022 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, RaggedGather, RaggedGatherAttrs, RaggedGatherInputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {raggedGatherImplCPU} from '../kernel_utils/shared';\n\nexport function raggedGather(args: {\n inputs: RaggedGatherInputs,\n backend: MathBackendWebGL,\n attrs: RaggedGatherAttrs\n}): TensorInfo[] {\n const {inputs, backend, attrs} = args;\n const {paramsNestedSplits, paramsDenseValues, indices} = inputs;\n const {outputRaggedRank} = attrs;\n\n const $paramsNestedSplits =\n paramsNestedSplits.map(t => backend.readSync(t.dataId) as TypedArray);\n const $paramsNestedSplitsShapes = paramsNestedSplits.map(t => t.shape);\n const $paramsDenseValues =\n backend.readSync(paramsDenseValues.dataId) as TypedArray;\n const $indices = backend.readSync(indices.dataId) as TypedArray;\n\n const [outputNestedSplits, outputDenseValues, outputDenseValuesShape] =\n raggedGatherImplCPU(\n $paramsNestedSplits, $paramsNestedSplitsShapes, $paramsDenseValues,\n paramsDenseValues.shape, paramsDenseValues.dtype, $indices,\n indices.shape, outputRaggedRank);\n\n const outputNestedSplitsTensors = outputNestedSplits.map(\n (splits) => backend.makeTensorInfo([splits.length], 'int32', splits));\n\n const outputDenseValuesTensor = backend.makeTensorInfo(\n outputDenseValuesShape, paramsDenseValues.dtype, outputDenseValues);\n\n return outputNestedSplitsTensors.concat([outputDenseValuesTensor]);\n}\n\nexport const raggedGatherConfig: KernelConfig = {\n kernelName: RaggedGather,\n backendName: 'webgl',\n kernelFunc: raggedGather as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2022 Google LLC.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, RaggedRange, RaggedRangeInputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {raggedRangeImplCPU} from '../kernel_utils/shared';\n\nexport function raggedRange(\n args: {inputs: RaggedRangeInputs, backend: MathBackendWebGL}):\n [TensorInfo, TensorInfo] {\n const {inputs, backend} = args;\n const {starts, limits, deltas} = inputs;\n\n const $starts = backend.readSync(starts.dataId) as TypedArray;\n const 
$limits = backend.readSync(limits.dataId) as TypedArray;\n const $deltas = backend.readSync(deltas.dataId) as TypedArray;\n\n const [rtNestedSplitsData, rtDenseValuesData] = raggedRangeImplCPU(\n $starts, starts.shape, starts.dtype, $limits, limits.shape, $deltas,\n deltas.shape);\n\n const rtNestedSplits = backend.makeTensorInfo(\n [rtNestedSplitsData.length], 'int32', rtNestedSplitsData);\n const rtDenseValues = backend.makeTensorInfo(\n [rtDenseValuesData.length], starts.dtype, rtDenseValuesData);\n\n return [rtNestedSplits, rtDenseValues];\n}\n\nexport const raggedRangeConfig: KernelConfig = {\n kernelName: RaggedRange,\n backendName: 'webgl',\n kernelFunc: raggedRange as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2022 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, RaggedTensorToTensor, RaggedTensorToTensorAttrs, RaggedTensorToTensorInputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {raggedTensorToTensorImplCPU} from '../kernel_utils/shared';\n\nexport function raggedTensorToTensor(args: {\n inputs: RaggedTensorToTensorInputs,\n backend: MathBackendWebGL,\n attrs: RaggedTensorToTensorAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {shape, values, defaultValue, rowPartitionTensors} = inputs;\n const {rowPartitionTypes} = attrs;\n\n const $shape = backend.readSync(shape.dataId) as TypedArray;\n const $values = backend.readSync(values.dataId) as TypedArray;\n const $defaultValue = backend.readSync(defaultValue.dataId) as TypedArray;\n const $rowPartitionValues =\n rowPartitionTensors.map(t => backend.readSync(t.dataId) as TypedArray);\n const rowPartitionValuesShapes = rowPartitionTensors.map(t => t.shape);\n\n const [outputShape, output] = raggedTensorToTensorImplCPU(\n $shape, shape.shape, $values, values.shape, values.dtype, $defaultValue,\n defaultValue.shape, $rowPartitionValues, rowPartitionValuesShapes,\n rowPartitionTypes);\n return backend.makeTensorInfo(outputShape, values.dtype, output);\n}\n\nexport const raggedTensorToTensorConfig: KernelConfig = {\n kernelName: RaggedTensorToTensor,\n backendName: 'webgl',\n kernelFunc: raggedTensorToTensor as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Range, RangeAttrs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {rangeImplCPU} from '../kernel_utils/shared';\n\nexport const range =\n (args: {backend: MathBackendWebGL, attrs: RangeAttrs}): TensorInfo => {\n const {backend, attrs} = args;\n const {start, stop, step, dtype} = attrs;\n const values = rangeImplCPU(start, stop, step, dtype);\n return backend.makeTensorInfo([values.length], dtype, values);\n };\n\nexport const rangeConfig: KernelConfig = {\n kernelName: Range,\n backendName: 'webgl',\n kernelFunc: range as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Reciprocal} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst RECIPROCAL = `return 1.0 / x;`;\n\nexport const reciprocal = unaryKernelFunc({opSnippet: RECIPROCAL});\n\nexport const reciprocalConfig: KernelConfig = {\n kernelName: Reciprocal,\n backendName: 'webgl',\n kernelFunc: reciprocal,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Relu} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {CHECK_NAN_SNIPPET} from '../unaryop_gpu';\n\nconst RELU = CHECK_NAN_SNIPPET + `\n return (x < 0.0) ? 0.0 : x;\n`;\n\nconst RELU_PACKED = `\n vec4 result = x * vec4(greaterThanEqual(x, vec4(0.0)));\n bvec4 isNaN = isnan(x);\n\n result.r = isNaN.r ? 
x.r : result.r;\n result.g = isNaN.g ? x.g : result.g;\n result.b = isNaN.b ? x.b : result.b;\n result.a = isNaN.a ? x.a : result.a;\n\n return result;\n`;\n\nexport const relu =\n unaryKernelFunc({opSnippet: RELU, packedOpSnippet: RELU_PACKED});\n\nexport const reluConfig: KernelConfig = {\n kernelName: Relu,\n backendName: 'webgl',\n kernelFunc: relu as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Relu6} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {CHECK_NAN_SNIPPET} from '../unaryop_gpu';\n\nconst RELU6 = CHECK_NAN_SNIPPET + `\n return (x < 0.0) ? 0.0 : min(6.0, x);\n`;\n\nconst RELU6_PACKED = `\n vec4 result = min(x, vec4(6.)) * vec4(greaterThanEqual(x, vec4(0.0)));\n bvec4 isNaN = isnan(x);\n\n result.r = isNaN.r ? x.r : result.r;\n result.g = isNaN.g ? x.g : result.g;\n result.b = isNaN.b ? x.b : result.b;\n result.a = isNaN.a ? x.a : result.a;\n\n return result;\n`;\n\nexport const relu6 =\n unaryKernelFunc({opSnippet: RELU6, packedOpSnippet: RELU6_PACKED});\n\nexport const relu6Config: KernelConfig = {\n kernelName: Relu6,\n backendName: 'webgl',\n kernelFunc: relu6 as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class ResizeBilinearProgram implements GPGPUProgram {\n variableNames = ['A'];\n outputShape: number[] = [];\n userCode: string;\n\n constructor(\n inputShape: [number, number, number, number], newHeight: number,\n newWidth: number, alignCorners: boolean, halfPixelCenters: boolean) {\n const [batch, oldHeight, oldWidth, depth] = inputShape;\n this.outputShape = [batch, newHeight, newWidth, depth];\n\n const effectiveInSize: [number, number] = [\n (alignCorners && newHeight > 1) ? oldHeight - 1 : oldHeight,\n (alignCorners && newWidth > 1) ? oldWidth - 1 : oldWidth\n ];\n\n const effectiveOutSize: [number, number] = [\n (alignCorners && newHeight > 1) ? newHeight - 1 : newHeight,\n (alignCorners && newWidth > 1) ? 
newWidth - 1 : newWidth\n ];\n\n let sourceFracIndexRC: string;\n if (halfPixelCenters) {\n sourceFracIndexRC =\n `(vec2(yRC) + vec2(0.5)) * effectiveInputOverOutputRatioRC` +\n ` - vec2(0.5)`;\n } else {\n sourceFracIndexRC = `vec2(yRC) * effectiveInputOverOutputRatioRC`;\n }\n\n this.userCode = `\n const vec2 effectiveInputOverOutputRatioRC = vec2(\n ${effectiveInSize[0] / effectiveOutSize[0]},\n ${effectiveInSize[1] / effectiveOutSize[1]});\n const vec2 inputShapeRC = vec2(${oldHeight}.0, ${oldWidth}.0);\n\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int d = coords[3];\n ivec2 yRC = coords.yz;\n\n // Fractional source index.\n vec2 sourceFracIndexRC = ${sourceFracIndexRC};\n\n // Compute the four integer indices.\n ivec2 sourceFloorRC = ivec2(max(sourceFracIndexRC, vec2(0.0)));\n ivec2 sourceCeilRC = ivec2(\n min(inputShapeRC - 1.0, ceil(sourceFracIndexRC)));\n\n float topLeft = getA(b, sourceFloorRC.x, sourceFloorRC.y, d);\n float bottomLeft = getA(b, sourceCeilRC.x, sourceFloorRC.y, d);\n float topRight = getA(b, sourceFloorRC.x, sourceCeilRC.y, d);\n float bottomRight = getA(b, sourceCeilRC.x, sourceCeilRC.y, d);\n\n vec2 fracRC = sourceFracIndexRC - vec2(sourceFloorRC);\n\n float top = topLeft + (topRight - topLeft) * fracRC.y;\n float bottom = bottomLeft + (bottomRight - bottomLeft) * fracRC.y;\n float newValue = top + (bottom - top) * fracRC.x;\n\n setOutput(newValue);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class ResizeBilinearPackedProgram implements GPGPUProgram {\n variableNames = ['A'];\n packedInputs = true;\n packedOutput = true;\n outputShape: number[] = [];\n userCode: string;\n\n constructor(\n inputShape: [number, number, number, number], newHeight: number,\n newWidth: number, alignCorners: boolean, halfPixelCenters: boolean) {\n const [batch, oldHeight, oldWidth, depth] = inputShape;\n this.outputShape = [batch, newHeight, newWidth, depth];\n\n const effectiveInSize: [number, number] = [\n (alignCorners && newHeight > 1) ? oldHeight - 1 : oldHeight,\n (alignCorners && newWidth > 1) ? oldWidth - 1 : oldWidth\n ];\n\n const effectiveOutSize: [number, number] = [\n (alignCorners && newHeight > 1) ? newHeight - 1 : newHeight,\n (alignCorners && newWidth > 1) ? 
newWidth - 1 : newWidth\n ];\n\n let sourceFracIndexRC: string;\n if (halfPixelCenters) {\n sourceFracIndexRC = `(vec3(yRC) + vec3(0.5)) * ` +\n `effectiveInputOverOutputRatioRC - vec3(0.5)`;\n } else {\n sourceFracIndexRC = `vec3(yRC) * effectiveInputOverOutputRatioRC`;\n }\n\n this.userCode = `\n const vec3 effectiveInputOverOutputRatioRC = vec3(\n ${effectiveInSize[0] / effectiveOutSize[0]},\n ${effectiveInSize[1] / effectiveOutSize[1]},\n ${effectiveInSize[1] / effectiveOutSize[1]});\n const vec3 inputShapeRC = vec3(${oldHeight}.0, ${oldWidth}.0,\n ${oldWidth}.0);\n\n float getAValue(int b, int r, int c, int d) {\n return getChannel(getA(b, r, c, d), vec2(c, d));\n }\n\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int d = coords[3];\n // Calculate values for next column in yRC.z.\n ivec3 yRC = coords.yzz + ivec3(0, 0, 1);\n\n // Fractional source index.\n vec3 sourceFracIndexRC = ${sourceFracIndexRC};\n\n // Compute the four integer indices.\n ivec3 sourceFloorRC = ivec3(max(sourceFracIndexRC, vec3(0.0)));\n ivec3 sourceCeilRC = ivec3(\n min(inputShapeRC - 1.0, ceil(sourceFracIndexRC)));\n\n // Should we calculate next column and row elements in 2x2 packed cell.\n bool hasNextCol = d < ${depth - 1};\n bool hasNextRow = coords.z < ${newWidth - 1};\n\n // In parallel, construct four corners for all four components in\n // packed 2x2 cell.\n vec4 topLeft = vec4(\n getAValue(b, sourceFloorRC.x, sourceFloorRC.y, d),\n hasNextCol ? getAValue(b, sourceFloorRC.x, sourceFloorRC.y, d + 1)\n : 0.0,\n hasNextRow ? getAValue(b, sourceFloorRC.x, sourceFloorRC.z, d)\n : 0.0,\n (hasNextRow && hasNextCol) ?\n getAValue(b, sourceFloorRC.x, sourceFloorRC.z, d + 1) : 0.0);\n\n vec4 bottomLeft = vec4(\n getAValue(b, sourceCeilRC.x, sourceFloorRC.y, d),\n hasNextCol ? getAValue(b, sourceCeilRC.x, sourceFloorRC.y, d + 1)\n : 0.0,\n hasNextRow ? getAValue(b, sourceCeilRC.x, sourceFloorRC.z, d)\n : 0.0,\n (hasNextRow && hasNextCol) ?\n getAValue(b, sourceCeilRC.x, sourceFloorRC.z, d + 1) : 0.0);\n\n vec4 topRight = vec4(\n getAValue(b, sourceFloorRC.x, sourceCeilRC.y, d),\n hasNextCol ? getAValue(b, sourceFloorRC.x, sourceCeilRC.y, d + 1)\n : 0.0,\n hasNextRow ? getAValue(b, sourceFloorRC.x, sourceCeilRC.z, d)\n : 0.0,\n (hasNextRow && hasNextCol) ?\n getAValue(b, sourceFloorRC.x, sourceCeilRC.z, d + 1) : 0.0);\n\n vec4 bottomRight = vec4(\n getAValue(b, sourceCeilRC.x, sourceCeilRC.y, d),\n hasNextCol ? getAValue(b, sourceCeilRC.x, sourceCeilRC.y, d + 1)\n : 0.0,\n hasNextRow ? getAValue(b, sourceCeilRC.x, sourceCeilRC.z, d)\n : 0.0,\n (hasNextRow && hasNextCol) ?\n getAValue(b, sourceCeilRC.x, sourceCeilRC.z, d + 1) : 0.0);\n\n vec3 fracRC = sourceFracIndexRC - vec3(sourceFloorRC);\n\n vec4 top = mix(topLeft, topRight, fracRC.yyzz);\n vec4 bottom = mix(bottomLeft, bottomRight, fracRC.yyzz);\n vec4 newValue = mix(top, bottom, fracRC.x);\n\n setOutput(newValue);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env, KernelConfig, KernelFunc, ResizeBilinear, ResizeBilinearAttrs, ResizeBilinearInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {ResizeBilinearProgram} from '../resize_bilinear_gpu';\nimport {ResizeBilinearPackedProgram} from '../resize_bilinear_packed_gpu';\n\nexport function resizeBilinear(args: {\n inputs: ResizeBilinearInputs,\n backend: MathBackendWebGL,\n attrs: ResizeBilinearAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {images} = inputs;\n const {alignCorners, halfPixelCenters, size} = attrs;\n\n const [newHeight, newWidth] = size;\n\n const program = env().getBool('WEBGL_PACK_IMAGE_OPERATIONS') ?\n new ResizeBilinearPackedProgram(\n images.shape as [number, number, number, number], newHeight, newWidth,\n alignCorners, halfPixelCenters) :\n new ResizeBilinearProgram(\n images.shape as [number, number, number, number], newHeight, newWidth,\n alignCorners, halfPixelCenters);\n return backend.runWebGLProgram(program, [images], 'float32');\n}\n\nexport const resizeBilinearConfig: KernelConfig = {\n kernelName: ResizeBilinear,\n backendName: 'webgl',\n kernelFunc: resizeBilinear as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class ResizeBilinearBackpropProgram implements GPGPUProgram {\n variableNames = ['dy'];\n outputShape: number[] = [];\n userCode: string;\n\n constructor(\n dyShape: [number, number, number, number],\n inputShape: [number, number, number, number], alignCorners: boolean) {\n this.outputShape = inputShape;\n const [, xHeight, xWidth, ] = inputShape;\n const [, yHeight, yWidth] = dyShape;\n\n // In the backwards pass, we want to find the pixels that were generated for\n // each pixel in the input image the forward pass and add the corresponding\n // coefficient from dy to the gradient (with some interpolation).\n\n const effectiveXSize: [number, number] = [\n (alignCorners && yHeight > 1) ? xHeight - 1 : xHeight,\n (alignCorners && yWidth > 1) ? xWidth - 1 : xWidth\n ];\n\n const effectiveYSize: [number, number] = [\n (alignCorners && yHeight > 1) ? 
yHeight - 1 : yHeight,\n (alignCorners && yWidth > 1) ? yWidth - 1 : yWidth\n ];\n\n const heightScale = effectiveXSize[0] / effectiveYSize[0];\n const widthScale = effectiveXSize[1] / effectiveYSize[1];\n\n const invHeightScale = 1 / heightScale;\n const invWidthScale = 1 / widthScale;\n\n // This defines the size of the window of values around a particular\n // index in dy that we want to search for contributions to dx.\n const winHeight = (Math.ceil(invHeightScale) * 2) + 2;\n const winWidth = (Math.ceil(invWidthScale) * 2) + 2;\n\n this.userCode = `\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int d = coords[3];\n int r = coords[1];\n int c = coords[2];\n\n float accumulator = 0.0;\n\n const float heightScale = float(${heightScale});\n const float widthScale = float(${widthScale});\n\n const float invHeightScale = float(${invHeightScale});\n const float invWidthScale = float(${invWidthScale});\n\n const int winHeight = int(${winHeight});\n const int winWidth = int(${winWidth});\n\n // Compute bounds for where in dy we will look\n float startRLerp = floor(float(r) * invHeightScale);\n int startDyR = int(startRLerp - float(winHeight / 2));\n\n float startCLerp = floor(float(c) * invWidthScale);\n int startDyC = int(startCLerp - float(winWidth / 2));\n\n // Loop over dy\n for (int dyROffset = 0; dyROffset < winHeight; dyROffset++) {\n int dyR = dyROffset + startDyR;\n\n // Guard against the window exceeding the bounds of dy\n if (dyR < 0 || dyR >= ${yHeight}) {\n continue;\n }\n\n for (int dyCOffset = 0; dyCOffset < winWidth; dyCOffset++) {\n int dyC = dyCOffset + startDyC;\n\n // Guard against the window exceeding the bounds of dy\n if (dyC < 0 || dyC >= ${yWidth}) {\n continue;\n }\n\n float dxR = float(dyR) * heightScale;\n int topDxRIndex = int(floor(dxR));\n int bottomDxRIndex = int(min(ceil(dxR), ${xHeight - 1}.0));\n float dxRLerp = dxR - float(topDxRIndex);\n float inverseDxRLerp = 1.0 - dxRLerp;\n\n float dxC = float(dyC) * widthScale;\n int leftDxCIndex = int(floor(dxC));\n int rightDxCIndex = int(min(ceil(dxC), ${xWidth - 1}.0));\n float dxCLerp = dxC - float(leftDxCIndex);\n float inverseDxCLerp = 1.0 - dxCLerp;\n\n if (r == topDxRIndex && c == leftDxCIndex) {\n // topLeft\n accumulator +=\n getDy(b, dyR, dyC, d) * inverseDxRLerp * inverseDxCLerp;\n }\n\n if (r == topDxRIndex && c == rightDxCIndex) {\n // topRight\n accumulator += getDy(b, dyR, dyC, d) * inverseDxRLerp * dxCLerp;\n }\n\n if (r == bottomDxRIndex && c == leftDxCIndex) {\n // bottomLeft\n accumulator += getDy(b, dyR, dyC, d) * dxRLerp * inverseDxCLerp;\n }\n\n if (r == bottomDxRIndex && c == rightDxCIndex) {\n // bottomRight\n accumulator += getDy(b, dyR, dyC, d) * dxRLerp * dxCLerp;\n }\n }\n }\n // End loop over dy\n\n setOutput(accumulator);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, ResizeBilinearGrad, ResizeBilinearGradAttrs, ResizeBilinearGradInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {ResizeBilinearBackpropProgram} from '../resize_bilinear_backprop_gpu';\n\nexport function resizeBilinearGrad(args: {\n inputs: ResizeBilinearGradInputs,\n backend: MathBackendWebGL,\n attrs: ResizeBilinearGradAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {images, dy} = inputs;\n const {alignCorners} = attrs;\n\n const program = new ResizeBilinearBackpropProgram(\n dy.shape as [number, number, number, number],\n images.shape as [number, number, number, number], alignCorners);\n\n return backend.runWebGLProgram(program, [dy], dy.dtype);\n}\n\nexport const resizeBilinearGradConfig: KernelConfig = {\n kernelName: ResizeBilinearGrad,\n backendName: 'webgl',\n kernelFunc: resizeBilinearGrad as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class ResizeNearestNeighborProgram implements GPGPUProgram {\n variableNames = ['A'];\n outputShape: number[] = [];\n userCode: string;\n\n constructor(\n inputShape: [number, number, number, number], newHeight: number,\n newWidth: number, alignCorners: boolean, halfPixelCenters: boolean) {\n const [batch, oldHeight, oldWidth, depth] = inputShape;\n this.outputShape = [batch, newHeight, newWidth, depth];\n\n const effectiveInSize: [number, number] = [\n (alignCorners && newHeight > 1) ? oldHeight - 1 : oldHeight,\n (alignCorners && newWidth > 1) ? oldWidth - 1 : oldWidth\n ];\n\n const effectiveOutSize: [number, number] = [\n (alignCorners && newHeight > 1) ? newHeight - 1 : newHeight,\n (alignCorners && newWidth > 1) ? newWidth - 1 : newWidth\n ];\n\n // When align corners is false, we rounds the value with floor.\n const roundBase = alignCorners ? 
'0.5' : '0.0';\n\n let sourceFracIndexRC: string;\n if (halfPixelCenters) {\n sourceFracIndexRC =\n `max((vec2(yRC) + vec2(0.5)) * effectiveInputOverOutputRatioRC` +\n `, vec2(0.0))`;\n } else {\n sourceFracIndexRC = `vec2(yRC) * effectiveInputOverOutputRatioRC`;\n }\n this.userCode = `\n const vec2 effectiveInputOverOutputRatioRC = vec2(\n ${effectiveInSize[0] / effectiveOutSize[0]},\n ${effectiveInSize[1] / effectiveOutSize[1]});\n const vec2 inputShapeRC = vec2(${oldHeight}.0, ${oldWidth}.0);\n\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int d = coords[3];\n ivec2 yRC = coords.yz;\n\n // Fractional source index.\n vec2 sourceFracIndexRC = ${sourceFracIndexRC};\n\n // Compute the coordinators of nearest neighbor point.\n ivec2 sourceNearestRC = ivec2(\n min(inputShapeRC - 1.0, floor(sourceFracIndexRC + ${roundBase})));\n float newValue = getA(b, sourceNearestRC.x, sourceNearestRC.y, d);\n\n setOutput(newValue);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class ResizeNearestNeighborPackedProgram implements GPGPUProgram {\n variableNames = ['A'];\n packedInputs = true;\n packedOutput = true;\n outputShape: number[] = [];\n userCode: string;\n\n constructor(\n inputShape: [number, number, number, number], newHeight: number,\n newWidth: number, alignCorners: boolean, halfPixelCenters: boolean) {\n const [batch, oldHeight, oldWidth, depth] = inputShape;\n this.outputShape = [batch, newHeight, newWidth, depth];\n\n const effectiveInSize: [number, number] = [\n (alignCorners && newHeight > 1) ? oldHeight - 1 : oldHeight,\n (alignCorners && newWidth > 1) ? oldWidth - 1 : oldWidth\n ];\n\n const effectiveOutSize: [number, number] = [\n (alignCorners && newHeight > 1) ? newHeight - 1 : newHeight,\n (alignCorners && newWidth > 1) ? newWidth - 1 : newWidth\n ];\n\n // When align corners is false, we rounds the value with floor.\n const roundBase = alignCorners ? 
'0.5' : '0.0';\n let sourceFracIndexRC: string;\n if (halfPixelCenters) {\n sourceFracIndexRC = `max((vec3(yRC) + vec3(0.5)) * ` +\n `effectiveInputOverOutputRatioRC, vec3(0.0))`;\n } else {\n sourceFracIndexRC = `vec3(yRC) * effectiveInputOverOutputRatioRC`;\n }\n\n this.userCode = `\n const vec3 effectiveInputOverOutputRatioRC = vec3(\n ${effectiveInSize[0] / effectiveOutSize[0]},\n ${effectiveInSize[1] / effectiveOutSize[1]},\n ${effectiveInSize[1] / effectiveOutSize[1]});\n const vec3 inputShapeRC = vec3(${oldHeight}.0, ${oldWidth}.0,\n ${oldWidth}.0);\n\n float getAValue(int b, int r, int c, int d) {\n return getChannel(getA(b, r, c, d), vec2(c, d));\n }\n\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int d = coords[3];\n // Calculate values for next column in yRC.z.\n ivec3 yRC = coords.yzz + ivec3(0, 0, 1);\n\n // Fractional source index.\n vec3 sourceFracIndexRC = ${sourceFracIndexRC};\n\n // Compute the coordinators of nearest neighbor point.\n ivec3 sourceNearestRC = ivec3(\n min(inputShapeRC - 1.0, floor(sourceFracIndexRC + ${roundBase})));\n\n // Should we calculate next column and row elements in 2x2 packed cell.\n bool hasNextCol = d < ${depth - 1};\n bool hasNextRow = coords.z < ${newWidth - 1};\n\n vec4 newValue = vec4(\n getAValue(b, sourceNearestRC.x, sourceNearestRC.y, d),\n hasNextCol ? getAValue(b, sourceNearestRC.x, sourceNearestRC.y, d + 1)\n : 0.0,\n hasNextRow ? getAValue(b, sourceNearestRC.x, sourceNearestRC.z, d)\n : 0.0,\n (hasNextRow && hasNextCol) ?\n getAValue(b, sourceNearestRC.x, sourceNearestRC.z, d + 1) : 0.0);\n\n setOutput(newValue);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env, KernelConfig, KernelFunc, ResizeNearestNeighbor, ResizeNearestNeighborAttrs, ResizeNearestNeighborInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {ResizeNearestNeighborProgram} from '../resize_nearest_neighbor_gpu';\nimport {ResizeNearestNeighborPackedProgram} from '../resize_nearest_neighbor_packed_gpu';\n\nexport function resizeNearestNeighbor(args: {\n inputs: ResizeNearestNeighborInputs,\n backend: MathBackendWebGL,\n attrs: ResizeNearestNeighborAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {images} = inputs;\n const {alignCorners, halfPixelCenters, size} = attrs;\n\n const [newHeight, newWidth] = size;\n\n const program = env().getBool('WEBGL_PACK_IMAGE_OPERATIONS') ?\n new ResizeNearestNeighborPackedProgram(\n images.shape as [number, number, number, number], newHeight, newWidth,\n alignCorners, halfPixelCenters) :\n new ResizeNearestNeighborProgram(\n images.shape as [number, number, number, number], newHeight, newWidth,\n alignCorners, halfPixelCenters);\n return backend.runWebGLProgram(program, [images], images.dtype);\n}\n\nexport const 
resizeNearestNeighborConfig: KernelConfig = {\n kernelName: ResizeNearestNeighbor,\n backendName: 'webgl',\n kernelFunc: resizeNearestNeighbor as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class ResizeNearestNeigborBackpropProgram implements GPGPUProgram {\n variableNames = ['dy'];\n outputShape: number[] = [];\n userCode: string;\n\n constructor(\n dyShape: [number, number, number, number],\n inputShape: [number, number, number, number], alignCorners: boolean) {\n this.outputShape = inputShape;\n const [, xHeight, xWidth, ] = inputShape;\n const [, yHeight, yWidth] = dyShape;\n\n // In the backwards pass, we want to find the pixels that were generated for\n // each pixel in the input image the forward pass and add the corresponding\n // coefficient from dy to the gradient (with some interpolation).\n\n const effectiveXSize: [number, number] = [\n (alignCorners && yHeight > 1) ? xHeight - 1 : xHeight,\n (alignCorners && yWidth > 1) ? xWidth - 1 : xWidth\n ];\n\n const effectiveYSize: [number, number] = [\n (alignCorners && yHeight > 1) ? yHeight - 1 : yHeight,\n (alignCorners && yWidth > 1) ? 
yWidth - 1 : yWidth\n ];\n\n const heightScale = effectiveXSize[0] / effectiveYSize[0];\n const widthScale = effectiveXSize[1] / effectiveYSize[1];\n\n const invHeightScale = 1 / heightScale;\n const invWidthScale = 1 / widthScale;\n\n // This defines the size of the window of values around a particular\n // index in dy that we want to search for contributions to dx.\n const winHeight = (Math.ceil(invHeightScale) * 2) + 2;\n const winWidth = (Math.ceil(invWidthScale) * 2) + 2;\n\n this.userCode = `\n void main() {\n ivec4 coords = getOutputCoords();\n int b = coords[0];\n int d = coords[3];\n int r = coords[1];\n int c = coords[2];\n\n float accumulator = 0.0;\n\n const float heightScale = float(${heightScale});\n const float widthScale = float(${widthScale});\n\n const float invHeightScale = float(${invHeightScale});\n const float invWidthScale = float(${invWidthScale});\n\n const int winHeight = int(${winHeight});\n const int winWidth = int(${winWidth});\n\n // Compute bounds for where in dy we will look\n float startRLerp = floor(float(r) * invHeightScale);\n int startDyR = int(floor(startRLerp - float(winHeight / 2)));\n\n float startCLerp = floor(float(c) * invWidthScale);\n int startDyC = int(floor(startCLerp - float(winWidth / 2)));\n\n // Loop over dy\n for (int dyROffset = 0; dyROffset < winHeight; dyROffset++) {\n int dyR = dyROffset + startDyR;\n\n // Guard against the window exceeding the bounds of dy\n if (dyR < 0 || dyR >= ${yHeight}) {\n continue;\n }\n\n for (int dyCOffset = 0; dyCOffset < winWidth; dyCOffset++) {\n int dyC = dyCOffset + startDyC;\n\n // Guard against the window exceeding the bounds of dy\n if (dyC < 0 || dyC >= ${yWidth}) {\n continue;\n }\n\n float sourceFracRow =\n float(${effectiveXSize[0]}) *\n (float(dyR) / float(${effectiveYSize[0]}));\n\n float sourceFracCol =\n float(${effectiveXSize[1]}) *\n (float(dyC) / float(${effectiveYSize[1]}));\n\n int sourceNearestRow = int(min(\n float(int(${xHeight}) - 1),\n ${alignCorners} ? float(round(sourceFracRow)) :\n float(floor(sourceFracRow))));\n\n int sourceNearestCol = int(min(\n float(int(${xWidth}) - 1),\n ${alignCorners} ? float(round(sourceFracCol)) :\n float(floor(sourceFracCol))));\n\n if (r == sourceNearestRow && c == sourceNearestCol) {\n accumulator += getDy(b, dyR, dyC, d);\n }\n }\n }\n // End loop over dy\n\n setOutput(accumulator);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, ResizeNearestNeighborGrad, ResizeNearestNeighborGradAttrs, ResizeNearestNeighborGradInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {ResizeNearestNeigborBackpropProgram} from '../resize_nearest_neighbor_backprop_gpu';\n\nexport function resizeNearestNeighborGrad(args: {\n inputs: ResizeNearestNeighborGradInputs,\n backend: MathBackendWebGL,\n attrs: ResizeNearestNeighborGradAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {images, dy} = inputs;\n const {alignCorners} = attrs;\n\n const program = new ResizeNearestNeigborBackpropProgram(\n dy.shape as [number, number, number, number],\n images.shape as [number, number, number, number], alignCorners);\n return backend.runWebGLProgram(program, [dy], dy.dtype);\n}\n\nexport const resizeNearestNeighborGradConfig: KernelConfig = {\n kernelName: ResizeNearestNeighborGrad,\n backendName: 'webgl',\n kernelFunc: resizeNearestNeighborGrad as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {getCoordsDataType} from './shader_compiler';\n\nexport class ReverseProgram implements GPGPUProgram {\n variableNames = ['x'];\n outputShape: number[];\n userCode: string;\n\n constructor(xShape: number[], axis: number[]) {\n const rank = xShape.length;\n if (rank > 4) {\n throw new Error(\n `WebGL backend: Reverse of rank-${rank} tensor is not yet supported`);\n }\n this.outputShape = xShape;\n\n if (rank === 1) {\n this.userCode = `\n void main() {\n int coord = getOutputCoords();\n setOutput(getX(${xShape[0]} - coord - 1));\n }\n `;\n return;\n }\n const getInCoord = (i: number) => {\n if (axis.indexOf(i) !== -1 && xShape[i] !== 1) {\n return `${xShape[i]} - coords[${i}] - 1`;\n }\n return `coords[${i}]`;\n };\n const inCoords = xShape.map((_, i) => getInCoord(i)).join(',');\n const type = getCoordsDataType(rank);\n\n this.userCode = `\n void main() {\n ${type} coords = getOutputCoords();\n setOutput(getX(${inCoords}));\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {getChannels} from './packing_util';\nimport {getCoordsDataType} from './shader_compiler';\n\nexport class ReversePackedProgram implements GPGPUProgram {\n variableNames = ['x'];\n outputShape: number[];\n userCode: string;\n packedInputs = true;\n packedOutput = true;\n\n constructor(xShape: number[], axis: number[]) {\n const rank = xShape.length;\n if (rank > 4) {\n throw new Error(\n `WebGL backend: Reverse of rank-${rank} tensor is not yet supported`);\n }\n this.outputShape = xShape;\n const channels = getChannels('rc', rank);\n const nextColumn =\n `${channels[rank - 1]} + 1 < ${this.outputShape[rank - 1]}`;\n const nextRow = `${channels[rank - 2]} + 1 < ${this.outputShape[rank - 2]}`;\n const type = getCoordsDataType(rank);\n if (rank === 1) {\n this.userCode = `\n void main(){\n int rc = getOutputCoords();\n vec4 result = vec4(0.);\n result.r = getChannel(getX(${xShape[0]} - rc - 1),\n ${xShape[0]} - rc - 1);\n if(${nextColumn}){\n result.g = getChannel(getX(${xShape[0]} - (rc + 1) - 1),\n ${xShape[0]} - (rc + 1) - 1);\n }\n setOutput(result);\n }\n `;\n } else {\n this.userCode = `\n void main() {\n ${type} rc = getOutputCoords();\n vec4 result = vec4(0.);\n result.r = ${getR(channels.slice())};\n if(${nextColumn}){\n result.g = ${getG(channels.slice())};\n }\n if(${nextRow}) {\n result.b = ${getB(channels.slice())};\n if(${nextColumn}) {\n result.a = ${getA(channels.slice())};\n }\n }\n setOutput(result);\n }\n `;\n }\n\n function getR(channels: string[]): string {\n return getChannel(channels);\n }\n\n function getG(channels: string[]): string {\n channels[rank - 1] = '(' + channels[rank - 1] + ` + 1)`;\n return getChannel(channels);\n }\n\n function getB(channels: string[]): string {\n channels[rank - 2] = '(' + channels[rank - 2] + ` + 1)`;\n return getChannel(channels);\n }\n\n function getA(channels: string[]): string {\n channels[rank - 1] = '(' + channels[rank - 1] + ` + 1)`;\n channels[rank - 2] = '(' + channels[rank - 2] + ` + 1)`;\n return getChannel(channels);\n }\n\n function getChannel(channels: string[]): string {\n const inCoordsArray = xShape.map((_, i) => getInCoord(i, channels));\n const inCoords = inCoordsArray.join(',');\n const innerDims = inCoordsArray.slice(-2).join(',');\n return `getChannel(getX(${inCoords}), vec2(${innerDims}))`;\n }\n\n function getInCoord(i: number, channels1: string[]): string {\n if (axis.indexOf(i) !== -1 && xShape[i] !== 1) {\n return `${xShape[i]} - ${channels1[i]} - 1`;\n } else {\n return `${channels1[i]}`;\n }\n }\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env, KernelConfig, KernelFunc, Reverse, ReverseAttrs, ReverseInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {ReverseProgram} from '../reverse_gpu';\nimport {ReversePackedProgram} from '../reverse_packed_gpu';\n\nimport {identity} from './Identity';\n\nexport function reverse(args: {\n inputs: ReverseInputs,\n backend: MathBackendWebGL,\n attrs: ReverseAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {dims} = attrs;\n\n const xRank = x.shape.length;\n\n const $dims = util.parseAxisParam(dims, x.shape);\n if (xRank === 0) {\n return identity({inputs: {x}, backend});\n }\n\n const program = env().getBool('WEBGL_PACK_ARRAY_OPERATIONS') ?\n new ReversePackedProgram(x.shape, $dims) :\n new ReverseProgram(x.shape, $dims);\n\n return backend.runWebGLProgram(program, [x], x.dtype);\n}\n\nexport const reverseConfig: KernelConfig = {\n kernelName: Reverse,\n backendName: 'webgl',\n kernelFunc: reverse as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {UniformType} from './shader_compiler';\n\nexport class RotateProgram implements GPGPUProgram {\n variableNames = ['Image'];\n outputShape: number[] = [];\n userCode: string;\n customUniforms = [{name: 'params', type: 'vec4' as UniformType}];\n constructor(\n imageShape: [number, number, number, number],\n fillValue: number|[number, number, number]) {\n const imageHeight = imageShape[1];\n const imageWidth = imageShape[2];\n this.outputShape = imageShape;\n\n let fillSnippet = '';\n if (typeof fillValue === 'number') {\n fillSnippet = `float outputValue = ${fillValue.toFixed(2)};`;\n } else {\n fillSnippet = `\n vec3 fill = vec3(${fillValue.join(',')});\n float outputValue = fill[coords[3]];`;\n }\n\n this.userCode = `\n void main() {\n ivec4 coords = getOutputCoords();\n int x = coords[2];\n int y = coords[1];\n float coordXFloat = (float(x) - params[0]) * params[3] -\n (float(y) - params[1]) * params[2];\n float coordYFloat = (float(x) - params[0]) * params[2] +\n (float(y) - params[1]) * params[3];\n int coordX = int(round(coordXFloat + params[0]));\n int coordY = int(round(coordYFloat + params[1]));\n ${fillSnippet}\n if(coordX >= 0 && coordX < ${imageWidth} && coordY >= 0 && coordY < ${\n imageHeight}) {\n outputValue = getImage(coords[0], coordY, coordX, coords[3]);\n }\n setOutput(outputValue);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, Tensor4D} from '@tensorflow/tfjs-core';\nimport {RotateWithOffset, RotateWithOffsetAttrs, RotateWithOffsetInputs} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {RotateProgram} from '../rotate_gpu';\n\nexport const rotateWithOffsetConfig: KernelConfig = {\n kernelName: RotateWithOffset,\n backendName: 'webgl',\n kernelFunc: ({inputs, attrs, backend}) => {\n const {image} = inputs as RotateWithOffsetInputs;\n const {radians, fillValue, center} =\n attrs as unknown as RotateWithOffsetAttrs;\n const webglBackend = backend as MathBackendWebGL;\n\n const program = new RotateProgram((image as Tensor4D).shape, fillValue);\n const [centerX, centerY] =\n backend_util.getImageCenter(center, image.shape[1], image.shape[2]);\n const customValues =\n [[centerX, centerY, Math.sin(radians), Math.cos(radians)]];\n const output = webglBackend.runWebGLProgram(\n program, [image], image.dtype, customValues);\n return output;\n }\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Round} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst ROUND = `\n // OpenGL ES does not support round function.\n // The algorithm is based on banker's rounding.\n float base = floor(x);\n if ((x - base) < 0.5) {\n return floor(x);\n } else if ((x - base) > 0.5) {\n return ceil(x);\n } else {\n if (mod(base, 2.0) == 0.0) {\n return base;\n } else {\n return base + 1.0;\n }\n }\n`;\n\nexport const round = unaryKernelFunc({opSnippet: ROUND});\n\nexport const roundConfig: KernelConfig = {\n kernelName: Round,\n backendName: 'webgl',\n kernelFunc: round,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Rsqrt} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {rsqrtImplCPU} from '../kernel_utils/shared';\n\nconst RSQRT = `return inversesqrt(x);`;\n\nexport const rsqrt =\n unaryKernelFunc({opSnippet: RSQRT, cpuKernelImpl: rsqrtImplCPU});\n\nexport const rsqrtConfig: KernelConfig = {\n kernelName: Rsqrt,\n backendName: 'webgl',\n kernelFunc: rsqrt as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2018 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {getCoordsDataType} from './shader_compiler';\n\nexport class ScatterProgram implements GPGPUProgram {\n variableNames = ['updates', 'indices', 'defaultValue'];\n outputShape: number[];\n userCode: string;\n\n constructor(\n updateSize: number, sliceDim: number, indicesRank: number,\n updatesRank: number, strides: number[], shape: number[],\n summingDupeIndex = true) {\n this.outputShape = shape;\n const stridesType = getCoordsDataType(strides.length);\n const dtype = getCoordsDataType(shape.length);\n let indicesString = '';\n if (indicesRank === 1) {\n indicesString = 'i';\n } else if (indicesRank === 2) {\n indicesString = 'i, j';\n }\n const indicesSnippet = `getIndices(${indicesString})`;\n\n let updatesString = '';\n if (updatesRank === 1) {\n updatesString = 'i';\n } else if (updatesRank === 2) {\n updatesString = 'i, coords[1]';\n }\n const updatesSnippet = `getUpdates(${updatesString})`;\n\n const strideString = sliceDim > 1 ? 'strides[j]' : 'strides';\n this.userCode = `\n ${stridesType} strides = ${stridesType}(${strides});\n\n void main() {\n ${dtype} coords = getOutputCoords();\n float sum = 0.0;\n bool found = false;\n for (int i = 0; i < ${updateSize}; i++) {\n int flattenedIndex = 0;\n for (int j = 0; j < ${sliceDim}; j++) {\n int index = round(${indicesSnippet});\n flattenedIndex += index * ${strideString};\n }\n if (flattenedIndex == coords[0]) {\n sum += ${updatesSnippet};\n found = true;\n }\n }\n setOutput(mix(getDefaultValue(), sum, float(found)));\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, ScatterNd, ScatterNdAttrs, ScatterNdInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {ScatterProgram} from '../scatter_gpu';\nimport {reshape} from './Reshape';\n\nexport function scatterNd(args: {\n inputs: ScatterNdInputs,\n backend: MathBackendWebGL,\n attrs: ScatterNdAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {indices, updates} = inputs;\n const {shape} = attrs;\n\n const {sliceRank, numUpdates, sliceSize, strides, outputSize} =\n backend_util.calculateShapes(updates, indices, shape);\n\n const flattenShape = [outputSize / sliceSize, sliceSize];\n\n if (outputSize === 0) {\n return backend.makeTensorInfo(shape, indices.dtype);\n }\n\n const flattenIndices = reshape(\n {inputs: {x: indices}, backend, attrs: {shape: [numUpdates, sliceRank]}});\n const flattenX = reshape(\n {inputs: {x: updates}, backend, attrs: {shape: [numUpdates, sliceSize]}});\n\n const defaultValue = backend.makeTensorInfo(\n [], 'float32', new Float32Array([0])); // scalar(0)\n const program = new ScatterProgram(\n numUpdates, sliceRank, flattenIndices.shape.length, flattenX.shape.length,\n strides, flattenShape);\n const res = backend.runWebGLProgram(\n program, [flattenX, flattenIndices, defaultValue], flattenX.dtype);\n\n const reshaped = reshape({inputs: {x: res}, backend, attrs: {shape}});\n\n backend.disposeIntermediateTensorInfo(flattenIndices);\n backend.disposeIntermediateTensorInfo(flattenX);\n backend.disposeIntermediateTensorInfo(res);\n backend.disposeIntermediateTensorInfo(defaultValue);\n\n return reshaped;\n}\n\nexport const scatterNdConfig: KernelConfig = {\n kernelName: ScatterNd,\n backendName: 'webgl',\n kernelFunc: scatterNd as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2022 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env} from '@tensorflow/tfjs-core';\nimport {GPGPUProgram} from './gpgpu_math';\nimport {UniformType} from './shader_compiler';\n\nexport class SearchSortedProgram implements GPGPUProgram {\n variableNames = ['sortedSequence', 'values'];\n outputShape: number[];\n userCode: string;\n customUniforms = [{name: 'numInputs', type: 'int' as UniformType}];\n\n constructor(\n batchSize: number, numInputs: number, numValues: number,\n side: 'left'|'right') {\n this.outputShape = [batchSize, numValues];\n\n const webGL2LoopHead = 'while (left < right) {';\n // WebGL1 doesn't accept non constant loop conditions, so upper bound loop\n // iterations.\n const webGL1LoopHead = `for (int i = 0; i < ${\n Math.ceil(Math.log2(numInputs + 1))}; ++i) { if (left >= right) break;`;\n const loopHead = env().getNumber('WEBGL_VERSION') === 2 ? webGL2LoopHead :\n webGL1LoopHead;\n\n // left corresponds to lower bound and right to upper bound.\n const boundComparator = side === 'left' ? '<' : '<=';\n this.userCode = `\n int findBound(int batch, float value) {\n int left = 0;\n int right = numInputs;\n int mid;\n ${loopHead}\n mid = (left + right) / 2;\n if (getSortedSequence(batch, mid) ${boundComparator} value) {\n left = mid + 1;\n } else {\n right = mid;\n }\n }\n return right;\n }\n\n void main() {\n ivec2 coords = getOutputCoords();\n int batch = coords[0];\n int valueIndex = coords[1];\n\n float value = getValues(batch, valueIndex);\n\n setOutput(float(findBound(batch, value)));\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2022 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, SearchSorted, SearchSortedAttrs, SearchSortedInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {SearchSortedProgram} from '../search_sorted_gpu';\n\nexport function searchSorted(args: {\n inputs: SearchSortedInputs,\n backend: MathBackendWebGL,\n attrs: SearchSortedAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {sortedSequence, values} = inputs;\n const {side} = attrs;\n\n const program = new SearchSortedProgram(\n sortedSequence.shape[0], sortedSequence.shape[1], values.shape[1], side);\n const customValues = [[sortedSequence.shape[1]]];\n return backend.runWebGLProgram(\n program, [sortedSequence, values], 'int32', customValues);\n}\n\nexport const searchSortedConfig: KernelConfig = {\n kernelName: SearchSorted,\n backendName: 'webgl',\n kernelFunc: searchSorted as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {getCoordsDataType} from './shader_compiler';\n\nexport class SelectProgram implements GPGPUProgram {\n variableNames = ['c', 'a', 'b'];\n outputShape: number[];\n userCode: string;\n\n constructor(cRank: number, shape: number[], rank: number) {\n this.outputShape = shape;\n\n let cCoords;\n let abCoords;\n if (rank > 4) {\n throw Error(`Where for rank ${rank} is not yet supported`);\n }\n\n if (rank === 1) {\n abCoords = `resRC`;\n cCoords = `resRC`;\n } else {\n const currentCoords = ['resRC.x', 'resRC.y', 'resRC.z', 'resRC.w'];\n const cCoordVars = [];\n const abCoordVars = [];\n for (let i = 0; i < shape.length; i++) {\n abCoordVars.push(`${currentCoords[i]}`);\n if (i < cRank) {\n cCoordVars.push(`${currentCoords[i]}`);\n }\n }\n cCoords = cCoordVars.join();\n abCoords = abCoordVars.join();\n }\n\n const dtype = getCoordsDataType(rank);\n\n this.userCode = `\n void main() {\n ${dtype} resRC = getOutputCoords();\n float cVal = getC(${cCoords});\n if (cVal >= 1.0) {\n setOutput(getA(${abCoords}));\n } else {\n setOutput(getB(${abCoords}));\n }\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Select, SelectInputs, TensorInfo, upcastType} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {SelectProgram} from '../select_gpu';\n\nexport function select(args: {inputs: SelectInputs, backend: MathBackendWebGL}):\n TensorInfo {\n const {inputs, backend} = args;\n const {condition, t, e} = inputs;\n\n const program =\n new SelectProgram(condition.shape.length, t.shape, t.shape.length);\n return backend.runWebGLProgram(\n program, [condition, t, e], upcastType(t.dtype, e.dtype));\n}\n\nexport const selectConfig: KernelConfig = {\n kernelName: Select,\n backendName: 'webgl',\n kernelFunc: select as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, Selu} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst SELU = `\n // Stable and Attracting Fixed Point (0, 1) for Normalized Weights.\n // see: https://arxiv.org/abs/1706.02515\n float scaleAlpha = ${backend_util.SELU_SCALEALPHA};\n float scale = ${backend_util.SELU_SCALE};\n return (x >= 0.0) ? scale * x : scaleAlpha * (exp(x) - 1.0);\n`;\n\nexport const selu = unaryKernelFunc({opSnippet: SELU});\n\nexport const seluConfig: KernelConfig = {\n kernelName: Selu,\n backendName: 'webgl',\n kernelFunc: selu,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Sigmoid} from '@tensorflow/tfjs-core';\n\nimport {CHECK_NAN_SNIPPET_UNARY, unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {sigmoidImplCPU} from '../kernel_utils/shared';\n\nconst SIGMOID = CHECK_NAN_SNIPPET_UNARY + `\n return 1.0 / (1.0 + exp(-1.0 * x));\n`;\n\nconst SIGMOID_PACKED = `\n vec4 result = 1.0 / (1.0 + exp(-1.0 * x));\n bvec4 isNaN = isnan(x);\n\n result.r = isNaN.r ? x.r : result.r;\n result.g = isNaN.g ? x.g : result.g;\n result.b = isNaN.b ? x.b : result.b;\n result.a = isNaN.a ? x.a : result.a;\n\n return result;\n`;\nexport const sigmoid = unaryKernelFunc({\n opSnippet: SIGMOID,\n packedOpSnippet: SIGMOID_PACKED,\n cpuKernelImpl: sigmoidImplCPU\n});\n\nexport const sigmoidConfig: KernelConfig = {\n kernelName: Sigmoid,\n backendName: 'webgl',\n kernelFunc: sigmoid,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Sign} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\n// Sign does not propagate NANs.\nconst SIGN = `\n if (isnan(x)) { return 0.0; }\n return sign(x);\n`;\n\nexport const sign = unaryKernelFunc({opSnippet: SIGN});\n\nexport const signConfig: KernelConfig = {\n kernelName: Sign,\n backendName: 'webgl',\n kernelFunc: sign,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Sin} from '@tensorflow/tfjs-core';\n\nimport {CHECK_NAN_SNIPPET_UNARY, unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst SIN = CHECK_NAN_SNIPPET_UNARY + `\n return sin(x);\n`;\n\nexport const sin = unaryKernelFunc({opSnippet: SIN});\n\nexport const sinConfig: KernelConfig = {\n kernelName: Sin,\n backendName: 'webgl',\n kernelFunc: sin,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Sinh} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst SINH = `\n float e2x = exp(x);\n return (e2x - 1.0 / e2x) / 2.0;\n`;\n\nexport const sinh = unaryKernelFunc({opSnippet: SINH});\n\nexport const sinhConfig: KernelConfig = {\n kernelName: Sinh,\n backendName: 'webgl',\n kernelFunc: sinh,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Softplus} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst SOFTPLUS = `\n float epsilon = 1.1920928955078125e-7;\n float threshold = log(epsilon) + 2.0;\n\n bool too_large = x > -threshold;\n bool too_small = x < threshold;\n\n float result;\n float exp_x = exp(x);\n\n if (too_large){\n result = x;\n }\n else if (too_small){\n result = exp_x;\n }\n else{\n result = log(exp_x + 1.0);\n }\n return result;\n`;\n\nexport const softplus = unaryKernelFunc({opSnippet: SOFTPLUS});\n\nexport const softplusConfig: KernelConfig = {\n kernelName: Softplus,\n backendName: 'webgl',\n kernelFunc: softplus,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, SpaceToBatchND, SpaceToBatchNDAttrs, SpaceToBatchNDInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\n\nimport {padV2} from './PadV2';\nimport {reshape} from './Reshape';\nimport {transpose} from './Transpose';\n\nexport const spaceToBatchND = (args: {\n inputs: SpaceToBatchNDInputs,\n backend: MathBackendWebGL,\n attrs: SpaceToBatchNDAttrs\n}): TensorInfo => {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {blockShape, paddings} = attrs;\n\n util.assert(\n x.shape.length <= 4,\n () => 'spaceToBatchND for rank > 4 with a WebGL backend not ' +\n 'implemented yet');\n\n const prod = blockShape.reduce((a, b) => a * b);\n\n const completePaddings: Array<[number, number]> = [[0, 0]];\n completePaddings.push(...paddings as Array<[number, number]>);\n for (let i = 1 + blockShape.length; i < x.shape.length; ++i) {\n completePaddings.push([0, 0]);\n }\n\n const toDispose = [];\n\n const paddedX = padV2({\n inputs: {x},\n backend,\n attrs: {paddings: completePaddings, constantValue: 0}\n });\n\n const reshapedPaddedShape =\n backend_util.getReshaped(paddedX.shape, blockShape, prod, false);\n\n const permutedReshapedPaddedPermutation = backend_util.getPermuted(\n reshapedPaddedShape.length, blockShape.length, false);\n\n const flattenShape =\n backend_util.getReshapedPermuted(paddedX.shape, blockShape, prod, false);\n\n 
const reshapedPaddedX = reshape(\n {inputs: {x: paddedX}, backend, attrs: {shape: reshapedPaddedShape}});\n\n const paddedXT = transpose({\n inputs: {x: reshapedPaddedX},\n backend,\n attrs: {perm: permutedReshapedPaddedPermutation}\n });\n\n const result =\n reshape({inputs: {x: paddedXT}, backend, attrs: {shape: flattenShape}});\n\n toDispose.push(paddedX);\n toDispose.push(reshapedPaddedX);\n toDispose.push(paddedXT);\n\n toDispose.forEach(t => backend.disposeIntermediateTensorInfo(t));\n\n return result;\n};\n\nexport const spaceToBatchNDConfig: KernelConfig = {\n kernelName: SpaceToBatchND,\n backendName: 'webgl',\n kernelFunc: spaceToBatchND as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, SparseFillEmptyRows, SparseFillEmptyRowsInputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {sparseFillEmptyRowsImplCPU} from '../kernel_utils/shared';\n\nexport function sparseFillEmptyRows(args: {\n inputs: SparseFillEmptyRowsInputs,\n backend: MathBackendWebGL\n}): [TensorInfo, TensorInfo, TensorInfo, TensorInfo] {\n const {inputs, backend} = args;\n const {indices, values, denseShape, defaultValue} = inputs;\n if (denseShape.shape.length !== 1) {\n throw new Error(`Dense shape must be a vector, saw:\n ${denseShape.shape}`);\n }\n if (indices.shape.length !== 2) {\n throw new Error(`Indices must be a matrix, saw:\n ${indices.shape}`);\n }\n if (values.shape.length !== 1) {\n throw new Error(`Values must be a vector, saw:\n ${values.shape}`);\n }\n if (defaultValue.shape.length !== 0) {\n throw new Error(`Default value must be a scalar, saw:\n ${defaultValue.shape}`);\n }\n\n const $indices = backend.readSync(indices.dataId) as TypedArray;\n const $values = backend.readSync(values.dataId) as TypedArray;\n const $denseShape = backend.readSync(denseShape.dataId) as TypedArray;\n const $defaultValue =\n backend.readSync(defaultValue.dataId)[0] as number;\n\n const [outputIndices, outputIndicesShape, outputValues,\n emptyRowIndicator, reverseIndexMap] =\n sparseFillEmptyRowsImplCPU(\n $indices, indices.shape, indices.dtype, $values, values.dtype,\n $denseShape, $defaultValue);\n return [\n backend.makeTensorInfo(outputIndicesShape, indices.dtype, outputIndices),\n backend.makeTensorInfo(\n [outputIndicesShape[0]], values.dtype, outputValues),\n backend.makeTensorInfo(\n [emptyRowIndicator.length], 'bool',\n new Uint8Array(\n emptyRowIndicator.map((value: boolean) => Number(value)))),\n backend.makeTensorInfo(\n [reverseIndexMap.length], indices.dtype,\n new Int32Array(reverseIndexMap)),\n ];\n}\n\nexport const sparseFillEmptyRowsConfig: KernelConfig = {\n kernelName: SparseFillEmptyRows,\n backendName: 'webgl',\n kernelFunc: sparseFillEmptyRows as unknown as KernelFunc,\n};\n","/**\n * @license\n * 
Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, SparseReshape, SparseReshapeInputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {sparseReshapeImplCPU} from '../kernel_utils/shared';\n\nexport function sparseReshape(\n args: {inputs: SparseReshapeInputs, backend: MathBackendWebGL}):\n [TensorInfo, TensorInfo] {\n const {inputs, backend} = args;\n const {inputIndices, inputShape, newShape} = inputs;\n if (inputIndices.shape.length !== 2) {\n throw new Error(`Input indices should be a matrix but received shape ${\n inputIndices.shape}`);\n }\n if (inputShape.shape.length !== 1) {\n throw new Error(`Input shape should be a vector but received shape ${\n inputShape.shape}`);\n }\n\n if (newShape.shape.length !== 1) {\n throw new Error(\n `Target shape should be a vector but received shape ${newShape.shape}`);\n }\n\n const $inputShape =\n Array.from(backend.readSync(inputShape.dataId) as TypedArray);\n const $inputIndices = backend.readSync(inputIndices.dataId) as TypedArray;\n const targetShape =\n Array.from(backend.readSync(newShape.dataId) as TypedArray);\n\n const [newIndices, indicesShape, outputShape] = sparseReshapeImplCPU(\n $inputIndices, inputIndices.shape, inputIndices.dtype, $inputShape,\n targetShape);\n return [\n backend.makeTensorInfo(indicesShape, inputIndices.dtype, newIndices),\n backend.makeTensorInfo(\n [outputShape.length], newShape.dtype, new Int32Array(outputShape)),\n ];\n}\n\nexport const sparseReshapeConfig: KernelConfig = {\n kernelName: SparseReshape,\n backendName: 'webgl',\n kernelFunc: sparseReshape,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, SparseSegmentMean, SparseSegmentMeanInputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {sparseSegmentReductionImplCPU} from '../kernel_utils/shared';\n\nexport function sparseSegmentMean(\n args: {inputs: SparseSegmentMeanInputs, backend: MathBackendWebGL}):\n TensorInfo {\n const {inputs, backend} = args;\n const {data, indices, segmentIds} = inputs;\n if (data.shape.length < 1) {\n throw new Error(\n `Data should be at least 1 dimensional but received scalar`);\n }\n if (indices.shape.length !== 1) {\n throw new Error(`Indices should be a vector but received shape\n ${indices.shape}`);\n }\n if (segmentIds.shape.length !== 1) {\n throw new Error(`Segment ids should be a vector but received shape\n ${segmentIds.shape}`);\n }\n\n const $data = backend.readSync(data.dataId) as TypedArray;\n const $indices = backend.readSync(indices.dataId) as TypedArray;\n const $segmentIds = backend.readSync(segmentIds.dataId) as TypedArray;\n\n const [outputData, outputDataShape] = sparseSegmentReductionImplCPU(\n $data, data.shape, data.dtype, $indices, $segmentIds, true);\n return backend.makeTensorInfo(outputDataShape, data.dtype, outputData);\n}\n\nexport const sparseSegmentMeanConfig: KernelConfig = {\n kernelName: SparseSegmentMean,\n backendName: 'webgl',\n kernelFunc: sparseSegmentMean as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, SparseSegmentSum, SparseSegmentSumInputs, TensorInfo, TypedArray} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {sparseSegmentReductionImplCPU} from '../kernel_utils/shared';\n\nexport function sparseSegmentSum(\n args: {inputs: SparseSegmentSumInputs, backend: MathBackendWebGL}):\n TensorInfo {\n const {inputs, backend} = args;\n const {data, indices, segmentIds} = inputs;\n if (data.shape.length < 1) {\n throw new Error(\n `Data should be at least 1 dimensional but received scalar`);\n }\n if (indices.shape.length !== 1) {\n throw new Error(`Indices should be a vector but received shape\n ${indices.shape}`);\n }\n if (segmentIds.shape.length !== 1) {\n throw new Error(`Segment ids should be a vector but received shape\n ${segmentIds.shape}`);\n }\n\n const $data = backend.readSync(data.dataId) as TypedArray;\n const $indices = backend.readSync(indices.dataId) as TypedArray;\n const $segmentIds = backend.readSync(segmentIds.dataId) as TypedArray;\n\n const [outputData, outputDataShape] = sparseSegmentReductionImplCPU(\n $data, data.shape, data.dtype, $indices, $segmentIds);\n return backend.makeTensorInfo(outputDataShape, data.dtype, outputData);\n}\n\nexport const sparseSegmentSumConfig: KernelConfig = {\n kernelName: SparseSegmentSum,\n backendName: 'webgl',\n kernelFunc: sparseSegmentSum as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, Rank, SparseToDense, SparseToDenseAttrs, SparseToDenseInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {scatterImplCPU} from '../kernel_utils/shared';\nimport {ScatterProgram} from '../scatter_gpu';\n\nimport {reshape} from './Reshape';\n\nexport function sparseToDense(args: {\n inputs: SparseToDenseInputs,\n backend: MathBackendWebGL,\n attrs: SparseToDenseAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {sparseIndices, sparseValues, defaultValue} = inputs;\n const {outputShape} = attrs;\n\n const {sliceRank, numUpdates, sliceSize, strides, outputSize} =\n backend_util.calculateShapes(sparseValues, sparseIndices, outputShape);\n const sumDupeIndices = false;\n\n if (sparseValues.dtype === 'string') {\n const indicesBuf = backend.bufferSync(sparseIndices);\n const updatesBuf = backend.bufferSync(sparseValues);\n const $defaultValue = util.decodeString(\n backend.readSync(defaultValue.dataId)[0] as Uint8Array);\n const outBuf = scatterImplCPU(\n indicesBuf, updatesBuf, outputShape, outputSize, sliceSize, numUpdates,\n sliceRank, strides, $defaultValue, sumDupeIndices);\n return backend.makeTensorInfo(outputShape, outBuf.dtype, outBuf.values);\n }\n const program = new ScatterProgram(\n numUpdates, sliceRank, sparseIndices.shape.length,\n sparseValues.shape.length, strides, [outputSize, 1], sumDupeIndices);\n\n const res = backend.runWebGLProgram(\n program, [sparseValues, sparseIndices, defaultValue], sparseValues.dtype);\n\n const reshaped =\n reshape({inputs: {x: res}, backend, attrs: {shape: outputShape}});\n\n backend.disposeIntermediateTensorInfo(res);\n return reshaped;\n}\n\nexport const sparseToDenseConfig: KernelConfig = {\n kernelName: SparseToDense,\n backendName: 'webgl',\n kernelFunc: sparseToDense as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, KernelConfig, KernelFunc, SplitV, SplitVAttrs, SplitVInputs, TensorInfo, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {slice} from './Slice';\n\nexport function splitV(\n args:\n {inputs: SplitVInputs, backend: MathBackendWebGL, attrs: SplitVAttrs}):\n TensorInfo[] {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {numOrSizeSplits, axis} = attrs;\n\n const $axis = util.parseAxisParam(axis, x.shape)[0];\n const splitSizes = backend_util.prepareSplitSize(x, numOrSizeSplits, $axis);\n\n const xRank = x.shape.length;\n const begin = new Array(xRank).fill(0);\n const size = x.shape.slice();\n\n return splitSizes.map(s => {\n const sliceSize = [...size];\n sliceSize[$axis] = s;\n const sliceT =\n slice({inputs: {x}, backend, attrs: {begin, size: sliceSize}});\n begin[$axis] += s;\n return sliceT;\n });\n}\n\nexport const splitVConfig: KernelConfig = {\n kernelName: SplitV,\n backendName: 'webgl',\n kernelFunc: splitV as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Sqrt} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\nimport {sqrtImplCPU} from '../kernel_utils/shared';\n\nconst SQRT = `return sqrt(x);`;\n\nexport const sqrt = unaryKernelFunc(\n {opSnippet: SQRT, packedOpSnippet: SQRT, cpuKernelImpl: sqrtImplCPU});\n\nexport const sqrtConfig: KernelConfig = {\n kernelName: Sqrt,\n backendName: 'webgl',\n kernelFunc: sqrt as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2019 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Square} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst SQUARE = `return x * x;`;\n\nexport const square = unaryKernelFunc({opSnippet: SQUARE});\n\nexport const squareConfig: KernelConfig = {\n kernelName: Square,\n backendName: 'webgl',\n kernelFunc: square,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, SquaredDifference} from '@tensorflow/tfjs-core';\n\nimport {binaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst SQUARED_DIFFERENCE = 'return (a - b) * (a - b);';\n\nexport const squaredDifference = binaryKernelFunc(\n {opSnippet: SQUARED_DIFFERENCE, packedOpSnippet: SQUARED_DIFFERENCE});\n\nexport const squaredDifferenceConfig: KernelConfig = {\n kernelName: SquaredDifference,\n backendName: 'webgl',\n kernelFunc: squaredDifference,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, Step, StepAttrs, TensorInfo, UnaryInputs} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {CHECK_NAN_SNIPPET, UnaryOpProgram} from '../unaryop_gpu';\n\nexport function step(\n {inputs, attrs, backend}:\n {inputs: UnaryInputs, attrs: StepAttrs, backend: MathBackendWebGL}):\n TensorInfo {\n const {x} = inputs;\n const opSnippet = CHECK_NAN_SNIPPET + `\n return x > 0.0 ? 
1.0 : float(${attrs.alpha});\n `;\n\n const program = new UnaryOpProgram(x.shape, opSnippet);\n\n return backend.runWebGLProgram(program, [x], x.dtype);\n}\n\nexport const stepConfig: KernelConfig = {\n kernelName: Step,\n backendName: 'webgl',\n kernelFunc: step as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {getCoordsDataType} from './shader_compiler';\n\nexport class StridedSliceProgram implements GPGPUProgram {\n variableNames = ['x'];\n outputShape: number[];\n userCode: string;\n\n constructor(begin: number[], strides: number[], size: number[]) {\n this.outputShape = size;\n const rank = size.length;\n const inputDtype = getCoordsDataType(size.length);\n const dtype = getCoordsDataType(size.length);\n\n let newCoords = '';\n if (rank === 1) {\n newCoords = 'coords * strides + begin';\n } else {\n let outputAxis = 0;\n newCoords =\n size.map((_, i) => {\n outputAxis++;\n return size.length === 1 ?\n `coords * strides[${i}] + begin[${i}]` :\n `coords[${outputAxis - 1}] * strides[${i}] + begin[${i}]`;\n })\n .join(',');\n }\n\n this.userCode = `\n ${inputDtype} begin = ${inputDtype}(${begin});\n ${inputDtype} strides = ${inputDtype}(${strides});\n\n void main() {\n ${dtype} coords = getOutputCoords();\n setOutput(getX(${newCoords}));\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {buffer, KernelConfig, KernelFunc, Rank, slice_util, StridedSlice, StridedSliceAttrs, StridedSliceInputs, TensorBuffer, TensorInfo, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {stridedSliceImplCPU} from '../kernel_utils/shared';\nimport {StridedSliceProgram} from '../strided_slice_gpu';\n\nimport {reshape} from './Reshape';\nimport {slice} from './Slice';\n\nexport function stridedSlice(args: {\n inputs: StridedSliceInputs,\n backend: MathBackendWebGL,\n attrs: StridedSliceAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {\n begin,\n end,\n strides,\n beginMask,\n endMask,\n ellipsisMask,\n newAxisMask,\n shrinkAxisMask\n } = attrs;\n\n const {\n finalShapeSparse,\n finalShape,\n isIdentity,\n sliceDim0,\n isSimpleSlice,\n begin: $begin,\n end: $end,\n strides: $strides\n } =\n slice_util.sliceInfo(\n x.shape, begin, end, strides, beginMask, endMask, ellipsisMask,\n newAxisMask, shrinkAxisMask);\n\n let result;\n\n if (isIdentity) {\n // Optimization #1, slice is a no-op plus reshape\n result = reshape({inputs: {x}, backend, attrs: {shape: finalShape}});\n } else if (sliceDim0 || isSimpleSlice) {\n // Optimization #2, slice is memory contiguous (only occurs in dim 0)\n util.assert(\n x.shape.length >= 1,\n () => `Input must have rank at least 1, got: ${x.shape.length}`);\n\n const size = slice_util.computeOutShape($begin, $end, $strides);\n // To tolerate begin[0] > end[0] (a 0-output slice), we min(begin, end).\n const sliced = slice({inputs: {x}, backend, attrs: {begin: $begin, size}});\n result =\n reshape({inputs: {x: sliced}, backend, attrs: {shape: finalShape}});\n backend.disposeIntermediateTensorInfo(sliced);\n } else {\n const shouldExecuteOnCPU = backend.shouldExecuteOnCPU([x]);\n if (shouldExecuteOnCPU) {\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const values = backend.readSync(x.dataId) as TypedArray;\n // tslint:disable-next-line: no-unnecessary-type-assertion\n const xBuf = buffer(x.shape, x.dtype, values) as TensorBuffer;\n const resultValues =\n stridedSliceImplCPU(finalShapeSparse, xBuf, $strides, $begin);\n result = backend.makeTensorInfo(finalShape, x.dtype, resultValues.values);\n } else {\n const program =\n new StridedSliceProgram($begin, $strides, finalShapeSparse);\n result = backend.runWebGLProgram(program, [x], x.dtype);\n }\n }\n\n const resultReshaped =\n reshape({inputs: {x: result}, backend, attrs: {shape: finalShape}});\n\n backend.disposeIntermediateTensorInfo(result);\n\n return resultReshaped;\n}\n\nexport const stridedSliceConfig: KernelConfig = {\n kernelName: StridedSlice,\n backendName: 'webgl',\n kernelFunc: stridedSlice as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, StringNGrams, StringNGramsAttrs, StringNGramsInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {stringNGramsImplCPU} from '../kernel_utils/shared';\n\nexport function stringNGrams(args: {\n inputs: StringNGramsInputs,\n backend: MathBackendWebGL,\n attrs: StringNGramsAttrs\n}): [TensorInfo, TensorInfo] {\n const {inputs, backend, attrs} = args;\n const {\n separator,\n nGramWidths,\n leftPad,\n rightPad,\n padWidth,\n preserveShortSequences\n } = attrs;\n const {data, dataSplits} = inputs;\n const $data = backend.readSync(data.dataId) as Uint8Array[];\n const $dataSplits = backend.readSync(dataSplits.dataId) as Int32Array;\n\n const [nGrams, nGramsSplits] = stringNGramsImplCPU(\n $data, $dataSplits, separator, nGramWidths, leftPad, rightPad, padWidth,\n preserveShortSequences);\n return [\n backend.makeTensorInfo([nGrams.length], 'string', nGrams),\n backend.makeTensorInfo(dataSplits.shape, 'int32', nGramsSplits),\n ];\n}\n\nexport const stringNGramsConfig: KernelConfig = {\n kernelName: StringNGrams,\n backendName: 'webgl',\n kernelFunc: stringNGrams as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, StringSplit, StringSplitAttrs, StringSplitInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {stringSplitImplCPU} from '../kernel_utils/shared';\n\nexport function stringSplit(args: {\n inputs: StringSplitInputs,\n backend: MathBackendWebGL,\n attrs: StringSplitAttrs\n}): [TensorInfo, TensorInfo, TensorInfo] {\n const {inputs, backend, attrs} = args;\n const {skipEmpty} = attrs;\n const {input, delimiter} = inputs;\n\n if (input.dtype !== 'string') {\n throw new Error('Input must be of datatype string');\n }\n if (input.shape.length !== 1) {\n throw new Error(`Input must be a vector, got shape: ${input.shape}`);\n }\n if (delimiter.shape.length !== 0) {\n throw new Error(\n `Delimiter must be a scalar, got shape: ${delimiter.shape}`);\n }\n\n const $input = backend.readSync(input.dataId) as Uint8Array[];\n const $delimiter = backend.readSync(delimiter.dataId)[0] as Uint8Array;\n\n const [indices, values, shape] =\n stringSplitImplCPU($input, $delimiter, skipEmpty);\n const outputSize = values.length;\n return [\n backend.makeTensorInfo([outputSize, 2], 'int32', indices),\n backend.makeTensorInfo([outputSize], 'string', values),\n backend.makeTensorInfo([2], 'int32', new Int32Array(shape))\n ];\n}\n\nexport const stringSplitConfig: KernelConfig = {\n kernelName: StringSplit,\n backendName: 'webgl',\n kernelFunc: stringSplit as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, StringToHashBucketFast, StringToHashBucketFastAttrs, StringToHashBucketFastInputs, TensorInfo} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {stringToHashBucketFastImplCPU} from '../kernel_utils/shared';\n\nexport function stringToHashBucketFast(args: {\n inputs: StringToHashBucketFastInputs,\n backend: MathBackendWebGL,\n attrs: StringToHashBucketFastAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {numBuckets} = attrs;\n const {input} = inputs;\n\n if (input.dtype !== 'string') {\n throw new Error('Input must be of datatype string');\n }\n if (numBuckets <= 0) {\n throw new Error(`Number of buckets must be at least 1`);\n }\n\n const $input = backend.readSync(input.dataId) as Uint8Array[];\n\n const output = stringToHashBucketFastImplCPU($input, numBuckets);\n return backend.makeTensorInfo(input.shape, 'int32', output);\n}\n\nexport const stringToHashBucketFastConfig: KernelConfig = {\n kernelName: StringToHashBucketFast,\n backendName: 'webgl',\n kernelFunc: stringToHashBucketFast as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Tan} from '@tensorflow/tfjs-core';\n\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst TAN = `return tan(x);`;\n\nexport const tan = unaryKernelFunc({opSnippet: TAN});\n\nexport const tanConfig: KernelConfig = {\n kernelName: Tan,\n backendName: 'webgl',\n kernelFunc: tan,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, Tanh} from '@tensorflow/tfjs-core';\nimport {unaryKernelFunc} from '../kernel_utils/kernel_funcs_utils';\n\nconst TANH = `\n float e2x = exp(-2.0 * abs(x));\n return sign(x) * (1.0 - e2x) / (1.0 + e2x);\n`;\n\nexport const tanh = unaryKernelFunc({opSnippet: TANH});\n\nexport const tanhConfig: KernelConfig = {\n kernelName: Tanh,\n backendName: 'webgl',\n kernelFunc: tanh,\n};\n","/**\n * @license\n * Copyright 2017 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\nimport {getCoordsDataType} from './shader_compiler';\n\nexport class TileProgram implements GPGPUProgram {\n variableNames = ['A'];\n outputShape: number[];\n userCode: string;\n rank: number;\n\n constructor(aShape: number[], reps: number[]) {\n const outputShape: number[] = new Array(aShape.length);\n for (let i = 0; i < outputShape.length; i++) {\n outputShape[i] = aShape[i] * reps[i];\n }\n this.outputShape = outputShape;\n this.rank = outputShape.length;\n const dtype = getCoordsDataType(this.rank);\n const sourceCoords = getSourceCoords(aShape);\n\n this.userCode = `\n void main() {\n ${dtype} resRC = getOutputCoords();\n setOutput(getA(${sourceCoords}));\n }\n `;\n }\n}\n\nfunction getSourceCoords(aShape: number[]): string {\n const rank = aShape.length;\n if (rank > 5) {\n throw Error(`Tile for rank ${rank} is not yet supported`);\n }\n if (rank === 1) {\n return `imod(resRC, ${aShape[0]})`;\n }\n\n const currentCoords = ['resRC.x', 'resRC.y', 'resRC.z', 'resRC.w', 'resRC.u'];\n\n const sourceCoords = [];\n for (let i = 0; i < aShape.length; i++) {\n sourceCoords.push(`imod(${currentCoords[i]}, ${aShape[i]})`);\n }\n return sourceCoords.join();\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {buffer, KernelConfig, KernelFunc, TensorInfo, Tile, TileAttrs, TileInputs, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {tileImplCPU} from '../kernel_utils/shared';\nimport {TileProgram} from '../tile_gpu';\n\nexport function tile(\n params: {inputs: TileInputs, backend: MathBackendWebGL, attrs: TileAttrs}):\n TensorInfo {\n const {inputs, backend, attrs} = params;\n const {x} = inputs;\n const {reps} = attrs;\n\n // tile gpu program cannot handle rank > 5 case.\n if (x.dtype === 'string' || x.shape.length > 5) {\n // Even thought string tensor is always on CPU, just to be consistent on how\n // to access tensor data.\n const data = backend.readSync(x.dataId);\n const value = x.dtype === 'string' ?\n (data as Uint8Array[]).map(d => util.decodeString(d)) :\n data as TypedArray;\n const buf = buffer(x.shape, x.dtype, value);\n const outBuf = tileImplCPU(buf, reps);\n return backend.makeTensorInfo(outBuf.shape, outBuf.dtype, outBuf.values);\n }\n\n const program = new TileProgram(x.shape, reps);\n const output = backend.runWebGLProgram(program, [x], x.dtype);\n\n return output;\n}\n\nexport const tileConfig: KernelConfig = {\n kernelName: Tile,\n backendName: 'webgl',\n kernelFunc: tile as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {GPGPUProgram} from './gpgpu_math';\nimport {UniformType} from './shader_compiler';\n\n// Based on Algorithm 2 of Bitonic Top K, ref:\n// https://anilshanbhag.in/static/papers/gputopk_sigmod18.pdf\n// The original algorithm is based on computing the top K only, however\n// since for TFJS we require the indices of the top K values as well then the\n// algorithm found here is a bit modified. 
Rather than producing the values\n// at each step, the indices containing the top K are generated instead.\n// The output values are not generated to reduce the number of outputs in the\n// GPU, the values can easily be retrieved from the indices using a gather\n// op.\nexport class SwapProgram implements GPGPUProgram {\n variableNames = ['x', 'indices'];\n outputShape: number[];\n userCode: string;\n // |n| Size of the original input of TopK.\n // |firstPass|indicates if this is the first time swap is being used which\n // means no indices input containing the top K is present yet.\n // |inc| Swaps pairs of indices (0, inc), (1, inc + 1), (2, inc + 2) ...\n customUniforms = [\n {name: 'n', type: 'int' as UniformType},\n {name: 'firstPass', type: 'int' as UniformType},\n {name: 'negativeInf', type: 'float' as UniformType},\n {name: 'dir', type: 'int' as UniformType},\n {name: 'inc', type: 'int' as UniformType}\n ];\n\n /**\n * @param shape desired output shape (can be larger than input shape, output\n * will be padded with -Infinity)\n */\n constructor(shape: number[]) {\n this.outputShape = shape;\n\n this.userCode = `\n void main() {\n ivec2 coords = getOutputCoords();\n int batch = coords[0];\n int elemIdx = coords[1];\n\n // We compare elements pair-wise within a group of size 2 * inc.\n // The comparing rule for each group alternates between ascending\n // and descending. Within each group, we compare each pair at\n // positions i and i+inc. To decide whether an element at position i\n // is x0 or x1, we mod it by 2 * inc, if the result is smaller than\n // inc, it is in the first half of the group, we denote it as x0,\n // otherwise we denote it as x1.\n // For example, as shown in the Bitonic top K paper referenced above,\n // Figure5(a) shows that element[1] is in the\n // second half of the group when group size is 2, but it is in the\n // first half of the group when group size is 4.\n\n bool isFirstInPair = imod(elemIdx, 2 * inc) < inc;\n int i = isFirstInPair ? elemIdx : elemIdx - inc;\n\n int i0 = firstPass == 1 ? i : int(getIndices(batch, i));\n int i1 = firstPass == 1 ? i + inc : int(getIndices(batch, i + inc));\n float x0 = i0 < n ? getX(batch, i0) : negativeInf;\n float x1 = i1 < n ? 
getX(batch, i1) : negativeInf;\n\n // Denotes which direction indices are in (ascending or descending).\n bool reverse = imod(elemIdx, 2 * dir) >= dir;\n bool isGreater = x0 > x1 || (x0 == x1 && i1 > i0);\n if (reverse == isGreater) { // Elements in opposite order of direction\n int iTemp = i0;\n i0 = i1;\n i1 = iTemp;\n }\n if (isFirstInPair) {\n setOutput(float(i0));\n } else {\n setOutput(float(i1));\n }\n }\n `;\n }\n}\n\nexport class MergeProgram implements GPGPUProgram {\n variableNames = ['x', 'indices'];\n outputShape: number[];\n userCode: string;\n // |n| Size of the original input of TopK\n // |firstPass| indicates if this is the first time swap is being used which\n // means no indices input containing the top K is present yet.\n // |k| Top k elements desired\n customUniforms = [\n {name: 'n', type: 'int' as UniformType},\n {name: 'firstPass', type: 'int' as UniformType},\n {name: 'k', type: 'int' as UniformType}\n ];\n\n /**\n * @param shape desired output shape (must be half of the input size)\n */\n constructor(shape: number[]) {\n this.outputShape = shape;\n\n this.userCode = `\n void main() {\n // Takes max of indices (0, k), (1, k + 1), (2, k + 2) ...\n ivec2 coords = getOutputCoords();\n int batch = coords[0];\n int elemIdx = coords[1];\n\n // The output size is half of the previous size.\n // If the previous sequence is | | | | _ _ _ _ | | | | _ _ _ _ (k=4),\n // we only need to output the indices at positions |, the indices at\n // positions _ can be thrown away, see Figure5(b) After Phase 2\n // (Merge phase) in the Bitonic Top K paper referenced above.\n // For example, the paper shows we only need to output the orange bars.\n // The output sequence should look like this | | | | | | | |.\n // Because the sequence is halved, to map the output index back\n // to the previous sequence to find the corresponding value,\n // we need to double the index. When we double the index,\n // we basically interpolate a position, so 2i looks like\n // | _ | _ | _ | _ | _ | _ | _. We move the | to the first k position\n // of each 2k positions by - elemIdx % k. E.g. for output at\n // index 4,5,6,7, we want to get the corresponding element at\n // original index 8,9,10,11, for output at index 8,9,10,11,\n // we want to get the corresponding element at original index\n // 16,17,18,19, so on and so forth.\n\n int i = elemIdx < k ? elemIdx : (elemIdx * 2 - imod(elemIdx, k));\n int i0 = firstPass == 1 ? i : int(getIndices(batch, i));\n int i1 = firstPass == 1 ? i + k : int(getIndices(batch, i + k));\n\n float x0 = getX(batch, i0);\n float x1 = i1 < n ? getX(batch, i1) : x0;\n\n setOutput(x0 >= x1 ? float(i0) : float(i1));\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {env, KernelConfig, KernelFunc, NumericDataType, TensorInfo, TopK, TopKAttrs, TopKInputs, TypedArray, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {topKImplCPU} from '../kernel_utils/shared';\nimport {MergeProgram, SwapProgram} from '../top_k_gpu';\nimport {fill} from './Fill';\nimport {gatherV2} from './GatherV2';\nimport {reshape} from './Reshape';\nimport {slice} from './Slice';\n\nfunction disposeIntermediateTensorInfoOrNull(\n backend: MathBackendWebGL, tensorInfo: TensorInfo) {\n if (tensorInfo !== null) {\n backend.disposeIntermediateTensorInfo(tensorInfo);\n }\n}\n\nfunction roundUpToPow2(num: number) {\n let pow2 = 1;\n while (pow2 < num) {\n pow2 *= 2;\n }\n return pow2;\n}\n\n// Based on Algorithm 2 of Bitonic Top K, ref:\n// https://anilshanbhag.in/static/papers/gputopk_sigmod18.pdf\nexport function topK(\n args: {inputs: TopKInputs, backend: MathBackendWebGL, attrs: TopKAttrs}):\n TensorInfo[] {\n const {inputs, backend, attrs} = args;\n const {x} = inputs;\n const {k, sorted} = attrs;\n\n // Empirically determined constant used to determine last dim threshold for\n // handing off execution to the CPU.\n const TOPK_LAST_DIM_CPU_HANDOFF_SIZE_THRESHOLD =\n env().getNumber('TOPK_LAST_DIM_CPU_HANDOFF_SIZE_THRESHOLD');\n\n // Empirically determined constant used to determine k threshold for handing\n // off execution to the CPU.\n const TOPK_K_CPU_HANDOFF_THRESHOLD =\n env().getNumber('TOPK_K_CPU_HANDOFF_THRESHOLD');\n\n const xShape = x.shape;\n const lastDim = xShape[xShape.length - 1];\n\n if (backend.shouldExecuteOnCPU([x]) ||\n lastDim < TOPK_LAST_DIM_CPU_HANDOFF_SIZE_THRESHOLD ||\n k > TOPK_K_CPU_HANDOFF_THRESHOLD) {\n const xVals = backend.readSync(x.dataId) as TypedArray;\n const [allTopKVals, allTopKIndices] =\n topKImplCPU(xVals, xShape, x.dtype as NumericDataType, k, sorted);\n\n return [\n backend.makeTensorInfo(\n allTopKVals.shape, allTopKVals.dtype, allTopKVals.values),\n backend.makeTensorInfo(\n allTopKIndices.shape, allTopKIndices.dtype, allTopKIndices.values)\n ];\n }\n\n if (k === 0) {\n xShape[xShape.length - 1] = 0;\n return [\n backend.makeTensorInfo(xShape, x.dtype, []),\n backend.makeTensorInfo(xShape, 'int32', [])\n ];\n }\n\n if (lastDim === 1 /* firstPass */) {\n return [\n x, fill({attrs: {shape: xShape, dtype: 'int32', value: 0}, backend})\n ];\n }\n\n // Eagerly unpack x input since it is passed in to all the shaders which\n // require unpacked inputs.\n const xtexData = backend.texData.get(x.dataId);\n const xIsPacked = xtexData !== null && xtexData.isPacked;\n const xUnPacked = xIsPacked ? 
backend.unpackTensor(x) : x;\n\n // Reshape into a 2d tensor [batch, lastDim] and compute topk along lastDim.\n const xSize = util.sizeFromShape(xShape);\n const batch = xSize / lastDim;\n const x2D = reshape(\n {inputs: {x: xUnPacked}, attrs: {shape: [batch, lastDim]}, backend});\n\n if (xIsPacked) {\n disposeIntermediateTensorInfoOrNull(backend, xUnPacked);\n }\n\n const kPow2 = roundUpToPow2(k);\n const lastDimPow2 = roundUpToPow2(lastDim);\n\n // Only the indices containing the top K are kept at every step to reduce\n // number of outputs in the GPU algorithms, so once the final set of indices\n // is computed then gather is used to grab the corresponding values\n // from the original input.\n let indices: TensorInfo = null;\n\n // GPU algorithm always takes in an indices input but this input is not used\n // on the first run of a GPU algorithm, therefore if indices is null we simply\n // pass in x2D instead of it but the value will not actually be used\n const getInputs = () => indices === null ? [x2D, x2D] : [x2D, indices];\n\n const runSwap = (dir: number, inc: number, shape: number[]) => {\n const inputs = getInputs();\n const program = new SwapProgram(shape);\n const fistPass = indices === null ? 1 : 0;\n const customValues =\n [[lastDim], [fistPass], [Number.NEGATIVE_INFINITY], [dir], [inc]];\n const prevIndices = indices;\n indices = backend.runWebGLProgram(program, inputs, 'int32', customValues);\n disposeIntermediateTensorInfoOrNull(backend, prevIndices);\n };\n\n // Step 1: local sort\n for (let len = 1; len < kPow2; len *= 2) {\n const dir = len * 2;\n for (let inc = len; inc >= 1; inc /= 2) {\n runSwap(dir, inc, [batch, lastDimPow2]);\n }\n }\n\n // Step 2: merge\n for (let indicesSize = lastDimPow2; indicesSize > kPow2; indicesSize /= 2) {\n const inputs = getInputs();\n const mergeProgram = new MergeProgram([batch, indicesSize / 2]);\n const firstPass = indices === null ? 1 : 0;\n const customValues = [[lastDim], [firstPass], [kPow2]];\n const prevIndices = indices;\n indices =\n backend.runWebGLProgram(mergeProgram, inputs, 'int32', customValues);\n disposeIntermediateTensorInfoOrNull(backend, prevIndices);\n\n // Step 3: rebuild\n const len = kPow2 / 2;\n const dir = len * 2;\n for (let inc = len; inc >= 1; inc /= 2) {\n runSwap(dir, inc, indices.shape);\n }\n }\n\n // Keep only the requested top K results instead of kPow2\n let prevIndices = indices;\n indices = slice(\n {inputs: {x: indices}, backend, attrs: {begin: 0, size: [batch, k]}});\n disposeIntermediateTensorInfoOrNull(backend, prevIndices);\n\n // Gather values on last dimension\n let values = gatherV2(\n {inputs: {x: x2D, indices}, backend, attrs: {axis: 1, batchDims: 1}});\n disposeIntermediateTensorInfoOrNull(backend, x2D);\n\n // Reshape back to the original input shape, except that the last\n // dimension is k.\n const newShape = xShape.slice(0, -1);\n newShape.push(k);\n\n prevIndices = indices;\n indices = reshape({inputs: {x: indices}, attrs: {shape: newShape}, backend});\n disposeIntermediateTensorInfoOrNull(backend, prevIndices);\n\n const prevValues = values;\n values = reshape({inputs: {x: values}, attrs: {shape: newShape}, backend});\n disposeIntermediateTensorInfoOrNull(backend, prevValues);\n\n return [values, indices];\n}\n\nexport const topKConfig: KernelConfig = {\n kernelName: TopK,\n backendName: 'webgl',\n kernelFunc: topK as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class TransformProgram implements GPGPUProgram {\n variableNames = ['Image', 'Transforms'];\n outputShape: number[];\n userCode: string;\n\n constructor(\n imageHeight: number, imageWidth: number,\n interpolation: 'nearest'|'bilinear',\n fillMode: 'constant'|'reflect'|'wrap'|'nearest', fillValue: number,\n outShape: [number, number, number, number]) {\n this.outputShape = outShape;\n const interpolationModeId = interpolation === 'nearest' ? 1 : 2;\n let fillModeId;\n switch (fillMode) {\n case 'constant':\n fillModeId = 1;\n break;\n case 'reflect':\n fillModeId = 2;\n break;\n case 'wrap':\n fillModeId = 3;\n break;\n case 'nearest':\n fillModeId = 4;\n break;\n default:\n fillModeId = 1;\n break;\n }\n this.userCode = `\n float mapCoord(float outCoord, float len) {\n float inCoord = outCoord;\n if(${fillModeId} == 2) {\n if (inCoord < 0.0) {\n if (len <= 1.0) {\n inCoord = 0.0;\n } else {\n float sz2 = 2.0 * len;\n if (inCoord < sz2) {\n inCoord = sz2 * float(int(float(-inCoord / sz2))) +\n inCoord;\n }\n inCoord = inCoord < -len ? 
inCoord + sz2 : -inCoord - 1.0;\n }\n } else if (inCoord > len - 1.0) {\n if (len <= 1.0) {\n inCoord = 0.0;\n } else {\n float sz2 = 2.0 * len;\n inCoord -= sz2 * float(int(float(inCoord / sz2)));\n if (inCoord >= len) {\n inCoord = sz2 - inCoord - 1.0;\n }\n }\n }\n return clamp(inCoord, 0.0, len - 1.0);\n } else if (${fillModeId} == 3) {\n if (inCoord < 0.0) {\n if (len <= 1.0) {\n inCoord = 0.0;\n } else {\n float sz = len - 1.0;\n inCoord += len * (float(int(float(-inCoord / sz))) + 1.0);\n }\n } else if (inCoord > len - 1.0) {\n if (len <= 1.0) {\n inCoord = 0.0;\n } else {\n float sz = len - 1.0;\n inCoord -= len * float(int(float(inCoord / sz)));\n }\n }\n return clamp(inCoord, 0.0, len - 1.0);\n } else if (${fillModeId} == 4) {\n return clamp(outCoord, 0.0, len - 1.0);\n } else {\n return outCoord;\n }\n }\n\n float readWithFillValue(int batch, int coordY, int coordX,\n int channel) {\n float outputValue;\n if (0 <= coordY && coordY < ${\n imageHeight} && 0 <= coordX && coordX < ${imageWidth}) {\n outputValue = getImage(batch, coordY, coordX, channel);\n } else {\n outputValue = float(${fillValue});\n }\n return outputValue;\n }\n\n void main() {\n ivec4 coords = getOutputCoords();\n float outputValue;\n int batch = coords[0];\n int x = coords[2];\n int y = coords[1];\n int channel = coords[3];\n float xf = float(x);\n float yf = float(y);\n float a1 = getTransforms(batch, 0);\n float a2 = getTransforms(batch, 1);\n float a3 = getTransforms(batch, 2);\n float b1 = getTransforms(batch, 3);\n float b2 = getTransforms(batch, 4);\n float b3 = getTransforms(batch, 5);\n float c1 = getTransforms(batch, 6);\n float c2 = getTransforms(batch, 7);\n float projection = c1 * xf + c2 * yf + 1.0;\n if (projection == 0.0) {\n outputValue = float(${fillValue});\n } else {\n float inX = (a1 * xf + a2 * yf + a3) / projection;\n float inY = (b1 * xf + b2 * yf + b3) / projection;\n float mapX = mapCoord(inX, float(${imageWidth}));\n float mapY = mapCoord(inY, float(${imageHeight}));\n\n if (${interpolationModeId} == 1) {\n int coordY = int(round(mapY));\n int coordX = int(round(mapX));\n outputValue = readWithFillValue(batch, coordY, coordX,\n channel);\n } else {\n float yFloor = floor(mapY);\n float xFloor = floor(mapX);\n float yCeil = yFloor + 1.0;\n float xCeil = xFloor + 1.0;\n float valueYFloor = (xCeil - mapX) *\n readWithFillValue(batch, int(yFloor), int(xFloor), channel) +\n (mapX - xFloor) *\n readWithFillValue(batch, int(yFloor), int(xCeil), channel);\n float valueYCeil = (xCeil - mapX) *\n readWithFillValue(batch, int(yCeil), int(xFloor), channel) +\n (mapX - xFloor) *\n readWithFillValue(batch, int(yCeil), int(xCeil), channel);\n outputValue = (yCeil - mapY) * valueYFloor +\n (mapY - yFloor) * valueYCeil;\n }\n }\n setOutput(outputValue);\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2021 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, TensorInfo, Transform, TransformAttrs, TransformInputs} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {TransformProgram} from '../transform_gpu';\n\nexport function transform(args: {\n inputs: TransformInputs,\n backend: MathBackendWebGL,\n attrs: TransformAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {image, transforms} = inputs;\n const {interpolation, fillMode, fillValue, outputShape} = attrs;\n\n const [batch, imageHeight, imageWidth, numChannels] = image.shape;\n const [outHeight, outWidth] =\n outputShape != null ? outputShape : [imageHeight, imageWidth];\n const outShape =\n [batch, outHeight, outWidth,\n numChannels] as [number, number, number, number];\n\n const program = new TransformProgram(\n imageHeight, imageWidth, interpolation, fillMode, fillValue, outShape);\n return backend.runWebGLProgram(program, [image, transforms], 'float32');\n}\n\nexport const transformConfig: KernelConfig = {\n kernelName: Transform,\n backendName: 'webgl',\n kernelFunc: transform as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the License);\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an AS IS BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, TensorInfo, Unique, UniqueAttrs, UniqueInputs} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {uniqueImplCPU} from '../kernel_utils/shared';\nimport {assertNotComplex} from '../webgl_util';\n\nexport function unique(\n args:\n {inputs: UniqueInputs, attrs: UniqueAttrs, backend: MathBackendWebGL}):\n TensorInfo[] {\n const {inputs, attrs, backend} = args;\n const {axis} = attrs;\n const {x} = inputs;\n assertNotComplex(x, 'unique');\n\n // For now, always forward calculation to the CPU backend.\n console.warn(\n 'WARNING: ',\n 'UI might be locked temporarily as data is being downloaded');\n const values = backend.readSync(x.dataId);\n const {outputValues, outputShape, indices} =\n uniqueImplCPU(values, axis, x.shape, x.dtype);\n return [\n backend.makeTensorInfo(outputShape, x.dtype, outputValues),\n backend.makeTensorInfo([indices.length], 'int32', indices),\n ];\n}\n\nexport const uniqueConfig: KernelConfig = {\n kernelName: Unique,\n backendName: 'webgl',\n kernelFunc: unique as unknown as KernelFunc,\n};\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {KernelConfig, KernelFunc, TensorInfo, Unpack, UnpackAttrs, UnpackInputs} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\n\nimport {reshape} from './Reshape';\nimport {slice} from './Slice';\n\nexport function unpack(\n args:\n {inputs: UnpackInputs, backend: MathBackendWebGL, attrs: UnpackAttrs}):\n TensorInfo[] {\n const {inputs, backend, attrs} = args;\n const {value} = inputs;\n let {axis} = attrs;\n\n if (axis < 0) {\n axis += value.shape.length;\n }\n\n const x = value;\n const xRank = x.shape.length;\n\n const num = value.shape[axis];\n const outShape: number[] = new Array(xRank - 1);\n let outIndex = 0;\n for (let i = 0; i < xRank; i++) {\n if (i !== axis) {\n outShape[outIndex++] = x.shape[i];\n }\n }\n\n const toDispose = [];\n\n const begin = new Array(xRank).fill(0);\n const size = x.shape.slice();\n size[axis] = 1;\n const res: TensorInfo[] = new Array(num);\n for (let i = 0; i < res.length; i++) {\n begin[axis] = i;\n const sliced = slice({inputs: {x}, backend, attrs: {begin, size}});\n const reshaped =\n reshape({inputs: {x: sliced}, backend, attrs: {shape: outShape}});\n res[i] = reshaped;\n\n toDispose.push(sliced);\n }\n\n toDispose.forEach(t => backend.disposeIntermediateTensorInfo(t));\n return res;\n}\n\nexport const unpackConfig: KernelConfig = {\n kernelName: Unpack,\n backendName: 'webgl',\n kernelFunc: unpack as unknown as KernelFunc\n};\n","/**\n * @license\n * Copyright 2018 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util} from '@tensorflow/tfjs-core';\nimport {GPGPUProgram} from './gpgpu_math';\n\nexport class SegmentOpProgram implements GPGPUProgram {\n variableNames = ['x', 'segmentIds'];\n outputShape: number[];\n userCode: string;\n\n constructor(\n segOpInfo: backend_util.segment_util.SegOpInfo,\n segOpType: 'unsortedSegmentSum') {\n const windowSize = segOpInfo.windowSize;\n const batchSize = segOpInfo.batchSize;\n const inSize = segOpInfo.inSize;\n const numSegments = segOpInfo.numSegments;\n const outSize = numSegments * Math.ceil(inSize / windowSize);\n this.outputShape = [batchSize, outSize];\n\n const initializationValue = '0.0';\n const returnValue = `sumValue`;\n\n const windowSizeNearestVec4 = Math.floor(windowSize / 4) * 4;\n const windowSizeVec4Remainder = windowSize % 4;\n\n const updateSnippet = `\n sumValue += dot(values, segFilter);\n `;\n\n let checkValueOutOfBounds = '';\n if (inSize % windowSize > 0) {\n checkValueOutOfBounds = `\n if (inIdx < 0 || inIdx >= ${inSize}) {\n return initializationValue;\n }\n `;\n }\n\n let checkSegmentIdOutOfBounds = '';\n if (inSize % windowSize > 0) {\n checkSegmentIdOutOfBounds = `\n if (inIdx < 0 || inIdx >= ${inSize}) {\n return -1.0;\n }\n `;\n }\n\n this.userCode = `\n const float initializationValue = ${initializationValue};\n\n float getValue(int batch, int inIdx) {\n ${checkValueOutOfBounds}\n return getX(batch, inIdx);\n }\n\n float getSegmentIdAtIndex(int inIdx) {\n ${checkSegmentIdOutOfBounds}\n return getSegmentIds(inIdx);\n }\n\n void main() {\n ivec2 coords = getOutputCoords();\n int batch = coords[0];\n int outIdx = coords[1];\n int inOffset = int(floor(float(outIdx) / float(\n ${numSegments})) * float(${windowSize}));\n int currentSeg = int(mod(float(outIdx), float(${numSegments})));\n\n float sumValue = 0.0;\n\n for (int i = 0; i < ${windowSizeNearestVec4}; i += 4) {\n int inIdx = inOffset + i;\n vec4 values = vec4(\n getValue(batch, inIdx),\n getValue(batch, inIdx + 1),\n getValue(batch, inIdx + 2),\n getValue(batch, inIdx + 3)\n );\n\n vec4 segFilter = vec4(\n int(getSegmentIdAtIndex(inIdx)) == currentSeg ? 1 : 0,\n int(getSegmentIdAtIndex(inIdx + 1)) == currentSeg ? 1 : 0,\n int(getSegmentIdAtIndex(inIdx + 2)) == currentSeg ? 1 : 0,\n int(getSegmentIdAtIndex(inIdx + 3)) == currentSeg ? 1 : 0\n );\n\n ${updateSnippet}\n }\n\n int inIdx = inOffset + ${windowSizeNearestVec4};\n if (${windowSizeVec4Remainder === 1}) {\n vec4 values = vec4(\n getValue(batch, inIdx),\n initializationValue,\n initializationValue,\n initializationValue\n );\n\n int inIdxSeg = int(getSegmentIdAtIndex(inIdx));\n\n vec4 segFilter = vec4(\n int(getSegmentIdAtIndex(inIdx)) == currentSeg ? 
1 : 0,\n 0,\n 0,\n 0\n );\n\n ${updateSnippet}\n } else if (${windowSizeVec4Remainder === 2}) {\n vec4 values = vec4(\n getValue(batch, inIdx),\n getValue(batch, inIdx + 1),\n initializationValue,\n initializationValue\n );\n\n vec4 segFilter = vec4(\n int(getSegmentIdAtIndex(inIdx)) == currentSeg ? 1 : 0,\n int(getSegmentIdAtIndex(inIdx + 1)) == currentSeg ? 1 : 0,\n 0,\n 0\n );\n\n ${updateSnippet}\n } else if (${windowSizeVec4Remainder === 3}) {\n vec4 values = vec4(\n getValue(batch, inIdx),\n getValue(batch, inIdx + 1),\n getValue(batch, inIdx + 2),\n initializationValue\n );\n\n vec4 segFilter = vec4(\n int(getSegmentIdAtIndex(inIdx)) == currentSeg ? 1 : 0,\n int(getSegmentIdAtIndex(inIdx + 1)) == currentSeg ? 1 : 0,\n int(getSegmentIdAtIndex(inIdx + 2)) == currentSeg ? 1 : 0,\n 0\n );\n\n ${updateSnippet}\n }\n setOutput(${returnValue});\n }\n `;\n }\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\nimport {KernelConfig, registerKernel} from '@tensorflow/tfjs-core';\n\nimport {_fusedMatMulConfig} from './kernels/_FusedMatMul';\nimport {absConfig} from './kernels/Abs';\nimport {acosConfig} from './kernels/Acos';\nimport {acoshConfig} from './kernels/Acosh';\nimport {addConfig} from './kernels/Add';\nimport {addNConfig} from './kernels/AddN';\nimport {allConfig} from './kernels/All';\nimport {anyConfig} from './kernels/Any';\nimport {argMaxConfig} from './kernels/ArgMax';\nimport {argMinConfig} from './kernels/ArgMin';\nimport {asinConfig} from './kernels/Asin';\nimport {asinhConfig} from './kernels/Asinh';\nimport {atanConfig} from './kernels/Atan';\nimport {atan2Config} from './kernels/Atan2';\nimport {atanhConfig} from './kernels/Atanh';\nimport {avgPoolConfig} from './kernels/AvgPool';\nimport {avgPool3DConfig} from './kernels/AvgPool3D';\nimport {avgPool3DGradConfig} from './kernels/AvgPool3DGrad';\nimport {avgPoolGradConfig} from './kernels/AvgPoolGrad';\nimport {batchMatMulConfig} from './kernels/BatchMatMul';\nimport {batchNormConfig} from './kernels/BatchNorm';\nimport {batchToSpaceNDConfig} from './kernels/BatchToSpaceND';\nimport {bincountConfig} from './kernels/Bincount';\nimport {broadcastArgsConfig} from './kernels/BroadcastArgs';\nimport {castConfig} from './kernels/Cast';\nimport {ceilConfig} from './kernels/Ceil';\nimport {clipByValueConfig} from './kernels/ClipByValue';\nimport {complexConfig} from './kernels/Complex';\nimport {complexAbsConfig} from './kernels/ComplexAbs';\nimport {concatConfig} from './kernels/Concat';\nimport {conv2DConfig} from './kernels/Conv2D';\nimport {conv2DBackpropFilterConfig} from './kernels/Conv2DBackpropFilter';\nimport {conv2DBackpropInputConfig} from './kernels/Conv2DBackpropInput';\nimport {conv3DConfig} from './kernels/Conv3D';\nimport {conv3DBackpropFilterV2Config} from './kernels/Conv3DBackpropFilterV2';\nimport {conv3DBackpropInputConfig} from 
'./kernels/Conv3DBackpropInputV2';\nimport {cosConfig} from './kernels/Cos';\nimport {coshConfig} from './kernels/Cosh';\nimport {cropAndResizeConfig} from './kernels/CropAndResize';\nimport {cumprodConfig} from './kernels/Cumprod';\nimport {cumsumConfig} from './kernels/Cumsum';\nimport {denseBincountConfig} from './kernels/DenseBincount';\nimport {depthToSpaceConfig} from './kernels/DepthToSpace';\nimport {depthwiseConv2dNativeConfig} from './kernels/DepthwiseConv2dNative';\nimport {depthwiseConv2dNativeBackpropFilterConfig} from './kernels/DepthwiseConv2dNativeBackpropFilter';\nimport {depthwiseConv2dNativeBackpropInputConfig} from './kernels/DepthwiseConv2dNativeBackpropInput';\nimport {diagConfig} from './kernels/Diag';\nimport {dilation2DConfig} from './kernels/Dilation2D';\nimport {einsumConfig} from './kernels/Einsum';\nimport {eluConfig} from './kernels/Elu';\nimport {eluGradConfig} from './kernels/EluGrad';\nimport {equalConfig} from './kernels/Equal';\nimport {erfConfig} from './kernels/Erf';\nimport {expConfig} from './kernels/Exp';\nimport {expandDimsConfig} from './kernels/ExpandDims';\nimport {expm1Config} from './kernels/Expm1';\nimport {fftConfig} from './kernels/FFT';\nimport {fillConfig} from './kernels/Fill';\nimport {flipLeftRightConfig} from './kernels/FlipLeftRight';\nimport {floorConfig} from './kernels/Floor';\nimport {floorDivConfig} from './kernels/FloorDiv';\nimport {fromPixelsConfig} from './kernels/FromPixels';\nimport {fusedConv2DConfig} from './kernels/FusedConv2D';\nimport {fusedDepthwiseConv2DConfig} from './kernels/FusedDepthwiseConv2D';\nimport {gatherNdConfig} from './kernels/GatherNd';\nimport {gatherV2Config} from './kernels/GatherV2';\nimport {greaterConfig} from './kernels/Greater';\nimport {greaterEqualConfig} from './kernels/GreaterEqual';\nimport {identityConfig} from './kernels/Identity';\nimport {ifftConfig} from './kernels/IFFT';\nimport {imagConfig} from './kernels/Imag';\nimport {isFiniteConfig} from './kernels/IsFinite';\nimport {isInfConfig} from './kernels/IsInf';\nimport {isNaNConfig} from './kernels/IsNaN';\nimport {leakyReluConfig} from './kernels/LeakyRelu';\nimport {lessConfig} from './kernels/Less';\nimport {lessEqualConfig} from './kernels/LessEqual';\nimport {linSpaceConfig} from './kernels/LinSpace';\nimport {logConfig} from './kernels/Log';\nimport {log1pConfig} from './kernels/Log1p';\nimport {logicalAndConfig} from './kernels/LogicalAnd';\nimport {logicalNotConfig} from './kernels/LogicalNot';\nimport {logicalOrConfig} from './kernels/LogicalOr';\nimport {LRNConfig} from './kernels/LRN';\nimport {LRNGradConfig} from './kernels/LRNGrad';\nimport {maxConfig} from './kernels/Max';\nimport {maximumConfig} from './kernels/Maximum';\nimport {maxPoolConfig} from './kernels/MaxPool';\nimport {maxPool3DConfig} from './kernels/MaxPool3D';\nimport {maxPool3DGradConfig} from './kernels/MaxPool3DGrad';\nimport {maxPoolGradConfig} from './kernels/MaxPoolGrad';\nimport {maxPoolWithArgmaxConfig} from './kernels/MaxPoolWithArgmax';\nimport {meanConfig} from './kernels/Mean';\nimport {minConfig} from './kernels/Min';\nimport {minimumConfig} from './kernels/Minimum';\nimport {mirrorPadConfig} from './kernels/MirrorPad';\nimport {modConfig} from './kernels/Mod';\nimport {multinomialConfig} from './kernels/Multinomial';\nimport {multiplyConfig} from './kernels/Multiply';\nimport {negConfig} from './kernels/Neg';\nimport {nonMaxSuppressionV3Config} from './kernels/NonMaxSuppressionV3';\nimport {nonMaxSuppressionV4Config} from 
'./kernels/NonMaxSuppressionV4';\nimport {nonMaxSuppressionV5Config} from './kernels/NonMaxSuppressionV5';\nimport {notEqualConfig} from './kernels/NotEqual';\nimport {oneHotConfig} from './kernels/OneHot';\nimport {onesLikeConfig} from './kernels/OnesLike';\nimport {packConfig} from './kernels/Pack';\nimport {padV2Config} from './kernels/PadV2';\nimport {powConfig} from './kernels/Pow';\nimport {preluConfig} from './kernels/Prelu';\nimport {prodConfig} from './kernels/Prod';\nimport {raggedGatherConfig} from './kernels/RaggedGather';\nimport {raggedRangeConfig} from './kernels/RaggedRange';\nimport {raggedTensorToTensorConfig} from './kernels/RaggedTensorToTensor';\nimport {rangeConfig} from './kernels/Range';\nimport {realConfig} from './kernels/Real';\nimport {realDivConfig} from './kernels/RealDiv';\nimport {reciprocalConfig} from './kernels/Reciprocal';\nimport {reluConfig} from './kernels/Relu';\nimport {relu6Config} from './kernels/Relu6';\nimport {reshapeConfig} from './kernels/Reshape';\nimport {resizeBilinearConfig} from './kernels/ResizeBilinear';\nimport {resizeBilinearGradConfig} from './kernels/ResizeBilinearGrad';\nimport {resizeNearestNeighborConfig} from './kernels/ResizeNearestNeighbor';\nimport {resizeNearestNeighborGradConfig} from './kernels/ResizeNearestNeighborGrad';\nimport {reverseConfig} from './kernels/Reverse';\nimport {rotateWithOffsetConfig} from './kernels/RotateWithOffset';\nimport {roundConfig} from './kernels/Round';\nimport {rsqrtConfig} from './kernels/Rsqrt';\nimport {scatterNdConfig} from './kernels/ScatterNd';\nimport {searchSortedConfig} from './kernels/SearchSorted';\nimport {selectConfig} from './kernels/Select';\nimport {seluConfig} from './kernels/Selu';\nimport {sigmoidConfig} from './kernels/Sigmoid';\nimport {signConfig} from './kernels/Sign';\nimport {sinConfig} from './kernels/Sin';\nimport {sinhConfig} from './kernels/Sinh';\nimport {sliceConfig} from './kernels/Slice';\nimport {softmaxConfig} from './kernels/Softmax';\nimport {softplusConfig} from './kernels/Softplus';\nimport {spaceToBatchNDConfig} from './kernels/SpaceToBatchND';\nimport {sparseFillEmptyRowsConfig} from './kernels/SparseFillEmptyRows';\nimport {sparseReshapeConfig} from './kernels/SparseReshape';\nimport {sparseSegmentMeanConfig} from './kernels/SparseSegmentMean';\nimport {sparseSegmentSumConfig} from './kernels/SparseSegmentSum';\nimport {sparseToDenseConfig} from './kernels/SparseToDense';\nimport {splitVConfig} from './kernels/SplitV';\nimport {sqrtConfig} from './kernels/Sqrt';\nimport {squareConfig} from './kernels/Square';\nimport {squaredDifferenceConfig} from './kernels/SquaredDifference';\nimport {stepConfig} from './kernels/Step';\nimport {stridedSliceConfig} from './kernels/StridedSlice';\nimport {stringNGramsConfig} from './kernels/StringNGrams';\nimport {stringSplitConfig} from './kernels/StringSplit';\nimport {stringToHashBucketFastConfig} from './kernels/StringToHashBucketFast';\nimport {subConfig} from './kernels/Sub';\nimport {sumConfig} from './kernels/Sum';\nimport {tanConfig} from './kernels/Tan';\nimport {tanhConfig} from './kernels/Tanh';\nimport {tileConfig} from './kernels/Tile';\nimport {topKConfig} from './kernels/TopK';\nimport {transformConfig} from './kernels/Transform';\nimport {transposeConfig} from './kernels/Transpose';\nimport {uniqueConfig} from './kernels/Unique';\nimport {unpackConfig} from './kernels/Unpack';\nimport {unsortedSegmentSumConfig} from './kernels/UnsortedSegmentSum';\nimport {zerosLikeConfig} from 
'./kernels/ZerosLike';\n\n// List all kernel configs here\nconst kernelConfigs: KernelConfig[] = [\n _fusedMatMulConfig,\n absConfig,\n acosConfig,\n acoshConfig,\n addConfig,\n addNConfig,\n allConfig,\n anyConfig,\n argMaxConfig,\n argMinConfig,\n asinConfig,\n asinhConfig,\n atanConfig,\n atan2Config,\n atanhConfig,\n avgPoolConfig,\n avgPool3DConfig,\n avgPool3DGradConfig,\n avgPoolGradConfig,\n batchMatMulConfig,\n batchNormConfig,\n batchToSpaceNDConfig,\n bincountConfig,\n broadcastArgsConfig,\n castConfig,\n ceilConfig,\n clipByValueConfig,\n complexConfig,\n complexAbsConfig,\n concatConfig,\n conv2DConfig,\n conv2DBackpropFilterConfig,\n conv2DBackpropInputConfig,\n conv3DConfig,\n conv3DBackpropFilterV2Config,\n conv3DBackpropInputConfig,\n cosConfig,\n coshConfig,\n cropAndResizeConfig,\n cumprodConfig,\n cumsumConfig,\n denseBincountConfig,\n depthToSpaceConfig,\n depthwiseConv2dNativeConfig,\n depthwiseConv2dNativeBackpropFilterConfig,\n depthwiseConv2dNativeBackpropInputConfig,\n diagConfig,\n dilation2DConfig,\n einsumConfig,\n eluConfig,\n eluGradConfig,\n equalConfig,\n erfConfig,\n expConfig,\n expandDimsConfig,\n expm1Config,\n fftConfig,\n fillConfig,\n flipLeftRightConfig,\n floorConfig,\n floorDivConfig,\n fromPixelsConfig,\n fusedConv2DConfig,\n fusedDepthwiseConv2DConfig,\n gatherNdConfig,\n gatherV2Config,\n greaterConfig,\n greaterEqualConfig,\n identityConfig,\n ifftConfig,\n imagConfig,\n isFiniteConfig,\n isInfConfig,\n isNaNConfig,\n leakyReluConfig,\n lessConfig,\n lessEqualConfig,\n linSpaceConfig,\n logConfig,\n log1pConfig,\n logicalAndConfig,\n logicalNotConfig,\n logicalOrConfig,\n LRNConfig,\n LRNGradConfig,\n maxConfig,\n maximumConfig,\n maxPoolConfig,\n maxPool3DConfig,\n maxPool3DGradConfig,\n maxPoolGradConfig,\n maxPoolWithArgmaxConfig,\n meanConfig,\n minConfig,\n minimumConfig,\n mirrorPadConfig,\n modConfig,\n multinomialConfig,\n multiplyConfig,\n negConfig,\n nonMaxSuppressionV3Config,\n nonMaxSuppressionV4Config,\n nonMaxSuppressionV5Config,\n notEqualConfig,\n oneHotConfig,\n onesLikeConfig,\n packConfig,\n padV2Config,\n powConfig,\n preluConfig,\n prodConfig,\n raggedGatherConfig,\n raggedRangeConfig,\n raggedTensorToTensorConfig,\n rangeConfig,\n realConfig,\n realDivConfig,\n reciprocalConfig,\n reluConfig,\n relu6Config,\n reshapeConfig,\n resizeBilinearConfig,\n resizeBilinearGradConfig,\n resizeNearestNeighborConfig,\n resizeNearestNeighborGradConfig,\n reverseConfig,\n rotateWithOffsetConfig,\n roundConfig,\n rsqrtConfig,\n scatterNdConfig,\n searchSortedConfig,\n selectConfig,\n seluConfig,\n sigmoidConfig,\n signConfig,\n sinConfig,\n sinhConfig,\n sliceConfig,\n softmaxConfig,\n softplusConfig,\n spaceToBatchNDConfig,\n sparseFillEmptyRowsConfig,\n sparseReshapeConfig,\n sparseSegmentMeanConfig,\n sparseSegmentSumConfig,\n sparseToDenseConfig,\n splitVConfig,\n sqrtConfig,\n squareConfig,\n squaredDifferenceConfig,\n stepConfig,\n stridedSliceConfig,\n stringNGramsConfig,\n stringSplitConfig,\n stringToHashBucketFastConfig,\n subConfig,\n sumConfig,\n tanConfig,\n tanhConfig,\n tileConfig,\n topKConfig,\n transformConfig,\n transposeConfig,\n uniqueConfig,\n unpackConfig,\n unsortedSegmentSumConfig,\n zerosLikeConfig\n];\n\nfor (const kernelConfig of kernelConfigs) {\n registerKernel(kernelConfig);\n}\n","/**\n * @license\n * Copyright 2020 Google LLC. 
All Rights Reserved.\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n * =============================================================================\n */\n\nimport {backend_util, DataType, KernelConfig, KernelFunc, sumOutType, TensorInfo, UnsortedSegmentSum, UnsortedSegmentSumAttrs, UnsortedSegmentSumInputs, util} from '@tensorflow/tfjs-core';\n\nimport {MathBackendWebGL} from '../backend_webgl';\nimport {SegmentOpProgram} from '../segment_gpu';\n\nimport {range} from './Range';\nimport {reshape} from './Reshape';\nimport {tile} from './Tile';\nimport {transpose} from './Transpose';\n\nexport function unsortedSegmentSum(args: {\n inputs: UnsortedSegmentSumInputs,\n backend: MathBackendWebGL,\n attrs: UnsortedSegmentSumAttrs\n}): TensorInfo {\n const {inputs, backend, attrs} = args;\n const {x, segmentIds} = inputs;\n const {numSegments} = attrs;\n\n const xRank = x.shape.length;\n\n const toDispose = [];\n\n let axis = 0;\n const permutation = backend_util.getAxesPermutation([axis], xRank);\n let permutedX = x;\n if (permutation != null) {\n permutedX = transpose({inputs: {x}, backend, attrs: {perm: permutation}});\n toDispose.push(permutedX);\n axis = backend_util.getInnerMostAxes(1, xRank)[0];\n }\n\n const outShape = backend_util.segment_util.computeOutShape(\n permutedX.shape, axis, numSegments);\n const inSize = util.sizeFromShape([permutedX.shape[axis]]);\n const a2D =\n reshape({inputs: {x: permutedX}, backend, attrs: {shape: [-1, inSize]}});\n toDispose.push(a2D);\n\n const outputDType = sumOutType(x.dtype);\n\n const segOpCompute =\n (x: TensorInfo, segOpType: 'unsortedSegmentSum', segmentIds: TensorInfo,\n dtype: DataType, numSegments: number): TensorInfo => {\n const batchSize = x.shape[0];\n const inSize = x.shape[1];\n const windowSize =\n backend_util.segment_util.segOpComputeOptimalWindowSize(\n inSize, numSegments);\n const segOpInfo = {windowSize, inSize, batchSize, numSegments};\n const program = new SegmentOpProgram(segOpInfo, segOpType);\n const output = backend.compileAndRun(program, [x, segmentIds], dtype);\n toDispose.push(output);\n // No need to run another GPGPU program.\n if (output.shape[1] === numSegments) {\n return output;\n }\n const rangeInfo = range({\n backend,\n attrs: {start: 0, stop: numSegments, step: 1, dtype: 'float32'}\n });\n const tileInfo = tile({\n inputs: {x: rangeInfo},\n backend,\n attrs: {reps: [inSize / windowSize]}\n });\n\n toDispose.push(rangeInfo);\n toDispose.push(tileInfo);\n\n const result =\n segOpCompute(output, segOpType, tileInfo, dtype, numSegments);\n return result;\n };\n\n const segOpResult = segOpCompute(\n a2D, 'unsortedSegmentSum', segmentIds, outputDType, numSegments);\n\n const reshaped =\n reshape({inputs: {x: segOpResult}, backend, attrs: {shape: outShape}});\n\n let result = reshaped;\n if (permutation != null) {\n toDispose.push(reshaped);\n const perm = backend_util.getUndoAxesPermutation(permutation);\n result = transpose({inputs: {x: result}, backend, attrs: {perm}});\n }\n\n toDispose.forEach(t => 
backend.disposeIntermediateTensorInfo(t));\n return result;\n}\n\nexport const unsortedSegmentSumConfig: KernelConfig = {\n kernelName: UnsortedSegmentSum,\n backendName: 'webgl',\n kernelFunc: unsortedSegmentSum as unknown as KernelFunc\n};\n","/** @license See the LICENSE file. */\n\n// This code is auto-generated, do not modify this file!\nconst version = '4.2.0';\nexport {version};\n","export enum AlertType {\n Error,\n Warning,\n Info,\n}\n\nexport enum HotkeyView {\n Annotator,\n Classifier,\n Segmenter,\n CreateCategoryDialog,\n DeleteAnnotationCategoryDialog,\n DeleteAllCategoriesDialog,\n DeleteCategoryDialog,\n DeleteImagesDialog,\n EditCategoryDialog,\n ExampleClassifierDialog,\n ImageShapeDialog,\n ImportTensorflowModelDialog,\n MainImageGrid,\n MainImageGridAppBar,\n NewProjectDialog,\n ExportAnnotationsDialog,\n SaveFittedModelDialog,\n SaveProjectDialog,\n ProjectView,\n DialogWithAction,\n}\n\nexport enum Languages {\n Arabic = \"Arabic\",\n English = \"English\",\n Farsi = \"Farsi\",\n Finnish = \"Finnish\",\n French = \"French\",\n German = \"German\",\n Greek = \"Greek\",\n Hindi = \"Hindi\",\n Hungarian = \"Hungarian\",\n Spanish = \"Spanish\",\n}\n\nexport enum ImageSortKey {\n None,\n FileName,\n Category,\n Random,\n Image,\n}\n\nexport enum ThingSortKey {\n None = \"None\",\n FileName = \"File Name\",\n Category = \"Category\",\n Random = \"Random\",\n Name = \"Name\",\n}\n","import { AlertType, ImageSortKey } from \"./enums\";\nimport { AlertState, ImageSortKeyType } from \"./types\";\n\nexport const CATEGORY_COLORS = {\n black: \"#000000\",\n indianred: \"#C84C4C\",\n red: \"#E60000\",\n darkred: \"#8B0000\",\n mediumvioletred: \"#C71585\",\n palevioletred: \"#DB7093\",\n sherpablue: \"#004949\",\n darkcyan: \"#009292\",\n indigo: \"#490092\",\n navyblue: \"#006ddb\",\n heliotrope: \"#b66dff\",\n mayablue: \"#6db6ff\",\n columbiablue: \"#b6dbff\",\n olive: \"#924900\",\n mangotango: \"#db6d00\",\n green: \"#237700\",\n citrus: \"#a89d00\",\n};\n\n//the default colors assigned to a loaded image\nexport const DEFAULT_COLORS: Array<[number, number, number]> = [\n [1, 0, 0], // red\n [0, 1, 0], // green\n [0, 0, 1], // blue\n [1, 1, 0], // yellow\n [0, 1, 1], // cyan\n [1, 0, 1], // magneta\n];\n\nexport const UNKNOWN_IMAGE_CATEGORY_COLOR = \"#AAAAAA\";\nexport const UNKNOWN_ANNOTATION_CATEGORY_COLOR = \"#920000\";\n\nexport const APPLICATION_COLORS = {\n classifierList: \"#DCF3F450\",\n segmenterList: \"#E9E5FA50\",\n borderColor: \"#0000001f\",\n highlightColor: \"#0000000a\",\n};\n\nexport const dimensions = {\n leftDrawerWidth: 256,\n toolDrawerWidth: 56,\n annotatorToolOptionsWidth: 200,\n stagePaddingX: 50,\n stageInfoHeight: 21,\n};\n\nexport const defaultAlert: AlertState = {\n alertType: AlertType.Info,\n name: \"None\",\n description: \"default state\",\n visible: false,\n};\n\nexport const defaultImageSortKey: ImageSortKeyType = {\n imageSortKeyName: \"None\",\n imageSortKey: ImageSortKey.None,\n comparerFunction: (a, b) => 0,\n objectType: \"All\",\n};\n\nexport const availableImageSortKeys: ImageSortKeyType[] = [\n {\n imageSortKeyName: \"File name\",\n imageSortKey: ImageSortKey.FileName,\n comparerFunction: (a, b) => a.name.localeCompare(b.name),\n objectType: \"Images\",\n },\n {\n imageSortKeyName: \"Category\",\n imageSortKey: ImageSortKey.Category,\n comparerFunction: (a, b) => a.category.name.localeCompare(b.category.name),\n objectType: \"All\",\n },\n {\n imageSortKeyName: \"Random\",\n imageSortKey: ImageSortKey.Random,\n comparerFunction: (a, 
b) => (Math.round(Math.random() * 10) >= 5 ? 1 : -1),\n objectType: \"All\",\n },\n {\n imageSortKeyName: \"Image\",\n imageSortKey: ImageSortKey.Image,\n comparerFunction: (a, b) => a.name.localeCompare(b.name),\n objectType: \"Annotations\",\n },\n];\n\nexport const mobileBreakpoints = [\"xs\", \"sm\"];\n","export const UNKNOWN_IMAGE_CATEGORY_ID: string =\n \"00000000-0000-0000-0000-000000000000\";\nexport const UNKNOWN_ANNOTATION_CATEGORY_ID: string =\n \"00000000-0000-1111-0000-000000000000\";\nexport const UNKNOWN_CATEGORY_NAME: string = \"Unknown\";\n\nexport const CATEGORY_COLORS = {\n black: \"#000000\",\n indianred: \"#C84C4C\",\n red: \"#E60000\",\n darkred: \"#8B0000\",\n mediumvioletred: \"#C71585\",\n palevioletred: \"#DB7093\",\n sherpablue: \"#004949\",\n darkcyan: \"#009292\",\n indigo: \"#490092\",\n navyblue: \"#006ddb\",\n heliotrope: \"#b66dff\",\n mayablue: \"#6db6ff\",\n columbiablue: \"#b6dbff\",\n olive: \"#924900\",\n mangotango: \"#db6d00\",\n green: \"#237700\",\n citrus: \"#a89d00\",\n};\n\n//the default colors assigned to a loaded image\nexport const DEFAULT_COLORS: Array<[number, number, number]> = [\n [1, 0, 0], // red\n [0, 1, 0], // green\n [0, 0, 1], // blue\n [1, 1, 0], // yellow\n [0, 1, 1], // cyan\n [1, 0, 1], // magneta\n];\n\nexport const UNKNOWN_IMAGE_CATEGORY_COLOR = \"#AAAAAA\";\nexport const UNKNOWN_ANNOTATION_CATEGORY_COLOR = \"#920000\";\n","// ignore-no-logs\n\nimport { v4 as uuidv4 } from \"uuid\";\nimport StackTrace from \"stacktrace-js\";\nimport { BitDepth, DataArray } from \"utils/file-io/types\";\nimport * as ImageJS from \"image-js\";\nimport { tensor2d, image as tfImage } from \"@tensorflow/tfjs\";\nimport {\n availableImageSortKeys,\n defaultImageSortKey,\n UNKNOWN_IMAGE_CATEGORY_COLOR,\n} from \"./constants\";\nimport { AlertType, ImageSortKey } from \"./enums\";\nimport { FilterType, ImageSortKeyType } from \"./types\";\nimport { Category, ImageObject, Shape, ShapeArray } from \"store/data/types\";\nimport { UNKNOWN_CATEGORY_NAME } from \"store/data/constants\";\n\n/* \n ERROR HANDLING / LOGGING\n*/\nexport const getStackTraceFromError = async (error: Error): Promise => {\n var stacktrace = error.stack ? error.stack : \"\";\n try {\n var stackFrames = await StackTrace.fromError(error);\n stacktrace = stackFrames\n .map((stackFrame) => stackFrame.toString())\n .join(\"\\n\");\n } catch (error) {\n console.error(\"Could not resolve stacktrace\", error);\n }\n\n return stacktrace;\n};\n\nexport const createGitHubIssue = (\n title: string,\n body: string,\n alertType: AlertType = AlertType.Error\n) => {\n const label = alertType === AlertType.Error ? 
\"bug\" : \"help%20wanted\";\n const url =\n \"https://github.com/piximi/piximi/issues/new?title=\" +\n encodeURIComponent(title) +\n \"&labels=\" +\n label +\n \"&body=\" +\n encodeURIComponent(body);\n window.open(url);\n};\n\nexport const logger = (\n message: any | any[],\n level: \"log\" | \"warn\" | \"error\" = \"log\"\n) => {\n if (Array.isArray(message)) {\n message = message.join(\"\");\n }\n switch (level) {\n case \"log\":\n console.log(message);\n break;\n case \"warn\":\n console.warn(message);\n break;\n case \"error\":\n console.error(message);\n break;\n default:\n break;\n }\n};\n\n/* \n ARRAY HELPERS\n*/\n\nexport const enumKeys = (\n obj: O\n): K[] => {\n return Object.keys(obj).filter((k) => Number.isNaN(+k)) as K[];\n};\n\nexport const mutatingFilter = (\n array: Array,\n condition: (arg: T) => boolean\n): void => {\n for (let l = array.length - 1; l >= 0; l -= 1) {\n if (!condition(array[l])) array.splice(l, 1);\n }\n};\n\nexport const toUnique = (array: T[]): T[] => {\n return [...new Set(array)];\n};\n\nexport const isFiltered = (\n object: T,\n filters: FilterType\n): boolean => {\n return Object.keys(object).some((key) => {\n const itemValue = object[key as keyof T];\n const filterValues = filters[key as keyof T];\n\n if (Array.isArray(filterValues)) {\n return (filterValues as Array).includes(itemValue);\n }\n\n // If the key is not present in the record, include the item\n return false;\n });\n};\n\nexport const filterObjects = (\n objectArr: T[],\n filters: FilterType\n): T[] => {\n return objectArr.filter((item) => {\n return Object.keys(item).every((key) => {\n const itemValue = item[key as keyof T];\n const filterValues = filters[key as keyof T];\n\n if (Array.isArray(filterValues)) {\n return !(filterValues as Array).includes(itemValue);\n }\n\n // If the key is not present in the record, include the item\n return true;\n });\n });\n};\n\nexport const distinctFilter = (value: T, index: number, self: T[]) => {\n return self.indexOf(value) === index;\n};\n\nexport const getSubset = (object: T, keys: K[]) => {\n const subset: Record = {};\n\n keys.forEach((key) => {\n subset[key as string] = object[key];\n });\n return subset;\n};\n\nexport const convertShapeToArray = (shape: Shape): ShapeArray => {\n return Object.values(shape) as ShapeArray;\n};\n\nexport const convertArrayToShape = (array: ShapeArray): Shape => {\n return {\n planes: array[0],\n height: array[1],\n width: array[2],\n channels: array[3],\n };\n};\n\n/*\n CATEGORY HELPERS\n*/\n\nexport const generateUUID = (options?: { definesUnknown: boolean }) => {\n let id = uuidv4();\n let unknownFlag: string;\n if (options?.definesUnknown) {\n unknownFlag = \"0\";\n } else {\n unknownFlag = \"1\";\n }\n return unknownFlag + id.slice(1);\n};\n\nexport const isUnknownCategory = (categoryId: string) => {\n return categoryId[0] === \"0\";\n};\n\nexport const generateUnknownCategory = (kind: string) => {\n const unknownCategoryId = generateUUID({ definesUnknown: true });\n const unknownCategory: Category = {\n id: unknownCategoryId,\n name: UNKNOWN_CATEGORY_NAME,\n color: UNKNOWN_IMAGE_CATEGORY_COLOR,\n containing: [],\n kind: kind,\n visible: true,\n };\n return unknownCategory;\n};\n\n/*\n FILE HELPERS\n*/\n\nexport class PseudoFileList {\n private _files: File[];\n\n constructor(files: File[]) {\n let self = this;\n this._files = files;\n\n return new Proxy(this, {\n get(target, prop) {\n if (!isNaN(Number(prop)) && !(prop in target)) {\n return self._files[Number(prop)];\n } else {\n return 
Reflect.get(target, prop);\n }\n },\n }) satisfies FileList;\n }\n\n public item(elem: number) {\n return this._files[elem];\n }\n\n public get length() {\n return this._files.length;\n }\n\n [key: number]: File;\n\n [Symbol.iterator](): IterableIterator {\n return this._files[Symbol.iterator]();\n }\n}\n\n/*\n =======================\n General Utility Methods\n =======================\n */\n\n/*\n * Method to rename a cateogry/image if a category/image with this name already exists\n */\nexport const replaceDuplicateName = (\n newName: string,\n existingNames: Array\n) => {\n let currentName = newName;\n let i = 1;\n while (existingNames.includes(currentName)) {\n currentName = newName + `_${i}`;\n i += 1;\n }\n return currentName;\n};\n\n//HACK: new\nexport const newReplaceDuplicateName = (\n newName: string,\n existingNames: Array\n) => {\n let currentName = newName;\n let count = 0;\n // eslint-disable-next-line\n const nameRe = new RegExp(`${newName}(_\\d+)?`, \"g\");\n existingNames.forEach((name) => {\n if (!!nameRe.exec(name)) {\n const suffix = +name.split(\"_\")[1];\n if (suffix > count) {\n count = suffix;\n }\n }\n });\n return !count ? currentName : `${currentName}_${count + 1}`;\n};\n\nexport const scaleUpRange = (\n range: [number, number],\n bitDepth: BitDepth\n): [number, number] => {\n return [\n Math.floor(range[0] * (2 ** bitDepth - 1)),\n Math.floor(range[1] * (2 ** bitDepth - 1)),\n ];\n};\n\nexport const scaleUpRanges = (\n ranges: { [channel: number]: [number, number] },\n bitDepth: BitDepth,\n opts: { inPlace: boolean } = { inPlace: false }\n): { [channel: number]: [number, number] } => {\n let operandRanges = opts.inPlace ? ranges : { ...ranges };\n\n for (const ch of Object.keys(ranges)) {\n const chKey = parseInt(ch);\n operandRanges[chKey] = scaleUpRange(ranges[chKey], bitDepth);\n }\n\n return ranges;\n};\n\nexport const scaleDownRange = (\n range: [number, number],\n bitDepth: BitDepth\n): [number, number] => {\n return [range[0] / (2 ** bitDepth - 1), range[1] / (2 ** bitDepth - 1)];\n};\n\nexport const scaleDownRanges = (\n ranges: { [channel: number]: [number, number] },\n bitDepth: BitDepth,\n opts: { inPlace: boolean } = { inPlace: false }\n): { [channel: number]: [number, number] } => {\n let operandRanges = opts.inPlace ? 
ranges : { ...ranges };\n\n for (const ch of Object.keys(ranges)) {\n const chKey = parseInt(ch);\n operandRanges[chKey] = scaleDownRange(ranges[chKey], bitDepth);\n }\n\n return ranges;\n};\n\nexport const extractMinMax = (ranges: {\n [channel: number]: [number, number];\n}) => {\n const channels = Object.keys(ranges).map((ch) => parseInt(ch));\n const mins = Array(channels.length);\n const maxs = Array(channels.length);\n\n for (const ch of channels) {\n const [min, max] = ranges[ch];\n mins[ch] = min;\n maxs[ch] = max;\n }\n\n return { mins, maxs };\n};\n\nexport const convertToDataArray = (\n depth: number,\n source: DataArray | Array\n): DataArray => {\n switch (depth) {\n case 1:\n throw Error(\"Binary bit depth not (yet) supported\");\n case 8:\n return Uint8Array.from(source);\n case 16:\n return Uint16Array.from(source);\n case 32:\n return Float32Array.from(source);\n default:\n throw Error(\"Unrecognized bit depth\");\n }\n};\n\nexport const getPropertiesFromImage = async (\n image: ImageObject,\n annotation: { boundingBox: number[] }\n) => {\n const renderedIm = await ImageJS.Image.load(image.src);\n const normalizingWidth = image.shape.width - 1;\n const normalizingHeight = image.shape.height - 1;\n const bbox = annotation.boundingBox;\n const x1 = bbox[0] / normalizingWidth;\n const x2 = bbox[2] / normalizingWidth;\n const y1 = bbox[1] / normalizingHeight;\n const y2 = bbox[3] / normalizingHeight;\n const box = tensor2d([[y1, x1, y2, x2]]);\n const width = bbox[2] - bbox[0];\n const height = bbox[3] - bbox[1];\n const objectImage = renderedIm.crop({\n x: Math.abs(bbox[0]),\n y: Math.abs(bbox[1]),\n width: Math.abs(Math.min(image.shape.width, bbox[2]) - bbox[0]),\n height: Math.abs(Math.min(image.shape.height, bbox[3]) - bbox[1]),\n });\n const objSrc = objectImage.getCanvas().toDataURL();\n const data = tfImage.cropAndResize(image.data, box, [0], [height, width]);\n\n return {\n data: data,\n src: objSrc,\n imageId: image.id,\n boundingBox: bbox,\n };\n};\n\nexport const getPropertiesFromImageSync = (\n renderedIm: ImageJS.Image,\n image: ImageObject,\n annotation: { boundingBox: number[] }\n) => {\n const normalizingWidth = image.shape.width - 1;\n const normalizingHeight = image.shape.height - 1;\n const bbox = annotation.boundingBox;\n const x1 = bbox[0] / normalizingWidth;\n const x2 = bbox[2] / normalizingWidth;\n const y1 = bbox[1] / normalizingHeight;\n const y2 = bbox[3] / normalizingHeight;\n const box = tensor2d([[y1, x1, y2, x2]]);\n const width = bbox[2] - bbox[0];\n const height = bbox[3] - bbox[1];\n const objectImage = renderedIm.crop({\n x: Math.abs(bbox[0]),\n y: Math.abs(bbox[1]),\n width: Math.abs(Math.min(image.shape.width, bbox[2]) - bbox[0]),\n height: Math.abs(Math.min(image.shape.height, bbox[3]) - bbox[1]),\n });\n const objSrc = objectImage.getCanvas().toDataURL();\n const data = tfImage.cropAndResize(image.data, box, [0], [height, width]);\n\n return {\n data: data,\n src: objSrc,\n imageId: image.id,\n boundingBox: bbox,\n };\n};\nconst componentToHex = (c: number) => {\n var hex = (c * 255).toString(16);\n return hex.length === 1 ? 
\"0\" + hex : hex;\n};\n\nexport const rgbToHex = (rgb: [number, number, number]) => {\n return (\n \"#\" +\n componentToHex(rgb[0]) +\n componentToHex(rgb[1]) +\n componentToHex(rgb[2])\n );\n};\n\nexport const sortTypeByKey = (key: ImageSortKey): ImageSortKeyType => {\n const sortKeyIdx = availableImageSortKeys\n .map((e) => e.imageSortKey)\n .indexOf(key);\n\n if (sortKeyIdx >= 0) {\n return availableImageSortKeys[sortKeyIdx];\n } else {\n return defaultImageSortKey;\n }\n};\n\nexport const updateRecord = (\n record: Record,\n key: T,\n value: K\n) => {\n if (key in record) {\n record[key].push(value);\n } else {\n record[key] = [value];\n }\n};\n","import { Node } from \"ngraph.graph\";\n\nimport { fromIdxToCoord, PiximiGraph, PiximiNode } from \"./graphHelper\";\nimport { NodeHeap } from \"./NodeHeap\";\nimport { Point } from \"./types\";\nimport { logger } from \"utils/common/helpers\";\n\n/**\n * Performs a uni-directional A Star search on graph.\n *\n * We will try to minimize f(n) = g(n) + h(n), where\n * g(n) is actual distance from source node to `n`, and\n * h(n) is heuristic distance from `n` to target node.\n */\n\nconst NO_PATH: never[] = [];\n\n/**\n * Creates a new instance of pathfinder. A pathfinder has just one method:\n * `find(fromId, toId)`, it may be extended in future.\n *\n * @param graph instance. See https://github.com/anvaka/ngraph.graph\n * @param {width} width of the original image\n * @param {factor} factor for scaling between image and graph\n *\n * @returns {Object} A pathfinder with single method `find()`.\n */\nexport function cachedAStarPathSearch(\n graph: PiximiGraph,\n width: number,\n factor: number\n) {\n // whether traversal should be considered over oriented graph.\n const oriented = true;\n\n const heuristic = (fromNode: PiximiNode, toNode: PiximiNode) => {\n const p1 = fromIdxToCoord(fromNode.id as number, width);\n const p2 = fromIdxToCoord(toNode.id as number, width);\n if (p1.x === p2.x || p1.y === p2.y) {\n return 1;\n }\n return 5;\n };\n\n const distance = (fromNode: Node, toNode: Node) => {\n const p1 = fromIdxToCoord(fromNode.id as number, width);\n const p2 = fromIdxToCoord(toNode.id as number, width);\n if (p1.x === p2.x || p1.y === p2.y) {\n return 1 * toNode.data;\n }\n return 1.41 * toNode.data;\n };\n\n return {\n /**\n * Finds a path between node `fromId` and `toId`.\n * @returns {Array} of nodes between `toId` and `fromId`. 
Empty array is returned\n * if no path is found.\n */\n find: find,\n };\n\n function find(fromId: number, toId: number) {\n const from = graph.getNode(fromId) as PiximiNode;\n if (!from) return NO_PATH;\n const to = graph.getNode(toId) as PiximiNode;\n if (!to) return NO_PATH;\n\n let cameFrom: any;\n // Maps nodeId to NodeSearchState.\n\n const dest = graph.getNode(toId) as PiximiNode;\n if (dest) {\n if (dest.fromId === fromId && dest.trace.length > 0) {\n return dest.trace;\n }\n }\n\n if (graph.fromId !== fromId || graph.openSet.length === 0) {\n graph.openSet = new NodeHeap();\n // For the first node, fScore is completely heuristic.\n from.fScore = heuristic(from, to);\n\n // The cost of going from start to start is zero.\n from.distanceToSource = 0;\n graph.openSet.push(from);\n from.open = 1;\n graph.fromId = fromId;\n }\n\n while (graph.openSet.length > 0) {\n cameFrom = graph.openSet.pop();\n\n if (cameFrom.id === toId) {\n cameFrom.trace = reconstructPath(graph, cameFrom, width, factor);\n cameFrom.fromId = fromId;\n return cameFrom.trace;\n }\n\n // no need to visit this node anymore\n cameFrom.trace = reconstructPath(graph, cameFrom, width, factor);\n cameFrom.fromId = fromId;\n cameFrom.closed = true;\n graph.forEachLinkedNode(cameFrom.id, visitNeighbour, oriented);\n }\n\n // If we got here, then there is no path.\n return NO_PATH;\n\n function visitNeighbour(otherNode: any) {\n if (otherNode.fromId !== graph.fromId) {\n // This is old data, reset all params\n otherNode.fromId = graph.fromId;\n otherNode.trace = [];\n otherNode.parentId = null;\n otherNode.closed = false;\n otherNode.open = 0;\n otherNode.distanceToSource = Number.POSITIVE_INFINITY;\n otherNode.fScore = Number.POSITIVE_INFINITY;\n otherNode.heapIndex = -1;\n }\n\n if (otherNode.closed) {\n // Already processed this node.\n return;\n }\n if (otherNode.open === 0) {\n // Remember this node.\n graph.openSet.push(otherNode);\n otherNode.open = 1;\n }\n\n const tentativeDistance =\n cameFrom.distanceToSource + distance(otherNode, cameFrom);\n if (tentativeDistance >= otherNode.distanceToSource) {\n // This would only make our path longer. Ignore this route.\n return;\n }\n\n // bingo! 
we found shorter path:\n otherNode.parentId = cameFrom.id;\n otherNode.distanceToSource = tentativeDistance;\n otherNode.fScore = tentativeDistance + heuristic(otherNode, to);\n\n graph.openSet.updateItem(otherNode.heapIndex);\n }\n }\n}\n\nfunction reconstructPath(\n graph: PiximiGraph,\n searchNode: PiximiNode | null,\n width: number,\n factor: number = 1\n) {\n if (!searchNode) return;\n\n const point = fromIdxToCoord(searchNode.id as number, width);\n const newCoord = { x: point.x / factor, y: point.y / factor };\n let coords: Array = [];\n const fromId = graph.fromId;\n if (searchNode!.parentId !== null) {\n const parentNode = graph.getNode(searchNode!.parentId) as PiximiNode;\n if (typeof parentNode !== \"undefined\" && parentNode.fromId === fromId) {\n // Fetch a trace from the last coordinate\n coords.push(...parentNode.trace);\n if (coords.length > 1) {\n const oldDirection = pathDirection(coords.at(-2)!, coords.at(-1)!);\n const newDirection = pathDirection(coords.at(-1)!, newCoord);\n if (oldDirection === newDirection) {\n coords.pop();\n }\n }\n }\n }\n coords.push(newCoord);\n return coords;\n}\n\nconst pathDirection = (from: Point, to: Point) => {\n // Generate a number representing the relative direction of the coordinates.\n // Assuming TopLeft of an image is 0,0\n const dx = to.x - from.x;\n const dy = to.y - from.y;\n if (dx === 0) {\n if (dy < 0) {\n return 0; // Up\n }\n if (dy > 0) {\n return 4; // Down\n }\n }\n if (dy === 0) {\n if (dx > 0) {\n return 2; // Right\n }\n if (dx < 0) {\n return 6; // Left\n }\n }\n if (dx > 0) {\n if (dy < 0) {\n return 1; // Up+Right\n }\n if (dy > 0) {\n return 3; // Down+Right\n }\n }\n if (dx < 0) {\n if (dy < 0) {\n return 7; // Up+Left\n }\n if (dy > 0) {\n return 5; // Down+Left\n }\n }\n logger(\n `Invalid direction, this should never appear so check the code ${dx} ${dy}`\n );\n};\n","import { AnnotationObject, DecodedAnnotationObject } from \"store/data/types\";\n\n/**\n * Decode a Run-length encoded input array.\n * @param encoded Run-length encoded input array\n * @returns The decoded input array\n */\nexport const decode = (encoded: Array): Uint8ClampedArray => {\n let decoded = [];\n\n let background = true;\n\n for (let i = 0; i < encoded.length; i++) {\n for (let j = 0; j < encoded[i]; j++) {\n decoded.push(background ? 0 : 255);\n }\n\n background = !background;\n }\n\n return new Uint8ClampedArray(decoded);\n};\n\nexport const decodeAnnotation = (\n encodedAnnotation: AnnotationObject\n): DecodedAnnotationObject => {\n // TODO - serializtion: temporary measure, remove when done\n if (!encodedAnnotation.encodedMask)\n throw Error(`Annotation ${encodedAnnotation.id} has no encoded mask`);\n\n const decodedAnnotation = {\n ...encodedAnnotation,\n decodedMask: Uint8Array.from(decode(encodedAnnotation.encodedMask)),\n };\n\n return decodedAnnotation;\n};\n\n/**\n * Compute the Run-length encoding of the input array.\n * @param decoded (decoded) input array\n * @param expectBinary true if decoded mask is binary array (only consists of the values 0 or 1)\n false if the two values are 0 and 26^bitDepth-1\n * @returns Encoded array\n */\nexport const encode = (\n decoded: Uint8Array | Uint8ClampedArray | Uint16Array | Float32Array,\n expectBinary: boolean = false\n): Array => {\n let highVal: number;\n\n if (expectBinary) {\n highVal = 1;\n } else {\n const bitDepth =\n decoded.constructor === Uint16Array\n ? 16\n : decoded.constructor === Float32Array\n ? 
32\n : 8; // Uint8[Clamped]Array\n\n highVal = 2 ** bitDepth - 1;\n }\n\n let lastElement = decoded[0];\n\n let lastSequenceSize = 1;\n\n let encoded = [];\n\n // Float32Array data usually holds normalized data between 0 and 1,\n // in which case it must be denormalized before calling this func\n // such that 0 -> 0 and 1 -> 2*16-1\n // or expectBinary should be set true\n if (decoded[0] === highVal) {\n encoded.push(0);\n }\n\n for (let i = 1; i < decoded.length; i++) {\n if (lastElement !== decoded[i]) {\n encoded.push(lastSequenceSize);\n\n lastElement = decoded[i];\n\n lastSequenceSize = 1;\n } else {\n lastSequenceSize += 1;\n }\n }\n\n encoded.push(lastSequenceSize);\n\n return encoded;\n};\n\nexport const encodeAnnotation = (\n decodedAnnotation: DecodedAnnotationObject\n): AnnotationObject => {\n // TODO - serializtion: temporary measure, remove when done\n if (!decodedAnnotation.decodedMask)\n throw Error(`Annotation ${decodedAnnotation.id} has no decoded mask`);\n\n const encodedAnnotation = {\n ...decodedAnnotation,\n encodedMask: encode(decodedAnnotation.decodedMask),\n };\n\n return encodedAnnotation;\n};\n","import * as ImageJS from \"image-js\";\n\nimport {\n getXofYMax,\n getXofYMin,\n getYMax,\n getYMin,\n interpolateX,\n pointsToEdges,\n} from \"./point-operations\";\nimport { Edge, Point } from \"./types\";\n\n/**\n * Scan Line Polygon Fill (SLPF) algorithm to fill the annotation polygon.\n * @param polygon Polygon that defines the annotation.\n * @param imageWidth Width of the annotated image.\n * @param imageHeight Height of the annotated image.\n * @returns Annotation Mask\n */\n\nexport function scanline(\n polygon: Array,\n imageWidth: number,\n imageHeight: number\n) {\n const maskImage = new ImageJS.Image({\n width: imageWidth,\n height: imageHeight,\n bitDepth: 8,\n components: 1,\n alpha: 0,\n kind: \"GREY\" as ImageJS.ImageKind,\n });\n\n if (polygon.length < 3) return maskImage;\n\n // initialize the edge and active edge tables\n\n const edgeTable: Array = pointsToEdges(polygon).sort(\n (e1: Edge, e2: Edge) => getYMin(e2) - getYMin(e1)\n );\n\n const activeEdgeTable: Array = [];\n let yScan = getYMin(edgeTable.at(-1)!);\n const allSpans = [];\n\n // repeat until both the edge and active edge tables are empty\n while (edgeTable.length > 0 || activeEdgeTable.length > 0) {\n // move active edges from edge table to active edge table\n while (edgeTable.length > 0 && yScan === getYMin(edgeTable.at(-1)!)) {\n if (edgeTable.length > 0) activeEdgeTable.push(edgeTable.pop()!);\n }\n // remove inactive edges from active edge table\n for (let i = 0; i < activeEdgeTable.length; i++) {\n if (yScan >= getYMax(activeEdgeTable[i])) {\n const last = activeEdgeTable.pop();\n if (i < activeEdgeTable.length && last) {\n activeEdgeTable[i] = last;\n i--;\n }\n }\n }\n activeEdgeTable.sort((e1, e2) => {\n const cmp = getXofYMin(e1) - getXofYMin(e2);\n return cmp === 0 ? 
getXofYMax(e1) - getXofYMax(e2) : cmp;\n });\n // fill spans on scanline\n const spans = getSpans(yScan, activeEdgeTable);\n drawSpans(spans, yScan, maskImage);\n allSpans.push([spans, [yScan, yScan]]);\n yScan++;\n }\n return maskImage;\n}\n/**\n * Finds the intersections of the scanline and active edges\n * @param yScan y-coordinate of the current scanline\n * @param activeEdgeTable active edge table\n * @returns Array containing x-coordinates of the active edges at the current scanline location\n */\nfunction getSpans(yScan: number, activeEdgeTable: Array) {\n const spans = [];\n for (const edge of activeEdgeTable) {\n spans.push(interpolateX(yScan, edge));\n }\n return spans;\n}\n\n/**\n * Sorts intersection points and couples pairs for filling\n * @param spans Array of intersection points\n * @param yScan\n * @param img\n */\nfunction drawSpans(spans: Array, yScan: number, img: ImageJS.Image) {\n spans.sort((e1, e2) => e1 - e2);\n\n for (let i = 0; i < spans.length; i += 2) {\n fillSpan(spans[i], spans[i + 1], yScan, img);\n }\n}\n\n/**\n * Fills in span from xMin to xMax\n * @param x1 xMin of span\n * @param x2 xMax of span\n * @param y y-coord of current spanline\n * @param img Image on which the annotation id drawn\n *\n */\nfunction fillSpan(x1: number, x2: number, y: number, img: ImageJS.Image) {\n for (let x = x1; x < x2; x++) {\n img.setPixelXY(x, y, [255]);\n }\n}\n","import { min, max } from \"lodash\";\n\nimport { connectPoints } from \"./imageHelper\";\nimport { scanline } from \"./scanline\";\nimport { simplifyPolygon } from \"./simplify\";\nimport { Edge, Point } from \"./types\";\n\nexport const pointsAreEqual = (p1: Point, p2: Point) => {\n return p1.x === p2.x && p1.y === p2.y;\n};\n\nexport const getDistance = (p1: Point, p2: Point) => {\n return Math.hypot(p1.x - p2.x, p1.y - p2.y);\n};\n\n// returns minimum y-value of two points\nexport function getYMin(edge: Edge) {\n return edge.p1.y <= edge.p2.y ? edge.p1.y : edge.p2.y;\n}\n\n// returns maximum y-value of two points\nexport function getYMax(edge: Edge) {\n return edge.p1.y > edge.p2.y ? edge.p1.y : edge.p2.y;\n}\n\n// returns the x-value of the point with the minimum y-value\nexport function getXofYMin(edge: Edge) {\n return edge.p1.y <= edge.p2.y ? edge.p1.x : edge.p2.x;\n}\n\n// returns the x-value of the point with the maximum y-value\nexport function getXofYMax(edge: Edge) {\n return edge.p1.y > edge.p2.y ? 
edge.p1.x : edge.p2.x;\n}\n\n// converts list of points to list of non-horizontal edges\nexport function pointsToEdges(points: Array) {\n let edges: Array = [];\n let p1 = points.at(-1)!;\n for (let i = 0; i < points.length; i++) {\n const p2 = points[i];\n // ignore horizontal edges\n if (p1.y !== p2.y) edges.push({ p1: p1, p2: p2 });\n p1 = p2;\n }\n return edges;\n}\n\n// linear interpolation\n// finds x-value from scanline intersecting edge\nexport function interpolateX(yScan: number, edge: Edge) {\n const y1 = edge.p1.y;\n const y2 = edge.p2.y;\n const x1 = edge.p1.x;\n const x2 = edge.p2.x;\n return Math.floor(((yScan - y1) / (y2 - y1)) * (x2 - x1) + x1);\n}\n\nexport function computeBoundingBoxFromContours(\n contour: Array\n): [number, number, number, number] {\n if (contour.length === 0) return [0, 0, 0, 0];\n\n const xValues = contour.map((point) => point.x);\n const yValues = contour.map((point) => point.y);\n return [\n Math.round(min(xValues)!),\n Math.round(min(yValues)!),\n Math.round(max(xValues)!),\n Math.round(max(yValues)!),\n ];\n}\n\n/**\n * Compute the mask image of the annotation polygon from the bounding box and the polygon points.\n * @returns Mask image of the annotation.\n */\nexport function maskFromPoints(\n coordinates: Array,\n imageDims: { width: number; height: number },\n boundingBox?: [number, number, number, number],\n simplifyPoints: boolean = true\n) {\n if (!boundingBox) {\n boundingBox = computeBoundingBoxFromContours(coordinates);\n }\n\n const width = boundingBox[2] - boundingBox[0];\n const height = boundingBox[3] - boundingBox[1];\n\n if (width <= 0 || height <= 0) {\n process.env.NODE_ENV !== \"production\" &&\n console.warn(\n `Received negative image dimensions w: ${width}, h: ${height}`\n );\n throw Error(\n \"Could not calculate mask from points, with given image dimensions\"\n );\n }\n\n // get coordinates of connected points and draw boundaries of mask\n let connectedPoints = connectPoints(coordinates);\n\n if (simplifyPoints) {\n connectedPoints = simplifyPolygon(connectedPoints);\n }\n\n const greyScaleMask = scanline(\n connectedPoints,\n imageDims.width,\n imageDims.height\n );\n\n const maskImage = greyScaleMask.crop({\n x: boundingBox[0],\n y: boundingBox[1],\n width: width,\n height: height,\n });\n\n // Uint8Array because scanline generated an image of bitdepth 8\n return maskImage.data as Uint8Array;\n}\n","/*\n © 2013, Vladimir Agafonkin. 
Released under BSD license.\n Simplify.js, a high-performance JS polyline simplification library\n mourner.github.io/simplify-js\n*/\n\nimport { Point } from \"./types\";\n\n// square distance between 2 points\nfunction getSqDist(p1: Point, p2: Point) {\n var dx = p1.x - p2.x,\n dy = p1.y - p2.y;\n\n return dx * dx + dy * dy;\n}\n\n// square distance from a point to a segment\nfunction getSqSegDist(p: Point, p1: Point, p2: Point) {\n var x = p1.x,\n y = p1.y,\n dx = p2.x - x,\n dy = p2.y - y;\n\n if (dx !== 0 || dy !== 0) {\n var t = ((p.x - x) * dx + (p.y - y) * dy) / (dx * dx + dy * dy);\n\n if (t > 1) {\n x = p2.x;\n y = p2.y;\n } else if (t > 0) {\n x += dx * t;\n y += dy * t;\n }\n }\n\n dx = p.x - x;\n dy = p.y - y;\n\n return dx * dx + dy * dy;\n}\n\n// basic distance-based simplification\nfunction simplifyRadialDist(points: Array, sqTolerance: number) {\n var prevPoint = points[0];\n var newPoints = [prevPoint];\n var point: Point | undefined;\n\n for (var i = 1, len = points.length; i < len; i++) {\n point = points[i];\n\n if (getSqDist(point, prevPoint) > sqTolerance) {\n newPoints.push(point);\n prevPoint = point;\n }\n }\n\n if (point && prevPoint !== point) newPoints.push(point);\n\n return newPoints;\n}\n\nfunction simplifyDPStep(\n points: Array,\n first: number,\n last: number,\n sqTolerance: number,\n simplified: Array\n) {\n let maxSqDist = sqTolerance;\n let index: number = 0;\n\n for (var i = first + 1; i < last; i++) {\n var sqDist = getSqSegDist(points[i], points[first], points[last]);\n\n if (sqDist > maxSqDist) {\n index = i;\n maxSqDist = sqDist;\n }\n }\n\n if (maxSqDist > sqTolerance) {\n if (index - first > 1)\n simplifyDPStep(points, first, index, sqTolerance, simplified);\n simplified.push(points[index]);\n if (last - index > 1)\n simplifyDPStep(points, index, last, sqTolerance, simplified);\n }\n}\n\n// simplification using Ramer-Douglas-Peucker algorithm\nfunction simplifyDouglasPeucker(points: Array, sqTolerance: number) {\n var last = points.length - 1;\n\n var simplified = [points[0]];\n simplifyDPStep(points, 0, last, sqTolerance, simplified);\n simplified.push(points[last]);\n\n return simplified;\n}\n\n/**\n * Polyline simplification using a combination of Douglas-Peucker and Radial Distance algorithm.\n * @param points Array of points (Polyline).\n * @param tolerance [default = 1] Affects the amount of simplification (in the same metric as the point coordinates).\n * @param highestQuality [default = true] Excludes distance-based preprocessing step which leads to highest quality simplification but runs ~10-20 times slower.\n * @returns Array of simplified points.\n */\nexport function simplifyPolygon(\n points: Array,\n tolerance = 1,\n highestQuality = true\n) {\n if (points.length <= 2) return points;\n\n const sqTolerance = tolerance * tolerance;\n\n points = highestQuality ? 
points : simplifyRadialDist(points, sqTolerance);\n points = simplifyDouglasPeucker(points, sqTolerance);\n return points;\n}\n","import * as ImageJS from \"image-js\";\nimport { saveAs } from \"file-saver\";\nimport JSZip from \"jszip\";\n\nimport { decode } from \"./rle\";\nimport { pointsAreEqual } from \"./point-operations\";\n\nimport { logger } from \"utils/common/helpers\";\nimport { Point } from \"./types\";\nimport { DataArray } from \"utils/file-io/types\";\nimport {\n OldCategory,\n AnnotationObject,\n Category,\n DecodedAnnotationObject,\n ImageObject,\n} from \"store/data/types\";\n\nexport const generatePoints = (buffer: Array | undefined) => {\n if (!buffer) return undefined;\n const pointArray: Array = [];\n buffer.forEach((q, idx) => {\n if ((idx + 1) % 2 === 0) {\n pointArray.push({ x: buffer[idx - 1], y: q });\n }\n });\n return pointArray;\n};\n\nconst pointInBox = (point: Point, box: [number, number, number, number]) => {\n return (\n point.x >= box[0] &&\n point.x <= box[2] &&\n point.y >= box[1] &&\n point.y <= box[3]\n );\n};\n\nexport const connectPoints = (coordinates: Array) => {\n let connectedPoints: Array = [];\n\n const consecutiveCoords = coordinates\n .slice(0, coordinates.length - 1)\n .map((coord, i) => [coord, coordinates[i + 1]]);\n\n const adjacentPoints = consecutiveCoords.filter(\n ([current, next]) => !pointsAreEqual(current, next)\n );\n\n adjacentPoints.forEach(([current, next]) => {\n const points = drawLine(current!, next!);\n connectedPoints = connectedPoints.concat(points);\n });\n\n return connectedPoints;\n};\n\nconst drawLine = (p1: Point, p2: Point) => {\n const coords: Array = [];\n\n let x: number,\n y: number,\n x1: number,\n y1: number,\n x2: number,\n y2: number,\n dx: number,\n dy: number,\n step: number,\n i: number;\n\n x1 = Math.round(p1.x);\n y1 = Math.round(p1.y);\n x2 = Math.round(p2.x);\n y2 = Math.round(p2.y);\n\n dx = x2 - x1;\n dy = y2 - y1;\n\n step = Math.abs(dx) >= Math.abs(dy) ? 
Math.abs(dx) : Math.abs(dy);\n\n dx = dx / step;\n dy = dy / step;\n x = x1;\n y = y1;\n i = 1;\n\n while (i <= step) {\n coords.push({ x: Math.round(x), y: Math.round(y) });\n x = x + dx;\n y = y + dy;\n i = i + 1;\n }\n\n return coords;\n};\n\nexport const drawRectangle = (\n origin: Point | undefined,\n width: number | undefined,\n height: number | undefined\n) => {\n if (!width || !height || !origin) return [];\n\n const points: Array = [];\n\n // Negative height and width may happen if the rectangle was drawn from right to left.\n if (width < 0) {\n width = Math.abs(width);\n origin.x = origin.x - width;\n }\n if (height < 0) {\n height = Math.abs(height);\n origin.y = origin.y - height;\n }\n\n // Add corners of the bounding box.\n const x1 = Math.round(origin.x);\n const y1 = Math.round(origin.y);\n const x2 = Math.round(origin.x + width);\n const y2 = Math.round(origin.y + height);\n points.push(\n ...[\n { x: x1, y: y1 },\n { x: x2, y: y2 },\n ]\n );\n\n return points;\n};\n\nexport const getIdx = (\n width: number,\n nchannels: number,\n x: number,\n y: number,\n index: number\n) => {\n index = index || 0;\n return Math.floor((width * y + x) * nchannels + index);\n};\n\n/*\nGiven a click at a position, return all overlapping annotations ids\n */\nexport const getOverlappingAnnotations = (\n position: { x: number; y: number },\n annotations: Array\n) => {\n const overlappingAnnotations = annotations.filter(\n (annotation: DecodedAnnotationObject) => {\n const boundingBox = annotation.boundingBox;\n if (pointInBox(position, boundingBox)) {\n const boundingBoxWidth = boundingBox[2] - boundingBox[0];\n const boundingBoxHeight = boundingBox[3] - boundingBox[1];\n if (boundingBoxHeight && boundingBoxWidth) {\n //return annotation if clicked on actual selected data\n const maskROI = new ImageJS.Image(\n boundingBoxWidth,\n boundingBoxHeight,\n annotation.decodedMask,\n { components: 1, alpha: 0 }\n );\n if (\n maskROI.getPixelXY(\n Math.round(position.x - boundingBox[0]),\n Math.round(position.y - boundingBox[1])\n )[0]\n )\n return true;\n }\n }\n return false;\n }\n );\n return overlappingAnnotations.map((annotation: DecodedAnnotationObject) => {\n return annotation.id;\n });\n};\n\nexport const getAnnotationsInBox = (\n minimum: { x: number; y: number },\n maximum: { x: number; y: number },\n annotations: Array\n) => {\n return annotations.filter((annotation: DecodedAnnotationObject) => {\n return (\n minimum.x <= annotation.boundingBox[0] &&\n minimum.y <= annotation.boundingBox[1] &&\n maximum.x >= annotation.boundingBox[2] &&\n maximum.y >= annotation.boundingBox[3]\n );\n });\n};\n\n/*\n * From encoded mask data, get the decoded data and return results as an HTMLImageElement to be used by Konva.Image\n * Warning: the mask produced from the decoded data is scaled to fit the stage.\n * when creating an image from mask, the original width/height should be scaled by the same scale factor\n */\nexport const colorOverlayROI = (\n decodedMask: DataArray,\n boundingBox: [number, number, number, number],\n imageWidth: number,\n imageHeight: number,\n color: Array,\n scalingFactor: number\n): HTMLImageElement | undefined => {\n if (!decodedMask) return undefined;\n\n const endX = Math.min(imageWidth, boundingBox[2]);\n const endY = Math.min(imageHeight, boundingBox[3]);\n\n //extract bounding box params\n const boxWidth = endX - boundingBox[0];\n const boxHeight = endY - boundingBox[1];\n\n // const boxWidth = boundingBox[2] - boundingBox[0];\n // const boxHeight = boundingBox[3] - 
boundingBox[1];\n\n if (!boxWidth || !boxHeight) return undefined;\n let croppedImage = new ImageJS.Image(\n boxWidth,\n boxHeight,\n Array(boxHeight * boxWidth).fill(0),\n {\n components: 1,\n alpha: 0,\n }\n ).resize({ factor: scalingFactor });\n try {\n croppedImage = new ImageJS.Image(boxWidth, boxHeight, decodedMask, {\n components: 1,\n alpha: 0,\n }).resize({ factor: scalingFactor });\n } catch (err) {\n if (process.env.NODE_ENV !== \"production\") {\n logger(\"could not create crop\", \"error\");\n logger(`boundingbox: ${boundingBox}`);\n logger(`boxWidth: ${boxWidth}`);\n logger(`boxHeight: ${boxHeight}`);\n logger(`bwxbh: ${boxHeight * boxWidth}`);\n logger(`decodedMask length: ${decodedMask.length}`);\n logger(`diff: ${boxHeight * boxWidth - decodedMask.length}`);\n logger(err, \"error\");\n }\n }\n\n const colorROIImage = new ImageJS.Image(boxWidth, boxHeight, {\n components: 3,\n alpha: 1,\n }).resize({ factor: scalingFactor });\n\n const checkNeighbors = (\n arr: ImageJS.Image,\n x: number,\n y: number\n ): boolean => {\n if (x === 0 || x === croppedImage.width - 1) return true;\n for (let [dx, dy] of [\n [0, 1],\n [1, 0],\n [0, -1],\n [-1, 0],\n ]) {\n if (!arr.getPixelXY(x + dx, y + dy)[0]) return true;\n }\n return false;\n };\n\n for (let i = 0; i < croppedImage.width; i++) {\n for (let j = 0; j < croppedImage.height; j++) {\n if (croppedImage.getPixelXY(i, j)[0] > 0) {\n if (checkNeighbors(croppedImage, i, j)) {\n colorROIImage.setPixelXY(i, j, [color[0], color[1], color[2], 255]);\n } else {\n colorROIImage.setPixelXY(i, j, [color[0], color[1], color[2], 128]);\n }\n } else {\n colorROIImage.setPixelXY(i, j, [0, 0, 0, 0]);\n }\n }\n }\n\n const src = colorROIImage.toDataURL(\"image-png\", {\n useCanvas: true,\n });\n const image = new Image();\n image.src = src;\n\n return image;\n};\n\n/*\n * from https://stackoverflow.com/questions/5623838/rgb-to-hex-and-hex-to-rgb\n * */\nexport const hexToRGBA = (color: string, alpha?: number) => {\n const r = parseInt(color.slice(1, 3), 16);\n const g = parseInt(color.slice(3, 5), 16);\n const b = parseInt(color.slice(5, 7), 16);\n const a = alpha\n ? alpha\n : color.length === 9\n ? parseInt(color.slice(7, 9), 16)\n : undefined;\n\n return a ? 
[r, g, b, a] : [r, g, b];\n // return `rgba(${r}, ${g}, ${b}, ${alpha})`;\n};\n\nexport const saveAnnotationsAsBinaryInstanceSegmentationMasks = (\n images: Array,\n annotations: Array,\n categories: Array,\n zip: any,\n projectName: string\n): any => {\n // imageId -> list of annotations it owns\n const annsByImId = annotations.reduce((idMap, ann) => {\n if (idMap[ann.imageId]) {\n idMap[ann.imageId].push(ann);\n } else {\n idMap[ann.imageId] = [ann];\n }\n return idMap;\n }, {} as { [imageId: string]: AnnotationObject[] });\n\n images.forEach((current) => {\n annsByImId[current.id].forEach((ann) => {\n const height = current.shape.height;\n const width = current.shape.width;\n\n const fullLabelImage = new ImageJS.Image(\n width,\n height,\n new Uint8Array().fill(0),\n {\n components: 1,\n alpha: 0,\n }\n );\n const decoded = decode(ann.encodedMask);\n const boundingBox = ann.boundingBox;\n const endX = Math.min(width, boundingBox[2]);\n const endY = Math.min(height, boundingBox[3]);\n\n //extract bounding box params\n const boundingBoxWidth = endX - boundingBox[0];\n const boundingBoxHeight = endY - boundingBox[1];\n\n const roiMask = new ImageJS.Image(\n boundingBoxWidth,\n boundingBoxHeight,\n decoded,\n {\n components: 1,\n alpha: 0,\n }\n );\n for (let i = 0; i < boundingBoxWidth; i++) {\n for (let j = 0; j < boundingBoxHeight; j++) {\n if (roiMask.getPixelXY(i, j)[0] > 0) {\n fullLabelImage.setPixelXY(\n i + ann.boundingBox[0],\n j + ann.boundingBox[1],\n [255, 255, 255]\n );\n }\n }\n }\n const blob = fullLabelImage.toBlob(\"image/png\");\n const category = categories.find((category: OldCategory) => {\n return category.id === ann.categoryId;\n });\n if (category) {\n zip.folder(`${current.name}/${category.name}`);\n zip.file(`${current.name}/${category.name}/${ann.id}.png`, blob, {\n base64: true,\n });\n }\n });\n });\n zip.generateAsync({ type: \"blob\" }).then((blob: Blob) => {\n saveAs(blob, `${projectName}.zip`);\n });\n};\n\nexport const saveAnnotationsAsLabeledSemanticSegmentationMasks = (\n images: Array,\n annotations: Array,\n categories: Array,\n zip: any,\n projectName: string\n): any => {\n // imageId -> list of annotations it owns\n const annsByImId = annotations.reduce((idMap, ann) => {\n if (idMap[ann.imageId]) {\n idMap[ann.imageId].push(ann);\n } else {\n idMap[ann.imageId] = [ann];\n }\n return idMap;\n }, {} as { [imageId: string]: AnnotationObject[] });\n\n images.forEach((current) => {\n const height = current.shape.height;\n const width = current.shape.width;\n\n const fullLabelImage = new ImageJS.Image(\n width,\n height,\n new Uint8Array().fill(0),\n {\n components: 1,\n alpha: 0,\n }\n );\n categories.forEach((category: OldCategory) => {\n const categoryColor = hexToRGBA(category.color);\n if (!categoryColor) return;\n\n for (let ann of annsByImId[current.id]) {\n if (ann.categoryId !== category.id) continue;\n const decoded = decode(ann.encodedMask!);\n const boundingBox = ann.boundingBox;\n const endX = Math.min(width, boundingBox[2]);\n const endY = Math.min(height, boundingBox[3]);\n\n //extract bounding box params\n const boundingBoxWidth = endX - boundingBox[0];\n const boundingBoxHeight = endY - boundingBox[1];\n\n const roiMask = new ImageJS.Image(\n boundingBoxWidth,\n boundingBoxHeight,\n decoded,\n {\n components: 1,\n alpha: 0,\n }\n );\n for (let i = 0; i < boundingBoxWidth; i++) {\n for (let j = 0; j < boundingBoxHeight; j++) {\n if (roiMask.getPixelXY(i, j)[0] > 0) {\n fullLabelImage.setPixelXY(\n i + ann.boundingBox[0],\n j + 
ann.boundingBox[1],\n categoryColor\n );\n }\n }\n }\n }\n });\n const blob = fullLabelImage.toBlob(\"image/png\");\n zip.file(`${current.name}.png`, blob, {\n base64: true,\n });\n });\n zip.generateAsync({ type: \"blob\" }).then((blob: Blob) => {\n saveAs(blob, `${projectName}.zip`);\n });\n};\n\nexport const saveAnnotationsAsLabelMatrix = async (\n images: Array,\n annotations: Array,\n categories: Array,\n zip: JSZip,\n random: boolean = false,\n binary: boolean = false\n) => {\n // image id -> image\n const imIdMap = images.reduce(\n (idMap, im) => ({ ...idMap, [im.id]: im }),\n {} as { [internalImageId: string]: ImageObject }\n );\n\n // cat id -> cat name\n const catIdMap = categories.reduce(\n (idMap, cat) => ({ ...idMap, [cat.id]: cat.name }),\n {} as { [internalCategoryId: string]: string }\n );\n\n // image name -> cat name -> annotations\n const annIdMap = {} as {\n [imName: string]: { [catName: string]: AnnotationObject[] };\n };\n\n for (const ann of annotations) {\n const im = imIdMap[ann.imageId];\n const catName = catIdMap[ann.categoryId];\n\n if (!annIdMap.hasOwnProperty(im.name)) {\n annIdMap[im.name] = {};\n }\n\n if (!annIdMap[im.name].hasOwnProperty(catName)) {\n annIdMap[im.name][catName] = [];\n }\n\n annIdMap[im.name][catName].push(ann);\n }\n\n for (const im of images) {\n // for image names like blah.png\n const imCleanName = im.name.split(\".\")[0];\n\n for (const cat of categories) {\n const fullLabelImage = new ImageJS.Image(\n im.shape.width,\n im.shape.height,\n new Uint8Array().fill(0),\n { components: 1, alpha: 0 }\n );\n\n let r = binary ? 255 : 1;\n let g = binary ? 255 : 1;\n let b = binary ? 255 : 1;\n\n const imCatAnns = annIdMap[im.name][cat.name];\n\n // no annotations for this category, in this image\n if (!imCatAnns) continue;\n\n for (const ann of imCatAnns) {\n if (random) {\n r = Math.round(Math.random() * 255);\n g = Math.round(Math.random() * 255);\n b = Math.round(Math.random() * 255);\n } else if (!binary) {\n r = r + 1;\n b = b + 1;\n g = g + 1;\n }\n\n const decoded = decode(ann.encodedMask);\n const boundingBox = ann.boundingBox;\n const endX = Math.min(im.shape.width, boundingBox[2]);\n const endY = Math.min(im.shape.height, boundingBox[3]);\n\n //extract bounding box params\n const boundingBoxWidth = endX - boundingBox[0];\n const boundingBoxHeight = endY - boundingBox[1];\n\n const roiMask = new ImageJS.Image(\n boundingBoxWidth,\n boundingBoxHeight,\n decoded,\n {\n components: 1,\n alpha: 0,\n }\n );\n for (let i = 0; i < boundingBoxWidth; i++) {\n for (let j = 0; j < boundingBoxHeight; j++) {\n if (roiMask.getPixelXY(i, j)[0] > 0) {\n fullLabelImage.setPixelXY(\n i + ann.boundingBox[0],\n j + ann.boundingBox[1],\n [r, g, b]\n );\n }\n }\n }\n }\n\n const imCatBlob = await fullLabelImage.toBlob(\"image/png\");\n zip.folder(`${imCleanName}`);\n zip.file(`${imCleanName}/${cat.name}.png`, imCatBlob, { base64: true });\n }\n }\n};\n","import createGraph, { Graph, Node } from \"ngraph.graph\";\n\nimport { NodeHeap } from \"./NodeHeap\";\nimport { cachedAStarPathSearch } from \"./PathFinder\";\n\nimport { getIdx } from \"utils/annotator/imageHelper\";\nimport { Point } from \"./types\";\nimport { DataArray } from \"utils/file-io/types\";\n\nexport interface PiximiGraph extends Graph {\n fromId: number;\n openSet: NodeHeap;\n}\n\nexport interface PiximiNode extends Node {\n fromId: number;\n trace: Array;\n\n parentId: number | null;\n closed: boolean;\n open: number;\n distanceToSource: number;\n fScore: number;\n heapIndex: 
number;\n}\n\nconst validNeighbours = (\n x: number,\n y: number,\n height: number,\n width: number\n) => {\n const xoffsets = [0];\n const yoffsets = [0];\n const output: Point[] = [];\n //David will know what to write here\n if (x > 0) {\n xoffsets.push(-1);\n }\n if (x < width - 1) {\n xoffsets.push(1);\n }\n if (y > 0) {\n yoffsets.push(-1);\n }\n if (y < height - 1) {\n yoffsets.push(1);\n }\n for (let xoffset of xoffsets) {\n for (let yoffset of yoffsets) {\n if (!(xoffset === 0 && yoffset === 0)) {\n output.push({ x: x + xoffset, y: y + yoffset });\n }\n }\n }\n return output;\n};\n\nexport const fromIdxToCoord = (idx: number, width: number) => {\n const row = Math.floor(idx / width);\n const col = idx - row * width;\n return { x: col, y: row };\n};\n\nexport const makeGraph = (\n edges: Uint8ClampedArray | DataArray,\n height: number,\n width: number\n) => {\n let graph: any = createGraph();\n graph.fromId = -1;\n graph.openSet = new NodeHeap();\n let cap = 255;\n if (edges.BYTES_PER_ELEMENT === 2) {\n cap = 65535;\n }\n for (let y = 0; y < height; y++) {\n for (let x = 0; x < width; x++) {\n const startIdx = getIdx(width, 1, x, y, 0);\n const dist = cap - edges[startIdx];\n\n graph.addNode(startIdx, dist);\n\n const pixels = validNeighbours(x, y, height, width);\n\n for (let pixel of pixels) {\n const idx = getIdx(width, 1, pixel.x, pixel.y, 0);\n graph.addLink(startIdx, idx);\n }\n }\n }\n return graph as PiximiGraph;\n};\n\nexport const createPathFinder = (\n graph: PiximiGraph,\n width: number,\n factor: number\n) => {\n return cachedAStarPathSearch(graph, width, factor);\n};\n\n// export const convertPathToCoords = (\n// foundPath: any,\n// width: number,\n// factor: number = 1\n// ) => {\n// const pathCoords = [];\n// for (let node of foundPath) {\n// const id = node.id as number;\n// const point = fromIdxToCoord(id, width);\n// pathCoords.push([point.x / factor, point.y / factor]);\n// }\n// return pathCoords;\n// };\n","/** Finding contours in binary images and approximating polylines.\n * Implements the same algorithms as OpenCV's findContours and approxPolyDP.\n *

\n * Made possible with support from The Frank-Ratchye STUDIO For Creative Inquiry\n * At Carnegie Mellon University. http://studioforcreativeinquiry.org/\n * @author Lingdong Huang\n *\n * original source: https://gist.github.com/LingDong-/b99cdbe814e600d8152c0eefeef01ab3\n *\n * Modified for use in piximi 2/23/2023\n * @author Nodari Gogoberidze\n */\n\nimport { logger } from \"utils/common/helpers\";\nimport { Point } from \"./types\";\n\n// this can be extended with other types, eg. number[], Int32Array, Float32Array\n// but must be data array that can hold negative numbers\n// DO NOT USE Uint[8|16|32][Clamped]Array\ntype MaskData = Int8Array;\n\ntype Border = {\n // sequential number\n seqNum: number;\n // (i,j) points belonging to the border\n points: Array;\n // whether or not the border constitutes a hole\n isHole?: boolean;\n // the sequential number of the parent border\n parent?: number;\n};\n\nconst N_PIXEL_NEIGHBOR = 8;\n\n// returns a padded and binary mask of input mask\nexport function padMask(\n unpaddedMask: Array | Uint8Array | Uint8ClampedArray,\n unPaddedWidth: number,\n unPaddedHeight: number\n): MaskData {\n const paddedWidth = unPaddedWidth + 2;\n const paddedHeight = unPaddedHeight + 2;\n\n const paddedMask = new Int8Array(paddedWidth * paddedHeight);\n\n for (let y = 0; y < unPaddedHeight; y++) {\n for (let x = 0; x < unPaddedWidth; x++) {\n const unPaddedIdx = y * unPaddedWidth + x;\n const paddedIdx = (y + 1) * paddedWidth + (x + 1);\n\n paddedMask[paddedIdx] = unpaddedMask[unPaddedIdx] === 0 ? 0 : 1;\n }\n }\n\n return paddedMask;\n}\n\n// realigns contours such that the points they contain are with respect\n// to the unpadded binary mask\nfunction unpadContours(contours: Array) {\n const unpaddedContours: Array = [];\n\n for (const contour of contours) {\n unpaddedContours.push({\n ...contour,\n points: contour.points.map((p) => ({ x: p.x - 1, y: p.y - 1 })),\n });\n }\n\n return unpaddedContours;\n}\n\nfunction _f_ij(F: MaskData, width: number, height: number) {\n return {\n get: (i: number, j: number) => F[i * width + j],\n set: (i: number, j: number, value: number) => {\n F[i * width + j] = value;\n },\n };\n}\n\n// give pixel neighborhood counter-clockwise ID's for\n// easier access with findContour algorithm\nfunction neighborIdxToCoord(i: number, j: number, id: number) {\n switch (id) {\n case 0:\n return [i, j + 1];\n case 1:\n return [i - 1, j + 1];\n case 2:\n return [i - 1, j];\n case 3:\n return [i - 1, j - 1];\n case 4:\n return [i, j - 1];\n case 5:\n return [i + 1, j - 1];\n case 6:\n return [i + 1, j];\n case 7:\n return [i + 1, j + 1];\n default:\n // return null;\n throw new Error(`Incorrect id, (${id}), must be in [0, 7]`);\n }\n}\n\nfunction neighborCoordToIdx(\n baseI: number,\n baseJ: number,\n neighborI: number,\n neighborJ: number\n) {\n let di = neighborI - baseI;\n let dj = neighborJ - baseJ;\n\n if (di === 0 && dj === 1) {\n return 0;\n }\n if (di === -1 && dj === 1) {\n return 1;\n }\n if (di === -1 && dj === 0) {\n return 2;\n }\n if (di === -1 && dj === -1) {\n return 3;\n }\n if (di === 0 && dj === -1) {\n return 4;\n }\n if (di === 1 && dj === -1) {\n return 5;\n }\n if (di === 1 && dj === 0) {\n return 6;\n }\n if (di === 1 && dj === 1) {\n return 7;\n }\n // return -1;\n throw new Error(`Cannot find id with di ${di}, dj ${dj}`);\n}\n\nfunction logNeighbors(\n fij: ReturnType,\n baseI: number,\n baseJ: number,\n startIdx: number,\n offset: number,\n logMessage?: string\n) {\n const neighborVals: Array = [\n \"_\",\n \"_\",\n 
\"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n \"_\",\n ];\n\n for (let ccwIdx = 0; ccwIdx < N_PIXEL_NEIGHBOR; ccwIdx++) {\n let neighborIdx =\n (ccwIdx + startIdx + offset + N_PIXEL_NEIGHBOR * 2) % N_PIXEL_NEIGHBOR;\n let ij = neighborIdxToCoord(baseI, baseJ, neighborIdx);\n\n neighborVals[neighborIdx] =\n neighborIdx === startIdx + offset\n ? `(${fij.get(ij[0], ij[1])})`\n : ` ${fij.get(ij[0], ij[1])} `;\n }\n\n logMessage && logger(logMessage);\n logger(`neighborhood: (${baseI}, ${baseJ})`);\n logger(\n `|${neighborVals[3]}|${neighborVals[2]}|${neighborVals[1]}|\\n|${\n neighborVals[4]\n }| ${fij.get(baseI, baseJ)} |${neighborVals[0]}|\\n|${neighborVals[5]}|${\n neighborVals[6]\n }|${neighborVals[7]}|`\n );\n}\n\n/**\n * First counter-clockwise non-zero element in neighborhood\n * also responsible in determining if the neighbor directly to\n * the right of the base coordinate is crossed (inspected)\n *\n * @param baseI The i coordinate of the point to search the neighborhood of\n * @param baseJ The j coordinate of the point to search the neighbrohood of\n * @param neighborI The i coordinate of the first point in the neighborhood to search\n * should i + (1, 0, or -1)\n * @param neighborJ The j coordinate of the first point in the neighborhood to search\n * should j + (1, 0, or -1)\n * @param offset An offset counterclockwise from the base point\n * @return object\n * @parameter ij: The (i,j) coords of the non-zero neighbor or null if absent\n * @parameter rightExamined: wether or not the right neighbor was inspected\n **/\nfunction ccwNon0(\n fij: ReturnType,\n w: number,\n h: number,\n baseI: number,\n baseJ: number,\n neighborI: number,\n neighborJ: number,\n offset: number\n) {\n let rightExamined = false;\n\n let startIdx = neighborCoordToIdx(baseI, baseJ, neighborI, neighborJ);\n for (let ccwIdx = 0; ccwIdx < N_PIXEL_NEIGHBOR; ccwIdx++) {\n let neighborIdx =\n (ccwIdx + startIdx + offset + N_PIXEL_NEIGHBOR * 2) % N_PIXEL_NEIGHBOR;\n\n rightExamined = neighborIdx === 0 ? true : rightExamined;\n\n let ij = neighborIdxToCoord(baseI, baseJ, neighborIdx);\n\n if (fij.get(ij[0], ij[1]) !== 0) {\n return { ij, rightExamined };\n }\n }\n process.env.NODE_ENV !== \"production\" &&\n logNeighbors(\n fij,\n baseI,\n baseJ,\n startIdx,\n offset,\n \"ccw scan - nothing found\"\n );\n return { ij: null, rightExamined };\n}\n\n/**\n * First clockwise non-zero element in neighborhood\n * @param baseI The i coordinate of the point to search the neighborhood of\n * @param baseJ The j coordinate of the point to search the neighbrohood of\n * @param neighborI The i coordinate of the first point in the neighborhood to search\n * should i + (1, 0, or -1)\n * @param neighborJ The j coordinate of the first point in the neighborhood to search\n * should j + (1, 0, or -1)\n * @param offset An offset counterclockwise from the base point\n * @return The (i,j) coords of the non-zero neighbor or null if absent\n **/\nfunction cwNon0(\n fij: ReturnType,\n w: number,\n h: number,\n baseI: number,\n baseJ: number,\n neighborI: number,\n neighborJ: number,\n offset: number\n) {\n let startIdx = neighborCoordToIdx(baseI, baseJ, neighborI, neighborJ);\n for (let ccwIdx = 0; ccwIdx < N_PIXEL_NEIGHBOR; ccwIdx++) {\n let neighborIdx =\n (-ccwIdx + startIdx - offset + N_PIXEL_NEIGHBOR * 2) % N_PIXEL_NEIGHBOR;\n let ij = neighborIdxToCoord(baseI, baseJ, neighborIdx);\n if (fij.get(ij[0], ij[1]) !== 0) {\n return ij;\n }\n }\n return null;\n}\n\n/**\n * Find contours in a binary image\n *

\n * Implements Suzuki, S. and Abe, K.\n * Topological Structural Analysis of Digitized Binary Images by Border Following.\n *

\n * See source code for step-by-step correspondence to the paper's algorithm\n * description.\n * @param F The \"Frame\" (bitmap), stored in 1-dimensional row-major form.\n * 0=background, 1=foreground, will be modified by the function\n * to hold semantic information\n * @param width Width of the bitmap\n * @param height Height of the bitmap\n * @return An array of contours found in the image.\n * @see Contour\n */\nexport const findContours = (F: MaskData, width: number, height: number) => {\n // Topological Structural Analysis of Digitized Binary Images by Border Following.\n // Suzuki, S. and Abe, K., CVGIP 30 1, pp 32-46 (1985)\n\n let contours: Array = [];\n\n // Without loss of generality, we assume that 0-pixels fill the frame\n // of a binary picture\n for (let i = 1; i < height - 1; i++) {\n F[i * width] = 0;\n F[i * width + width - 1] = 0;\n }\n for (let i = 0; i < width; i++) {\n F[i] = 0;\n F[width * height - 1 - i] = 0;\n }\n\n // Set nitially NBD to 1\n // (the frame of F forms a special hole border and gets the sequential number 1;\n // NBD stands for the sequential number of the current border)\n let NBD = 1;\n\n // Scan the picture with a TV raster and perform the following steps\n // for each pixel such that fij != 0. Every time we begin to scan a\n // new row of the picture, reset LNBD to 1.\n // LNDB stands for the sequential number of the (outer or hole) border\n // encountered most recently\n let LNBD = 1;\n\n // The pixel located in the ith row and jth column is represented by the\n // row number (i, j)\n // fij is the value at coord (i,j) => F[i * width + j]\n const fij = _f_ij(F, width, height);\n\n // the row number i increases from top to bottom\n for (let iRaster = 1; iRaster < height - 1; iRaster++) {\n LNBD = 1;\n\n // the column number j from left to right\n for (let jRaster = 1; jRaster < width - 1; jRaster++) {\n let [i2CwStart, j2CwStart] = [0, 0];\n\n // scan until fij != 0\n if (fij.get(iRaster, jRaster) === 0) {\n continue;\n }\n\n // current border 0 used in (2)\n let B: Partial = {\n isHole: undefined,\n seqNum: undefined,\n points: [{ y: iRaster, x: jRaster }],\n };\n\n // (1) Select one of the following (1-a, 1-b, or 1-c):\n\n // (1-a) If fij = 1 and fi, j-1 = 0, then decide that the pixel\n // (i, j) is the border following starting point of an outer\n // border, increment NBD, and (i2, j2) <- (i, j - 1).\n if (\n fij.get(iRaster, jRaster) === 1 &&\n fij.get(iRaster, jRaster - 1) === 0\n ) {\n NBD++;\n [i2CwStart, j2CwStart] = [iRaster, jRaster - 1];\n B.isHole = false;\n B.seqNum = NBD;\n\n // (1-b) Else if fij >= 1 and fi,j+1 = 0, then decide that the\n // pixel (i, j) is the border following starting point of a\n // hole border, increment NBD, (i2, j2) <- (i, j + 1), and\n // LNBD + fij in case fij > 1.\n } else if (\n fij.get(iRaster, jRaster) >= 1 &&\n fij.get(iRaster, jRaster + 1) === 0\n // not in original paper\n // account for special case for outer border covered by hole border\n // && fij.get(iRaster, jRaster - 1) !== 0\n ) {\n NBD++;\n [i2CwStart, j2CwStart] = [iRaster, jRaster + 1];\n if (fij.get(iRaster, jRaster) > 1) {\n LNBD = fij.get(iRaster, jRaster);\n }\n B.isHole = true;\n B.seqNum = NBD;\n } else {\n // (1-c) Otherwise, go to (4).\n //\n // (4) If fij != 1, then LNBD <- |fij| and resume the raster\n // scan from pixel (i,j+1). 
The algorithm terminates when the\n // scan reaches the lower right corner of the picture\n if (fij.get(iRaster, jRaster) !== 1) {\n LNBD = Math.abs(fij.get(iRaster, jRaster));\n }\n continue;\n }\n\n // (2) Depending on the types of the newly found border\n // and the border with the sequential number LNBD\n // (i.e., the last border met on the current row),\n // decide the parent of the current border as shown in Table 1.\n //\n // TABLE 1\n // Decision Rule for the Parent Border of the Newly Found Border B\n // ----------------------------------------------------------------\n // Type of border B'\n // \\ with the sequential\n // \\ number LNBD\n // Type of B \\ Outer border Hole border\n // ---------------------------------------------------------------\n // Outer border The parent border The border B'\n // of the border B'\n //\n // Hole border The border B' The parent border\n // of the border B'\n // ----------------------------------------------------------------\n\n contours.push(B as Border);\n\n // default Bprime\n let Bprime: Border = {\n seqNum: LNBD,\n points: [],\n parent: LNBD,\n };\n\n // replace Bprime with already found border of that seqNum,\n // if available\n for (let c = 0; c < contours.length; c++) {\n if (contours[c].seqNum === LNBD) {\n Bprime = contours[c];\n break;\n }\n }\n\n if (Bprime.isHole) {\n if (B.isHole) {\n B.parent = Bprime.parent;\n } else {\n B.parent = Bprime.seqNum;\n }\n } else {\n if (B.isHole) {\n B.parent = Bprime.seqNum;\n } else {\n B.parent = Bprime.parent;\n }\n }\n\n // (3) From the starting point (i, j), follow the detected border:\n // this is done by the following substeps (3.1) through (3.5).\n\n // (3.1) Starting from (i2, j2), look around clockwise the pixels\n // in the neigh- borhood of (i, j) and find a nonzero pixel.\n // Let (i1, j1) be the first found nonzero pixel. 
If no nonzero\n // pixel is found, assign -NBD to fij and go to (4).\n let i1j1 = cwNon0(\n fij,\n width,\n height,\n iRaster,\n jRaster,\n i2CwStart,\n j2CwStart,\n 0\n );\n if (i1j1 === null) {\n fij.set(iRaster, jRaster, -NBD);\n //go to (4)\n if (fij.get(iRaster, jRaster) !== 1) {\n LNBD = Math.abs(fij.get(iRaster, jRaster));\n }\n continue;\n }\n let [i1CwFound, j1CwFound] = i1j1;\n\n // (3.2) (i2, j2) <- (i1, j1) and (i3,j3) <- (i, j).\n let [i2PrevStep, j2PrevStep] = [i1CwFound, j1CwFound];\n let [i3CurrStep, j3CurrStep] = [iRaster, jRaster];\n\n // walk the border\n while (true) {\n // (3.3) Starting from the next elementof the pixel (i2, j2)\n // in the counterclockwise order, examine counterclockwise\n // the pixels in the neighborhood of the current pixel (i3, j3)\n // to find a nonzero pixel and let the first one be (i4, j4).\n\n let { ij: i4j4, rightExamined } = ccwNon0(\n fij,\n width,\n height,\n i3CurrStep,\n j3CurrStep,\n i2PrevStep,\n j2PrevStep,\n 1\n );\n\n if (i4j4 === null) {\n process.env.NODE_ENV !== \"production\" &&\n console.warn(\n `i4j4 is invalid: i ${iRaster}, j ${jRaster}, i1 ${i1CwFound}, j1 ${j1CwFound}, i2 ${i2PrevStep}, j2 ${j2PrevStep}, i3 ${i3CurrStep}, j3 ${j3CurrStep}, i4j4j ${i4j4}`\n );\n // break;\n }\n\n var [i4CcwFound, j4CcwFound] = i4j4 as number[];\n\n // save the point\n contours[contours.length - 1].points.push({\n y: i4CcwFound,\n x: j4CcwFound,\n });\n\n // (3.4) Change the value fi3j3 of the pixel (i3, j3) as follows:\n\n // (3.4-a) If the pixel (i3, j3 + 1) is a O-pixel examined in the\n // substep (3.3) then fi3, j3 <- -NBD.\n if (fij.get(i3CurrStep, j3CurrStep + 1) === 0) {\n fij.set(\n i3CurrStep,\n j3CurrStep,\n // B.isHole to account for donut, see find-contours.test.ts\n rightExamined || B.isHole ? 
-NBD : NBD\n );\n\n // (3.4-b) If the pixel (i3, j3 + 1) is not a O-pixel examined\n // in the substep (3.3) and fi3j3 = 1, then fi3j3 <- NBD.\n } else if (\n fij.get(i3CurrStep, j3CurrStep + 1) !== 0 &&\n fij.get(i3CurrStep, j3CurrStep) === 1\n ) {\n fij.set(i3CurrStep, j3CurrStep, NBD);\n } else {\n //(3.4-c) Otherwise, do not change fi3j3.\n }\n\n // (3.5) If (i4, j4) = (i, j) and (i3, j3) = (i1, j1)\n // (coming back to the starting point), then go to (4);\n if (\n i4CcwFound === iRaster &&\n j4CcwFound === jRaster &&\n i3CurrStep === i1CwFound &&\n j3CurrStep === j1CwFound\n ) {\n if (fij.get(iRaster, jRaster) !== 1) {\n LNBD = Math.abs(fij.get(iRaster, jRaster));\n }\n break;\n\n // otherwise, (i2, j2) <- (i3, j3),(i3, j3) <- (i4, j4),\n // and go back to (3.3).\n } else {\n [i2PrevStep, j2PrevStep] = [i3CurrStep, j3CurrStep];\n [i3CurrStep, j3CurrStep] = [i4CcwFound, j4CcwFound];\n }\n }\n }\n }\n return unpadContours(contours);\n};\n","import {\n decode,\n padMask,\n findContours,\n simplifyPolygon,\n} from \"utils/annotator\";\n\nimport { logger } from \"utils/common/helpers\";\nimport {\n SerializedCOCOAnnotationType,\n SerializedCOCOCategoryType,\n SerializedCOCOFileType,\n SerializedCOCOImageType,\n} from \"../types\";\nimport { AnnotationObject, Category, ImageObject } from \"store/data/types\";\n\nexport const serializeCOCOFile = (\n images: Array,\n annotations: Array,\n categories: Array\n): SerializedCOCOFileType => {\n let imCount = 0;\n let catCount = 0;\n let annCount = 0;\n\n const imIdMap = images.reduce((idMap, im) => {\n idMap[im.id] = {\n id: imCount++,\n width: im.shape.width,\n height: im.shape.height,\n file_name: im.name,\n license: 0,\n flickr_url: \"\",\n coco_url: \"\",\n date_captured: \"\",\n };\n return idMap;\n }, {} as { [internalImageId: string]: SerializedCOCOImageType });\n\n const catIdMap = categories.reduce((idMap, cat) => {\n idMap[cat.id] = {\n id: catCount++,\n name: cat.name,\n supercategory: cat.kind,\n };\n return idMap;\n }, {} as { [internalCategoryId: string]: SerializedCOCOCategoryType });\n\n let serializedAnnotations: Array = [];\n\n for (const ann of annotations) {\n const boxWidth = ann.boundingBox[2] - ann.boundingBox[0];\n const boxHeight = ann.boundingBox[3] - ann.boundingBox[1];\n\n const paddedMask = padMask(decode(ann.encodedMask!), boxWidth, boxHeight);\n // +2 to W, H to account for padding on mask\n const contours = findContours(paddedMask, boxWidth + 2, boxHeight + 2);\n const outerBorder = contours.find((b) => b.seqNum === 2);\n\n if (!outerBorder) {\n process.env.NODE_ENV !== \"production\" &&\n logger(`Could not find outer border of annotation ${ann.id}`);\n throw new Error(\n `Could not determine contours of annotation belonging to image ${\n imIdMap[ann.imageId].file_name\n }`\n );\n }\n\n const SIMPLIFY = false;\n\n // coco polygon points are arranged as: [x_1, y_1, x_2, y_2, ...]\n const outerBorderPoints = (\n SIMPLIFY ? 
simplifyPolygon(outerBorder.points) : outerBorder.points\n )\n // add x_1, y_1 coordinates of box to translate polygon\n // points to wrt to image, rather than wrt mask\n .map((p) => [p.x + ann.boundingBox[0], p.y + ann.boundingBox[1]])\n .flat();\n\n serializedAnnotations.push({\n id: annCount++,\n image_id: imIdMap[ann.imageId].id,\n category_id: catIdMap[ann.categoryId].id,\n segmentation: [outerBorderPoints],\n area: 0,\n // x1, y1, width, height\n bbox: [ann.boundingBox[0], ann.boundingBox[1], boxWidth, boxHeight],\n iscrowd: 0,\n });\n }\n\n const info = {\n year: new Date().getFullYear(),\n // provided in package.json scripts\n version: process.env.REACT_APP_VERSION as string,\n description: \"\",\n contributor: \"\",\n url: \"\",\n date_created: \"\",\n };\n\n const licenses = [\n {\n id: 0,\n name: \"\",\n url: \"\",\n },\n ];\n\n return {\n info,\n images: Object.values(imIdMap),\n categories: Object.values(catIdMap),\n annotations: serializedAnnotations,\n licenses,\n };\n};\n","export enum CropSchema {\n None = \"None\", // do not crop\n // Smallest = \"Smallest\", // crop to match smallest square in training dataset\n // Biggest = \"Biggest\", // crop the biggest square possible per sample\n Match = \"Match\", // match crop size to architecture input shape\n}\n\nexport enum ModelTask {\n Classification,\n Segmentation,\n}\n\nexport enum ModelStatus {\n Uninitialized,\n InitFit,\n Loading,\n Training,\n Trained,\n Predicting,\n Suggesting,\n Evaluating,\n}\n\nexport enum OptimizationAlgorithm {\n Adadelta = \"Adadelta\",\n Adagrad = \"Adagrad\",\n Adam = \"Adam\",\n Adamax = \"Adamax\",\n Momentum = \"Momentum\",\n RMSProp = \"RMSProp\",\n StochasticGradientDescent = \"Stochastic gradient descent (SGD)\",\n}\n\nexport enum LossFunction {\n AbsoluteDifference = \"Absolute difference\",\n CategoricalCrossEntropy = \"Categorical (softmax) cross entropy\",\n /*\n * Disabled CosineDistance - see comment in\n * store/common/coroutines/compile.ts\n */\n // CosineDistance = \"Cosine distance\",\n Hinge = \"Hinge\",\n Huber = \"Huber\",\n Log = \"Log\",\n MeanSquaredError = \"Mean squared error (MSE)\",\n SigmoidCrossEntropy = \"Sigmoid cross entropy\",\n}\n\nexport enum Metric {\n BinaryAccuracy = \"Binary accuracy\",\n BinaryCrossEntropy = \"Binary cros-sentropy\",\n CategoricalAccuracy = \"Categorical accuracy\",\n CategoricalCrossEntropy = \"Categorical cross-entropy\",\n CosineProximity = \"Cosine proximity\",\n MeanAbsoluteError = \"Mean absolute error (MAE)\",\n MeanAbsolutePercentageError = \"Mean absolute percentage error\",\n MeanSquaredError = \"Mean squared error\",\n Precision = \"Precision\",\n Recall = \"Recall\",\n SparseCategoricalAccuracy = \"Sparse categorical accuracy\",\n}\n\nexport enum Partition {\n Training = \"Training\", // images to be used for training the model\n Validation = \"Validation\", // images to be used for validating a model\n Inference = \"Inference\", // images to be used for inference\n Unassigned = \"Unassigned\",\n}\n","export enum AnnotationMode {\n Add,\n Intersect,\n New,\n Subtract,\n}\nexport enum AnnotationState {\n Blank, // not yet annotating\n Annotating,\n Annotated,\n}\nexport enum ZoomMode {\n In,\n Out,\n}\nexport enum ToolType {\n ColorAdjustment,\n ColorAnnotation,\n EllipticalAnnotation,\n Hand,\n LassoAnnotation,\n MagneticAnnotation,\n ObjectAnnotation,\n PenAnnotation,\n Pointer,\n PolygonalAnnotation,\n QuickAnnotation,\n RectangularAnnotation,\n Zoom,\n ThresholdAnnotation,\n}\n","import * as ImageJS from 
\"image-js\";\n\nimport { Tool } from \"./Tool\";\n\nimport {\n computeBoundingBoxFromContours as _computeBoundingBoxFromContours,\n maskFromPoints,\n} from \"utils/annotator\";\nimport { convertToDataArray, generateUUID } from \"utils/common/helpers\";\nimport { Partition } from \"utils/models/enums\";\nimport { Point } from \"../types\";\nimport { AnnotationState } from \"../enums\";\nimport { DataArray } from \"utils/file-io/types\";\nimport { Category, PartialDecodedAnnotationObject } from \"store/data/types\";\n\nexport abstract class AnnotationTool extends Tool {\n /**\n * Polygon that defines the annotation area, array of (x, y) coordinates.\n */\n points: Array = [];\n /**\n * Coordinates of the annotation bounding box: [x1, y1, x2, y2].\n * Specifies the top left and bottom right points.\n */\n protected _boundingBox?: [number, number, number, number];\n /**\n * One-hot encoded encodedMask of the annotation.\n */\n protected _encodedMask?: Array;\n /**\n * Raw msk data\n */\n protected _decodedMask?: DataArray;\n /**\n * State of the annotation: Blank (not yet annotating), Annotating or Annotated\n */\n annotationState = AnnotationState.Blank;\n /**\n * Annotation object of the Tool.\n */\n annotation?: PartialDecodedAnnotationObject;\n anchor?: Point = undefined;\n origin?: Point = undefined;\n buffer?: Array = [];\n\n onAnnotating?: () => void;\n onAnnotated?: () => void;\n onDeselect?: () => void;\n\n get boundingBox(): [number, number, number, number] | undefined {\n return this._boundingBox;\n }\n\n set boundingBox(\n updatedBoundingBox: [number, number, number, number] | undefined\n ) {\n this._boundingBox = updatedBoundingBox;\n }\n\n /**\n * Compute the bounding box of the polygon that defined the annotation.\n * @returns bounding box [number, number, number, number] or undefined\n */\n protected computeBoundingBox(): [number, number, number, number] | undefined {\n if (this.points.length === 0) return undefined;\n return [\n this.points[0].x,\n this.points[0].y,\n this.points[1].x,\n this.points[1].y,\n ];\n }\n\n protected setBoundingBoxFromContours(contour: Array) {\n this.boundingBox = _computeBoundingBoxFromContours(contour);\n }\n\n get encodedMask(): Array | undefined {\n return this._encodedMask;\n }\n\n set encodedMask(updatedMask: Array | undefined) {\n this._encodedMask = updatedMask;\n }\n\n get decodedMask(): DataArray | undefined {\n return this._decodedMask;\n }\n\n set decodedMask(updatedMask: DataArray | undefined) {\n this._decodedMask = updatedMask;\n }\n\n /**\n * Compute the encodedMask image of the annotation polygon from the bounding box and the polygon points.\n */\n protected setAnnotationMaskFromPoints() {\n if (!this.boundingBox || this.points.length === 0) {\n return;\n }\n\n this.decodedMask = maskFromPoints(\n this.points,\n { width: this.image.width, height: this.image.height },\n this.boundingBox\n );\n }\n\n protected setAnnotating() {\n this.annotationState = AnnotationState.Annotating;\n if (this.onAnnotating) {\n this.onAnnotating();\n }\n }\n public registerOnAnnotatingHandler(handler: () => void): void {\n this.onAnnotating = handler;\n }\n\n protected setAnnotated() {\n this.annotationState = AnnotationState.Annotated;\n if (this.onAnnotated) {\n this.onAnnotated();\n }\n }\n public registerOnAnnotatedHandler(handler: () => void): void {\n this.onAnnotated = handler;\n }\n\n protected setBlank() {\n this.annotationState = AnnotationState.Blank;\n if (this.onDeselect) {\n this.onDeselect();\n }\n }\n public 
registerOnDeselectHandler(handler: () => void): void {\n this.onDeselect = handler;\n }\n\n public abstract deselect(): void;\n\n public abstract onMouseDown(position: { x: number; y: number }): void;\n\n public abstract onMouseMove(position: { x: number; y: number }): void;\n\n public abstract onMouseUp(position: { x: number; y: number }): void;\n\n /**\n * Creates and sets the annotation object.\n * @param category Category of the annotation.\n * @param plane Index of the image plane that corresponds to the annotation.\n * @returns\n */\n public annotate(category: Category, plane: number, imageId: string): void {\n if (!this.boundingBox || !this.decodedMask) return;\n\n this.annotation = {\n boundingBox: this.boundingBox,\n categoryId: category.id,\n id: this.annotation ? this.annotation.id : generateUUID(),\n imageId,\n decodedMask: this.decodedMask,\n activePlane: plane,\n partition: Partition.Unassigned,\n };\n }\n\n /**\n * Checks if a point lies within an annotation bounding box\n * @param x x-coord of point\n * @param y y-coord of point\n * @param boundingBox Bounding box of annotation\n * @returns true if point lies within the bounding box, false otherwise\n */\n private isInBoundingBox(\n x: number,\n y: number,\n boundingBox: [number, number, number, number]\n ) {\n if (x < 0 || y < 0) return false;\n if (x >= boundingBox[2] - boundingBox[0]) return false;\n if (y >= boundingBox[3] - boundingBox[1]) return false;\n return true;\n }\n\n /**\n * Add the areas selected by the current AnnotationTool from the selected annotation.\n * @param newEncodedMaskData Encoded encodedMask data of new annotation to be added\n * @param newBoundingBox Bounding box of new annotation to be added\n * @returns Bounding box and encodedMask of the combined annotation areas\n */\n public add(\n newEncodedMaskData: DataArray,\n newBoundingBox: [number, number, number, number]\n ): [Uint8Array, [number, number, number, number]] {\n if (!this._decodedMask || !this._boundingBox)\n return [convertToDataArray(8, []) as Uint8Array, [0, 0, 0, 0]];\n\n const newMaskData = newEncodedMaskData;\n const existingMaskData = this._decodedMask;\n const existingBoundingBox = this._boundingBox;\n\n const combinedBoundingBox = [\n existingBoundingBox[0] < newBoundingBox[0]\n ? existingBoundingBox[0]\n : newBoundingBox[0],\n existingBoundingBox[1] < newBoundingBox[1]\n ? existingBoundingBox[1]\n : newBoundingBox[1],\n existingBoundingBox[2] > newBoundingBox[2]\n ? existingBoundingBox[2]\n : newBoundingBox[2],\n existingBoundingBox[3] > newBoundingBox[3]\n ? 
existingBoundingBox[3]\n : newBoundingBox[3],\n ] as [number, number, number, number];\n\n const newBoundingBoxWidth = combinedBoundingBox[2] - combinedBoundingBox[0];\n const newBoundingBoxHeight =\n combinedBoundingBox[3] - combinedBoundingBox[1];\n\n const combinedMaskData = [];\n const deltaX1 = newBoundingBox[0] - combinedBoundingBox[0];\n const deltaY1 = newBoundingBox[1] - combinedBoundingBox[1];\n const deltaX2 = existingBoundingBox[0] - combinedBoundingBox[0];\n const deltaY2 = existingBoundingBox[1] - combinedBoundingBox[1];\n\n for (let i = 0; i < newBoundingBoxWidth * newBoundingBoxHeight; i++) {\n const x = i % newBoundingBoxWidth;\n const y = Math.floor(i / newBoundingBoxWidth);\n const b1x = x - deltaX1;\n const b1y = y - deltaY1;\n const b2x = x - deltaX2;\n const b2y = y - deltaY2;\n\n const b1i = b1x + b1y * (newBoundingBox[2] - newBoundingBox[0]);\n const b2i = b2x + b2y * (existingBoundingBox[2] - existingBoundingBox[0]);\n if (\n (this.isInBoundingBox(b1x, b1y, newBoundingBox) &&\n newMaskData[b1i] === 255) ||\n (this.isInBoundingBox(b2x, b2y, existingBoundingBox) &&\n existingMaskData[b2i] === 255)\n ) {\n combinedMaskData.push(255);\n } else {\n combinedMaskData.push(0);\n }\n }\n\n return [\n convertToDataArray(8, combinedMaskData) as Uint8Array,\n combinedBoundingBox,\n ];\n }\n\n /**\n * Intersect the areas selected by the current AnnotationTool and the selected annotation.\n * @param decodedMask1\n * @param boundingBox1\n * @returns Bounding box and encodedMask of the intersected annotation areas.\n */\n public intersect(\n decodedMask1: DataArray,\n boundingBox1: [number, number, number, number]\n ): [Uint8Array, [number, number, number, number]] {\n if (!this._decodedMask || !this._boundingBox)\n return [convertToDataArray(8, []) as Uint8Array, [0, 0, 0, 0]];\n\n const decodedMask2 = this._decodedMask;\n\n const boundingBox2 = this._boundingBox;\n\n const intersectionBoundingBox = [\n boundingBox2[0] > boundingBox1[0] ? boundingBox2[0] : boundingBox1[0],\n boundingBox2[1] > boundingBox1[1] ? boundingBox2[1] : boundingBox1[1],\n boundingBox2[2] < boundingBox1[2] ? boundingBox2[2] : boundingBox1[2],\n boundingBox2[3] < boundingBox1[3] ? 
boundingBox2[3] : boundingBox1[3],\n ] as [number, number, number, number];\n\n const intersectionBoundingBoxWidth =\n intersectionBoundingBox[2] - intersectionBoundingBox[0];\n const intersectionBoundingBoxHeight =\n intersectionBoundingBox[3] - intersectionBoundingBox[1];\n\n // Check if bounding box is valid: width and height of the intersection must be positive.\n if (intersectionBoundingBoxWidth < 0 || intersectionBoundingBoxHeight < 0) {\n return [convertToDataArray(8, []) as Uint8Array, [0, 0, 0, 0]];\n }\n\n const newMaskData = [];\n const deltaX1 = boundingBox1[0] - intersectionBoundingBox[0];\n const deltaY1 = boundingBox1[1] - intersectionBoundingBox[1];\n const deltaX2 = boundingBox2[0] - intersectionBoundingBox[0];\n const deltaY2 = boundingBox2[1] - intersectionBoundingBox[1];\n\n for (\n let i = 0;\n i < intersectionBoundingBoxWidth * intersectionBoundingBoxHeight;\n i++\n ) {\n const x = i % intersectionBoundingBoxWidth;\n const y = Math.floor(i / intersectionBoundingBoxWidth);\n const b1x = x - deltaX1;\n const b1y = y - deltaY1;\n const b2x = x - deltaX2;\n const b2y = y - deltaY2;\n\n const b1i = b1x + b1y * (boundingBox1[2] - boundingBox1[0]);\n const b2i = b2x + b2y * (boundingBox2[2] - boundingBox2[0]);\n if (\n this.isInBoundingBox(b1x, b1y, boundingBox1) &&\n decodedMask1[b1i] === 255 &&\n this.isInBoundingBox(b2x, b2y, boundingBox2) &&\n decodedMask2[b2i] === 255\n ) {\n newMaskData.push(255);\n } else {\n newMaskData.push(0);\n }\n }\n\n if (!newMaskData.length)\n return [convertToDataArray(8, []) as Uint8Array, [0, 0, 0, 0]];\n\n return [\n convertToDataArray(8, newMaskData) as Uint8Array,\n intersectionBoundingBox,\n ];\n }\n\n /**\n * Invert the selected annotation area\n * @param selectedMask\n * @param selectedBoundingBox\n * @returns Bounding box and encodedMask of the inverted annotation area\n */\n public invert(\n selectedMask: DataArray,\n selectedBoundingBox: [number, number, number, number]\n ): [Uint8Array, [number, number, number, number]] {\n const encodedMask = selectedMask;\n\n const imageWidth = this.image.width;\n const imageHeight = this.image.height;\n\n // Find min and max boundary points when computing the encodedMask.\n const invertedBoundingBox: [number, number, number, number] = [\n imageWidth,\n imageHeight,\n 0,\n 0,\n ];\n\n const invertedMask = new ImageJS.Image(imageWidth, imageHeight, {\n components: 1,\n alpha: 0,\n });\n for (let x = 0; x < imageWidth; x++) {\n for (let y = 0; y < imageHeight; y++) {\n const x_encodedMask = x - selectedBoundingBox[0];\n const y_encodedMask = y - selectedBoundingBox[1];\n const value =\n encodedMask[\n x_encodedMask +\n y_encodedMask * (selectedBoundingBox[2] - selectedBoundingBox[0])\n ];\n if (\n value > 0 &&\n this.isInBoundingBox(\n x_encodedMask,\n y_encodedMask,\n selectedBoundingBox\n )\n ) {\n invertedMask.setPixelXY(x, y, [0]);\n } else {\n invertedMask.setPixelXY(x, y, [255]);\n if (x < invertedBoundingBox[0]) {\n invertedBoundingBox[0] = x;\n } else if (x > invertedBoundingBox[2]) {\n invertedBoundingBox[2] = x + 1;\n }\n if (y < invertedBoundingBox[1]) {\n invertedBoundingBox[1] = y;\n } else if (y > invertedBoundingBox[3]) {\n invertedBoundingBox[3] = y + 1;\n }\n }\n }\n }\n\n // Crop the encodedMask using the new bounding box.\n const croppedInvertedMask = invertedMask.crop({\n x: invertedBoundingBox[0],\n y: invertedBoundingBox[1],\n width: invertedBoundingBox[2] - invertedBoundingBox[0],\n height: invertedBoundingBox[3] - invertedBoundingBox[1],\n });\n\n return [\n 
convertToDataArray(8, croppedInvertedMask.data) as Uint8Array,\n invertedBoundingBox,\n ];\n }\n\n /**\n * Subtract the areas selected by the current AnnotationTool (subtrahend) from the selected annotation (minuend).\n * [Difference = Minuend - Subtrahend]\n * @param encodedMinuendData\n * @param minuendBoundingBox\n * @returns Bounding box and encodedMask of the difference of the annotation areas.\n */\n public subtract(\n encodedMinuendData: DataArray,\n minuendBoundingBox: [number, number, number, number]\n ): [Uint8Array, [number, number, number, number]] {\n if (!this._decodedMask || !this._boundingBox)\n return [convertToDataArray(8, []) as Uint8Array, [0, 0, 0, 0]];\n\n // decode the selected annotation data\n const minuendData = encodedMinuendData;\n // decode the the subtrahend data\n const subtrahendData = this._decodedMask;\n\n const subtrahendBoundingBox = this._boundingBox;\n\n const resultingBoundingBox = [\n subtrahendBoundingBox[2] > minuendBoundingBox[0] &&\n subtrahendBoundingBox[0] < minuendBoundingBox[0] &&\n subtrahendBoundingBox[1] < minuendBoundingBox[1] &&\n subtrahendBoundingBox[3] > minuendBoundingBox[3]\n ? subtrahendBoundingBox[2]\n : minuendBoundingBox[0],\n subtrahendBoundingBox[3] > minuendBoundingBox[1] &&\n subtrahendBoundingBox[1] < minuendBoundingBox[1] &&\n subtrahendBoundingBox[0] < minuendBoundingBox[0] &&\n subtrahendBoundingBox[2] > minuendBoundingBox[2]\n ? subtrahendBoundingBox[3]\n : minuendBoundingBox[1],\n subtrahendBoundingBox[0] < minuendBoundingBox[2] &&\n subtrahendBoundingBox[2] > minuendBoundingBox[2] &&\n subtrahendBoundingBox[1] < minuendBoundingBox[1] &&\n subtrahendBoundingBox[3] > minuendBoundingBox[3]\n ? subtrahendBoundingBox[0]\n : minuendBoundingBox[2],\n subtrahendBoundingBox[1] < minuendBoundingBox[3] &&\n subtrahendBoundingBox[3] > minuendBoundingBox[3] &&\n subtrahendBoundingBox[0] < minuendBoundingBox[0] &&\n subtrahendBoundingBox[2] > minuendBoundingBox[2]\n ? 
subtrahendBoundingBox[1]\n : minuendBoundingBox[3],\n ] as [number, number, number, number];\n\n const resultingBoundingBoxWidth =\n resultingBoundingBox[2] - resultingBoundingBox[0];\n const resultingBoundingBoxHeight =\n resultingBoundingBox[3] - resultingBoundingBox[1];\n\n // Check if bounding box is valid: width and height of the intersection must be positive.\n if (resultingBoundingBoxWidth < 0 || resultingBoundingBoxHeight < 0) {\n return [convertToDataArray(8, []) as Uint8Array, [0, 0, 0, 0]];\n }\n\n const resultingMaskData: number[] = [];\n const deltaX1 = minuendBoundingBox[0] - resultingBoundingBox[0];\n const deltaY1 = minuendBoundingBox[1] - resultingBoundingBox[1];\n const deltaX2 = subtrahendBoundingBox[0] - resultingBoundingBox[0];\n const deltaY2 = subtrahendBoundingBox[1] - resultingBoundingBox[1];\n\n for (\n let i = 0;\n i < resultingBoundingBoxWidth * resultingBoundingBoxHeight;\n i++\n ) {\n const x = i % resultingBoundingBoxWidth;\n const y = Math.floor(i / resultingBoundingBoxWidth);\n const b1x = x - deltaX1;\n const b1y = y - deltaY1;\n const b2x = x - deltaX2;\n const b2y = y - deltaY2;\n\n const b1i = b1x + b1y * (minuendBoundingBox[2] - minuendBoundingBox[0]);\n const b2i =\n b2x + b2y * (subtrahendBoundingBox[2] - subtrahendBoundingBox[0]);\n if (\n this.isInBoundingBox(b1x, b1y, minuendBoundingBox) &&\n minuendData[b1i] === 255 &&\n this.isInBoundingBox(b2x, b2y, subtrahendBoundingBox) &&\n subtrahendData[b2i] === 255\n ) {\n resultingMaskData.push(0);\n } else {\n resultingMaskData.push(minuendData[b1i]);\n }\n }\n\n return [\n convertToDataArray(8, resultingMaskData) as Uint8Array,\n resultingBoundingBox,\n ];\n }\n}\n","import { Image } from \"image-js\";\nimport { AnnotationTool } from \"./AnnotationTool\";\nimport { AnnotationState } from \"../enums\";\n\n/*\n * Rather than having operator possibly undefined,\n * in `useAnnotationTool` we have a \"Dummy\" Annotation Tool\n * which implements the AnnotationTool Abstract class\n * but via dummy methods, or minimal methods\n * That way we don't have to do annotationTool checks\n * in the Stage, its children component and its hooks\n */\nexport class BlankAnnotationTool extends AnnotationTool {\n constructor(image?: Image) {\n const defaultImage = image ?? 
new Image();\n super(defaultImage);\n }\n\n deselect() {\n this.origin = undefined;\n this.annotation = undefined;\n\n this.setBlank();\n }\n\n onMouseDown(position: { x: number; y: number }) {\n if (this.annotationState === AnnotationState.Annotated) return;\n\n this.setAnnotating();\n }\n\n onMouseMove(position: { x: number; y: number }) {\n if (this.annotationState !== AnnotationState.Annotating) return;\n }\n\n onMouseUp(position: { x: number; y: number }) {\n if (this.annotationState !== AnnotationState.Annotating) return;\n\n this.setAnnotated();\n }\n}\n","import * as ImageJS from \"image-js\";\nimport PriorityQueue from \"ts-priority-queue\";\n\nimport { AnnotationTool } from \"./AnnotationTool\";\nimport { Point } from \"../types\";\nimport { AnnotationState } from \"../enums\";\n\nexport class ColorAnnotationTool extends AnnotationTool {\n roiContour?: ImageJS.Image;\n roiMask?: ImageJS.Image;\n roiManager?: ImageJS.RoiManager;\n offset: { x: number; y: number } = { x: 0, y: 0 };\n overlayData: string = \"\";\n points: Array = [];\n origin: { x: number; y: number } = { x: 0, y: 0 };\n tolerance: number = 1;\n toleranceMap?: ImageJS.Image;\n floodMap?: ImageJS.Image;\n toleranceQueue: PriorityQueue> = new PriorityQueue({\n comparator: function (a: Array, b: Array) {\n return a[2] - b[2];\n },\n });\n toolTipPosition?: { x: number; y: number };\n seen: Set = new Set();\n\n deselect() {\n this.overlayData = \"\";\n this.roiManager = undefined;\n this.roiMask = undefined;\n this.points = [];\n this.origin = { x: 0, y: 0 };\n this.toolTipPosition = undefined;\n this.tolerance = 1;\n this.toleranceMap = undefined;\n this.toleranceQueue.clear();\n this.seen.clear();\n this.annotation = undefined;\n this.setBlank();\n }\n\n onMouseDown(position: { x: number; y: number }) {\n this.origin = position;\n this.toolTipPosition = position;\n\n this.toleranceMap = this.createToleranceMap({\n x: Math.floor(position.x),\n y: Math.floor(position.y),\n image: this.image,\n });\n\n const empty = new Array(this.image.height * this.image.width).fill(\n Infinity\n );\n\n this.floodMap = new ImageJS.Image(\n this.image.width,\n this.image.height,\n empty,\n {\n alpha: 0,\n components: 1,\n }\n );\n\n this.toleranceQueue.clear();\n this.seen.clear();\n this.roiManager = this.floodMap.getRoiManager();\n\n this.toleranceQueue.queue([\n Math.floor(position.x),\n Math.floor(position.y),\n 0,\n ]);\n\n const idx =\n Math.floor(position.x) + Math.floor(position.y) * this.image.width;\n\n this.seen.add(idx);\n this.updateOverlay(position);\n this.setAnnotating();\n }\n\n onMouseMove(position: { x: number; y: number }) {\n if (this.annotationState === AnnotationState.Annotating) {\n const diff = Math.ceil(\n Math.hypot(position.x - this.origin!.x, position.y - this.origin!.y)\n );\n if (diff !== this.tolerance) {\n this.tolerance = diff;\n this.updateOverlay(this.origin);\n }\n this.toolTipPosition = position;\n }\n }\n\n onMouseUp(position: { x: number; y: number }) {\n if (this.annotationState !== AnnotationState.Annotating) return;\n if (!this.roiManager || !this.roiMask) return;\n\n // @ts-ignore\n this.roiManager.fromMask(this.roiMask);\n // @ts-ignore\n this.roiMask = this.roiManager.getMasks()[0];\n //@ts-ignore\n const roi = this.roiManager.getRois()[0];\n\n this._boundingBox = [roi.minX, roi.minY, roi.maxX + 1, roi.maxY + 1];\n\n if (!this.roiMask || !this.boundingBox) return;\n\n const boundingBoxWidth = this.boundingBox[2] - this.boundingBox[0];\n const boundingBoxHeight = this.boundingBox[3] - 
this.boundingBox[1];\n\n if (!boundingBoxWidth || !boundingBoxHeight) return;\n\n //mask should be the whole image, not just the ROI\n const imgMask = new ImageJS.Image(boundingBoxWidth, boundingBoxHeight, {\n components: 1,\n alpha: 0,\n });\n\n for (let x = 0; x < boundingBoxWidth; x++) {\n for (let y = 0; y < boundingBoxHeight; y++) {\n //@ts-ignore\n if (this.roiMask.getBitXY(x, y)) {\n imgMask.setPixelXY(x, y, [255]);\n }\n }\n }\n\n this.decodedMask = imgMask.data as Uint8Array;\n\n this.setAnnotated();\n }\n\n private createToleranceMap = ({\n x,\n y,\n image,\n }: {\n x: number;\n y: number;\n image: ImageJS.Image;\n }) => {\n const tol: Array = [];\n\n const color = image.getPixelXY(x, y);\n\n if (image.data.length === image.width * image.height * 3) {\n //RGB image\n for (let i = 0; i < image.data.length; i += 3) {\n const red = Math.abs(image.data[i] - color[0]);\n const green = Math.abs(image.data[i + 1] - color[1]);\n const blue = Math.abs(image.data[i + 2] - color[2]);\n tol.push(Math.floor((red + green + blue) / 3));\n }\n } else if (image.data.length === image.width * image.height * 4) {\n //RGBA image\n for (let i = 0; i < image.data.length; i += 4) {\n const red = Math.abs(image.data[i] - color[0]);\n const green = Math.abs(image.data[i + 1] - color[1]);\n const blue = Math.abs(image.data[i + 2] - color[2]);\n tol.push(Math.floor((red + green + blue) / 3));\n }\n } else if (image.data.length === image.width * image.height) {\n //greyscale\n for (let i = 0; i < image.data.length; i++) {\n const grey = Math.abs(image.data[i] - color[0]);\n tol.push(Math.floor((grey / image.maxValue) * 255));\n }\n }\n\n return new ImageJS.Image(image.width, image.height, tol, {\n alpha: 0,\n components: 1,\n });\n };\n\n // Expand a watershed map until the desired tolerance is reached.\n private createFloodMap = (\n floodMap: ImageJS.Image,\n toleranceMap: ImageJS.Image,\n queue: PriorityQueue>,\n tolerance: number,\n maxTol: number,\n seen: Set\n ) => {\n const dirs = [\n [1, 0],\n [0, 1],\n [-1, 0],\n [0, -1],\n ];\n while (queue.length > 0 && queue.peek()[2] <= tolerance) {\n let currentPoint = queue.dequeue();\n maxTol = Math.max(currentPoint[2], maxTol);\n floodMap.setPixelXY(currentPoint[0], currentPoint[1], [maxTol]);\n for (let dir of dirs) {\n let newX = currentPoint[0] + dir[0];\n let newY = currentPoint[1] + dir[1];\n let idx = newX + newY * toleranceMap.width;\n if (\n !seen.has(idx) &&\n newX >= 0 &&\n newY >= 0 &&\n newX < toleranceMap.width &&\n newY < toleranceMap.height\n ) {\n queue.queue([newX, newY, toleranceMap.getPixelXY(newX, newY)[0]]);\n seen.add(idx);\n }\n }\n }\n };\n\n private updateOverlay(position: { x: number; y: number }) {\n this.createFloodMap(\n this.floodMap!,\n this.toleranceMap!,\n this.toleranceQueue,\n this.tolerance,\n 0,\n this.seen\n );\n // Make a threshold mask\n this.roiMask = this.floodMap!.mask({\n threshold: this.tolerance,\n invert: true,\n });\n\n if (!this.roiMask) return;\n\n this.overlayData = this.colorOverlay(\n this.roiMask,\n this.offset,\n position,\n this.image.width,\n this.image.height\n );\n }\n\n private colorOverlay(\n mask: ImageJS.Image,\n offset: { x: number; y: number },\n position: { x: number; y: number },\n width: number,\n height: number\n ) {\n let overlay = new ImageJS.Image(\n width,\n height,\n new Uint8Array(width * height * 4),\n { alpha: 1 }\n );\n\n // roiPaint doesn't respect alpha, so we'll paint it ourselves.\n for (let x = 0; x < mask.width; x++) {\n for (let y = 0; y < mask.height; y++) {\n if 
(mask.getBitXY(x, y)) {\n overlay.setPixelXY(x + offset.x, y + offset.y, [237, 0, 0, 150]);\n }\n }\n }\n\n // Set the origin point to white, for visibility.\n overlay.setPixelXY(position.x, position.y, [255, 255, 255, 255]);\n\n return overlay.toDataURL(\"image/png\", { useCanvas: true });\n }\n}\n","import { AnnotationState } from \"../enums\";\nimport { Point } from \"../types\";\nimport { AnnotationTool } from \"./AnnotationTool\";\n\nexport class EllipticalAnnotationTool extends AnnotationTool {\n center?: Point;\n radius?: { x: number; y: number };\n\n deselect() {\n this.center = undefined;\n this.origin = undefined;\n this.radius = undefined;\n this.annotation = undefined;\n this.setBlank();\n }\n\n onMouseDown(position: { x: number; y: number }) {\n if (this.annotationState === AnnotationState.Annotated) return;\n\n if (!this.radius) {\n this.origin = position;\n\n this.setAnnotating();\n }\n }\n\n onMouseMove(position: { x: number; y: number }) {\n if (this.annotationState === AnnotationState.Annotated) return;\n\n this.resize(position);\n }\n\n onMouseUp(position: { x: number; y: number }) {\n if (this.annotationState !== AnnotationState.Annotating || !this.radius)\n return;\n this.points = this.convertToPoints();\n\n this.setBoundingBoxFromContours(this.points);\n\n this.setAnnotationMaskFromPoints();\n\n if (!this.decodedMask) return;\n\n this.setAnnotated();\n }\n\n private convertToPoints() {\n if (!this.radius || !this.origin || !this.center) return [];\n\n const centerX = Math.round(this.center.x);\n const centerY = Math.round(this.center.y);\n const points: Array = [];\n\n const r = (\n theta: number,\n a: number = this.radius!.x,\n b: number = this.radius!.y\n ) => {\n return (\n (a * b) /\n Math.sqrt(b ** 2 * Math.cos(theta) ** 2 + a ** 2 * Math.sin(theta) ** 2)\n );\n };\n\n for (let theta = 0; theta <= 2 * Math.PI; theta += 0.05) {\n const x = r(theta) * Math.cos(theta) + centerX;\n const y = r(theta) * Math.sin(theta) + centerY;\n points.push({ x: x, y: y });\n }\n\n return points;\n }\n\n private resize(position: Point) {\n if (this.origin) {\n this.center = {\n x: (position.x - this.origin.x) / 2 + this.origin.x,\n y: (position.y - this.origin.y) / 2 + this.origin.y,\n };\n\n this.radius = {\n x: Math.abs((position.x - this.origin.x) / 2),\n y: Math.abs((position.y - this.origin.y) / 2),\n };\n }\n }\n}\n","import { AnnotationState } from \"../enums\";\nimport { Point } from \"../types\";\nimport { AnnotationTool } from \"./AnnotationTool\";\n\nexport class LassoAnnotationTool extends AnnotationTool {\n buffer: Array = [];\n points: Array = [];\n\n deselect() {\n this.annotation = undefined;\n\n this.anchor = undefined;\n this.buffer = [];\n this.origin = undefined;\n this.points = [];\n\n this.setBlank();\n }\n\n onMouseDown(position: { x: number; y: number }) {\n if (this.annotationState === AnnotationState.Annotated) return;\n\n if (this.annotationState === AnnotationState.Blank) {\n this.origin = position;\n this.buffer = [position, this.origin];\n\n this.setAnnotating();\n }\n }\n\n onMouseMove(position: { x: number; y: number }) {\n if (this.annotationState !== AnnotationState.Annotating) return;\n if (\n Math.abs(this.buffer[this.buffer.length - 2].x - position.x) >= 1 ||\n Math.abs(this.buffer[this.buffer.length - 2].y - position.y) >= 1\n ) {\n this.buffer = [\n ...this.buffer.slice(0, this.buffer.length - 1),\n position,\n this.origin!,\n ];\n }\n }\n\n onMouseUp(position: { x: number; y: number }) {\n if (this.annotationState !== 
AnnotationState.Annotating || !this.origin) {\n return;\n }\n if (this.buffer.length < 6) {\n this.deselect();\n return;\n } else {\n this.points = this.buffer;\n\n this.setBoundingBoxFromContours(this.points);\n\n this.setAnnotationMaskFromPoints();\n\n if (!this.decodedMask) {\n return;\n }\n\n this.buffer = [];\n\n this.setAnnotated();\n }\n }\n}\n","import * as ImageJS from \"image-js\";\nimport { AnnotationTool } from \"./AnnotationTool\";\nimport {\n createPathFinder,\n getDistance,\n makeGraph,\n PiximiGraph,\n pointsAreEqual,\n getIdx,\n} from \"utils/annotator\";\nimport { Point } from \"../types\";\nimport { AnnotationState } from \"../enums\";\n\nexport class MagneticAnnotationTool extends AnnotationTool {\n buffer: Array = [];\n factor: number;\n graph?: PiximiGraph;\n path: Array = [];\n pathfinder?: { find: (fromId: number, toId: number) => any };\n points: Array = [];\n previous: Array = [];\n response?: ImageJS.Image;\n\n constructor(image: ImageJS.Image, factor: number = 0.5) {\n super(image);\n\n this.factor = factor;\n\n this.filter();\n\n if (!this.image || !this.response) return;\n\n this.graph = makeGraph(\n this.response.data,\n this.response.height,\n this.response.width\n );\n\n this.pathfinder = createPathFinder(\n this.graph,\n this.image.width * factor,\n factor\n );\n }\n\n deselect() {\n this.annotation = undefined;\n\n this.anchor = undefined;\n this.buffer = [];\n this.graph = undefined;\n this.origin = undefined;\n this.points = [];\n this.previous = [];\n\n this.setBlank();\n }\n\n onMouseDown(position: { x: number; y: number }) {\n if (this.annotationState === AnnotationState.Annotated) return;\n\n if (this.buffer && this.buffer.length === 0) {\n if (!this.origin) {\n this.origin = position;\n }\n\n this.setAnnotating();\n }\n }\n\n onMouseMove(position: { x: number; y: number }) {\n if (\n !this.image ||\n !this.pathfinder ||\n this.annotationState !== AnnotationState.Annotating\n )\n return;\n if (this.anchor) {\n const source = getIdx(\n this.image.width * this.factor,\n 1,\n Math.floor(this.anchor.x * this.factor),\n Math.floor(this.anchor.y * this.factor),\n 0\n );\n\n const destination = getIdx(\n this.image.width * this.factor,\n 1,\n Math.floor(position.x * this.factor),\n Math.floor(position.y * this.factor),\n 0\n );\n\n this.path = this.pathfinder.find(source, destination).flat();\n\n if (!pointsAreEqual(this.buffer.at(-1)!, this.anchor)) {\n this.buffer.pop();\n }\n\n this.buffer = [...this.previous, this.anchor, ...this.path];\n\n return;\n }\n\n if (this.origin) {\n const source = getIdx(\n this.image.width * this.factor,\n 1,\n Math.floor(this.origin.x * this.factor),\n Math.floor(this.origin.y * this.factor),\n 0\n );\n\n const destination = getIdx(\n this.image.width * this.factor,\n 1,\n Math.floor(position.x * this.factor),\n Math.floor(position.y * this.factor),\n 0\n );\n\n this.path = this.pathfinder.find(source, destination).flat();\n\n this.buffer.pop();\n this.buffer.pop();\n\n this.buffer = [this.origin, ...this.path];\n }\n }\n\n onMouseUp(position: { x: number; y: number }) {\n if (this.annotationState !== AnnotationState.Annotating) return;\n\n if (\n this.connected(position) &&\n this.origin &&\n this.buffer &&\n this.buffer.length > 0\n ) {\n this.buffer = [...this.buffer, position, this.origin];\n\n this.points = this.buffer;\n\n this.setBoundingBoxFromContours(this.points);\n\n this.setAnnotationMaskFromPoints();\n\n if (!this.decodedMask) return;\n\n this.buffer = [];\n\n this.setAnnotated();\n\n return;\n }\n\n if 
(this.anchor && this.image) {\n const source = getIdx(\n this.image.width * this.factor,\n 1,\n Math.floor(this.anchor.x * this.factor),\n Math.floor(this.anchor.y * this.factor),\n 0\n );\n\n const destination = getIdx(\n this.image.width * this.factor,\n 1,\n Math.floor(this.buffer.at(-1)!.x * this.factor),\n Math.floor(this.buffer.at(-1)!.y * this.factor),\n 0\n );\n\n if (!this.pathfinder) return;\n\n this.path = this.pathfinder.find(source, destination).flat();\n\n this.buffer.pop();\n\n this.buffer = [...this.previous, ...this.path];\n\n this.previous = [...this.previous, ...this.path];\n\n this.anchor = this.buffer.at(-1);\n\n return;\n }\n\n if (this.origin && this.buffer.length > 0) {\n if (!this.image || !this.origin || !this.pathfinder) return;\n\n this.anchor = this.buffer.at(-1);\n\n const source = getIdx(\n this.image.width * this.factor,\n 1,\n Math.floor(this.origin.x * this.factor),\n Math.floor(this.origin.y * this.factor),\n 0\n );\n\n const destination = getIdx(\n this.image.width * this.factor,\n 1,\n Math.floor(this.buffer.at(-1)!.x * this.factor),\n Math.floor(this.buffer.at(-1)!.y * this.factor),\n 0\n );\n\n this.path = this.pathfinder.find(source, destination).flat();\n\n this.buffer = [this.origin, ...this.path];\n\n this.previous = [...this.previous, this.origin, ...this.path];\n return;\n }\n }\n\n private connected(\n position: { x: number; y: number },\n threshold: number = 4\n ): boolean | undefined {\n if (!this.origin) return undefined;\n\n const distance = getDistance(position, this.origin);\n\n return distance < threshold;\n }\n\n private filter() {\n if (!this.image) return;\n\n const options = { factor: this.factor };\n\n //scharr filter?\n this.response = this.image.resize(options).grey().sobelFilter();\n }\n}\n","import { AnnotationState } from \"../enums\";\nimport { AnnotationTool } from \"./AnnotationTool\";\nimport { drawRectangle } from \"utils/annotator\";\n\nexport class RectangularAnnotationTool extends AnnotationTool {\n width?: number;\n height?: number;\n\n deselect() {\n this.origin = undefined;\n\n this.width = undefined;\n this.height = undefined;\n this.annotation = undefined;\n\n this.setBlank();\n }\n\n onMouseDown(position: { x: number; y: number }) {\n if (this.annotationState === AnnotationState.Annotated) return;\n if (!this.width) {\n this.origin = position;\n this.setAnnotating();\n }\n }\n\n onMouseMove(position: { x: number; y: number }) {\n if (this.annotationState !== AnnotationState.Annotating) return;\n this.resize(position);\n }\n\n onMouseUp(position: { x: number; y: number }) {\n if (\n this.annotationState !== AnnotationState.Annotating ||\n !(this.width && this.height)\n )\n return;\n\n if (Math.abs(this.width * this.height) < 10) {\n return;\n }\n this.resize(position);\n this.points = drawRectangle(this.origin, this.width, this.height);\n this._boundingBox = this.computeBoundingBox();\n\n const decodedMask = this.convertToMask();\n if (!decodedMask) return;\n\n this._decodedMask = decodedMask;\n this.setAnnotated();\n }\n\n private convertToMask() {\n if (!this.points || !this.boundingBox) return;\n\n const p1 = this.points[0];\n const p2 = this.points[1];\n\n return new Uint8Array((p2.x - p1.x) * (p2.y - p1.y)).fill(255);\n }\n\n private resize(position: { x: number; y: number }) {\n if (this.origin) {\n this.width = position.x - this.origin.x;\n this.height = position.y - this.origin.y;\n }\n }\n}\n","import {\n LayersModel,\n loadLayersModel,\n Tensor,\n Tensor3D,\n Rank,\n train,\n tidy,\n image,\n browser,\n 
scalar,\n} from \"@tensorflow/tfjs\";\nimport * as ImageJS from \"image-js\";\n\nimport { RectangularAnnotationTool } from \"./RectangularAnnotationTool\";\nimport { Point } from \"../types\";\nimport { AnnotationState } from \"../enums\";\n\nexport class ObjectAnnotationTool extends RectangularAnnotationTool {\n graph?: LayersModel;\n prediction?: ImageJS.Image;\n points: Array = [];\n // @ts-ignore\n roi?: ImageJS.Roi;\n offset?: { x: number; y: number };\n output?: ImageJS.Image;\n\n deselect() {\n this.prediction = undefined;\n this.points = [];\n this.roi = undefined;\n this.offset = undefined;\n this.output = undefined;\n\n this.origin = undefined;\n this.width = undefined;\n this.annotation = undefined;\n\n this.setBlank();\n }\n\n async onMouseUp(position: { x: number; y: number }) {\n if (this.annotationState !== AnnotationState.Annotating) return;\n\n await this.predict();\n }\n\n static async compile(image: ImageJS.Image) {\n const instance = new ObjectAnnotationTool(image);\n\n const pathname =\n \"https://raw.githubusercontent.com/zaidalyafeai/HostedModels/master/unet-128/model.json\";\n\n instance.graph = await loadLayersModel(pathname);\n\n const optimizer = train.adam();\n\n instance.graph.compile({\n optimizer: optimizer,\n loss: \"categoricalCrossentropy\",\n metrics: [\"accuracy\"],\n });\n\n return instance;\n }\n\n private async predict() {\n if (!this.image || !this.origin || !this.width || !this.height) return;\n\n const width = Math.round(this.width);\n const height = Math.round(this.height);\n\n const crop = this.image.crop({\n x: this.origin.x,\n y: this.origin.y,\n width: width,\n height: height,\n });\n\n const prediction = tidy(() => {\n if (crop) {\n const cropped: Tensor3D = browser.fromPixels(crop.getCanvas());\n\n const size: [number, number] = [128, 128];\n const resized = image.resizeBilinear(cropped, size);\n const standardized = resized.div(scalar(255));\n const batch = standardized.expandDims(0);\n\n if (!this.height || !this.width || !this.origin) return;\n\n if (this.graph) {\n const prediction = this.graph.predict(batch) as Tensor;\n\n return prediction\n .squeeze([0])\n .tile([1, 1, 3])\n .sub(0.3)\n .sign()\n .relu()\n .resizeBilinear([height, width])\n .pad([\n [this.origin.y, this.image.height - (this.origin.y + height)],\n [this.origin.x, this.image.width - (this.origin.x + width)],\n [0, 0],\n ]);\n }\n }\n });\n\n if (prediction) {\n const clamped: Uint8ClampedArray = await browser.toPixels(\n prediction as Tensor3D\n );\n // .then(async (clamped) => {\n this.output = new ImageJS.Image({\n width: this.image.width,\n height: this.image.height,\n data: clamped,\n });\n\n const greyMask = this.output.grey();\n\n //compute bounding box with ROI manager\n const roiManager = this.image.getRoiManager();\n //@ts-ignore\n const binaryMask = greyMask.mask({\n algorithm: \"threshold\",\n threshold: 1,\n });\n // @ts-ignore\n roiManager.fromMask(binaryMask);\n // @ts-ignore\n const rois = roiManager.getRois();\n const roi = rois.sort((a: any, b: any) => {\n return b.surface - a.surface;\n })[1]; // take the second roi because the first one will be of the size of the image,the second one is the actual largest roi\n this._boundingBox = [roi.minX, roi.minY, roi.maxX, roi.maxY];\n\n //threshold\n const thresholded = greyMask.data.map((i: number) => (i > 1 ? 
255 : 0)); //threshold necessary because output of NN is not binary\n\n // @ts-ignore\n this.decodedMask = thresholded;\n\n this.width = undefined;\n }\n\n this.setAnnotated();\n }\n}\n","import * as ImageJS from \"image-js\";\n\nimport { AnnotationTool } from \"./AnnotationTool\";\n\nimport { connectPoints, computeBoundingBoxFromContours } from \"utils/annotator\";\nimport { Point } from \"../types\";\nimport { AnnotationState } from \"../enums\";\n\nexport class PenAnnotationTool extends AnnotationTool {\n brushSize: number = 8;\n buffer: Array = [];\n points: Array = [];\n\n deselect() {\n this.buffer = [];\n this.points = [];\n this.annotation = undefined;\n\n this.setBlank();\n }\n\n onMouseDown(position: { x: number; y: number }) {\n if (this.annotationState === AnnotationState.Annotated) return;\n\n this.buffer = [...this.buffer, position];\n\n this.setAnnotating();\n }\n\n onMouseMove(position: { x: number; y: number }) {\n if (this.annotationState !== AnnotationState.Annotating) return;\n\n this.buffer = [...this.buffer, position];\n }\n\n onMouseUp(position: { x: number; y: number }) {\n if (this.annotationState !== AnnotationState.Annotating) return;\n\n this.points = this.buffer;\n\n const circlesData = this.computeCircleData();\n\n if (!circlesData) {\n this.deselect();\n return;\n }\n\n this.decodedMask = circlesData;\n\n this.setAnnotated();\n }\n\n private computeCircleData(): Uint8Array | undefined {\n const canvas = document.createElement(\"canvas\");\n canvas.width = this.image.width;\n canvas.height = this.image.height;\n const ctx = canvas.getContext(\"2d\");\n\n if (!ctx) return undefined;\n\n let connectedPoints: Array;\n if (this.points.length === 1) {\n // Handling the case in which a single point has been clicked.\n connectedPoints = this.points;\n } else {\n connectedPoints = connectPoints(this.points);\n }\n // Compute bounding box coordinates.\n // don't use this this.setBoundingBoxFromContours here\n const boundingBox = computeBoundingBoxFromContours(connectedPoints);\n\n // Make sure the bounding box is valid.\n if (boundingBox.some((x) => Number.isNaN(x))) return undefined;\n\n this._boundingBox = [\n Math.max(0, Math.round(boundingBox[0] - this.brushSize)),\n Math.max(0, Math.round(boundingBox[1] - this.brushSize)),\n Math.min(this.image.width, Math.round(boundingBox[2] + this.brushSize)),\n Math.min(this.image.height, Math.round(boundingBox[3] + this.brushSize)),\n ];\n\n // Compute mask by drawing circles over canvas.\n connectedPoints.forEach((point) => {\n ctx.beginPath();\n ctx.arc(\n Math.round(point.x),\n Math.round(point.y),\n this.brushSize,\n 0,\n Math.PI * 2,\n true\n );\n ctx.fill();\n });\n\n const rgbMask = ImageJS.Image.fromCanvas(canvas);\n\n const width = this._boundingBox[2] - this._boundingBox[0];\n const height = this._boundingBox[3] - this._boundingBox[1];\n if (width <= 0 || height <= 0) {\n return undefined;\n }\n\n const croppedRgbMask = rgbMask.crop({\n x: this._boundingBox[0],\n y: this._boundingBox[1],\n width: width,\n height: height,\n });\n\n // @ts-ignore: getChannel API is not exposed\n const thresholdMaskImg = this.thresholdMask(croppedRgbMask.getChannel(3));\n return thresholdMaskImg.data as Uint8Array;\n }\n\n private thresholdMask = (mask: ImageJS.Image) => {\n for (let x = 0; x < mask.width; x++) {\n for (let y = 0; y < mask.height; y++) {\n if (mask.getPixelXY(x, y)[0] > 1) {\n mask.setPixelXY(x, y, [255]);\n } else {\n mask.setPixelXY(x, y, [0]);\n }\n }\n }\n return mask;\n };\n}\n","import { AnnotationState } from 
\"../enums\";\nimport { Point } from \"../types\";\nimport { AnnotationTool } from \"./AnnotationTool\";\nimport { getDistance, pointsAreEqual } from \"utils/annotator\";\n\nexport class PolygonalAnnotationTool extends AnnotationTool {\n buffer: Array = [];\n points: Array = [];\n private _initialMove: boolean = true;\n\n set newAnchor(position: Point | undefined) {\n if (position) {\n this._initialMove = true;\n this.anchor = position;\n } else {\n this._initialMove = true;\n this.anchor = undefined;\n }\n }\n\n set terminal(position: Point) {\n if (this._initialMove) {\n this._initialMove = false;\n this.buffer.push(position);\n } else {\n this.buffer[this.buffer.length - 1] = position;\n }\n }\n\n deselect() {\n this.annotation = undefined;\n this.newAnchor = undefined;\n this.buffer = [];\n this.origin = undefined;\n this.points = [];\n this.setBlank();\n }\n\n onMouseDown(position: { x: number; y: number }) {\n if (this.annotationState === AnnotationState.Annotated) return;\n\n if (!this.origin) {\n this.origin = position;\n this.buffer.push(position);\n\n this.setAnnotating();\n }\n }\n\n onMouseMove(position: { x: number; y: number }) {\n if (this.annotationState !== AnnotationState.Annotating) return;\n if (this.anchor && pointsAreEqual(this.anchor, position)) return;\n\n this.terminal = position;\n }\n\n onMouseUp(position: { x: number; y: number }) {\n if (this.annotationState !== AnnotationState.Annotating) return;\n\n if (\n this.connected(position) &&\n this.anchor &&\n this.origin &&\n this.buffer &&\n this.buffer.length > 0\n ) {\n this.buffer = [...this.buffer, position, this.origin];\n this.points = this.buffer;\n\n this.setBoundingBoxFromContours(this.points);\n\n this.setAnnotationMaskFromPoints();\n\n if (!this.decodedMask) return;\n\n this.buffer = [];\n this.newAnchor = undefined;\n this.origin = undefined;\n\n this.setAnnotated();\n } else {\n this.newAnchor = this.buffer.at(-1);\n }\n }\n\n private connected(\n position: Point,\n threshold: number = 4\n ): boolean | undefined {\n if (!this.origin) return undefined;\n\n const distance = getDistance(position, this.origin);\n\n return distance < threshold;\n }\n}\n","import * as ImageJS from \"image-js\";\n\nimport { AnnotationTool } from \"./AnnotationTool\";\n\nimport { slic } from \"utils/annotator\";\nimport { AnnotationState } from \"../enums\";\n\nexport class QuickAnnotationTool extends AnnotationTool {\n regionSize?: number;\n colorMasks?: Array;\n currentSuperpixels: Set = new Set();\n lastSuperpixel: number = 0;\n superpixels?: Int32Array;\n superpixelsMap?: { [key: number]: Array };\n currentMask?: ImageJS.Image;\n map?: Uint8Array | Uint8ClampedArray;\n startAnnotating = false;\n throttleTimer: boolean = false;\n\n _initializeSuperpixelse(regionSize: number) {\n this.regionSize = Math.round(regionSize);\n\n const superpixels = this.computeSuperpixels();\n\n if (!superpixels.length) return;\n\n this.superpixels = superpixels;\n this.superpixelsMap = {};\n\n superpixels.forEach((pixel: number, index: number) => {\n if (!(pixel in this.superpixelsMap!)) {\n this.superpixelsMap![pixel] = [];\n }\n this.superpixelsMap![pixel].push(index);\n });\n }\n\n // throttled to prevent repeated expensive calls while resizing\n initializeSuperpixels(regionSize: number) {\n if (process.env.NODE_ENV !== \"test\") {\n if (this.throttleTimer) return;\n this.throttleTimer = true;\n\n setTimeout(() => {\n this._initializeSuperpixelse(regionSize);\n this.throttleTimer = false;\n }, 500);\n } else {\n 
this._initializeSuperpixelse(regionSize);\n }\n }\n\n computeSuperpixels() {\n const data = this.image.getRGBAData();\n\n const { superpixels } = slic(\n data,\n this.image.width,\n this.image.height,\n this.regionSize\n );\n\n return superpixels;\n }\n\n deselect() {\n this.colorMasks = undefined;\n this.currentSuperpixels.clear();\n this.lastSuperpixel = 0;\n this.annotation = undefined;\n\n this.setBlank();\n }\n\n onMouseDown(position: { x: number; y: number }) {\n if (this.annotationState === AnnotationState.Annotated) return;\n\n if (!this.currentMask) {\n this.currentMask = new ImageJS.Image(\n this.image.width,\n this.image.height,\n new Uint8Array(this.image.width * this.image.height * 4),\n { alpha: 1 }\n );\n }\n\n if (!this.superpixels) return;\n\n this.setAnnotating();\n }\n\n onMouseMove(position: { x: number; y: number }) {\n if (\n this.annotationState === AnnotationState.Annotated ||\n !this.superpixels ||\n !this.superpixelsMap\n )\n return;\n // fixes superpixel overflow\n position.x =\n position.x === this.image.width ? this.image.width - 1 : position.x;\n const pixel =\n Math.round(position.x) + Math.round(position.y) * this.image.width;\n\n const superpixel = this.superpixels[pixel];\n if (!superpixel || this.currentSuperpixels.has(superpixel)) return;\n\n this.lastSuperpixel = superpixel;\n\n if (this.annotationState !== AnnotationState.Annotating) {\n this.currentSuperpixels.clear();\n\n this.currentMask = new ImageJS.Image(\n this.image.width,\n this.image.height,\n new Uint8Array(this.image.width * this.image.height * 4),\n { alpha: 1 }\n );\n }\n\n this.currentSuperpixels.add(superpixel);\n\n this.superpixelsMap[superpixel].forEach((index: number) => {\n this.currentMask!.setPixel(index, [255, 0, 0, 150]);\n });\n }\n\n onMouseUp(position: { x: number; y: number }) {\n if (this.annotationState !== AnnotationState.Annotating) return;\n\n if (!this.currentMask) return;\n\n const greyMask = this.currentMask.grey();\n //@ts-ignore\n const binaryMask = greyMask.mask({ algorithm: \"threshold\", threshold: 1 });\n\n //compute bounding box with ROI manager\n const roiManager = this.image.getRoiManager();\n // @ts-ignore\n roiManager.fromMask(binaryMask);\n // @ts-ignore\n const roi = roiManager.getRois()[0];\n this._boundingBox = [roi.minX, roi.minY, roi.maxX, roi.maxY];\n\n const width = this._boundingBox[2] - this._boundingBox[0];\n const height = this._boundingBox[3] - this._boundingBox[1];\n if (width <= 0 || height <= 0) {\n return;\n }\n\n const croppedGreyMask = greyMask.crop({\n x: this._boundingBox[0],\n y: this._boundingBox[1],\n width: width,\n height: height,\n });\n\n const thresholdMask = croppedGreyMask.data.map((i: number) =>\n i > 1 ? 
255 : 0\n );\n\n this.decodedMask = Uint8Array.from(thresholdMask);\n this.setAnnotated();\n }\n}\n","export const slic = (\n data: Uint8Array | Uint8ClampedArray,\n width: number,\n height: number,\n regionSize: number = 40,\n minRegionSize?: number\n) => {\n if (!minRegionSize) {\n minRegionSize = (regionSize * regionSize) / 4;\n }\n\n const imWidth = width;\n const imHeight = height;\n const numRegionsX = parseInt(String(imWidth / regionSize), 10);\n const numRegionsY = parseInt(String(imHeight / regionSize), 10);\n const numRegions = parseInt(String(numRegionsX * numRegionsY), 10);\n const numPixels = parseInt(String(imWidth * imHeight), 10);\n const masses = new Array(numPixels);\n const currentCenters = new Float32Array((2 + 3) * numRegions);\n const newCenters = new Float32Array((2 + 3) * numRegions);\n const parameters = new Float32Array(2 * numRegions);\n const mcMap = new Float32Array(numPixels);\n const msMap = new Float32Array(numPixels);\n const distanceMap = new Float32Array(numPixels);\n\n /*\n * RGB to XYZ\n */\n const xyz = new Float32Array(3 * imWidth * imHeight);\n\n const gamma = 2.2;\n\n for (let i = 0; i < imWidth * imHeight; i++) {\n const r = Math.pow(parseFloat(String(data[4 * i])) * 0.00392156862, gamma);\n\n const g = Math.pow(\n parseFloat(String(data[4 * i + 1])) * 0.00392156862,\n gamma\n );\n\n const b = Math.pow(\n parseFloat(String(data[4 * i + 2])) * 0.00392156862,\n gamma\n );\n\n xyz[i] = r * 0.488718 + g * 0.31068 + b * 0.200602;\n xyz[i + imWidth * imHeight] = r * 0.176204 + g * 0.812985 + b * 0.0108109;\n xyz[i + 2 * imWidth * imHeight] = g * 0.0102048 + b * 0.989795;\n }\n\n /*\n * XYZ to Lab\n */\n const xw = 1.0 / 3.0;\n const yw = 1.0 / 3.0;\n const Yw = 1.0;\n const Xw = xw / yw;\n const Zw = ((1 - xw - yw) / yw) * Yw;\n const ix = 1.0 / Xw;\n const iy = 1.0 / Yw;\n const iz = 1.0 / Zw;\n const lab = new Float32Array(3 * imWidth * imHeight);\n\n for (let i = 0; i < imWidth * imHeight; i++) {\n const fx =\n xyz[i] * ix > 0.00856\n ? Math.pow(xyz[i] * ix, 0.33333333)\n : 7.78706891568 * xyz[i] * ix + 0.1379310336;\n\n const fy =\n xyz[imWidth * imHeight + i] * iy > 0.00856\n ? Math.pow(xyz[imWidth * imHeight + i] * iy, 0.33333333)\n : 7.78706891568 * xyz[imWidth * imHeight + i] * iy + 0.1379310336;\n\n const fz =\n xyz[2 * imWidth * imHeight + i] * iz > 0.00856\n ? 
Math.pow(xyz[2 * imWidth * imHeight + i] * iz, 0.33333333)\n : 7.78706891568 * xyz[2 * imWidth * imHeight + i] * iz + 0.1379310336;\n\n lab[i] = 116.0 * fy - 16.0;\n lab[i + imWidth * imHeight] = 500.0 * (fx - fy);\n lab[i + 2 * imWidth * imHeight] = 200.0 * (fy - fz);\n }\n\n /*\n * Compute gradient\n */\n const gradient = new Float32Array(numPixels);\n\n for (let k = 0; k < 3; k++) {\n for (let y = 1; y < imHeight - 1; y++) {\n for (let x = 1; x < imWidth - 1; x++) {\n const a = lab[k * imWidth * imHeight + y * imWidth + x - 1];\n const b = lab[k * imWidth * imHeight + y * imWidth + x + 1];\n const c = lab[k * imWidth * imHeight + (y + 1) * imWidth + x];\n const d = lab[k * imWidth * imHeight + (y - 1) * imWidth + x];\n\n gradient[y * imWidth + x] =\n gradient[y * imWidth + x] + (a - b) * (a - b) + (c - d) * (c - d);\n }\n }\n }\n\n /*\n * Initialize k-means centroids\n */\n let i = 0;\n let j = 0;\n\n for (let v = 0; v < numRegionsY; v++) {\n for (let u = 0; u < numRegionsX; u++) {\n let centerx = 0;\n let centery = 0;\n let minEdgeValue = Infinity;\n\n let x = parseInt(String(Math.round(regionSize * (u + 0.5))), 10);\n let y = parseInt(String(Math.round(regionSize * (v + 0.5))), 10);\n\n x = Math.max(Math.min(x, imWidth - 1), 0);\n y = Math.max(Math.min(y, imHeight - 1), 0);\n\n for (\n let yp = Math.max(0, y - 1);\n yp <= Math.min(imHeight - 1, y + 1);\n yp++\n ) {\n for (\n let xp = Math.max(0, x - 1);\n xp <= Math.min(imWidth - 1, x + 1);\n xp++\n ) {\n const thisEdgeValue = gradient[yp * imWidth + xp];\n\n if (thisEdgeValue < minEdgeValue) {\n minEdgeValue = thisEdgeValue;\n centerx = xp;\n centery = yp;\n }\n }\n }\n\n currentCenters[i++] = parseFloat(String(centerx));\n currentCenters[i++] = parseFloat(String(centery));\n\n currentCenters[i++] = lab[centery * imWidth + centerx];\n currentCenters[i++] =\n lab[imWidth * imHeight + centery * imWidth + centerx];\n currentCenters[i++] =\n lab[2 * imWidth * imHeight + centery * imWidth + centerx];\n\n parameters[j++] = 10 * 10;\n parameters[j++] = regionSize * regionSize;\n }\n }\n\n const iterations = 10;\n\n const segmentation = new Int32Array(numPixels);\n\n for (let iteration = 0; iteration < iterations; ++iteration) {\n /*\n * Label\n */\n for (let i1 = 0; i1 < distanceMap.length; ++i1) {\n distanceMap[i1] = Infinity;\n }\n\n for (let region = 0; region < numRegionsX * numRegionsY; ++region) {\n const cx = Math.round(currentCenters[region * 5]);\n const cy = Math.round(currentCenters[region * 5 + 1]);\n\n for (\n let y2 = Math.max(0, cy - regionSize);\n y2 < Math.min(imHeight, cy + regionSize);\n ++y2\n ) {\n for (\n let x2 = Math.max(0, cx - regionSize);\n x2 < Math.min(imWidth, cx + regionSize);\n ++x2\n ) {\n const spatial = (x2 - cx) * (x2 - cx) + (y2 - cy) * (y2 - cy);\n const dR = lab[y2 * imWidth + x2] - currentCenters[5 * region + 2];\n const dG =\n lab[imWidth * imHeight + y2 * imWidth + x2] -\n currentCenters[5 * region + 3];\n const dB =\n lab[2 * imWidth * imHeight + y2 * imWidth + x2] -\n currentCenters[5 * region + 4];\n const appearance = dR * dR + dG * dG + dB * dB;\n\n const distance = Math.sqrt(\n appearance / parameters[region * 2] +\n spatial / parameters[region * 2 + 1]\n );\n\n if (distance < distanceMap[y2 * imWidth + x2]) {\n distanceMap[y2 * imWidth + x2] = distance;\n segmentation[y2 * imWidth + x2] = region;\n }\n }\n }\n }\n\n for (let y2 = 0; y2 < imHeight; ++y2) {\n for (let x2 = 0; x2 < imWidth; ++x2) {\n if (\n parameters[segmentation[y2 * imWidth + x2] * 2] <\n mcMap[y2 * imWidth + x2]\n ) {\n 
parameters[segmentation[y2 * imWidth + x2] * 2] =\n mcMap[y2 * imWidth + x2];\n }\n\n if (\n parameters[segmentation[y2 * imWidth + x2] * 2 + 1] <\n msMap[y2 * imWidth + x2]\n ) {\n parameters[segmentation[y2 * imWidth + x2] * 2 + 1] =\n msMap[y2 * imWidth + x2];\n }\n }\n }\n\n /*\n * Update parameters\n */\n const mc = new Float32Array(parameters.length / 2);\n const ms = new Float32Array(parameters.length / 2);\n\n for (let i1 = 0; i1 < segmentation.length; i1++) {\n const region = segmentation[i1];\n\n if (mc[region] < mcMap[region]) {\n mc[region] = mcMap[region];\n\n parameters[region * 2] = mcMap[region];\n }\n\n if (ms[region] < msMap[region]) {\n ms[region] = msMap[region];\n\n parameters[region * 2 + 1] = msMap[region];\n }\n }\n\n for (let i = 0; i < masses.length; ++i) {\n masses[i] = 0;\n }\n\n for (let i = 0; i < newCenters.length; ++i) {\n newCenters[i] = 0;\n }\n\n /*\n * Compute centroids\n */\n let region;\n\n for (let y1 = 0; y1 < imHeight; y1++) {\n for (let x1 = 0; x1 < imWidth; x1++) {\n region = segmentation[x1 + y1 * imWidth];\n\n masses[region]++;\n\n newCenters[region * 5] += x1;\n newCenters[region * 5 + 1] += y1;\n newCenters[region * 5 + 2] += lab[y1 * imWidth + x1];\n newCenters[region * 5 + 3] +=\n lab[imWidth * imHeight + y1 * imWidth + x1];\n newCenters[region * 5 + 4] +=\n lab[2 * imWidth * imHeight + y1 * imWidth + x1];\n }\n }\n\n for (region = 0; region < numRegions; region++) {\n const iMass = 1.0 / Math.max(masses[region], 1e-8);\n\n newCenters[region * 5] = newCenters[region * 5] * iMass;\n newCenters[region * 5 + 1] = newCenters[region * 5 + 1] * iMass;\n newCenters[region * 5 + 2] = newCenters[region * 5 + 2] * iMass;\n newCenters[region * 5 + 3] = newCenters[region * 5 + 3] * iMass;\n newCenters[region * 5 + 4] = newCenters[region * 5 + 4] * iMass;\n }\n\n /*\n * Compute residual error\n */\n let error = 0.0;\n\n for (let index = 0; index < currentCenters.length; ++index) {\n const d = currentCenters[index] - newCenters[index];\n\n error += Math.sqrt(d * d);\n }\n\n if (error < 1e-5) {\n break;\n }\n\n for (let i = 0; i < currentCenters.length; ++i) {\n currentCenters[i] = newCenters[i];\n }\n }\n\n /*\n * Remove small objects\n */\n const cleaned = new Int32Array(numPixels);\n const segment = new Int32Array(numPixels);\n\n const dx = [1, -1, 0, 0];\n const dy = [0, 0, 1, -1];\n\n let segmentSize;\n let label;\n let cleanedLabel;\n let numExpanded;\n let x1;\n let y1;\n let xp;\n let yp;\n let neighbor;\n\n for (let pixel = 0; pixel < numPixels; pixel++) {\n if (cleaned[pixel]) continue;\n\n label = segmentation[pixel];\n numExpanded = 0;\n segmentSize = 0;\n segment[segmentSize++] = pixel;\n\n cleanedLabel = label + 1;\n cleaned[pixel] = label + 1;\n x1 = pixel % imWidth;\n y1 = Math.floor(pixel / imWidth);\n\n for (let direction = 0; direction < 4; direction++) {\n xp = x1 + dx[direction];\n yp = y1 + dy[direction];\n neighbor = xp + yp * imWidth;\n\n if (\n 0 <= xp &&\n xp < imWidth &&\n 0 <= yp &&\n yp < imHeight &&\n cleaned[neighbor]\n ) {\n cleanedLabel = cleaned[neighbor];\n }\n }\n\n while (numExpanded < segmentSize) {\n const open = segment[numExpanded++];\n\n x1 = open % imWidth;\n y1 = Math.floor(open / imWidth);\n\n for (let direction = 0; direction < 4; ++direction) {\n xp = x1 + dx[direction];\n yp = y1 + dy[direction];\n neighbor = xp + yp * imWidth;\n\n if (\n 0 <= xp &&\n xp < imWidth &&\n 0 <= yp &&\n yp < imHeight &&\n cleaned[neighbor] === 0 &&\n segmentation[neighbor] === label\n ) {\n cleaned[neighbor] = label + 1;\n 
segment[segmentSize++] = neighbor;\n }\n }\n }\n\n if (segmentSize < minRegionSize) {\n while (segmentSize > 0) {\n cleaned[segment[--segmentSize]] = cleanedLabel;\n }\n }\n }\n\n for (let pixel = 0; pixel < numPixels; ++pixel) {\n --cleaned[pixel];\n }\n\n for (let index = 0; index < numPixels; ++index) {\n segmentation[index] = cleaned[index];\n }\n\n return {\n count: 0,\n map: data,\n superpixels: segmentation,\n };\n};\n","import { AnnotationState } from \"../enums\";\nimport { AnnotationTool } from \"./AnnotationTool\";\nimport { drawRectangle } from \"utils/annotator\";\n\nexport class SelectionTool extends AnnotationTool {\n width?: number;\n height?: number;\n\n deselect() {\n this.origin = undefined;\n\n this.width = undefined;\n this.height = undefined;\n this.annotation = undefined;\n\n this.setBlank();\n }\n\n onMouseDown(position: { x: number; y: number }) {\n if (this.annotationState === AnnotationState.Annotated) return;\n if (!this.width) {\n this.origin = position;\n this.setAnnotating();\n }\n }\n\n onMouseMove(position: { x: number; y: number }) {\n if (this.annotationState !== AnnotationState.Annotating) return;\n this.resize(position);\n }\n\n onMouseUp(position: { x: number; y: number }) {\n if (this.annotationState !== AnnotationState.Annotating) return;\n\n if (\n !(this.width && this.height) ||\n Math.abs(this.width * this.height) < 10\n ) {\n this.deselect();\n return;\n }\n this.resize(position);\n this.points = drawRectangle(this.origin, this.width, this.height);\n this._boundingBox = this.computeBoundingBox();\n\n this.deselect();\n }\n\n private resize(position: { x: number; y: number }) {\n if (this.origin) {\n this.width = position.x - this.origin.x;\n this.height = position.y - this.origin.y;\n }\n }\n}\n","import { AnnotationState } from \"../enums\";\nimport { AnnotationTool } from \"./AnnotationTool\";\nimport { drawRectangle } from \"utils/annotator\";\n\nexport class ThresholdAnnotationTool extends AnnotationTool {\n threshold = 255;\n width?: number;\n height?: number;\n\n updateMask(threshold: number) {\n this.threshold = Math.round(threshold);\n if (this.decodedMask) {\n if (!this._boundingBox) return;\n\n const maskImg = this.applyThreshold(this._boundingBox);\n\n if (!maskImg) {\n this.deselect();\n return;\n }\n\n this.decodedMask = maskImg;\n\n this.setAnnotated();\n }\n }\n\n deselect() {\n this.origin = undefined;\n\n this.width = undefined;\n this.height = undefined;\n\n this.setBlank();\n this.annotation = undefined;\n }\n\n onMouseDown(position: { x: number; y: number }) {\n if (this.annotationState === AnnotationState.Annotated) return;\n\n if (!this.width) {\n this.origin = position;\n\n this.setAnnotating();\n } else {\n this.resize(position);\n\n this.computeMask();\n }\n }\n\n onMouseMove(position: { x: number; y: number }) {\n if (this.annotationState !== AnnotationState.Annotating) return;\n\n this.resize(position);\n }\n\n onMouseUp(position: { x: number; y: number }) {\n if (this.annotationState !== AnnotationState.Annotating) return;\n\n this.computeMask();\n this.setAnnotated();\n }\n\n computeMask() {\n this.points = drawRectangle(this.origin, this.width, this.height);\n\n const boundingBox = this.computeBoundingBox();\n this._boundingBox = boundingBox;\n\n if (!boundingBox) return;\n\n const thresholdMask = this.applyThreshold(boundingBox);\n if (!thresholdMask) {\n this.deselect();\n return;\n }\n\n this.decodedMask = thresholdMask;\n }\n\n private applyThreshold(boundingBox: [number, number, number, number]) {\n const x1 = 
boundingBox[0];\n const y1 = boundingBox[1];\n\n const width = boundingBox[2] - boundingBox[0];\n const height = boundingBox[3] - boundingBox[1];\n\n if (width <= 0 || height <= 0) {\n return undefined;\n }\n\n const image = this.image;\n const greyMask = image.grey();\n\n const binaryMask = greyMask.data as Uint8Array;\n\n const thresholdMask = new Uint8Array(width * height);\n\n for (let i = 0; i < height; i++) {\n for (let j = 0; j < width; j++) {\n const imgY = y1 + i;\n const imgX = x1 + j;\n const imgIdx = imgY * this.image.width + imgX;\n if (binaryMask[imgIdx] > this.threshold) {\n thresholdMask[i * width + j] = 255;\n }\n }\n }\n\n return thresholdMask;\n }\n\n private resize(position: { x: number; y: number }) {\n if (this.origin) {\n this.width = position.x - this.origin.x;\n this.height = position.y - this.origin.y;\n }\n }\n}\n","// Cache implementation based on Erik Rasmussen's `lru-memoize`:\n// https://github.com/erikras/lru-memoize\nvar NOT_FOUND = 'NOT_FOUND';\n\nfunction createSingletonCache(equals) {\n var entry;\n return {\n get: function get(key) {\n if (entry && equals(entry.key, key)) {\n return entry.value;\n }\n\n return NOT_FOUND;\n },\n put: function put(key, value) {\n entry = {\n key: key,\n value: value\n };\n },\n getEntries: function getEntries() {\n return entry ? [entry] : [];\n },\n clear: function clear() {\n entry = undefined;\n }\n };\n}\n\nfunction createLruCache(maxSize, equals) {\n var entries = [];\n\n function get(key) {\n var cacheIndex = entries.findIndex(function (entry) {\n return equals(key, entry.key);\n }); // We found a cached entry\n\n if (cacheIndex > -1) {\n var entry = entries[cacheIndex]; // Cached entry not at top of cache, move it to the top\n\n if (cacheIndex > 0) {\n entries.splice(cacheIndex, 1);\n entries.unshift(entry);\n }\n\n return entry.value;\n } // No entry found in cache, return sentinel\n\n\n return NOT_FOUND;\n }\n\n function put(key, value) {\n if (get(key) === NOT_FOUND) {\n // TODO Is unshift slow?\n entries.unshift({\n key: key,\n value: value\n });\n\n if (entries.length > maxSize) {\n entries.pop();\n }\n }\n }\n\n function getEntries() {\n return entries;\n }\n\n function clear() {\n entries = [];\n }\n\n return {\n get: get,\n put: put,\n getEntries: getEntries,\n clear: clear\n };\n}\n\nexport var defaultEqualityCheck = function defaultEqualityCheck(a, b) {\n return a === b;\n};\nexport function createCacheKeyComparator(equalityCheck) {\n return function areArgumentsShallowlyEqual(prev, next) {\n if (prev === null || next === null || prev.length !== next.length) {\n return false;\n } // Do this in a for loop (and not a `forEach` or an `every`) so we can determine equality as fast as possible.\n\n\n var length = prev.length;\n\n for (var i = 0; i < length; i++) {\n if (!equalityCheck(prev[i], next[i])) {\n return false;\n }\n }\n\n return true;\n };\n}\n// defaultMemoize now supports a configurable cache size with LRU behavior,\n// and optional comparison of the result value with existing values\nexport function defaultMemoize(func, equalityCheckOrOptions) {\n var providedOptions = typeof equalityCheckOrOptions === 'object' ? equalityCheckOrOptions : {\n equalityCheck: equalityCheckOrOptions\n };\n var _providedOptions$equa = providedOptions.equalityCheck,\n equalityCheck = _providedOptions$equa === void 0 ? defaultEqualityCheck : _providedOptions$equa,\n _providedOptions$maxS = providedOptions.maxSize,\n maxSize = _providedOptions$maxS === void 0 ? 
1 : _providedOptions$maxS,\n resultEqualityCheck = providedOptions.resultEqualityCheck;\n var comparator = createCacheKeyComparator(equalityCheck);\n var cache = maxSize === 1 ? createSingletonCache(comparator) : createLruCache(maxSize, comparator); // we reference arguments instead of spreading them for performance reasons\n\n function memoized() {\n var value = cache.get(arguments);\n\n if (value === NOT_FOUND) {\n // @ts-ignore\n value = func.apply(null, arguments);\n\n if (resultEqualityCheck) {\n var entries = cache.getEntries();\n var matchingEntry = entries.find(function (entry) {\n return resultEqualityCheck(entry.value, value);\n });\n\n if (matchingEntry) {\n value = matchingEntry.value;\n }\n }\n\n cache.put(arguments, value);\n }\n\n return value;\n }\n\n memoized.clearCache = function () {\n return cache.clear();\n };\n\n return memoized;\n}","import { defaultMemoize, defaultEqualityCheck } from './defaultMemoize';\nexport { defaultMemoize, defaultEqualityCheck };\n\nfunction getDependencies(funcs) {\n var dependencies = Array.isArray(funcs[0]) ? funcs[0] : funcs;\n\n if (!dependencies.every(function (dep) {\n return typeof dep === 'function';\n })) {\n var dependencyTypes = dependencies.map(function (dep) {\n return typeof dep === 'function' ? \"function \" + (dep.name || 'unnamed') + \"()\" : typeof dep;\n }).join(', ');\n throw new Error(\"createSelector expects all input-selectors to be functions, but received the following types: [\" + dependencyTypes + \"]\");\n }\n\n return dependencies;\n}\n\nexport function createSelectorCreator(memoize) {\n for (var _len = arguments.length, memoizeOptionsFromArgs = new Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) {\n memoizeOptionsFromArgs[_key - 1] = arguments[_key];\n }\n\n var createSelector = function createSelector() {\n for (var _len2 = arguments.length, funcs = new Array(_len2), _key2 = 0; _key2 < _len2; _key2++) {\n funcs[_key2] = arguments[_key2];\n }\n\n var _recomputations = 0;\n\n var _lastResult; // Due to the intricacies of rest params, we can't do an optional arg after `...funcs`.\n // So, start by declaring the default value here.\n // (And yes, the words 'memoize' and 'options' appear too many times in this next sequence.)\n\n\n var directlyPassedOptions = {\n memoizeOptions: undefined\n }; // Normally, the result func or \"output selector\" is the last arg\n\n var resultFunc = funcs.pop(); // If the result func is actually an _object_, assume it's our options object\n\n if (typeof resultFunc === 'object') {\n directlyPassedOptions = resultFunc; // and pop the real result func off\n\n resultFunc = funcs.pop();\n }\n\n if (typeof resultFunc !== 'function') {\n throw new Error(\"createSelector expects an output function after the inputs, but received: [\" + typeof resultFunc + \"]\");\n } // Determine which set of options we're using. Prefer options passed directly,\n // but fall back to options given to createSelectorCreator.\n\n\n var _directlyPassedOption = directlyPassedOptions,\n _directlyPassedOption2 = _directlyPassedOption.memoizeOptions,\n memoizeOptions = _directlyPassedOption2 === void 0 ? memoizeOptionsFromArgs : _directlyPassedOption2; // Simplifying assumption: it's unlikely that the first options arg of the provided memoizer\n // is an array. In most libs I've looked at, it's an equality function or options object.\n // Based on that, if `memoizeOptions` _is_ an array, we assume it's a full\n // user-provided array of options. 
Otherwise, it must be just the _first_ arg, and so\n // we wrap it in an array so we can apply it.\n\n var finalMemoizeOptions = Array.isArray(memoizeOptions) ? memoizeOptions : [memoizeOptions];\n var dependencies = getDependencies(funcs);\n var memoizedResultFunc = memoize.apply(void 0, [function recomputationWrapper() {\n _recomputations++; // apply arguments instead of spreading for performance.\n\n return resultFunc.apply(null, arguments);\n }].concat(finalMemoizeOptions)); // If a selector is called with the exact same arguments we don't need to traverse our dependencies again.\n\n var selector = memoize(function dependenciesChecker() {\n var params = [];\n var length = dependencies.length;\n\n for (var i = 0; i < length; i++) {\n // apply arguments instead of spreading and mutate a local list of params for performance.\n // @ts-ignore\n params.push(dependencies[i].apply(null, arguments));\n } // apply arguments instead of spreading for performance.\n\n\n _lastResult = memoizedResultFunc.apply(null, params);\n return _lastResult;\n });\n Object.assign(selector, {\n resultFunc: resultFunc,\n memoizedResultFunc: memoizedResultFunc,\n dependencies: dependencies,\n lastResult: function lastResult() {\n return _lastResult;\n },\n recomputations: function recomputations() {\n return _recomputations;\n },\n resetRecomputations: function resetRecomputations() {\n return _recomputations = 0;\n }\n });\n return selector;\n }; // @ts-ignore\n\n\n return createSelector;\n}\nexport var createSelector = /* #__PURE__ */createSelectorCreator(defaultMemoize);\n// Manual definition of state and output arguments\nexport var createStructuredSelector = function createStructuredSelector(selectors, selectorCreator) {\n if (selectorCreator === void 0) {\n selectorCreator = createSelector;\n }\n\n if (typeof selectors !== 'object') {\n throw new Error('createStructuredSelector expects first argument to be an object ' + (\"where each property is a selector, instead received a \" + typeof selectors));\n }\n\n var objectKeys = Object.keys(selectors);\n var resultSelector = selectorCreator( // @ts-ignore\n objectKeys.map(function (key) {\n return selectors[key];\n }), function () {\n for (var _len3 = arguments.length, values = new Array(_len3), _key3 = 0; _key3 < _len3; _key3++) {\n values[_key3] = arguments[_key3];\n }\n\n return values.reduce(function (composition, value, index) {\n composition[objectKeys[index]] = value;\n return composition;\n }, {});\n });\n return resultSelector;\n};","import objectWithoutPropertiesLoose from \"./objectWithoutPropertiesLoose.js\";\nexport default function _objectWithoutProperties(source, excluded) {\n if (source == null) return {};\n var target = objectWithoutPropertiesLoose(source, excluded);\n var key, i;\n if (Object.getOwnPropertySymbols) {\n var sourceSymbolKeys = Object.getOwnPropertySymbols(source);\n for (i = 0; i < sourceSymbolKeys.length; i++) {\n key = sourceSymbolKeys[i];\n if (excluded.indexOf(key) >= 0) continue;\n if (!Object.prototype.propertyIsEnumerable.call(source, key)) continue;\n target[key] = source[key];\n }\n }\n return target;\n}","const errors = {\n\t0: \"Illegal state\",\n\t1: \"Immer drafts cannot have computed properties\",\n\t2: \"This object has been frozen and should not be mutated\",\n\t3(data: any) {\n\t\treturn (\n\t\t\t\"Cannot use a proxy that has been revoked. Did you pass an object from inside an immer function to an async process? \" +\n\t\t\tdata\n\t\t)\n\t},\n\t4: \"An immer producer returned a new value *and* modified its draft. 
Either return a new value *or* modify the draft.\",\n\t5: \"Immer forbids circular references\",\n\t6: \"The first or second argument to `produce` must be a function\",\n\t7: \"The third argument to `produce` must be a function or undefined\",\n\t8: \"First argument to `createDraft` must be a plain object, an array, or an immerable object\",\n\t9: \"First argument to `finishDraft` must be a draft returned by `createDraft`\",\n\t10: \"The given draft is already finalized\",\n\t11: \"Object.defineProperty() cannot be used on an Immer draft\",\n\t12: \"Object.setPrototypeOf() cannot be used on an Immer draft\",\n\t13: \"Immer only supports deleting array indices\",\n\t14: \"Immer only supports setting array indices and the 'length' property\",\n\t15(path: string) {\n\t\treturn \"Cannot apply patch, path doesn't resolve: \" + path\n\t},\n\t16: 'Sets cannot have \"replace\" patches.',\n\t17(op: string) {\n\t\treturn \"Unsupported patch operation: \" + op\n\t},\n\t18(plugin: string) {\n\t\treturn `The plugin for '${plugin}' has not been loaded into Immer. To enable the plugin, import and call \\`enable${plugin}()\\` when initializing your application.`\n\t},\n\t20: \"Cannot use proxies if Proxy, Proxy.revocable or Reflect are not available\",\n\t21(thing: string) {\n\t\treturn `produce can only be called on things that are draftable: plain objects, arrays, Map, Set or classes that are marked with '[immerable]: true'. Got '${thing}'`\n\t},\n\t22(thing: string) {\n\t\treturn `'current' expects a draft, got: ${thing}`\n\t},\n\t23(thing: string) {\n\t\treturn `'original' expects a draft, got: ${thing}`\n\t},\n\t24: \"Patching reserved attributes like __proto__, prototype and constructor is not allowed\"\n} as const\n\nexport function die(error: keyof typeof errors, ...args: any[]): never {\n\tif (__DEV__) {\n\t\tconst e = errors[error]\n\t\tconst msg = !e\n\t\t\t? \"unknown error nr: \" + error\n\t\t\t: typeof e === \"function\"\n\t\t\t? e.apply(null, args as any)\n\t\t\t: e\n\t\tthrow new Error(`[Immer] ${msg}`)\n\t}\n\tthrow new Error(\n\t\t`[Immer] minified error nr: ${error}${\n\t\t\targs.length ? \" \" + args.map(s => `'${s}'`).join(\",\") : \"\"\n\t\t}. 
Find the full error at: https://bit.ly/3cXEKWf`\n\t)\n}\n","import {\n\tDRAFT_STATE,\n\tDRAFTABLE,\n\thasSet,\n\tObjectish,\n\tDrafted,\n\tAnyObject,\n\tAnyMap,\n\tAnySet,\n\tImmerState,\n\thasMap,\n\tArchtype,\n\tdie\n} from \"../internal\"\n\n/** Returns true if the given value is an Immer draft */\n/*#__PURE__*/\nexport function isDraft(value: any): boolean {\n\treturn !!value && !!value[DRAFT_STATE]\n}\n\n/** Returns true if the given value can be drafted by Immer */\n/*#__PURE__*/\nexport function isDraftable(value: any): boolean {\n\tif (!value) return false\n\treturn (\n\t\tisPlainObject(value) ||\n\t\tArray.isArray(value) ||\n\t\t!!value[DRAFTABLE] ||\n\t\t!!value.constructor?.[DRAFTABLE] ||\n\t\tisMap(value) ||\n\t\tisSet(value)\n\t)\n}\n\nconst objectCtorString = Object.prototype.constructor.toString()\n/*#__PURE__*/\nexport function isPlainObject(value: any): boolean {\n\tif (!value || typeof value !== \"object\") return false\n\tconst proto = Object.getPrototypeOf(value)\n\tif (proto === null) {\n\t\treturn true\n\t}\n\tconst Ctor =\n\t\tObject.hasOwnProperty.call(proto, \"constructor\") && proto.constructor\n\n\tif (Ctor === Object) return true\n\n\treturn (\n\t\ttypeof Ctor == \"function\" &&\n\t\tFunction.toString.call(Ctor) === objectCtorString\n\t)\n}\n\n/** Get the underlying object that is represented by the given draft */\n/*#__PURE__*/\nexport function original(value: T): T | undefined\nexport function original(value: Drafted): any {\n\tif (!isDraft(value)) die(23, value)\n\treturn value[DRAFT_STATE].base_\n}\n\n/*#__PURE__*/\nexport const ownKeys: (target: AnyObject) => PropertyKey[] =\n\ttypeof Reflect !== \"undefined\" && Reflect.ownKeys\n\t\t? Reflect.ownKeys\n\t\t: typeof Object.getOwnPropertySymbols !== \"undefined\"\n\t\t? obj =>\n\t\t\t\tObject.getOwnPropertyNames(obj).concat(\n\t\t\t\t\tObject.getOwnPropertySymbols(obj) as any\n\t\t\t\t)\n\t\t: /* istanbul ignore next */ Object.getOwnPropertyNames\n\nexport const getOwnPropertyDescriptors =\n\tObject.getOwnPropertyDescriptors ||\n\tfunction getOwnPropertyDescriptors(target: any) {\n\t\t// Polyfill needed for Hermes and IE, see https://github.com/facebook/hermes/issues/274\n\t\tconst res: any = {}\n\t\townKeys(target).forEach(key => {\n\t\t\tres[key] = Object.getOwnPropertyDescriptor(target, key)\n\t\t})\n\t\treturn res\n\t}\n\nexport function each(\n\tobj: T,\n\titer: (key: string | number, value: any, source: T) => void,\n\tenumerableOnly?: boolean\n): void\nexport function each(obj: any, iter: any, enumerableOnly = false) {\n\tif (getArchtype(obj) === Archtype.Object) {\n\t\t;(enumerableOnly ? Object.keys : ownKeys)(obj).forEach(key => {\n\t\t\tif (!enumerableOnly || typeof key !== \"symbol\") iter(key, obj[key], obj)\n\t\t})\n\t} else {\n\t\tobj.forEach((entry: any, index: any) => iter(index, entry, obj))\n\t}\n}\n\n/*#__PURE__*/\nexport function getArchtype(thing: any): Archtype {\n\t/* istanbul ignore next */\n\tconst state: undefined | ImmerState = thing[DRAFT_STATE]\n\treturn state\n\t\t? state.type_ > 3\n\t\t\t? state.type_ - 4 // cause Object and Array map back from 4 and 5\n\t\t\t: (state.type_ as any) // others are the same\n\t\t: Array.isArray(thing)\n\t\t? Archtype.Array\n\t\t: isMap(thing)\n\t\t? Archtype.Map\n\t\t: isSet(thing)\n\t\t? Archtype.Set\n\t\t: Archtype.Object\n}\n\n/*#__PURE__*/\nexport function has(thing: any, prop: PropertyKey): boolean {\n\treturn getArchtype(thing) === Archtype.Map\n\t\t? 
thing.has(prop)\n\t\t: Object.prototype.hasOwnProperty.call(thing, prop)\n}\n\n/*#__PURE__*/\nexport function get(thing: AnyMap | AnyObject, prop: PropertyKey): any {\n\t// @ts-ignore\n\treturn getArchtype(thing) === Archtype.Map ? thing.get(prop) : thing[prop]\n}\n\n/*#__PURE__*/\nexport function set(thing: any, propOrOldValue: PropertyKey, value: any) {\n\tconst t = getArchtype(thing)\n\tif (t === Archtype.Map) thing.set(propOrOldValue, value)\n\telse if (t === Archtype.Set) {\n\t\tthing.add(value)\n\t} else thing[propOrOldValue] = value\n}\n\n/*#__PURE__*/\nexport function is(x: any, y: any): boolean {\n\t// From: https://github.com/facebook/fbjs/blob/c69904a511b900266935168223063dd8772dfc40/packages/fbjs/src/core/shallowEqual.js\n\tif (x === y) {\n\t\treturn x !== 0 || 1 / x === 1 / y\n\t} else {\n\t\treturn x !== x && y !== y\n\t}\n}\n\n/*#__PURE__*/\nexport function isMap(target: any): target is AnyMap {\n\treturn hasMap && target instanceof Map\n}\n\n/*#__PURE__*/\nexport function isSet(target: any): target is AnySet {\n\treturn hasSet && target instanceof Set\n}\n/*#__PURE__*/\nexport function latest(state: ImmerState): any {\n\treturn state.copy_ || state.base_\n}\n\n/*#__PURE__*/\nexport function shallowCopy(base: any) {\n\tif (Array.isArray(base)) return Array.prototype.slice.call(base)\n\tconst descriptors = getOwnPropertyDescriptors(base)\n\tdelete descriptors[DRAFT_STATE as any]\n\tlet keys = ownKeys(descriptors)\n\tfor (let i = 0; i < keys.length; i++) {\n\t\tconst key: any = keys[i]\n\t\tconst desc = descriptors[key]\n\t\tif (desc.writable === false) {\n\t\t\tdesc.writable = true\n\t\t\tdesc.configurable = true\n\t\t}\n\t\t// like object.assign, we will read any _own_, get/set accessors. This helps in dealing\n\t\t// with libraries that trap values, like mobx or vue\n\t\t// unlike object.assign, non-enumerables will be copied as well\n\t\tif (desc.get || desc.set)\n\t\t\tdescriptors[key] = {\n\t\t\t\tconfigurable: true,\n\t\t\t\twritable: true, // could live with !!desc.set as well here...\n\t\t\t\tenumerable: desc.enumerable,\n\t\t\t\tvalue: base[key]\n\t\t\t}\n\t}\n\treturn Object.create(Object.getPrototypeOf(base), descriptors)\n}\n\n/**\n * Freezes draftable objects. 
Returns the original object.\n * By default freezes shallowly, but if the second argument is `true` it will freeze recursively.\n *\n * @param obj\n * @param deep\n */\nexport function freeze(obj: T, deep?: boolean): T\nexport function freeze(obj: any, deep: boolean = false): T {\n\tif (isFrozen(obj) || isDraft(obj) || !isDraftable(obj)) return obj\n\tif (getArchtype(obj) > 1 /* Map or Set */) {\n\t\tobj.set = obj.add = obj.clear = obj.delete = dontMutateFrozenCollections as any\n\t}\n\tObject.freeze(obj)\n\tif (deep) each(obj, (key, value) => freeze(value, true), true)\n\treturn obj\n}\n\nfunction dontMutateFrozenCollections() {\n\tdie(2)\n}\n\nexport function isFrozen(obj: any): boolean {\n\tif (obj == null || typeof obj !== \"object\") return true\n\t// See #600, IE dies on non-objects in Object.isFrozen\n\treturn Object.isFrozen(obj)\n}\n","import {\n\tImmerState,\n\tPatch,\n\tImmerScope,\n\tDrafted,\n\tAnyObject,\n\tImmerBaseState,\n\tAnyMap,\n\tAnySet,\n\tProxyType,\n\tdie\n} from \"../internal\"\n\n/** Plugin utilities */\nconst plugins: {\n\tPatches?: {\n\t\tgeneratePatches_(\n\t\t\tstate: ImmerState,\n\t\t\tbasePath: PatchPath,\n\t\t\tpatches: Patch[],\n\t\t\tinversePatches: Patch[]\n\t\t): void\n\t\tgenerateReplacementPatches_(\n\t\t\tbase: any,\n\t\t\treplacement: any,\n\t\t\tpatches: Patch[],\n\t\t\tinversePatches: Patch[]\n\t\t): void\n\t\tapplyPatches_(draft: T, patches: Patch[]): T\n\t}\n\tES5?: {\n\t\twillFinalizeES5_(scope: ImmerScope, result: any, isReplaced: boolean): void\n\t\tcreateES5Proxy_(\n\t\t\tbase: T,\n\t\t\tparent?: ImmerState\n\t\t): Drafted\n\t\thasChanges_(state: ES5ArrayState | ES5ObjectState): boolean\n\t}\n\tMapSet?: {\n\t\tproxyMap_(target: T, parent?: ImmerState): T\n\t\tproxySet_(target: T, parent?: ImmerState): T\n\t}\n} = {}\n\ntype Plugins = typeof plugins\n\nexport function getPlugin(\n\tpluginKey: K\n): Exclude {\n\tconst plugin = plugins[pluginKey]\n\tif (!plugin) {\n\t\tdie(18, pluginKey)\n\t}\n\t// @ts-ignore\n\treturn plugin\n}\n\nexport function loadPlugin(\n\tpluginKey: K,\n\timplementation: Plugins[K]\n): void {\n\tif (!plugins[pluginKey]) plugins[pluginKey] = implementation\n}\n\n/** ES5 Plugin */\n\ninterface ES5BaseState extends ImmerBaseState {\n\tassigned_: {[key: string]: any}\n\tparent_?: ImmerState\n\trevoked_: boolean\n}\n\nexport interface ES5ObjectState extends ES5BaseState {\n\ttype_: ProxyType.ES5Object\n\tdraft_: Drafted\n\tbase_: AnyObject\n\tcopy_: AnyObject | null\n}\n\nexport interface ES5ArrayState extends ES5BaseState {\n\ttype_: ProxyType.ES5Array\n\tdraft_: Drafted\n\tbase_: any\n\tcopy_: any\n}\n\n/** Map / Set plugin */\n\nexport interface MapState extends ImmerBaseState {\n\ttype_: ProxyType.Map\n\tcopy_: AnyMap | undefined\n\tassigned_: Map | undefined\n\tbase_: AnyMap\n\trevoked_: boolean\n\tdraft_: Drafted\n}\n\nexport interface SetState extends ImmerBaseState {\n\ttype_: ProxyType.Set\n\tcopy_: AnySet | undefined\n\tbase_: AnySet\n\tdrafts_: Map // maps the original value to the draft value in the new set\n\trevoked_: boolean\n\tdraft_: Drafted\n}\n\n/** Patches plugin */\n\nexport type PatchPath = (string | number)[]\n","import {\n\tPatch,\n\tPatchListener,\n\tDrafted,\n\tImmer,\n\tDRAFT_STATE,\n\tImmerState,\n\tProxyType,\n\tgetPlugin\n} from \"../internal\"\nimport {die} from \"../utils/errors\"\n\n/** Each scope represents a `produce` call. 
*/\n\nexport interface ImmerScope {\n\tpatches_?: Patch[]\n\tinversePatches_?: Patch[]\n\tcanAutoFreeze_: boolean\n\tdrafts_: any[]\n\tparent_?: ImmerScope\n\tpatchListener_?: PatchListener\n\timmer_: Immer\n\tunfinalizedDrafts_: number\n}\n\nlet currentScope: ImmerScope | undefined\n\nexport function getCurrentScope() {\n\tif (__DEV__ && !currentScope) die(0)\n\treturn currentScope!\n}\n\nfunction createScope(\n\tparent_: ImmerScope | undefined,\n\timmer_: Immer\n): ImmerScope {\n\treturn {\n\t\tdrafts_: [],\n\t\tparent_,\n\t\timmer_,\n\t\t// Whenever the modified draft contains a draft from another scope, we\n\t\t// need to prevent auto-freezing so the unowned draft can be finalized.\n\t\tcanAutoFreeze_: true,\n\t\tunfinalizedDrafts_: 0\n\t}\n}\n\nexport function usePatchesInScope(\n\tscope: ImmerScope,\n\tpatchListener?: PatchListener\n) {\n\tif (patchListener) {\n\t\tgetPlugin(\"Patches\") // assert we have the plugin\n\t\tscope.patches_ = []\n\t\tscope.inversePatches_ = []\n\t\tscope.patchListener_ = patchListener\n\t}\n}\n\nexport function revokeScope(scope: ImmerScope) {\n\tleaveScope(scope)\n\tscope.drafts_.forEach(revokeDraft)\n\t// @ts-ignore\n\tscope.drafts_ = null\n}\n\nexport function leaveScope(scope: ImmerScope) {\n\tif (scope === currentScope) {\n\t\tcurrentScope = scope.parent_\n\t}\n}\n\nexport function enterScope(immer: Immer) {\n\treturn (currentScope = createScope(currentScope, immer))\n}\n\nfunction revokeDraft(draft: Drafted) {\n\tconst state: ImmerState = draft[DRAFT_STATE]\n\tif (\n\t\tstate.type_ === ProxyType.ProxyObject ||\n\t\tstate.type_ === ProxyType.ProxyArray\n\t)\n\t\tstate.revoke_()\n\telse state.revoked_ = true\n}\n","import {\n\tImmerScope,\n\tDRAFT_STATE,\n\tisDraftable,\n\tNOTHING,\n\tPatchPath,\n\teach,\n\thas,\n\tfreeze,\n\tImmerState,\n\tisDraft,\n\tSetState,\n\tset,\n\tProxyType,\n\tgetPlugin,\n\tdie,\n\trevokeScope,\n\tisFrozen,\n\tshallowCopy\n} from \"../internal\"\n\nexport function processResult(result: any, scope: ImmerScope) {\n\tscope.unfinalizedDrafts_ = scope.drafts_.length\n\tconst baseDraft = scope.drafts_![0]\n\tconst isReplaced = result !== undefined && result !== baseDraft\n\tif (!scope.immer_.useProxies_)\n\t\tgetPlugin(\"ES5\").willFinalizeES5_(scope, result, isReplaced)\n\tif (isReplaced) {\n\t\tif (baseDraft[DRAFT_STATE].modified_) {\n\t\t\trevokeScope(scope)\n\t\t\tdie(4)\n\t\t}\n\t\tif (isDraftable(result)) {\n\t\t\t// Finalize the result in case it contains (or is) a subset of the draft.\n\t\t\tresult = finalize(scope, result)\n\t\t\tif (!scope.parent_) maybeFreeze(scope, result)\n\t\t}\n\t\tif (scope.patches_) {\n\t\t\tgetPlugin(\"Patches\").generateReplacementPatches_(\n\t\t\t\tbaseDraft[DRAFT_STATE].base_,\n\t\t\t\tresult,\n\t\t\t\tscope.patches_,\n\t\t\t\tscope.inversePatches_!\n\t\t\t)\n\t\t}\n\t} else {\n\t\t// Finalize the base draft.\n\t\tresult = finalize(scope, baseDraft, [])\n\t}\n\trevokeScope(scope)\n\tif (scope.patches_) {\n\t\tscope.patchListener_!(scope.patches_, scope.inversePatches_!)\n\t}\n\treturn result !== NOTHING ? 
result : undefined\n}\n\nfunction finalize(rootScope: ImmerScope, value: any, path?: PatchPath) {\n\t// Don't recurse in tho recursive data structures\n\tif (isFrozen(value)) return value\n\n\tconst state: ImmerState = value[DRAFT_STATE]\n\t// A plain object, might need freezing, might contain drafts\n\tif (!state) {\n\t\teach(\n\t\t\tvalue,\n\t\t\t(key, childValue) =>\n\t\t\t\tfinalizeProperty(rootScope, state, value, key, childValue, path),\n\t\t\ttrue // See #590, don't recurse into non-enumerable of non drafted objects\n\t\t)\n\t\treturn value\n\t}\n\t// Never finalize drafts owned by another scope.\n\tif (state.scope_ !== rootScope) return value\n\t// Unmodified draft, return the (frozen) original\n\tif (!state.modified_) {\n\t\tmaybeFreeze(rootScope, state.base_, true)\n\t\treturn state.base_\n\t}\n\t// Not finalized yet, let's do that now\n\tif (!state.finalized_) {\n\t\tstate.finalized_ = true\n\t\tstate.scope_.unfinalizedDrafts_--\n\t\tconst result =\n\t\t\t// For ES5, create a good copy from the draft first, with added keys and without deleted keys.\n\t\t\tstate.type_ === ProxyType.ES5Object || state.type_ === ProxyType.ES5Array\n\t\t\t\t? (state.copy_ = shallowCopy(state.draft_))\n\t\t\t\t: state.copy_\n\t\t// Finalize all children of the copy\n\t\t// For sets we clone before iterating, otherwise we can get in endless loop due to modifying during iteration, see #628\n\t\t// To preserve insertion order in all cases we then clear the set\n\t\t// And we let finalizeProperty know it needs to re-add non-draft children back to the target\n\t\tlet resultEach = result\n\t\tlet isSet = false\n\t\tif (state.type_ === ProxyType.Set) {\n\t\t\tresultEach = new Set(result)\n\t\t\tresult.clear()\n\t\t\tisSet = true\n\t\t}\n\t\teach(resultEach, (key, childValue) =>\n\t\t\tfinalizeProperty(rootScope, state, result, key, childValue, path, isSet)\n\t\t)\n\t\t// everything inside is frozen, we can freeze here\n\t\tmaybeFreeze(rootScope, result, false)\n\t\t// first time finalizing, let's create those patches\n\t\tif (path && rootScope.patches_) {\n\t\t\tgetPlugin(\"Patches\").generatePatches_(\n\t\t\t\tstate,\n\t\t\t\tpath,\n\t\t\t\trootScope.patches_,\n\t\t\t\trootScope.inversePatches_!\n\t\t\t)\n\t\t}\n\t}\n\treturn state.copy_\n}\n\nfunction finalizeProperty(\n\trootScope: ImmerScope,\n\tparentState: undefined | ImmerState,\n\ttargetObject: any,\n\tprop: string | number,\n\tchildValue: any,\n\trootPath?: PatchPath,\n\ttargetIsSet?: boolean\n) {\n\tif (__DEV__ && childValue === targetObject) die(5)\n\tif (isDraft(childValue)) {\n\t\tconst path =\n\t\t\trootPath &&\n\t\t\tparentState &&\n\t\t\tparentState!.type_ !== ProxyType.Set && // Set objects are atomic since they have no keys.\n\t\t\t!has((parentState as Exclude).assigned_!, prop) // Skip deep patches for assigned keys.\n\t\t\t\t? rootPath!.concat(prop)\n\t\t\t\t: undefined\n\t\t// Drafts owned by `scope` are finalized here.\n\t\tconst res = finalize(rootScope, childValue, path)\n\t\tset(targetObject, prop, res)\n\t\t// Drafts from another scope must prevented to be frozen\n\t\t// if we got a draft back from finalize, we're in a nested produce and shouldn't freeze\n\t\tif (isDraft(res)) {\n\t\t\trootScope.canAutoFreeze_ = false\n\t\t} else return\n\t} else if (targetIsSet) {\n\t\ttargetObject.add(childValue)\n\t}\n\t// Search new objects for unfinalized drafts. 
Frozen objects should never contain drafts.\n\tif (isDraftable(childValue) && !isFrozen(childValue)) {\n\t\tif (!rootScope.immer_.autoFreeze_ && rootScope.unfinalizedDrafts_ < 1) {\n\t\t\t// optimization: if an object is not a draft, and we don't have to\n\t\t\t// deepfreeze everything, and we are sure that no drafts are left in the remaining object\n\t\t\t// cause we saw and finalized all drafts already; we can stop visiting the rest of the tree.\n\t\t\t// This benefits especially adding large data tree's without further processing.\n\t\t\t// See add-data.js perf test\n\t\t\treturn\n\t\t}\n\t\tfinalize(rootScope, childValue)\n\t\t// immer deep freezes plain objects, so if there is no parent state, we freeze as well\n\t\tif (!parentState || !parentState.scope_.parent_)\n\t\t\tmaybeFreeze(rootScope, childValue)\n\t}\n}\n\nfunction maybeFreeze(scope: ImmerScope, value: any, deep = false) {\n\t// we never freeze for a non-root scope; as it would prevent pruning for drafts inside wrapping objects\n\tif (!scope.parent_ && scope.immer_.autoFreeze_ && scope.canAutoFreeze_) {\n\t\tfreeze(value, deep)\n\t}\n}\n","import {\n\teach,\n\thas,\n\tis,\n\tisDraftable,\n\tshallowCopy,\n\tlatest,\n\tImmerBaseState,\n\tImmerState,\n\tDrafted,\n\tAnyObject,\n\tAnyArray,\n\tObjectish,\n\tgetCurrentScope,\n\tDRAFT_STATE,\n\tdie,\n\tcreateProxy,\n\tProxyType\n} from \"../internal\"\n\ninterface ProxyBaseState extends ImmerBaseState {\n\tassigned_: {\n\t\t[property: string]: boolean\n\t}\n\tparent_?: ImmerState\n\trevoke_(): void\n}\n\nexport interface ProxyObjectState extends ProxyBaseState {\n\ttype_: ProxyType.ProxyObject\n\tbase_: any\n\tcopy_: any\n\tdraft_: Drafted\n}\n\nexport interface ProxyArrayState extends ProxyBaseState {\n\ttype_: ProxyType.ProxyArray\n\tbase_: AnyArray\n\tcopy_: AnyArray | null\n\tdraft_: Drafted\n}\n\ntype ProxyState = ProxyObjectState | ProxyArrayState\n\n/**\n * Returns a new draft of the `base` object.\n *\n * The second argument is the parent draft-state (used internally).\n */\nexport function createProxyProxy(\n\tbase: T,\n\tparent?: ImmerState\n): Drafted {\n\tconst isArray = Array.isArray(base)\n\tconst state: ProxyState = {\n\t\ttype_: isArray ? ProxyType.ProxyArray : (ProxyType.ProxyObject as any),\n\t\t// Track which produce call this is associated with.\n\t\tscope_: parent ? parent.scope_ : getCurrentScope()!,\n\t\t// True for both shallow and deep changes.\n\t\tmodified_: false,\n\t\t// Used during finalization.\n\t\tfinalized_: false,\n\t\t// Track which properties have been assigned (true) or deleted (false).\n\t\tassigned_: {},\n\t\t// The parent draft state.\n\t\tparent_: parent,\n\t\t// The base state.\n\t\tbase_: base,\n\t\t// The base proxy.\n\t\tdraft_: null as any, // set below\n\t\t// The base copy with any updated values.\n\t\tcopy_: null,\n\t\t// Called by the `produce` function.\n\t\trevoke_: null as any,\n\t\tisManual_: false\n\t}\n\n\t// the traps must target something, a bit like the 'real' base.\n\t// but also, we need to be able to determine from the target what the relevant state is\n\t// (to avoid creating traps per instance to capture the state in closure,\n\t// and to avoid creating weird hidden properties as well)\n\t// So the trick is to use 'state' as the actual 'target'! 
(and make sure we intercept everything)\n\t// Note that in the case of an array, we put the state in an array to have better Reflect defaults ootb\n\tlet target: T = state as any\n\tlet traps: ProxyHandler> = objectTraps\n\tif (isArray) {\n\t\ttarget = [state] as any\n\t\ttraps = arrayTraps\n\t}\n\n\tconst {revoke, proxy} = Proxy.revocable(target, traps)\n\tstate.draft_ = proxy as any\n\tstate.revoke_ = revoke\n\treturn proxy as any\n}\n\n/**\n * Object drafts\n */\nexport const objectTraps: ProxyHandler = {\n\tget(state, prop) {\n\t\tif (prop === DRAFT_STATE) return state\n\n\t\tconst source = latest(state)\n\t\tif (!has(source, prop)) {\n\t\t\t// non-existing or non-own property...\n\t\t\treturn readPropFromProto(state, source, prop)\n\t\t}\n\t\tconst value = source[prop]\n\t\tif (state.finalized_ || !isDraftable(value)) {\n\t\t\treturn value\n\t\t}\n\t\t// Check for existing draft in modified state.\n\t\t// Assigned values are never drafted. This catches any drafts we created, too.\n\t\tif (value === peek(state.base_, prop)) {\n\t\t\tprepareCopy(state)\n\t\t\treturn (state.copy_![prop as any] = createProxy(\n\t\t\t\tstate.scope_.immer_,\n\t\t\t\tvalue,\n\t\t\t\tstate\n\t\t\t))\n\t\t}\n\t\treturn value\n\t},\n\thas(state, prop) {\n\t\treturn prop in latest(state)\n\t},\n\townKeys(state) {\n\t\treturn Reflect.ownKeys(latest(state))\n\t},\n\tset(\n\t\tstate: ProxyObjectState,\n\t\tprop: string /* strictly not, but helps TS */,\n\t\tvalue\n\t) {\n\t\tconst desc = getDescriptorFromProto(latest(state), prop)\n\t\tif (desc?.set) {\n\t\t\t// special case: if this write is captured by a setter, we have\n\t\t\t// to trigger it with the correct context\n\t\t\tdesc.set.call(state.draft_, value)\n\t\t\treturn true\n\t\t}\n\t\tif (!state.modified_) {\n\t\t\t// the last check is because we need to be able to distinguish setting a non-existing to undefined (which is a change)\n\t\t\t// from setting an existing property with value undefined to undefined (which is not a change)\n\t\t\tconst current = peek(latest(state), prop)\n\t\t\t// special case, if we assigning the original value to a draft, we can ignore the assignment\n\t\t\tconst currentState: ProxyObjectState = current?.[DRAFT_STATE]\n\t\t\tif (currentState && currentState.base_ === value) {\n\t\t\t\tstate.copy_![prop] = value\n\t\t\t\tstate.assigned_[prop] = false\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif (is(value, current) && (value !== undefined || has(state.base_, prop)))\n\t\t\t\treturn true\n\t\t\tprepareCopy(state)\n\t\t\tmarkChanged(state)\n\t\t}\n\n\t\tif (\n\t\t\t(state.copy_![prop] === value &&\n\t\t\t\t// special case: handle new props with value 'undefined'\n\t\t\t\t(value !== undefined || prop in state.copy_)) ||\n\t\t\t// special case: NaN\n\t\t\t(Number.isNaN(value) && Number.isNaN(state.copy_![prop]))\n\t\t)\n\t\t\treturn true\n\n\t\t// @ts-ignore\n\t\tstate.copy_![prop] = value\n\t\tstate.assigned_[prop] = true\n\t\treturn true\n\t},\n\tdeleteProperty(state, prop: string) {\n\t\t// The `undefined` check is a fast path for pre-existing keys.\n\t\tif (peek(state.base_, prop) !== undefined || prop in state.base_) {\n\t\t\tstate.assigned_[prop] = false\n\t\t\tprepareCopy(state)\n\t\t\tmarkChanged(state)\n\t\t} else {\n\t\t\t// if an originally not assigned property was deleted\n\t\t\tdelete state.assigned_[prop]\n\t\t}\n\t\t// @ts-ignore\n\t\tif (state.copy_) delete state.copy_[prop]\n\t\treturn true\n\t},\n\t// Note: We never coerce `desc.value` into an Immer draft, because we can't make\n\t// the same guarantee in ES5 
mode.\n\tgetOwnPropertyDescriptor(state, prop) {\n\t\tconst owner = latest(state)\n\t\tconst desc = Reflect.getOwnPropertyDescriptor(owner, prop)\n\t\tif (!desc) return desc\n\t\treturn {\n\t\t\twritable: true,\n\t\t\tconfigurable: state.type_ !== ProxyType.ProxyArray || prop !== \"length\",\n\t\t\tenumerable: desc.enumerable,\n\t\t\tvalue: owner[prop]\n\t\t}\n\t},\n\tdefineProperty() {\n\t\tdie(11)\n\t},\n\tgetPrototypeOf(state) {\n\t\treturn Object.getPrototypeOf(state.base_)\n\t},\n\tsetPrototypeOf() {\n\t\tdie(12)\n\t}\n}\n\n/**\n * Array drafts\n */\n\nconst arrayTraps: ProxyHandler<[ProxyArrayState]> = {}\neach(objectTraps, (key, fn) => {\n\t// @ts-ignore\n\tarrayTraps[key] = function() {\n\t\targuments[0] = arguments[0][0]\n\t\treturn fn.apply(this, arguments)\n\t}\n})\narrayTraps.deleteProperty = function(state, prop) {\n\tif (__DEV__ && isNaN(parseInt(prop as any))) die(13)\n\t// @ts-ignore\n\treturn arrayTraps.set!.call(this, state, prop, undefined)\n}\narrayTraps.set = function(state, prop, value) {\n\tif (__DEV__ && prop !== \"length\" && isNaN(parseInt(prop as any))) die(14)\n\treturn objectTraps.set!.call(this, state[0], prop, value, state[0])\n}\n\n// Access a property without creating an Immer draft.\nfunction peek(draft: Drafted, prop: PropertyKey) {\n\tconst state = draft[DRAFT_STATE]\n\tconst source = state ? latest(state) : draft\n\treturn source[prop]\n}\n\nfunction readPropFromProto(state: ImmerState, source: any, prop: PropertyKey) {\n\tconst desc = getDescriptorFromProto(source, prop)\n\treturn desc\n\t\t? `value` in desc\n\t\t\t? desc.value\n\t\t\t: // This is a very special case, if the prop is a getter defined by the\n\t\t\t // prototype, we should invoke it with the draft as context!\n\t\t\t desc.get?.call(state.draft_)\n\t\t: undefined\n}\n\nfunction getDescriptorFromProto(\n\tsource: any,\n\tprop: PropertyKey\n): PropertyDescriptor | undefined {\n\t// 'in' checks proto!\n\tif (!(prop in source)) return undefined\n\tlet proto = Object.getPrototypeOf(source)\n\twhile (proto) {\n\t\tconst desc = Object.getOwnPropertyDescriptor(proto, prop)\n\t\tif (desc) return desc\n\t\tproto = Object.getPrototypeOf(proto)\n\t}\n\treturn undefined\n}\n\nexport function markChanged(state: ImmerState) {\n\tif (!state.modified_) {\n\t\tstate.modified_ = true\n\t\tif (state.parent_) {\n\t\t\tmarkChanged(state.parent_)\n\t\t}\n\t}\n}\n\nexport function prepareCopy(state: {base_: any; copy_: any}) {\n\tif (!state.copy_) {\n\t\tstate.copy_ = shallowCopy(state.base_)\n\t}\n}\n","import {\n\tIProduceWithPatches,\n\tIProduce,\n\tImmerState,\n\tDrafted,\n\tisDraftable,\n\tprocessResult,\n\tPatch,\n\tObjectish,\n\tDRAFT_STATE,\n\tDraft,\n\tPatchListener,\n\tisDraft,\n\tisMap,\n\tisSet,\n\tcreateProxyProxy,\n\tgetPlugin,\n\tdie,\n\thasProxies,\n\tenterScope,\n\trevokeScope,\n\tleaveScope,\n\tusePatchesInScope,\n\tgetCurrentScope,\n\tNOTHING,\n\tfreeze,\n\tcurrent\n} from \"../internal\"\n\ninterface ProducersFns {\n\tproduce: IProduce\n\tproduceWithPatches: IProduceWithPatches\n}\n\nexport class Immer implements ProducersFns {\n\tuseProxies_: boolean = hasProxies\n\n\tautoFreeze_: boolean = true\n\n\tconstructor(config?: {useProxies?: boolean; autoFreeze?: boolean}) {\n\t\tif (typeof config?.useProxies === \"boolean\")\n\t\t\tthis.setUseProxies(config!.useProxies)\n\t\tif (typeof config?.autoFreeze === \"boolean\")\n\t\t\tthis.setAutoFreeze(config!.autoFreeze)\n\t}\n\n\t/**\n\t * The `produce` function takes a value and a \"recipe function\" (whose\n\t * return value often depends on the 
base state). The recipe function is\n\t * free to mutate its first argument however it wants. All mutations are\n\t * only ever applied to a __copy__ of the base state.\n\t *\n\t * Pass only a function to create a \"curried producer\" which relieves you\n\t * from passing the recipe function every time.\n\t *\n\t * Only plain objects and arrays are made mutable. All other objects are\n\t * considered uncopyable.\n\t *\n\t * Note: This function is __bound__ to its `Immer` instance.\n\t *\n\t * @param {any} base - the initial state\n\t * @param {Function} recipe - function that receives a proxy of the base state as first argument and which can be freely modified\n\t * @param {Function} patchListener - optional function that will be called with all the patches produced here\n\t * @returns {any} a new state, or the initial state if nothing was modified\n\t */\n\tproduce: IProduce = (base: any, recipe?: any, patchListener?: any) => {\n\t\t// curried invocation\n\t\tif (typeof base === \"function\" && typeof recipe !== \"function\") {\n\t\t\tconst defaultBase = recipe\n\t\t\trecipe = base\n\n\t\t\tconst self = this\n\t\t\treturn function curriedProduce(\n\t\t\t\tthis: any,\n\t\t\t\tbase = defaultBase,\n\t\t\t\t...args: any[]\n\t\t\t) {\n\t\t\t\treturn self.produce(base, (draft: Drafted) => recipe.call(this, draft, ...args)) // prettier-ignore\n\t\t\t}\n\t\t}\n\n\t\tif (typeof recipe !== \"function\") die(6)\n\t\tif (patchListener !== undefined && typeof patchListener !== \"function\")\n\t\t\tdie(7)\n\n\t\tlet result\n\n\t\t// Only plain objects, arrays, and \"immerable classes\" are drafted.\n\t\tif (isDraftable(base)) {\n\t\t\tconst scope = enterScope(this)\n\t\t\tconst proxy = createProxy(this, base, undefined)\n\t\t\tlet hasError = true\n\t\t\ttry {\n\t\t\t\tresult = recipe(proxy)\n\t\t\t\thasError = false\n\t\t\t} finally {\n\t\t\t\t// finally instead of catch + rethrow better preserves original stack\n\t\t\t\tif (hasError) revokeScope(scope)\n\t\t\t\telse leaveScope(scope)\n\t\t\t}\n\t\t\tif (typeof Promise !== \"undefined\" && result instanceof Promise) {\n\t\t\t\treturn result.then(\n\t\t\t\t\tresult => {\n\t\t\t\t\t\tusePatchesInScope(scope, patchListener)\n\t\t\t\t\t\treturn processResult(result, scope)\n\t\t\t\t\t},\n\t\t\t\t\terror => {\n\t\t\t\t\t\trevokeScope(scope)\n\t\t\t\t\t\tthrow error\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t}\n\t\t\tusePatchesInScope(scope, patchListener)\n\t\t\treturn processResult(result, scope)\n\t\t} else if (!base || typeof base !== \"object\") {\n\t\t\tresult = recipe(base)\n\t\t\tif (result === undefined) result = base\n\t\t\tif (result === NOTHING) result = undefined\n\t\t\tif (this.autoFreeze_) freeze(result, true)\n\t\t\tif (patchListener) {\n\t\t\t\tconst p: Patch[] = []\n\t\t\t\tconst ip: Patch[] = []\n\t\t\t\tgetPlugin(\"Patches\").generateReplacementPatches_(base, result, p, ip)\n\t\t\t\tpatchListener(p, ip)\n\t\t\t}\n\t\t\treturn result\n\t\t} else die(21, base)\n\t}\n\n\tproduceWithPatches: IProduceWithPatches = (base: any, recipe?: any): any => {\n\t\t// curried invocation\n\t\tif (typeof base === \"function\") {\n\t\t\treturn (state: any, ...args: any[]) =>\n\t\t\t\tthis.produceWithPatches(state, (draft: any) => base(draft, ...args))\n\t\t}\n\n\t\tlet patches: Patch[], inversePatches: Patch[]\n\t\tconst result = this.produce(base, recipe, (p: Patch[], ip: Patch[]) => {\n\t\t\tpatches = p\n\t\t\tinversePatches = ip\n\t\t})\n\n\t\tif (typeof Promise !== \"undefined\" && result instanceof Promise) {\n\t\t\treturn result.then(nextState => [nextState, 
patches!, inversePatches!])\n\t\t}\n\t\treturn [result, patches!, inversePatches!]\n\t}\n\n\tcreateDraft(base: T): Draft {\n\t\tif (!isDraftable(base)) die(8)\n\t\tif (isDraft(base)) base = current(base)\n\t\tconst scope = enterScope(this)\n\t\tconst proxy = createProxy(this, base, undefined)\n\t\tproxy[DRAFT_STATE].isManual_ = true\n\t\tleaveScope(scope)\n\t\treturn proxy as any\n\t}\n\n\tfinishDraft>(\n\t\tdraft: D,\n\t\tpatchListener?: PatchListener\n\t): D extends Draft ? T : never {\n\t\tconst state: ImmerState = draft && (draft as any)[DRAFT_STATE]\n\t\tif (__DEV__) {\n\t\t\tif (!state || !state.isManual_) die(9)\n\t\t\tif (state.finalized_) die(10)\n\t\t}\n\t\tconst {scope_: scope} = state\n\t\tusePatchesInScope(scope, patchListener)\n\t\treturn processResult(undefined, scope)\n\t}\n\n\t/**\n\t * Pass true to automatically freeze all copies created by Immer.\n\t *\n\t * By default, auto-freezing is enabled.\n\t */\n\tsetAutoFreeze(value: boolean) {\n\t\tthis.autoFreeze_ = value\n\t}\n\n\t/**\n\t * Pass true to use the ES2015 `Proxy` class when creating drafts, which is\n\t * always faster than using ES5 proxies.\n\t *\n\t * By default, feature detection is used, so calling this is rarely necessary.\n\t */\n\tsetUseProxies(value: boolean) {\n\t\tif (value && !hasProxies) {\n\t\t\tdie(20)\n\t\t}\n\t\tthis.useProxies_ = value\n\t}\n\n\tapplyPatches(base: T, patches: Patch[]): T {\n\t\t// If a patch replaces the entire state, take that replacement as base\n\t\t// before applying patches\n\t\tlet i: number\n\t\tfor (i = patches.length - 1; i >= 0; i--) {\n\t\t\tconst patch = patches[i]\n\t\t\tif (patch.path.length === 0 && patch.op === \"replace\") {\n\t\t\t\tbase = patch.value\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\t// If there was a patch that replaced the entire state, start from the\n\t\t// patch after that.\n\t\tif (i > -1) {\n\t\t\tpatches = patches.slice(i + 1)\n\t\t}\n\n\t\tconst applyPatchesImpl = getPlugin(\"Patches\").applyPatches_\n\t\tif (isDraft(base)) {\n\t\t\t// N.B: never hits if some patch a replacement, patches are never drafts\n\t\t\treturn applyPatchesImpl(base, patches)\n\t\t}\n\t\t// Otherwise, produce a copy of the base state.\n\t\treturn this.produce(base, (draft: Drafted) =>\n\t\t\tapplyPatchesImpl(draft, patches)\n\t\t)\n\t}\n}\n\nexport function createProxy(\n\timmer: Immer,\n\tvalue: T,\n\tparent?: ImmerState\n): Drafted {\n\t// precondition: createProxy should be guarded by isDraftable, so we know we can safely draft\n\tconst draft: Drafted = isMap(value)\n\t\t? getPlugin(\"MapSet\").proxyMap_(value, parent)\n\t\t: isSet(value)\n\t\t? getPlugin(\"MapSet\").proxySet_(value, parent)\n\t\t: immer.useProxies_\n\t\t? createProxyProxy(value, parent)\n\t\t: getPlugin(\"ES5\").createES5Proxy_(value, parent)\n\n\tconst scope = parent ? parent.scope_ : getCurrentScope()\n\tscope.drafts_.push(draft)\n\treturn draft\n}\n","import {\n\tdie,\n\tisDraft,\n\tshallowCopy,\n\teach,\n\tDRAFT_STATE,\n\tget,\n\tset,\n\tImmerState,\n\tisDraftable,\n\tArchtype,\n\tgetArchtype,\n\tgetPlugin\n} from \"../internal\"\n\n/** Takes a snapshot of the current state of a draft and finalizes it (but without freezing). This is a great utility to print the current state during debugging (no Proxies in the way). The output of current can also be safely leaked outside the producer. 
*/\nexport function current(value: T): T\nexport function current(value: any): any {\n\tif (!isDraft(value)) die(22, value)\n\treturn currentImpl(value)\n}\n\nfunction currentImpl(value: any): any {\n\tif (!isDraftable(value)) return value\n\tconst state: ImmerState | undefined = value[DRAFT_STATE]\n\tlet copy: any\n\tconst archType = getArchtype(value)\n\tif (state) {\n\t\tif (\n\t\t\t!state.modified_ &&\n\t\t\t(state.type_ < 4 || !getPlugin(\"ES5\").hasChanges_(state as any))\n\t\t)\n\t\t\treturn state.base_\n\t\t// Optimization: avoid generating new drafts during copying\n\t\tstate.finalized_ = true\n\t\tcopy = copyHelper(value, archType)\n\t\tstate.finalized_ = false\n\t} else {\n\t\tcopy = copyHelper(value, archType)\n\t}\n\n\teach(copy, (key, childValue) => {\n\t\tif (state && get(state.base_, key) === childValue) return // no need to copy or search in something that didn't change\n\t\tset(copy, key, currentImpl(childValue))\n\t})\n\t// In the future, we might consider freezing here, based on the current settings\n\treturn archType === Archtype.Set ? new Set(copy) : copy\n}\n\nfunction copyHelper(value: any, archType: number): any {\n\t// creates a shallow copy, even if it is a map or set\n\tswitch (archType) {\n\t\tcase Archtype.Map:\n\t\t\treturn new Map(value)\n\t\tcase Archtype.Set:\n\t\t\t// Set will be cloned as array temporarily, so that we can replace individual items\n\t\t\treturn Array.from(value)\n\t}\n\treturn shallowCopy(value)\n}\n","import {\n\tImmerState,\n\tDrafted,\n\tES5ArrayState,\n\tES5ObjectState,\n\teach,\n\thas,\n\tisDraft,\n\tlatest,\n\tDRAFT_STATE,\n\tis,\n\tloadPlugin,\n\tImmerScope,\n\tProxyType,\n\tgetCurrentScope,\n\tdie,\n\tmarkChanged,\n\tobjectTraps,\n\townKeys,\n\tgetOwnPropertyDescriptors\n} from \"../internal\"\n\ntype ES5State = ES5ArrayState | ES5ObjectState\n\nexport function enableES5() {\n\tfunction willFinalizeES5_(\n\t\tscope: ImmerScope,\n\t\tresult: any,\n\t\tisReplaced: boolean\n\t) {\n\t\tif (!isReplaced) {\n\t\t\tif (scope.patches_) {\n\t\t\t\tmarkChangesRecursively(scope.drafts_![0])\n\t\t\t}\n\t\t\t// This is faster when we don't care about which attributes changed.\n\t\t\tmarkChangesSweep(scope.drafts_)\n\t\t}\n\t\t// When a child draft is returned, look for changes.\n\t\telse if (\n\t\t\tisDraft(result) &&\n\t\t\t(result[DRAFT_STATE] as ES5State).scope_ === scope\n\t\t) {\n\t\t\tmarkChangesSweep(scope.drafts_)\n\t\t}\n\t}\n\n\tfunction createES5Draft(isArray: boolean, base: any) {\n\t\tif (isArray) {\n\t\t\tconst draft = new Array(base.length)\n\t\t\tfor (let i = 0; i < base.length; i++)\n\t\t\t\tObject.defineProperty(draft, \"\" + i, proxyProperty(i, true))\n\t\t\treturn draft\n\t\t} else {\n\t\t\tconst descriptors = getOwnPropertyDescriptors(base)\n\t\t\tdelete descriptors[DRAFT_STATE as any]\n\t\t\tconst keys = ownKeys(descriptors)\n\t\t\tfor (let i = 0; i < keys.length; i++) {\n\t\t\t\tconst key: any = keys[i]\n\t\t\t\tdescriptors[key] = proxyProperty(\n\t\t\t\t\tkey,\n\t\t\t\t\tisArray || !!descriptors[key].enumerable\n\t\t\t\t)\n\t\t\t}\n\t\t\treturn Object.create(Object.getPrototypeOf(base), descriptors)\n\t\t}\n\t}\n\n\tfunction createES5Proxy_(\n\t\tbase: T,\n\t\tparent?: ImmerState\n\t): Drafted {\n\t\tconst isArray = Array.isArray(base)\n\t\tconst draft = createES5Draft(isArray, base)\n\n\t\tconst state: ES5ObjectState | ES5ArrayState = {\n\t\t\ttype_: isArray ? ProxyType.ES5Array : (ProxyType.ES5Object as any),\n\t\t\tscope_: parent ? 
parent.scope_ : getCurrentScope(),\n\t\t\tmodified_: false,\n\t\t\tfinalized_: false,\n\t\t\tassigned_: {},\n\t\t\tparent_: parent,\n\t\t\t// base is the object we are drafting\n\t\t\tbase_: base,\n\t\t\t// draft is the draft object itself, that traps all reads and reads from either the base (if unmodified) or copy (if modified)\n\t\t\tdraft_: draft,\n\t\t\tcopy_: null,\n\t\t\trevoked_: false,\n\t\t\tisManual_: false\n\t\t}\n\n\t\tObject.defineProperty(draft, DRAFT_STATE, {\n\t\t\tvalue: state,\n\t\t\t// enumerable: false <- the default\n\t\t\twritable: true\n\t\t})\n\t\treturn draft\n\t}\n\n\t// property descriptors are recycled to make sure we don't create a get and set closure per property,\n\t// but share them all instead\n\tconst descriptors: {[prop: string]: PropertyDescriptor} = {}\n\n\tfunction proxyProperty(\n\t\tprop: string | number,\n\t\tenumerable: boolean\n\t): PropertyDescriptor {\n\t\tlet desc = descriptors[prop]\n\t\tif (desc) {\n\t\t\tdesc.enumerable = enumerable\n\t\t} else {\n\t\t\tdescriptors[prop] = desc = {\n\t\t\t\tconfigurable: true,\n\t\t\t\tenumerable,\n\t\t\t\tget(this: any) {\n\t\t\t\t\tconst state = this[DRAFT_STATE]\n\t\t\t\t\tif (__DEV__) assertUnrevoked(state)\n\t\t\t\t\t// @ts-ignore\n\t\t\t\t\treturn objectTraps.get(state, prop)\n\t\t\t\t},\n\t\t\t\tset(this: any, value) {\n\t\t\t\t\tconst state = this[DRAFT_STATE]\n\t\t\t\t\tif (__DEV__) assertUnrevoked(state)\n\t\t\t\t\t// @ts-ignore\n\t\t\t\t\tobjectTraps.set(state, prop, value)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn desc\n\t}\n\n\t// This looks expensive, but only proxies are visited, and only objects without known changes are scanned.\n\tfunction markChangesSweep(drafts: Drafted[]) {\n\t\t// The natural order of drafts in the `scope` array is based on when they\n\t\t// were accessed. By processing drafts in reverse natural order, we have a\n\t\t// better chance of processing leaf nodes first. 
When a leaf node is known to\n\t\t// have changed, we can avoid any traversal of its ancestor nodes.\n\t\tfor (let i = drafts.length - 1; i >= 0; i--) {\n\t\t\tconst state: ES5State = drafts[i][DRAFT_STATE]\n\t\t\tif (!state.modified_) {\n\t\t\t\tswitch (state.type_) {\n\t\t\t\t\tcase ProxyType.ES5Array:\n\t\t\t\t\t\tif (hasArrayChanges(state)) markChanged(state)\n\t\t\t\t\t\tbreak\n\t\t\t\t\tcase ProxyType.ES5Object:\n\t\t\t\t\t\tif (hasObjectChanges(state)) markChanged(state)\n\t\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfunction markChangesRecursively(object: any) {\n\t\tif (!object || typeof object !== \"object\") return\n\t\tconst state: ES5State | undefined = object[DRAFT_STATE]\n\t\tif (!state) return\n\t\tconst {base_, draft_, assigned_, type_} = state\n\t\tif (type_ === ProxyType.ES5Object) {\n\t\t\t// Look for added keys.\n\t\t\t// probably there is a faster way to detect changes, as sweep + recurse seems to do some\n\t\t\t// unnecessary work.\n\t\t\t// also: probably we can store the information we detect here, to speed up tree finalization!\n\t\t\teach(draft_, key => {\n\t\t\t\tif ((key as any) === DRAFT_STATE) return\n\t\t\t\t// The `undefined` check is a fast path for pre-existing keys.\n\t\t\t\tif ((base_ as any)[key] === undefined && !has(base_, key)) {\n\t\t\t\t\tassigned_[key] = true\n\t\t\t\t\tmarkChanged(state)\n\t\t\t\t} else if (!assigned_[key]) {\n\t\t\t\t\t// Only untouched properties trigger recursion.\n\t\t\t\t\tmarkChangesRecursively(draft_[key])\n\t\t\t\t}\n\t\t\t})\n\t\t\t// Look for removed keys.\n\t\t\teach(base_, key => {\n\t\t\t\t// The `undefined` check is a fast path for pre-existing keys.\n\t\t\t\tif (draft_[key] === undefined && !has(draft_, key)) {\n\t\t\t\t\tassigned_[key] = false\n\t\t\t\t\tmarkChanged(state)\n\t\t\t\t}\n\t\t\t})\n\t\t} else if (type_ === ProxyType.ES5Array) {\n\t\t\tif (hasArrayChanges(state as ES5ArrayState)) {\n\t\t\t\tmarkChanged(state)\n\t\t\t\tassigned_.length = true\n\t\t\t}\n\n\t\t\tif (draft_.length < base_.length) {\n\t\t\t\tfor (let i = draft_.length; i < base_.length; i++) assigned_[i] = false\n\t\t\t} else {\n\t\t\t\tfor (let i = base_.length; i < draft_.length; i++) assigned_[i] = true\n\t\t\t}\n\n\t\t\t// Minimum count is enough, the other parts has been processed.\n\t\t\tconst min = Math.min(draft_.length, base_.length)\n\n\t\t\tfor (let i = 0; i < min; i++) {\n\t\t\t\t// Only untouched indices trigger recursion.\n\t\t\t\tif (!draft_.hasOwnProperty(i)) {\n\t\t\t\t\tassigned_[i] = true\n\t\t\t\t}\n\t\t\t\tif (assigned_[i] === undefined) markChangesRecursively(draft_[i])\n\t\t\t}\n\t\t}\n\t}\n\n\tfunction hasObjectChanges(state: ES5ObjectState) {\n\t\tconst {base_, draft_} = state\n\n\t\t// Search for added keys and changed keys. Start at the back, because\n\t\t// non-numeric keys are ordered by time of definition on the object.\n\t\tconst keys = ownKeys(draft_)\n\t\tfor (let i = keys.length - 1; i >= 0; i--) {\n\t\t\tconst key: any = keys[i]\n\t\t\tif (key === DRAFT_STATE) continue\n\t\t\tconst baseValue = base_[key]\n\t\t\t// The `undefined` check is a fast path for pre-existing keys.\n\t\t\tif (baseValue === undefined && !has(base_, key)) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\t// Once a base key is deleted, future changes go undetected, because its\n\t\t\t// descriptor is erased. This branch detects any missed changes.\n\t\t\telse {\n\t\t\t\tconst value = draft_[key]\n\t\t\t\tconst state: ImmerState = value && value[DRAFT_STATE]\n\t\t\t\tif (state ? 
state.base_ !== baseValue : !is(value, baseValue)) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// At this point, no keys were added or changed.\n\t\t// Compare key count to determine if keys were deleted.\n\t\tconst baseIsDraft = !!base_[DRAFT_STATE as any]\n\t\treturn keys.length !== ownKeys(base_).length + (baseIsDraft ? 0 : 1) // + 1 to correct for DRAFT_STATE\n\t}\n\n\tfunction hasArrayChanges(state: ES5ArrayState) {\n\t\tconst {draft_} = state\n\t\tif (draft_.length !== state.base_.length) return true\n\t\t// See #116\n\t\t// If we first shorten the length, our array interceptors will be removed.\n\t\t// If after that new items are added, result in the same original length,\n\t\t// those last items will have no intercepting property.\n\t\t// So if there is no own descriptor on the last position, we know that items were removed and added\n\t\t// N.B.: splice, unshift, etc only shift values around, but not prop descriptors, so we only have to check\n\t\t// the last one\n\t\t// last descriptor can be not a trap, if the array was extended\n\t\tconst descriptor = Object.getOwnPropertyDescriptor(\n\t\t\tdraft_,\n\t\t\tdraft_.length - 1\n\t\t)\n\t\t// descriptor can be null, but only for newly created sparse arrays, eg. new Array(10)\n\t\tif (descriptor && !descriptor.get) return true\n\t\t// if we miss a property, it has been deleted, so array probobaly changed\n\t\tfor (let i = 0; i < draft_.length; i++) {\n\t\t\tif (!draft_.hasOwnProperty(i)) return true\n\t\t}\n\t\t// For all other cases, we don't have to compare, as they would have been picked up by the index setters\n\t\treturn false\n\t}\n\n\tfunction hasChanges_(state: ES5State) {\n\t\treturn state.type_ === ProxyType.ES5Object\n\t\t\t? hasObjectChanges(state)\n\t\t\t: hasArrayChanges(state)\n\t}\n\n\tfunction assertUnrevoked(state: any /*ES5State | MapState | SetState*/) {\n\t\tif (state.revoked_) die(3, JSON.stringify(latest(state)))\n\t}\n\n\tloadPlugin(\"ES5\", {\n\t\tcreateES5Proxy_,\n\t\twillFinalizeES5_,\n\t\thasChanges_\n\t})\n}\n","import {\n\tIProduce,\n\tIProduceWithPatches,\n\tImmer,\n\tDraft,\n\tImmutable\n} from \"./internal\"\n\nexport {\n\tDraft,\n\tImmutable,\n\tPatch,\n\tPatchListener,\n\toriginal,\n\tcurrent,\n\tisDraft,\n\tisDraftable,\n\tNOTHING as nothing,\n\tDRAFTABLE as immerable,\n\tfreeze\n} from \"./internal\"\n\nconst immer = new Immer()\n\n/**\n * The `produce` function takes a value and a \"recipe function\" (whose\n * return value often depends on the base state). The recipe function is\n * free to mutate its first argument however it wants. All mutations are\n * only ever applied to a __copy__ of the base state.\n *\n * Pass only a function to create a \"curried producer\" which relieves you\n * from passing the recipe function every time.\n *\n * Only plain objects and arrays are made mutable. 
All other objects are\n * considered uncopyable.\n *\n * Note: This function is __bound__ to its `Immer` instance.\n *\n * @param {any} base - the initial state\n * @param {Function} producer - function that receives a proxy of the base state as first argument and which can be freely modified\n * @param {Function} patchListener - optional function that will be called with all the patches produced here\n * @returns {any} a new state, or the initial state if nothing was modified\n */\nexport const produce: IProduce = immer.produce\nexport default produce\n\n/**\n * Like `produce`, but `produceWithPatches` always returns a tuple\n * [nextState, patches, inversePatches] (instead of just the next state)\n */\nexport const produceWithPatches: IProduceWithPatches = immer.produceWithPatches.bind(\n\timmer\n)\n\n/**\n * Pass true to automatically freeze all copies created by Immer.\n *\n * Always freeze by default, even in production mode\n */\nexport const setAutoFreeze = immer.setAutoFreeze.bind(immer)\n\n/**\n * Pass true to use the ES2015 `Proxy` class when creating drafts, which is\n * always faster than using ES5 proxies.\n *\n * By default, feature detection is used, so calling this is rarely necessary.\n */\nexport const setUseProxies = immer.setUseProxies.bind(immer)\n\n/**\n * Apply an array of Immer patches to the first argument.\n *\n * This function is a producer, which means copy-on-write is in effect.\n */\nexport const applyPatches = immer.applyPatches.bind(immer)\n\n/**\n * Create an Immer draft from the given base state, which may be a draft itself.\n * The draft can be modified until you finalize it with the `finishDraft` function.\n */\nexport const createDraft = immer.createDraft.bind(immer)\n\n/**\n * Finalize an Immer draft from a `createDraft` call, returning the base state\n * (if no changes were made) or a modified copy. The draft must *not* be\n * mutated afterwards.\n *\n * Pass a function as the 2nd argument to generate Immer patches based on the\n * changes that were made.\n */\nexport const finishDraft = immer.finishDraft.bind(immer)\n\n/**\n * This function is actually a no-op, but can be used to cast an immutable type\n * to an draft type and make TypeScript happy\n *\n * @param value\n */\nexport function castDraft(value: T): Draft {\n\treturn value as any\n}\n\n/**\n * This function is actually a no-op, but can be used to cast a mutable type\n * to an immutable type and make TypeScript happy\n * @param value\n */\nexport function castImmutable(value: T): Immutable {\n\treturn value as any\n}\n\nexport {Immer}\n\nexport {enableES5} from \"./plugins/es5\"\nexport {enablePatches} from \"./plugins/patches\"\nexport {enableMapSet} from \"./plugins/mapset\"\nexport {enableAllPlugins} from \"./plugins/all\"\n","// Should be no imports here!\n\n// Some things that should be evaluated before all else...\n\n// We only want to know if non-polyfilled symbols are available\nconst hasSymbol =\n\ttypeof Symbol !== \"undefined\" && typeof Symbol(\"x\") === \"symbol\"\nexport const hasMap = typeof Map !== \"undefined\"\nexport const hasSet = typeof Set !== \"undefined\"\nexport const hasProxies =\n\ttypeof Proxy !== \"undefined\" &&\n\ttypeof Proxy.revocable !== \"undefined\" &&\n\ttypeof Reflect !== \"undefined\"\n\n/**\n * The sentinel value returned by producers to replace the draft with undefined.\n */\nexport const NOTHING: Nothing = hasSymbol\n\t? 
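The entry module above re-exports immer's public API (`produce`, `produceWithPatches`, `applyPatches`, `createDraft`/`finishDraft`, the cast helpers). A short usage sketch for the patch-related exports; in this immer version `enablePatches()` must be called before they are used:

```ts
import { produceWithPatches, applyPatches, enablePatches } from "immer"

enablePatches()

const [next, patches, inversePatches] = produceWithPatches({ count: 0 }, draft => {
  draft.count += 1
})
// patches:        [{ op: "replace", path: ["count"], value: 1 }]
// inversePatches: [{ op: "replace", path: ["count"], value: 0 }]

// applyPatches is itself a producer, so copy-on-write applies here as well.
const replayed = applyPatches({ count: 0 }, patches) // { count: 1 }
```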
Symbol.for(\"immer-nothing\")\n\t: ({[\"immer-nothing\"]: true} as any)\n\n/**\n * To let Immer treat your class instances as plain immutable objects\n * (albeit with a custom prototype), you must define either an instance property\n * or a static property on each of your custom classes.\n *\n * Otherwise, your class instance will never be drafted, which means it won't be\n * safe to mutate in a produce callback.\n */\nexport const DRAFTABLE: unique symbol = hasSymbol\n\t? Symbol.for(\"immer-draftable\")\n\t: (\"__$immer_draftable\" as any)\n\nexport const DRAFT_STATE: unique symbol = hasSymbol\n\t? Symbol.for(\"immer-state\")\n\t: (\"__$immer_state\" as any)\n\n// Even a polyfilled Symbol might provide Symbol.iterator\nexport const iteratorSymbol: typeof Symbol.iterator =\n\t(typeof Symbol != \"undefined\" && Symbol.iterator) || (\"@@iterator\" as any)\n\n/** Use a class type for `nothing` so its type is unique */\nexport class Nothing {\n\t// This lets us do `Exclude`\n\t// @ts-ignore\n\tprivate _!: unique symbol\n}\n","/** A function that accepts a potential \"extra argument\" value to be injected later,\r\n * and returns an instance of the thunk middleware that uses that value\r\n */\nfunction createThunkMiddleware(extraArgument) {\n // Standard Redux middleware definition pattern:\n // See: https://redux.js.org/tutorials/fundamentals/part-4-store#writing-custom-middleware\n var middleware = function middleware(_ref) {\n var dispatch = _ref.dispatch,\n getState = _ref.getState;\n return function (next) {\n return function (action) {\n // The thunk middleware looks for any functions that were passed to `store.dispatch`.\n // If this \"action\" is really a function, call it and return the result.\n if (typeof action === 'function') {\n // Inject the store's `dispatch` and `getState` methods, as well as any \"extra arg\"\n return action(dispatch, getState, extraArgument);\n } // Otherwise, pass the action down the middleware chain as usual\n\n\n return next(action);\n };\n };\n };\n\n return middleware;\n}\n\nvar thunk = createThunkMiddleware(); // Attach the factory function so users can create a customized version\n// with whatever \"extra arg\" they want to inject into their thunks\n\nthunk.withExtraArgument = createThunkMiddleware;\nexport default thunk;","import { current, isDraft } from 'immer'\r\nimport { createSelector } from 'reselect'\r\n\r\n/**\r\n * \"Draft-Safe\" version of `reselect`'s `createSelector`:\r\n * If an `immer`-drafted object is passed into the resulting selector's first argument,\r\n * the selector will act on the current draft value, instead of returning a cached value\r\n * that might be possibly outdated if the draft has been modified since.\r\n * @public\r\n */\r\nexport const createDraftSafeSelector: typeof createSelector = (\r\n ...args: unknown[]\r\n) => {\r\n const selector = (createSelector as any)(...args)\r\n const wrappedSelector = (value: unknown, ...rest: unknown[]) =>\r\n selector(isDraft(value) ? current(value) : value, ...rest)\r\n return wrappedSelector as any\r\n}\r\n","import type { Action, ActionCreator, StoreEnhancer } from 'redux'\r\nimport { compose } from 'redux'\r\n\r\n/**\r\n * @public\r\n */\r\nexport interface DevToolsEnhancerOptions {\r\n /**\r\n * the instance name to be showed on the monitor page. 
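The thunk middleware above simply intercepts dispatched functions and calls them with `(dispatch, getState, extraArgument)`. A sketch of both halves, with a hypothetical `api` object standing in for the extra argument:

```ts
import thunk from "redux-thunk"

// Hypothetical service object injected as the third thunk argument.
const api = { loadProject: async (id: string) => ({ id, name: "demo" }) }
const thunkWithApi = thunk.withExtraArgument(api)

// A thunk action creator: dispatching the returned function runs it with
// (dispatch, getState, extraArgument) instead of reaching the reducers.
const fetchProject = (id: string) =>
  async (dispatch: (a: any) => any, _getState: () => any, extra: typeof api) => {
    dispatch({ type: "project/pending" })
    dispatch({ type: "project/fulfilled", payload: await extra.loadProject(id) })
  }
```

The `createDraftSafeSelector` wrapper defined just above behaves like reselect's `createSelector`, except that it unwraps an Immer draft with `current()` before the memoized selector sees it, so stale cached values are not returned for a modified draft.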
Default value is `document.title`.\r\n * If not specified and there's no document title, it will consist of `tabId` and `instanceId`.\r\n */\r\n name?: string\r\n /**\r\n * action creators functions to be available in the Dispatcher.\r\n */\r\n actionCreators?: ActionCreator[] | { [key: string]: ActionCreator }\r\n /**\r\n * if more than one action is dispatched in the indicated interval, all new actions will be collected and sent at once.\r\n * It is the joint between performance and speed. When set to `0`, all actions will be sent instantly.\r\n * Set it to a higher value when experiencing perf issues (also `maxAge` to a lower value).\r\n *\r\n * @default 500 ms.\r\n */\r\n latency?: number\r\n /**\r\n * (> 1) - maximum allowed actions to be stored in the history tree. The oldest actions are removed once maxAge is reached. It's critical for performance.\r\n *\r\n * @default 50\r\n */\r\n maxAge?: number\r\n /**\r\n * Customizes how actions and state are serialized and deserialized. Can be a boolean or object. If given a boolean, the behavior is the same as if you\r\n * were to pass an object and specify `options` as a boolean. Giving an object allows fine-grained customization using the `replacer` and `reviver`\r\n * functions.\r\n */\r\n serialize?:\r\n | boolean\r\n | {\r\n /**\r\n * - `undefined` - will use regular `JSON.stringify` to send data (it's the fast mode).\r\n * - `false` - will handle also circular references.\r\n * - `true` - will handle also date, regex, undefined, error objects, symbols, maps, sets and functions.\r\n * - object, which contains `date`, `regex`, `undefined`, `error`, `symbol`, `map`, `set` and `function` keys.\r\n * For each of them you can indicate if to include (by setting as `true`).\r\n * For `function` key you can also specify a custom function which handles serialization.\r\n * See [`jsan`](https://github.com/kolodny/jsan) for more details.\r\n */\r\n options?:\r\n | undefined\r\n | boolean\r\n | {\r\n date?: true\r\n regex?: true\r\n undefined?: true\r\n error?: true\r\n symbol?: true\r\n map?: true\r\n set?: true\r\n function?: true | ((fn: (...args: any[]) => any) => string)\r\n }\r\n /**\r\n * [JSON replacer function](https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#The_replacer_parameter) used for both actions and states stringify.\r\n * In addition, you can specify a data type by adding a [`__serializedType__`](https://github.com/zalmoxisus/remotedev-serialize/blob/master/helpers/index.js#L4)\r\n * key. So you can deserialize it back while importing or persisting data.\r\n * Moreover, it will also [show a nice preview showing the provided custom type](https://cloud.githubusercontent.com/assets/7957859/21814330/a17d556a-d761-11e6-85ef-159dd12f36c5.png):\r\n */\r\n replacer?: (key: string, value: unknown) => any\r\n /**\r\n * [JSON `reviver` function](https://developer.mozilla.org/en/docs/Web/JavaScript/Reference/Global_Objects/JSON/parse#Using_the_reviver_parameter)\r\n * used for parsing the imported actions and states. See [`remotedev-serialize`](https://github.com/zalmoxisus/remotedev-serialize/blob/master/immutable/serialize.js#L8-L41)\r\n * as an example on how to serialize special data types and get them back.\r\n */\r\n reviver?: (key: string, value: unknown) => any\r\n /**\r\n * Automatically serialize/deserialize immutablejs via [remotedev-serialize](https://github.com/zalmoxisus/remotedev-serialize).\r\n * Just pass the Immutable library. It will support all ImmutableJS structures. 
You can even export them into a file and get them back.\r\n * The only exception is `Record` class, for which you should pass this in addition the references to your classes in `refs`.\r\n */\r\n immutable?: any\r\n /**\r\n * ImmutableJS `Record` classes used to make possible restore its instances back when importing, persisting...\r\n */\r\n refs?: any\r\n }\r\n /**\r\n * function which takes `action` object and id number as arguments, and should return `action` object back.\r\n */\r\n actionSanitizer?: (action: A, id: number) => A\r\n /**\r\n * function which takes `state` object and index as arguments, and should return `state` object back.\r\n */\r\n stateSanitizer?: (state: S, index: number) => S\r\n /**\r\n * *string or array of strings as regex* - actions types to be hidden / shown in the monitors (while passed to the reducers).\r\n * If `actionsWhitelist` specified, `actionsBlacklist` is ignored.\r\n * @deprecated Use actionsDenylist instead.\r\n */\r\n actionsBlacklist?: string | string[]\r\n /**\r\n * *string or array of strings as regex* - actions types to be hidden / shown in the monitors (while passed to the reducers).\r\n * If `actionsWhitelist` specified, `actionsBlacklist` is ignored.\r\n * @deprecated Use actionsAllowlist instead.\r\n */\r\n actionsWhitelist?: string | string[]\r\n /**\r\n * *string or array of strings as regex* - actions types to be hidden / shown in the monitors (while passed to the reducers).\r\n * If `actionsAllowlist` specified, `actionsDenylist` is ignored.\r\n */\r\n actionsDenylist?: string | string[]\r\n /**\r\n * *string or array of strings as regex* - actions types to be hidden / shown in the monitors (while passed to the reducers).\r\n * If `actionsAllowlist` specified, `actionsDenylist` is ignored.\r\n */\r\n actionsAllowlist?: string | string[]\r\n /**\r\n * called for every action before sending, takes `state` and `action` object, and returns `true` in case it allows sending the current data to the monitor.\r\n * Use it as a more advanced version of `actionsDenylist`/`actionsAllowlist` parameters.\r\n */\r\n predicate?: (state: S, action: A) => boolean\r\n /**\r\n * if specified as `false`, it will not record the changes till clicking on `Start recording` button.\r\n * Available only for Redux enhancer, for others use `autoPause`.\r\n *\r\n * @default true\r\n */\r\n shouldRecordChanges?: boolean\r\n /**\r\n * if specified, whenever clicking on `Pause recording` button and there are actions in the history log, will add this action type.\r\n * If not specified, will commit when paused. Available only for Redux enhancer.\r\n *\r\n * @default \"@@PAUSED\"\"\r\n */\r\n pauseActionType?: string\r\n /**\r\n * auto pauses when the extension’s window is not opened, and so has zero impact on your app when not in use.\r\n * Not available for Redux enhancer (as it already does it but storing the data to be sent).\r\n *\r\n * @default false\r\n */\r\n autoPause?: boolean\r\n /**\r\n * if specified as `true`, it will not allow any non-monitor actions to be dispatched till clicking on `Unlock changes` button.\r\n * Available only for Redux enhancer.\r\n *\r\n * @default false\r\n */\r\n shouldStartLocked?: boolean\r\n /**\r\n * if set to `false`, will not recompute the states on hot reloading (or on replacing the reducers). 
Available only for Redux enhancer.\r\n *\r\n * @default true\r\n */\r\n shouldHotReload?: boolean\r\n /**\r\n * if specified as `true`, whenever there's an exception in reducers, the monitors will show the error message, and next actions will not be dispatched.\r\n *\r\n * @default false\r\n */\r\n shouldCatchErrors?: boolean\r\n /**\r\n * If you want to restrict the extension, specify the features you allow.\r\n * If not specified, all of the features are enabled. When set as an object, only those included as `true` will be allowed.\r\n * Note that except `true`/`false`, `import` and `export` can be set as `custom` (which is by default for Redux enhancer), meaning that the importing/exporting occurs on the client side.\r\n * Otherwise, you'll get/set the data right from the monitor part.\r\n */\r\n features?: {\r\n /**\r\n * start/pause recording of dispatched actions\r\n */\r\n pause?: boolean\r\n /**\r\n * lock/unlock dispatching actions and side effects\r\n */\r\n lock?: boolean\r\n /**\r\n * persist states on page reloading\r\n */\r\n persist?: boolean\r\n /**\r\n * export history of actions in a file\r\n */\r\n export?: boolean | 'custom'\r\n /**\r\n * import history of actions from a file\r\n */\r\n import?: boolean | 'custom'\r\n /**\r\n * jump back and forth (time travelling)\r\n */\r\n jump?: boolean\r\n /**\r\n * skip (cancel) actions\r\n */\r\n skip?: boolean\r\n /**\r\n * drag and drop actions in the history list\r\n */\r\n reorder?: boolean\r\n /**\r\n * dispatch custom actions or action creators\r\n */\r\n dispatch?: boolean\r\n /**\r\n * generate tests for the selected actions\r\n */\r\n test?: boolean\r\n }\r\n /**\r\n * Set to true or a stacktrace-returning function to record call stack traces for dispatched actions.\r\n * Defaults to false.\r\n */\r\n trace?: boolean | ((action: A) => string)\r\n /**\r\n * The maximum number of stack trace entries to record per action. Defaults to 10.\r\n */\r\n traceLimit?: number\r\n}\r\n\r\ntype Compose = typeof compose\r\n\r\ninterface ComposeWithDevTools {\r\n (options: DevToolsEnhancerOptions): Compose\r\n (...funcs: StoreEnhancer[]): StoreEnhancer\r\n}\r\n\r\n/**\r\n * @public\r\n */\r\nexport const composeWithDevTools: ComposeWithDevTools =\r\n typeof window !== 'undefined' &&\r\n (window as any).__REDUX_DEVTOOLS_EXTENSION_COMPOSE__\r\n ? (window as any).__REDUX_DEVTOOLS_EXTENSION_COMPOSE__\r\n : function () {\r\n if (arguments.length === 0) return undefined\r\n if (typeof arguments[0] === 'object') return compose\r\n return compose.apply(null, arguments as any as Function[])\r\n }\r\n\r\n/**\r\n * @public\r\n */\r\nexport const devToolsEnhancer: {\r\n (options: DevToolsEnhancerOptions): StoreEnhancer\r\n} =\r\n typeof window !== 'undefined' && (window as any).__REDUX_DEVTOOLS_EXTENSION__\r\n ? (window as any).__REDUX_DEVTOOLS_EXTENSION__\r\n : function () {\r\n return function (noop) {\r\n return noop\r\n }\r\n }\r\n","/**\r\n * Returns true if the passed value is \"plain\" object, i.e. an object whose\r\n * prototype is the root `Object.prototype`. 
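The `DevToolsEnhancerOptions` interface documented above is what Redux Toolkit's `configureStore` accepts in its `devTools` field. A sketch using a few of the documented options; `rootReducer` and the `"ui/tick"` action type are placeholders:

```ts
import { configureStore } from "@reduxjs/toolkit"
import type { Reducer } from "redux"

declare const rootReducer: Reducer // placeholder for the app's root reducer

const store = configureStore({
  reducer: rootReducer,
  devTools: {
    name: "Piximi",               // instance name shown in the DevTools monitor
    maxAge: 50,                   // cap on the retained action history
    trace: true,                  // record a stack trace per dispatched action
    actionsDenylist: ["ui/tick"], // hide noisy action types from the monitor
  },
})
```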
This includes objects created\r\n * using object literals, but not for instance for class instances.\r\n *\r\n * @param {any} value The value to inspect.\r\n * @returns {boolean} True if the argument appears to be a plain object.\r\n *\r\n * @public\r\n */\r\nexport default function isPlainObject(value: unknown): value is object {\r\n if (typeof value !== 'object' || value === null) return false\r\n\r\n let proto = Object.getPrototypeOf(value)\r\n if (proto === null) return true\r\n\r\n let baseProto = proto\r\n while (Object.getPrototypeOf(baseProto) !== null) {\r\n baseProto = Object.getPrototypeOf(baseProto)\r\n }\r\n\r\n return proto === baseProto\r\n}\r\n","import createNextState, { isDraftable } from 'immer'\r\nimport type { Middleware, StoreEnhancer } from 'redux'\r\n\r\nexport function getTimeMeasureUtils(maxDelay: number, fnName: string) {\r\n let elapsed = 0\r\n return {\r\n measureTime(fn: () => T): T {\r\n const started = Date.now()\r\n try {\r\n return fn()\r\n } finally {\r\n const finished = Date.now()\r\n elapsed += finished - started\r\n }\r\n },\r\n warnIfExceeded() {\r\n if (elapsed > maxDelay) {\r\n console.warn(`${fnName} took ${elapsed}ms, which is more than the warning threshold of ${maxDelay}ms. \r\nIf your state or actions are very large, you may want to disable the middleware as it might cause too much of a slowdown in development mode. See https://redux-toolkit.js.org/api/getDefaultMiddleware for instructions.\r\nIt is disabled in production builds, so you don't need to worry about that.`)\r\n }\r\n },\r\n }\r\n}\r\n\r\nexport function delay(ms: number) {\r\n return new Promise((resolve) => setTimeout(resolve, ms))\r\n}\r\n\r\n/**\r\n * @public\r\n */\r\nexport class MiddlewareArray<\r\n Middlewares extends Middleware[]\r\n> extends Array {\r\n constructor(...items: Middlewares)\r\n constructor(...args: any[]) {\r\n super(...args)\r\n Object.setPrototypeOf(this, MiddlewareArray.prototype)\r\n }\r\n\r\n static get [Symbol.species]() {\r\n return MiddlewareArray as any\r\n }\r\n\r\n concat>>(\r\n items: AdditionalMiddlewares\r\n ): MiddlewareArray<[...Middlewares, ...AdditionalMiddlewares]>\r\n\r\n concat>>(\r\n ...items: AdditionalMiddlewares\r\n ): MiddlewareArray<[...Middlewares, ...AdditionalMiddlewares]>\r\n concat(...arr: any[]) {\r\n return super.concat.apply(this, arr)\r\n }\r\n\r\n prepend>>(\r\n items: AdditionalMiddlewares\r\n ): MiddlewareArray<[...AdditionalMiddlewares, ...Middlewares]>\r\n\r\n prepend>>(\r\n ...items: AdditionalMiddlewares\r\n ): MiddlewareArray<[...AdditionalMiddlewares, ...Middlewares]>\r\n\r\n prepend(...arr: any[]) {\r\n if (arr.length === 1 && Array.isArray(arr[0])) {\r\n return new MiddlewareArray(...arr[0].concat(this))\r\n }\r\n return new MiddlewareArray(...arr.concat(this))\r\n }\r\n}\r\n\r\n/**\r\n * @public\r\n */\r\nexport class EnhancerArray<\r\n Enhancers extends StoreEnhancer[]\r\n> extends Array {\r\n constructor(...items: Enhancers)\r\n constructor(...args: any[]) {\r\n super(...args)\r\n Object.setPrototypeOf(this, EnhancerArray.prototype)\r\n }\r\n\r\n static get [Symbol.species]() {\r\n return EnhancerArray as any\r\n }\r\n\r\n concat>>(\r\n items: AdditionalEnhancers\r\n ): EnhancerArray<[...Enhancers, ...AdditionalEnhancers]>\r\n\r\n concat>>(\r\n ...items: AdditionalEnhancers\r\n ): EnhancerArray<[...Enhancers, ...AdditionalEnhancers]>\r\n concat(...arr: any[]) {\r\n return super.concat.apply(this, arr)\r\n }\r\n\r\n prepend>>(\r\n items: AdditionalEnhancers\r\n ): EnhancerArray<[...AdditionalEnhancers, 
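Based on the `isPlainObject` implementation shown above (and assuming, as in recent Redux Toolkit versions, that the helper is re-exported from the package entry), these are the expected results:

```ts
import { isPlainObject } from "@reduxjs/toolkit" // re-export assumed; otherwise use the local function

isPlainObject({ a: 1 })            // true:  object literal
isPlainObject(Object.create(null)) // true:  a null prototype is allowed explicitly
isPlainObject(new Date())          // false: class instance, prototype is not Object.prototype
isPlainObject([1, 2, 3])           // false: arrays fail the same prototype check
isPlainObject("text")              // false: rejected by the typeof guard
```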
...Enhancers]>\r\n\r\n prepend>>(\r\n ...items: AdditionalEnhancers\r\n ): EnhancerArray<[...AdditionalEnhancers, ...Enhancers]>\r\n\r\n prepend(...arr: any[]) {\r\n if (arr.length === 1 && Array.isArray(arr[0])) {\r\n return new EnhancerArray(...arr[0].concat(this))\r\n }\r\n return new EnhancerArray(...arr.concat(this))\r\n }\r\n}\r\n\r\nexport function freezeDraftable(val: T) {\r\n return isDraftable(val) ? createNextState(val, () => {}) : val\r\n}\r\n","import type { Middleware, AnyAction } from 'redux'\r\nimport type { ThunkMiddleware } from 'redux-thunk'\r\nimport thunkMiddleware from 'redux-thunk'\r\nimport type { ImmutableStateInvariantMiddlewareOptions } from './immutableStateInvariantMiddleware'\r\n/* PROD_START_REMOVE_UMD */\r\nimport { createImmutableStateInvariantMiddleware } from './immutableStateInvariantMiddleware'\r\n/* PROD_STOP_REMOVE_UMD */\r\n\r\nimport type { SerializableStateInvariantMiddlewareOptions } from './serializableStateInvariantMiddleware'\r\nimport { createSerializableStateInvariantMiddleware } from './serializableStateInvariantMiddleware'\r\nimport type { ExcludeFromTuple } from './tsHelpers'\r\nimport { MiddlewareArray } from './utils'\r\n\r\nfunction isBoolean(x: any): x is boolean {\r\n return typeof x === 'boolean'\r\n}\r\n\r\ninterface ThunkOptions {\r\n extraArgument: E\r\n}\r\n\r\ninterface GetDefaultMiddlewareOptions {\r\n thunk?: boolean | ThunkOptions\r\n immutableCheck?: boolean | ImmutableStateInvariantMiddlewareOptions\r\n serializableCheck?: boolean | SerializableStateInvariantMiddlewareOptions\r\n}\r\n\r\nexport type ThunkMiddlewareFor<\r\n S,\r\n O extends GetDefaultMiddlewareOptions = {}\r\n> = O extends {\r\n thunk: false\r\n}\r\n ? never\r\n : O extends { thunk: { extraArgument: infer E } }\r\n ? ThunkMiddleware\r\n : ThunkMiddleware\r\n\r\nexport type CurriedGetDefaultMiddleware = <\r\n O extends Partial = {\r\n thunk: true\r\n immutableCheck: true\r\n serializableCheck: true\r\n }\r\n>(\r\n options?: O\r\n) => MiddlewareArray], never>>\r\n\r\nexport function curryGetDefaultMiddleware<\r\n S = any\r\n>(): CurriedGetDefaultMiddleware {\r\n return function curriedGetDefaultMiddleware(options) {\r\n return getDefaultMiddleware(options)\r\n }\r\n}\r\n\r\n/**\r\n * Returns any array containing the default middleware installed by\r\n * `configureStore()`. 
Useful if you want to configure your store with a custom\r\n * `middleware` array but still keep the default set.\r\n *\r\n * @return The default middleware used by `configureStore()`.\r\n *\r\n * @public\r\n *\r\n * @deprecated Prefer to use the callback notation for the `middleware` option in `configureStore`\r\n * to access a pre-typed `getDefaultMiddleware` instead.\r\n */\r\nexport function getDefaultMiddleware<\r\n S = any,\r\n O extends Partial = {\r\n thunk: true\r\n immutableCheck: true\r\n serializableCheck: true\r\n }\r\n>(\r\n options: O = {} as O\r\n): MiddlewareArray], never>> {\r\n const {\r\n thunk = true,\r\n immutableCheck = true,\r\n serializableCheck = true,\r\n } = options\r\n\r\n let middlewareArray = new MiddlewareArray()\r\n\r\n if (thunk) {\r\n if (isBoolean(thunk)) {\r\n middlewareArray.push(thunkMiddleware)\r\n } else {\r\n middlewareArray.push(\r\n thunkMiddleware.withExtraArgument(thunk.extraArgument)\r\n )\r\n }\r\n }\r\n\r\n if (process.env.NODE_ENV !== 'production') {\r\n if (immutableCheck) {\r\n /* PROD_START_REMOVE_UMD */\r\n let immutableOptions: ImmutableStateInvariantMiddlewareOptions = {}\r\n\r\n if (!isBoolean(immutableCheck)) {\r\n immutableOptions = immutableCheck\r\n }\r\n\r\n middlewareArray.unshift(\r\n createImmutableStateInvariantMiddleware(immutableOptions)\r\n )\r\n /* PROD_STOP_REMOVE_UMD */\r\n }\r\n\r\n if (serializableCheck) {\r\n let serializableOptions: SerializableStateInvariantMiddlewareOptions = {}\r\n\r\n if (!isBoolean(serializableCheck)) {\r\n serializableOptions = serializableCheck\r\n }\r\n\r\n middlewareArray.push(\r\n createSerializableStateInvariantMiddleware(serializableOptions)\r\n )\r\n }\r\n }\r\n\r\n return middlewareArray as any\r\n}\r\n","import type {\r\n Reducer,\r\n ReducersMapObject,\r\n Middleware,\r\n Action,\r\n AnyAction,\r\n StoreEnhancer,\r\n Store,\r\n Dispatch,\r\n PreloadedState,\r\n CombinedState,\r\n} from 'redux'\r\nimport { createStore, compose, applyMiddleware, combineReducers } from 'redux'\r\nimport type { DevToolsEnhancerOptions as DevToolsOptions } from './devtoolsExtension'\r\nimport { composeWithDevTools } from './devtoolsExtension'\r\n\r\nimport isPlainObject from './isPlainObject'\r\nimport type {\r\n ThunkMiddlewareFor,\r\n CurriedGetDefaultMiddleware,\r\n} from './getDefaultMiddleware'\r\nimport { curryGetDefaultMiddleware } from './getDefaultMiddleware'\r\nimport type {\r\n NoInfer,\r\n ExtractDispatchExtensions,\r\n ExtractStoreExtensions,\r\n ExtractStateExtensions,\r\n} from './tsHelpers'\r\nimport { EnhancerArray } from './utils'\r\n\r\nconst IS_PRODUCTION = process.env.NODE_ENV === 'production'\r\n\r\n/**\r\n * Callback function type, to be used in `ConfigureStoreOptions.enhancers`\r\n *\r\n * @public\r\n */\r\nexport type ConfigureEnhancersCallback = (\r\n defaultEnhancers: EnhancerArray<[StoreEnhancer<{}, {}>]>\r\n) => E\r\n\r\n/**\r\n * Options for `configureStore()`.\r\n *\r\n * @public\r\n */\r\nexport interface ConfigureStoreOptions<\r\n S = any,\r\n A extends Action = AnyAction,\r\n M extends Middlewares = Middlewares,\r\n E extends Enhancers = Enhancers\r\n> {\r\n /**\r\n * A single reducer function that will be used as the root reducer, or an\r\n * object of slice reducers that will be passed to `combineReducers()`.\r\n */\r\n reducer: Reducer | ReducersMapObject\r\n\r\n /**\r\n * An array of Redux middleware to install. 
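`MiddlewareArray` exists so that `.concat()`/`.prepend()` preserve tuple-level middleware types, and `getDefaultMiddleware` above installs `redux-thunk` plus the dev-only immutability and serializability checks. A sketch combining both, with placeholder names for the reducer, the extra middleware, and the thunk extra argument:

```ts
import { configureStore } from "@reduxjs/toolkit"
import type { Middleware, Reducer } from "redux"

declare const rootReducer: Reducer // placeholder root reducer
declare const logger: Middleware   // placeholder extra middleware

const api = { baseUrl: "/api" }    // placeholder thunk extra argument

const store = configureStore({
  reducer: rootReducer,
  middleware: getDefaultMiddleware =>
    // getDefaultMiddleware() returns a MiddlewareArray, so concat/prepend
    // keep the precise middleware tuple types intact
    getDefaultMiddleware({
      thunk: { extraArgument: api }, // injected as the 3rd argument of every thunk
      serializableCheck: false,      // dev-only check, disabled here for illustration
    }).concat(logger),
})
```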
If not supplied, defaults to\r\n * the set of middleware returned by `getDefaultMiddleware()`.\r\n *\r\n * @example `middleware: (gDM) => gDM().concat(logger, apiMiddleware, yourCustomMiddleware)`\r\n * @see https://redux-toolkit.js.org/api/getDefaultMiddleware#intended-usage\r\n */\r\n middleware?: ((getDefaultMiddleware: CurriedGetDefaultMiddleware) => M) | M\r\n\r\n /**\r\n * Whether to enable Redux DevTools integration. Defaults to `true`.\r\n *\r\n * Additional configuration can be done by passing Redux DevTools options\r\n */\r\n devTools?: boolean | DevToolsOptions\r\n\r\n /**\r\n * The initial state, same as Redux's createStore.\r\n * You may optionally specify it to hydrate the state\r\n * from the server in universal apps, or to restore a previously serialized\r\n * user session. If you use `combineReducers()` to produce the root reducer\r\n * function (either directly or indirectly by passing an object as `reducer`),\r\n * this must be an object with the same shape as the reducer map keys.\r\n */\r\n /*\r\n Not 100% correct but the best approximation we can get:\r\n - if S is a `CombinedState` applying a second `CombinedState` on it does not change anything.\r\n - if it is not, there could be two cases:\r\n - `ReducersMapObject` is being passed in. In this case, we will call `combineReducers` on it and `CombinedState` is correct\r\n - `Reducer` is being passed in. In this case, actually `CombinedState` is wrong and `S` would be correct.\r\n As we cannot distinguish between those two cases without adding another generic parameter,\r\n we just make the pragmatic assumption that the latter almost never happens.\r\n */\r\n preloadedState?: PreloadedState>>\r\n\r\n /**\r\n * The store enhancers to apply. See Redux's `createStore()`.\r\n * All enhancers will be included before the DevTools Extension enhancer.\r\n * If you need to customize the order of enhancers, supply a callback\r\n * function that will receive the original array (ie, `[applyMiddleware]`),\r\n * and should return a new array (such as `[applyMiddleware, offline]`).\r\n * If you only need to add middleware, you can use the `middleware` parameter instead.\r\n */\r\n enhancers?: E | ConfigureEnhancersCallback\r\n}\r\n\r\ntype Middlewares = ReadonlyArray>\r\n\r\ntype Enhancers = ReadonlyArray\r\n\r\nexport interface ToolkitStore<\r\n S = any,\r\n A extends Action = AnyAction,\r\n M extends Middlewares = Middlewares\r\n> extends Store {\r\n /**\r\n * The `dispatch` method of your store, enhanced by all its middlewares.\r\n *\r\n * @inheritdoc\r\n */\r\n dispatch: ExtractDispatchExtensions & Dispatch\r\n}\r\n\r\n/**\r\n * A Redux store returned by `configureStore()`. 
Supports dispatching\r\n * side-effectful _thunks_ in addition to plain actions.\r\n *\r\n * @public\r\n */\r\nexport type EnhancedStore<\r\n S = any,\r\n A extends Action = AnyAction,\r\n M extends Middlewares = Middlewares,\r\n E extends Enhancers = Enhancers\r\n> = ToolkitStore, A, M> &\r\n ExtractStoreExtensions\r\n\r\n/**\r\n * A friendly abstraction over the standard Redux `createStore()` function.\r\n *\r\n * @param options The store configuration.\r\n * @returns A configured Redux store.\r\n *\r\n * @public\r\n */\r\nexport function configureStore<\r\n S = any,\r\n A extends Action = AnyAction,\r\n M extends Middlewares = [ThunkMiddlewareFor],\r\n E extends Enhancers = [StoreEnhancer]\r\n>(options: ConfigureStoreOptions): EnhancedStore {\r\n const curriedGetDefaultMiddleware = curryGetDefaultMiddleware()\r\n\r\n const {\r\n reducer = undefined,\r\n middleware = curriedGetDefaultMiddleware(),\r\n devTools = true,\r\n preloadedState = undefined,\r\n enhancers = undefined,\r\n } = options || {}\r\n\r\n let rootReducer: Reducer\r\n\r\n if (typeof reducer === 'function') {\r\n rootReducer = reducer\r\n } else if (isPlainObject(reducer)) {\r\n rootReducer = combineReducers(reducer) as unknown as Reducer\r\n } else {\r\n throw new Error(\r\n '\"reducer\" is a required argument, and must be a function or an object of functions that can be passed to combineReducers'\r\n )\r\n }\r\n\r\n let finalMiddleware = middleware\r\n if (typeof finalMiddleware === 'function') {\r\n finalMiddleware = finalMiddleware(curriedGetDefaultMiddleware)\r\n\r\n if (!IS_PRODUCTION && !Array.isArray(finalMiddleware)) {\r\n throw new Error(\r\n 'when using a middleware builder function, an array of middleware must be returned'\r\n )\r\n }\r\n }\r\n if (\r\n !IS_PRODUCTION &&\r\n finalMiddleware.some((item: any) => typeof item !== 'function')\r\n ) {\r\n throw new Error(\r\n 'each middleware provided to configureStore must be a function'\r\n )\r\n }\r\n\r\n const middlewareEnhancer: StoreEnhancer = applyMiddleware(...finalMiddleware)\r\n\r\n let finalCompose = compose\r\n\r\n if (devTools) {\r\n finalCompose = composeWithDevTools({\r\n // Enable capture of stack traces for dispatched Redux actions\r\n trace: !IS_PRODUCTION,\r\n ...(typeof devTools === 'object' && devTools),\r\n })\r\n }\r\n\r\n const defaultEnhancers = new EnhancerArray(middlewareEnhancer)\r\n let storeEnhancers: Enhancers = defaultEnhancers\r\n\r\n if (Array.isArray(enhancers)) {\r\n storeEnhancers = [middlewareEnhancer, ...enhancers]\r\n } else if (typeof enhancers === 'function') {\r\n storeEnhancers = enhancers(defaultEnhancers)\r\n }\r\n\r\n const composedEnhancer = finalCompose(...storeEnhancers) as StoreEnhancer\r\n\r\n return createStore(rootReducer, preloadedState, composedEnhancer)\r\n}\r\n","import type { Action } from 'redux'\r\nimport type {\r\n IsUnknownOrNonInferrable,\r\n IfMaybeUndefined,\r\n IfVoid,\r\n IsAny,\r\n} from './tsHelpers'\r\nimport isPlainObject from './isPlainObject'\r\n\r\n/**\r\n * An action with a string type and an associated payload. 
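`configureStore` above wraps `createStore`: it builds the root reducer (calling `combineReducers` when given an object of slice reducers), applies the middleware enhancer, and composes everything through the DevTools extension when enabled. A typical call; the slice reducer is a placeholder:

```ts
import { configureStore } from "@reduxjs/toolkit"
import type { Reducer } from "redux"

declare const projectReducer: Reducer<{ name: string }> // placeholder slice reducer

export const store = configureStore({
  // an object here is passed through combineReducers()
  reducer: { project: projectReducer },
  preloadedState: { project: { name: "Untitled" } },
  devTools: process.env.NODE_ENV !== "production",
})

// Derived types commonly used for typed hooks and thunks:
export type RootState = ReturnType<typeof store.getState>
export type AppDispatch = typeof store.dispatch
```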
This is the\r\n * type of action returned by `createAction()` action creators.\r\n *\r\n * @template P The type of the action's payload.\r\n * @template T the type used for the action type.\r\n * @template M The type of the action's meta (optional)\r\n * @template E The type of the action's error (optional)\r\n *\r\n * @public\r\n */\r\nexport type PayloadAction<\r\n P = void,\r\n T extends string = string,\r\n M = never,\r\n E = never\r\n> = {\r\n payload: P\r\n type: T\r\n} & ([M] extends [never]\r\n ? {}\r\n : {\r\n meta: M\r\n }) &\r\n ([E] extends [never]\r\n ? {}\r\n : {\r\n error: E\r\n })\r\n\r\n/**\r\n * A \"prepare\" method to be used as the second parameter of `createAction`.\r\n * Takes any number of arguments and returns a Flux Standard Action without\r\n * type (will be added later) that *must* contain a payload (might be undefined).\r\n *\r\n * @public\r\n */\r\nexport type PrepareAction
<P>
=\r\n | ((...args: any[]) => { payload: P })\r\n | ((...args: any[]) => { payload: P; meta: any })\r\n | ((...args: any[]) => { payload: P; error: any })\r\n | ((...args: any[]) => { payload: P; meta: any; error: any })\r\n\r\n/**\r\n * Internal version of `ActionCreatorWithPreparedPayload`. Not to be used externally.\r\n *\r\n * @internal\r\n */\r\nexport type _ActionCreatorWithPreparedPayload<\r\n PA extends PrepareAction | void,\r\n T extends string = string\r\n> = PA extends PrepareAction\r\n ? ActionCreatorWithPreparedPayload<\r\n Parameters,\r\n P,\r\n T,\r\n ReturnType extends {\r\n error: infer E\r\n }\r\n ? E\r\n : never,\r\n ReturnType extends {\r\n meta: infer M\r\n }\r\n ? M\r\n : never\r\n >\r\n : void\r\n\r\n/**\r\n * Basic type for all action creators.\r\n *\r\n * @inheritdoc {redux#ActionCreator}\r\n */\r\nexport interface BaseActionCreator {\r\n type: T\r\n match: (action: Action) => action is PayloadAction\r\n}\r\n\r\n/**\r\n * An action creator that takes multiple arguments that are passed\r\n * to a `PrepareAction` method to create the final Action.\r\n * @typeParam Args arguments for the action creator function\r\n * @typeParam P `payload` type\r\n * @typeParam T `type` name\r\n * @typeParam E optional `error` type\r\n * @typeParam M optional `meta` type\r\n *\r\n * @inheritdoc {redux#ActionCreator}\r\n *\r\n * @public\r\n */\r\nexport interface ActionCreatorWithPreparedPayload<\r\n Args extends unknown[],\r\n P,\r\n T extends string = string,\r\n E = never,\r\n M = never\r\n> extends BaseActionCreator {\r\n /**\r\n * Calling this {@link redux#ActionCreator} with `Args` will return\r\n * an Action with a payload of type `P` and (depending on the `PrepareAction`\r\n * method used) a `meta`- and `error` property of types `M` and `E` respectively.\r\n */\r\n (...args: Args): PayloadAction\r\n}\r\n\r\n/**\r\n * An action creator of type `T` that takes an optional payload of type `P`.\r\n *\r\n * @inheritdoc {redux#ActionCreator}\r\n *\r\n * @public\r\n */\r\nexport interface ActionCreatorWithOptionalPayload\r\n extends BaseActionCreator {\r\n /**\r\n * Calling this {@link redux#ActionCreator} with an argument will\r\n * return a {@link PayloadAction} of type `T` with a payload of `P`.\r\n * Calling it without an argument will return a PayloadAction with a payload of `undefined`.\r\n */\r\n (payload?: P): PayloadAction\r\n}\r\n\r\n/**\r\n * An action creator of type `T` that takes no payload.\r\n *\r\n * @inheritdoc {redux#ActionCreator}\r\n *\r\n * @public\r\n */\r\nexport interface ActionCreatorWithoutPayload\r\n extends BaseActionCreator {\r\n /**\r\n * Calling this {@link redux#ActionCreator} will\r\n * return a {@link PayloadAction} of type `T` with a payload of `undefined`\r\n */\r\n (noArgument: void): PayloadAction\r\n}\r\n\r\n/**\r\n * An action creator of type `T` that requires a payload of type P.\r\n *\r\n * @inheritdoc {redux#ActionCreator}\r\n *\r\n * @public\r\n */\r\nexport interface ActionCreatorWithPayload\r\n extends BaseActionCreator {\r\n /**\r\n * Calling this {@link redux#ActionCreator} with an argument will\r\n * return a {@link PayloadAction} of type `T` with a payload of `P`\r\n */\r\n (payload: P): PayloadAction\r\n}\r\n\r\n/**\r\n * An action creator of type `T` whose `payload` type could not be inferred. 
Accepts everything as `payload`.\r\n *\r\n * @inheritdoc {redux#ActionCreator}\r\n *\r\n * @public\r\n */\r\nexport interface ActionCreatorWithNonInferrablePayload<\r\n T extends string = string\r\n> extends BaseActionCreator {\r\n /**\r\n * Calling this {@link redux#ActionCreator} with an argument will\r\n * return a {@link PayloadAction} of type `T` with a payload\r\n * of exactly the type of the argument.\r\n */\r\n (payload: PT): PayloadAction\r\n}\r\n\r\n/**\r\n * An action creator that produces actions with a `payload` attribute.\r\n *\r\n * @typeParam P the `payload` type\r\n * @typeParam T the `type` of the resulting action\r\n * @typeParam PA if the resulting action is preprocessed by a `prepare` method, the signature of said method.\r\n *\r\n * @public\r\n */\r\nexport type PayloadActionCreator<\r\n P = void,\r\n T extends string = string,\r\n PA extends PrepareAction
<P>
| void = void\r\n> = IfPrepareActionMethodProvided<\r\n PA,\r\n _ActionCreatorWithPreparedPayload,\r\n // else\r\n IsAny<\r\n P,\r\n ActionCreatorWithPayload,\r\n IsUnknownOrNonInferrable<\r\n P,\r\n ActionCreatorWithNonInferrablePayload,\r\n // else\r\n IfVoid<\r\n P,\r\n ActionCreatorWithoutPayload,\r\n // else\r\n IfMaybeUndefined<\r\n P,\r\n ActionCreatorWithOptionalPayload,\r\n // else\r\n ActionCreatorWithPayload\r\n >\r\n >\r\n >\r\n >\r\n>\r\n\r\n/**\r\n * A utility function to create an action creator for the given action type\r\n * string. The action creator accepts a single argument, which will be included\r\n * in the action object as a field called payload. The action creator function\r\n * will also have its toString() overridden so that it returns the action type,\r\n * allowing it to be used in reducer logic that is looking for that action type.\r\n *\r\n * @param type The action type to use for created actions.\r\n * @param prepare (optional) a method that takes any number of arguments and returns { payload } or { payload, meta }.\r\n * If this is given, the resulting action creator will pass its arguments to this method to calculate payload & meta.\r\n *\r\n * @public\r\n */\r\nexport function createAction

= PayloadActionCreator
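`createAction` above returns an action creator whose `type` property (and `toString()`) yield the action type, whose `match` method acts as a type guard, and which can route its arguments through a `prepare` callback to build `payload`/`meta`. A sketch; the action type strings are illustrative:

```ts
import { createAction, nanoid } from "@reduxjs/toolkit"

const increment = createAction<number>("counter/increment")

const a = increment(3) // { type: "counter/increment", payload: 3 }
increment.type         // "counter/increment"
increment.match(a)     // true: usable as a type guard in matchers and middleware

// With a prepare callback, arguments are mapped onto payload (and optionally meta):
const addAnnotation = createAction("annotations/add", (label: string) => ({
  payload: { id: nanoid(), label },
}))
```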

\r\n\r\n/**\r\n * The return value of `createSlice`\r\n *\r\n * @public\r\n */\r\nexport interface Slice<\r\n State = any,\r\n CaseReducers extends SliceCaseReducers = SliceCaseReducers,\r\n Name extends string = string\r\n> {\r\n /**\r\n * The slice name.\r\n */\r\n name: Name\r\n\r\n /**\r\n * The slice's reducer.\r\n */\r\n reducer: Reducer\r\n\r\n /**\r\n * Action creators for the types of actions that are handled by the slice\r\n * reducer.\r\n */\r\n actions: CaseReducerActions\r\n\r\n /**\r\n * The individual case reducer functions that were passed in the `reducers` parameter.\r\n * This enables reuse and testing if they were defined inline when calling `createSlice`.\r\n */\r\n caseReducers: SliceDefinedCaseReducers\r\n\r\n /**\r\n * Provides access to the initial state value given to the slice.\r\n * If a lazy state initializer was provided, it will be called and a fresh value returned.\r\n */\r\n getInitialState: () => State\r\n}\r\n\r\n/**\r\n * Options for `createSlice()`.\r\n *\r\n * @public\r\n */\r\nexport interface CreateSliceOptions<\r\n State = any,\r\n CR extends SliceCaseReducers = SliceCaseReducers,\r\n Name extends string = string\r\n> {\r\n /**\r\n * The slice's name. Used to namespace the generated action types.\r\n */\r\n name: Name\r\n\r\n /**\r\n * The initial state that should be used when the reducer is called the first time. This may also be a \"lazy initializer\" function, which should return an initial state value when called. This will be used whenever the reducer is called with `undefined` as its state value, and is primarily useful for cases like reading initial state from `localStorage`.\r\n */\r\n initialState: State | (() => State)\r\n\r\n /**\r\n * A mapping from action types to action-type-specific *case reducer*\r\n * functions. For every action type, a matching action creator will be\r\n * generated using `createAction()`.\r\n */\r\n reducers: ValidateSliceCaseReducers\r\n\r\n /**\r\n * A callback that receives a *builder* object to define\r\n * case reducers via calls to `builder.addCase(actionCreatorOrType, reducer)`.\r\n * \r\n * Alternatively, a mapping from action types to action-type-specific *case reducer*\r\n * functions. 
These reducers should have existing action types used\r\n * as the keys, and action creators will _not_ be generated.\r\n * \r\n * @example\r\n```ts\r\nimport { createAction, createSlice, Action, AnyAction } from '@reduxjs/toolkit'\r\nconst incrementBy = createAction('incrementBy')\r\nconst decrement = createAction('decrement')\r\n\r\ninterface RejectedAction extends Action {\r\n error: Error\r\n}\r\n\r\nfunction isRejectedAction(action: AnyAction): action is RejectedAction {\r\n return action.type.endsWith('rejected')\r\n}\r\n\r\ncreateSlice({\r\n name: 'counter',\r\n initialState: 0,\r\n reducers: {},\r\n extraReducers: builder => {\r\n builder\r\n .addCase(incrementBy, (state, action) => {\r\n // action is inferred correctly here if using TS\r\n })\r\n // You can chain calls, or have separate `builder.addCase()` lines each time\r\n .addCase(decrement, (state, action) => {})\r\n // You can match a range of action types\r\n .addMatcher(\r\n isRejectedAction,\r\n // `action` will be inferred as a RejectedAction due to isRejectedAction being defined as a type guard\r\n (state, action) => {}\r\n )\r\n // and provide a default case if no other handlers matched\r\n .addDefaultCase((state, action) => {})\r\n }\r\n})\r\n```\r\n */\r\n extraReducers?:\r\n | CaseReducers, any>\r\n | ((builder: ActionReducerMapBuilder>) => void)\r\n}\r\n\r\n/**\r\n * A CaseReducer with a `prepare` method.\r\n *\r\n * @public\r\n */\r\nexport type CaseReducerWithPrepare = {\r\n reducer: CaseReducer\r\n prepare: PrepareAction\r\n}\r\n\r\n/**\r\n * The type describing a slice's `reducers` option.\r\n *\r\n * @public\r\n */\r\nexport type SliceCaseReducers = {\r\n [K: string]:\r\n | CaseReducer>\r\n | CaseReducerWithPrepare>\r\n}\r\n\r\ntype SliceActionType<\r\n SliceName extends string,\r\n ActionName extends keyof any\r\n> = ActionName extends string | number ? `${SliceName}/${ActionName}` : string\r\n\r\n/**\r\n * Derives the slice's `actions` property from the `reducers` options\r\n *\r\n * @public\r\n */\r\nexport type CaseReducerActions<\r\n CaseReducers extends SliceCaseReducers,\r\n SliceName extends string\r\n> = {\r\n [Type in keyof CaseReducers]: CaseReducers[Type] extends { prepare: any }\r\n ? ActionCreatorForCaseReducerWithPrepare<\r\n CaseReducers[Type],\r\n SliceActionType\r\n >\r\n : ActionCreatorForCaseReducer<\r\n CaseReducers[Type],\r\n SliceActionType\r\n >\r\n}\r\n\r\n/**\r\n * Get a `PayloadActionCreator` type for a passed `CaseReducerWithPrepare`\r\n *\r\n * @internal\r\n */\r\ntype ActionCreatorForCaseReducerWithPrepare<\r\n CR extends { prepare: any },\r\n Type extends string\r\n> = _ActionCreatorWithPreparedPayload\r\n\r\n/**\r\n * Get a `PayloadActionCreator` type for a passed `CaseReducer`\r\n *\r\n * @internal\r\n */\r\ntype ActionCreatorForCaseReducer = CR extends (\r\n state: any,\r\n action: infer Action\r\n) => any\r\n ? Action extends { payload: infer P }\r\n ? PayloadActionCreator\r\n : ActionCreatorWithoutPayload\r\n : ActionCreatorWithoutPayload\r\n\r\n/**\r\n * Extracts the CaseReducers out of a `reducers` object, even if they are\r\n * tested into a `CaseReducerWithPrepare`.\r\n *\r\n * @internal\r\n */\r\ntype SliceDefinedCaseReducers> = {\r\n [Type in keyof CaseReducers]: CaseReducers[Type] extends {\r\n reducer: infer Reducer\r\n }\r\n ? 
Reducer\r\n : CaseReducers[Type]\r\n}\r\n\r\n/**\r\n * Used on a SliceCaseReducers object.\r\n * Ensures that if a CaseReducer is a `CaseReducerWithPrepare`, that\r\n * the `reducer` and the `prepare` function use the same type of `payload`.\r\n *\r\n * Might do additional such checks in the future.\r\n *\r\n * This type is only ever useful if you want to write your own wrapper around\r\n * `createSlice`. Please don't use it otherwise!\r\n *\r\n * @public\r\n */\r\nexport type ValidateSliceCaseReducers<\r\n S,\r\n ACR extends SliceCaseReducers\r\n> = ACR &\r\n {\r\n [T in keyof ACR]: ACR[T] extends {\r\n reducer(s: S, action?: infer A): any\r\n }\r\n ? {\r\n prepare(...a: never[]): Omit\r\n }\r\n : {}\r\n }\r\n\r\nfunction getType(slice: string, actionKey: string): string {\r\n return `${slice}/${actionKey}`\r\n}\r\n\r\n/**\r\n * A function that accepts an initial state, an object full of reducer\r\n * functions, and a \"slice name\", and automatically generates\r\n * action creators and action types that correspond to the\r\n * reducers and state.\r\n *\r\n * The `reducer` argument is passed to `createReducer()`.\r\n *\r\n * @public\r\n */\r\nexport function createSlice<\r\n State,\r\n CaseReducers extends SliceCaseReducers,\r\n Name extends string = string\r\n>(\r\n options: CreateSliceOptions\r\n): Slice {\r\n const { name } = options\r\n if (!name) {\r\n throw new Error('`name` is a required option for createSlice')\r\n }\r\n\r\n if (\r\n typeof process !== 'undefined' &&\r\n process.env.NODE_ENV === 'development'\r\n ) {\r\n if (options.initialState === undefined) {\r\n console.error(\r\n 'You must provide an `initialState` value that is not `undefined`. You may have misspelled `initialState`'\r\n )\r\n }\r\n }\r\n\r\n const initialState =\r\n typeof options.initialState == 'function'\r\n ? options.initialState\r\n : freezeDraftable(options.initialState)\r\n\r\n const reducers = options.reducers || {}\r\n\r\n const reducerNames = Object.keys(reducers)\r\n\r\n const sliceCaseReducersByName: Record = {}\r\n const sliceCaseReducersByType: Record = {}\r\n const actionCreators: Record = {}\r\n\r\n reducerNames.forEach((reducerName) => {\r\n const maybeReducerWithPrepare = reducers[reducerName]\r\n const type = getType(name, reducerName)\r\n\r\n let caseReducer: CaseReducer\r\n let prepareCallback: PrepareAction | undefined\r\n\r\n if ('reducer' in maybeReducerWithPrepare) {\r\n caseReducer = maybeReducerWithPrepare.reducer\r\n prepareCallback = maybeReducerWithPrepare.prepare\r\n } else {\r\n caseReducer = maybeReducerWithPrepare\r\n }\r\n\r\n sliceCaseReducersByName[reducerName] = caseReducer\r\n sliceCaseReducersByType[type] = caseReducer\r\n actionCreators[reducerName] = prepareCallback\r\n ? createAction(type, prepareCallback)\r\n : createAction(type)\r\n })\r\n\r\n function buildReducer() {\r\n if (process.env.NODE_ENV !== 'production') {\r\n if (typeof options.extraReducers === 'object') {\r\n if (!hasWarnedAboutObjectNotation) {\r\n hasWarnedAboutObjectNotation = true\r\n console.warn(\r\n \"The object notation for `createSlice.extraReducers` is deprecated, and will be removed in RTK 2.0. Please use the 'builder callback' notation instead: https://redux-toolkit.js.org/api/createSlice\"\r\n )\r\n }\r\n }\r\n }\r\n const [\r\n extraReducers = {},\r\n actionMatchers = [],\r\n defaultCaseReducer = undefined,\r\n ] =\r\n typeof options.extraReducers === 'function'\r\n ? 
executeReducerBuilderCallback(options.extraReducers)\r\n : [options.extraReducers]\r\n\r\n const finalCaseReducers = { ...extraReducers, ...sliceCaseReducersByType }\r\n\r\n return createReducer(initialState, (builder) => {\r\n for (let key in finalCaseReducers) {\r\n builder.addCase(key, finalCaseReducers[key] as CaseReducer)\r\n }\r\n for (let m of actionMatchers) {\r\n builder.addMatcher(m.matcher, m.reducer)\r\n }\r\n if (defaultCaseReducer) {\r\n builder.addDefaultCase(defaultCaseReducer)\r\n }\r\n })\r\n }\r\n\r\n let _reducer: ReducerWithInitialState\r\n\r\n return {\r\n name,\r\n reducer(state, action) {\r\n if (!_reducer) _reducer = buildReducer()\r\n\r\n return _reducer(state, action)\r\n },\r\n actions: actionCreators as any,\r\n caseReducers: sliceCaseReducersByName as any,\r\n getInitialState() {\r\n if (!_reducer) _reducer = buildReducer()\r\n\r\n return _reducer.getInitialState()\r\n },\r\n }\r\n}\r\n","import type { Draft } from 'immer'\r\nimport createNextState, { isDraft, isDraftable } from 'immer'\r\nimport type { AnyAction, Action, Reducer } from 'redux'\r\nimport type { ActionReducerMapBuilder } from './mapBuilders'\r\nimport { executeReducerBuilderCallback } from './mapBuilders'\r\nimport type { NoInfer } from './tsHelpers'\r\nimport { freezeDraftable } from './utils'\r\n\r\n/**\r\n * Defines a mapping from action types to corresponding action object shapes.\r\n *\r\n * @deprecated This should not be used manually - it is only used for internal\r\n * inference purposes and should not have any further value.\r\n * It might be removed in the future.\r\n * @public\r\n */\r\nexport type Actions = Record\r\n\r\n/**\r\n * @deprecated use `TypeGuard` instead\r\n */\r\nexport interface ActionMatcher {\r\n (action: AnyAction): action is A\r\n}\r\n\r\nexport type ActionMatcherDescription = {\r\n matcher: ActionMatcher\r\n reducer: CaseReducer>\r\n}\r\n\r\nexport type ReadonlyActionMatcherDescriptionCollection = ReadonlyArray<\r\n ActionMatcherDescription\r\n>\r\n\r\nexport type ActionMatcherDescriptionCollection = Array<\r\n ActionMatcherDescription\r\n>\r\n\r\n/**\r\n * A *case reducer* is a reducer function for a specific action type. Case\r\n * reducers can be composed to full reducers using `createReducer()`.\r\n *\r\n * Unlike a normal Redux reducer, a case reducer is never called with an\r\n * `undefined` state to determine the initial state. Instead, the initial\r\n * state is explicitly specified as an argument to `createReducer()`.\r\n *\r\n * In addition, a case reducer can choose to mutate the passed-in `state`\r\n * value directly instead of returning a new state. This does not actually\r\n * cause the store state to be mutated directly; instead, thanks to\r\n * [immer](https://github.com/mweststrate/immer), the mutations are\r\n * translated to copy operations that result in a new state.\r\n *\r\n * @public\r\n */\r\nexport type CaseReducer = (\r\n state: Draft,\r\n action: A\r\n) => NoInfer | void | Draft>\r\n\r\n/**\r\n * A mapping from action types to case reducers for `createReducer()`.\r\n *\r\n * @deprecated This should not be used manually - it is only used\r\n * for internal inference purposes and using it manually\r\n * would lead to type erasure.\r\n * It might be removed in the future.\r\n * @public\r\n */\r\nexport type CaseReducers = {\r\n [T in keyof AS]: AS[T] extends Action ? CaseReducer : void\r\n}\r\n\r\nexport type NotFunction = T extends Function ? 
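`createSlice` above ties the pieces together: each entry in `reducers` becomes both a case reducer and a generated action creator namespaced by the slice `name`. A minimal sketch:

```ts
import { createSlice, PayloadAction } from "@reduxjs/toolkit"

const counterSlice = createSlice({
  name: "counter",                 // namespaces the generated action types
  initialState: { value: 0 },
  reducers: {
    increment: state => {
      state.value += 1             // Immer turns this "mutation" into a copy
    },
    addBy: (state, action: PayloadAction<number>) => {
      state.value += action.payload
    },
  },
})

export const { increment, addBy } = counterSlice.actions
export default counterSlice.reducer
// increment.type === "counter/increment"
// counterSlice.getInitialState() -> { value: 0 }
```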
never : T\r\n\r\nfunction isStateFunction(x: unknown): x is () => S {\r\n return typeof x === 'function'\r\n}\r\n\r\nexport type ReducerWithInitialState> = Reducer & {\r\n getInitialState: () => S\r\n}\r\n\r\nlet hasWarnedAboutObjectNotation = false\r\n\r\n/**\r\n * A utility function that allows defining a reducer as a mapping from action\r\n * type to *case reducer* functions that handle these action types. The\r\n * reducer's initial state is passed as the first argument.\r\n *\r\n * @remarks\r\n * The body of every case reducer is implicitly wrapped with a call to\r\n * `produce()` from the [immer](https://github.com/mweststrate/immer) library.\r\n * This means that rather than returning a new state object, you can also\r\n * mutate the passed-in state object directly; these mutations will then be\r\n * automatically and efficiently translated into copies, giving you both\r\n * convenience and immutability.\r\n *\r\n * @overloadSummary\r\n * This overload accepts a callback function that receives a `builder` object as its argument.\r\n * That builder provides `addCase`, `addMatcher` and `addDefaultCase` functions that may be\r\n * called to define what actions this reducer will handle.\r\n *\r\n * @param initialState - `State | (() => State)`: The initial state that should be used when the reducer is called the first time. This may also be a \"lazy initializer\" function, which should return an initial state value when called. This will be used whenever the reducer is called with `undefined` as its state value, and is primarily useful for cases like reading initial state from `localStorage`.\r\n * @param builderCallback - `(builder: Builder) => void` A callback that receives a *builder* object to define\r\n * case reducers via calls to `builder.addCase(actionCreatorOrType, reducer)`.\r\n * @example\r\n```ts\r\nimport {\r\n createAction,\r\n createReducer,\r\n AnyAction,\r\n PayloadAction,\r\n} from \"@reduxjs/toolkit\";\r\n\r\nconst increment = createAction(\"increment\");\r\nconst decrement = createAction(\"decrement\");\r\n\r\nfunction isActionWithNumberPayload(\r\n action: AnyAction\r\n): action is PayloadAction {\r\n return typeof action.payload === \"number\";\r\n}\r\n\r\nconst reducer = createReducer(\r\n {\r\n counter: 0,\r\n sumOfNumberPayloads: 0,\r\n unhandledActions: 0,\r\n },\r\n (builder) => {\r\n builder\r\n .addCase(increment, (state, action) => {\r\n // action is inferred correctly here\r\n state.counter += action.payload;\r\n })\r\n // You can chain calls, or have separate `builder.addCase()` lines each time\r\n .addCase(decrement, (state, action) => {\r\n state.counter -= action.payload;\r\n })\r\n // You can apply a \"matcher function\" to incoming actions\r\n .addMatcher(isActionWithNumberPayload, (state, action) => {})\r\n // and provide a default case if no other handlers matched\r\n .addDefaultCase((state, action) => {});\r\n }\r\n);\r\n```\r\n * @public\r\n */\r\nexport function createReducer>(\r\n initialState: S | (() => S),\r\n builderCallback: (builder: ActionReducerMapBuilder) => void\r\n): ReducerWithInitialState\r\n\r\n/**\r\n * A utility function that allows defining a reducer as a mapping from action\r\n * type to *case reducer* functions that handle these action types. 
The\r\n * reducer's initial state is passed as the first argument.\r\n *\r\n * The body of every case reducer is implicitly wrapped with a call to\r\n * `produce()` from the [immer](https://github.com/mweststrate/immer) library.\r\n * This means that rather than returning a new state object, you can also\r\n * mutate the passed-in state object directly; these mutations will then be\r\n * automatically and efficiently translated into copies, giving you both\r\n * convenience and immutability.\r\n * \r\n * @overloadSummary\r\n * This overload accepts an object where the keys are string action types, and the values\r\n * are case reducer functions to handle those action types.\r\n *\r\n * @param initialState - `State | (() => State)`: The initial state that should be used when the reducer is called the first time. This may also be a \"lazy initializer\" function, which should return an initial state value when called. This will be used whenever the reducer is called with `undefined` as its state value, and is primarily useful for cases like reading initial state from `localStorage`.\r\n * @param actionsMap - An object mapping from action types to _case reducers_, each of which handles one specific action type.\r\n * @param actionMatchers - An array of matcher definitions in the form `{matcher, reducer}`.\r\n * All matching reducers will be executed in order, independently if a case reducer matched or not.\r\n * @param defaultCaseReducer - A \"default case\" reducer that is executed if no case reducer and no matcher\r\n * reducer was executed for this action.\r\n *\r\n * @example\r\n```js\r\nconst counterReducer = createReducer(0, {\r\n increment: (state, action) => state + action.payload,\r\n decrement: (state, action) => state - action.payload\r\n})\r\n\r\n// Alternately, use a \"lazy initializer\" to provide the initial state\r\n// (works with either form of createReducer)\r\nconst initialState = () => 0\r\nconst counterReducer = createReducer(initialState, {\r\n increment: (state, action) => state + action.payload,\r\n decrement: (state, action) => state - action.payload\r\n})\r\n```\r\n \r\n * Action creators that were generated using [`createAction`](./createAction) may be used directly as the keys here, using computed property syntax:\r\n\r\n```js\r\nconst increment = createAction('increment')\r\nconst decrement = createAction('decrement')\r\n\r\nconst counterReducer = createReducer(0, {\r\n [increment]: (state, action) => state + action.payload,\r\n [decrement.type]: (state, action) => state - action.payload\r\n})\r\n```\r\n * @public\r\n */\r\nexport function createReducer<\r\n S extends NotFunction,\r\n CR extends CaseReducers = CaseReducers\r\n>(\r\n initialState: S | (() => S),\r\n actionsMap: CR,\r\n actionMatchers?: ActionMatcherDescriptionCollection,\r\n defaultCaseReducer?: CaseReducer\r\n): ReducerWithInitialState\r\n\r\nexport function createReducer>(\r\n initialState: S | (() => S),\r\n mapOrBuilderCallback:\r\n | CaseReducers\r\n | ((builder: ActionReducerMapBuilder) => void),\r\n actionMatchers: ReadonlyActionMatcherDescriptionCollection = [],\r\n defaultCaseReducer?: CaseReducer\r\n): ReducerWithInitialState {\r\n if (process.env.NODE_ENV !== 'production') {\r\n if (typeof mapOrBuilderCallback === 'object') {\r\n if (!hasWarnedAboutObjectNotation) {\r\n hasWarnedAboutObjectNotation = true\r\n console.warn(\r\n \"The object notation for `createReducer` is deprecated, and will be removed in RTK 2.0. 
Please use the 'builder callback' notation instead: https://redux-toolkit.js.org/api/createReducer\"\r\n )\r\n }\r\n }\r\n }\r\n\r\n let [actionsMap, finalActionMatchers, finalDefaultCaseReducer] =\r\n typeof mapOrBuilderCallback === 'function'\r\n ? executeReducerBuilderCallback(mapOrBuilderCallback)\r\n : [mapOrBuilderCallback, actionMatchers, defaultCaseReducer]\r\n\r\n // Ensure the initial state gets frozen either way (if draftable)\r\n let getInitialState: () => S\r\n if (isStateFunction(initialState)) {\r\n getInitialState = () => freezeDraftable(initialState())\r\n } else {\r\n const frozenInitialState = freezeDraftable(initialState)\r\n getInitialState = () => frozenInitialState\r\n }\r\n\r\n function reducer(state = getInitialState(), action: any): S {\r\n let caseReducers = [\r\n actionsMap[action.type],\r\n ...finalActionMatchers\r\n .filter(({ matcher }) => matcher(action))\r\n .map(({ reducer }) => reducer),\r\n ]\r\n if (caseReducers.filter((cr) => !!cr).length === 0) {\r\n caseReducers = [finalDefaultCaseReducer]\r\n }\r\n\r\n return caseReducers.reduce((previousState, caseReducer): S => {\r\n if (caseReducer) {\r\n if (isDraft(previousState)) {\r\n // If it's already a draft, we must already be inside a `createNextState` call,\r\n // likely because this is being wrapped in `createReducer`, `createSlice`, or nested\r\n // inside an existing draft. It's safe to just pass the draft to the mutator.\r\n const draft = previousState as Draft // We can assume this is already a draft\r\n const result = caseReducer(draft, action)\r\n\r\n if (result === undefined) {\r\n return previousState\r\n }\r\n\r\n return result as S\r\n } else if (!isDraftable(previousState)) {\r\n // If state is not draftable (ex: a primitive, such as 0), we want to directly\r\n // return the caseReducer func and not wrap it with produce.\r\n const result = caseReducer(previousState as any, action)\r\n\r\n if (result === undefined) {\r\n if (previousState === null) {\r\n return previousState\r\n }\r\n throw Error(\r\n 'A case reducer on a non-draftable value must not return undefined'\r\n )\r\n }\r\n\r\n return result as S\r\n } else {\r\n // @ts-ignore createNextState() produces an Immutable> rather\r\n // than an Immutable, and TypeScript cannot find out how to reconcile\r\n // these two types.\r\n return createNextState(previousState, (draft: Draft) => {\r\n return caseReducer(draft, action)\r\n })\r\n }\r\n }\r\n\r\n return previousState\r\n }, state)\r\n }\r\n\r\n reducer.getInitialState = getInitialState\r\n\r\n return reducer as ReducerWithInitialState\r\n}\r\n","// Borrowed from https://github.com/ai/nanoid/blob/3.0.2/non-secure/index.js\r\n// This alphabet uses `A-Za-z0-9_-` symbols. 
A genetic algorithm helped\r\n// optimize the gzip compression for this alphabet.\r\nlet urlAlphabet =\r\n 'ModuleSymbhasOwnPr-0123456789ABCDEFGHNRVfgctiUvz_KqYTJkLxpZXIjQW'\r\n\r\n/**\r\n *\r\n * @public\r\n */\r\nexport let nanoid = (size = 21) => {\r\n let id = ''\r\n // A compact alternative for `for (var i = 0; i < step; i++)`.\r\n let i = size\r\n while (i--) {\r\n // `| 0` is more compact and faster than `Math.floor()`.\r\n id += urlAlphabet[(Math.random() * 64) | 0]\r\n }\r\n return id\r\n}\r\n","import type { Dispatch, AnyAction } from 'redux'\r\nimport type {\r\n PayloadAction,\r\n ActionCreatorWithPreparedPayload,\r\n} from './createAction'\r\nimport { createAction } from './createAction'\r\nimport type { ThunkDispatch } from 'redux-thunk'\r\nimport type { FallbackIfUnknown, Id, IsAny, IsUnknown } from './tsHelpers'\r\nimport { nanoid } from './nanoid'\r\n\r\n// @ts-ignore we need the import of these types due to a bundling issue.\r\ntype _Keep = PayloadAction | ActionCreatorWithPreparedPayload\r\n\r\nexport type BaseThunkAPI<\r\n S,\r\n E,\r\n D extends Dispatch = Dispatch,\r\n RejectedValue = undefined,\r\n RejectedMeta = unknown,\r\n FulfilledMeta = unknown\r\n> = {\r\n dispatch: D\r\n getState: () => S\r\n extra: E\r\n requestId: string\r\n signal: AbortSignal\r\n abort: (reason?: string) => void\r\n rejectWithValue: IsUnknown<\r\n RejectedMeta,\r\n (value: RejectedValue) => RejectWithValue,\r\n (\r\n value: RejectedValue,\r\n meta: RejectedMeta\r\n ) => RejectWithValue\r\n >\r\n fulfillWithValue: IsUnknown<\r\n FulfilledMeta,\r\n (value: FulfilledValue) => FulfilledValue,\r\n (\r\n value: FulfilledValue,\r\n meta: FulfilledMeta\r\n ) => FulfillWithMeta\r\n >\r\n}\r\n\r\n/**\r\n * @public\r\n */\r\nexport interface SerializedError {\r\n name?: string\r\n message?: string\r\n stack?: string\r\n code?: string\r\n}\r\n\r\nconst commonProperties: Array = [\r\n 'name',\r\n 'message',\r\n 'stack',\r\n 'code',\r\n]\r\n\r\nclass RejectWithValue {\r\n /*\r\n type-only property to distinguish between RejectWithValue and FulfillWithMeta\r\n does not exist at runtime\r\n */\r\n private readonly _type!: 'RejectWithValue'\r\n constructor(\r\n public readonly payload: Payload,\r\n public readonly meta: RejectedMeta\r\n ) {}\r\n}\r\n\r\nclass FulfillWithMeta {\r\n /*\r\n type-only property to distinguish between RejectWithValue and FulfillWithMeta\r\n does not exist at runtime\r\n */\r\n private readonly _type!: 'FulfillWithMeta'\r\n constructor(\r\n public readonly payload: Payload,\r\n public readonly meta: FulfilledMeta\r\n ) {}\r\n}\r\n\r\n/**\r\n * Serializes an error into a plain object.\r\n * Reworked from https://github.com/sindresorhus/serialize-error\r\n *\r\n * @public\r\n */\r\nexport const miniSerializeError = (value: any): SerializedError => {\r\n if (typeof value === 'object' && value !== null) {\r\n const simpleError: SerializedError = {}\r\n for (const property of commonProperties) {\r\n if (typeof value[property] === 'string') {\r\n simpleError[property] = value[property]\r\n }\r\n }\r\n\r\n return simpleError\r\n }\r\n\r\n return { message: String(value) }\r\n}\r\n\r\ntype AsyncThunkConfig = {\r\n state?: unknown\r\n dispatch?: Dispatch\r\n extra?: unknown\r\n rejectValue?: unknown\r\n serializedErrorType?: unknown\r\n pendingMeta?: unknown\r\n fulfilledMeta?: unknown\r\n rejectedMeta?: unknown\r\n}\r\n\r\ntype GetState = ThunkApiConfig extends {\r\n state: infer State\r\n}\r\n ? State\r\n : unknown\r\ntype GetExtra = ThunkApiConfig extends { extra: infer Extra }\r\n ? 
Extra\r\n : unknown\r\ntype GetDispatch = ThunkApiConfig extends {\r\n dispatch: infer Dispatch\r\n}\r\n ? FallbackIfUnknown<\r\n Dispatch,\r\n ThunkDispatch<\r\n GetState,\r\n GetExtra,\r\n AnyAction\r\n >\r\n >\r\n : ThunkDispatch, GetExtra, AnyAction>\r\n\r\ntype GetThunkAPI = BaseThunkAPI<\r\n GetState,\r\n GetExtra,\r\n GetDispatch,\r\n GetRejectValue,\r\n GetRejectedMeta,\r\n GetFulfilledMeta\r\n>\r\n\r\ntype GetRejectValue = ThunkApiConfig extends {\r\n rejectValue: infer RejectValue\r\n}\r\n ? RejectValue\r\n : unknown\r\n\r\ntype GetPendingMeta = ThunkApiConfig extends {\r\n pendingMeta: infer PendingMeta\r\n}\r\n ? PendingMeta\r\n : unknown\r\n\r\ntype GetFulfilledMeta = ThunkApiConfig extends {\r\n fulfilledMeta: infer FulfilledMeta\r\n}\r\n ? FulfilledMeta\r\n : unknown\r\n\r\ntype GetRejectedMeta = ThunkApiConfig extends {\r\n rejectedMeta: infer RejectedMeta\r\n}\r\n ? RejectedMeta\r\n : unknown\r\n\r\ntype GetSerializedErrorType = ThunkApiConfig extends {\r\n serializedErrorType: infer GetSerializedErrorType\r\n}\r\n ? GetSerializedErrorType\r\n : SerializedError\r\n\r\ntype MaybePromise = T | Promise | (T extends any ? Promise : never)\r\n\r\n/**\r\n * A type describing the return value of the `payloadCreator` argument to `createAsyncThunk`.\r\n * Might be useful for wrapping `createAsyncThunk` in custom abstractions.\r\n *\r\n * @public\r\n */\r\nexport type AsyncThunkPayloadCreatorReturnValue<\r\n Returned,\r\n ThunkApiConfig extends AsyncThunkConfig\r\n> = MaybePromise<\r\n | IsUnknown<\r\n GetFulfilledMeta,\r\n Returned,\r\n FulfillWithMeta>\r\n >\r\n | RejectWithValue<\r\n GetRejectValue,\r\n GetRejectedMeta\r\n >\r\n>\r\n/**\r\n * A type describing the `payloadCreator` argument to `createAsyncThunk`.\r\n * Might be useful for wrapping `createAsyncThunk` in custom abstractions.\r\n *\r\n * @public\r\n */\r\nexport type AsyncThunkPayloadCreator<\r\n Returned,\r\n ThunkArg = void,\r\n ThunkApiConfig extends AsyncThunkConfig = {}\r\n> = (\r\n arg: ThunkArg,\r\n thunkAPI: GetThunkAPI\r\n) => AsyncThunkPayloadCreatorReturnValue\r\n\r\n/**\r\n * A ThunkAction created by `createAsyncThunk`.\r\n * Dispatching it returns a Promise for either a\r\n * fulfilled or rejected action.\r\n * Also, the returned value contains an `abort()` method\r\n * that allows the asyncAction to be cancelled from the outside.\r\n *\r\n * @public\r\n */\r\nexport type AsyncThunkAction<\r\n Returned,\r\n ThunkArg,\r\n ThunkApiConfig extends AsyncThunkConfig\r\n> = (\r\n dispatch: GetDispatch,\r\n getState: () => GetState,\r\n extra: GetExtra\r\n) => Promise<\r\n | ReturnType>\r\n | ReturnType>\r\n> & {\r\n abort: (reason?: string) => void\r\n requestId: string\r\n arg: ThunkArg\r\n unwrap: () => Promise\r\n}\r\n\r\ntype AsyncThunkActionCreator<\r\n Returned,\r\n ThunkArg,\r\n ThunkApiConfig extends AsyncThunkConfig\r\n> = IsAny<\r\n ThunkArg,\r\n // any handling\r\n (arg: ThunkArg) => AsyncThunkAction,\r\n // unknown handling\r\n unknown extends ThunkArg\r\n ? (arg: ThunkArg) => AsyncThunkAction // argument not specified or specified as void or undefined\r\n : [ThunkArg] extends [void] | [undefined]\r\n ? () => AsyncThunkAction // argument contains void\r\n : [void] extends [ThunkArg] // make optional\r\n ? (arg?: ThunkArg) => AsyncThunkAction // argument contains undefined\r\n : [undefined] extends [ThunkArg]\r\n ? 
WithStrictNullChecks<\r\n // with strict nullChecks: make optional\r\n (\r\n arg?: ThunkArg\r\n ) => AsyncThunkAction,\r\n // without strict null checks this will match everything, so don't make it optional\r\n (arg: ThunkArg) => AsyncThunkAction\r\n > // default case: normal argument\r\n : (arg: ThunkArg) => AsyncThunkAction\r\n>\r\n\r\n/**\r\n * Options object for `createAsyncThunk`.\r\n *\r\n * @public\r\n */\r\nexport type AsyncThunkOptions<\r\n ThunkArg = void,\r\n ThunkApiConfig extends AsyncThunkConfig = {}\r\n> = {\r\n /**\r\n * A method to control whether the asyncThunk should be executed. Has access to the\r\n * `arg`, `api.getState()` and `api.extra` arguments.\r\n *\r\n * @returns `false` if it should be skipped\r\n */\r\n condition?(\r\n arg: ThunkArg,\r\n api: Pick, 'getState' | 'extra'>\r\n ): MaybePromise\r\n /**\r\n * If `condition` returns `false`, the asyncThunk will be skipped.\r\n * This option allows you to control whether a `rejected` action with `meta.condition == false`\r\n * will be dispatched or not.\r\n *\r\n * @default `false`\r\n */\r\n dispatchConditionRejection?: boolean\r\n\r\n serializeError?: (x: unknown) => GetSerializedErrorType\r\n\r\n /**\r\n * A function to use when generating the `requestId` for the request sequence.\r\n *\r\n * @default `nanoid`\r\n */\r\n idGenerator?: (arg: ThunkArg) => string\r\n} & IsUnknown<\r\n GetPendingMeta,\r\n {\r\n /**\r\n * A method to generate additional properties to be added to `meta` of the pending action.\r\n *\r\n * Using this optional overload will not modify the types correctly, this overload is only in place to support JavaScript users.\r\n * Please use the `ThunkApiConfig` parameter `pendingMeta` to get access to a correctly typed overload\r\n */\r\n getPendingMeta?(\r\n base: {\r\n arg: ThunkArg\r\n requestId: string\r\n },\r\n api: Pick, 'getState' | 'extra'>\r\n ): GetPendingMeta\r\n },\r\n {\r\n /**\r\n * A method to generate additional properties to be added to `meta` of the pending action.\r\n */\r\n getPendingMeta(\r\n base: {\r\n arg: ThunkArg\r\n requestId: string\r\n },\r\n api: Pick, 'getState' | 'extra'>\r\n ): GetPendingMeta\r\n }\r\n>\r\n\r\nexport type AsyncThunkPendingActionCreator<\r\n ThunkArg,\r\n ThunkApiConfig = {}\r\n> = ActionCreatorWithPreparedPayload<\r\n [string, ThunkArg, GetPendingMeta?],\r\n undefined,\r\n string,\r\n never,\r\n {\r\n arg: ThunkArg\r\n requestId: string\r\n requestStatus: 'pending'\r\n } & GetPendingMeta\r\n>\r\n\r\nexport type AsyncThunkRejectedActionCreator<\r\n ThunkArg,\r\n ThunkApiConfig = {}\r\n> = ActionCreatorWithPreparedPayload<\r\n [\r\n Error | null,\r\n string,\r\n ThunkArg,\r\n GetRejectValue?,\r\n GetRejectedMeta?\r\n ],\r\n GetRejectValue | undefined,\r\n string,\r\n GetSerializedErrorType,\r\n {\r\n arg: ThunkArg\r\n requestId: string\r\n requestStatus: 'rejected'\r\n aborted: boolean\r\n condition: boolean\r\n } & (\r\n | ({ rejectedWithValue: false } & {\r\n [K in keyof GetRejectedMeta]?: undefined\r\n })\r\n | ({ rejectedWithValue: true } & GetRejectedMeta)\r\n )\r\n>\r\n\r\nexport type AsyncThunkFulfilledActionCreator<\r\n Returned,\r\n ThunkArg,\r\n ThunkApiConfig = {}\r\n> = ActionCreatorWithPreparedPayload<\r\n [Returned, string, ThunkArg, GetFulfilledMeta?],\r\n Returned,\r\n string,\r\n never,\r\n {\r\n arg: ThunkArg\r\n requestId: string\r\n requestStatus: 'fulfilled'\r\n } & GetFulfilledMeta\r\n>\r\n\r\n/**\r\n * A type describing the return value of `createAsyncThunk`.\r\n * Might be useful for wrapping `createAsyncThunk` in custom 
abstractions.\r\n *\r\n * @public\r\n */\r\nexport type AsyncThunk<\r\n Returned,\r\n ThunkArg,\r\n ThunkApiConfig extends AsyncThunkConfig\r\n> = AsyncThunkActionCreator & {\r\n pending: AsyncThunkPendingActionCreator\r\n rejected: AsyncThunkRejectedActionCreator\r\n fulfilled: AsyncThunkFulfilledActionCreator<\r\n Returned,\r\n ThunkArg,\r\n ThunkApiConfig\r\n >\r\n typePrefix: string\r\n}\r\n\r\ntype OverrideThunkApiConfigs = Id<\r\n NewConfig & Omit\r\n>\r\n\r\ntype CreateAsyncThunk = {\r\n /**\r\n *\r\n * @param typePrefix\r\n * @param payloadCreator\r\n * @param options\r\n *\r\n * @public\r\n */\r\n // separate signature without `AsyncThunkConfig` for better inference\r\n (\r\n typePrefix: string,\r\n payloadCreator: AsyncThunkPayloadCreator<\r\n Returned,\r\n ThunkArg,\r\n CurriedThunkApiConfig\r\n >,\r\n options?: AsyncThunkOptions\r\n ): AsyncThunk\r\n\r\n /**\r\n *\r\n * @param typePrefix\r\n * @param payloadCreator\r\n * @param options\r\n *\r\n * @public\r\n */\r\n (\r\n typePrefix: string,\r\n payloadCreator: AsyncThunkPayloadCreator<\r\n Returned,\r\n ThunkArg,\r\n OverrideThunkApiConfigs\r\n >,\r\n options?: AsyncThunkOptions<\r\n ThunkArg,\r\n OverrideThunkApiConfigs\r\n >\r\n ): AsyncThunk<\r\n Returned,\r\n ThunkArg,\r\n OverrideThunkApiConfigs\r\n >\r\n\r\n withTypes(): CreateAsyncThunk<\r\n OverrideThunkApiConfigs\r\n >\r\n}\r\n\r\nexport const createAsyncThunk = (() => {\r\n function createAsyncThunk<\r\n Returned,\r\n ThunkArg,\r\n ThunkApiConfig extends AsyncThunkConfig\r\n >(\r\n typePrefix: string,\r\n payloadCreator: AsyncThunkPayloadCreator<\r\n Returned,\r\n ThunkArg,\r\n ThunkApiConfig\r\n >,\r\n options?: AsyncThunkOptions\r\n ): AsyncThunk {\r\n type RejectedValue = GetRejectValue\r\n type PendingMeta = GetPendingMeta\r\n type FulfilledMeta = GetFulfilledMeta\r\n type RejectedMeta = GetRejectedMeta\r\n\r\n const fulfilled: AsyncThunkFulfilledActionCreator<\r\n Returned,\r\n ThunkArg,\r\n ThunkApiConfig\r\n > = createAction(\r\n typePrefix + '/fulfilled',\r\n (\r\n payload: Returned,\r\n requestId: string,\r\n arg: ThunkArg,\r\n meta?: FulfilledMeta\r\n ) => ({\r\n payload,\r\n meta: {\r\n ...((meta as any) || {}),\r\n arg,\r\n requestId,\r\n requestStatus: 'fulfilled' as const,\r\n },\r\n })\r\n )\r\n\r\n const pending: AsyncThunkPendingActionCreator =\r\n createAction(\r\n typePrefix + '/pending',\r\n (requestId: string, arg: ThunkArg, meta?: PendingMeta) => ({\r\n payload: undefined,\r\n meta: {\r\n ...((meta as any) || {}),\r\n arg,\r\n requestId,\r\n requestStatus: 'pending' as const,\r\n },\r\n })\r\n )\r\n\r\n const rejected: AsyncThunkRejectedActionCreator =\r\n createAction(\r\n typePrefix + '/rejected',\r\n (\r\n error: Error | null,\r\n requestId: string,\r\n arg: ThunkArg,\r\n payload?: RejectedValue,\r\n meta?: RejectedMeta\r\n ) => ({\r\n payload,\r\n error: ((options && options.serializeError) || miniSerializeError)(\r\n error || 'Rejected'\r\n ) as GetSerializedErrorType,\r\n meta: {\r\n ...((meta as any) || {}),\r\n arg,\r\n requestId,\r\n rejectedWithValue: !!payload,\r\n requestStatus: 'rejected' as const,\r\n aborted: error?.name === 'AbortError',\r\n condition: error?.name === 'ConditionError',\r\n },\r\n })\r\n )\r\n\r\n let displayedWarning = false\r\n\r\n const AC =\r\n typeof AbortController !== 'undefined'\r\n ? 
AbortController\r\n : class implements AbortController {\r\n signal = {\r\n aborted: false,\r\n addEventListener() {},\r\n dispatchEvent() {\r\n return false\r\n },\r\n onabort() {},\r\n removeEventListener() {},\r\n reason: undefined,\r\n throwIfAborted() {},\r\n }\r\n abort() {\r\n if (process.env.NODE_ENV !== 'production') {\r\n if (!displayedWarning) {\r\n displayedWarning = true\r\n console.info(\r\n `This platform does not implement AbortController. \r\nIf you want to use the AbortController to react to \\`abort\\` events, please consider importing a polyfill like 'abortcontroller-polyfill/dist/abortcontroller-polyfill-only'.`\r\n )\r\n }\r\n }\r\n }\r\n }\r\n\r\n function actionCreator(\r\n arg: ThunkArg\r\n ): AsyncThunkAction {\r\n return (dispatch, getState, extra) => {\r\n const requestId = options?.idGenerator\r\n ? options.idGenerator(arg)\r\n : nanoid()\r\n\r\n const abortController = new AC()\r\n let abortReason: string | undefined\r\n\r\n let started = false\r\n function abort(reason?: string) {\r\n abortReason = reason\r\n abortController.abort()\r\n }\r\n\r\n const promise = (async function () {\r\n let finalAction: ReturnType\r\n try {\r\n let conditionResult = options?.condition?.(arg, { getState, extra })\r\n if (isThenable(conditionResult)) {\r\n conditionResult = await conditionResult\r\n }\r\n\r\n if (conditionResult === false || abortController.signal.aborted) {\r\n // eslint-disable-next-line no-throw-literal\r\n throw {\r\n name: 'ConditionError',\r\n message: 'Aborted due to condition callback returning false.',\r\n }\r\n }\r\n started = true\r\n\r\n const abortedPromise = new Promise((_, reject) =>\r\n abortController.signal.addEventListener('abort', () =>\r\n reject({\r\n name: 'AbortError',\r\n message: abortReason || 'Aborted',\r\n })\r\n )\r\n )\r\n dispatch(\r\n pending(\r\n requestId,\r\n arg,\r\n options?.getPendingMeta?.(\r\n { requestId, arg },\r\n { getState, extra }\r\n )\r\n )\r\n )\r\n finalAction = await Promise.race([\r\n abortedPromise,\r\n Promise.resolve(\r\n payloadCreator(arg, {\r\n dispatch,\r\n getState,\r\n extra,\r\n requestId,\r\n signal: abortController.signal,\r\n abort,\r\n rejectWithValue: ((\r\n value: RejectedValue,\r\n meta?: RejectedMeta\r\n ) => {\r\n return new RejectWithValue(value, meta)\r\n }) as any,\r\n fulfillWithValue: ((value: unknown, meta?: FulfilledMeta) => {\r\n return new FulfillWithMeta(value, meta)\r\n }) as any,\r\n })\r\n ).then((result) => {\r\n if (result instanceof RejectWithValue) {\r\n throw result\r\n }\r\n if (result instanceof FulfillWithMeta) {\r\n return fulfilled(result.payload, requestId, arg, result.meta)\r\n }\r\n return fulfilled(result as any, requestId, arg)\r\n }),\r\n ])\r\n } catch (err) {\r\n finalAction =\r\n err instanceof RejectWithValue\r\n ? 
rejected(null, requestId, arg, err.payload, err.meta)\r\n : rejected(err as any, requestId, arg)\r\n }\r\n // We dispatch the result action _after_ the catch, to avoid having any errors\r\n // here get swallowed by the try/catch block,\r\n // per https://twitter.com/dan_abramov/status/770914221638942720\r\n // and https://github.com/reduxjs/redux-toolkit/blob/e85eb17b39a2118d859f7b7746e0f3fee523e089/docs/tutorials/advanced-tutorial.md#async-error-handling-logic-in-thunks\r\n\r\n const skipDispatch =\r\n options &&\r\n !options.dispatchConditionRejection &&\r\n rejected.match(finalAction) &&\r\n (finalAction as any).meta.condition\r\n\r\n if (!skipDispatch) {\r\n dispatch(finalAction)\r\n }\r\n return finalAction\r\n })()\r\n return Object.assign(promise as Promise, {\r\n abort,\r\n requestId,\r\n arg,\r\n unwrap() {\r\n return promise.then(unwrapResult)\r\n },\r\n })\r\n }\r\n }\r\n\r\n return Object.assign(\r\n actionCreator as AsyncThunkActionCreator<\r\n Returned,\r\n ThunkArg,\r\n ThunkApiConfig\r\n >,\r\n {\r\n pending,\r\n rejected,\r\n fulfilled,\r\n typePrefix,\r\n }\r\n )\r\n }\r\n createAsyncThunk.withTypes = () => createAsyncThunk\r\n\r\n return createAsyncThunk as CreateAsyncThunk\r\n})()\r\n\r\ninterface UnwrappableAction {\r\n payload: any\r\n meta?: any\r\n error?: any\r\n}\r\n\r\ntype UnwrappedActionPayload = Exclude<\r\n T,\r\n { error: any }\r\n>['payload']\r\n\r\n/**\r\n * @public\r\n */\r\nexport function unwrapResult(\r\n action: R\r\n): UnwrappedActionPayload {\r\n if (action.meta && action.meta.rejectedWithValue) {\r\n throw action.payload\r\n }\r\n if (action.error) {\r\n throw action.error\r\n }\r\n return action.payload\r\n}\r\n\r\ntype WithStrictNullChecks = undefined extends boolean\r\n ? False\r\n : True\r\n\r\nfunction isThenable(value: any): value is PromiseLike {\r\n return (\r\n value !== null &&\r\n typeof value === 'object' &&\r\n typeof value.then === 'function'\r\n )\r\n}\r\n","import type { AbortSignalWithReason } from './types'\r\n\r\nexport const assertFunction: (\r\n func: unknown,\r\n expected: string\r\n) => asserts func is (...args: unknown[]) => unknown = (\r\n func: unknown,\r\n expected: string\r\n) => {\r\n if (typeof func !== 'function') {\r\n throw new TypeError(`${expected} is not a function`)\r\n }\r\n}\r\n\r\nexport const noop = () => {}\r\n\r\nexport const catchRejection = (\r\n promise: Promise,\r\n onError = noop\r\n): Promise => {\r\n promise.catch(onError)\r\n\r\n return promise\r\n}\r\n\r\nexport const addAbortSignalListener = (\r\n abortSignal: AbortSignal,\r\n callback: (evt: Event) => void\r\n) => {\r\n abortSignal.addEventListener('abort', callback, { once: true })\r\n return () => abortSignal.removeEventListener('abort', callback)\r\n}\r\n\r\n/**\r\n * Calls `abortController.abort(reason)` and patches `signal.reason`.\r\n * if it is not supported.\r\n *\r\n * At the time of writing `signal.reason` is available in FF chrome, edge node 17 and deno.\r\n * @param abortController\r\n * @param reason\r\n * @returns\r\n * @see https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal/reason\r\n */\r\nexport const abortControllerWithReason = (\r\n abortController: AbortController,\r\n reason: T\r\n): void => {\r\n type Consumer = (val: T) => void\r\n\r\n const signal = abortController.signal as AbortSignalWithReason\r\n\r\n if (signal.aborted) {\r\n return\r\n }\r\n\r\n // Patch `reason` if necessary.\r\n // - We use defineProperty here because reason is a getter of `AbortSignal.__proto__`.\r\n // - We need to patch 'reason' before 
calling `.abort()` because listeners to the 'abort'\r\n // event are are notified immediately.\r\n if (!('reason' in signal)) {\r\n Object.defineProperty(signal, 'reason', {\r\n enumerable: true,\r\n value: reason,\r\n configurable: true,\r\n writable: true,\r\n })\r\n }\r\n\r\n ;(abortController.abort as Consumer)(reason)\r\n}\r\n","import type { SerializedError } from '@reduxjs/toolkit'\r\n\r\nconst task = 'task'\r\nconst listener = 'listener'\r\nconst completed = 'completed'\r\nconst cancelled = 'cancelled'\r\n\r\n/* TaskAbortError error codes */\r\nexport const taskCancelled = `task-${cancelled}` as const\r\nexport const taskCompleted = `task-${completed}` as const\r\nexport const listenerCancelled = `${listener}-${cancelled}` as const\r\nexport const listenerCompleted = `${listener}-${completed}` as const\r\n\r\nexport class TaskAbortError implements SerializedError {\r\n name = 'TaskAbortError'\r\n message: string\r\n constructor(public code: string | undefined) {\r\n this.message = `${task} ${cancelled} (reason: ${code})`\r\n }\r\n}\r\n","import { TaskAbortError } from './exceptions'\r\nimport type { AbortSignalWithReason, TaskResult } from './types'\r\nimport { addAbortSignalListener, catchRejection, noop } from './utils'\r\n\r\n/**\r\n * Synchronously raises {@link TaskAbortError} if the task tied to the input `signal` has been cancelled.\r\n * @param signal\r\n * @param reason\r\n * @see {TaskAbortError}\r\n */\r\nexport const validateActive = (signal: AbortSignal): void => {\r\n if (signal.aborted) {\r\n throw new TaskAbortError((signal as AbortSignalWithReason).reason)\r\n }\r\n}\r\n\r\n/**\r\n * Generates a race between the promise(s) and the AbortSignal\r\n * This avoids `Promise.race()`-related memory leaks:\r\n * https://github.com/nodejs/node/issues/17469#issuecomment-349794909\r\n */\r\nexport function raceWithSignal(\r\n signal: AbortSignalWithReason,\r\n promise: Promise\r\n): Promise {\r\n let cleanup = noop\r\n return new Promise((resolve, reject) => {\r\n const notifyRejection = () => reject(new TaskAbortError(signal.reason))\r\n\r\n if (signal.aborted) {\r\n notifyRejection()\r\n return\r\n }\r\n\r\n cleanup = addAbortSignalListener(signal, notifyRejection)\r\n promise.finally(() => cleanup()).then(resolve, reject)\r\n }).finally(() => {\r\n // after this point, replace `cleanup` with a noop, so there is no reference to `signal` any more\r\n cleanup = noop\r\n })\r\n}\r\n\r\n/**\r\n * Runs a task and returns promise that resolves to {@link TaskResult}.\r\n * Second argument is an optional `cleanUp` function that always runs after task.\r\n *\r\n * **Note:** `runTask` runs the executor in the next microtask.\r\n * @returns\r\n */\r\nexport const runTask = async (\r\n task: () => Promise,\r\n cleanUp?: () => void\r\n): Promise> => {\r\n try {\r\n await Promise.resolve()\r\n const value = await task()\r\n return {\r\n status: 'ok',\r\n value,\r\n }\r\n } catch (error: any) {\r\n return {\r\n status: error instanceof TaskAbortError ? 
'cancelled' : 'rejected',\r\n error,\r\n }\r\n } finally {\r\n cleanUp?.()\r\n }\r\n}\r\n\r\n/**\r\n * Given an input `AbortSignal` and a promise returns another promise that resolves\r\n * as soon the input promise is provided or rejects as soon as\r\n * `AbortSignal.abort` is `true`.\r\n * @param signal\r\n * @returns\r\n */\r\nexport const createPause = (signal: AbortSignal) => {\r\n return (promise: Promise): Promise => {\r\n return catchRejection(\r\n raceWithSignal(signal, promise).then((output) => {\r\n validateActive(signal)\r\n return output\r\n })\r\n )\r\n }\r\n}\r\n\r\n/**\r\n * Given an input `AbortSignal` and `timeoutMs` returns a promise that resolves\r\n * after `timeoutMs` or rejects as soon as `AbortSignal.abort` is `true`.\r\n * @param signal\r\n * @returns\r\n */\r\nexport const createDelay = (signal: AbortSignal) => {\r\n const pause = createPause(signal)\r\n return (timeoutMs: number): Promise => {\r\n return pause(new Promise((resolve) => setTimeout(resolve, timeoutMs)))\r\n }\r\n}\r\n","import type { Dispatch, AnyAction, MiddlewareAPI } from 'redux'\r\nimport type { ThunkDispatch } from 'redux-thunk'\r\nimport { createAction, isAction } from '../createAction'\r\nimport { nanoid } from '../nanoid'\r\n\r\nimport type {\r\n ListenerMiddleware,\r\n ListenerMiddlewareInstance,\r\n AddListenerOverloads,\r\n AnyListenerPredicate,\r\n CreateListenerMiddlewareOptions,\r\n TypedAddListener,\r\n TypedCreateListenerEntry,\r\n FallbackAddListenerOptions,\r\n ListenerEntry,\r\n ListenerErrorHandler,\r\n UnsubscribeListener,\r\n TakePattern,\r\n ListenerErrorInfo,\r\n ForkedTaskExecutor,\r\n ForkedTask,\r\n TypedRemoveListener,\r\n TaskResult,\r\n AbortSignalWithReason,\r\n UnsubscribeListenerOptions,\r\n} from './types'\r\nimport {\r\n abortControllerWithReason,\r\n addAbortSignalListener,\r\n assertFunction,\r\n catchRejection,\r\n} from './utils'\r\nimport {\r\n listenerCancelled,\r\n listenerCompleted,\r\n TaskAbortError,\r\n taskCancelled,\r\n taskCompleted,\r\n} from './exceptions'\r\nimport {\r\n runTask,\r\n validateActive,\r\n createPause,\r\n createDelay,\r\n raceWithSignal,\r\n} from './task'\r\nexport { TaskAbortError } from './exceptions'\r\nexport type {\r\n ListenerEffect,\r\n ListenerMiddleware,\r\n ListenerEffectAPI,\r\n ListenerMiddlewareInstance,\r\n CreateListenerMiddlewareOptions,\r\n ListenerErrorHandler,\r\n TypedStartListening,\r\n TypedAddListener,\r\n TypedStopListening,\r\n TypedRemoveListener,\r\n UnsubscribeListener,\r\n UnsubscribeListenerOptions,\r\n ForkedTaskExecutor,\r\n ForkedTask,\r\n ForkedTaskAPI,\r\n AsyncTaskExecutor,\r\n SyncTaskExecutor,\r\n TaskCancelled,\r\n TaskRejected,\r\n TaskResolved,\r\n TaskResult,\r\n} from './types'\r\n\r\n//Overly-aggressive byte-shaving\r\nconst { assign } = Object\r\n/**\r\n * @internal\r\n */\r\nconst INTERNAL_NIL_TOKEN = {} as const\r\n\r\nconst alm = 'listenerMiddleware' as const\r\n\r\nconst createFork = (parentAbortSignal: AbortSignalWithReason) => {\r\n const linkControllers = (controller: AbortController) =>\r\n addAbortSignalListener(parentAbortSignal, () =>\r\n abortControllerWithReason(controller, parentAbortSignal.reason)\r\n )\r\n\r\n return (taskExecutor: ForkedTaskExecutor): ForkedTask => {\r\n assertFunction(taskExecutor, 'taskExecutor')\r\n const childAbortController = new AbortController()\r\n\r\n linkControllers(childAbortController)\r\n\r\n const result = runTask(\r\n async (): Promise => {\r\n validateActive(parentAbortSignal)\r\n validateActive(childAbortController.signal)\r\n const 
result = (await taskExecutor({\r\n pause: createPause(childAbortController.signal),\r\n delay: createDelay(childAbortController.signal),\r\n signal: childAbortController.signal,\r\n })) as T\r\n validateActive(childAbortController.signal)\r\n return result\r\n },\r\n () => abortControllerWithReason(childAbortController, taskCompleted)\r\n )\r\n\r\n return {\r\n result: createPause>(parentAbortSignal)(result),\r\n cancel() {\r\n abortControllerWithReason(childAbortController, taskCancelled)\r\n },\r\n }\r\n }\r\n}\r\n\r\nconst createTakePattern = (\r\n startListening: AddListenerOverloads<\r\n UnsubscribeListener,\r\n S,\r\n Dispatch\r\n >,\r\n signal: AbortSignal\r\n): TakePattern => {\r\n /**\r\n * A function that takes a ListenerPredicate and an optional timeout,\r\n * and resolves when either the predicate returns `true` based on an action\r\n * state combination or when the timeout expires.\r\n * If the parent listener is canceled while waiting, this will throw a\r\n * TaskAbortError.\r\n */\r\n const take = async

>(\r\n predicate: P,\r\n timeout: number | undefined\r\n ) => {\r\n validateActive(signal)\r\n\r\n // Placeholder unsubscribe function until the listener is added\r\n let unsubscribe: UnsubscribeListener = () => {}\r\n\r\n const tuplePromise = new Promise<[AnyAction, S, S]>((resolve, reject) => {\r\n // Inside the Promise, we synchronously add the listener.\r\n let stopListening = startListening({\r\n predicate: predicate as any,\r\n effect: (action, listenerApi): void => {\r\n // One-shot listener that cleans up as soon as the predicate passes\r\n listenerApi.unsubscribe()\r\n // Resolve the promise with the same arguments the predicate saw\r\n resolve([\r\n action,\r\n listenerApi.getState(),\r\n listenerApi.getOriginalState(),\r\n ])\r\n },\r\n })\r\n unsubscribe = () => {\r\n stopListening()\r\n reject()\r\n }\r\n })\r\n\r\n const promises: (Promise | Promise<[AnyAction, S, S]>)[] = [\r\n tuplePromise,\r\n ]\r\n\r\n if (timeout != null) {\r\n promises.push(\r\n new Promise((resolve) => setTimeout(resolve, timeout, null))\r\n )\r\n }\r\n\r\n try {\r\n const output = await raceWithSignal(signal, Promise.race(promises))\r\n\r\n validateActive(signal)\r\n return output\r\n } finally {\r\n // Always clean up the listener\r\n unsubscribe()\r\n }\r\n }\r\n\r\n return ((predicate: AnyListenerPredicate, timeout: number | undefined) =>\r\n catchRejection(take(predicate, timeout))) as TakePattern\r\n}\r\n\r\nconst getListenerEntryPropsFrom = (options: FallbackAddListenerOptions) => {\r\n let { type, actionCreator, matcher, predicate, effect } = options\r\n\r\n if (type) {\r\n predicate = createAction(type).match\r\n } else if (actionCreator) {\r\n type = actionCreator!.type\r\n predicate = actionCreator.match\r\n } else if (matcher) {\r\n predicate = matcher\r\n } else if (predicate) {\r\n // pass\r\n } else {\r\n throw new Error(\r\n 'Creating or removing a listener requires one of the known fields for matching an action'\r\n )\r\n }\r\n\r\n assertFunction(effect, 'options.listener')\r\n\r\n return { predicate, type, effect }\r\n}\r\n\r\n/** Accepts the possible options for creating a listener, and returns a formatted listener entry */\r\nexport const createListenerEntry: TypedCreateListenerEntry = (\r\n options: FallbackAddListenerOptions\r\n) => {\r\n const { type, predicate, effect } = getListenerEntryPropsFrom(options)\r\n\r\n const id = nanoid()\r\n const entry: ListenerEntry = {\r\n id,\r\n effect,\r\n type,\r\n predicate,\r\n pending: new Set(),\r\n unsubscribe: () => {\r\n throw new Error('Unsubscribe not initialized')\r\n },\r\n }\r\n\r\n return entry\r\n}\r\n\r\nconst cancelActiveListeners = (\r\n entry: ListenerEntry>\r\n) => {\r\n entry.pending.forEach((controller) => {\r\n abortControllerWithReason(controller, listenerCancelled)\r\n })\r\n}\r\n\r\nconst createClearListenerMiddleware = (\r\n listenerMap: Map\r\n) => {\r\n return () => {\r\n listenerMap.forEach(cancelActiveListeners)\r\n\r\n listenerMap.clear()\r\n }\r\n}\r\n\r\n/**\r\n * Safely reports errors to the `errorHandler` provided.\r\n * Errors that occur inside `errorHandler` are notified in a new task.\r\n * Inspired by [rxjs reportUnhandledError](https://github.com/ReactiveX/rxjs/blob/6fafcf53dc9e557439b25debaeadfd224b245a66/src/internal/util/reportUnhandledError.ts)\r\n * @param errorHandler\r\n * @param errorToNotify\r\n */\r\nconst safelyNotifyError = (\r\n errorHandler: ListenerErrorHandler,\r\n errorToNotify: unknown,\r\n errorInfo: ListenerErrorInfo\r\n): void => {\r\n try {\r\n errorHandler(errorToNotify, 
errorInfo)\r\n } catch (errorHandlerError) {\r\n // We cannot let an error raised here block the listener queue.\r\n // The error raised here will be picked up by `window.onerror`, `process.on('error')` etc...\r\n setTimeout(() => {\r\n throw errorHandlerError\r\n }, 0)\r\n }\r\n}\r\n\r\n/**\r\n * @public\r\n */\r\nexport const addListener = createAction(\r\n `${alm}/add`\r\n) as TypedAddListener\r\n\r\n/**\r\n * @public\r\n */\r\nexport const clearAllListeners = createAction(`${alm}/removeAll`)\r\n\r\n/**\r\n * @public\r\n */\r\nexport const removeListener = createAction(\r\n `${alm}/remove`\r\n) as TypedRemoveListener\r\n\r\nconst defaultErrorHandler: ListenerErrorHandler = (...args: unknown[]) => {\r\n console.error(`${alm}/error`, ...args)\r\n}\r\n\r\n/**\r\n * @public\r\n */\r\nexport function createListenerMiddleware<\r\n S = unknown,\r\n D extends Dispatch = ThunkDispatch,\r\n ExtraArgument = unknown\r\n>(middlewareOptions: CreateListenerMiddlewareOptions = {}) {\r\n const listenerMap = new Map()\r\n const { extra, onError = defaultErrorHandler } = middlewareOptions\r\n\r\n assertFunction(onError, 'onError')\r\n\r\n const insertEntry = (entry: ListenerEntry) => {\r\n entry.unsubscribe = () => listenerMap.delete(entry!.id)\r\n\r\n listenerMap.set(entry.id, entry)\r\n return (cancelOptions?: UnsubscribeListenerOptions) => {\r\n entry.unsubscribe()\r\n if (cancelOptions?.cancelActive) {\r\n cancelActiveListeners(entry)\r\n }\r\n }\r\n }\r\n\r\n const findListenerEntry = (\r\n comparator: (entry: ListenerEntry) => boolean\r\n ): ListenerEntry | undefined => {\r\n for (const entry of Array.from(listenerMap.values())) {\r\n if (comparator(entry)) {\r\n return entry\r\n }\r\n }\r\n\r\n return undefined\r\n }\r\n\r\n const startListening = (options: FallbackAddListenerOptions) => {\r\n let entry = findListenerEntry(\r\n (existingEntry) => existingEntry.effect === options.effect\r\n )\r\n\r\n if (!entry) {\r\n entry = createListenerEntry(options as any)\r\n }\r\n\r\n return insertEntry(entry)\r\n }\r\n\r\n const stopListening = (\r\n options: FallbackAddListenerOptions & UnsubscribeListenerOptions\r\n ): boolean => {\r\n const { type, effect, predicate } = getListenerEntryPropsFrom(options)\r\n\r\n const entry = findListenerEntry((entry) => {\r\n const matchPredicateOrType =\r\n typeof type === 'string'\r\n ? entry.type === type\r\n : entry.predicate === predicate\r\n\r\n return matchPredicateOrType && entry.effect === effect\r\n })\r\n\r\n if (entry) {\r\n entry.unsubscribe()\r\n if (options.cancelActive) {\r\n cancelActiveListeners(entry)\r\n }\r\n }\r\n\r\n return !!entry\r\n }\r\n\r\n const notifyListener = async (\r\n entry: ListenerEntry>,\r\n action: AnyAction,\r\n api: MiddlewareAPI,\r\n getOriginalState: () => S\r\n ) => {\r\n const internalTaskController = new AbortController()\r\n const take = createTakePattern(\r\n startListening,\r\n internalTaskController.signal\r\n )\r\n\r\n try {\r\n entry.pending.add(internalTaskController)\r\n await Promise.resolve(\r\n entry.effect(\r\n action,\r\n // Use assign() rather than ... 
to avoid extra helper functions added to bundle\r\n assign({}, api, {\r\n getOriginalState,\r\n condition: (\r\n predicate: AnyListenerPredicate,\r\n timeout?: number\r\n ) => take(predicate, timeout).then(Boolean),\r\n take,\r\n delay: createDelay(internalTaskController.signal),\r\n pause: createPause(internalTaskController.signal),\r\n extra,\r\n signal: internalTaskController.signal,\r\n fork: createFork(internalTaskController.signal),\r\n unsubscribe: entry.unsubscribe,\r\n subscribe: () => {\r\n listenerMap.set(entry.id, entry)\r\n },\r\n cancelActiveListeners: () => {\r\n entry.pending.forEach((controller, _, set) => {\r\n if (controller !== internalTaskController) {\r\n abortControllerWithReason(controller, listenerCancelled)\r\n set.delete(controller)\r\n }\r\n })\r\n },\r\n })\r\n )\r\n )\r\n } catch (listenerError) {\r\n if (!(listenerError instanceof TaskAbortError)) {\r\n safelyNotifyError(onError, listenerError, {\r\n raisedBy: 'effect',\r\n })\r\n }\r\n } finally {\r\n abortControllerWithReason(internalTaskController, listenerCompleted) // Notify that the task has completed\r\n entry.pending.delete(internalTaskController)\r\n }\r\n }\r\n\r\n const clearListenerMiddleware = createClearListenerMiddleware(listenerMap)\r\n\r\n const middleware: ListenerMiddleware =\r\n (api) => (next) => (action) => {\r\n if (!isAction(action)) {\r\n // we only want to notify listeners for action objects\r\n return next(action)\r\n }\r\n\r\n if (addListener.match(action)) {\r\n return startListening(action.payload)\r\n }\r\n\r\n if (clearAllListeners.match(action)) {\r\n clearListenerMiddleware()\r\n return\r\n }\r\n\r\n if (removeListener.match(action)) {\r\n return stopListening(action.payload)\r\n }\r\n\r\n // Need to get this state _before_ the reducer processes the action\r\n let originalState: S | typeof INTERNAL_NIL_TOKEN = api.getState()\r\n\r\n // `getOriginalState` can only be called synchronously.\r\n // @see https://github.com/reduxjs/redux-toolkit/discussions/1648#discussioncomment-1932820\r\n const getOriginalState = (): S => {\r\n if (originalState === INTERNAL_NIL_TOKEN) {\r\n throw new Error(\r\n `${alm}: getOriginalState can only be called synchronously`\r\n )\r\n }\r\n\r\n return originalState as S\r\n }\r\n\r\n let result: unknown\r\n\r\n try {\r\n // Actually forward the action to the reducer before we handle listeners\r\n result = next(action)\r\n\r\n if (listenerMap.size > 0) {\r\n let currentState = api.getState()\r\n // Work around ESBuild+TS transpilation issue\r\n const listenerEntries = Array.from(listenerMap.values())\r\n for (let entry of listenerEntries) {\r\n let runListener = false\r\n\r\n try {\r\n runListener = entry.predicate(action, currentState, originalState)\r\n } catch (predicateError) {\r\n runListener = false\r\n\r\n safelyNotifyError(onError, predicateError, {\r\n raisedBy: 'predicate',\r\n })\r\n }\r\n\r\n if (!runListener) {\r\n continue\r\n }\r\n\r\n notifyListener(entry, action, api, getOriginalState)\r\n }\r\n }\r\n } finally {\r\n // Remove `originalState` store from this scope.\r\n originalState = INTERNAL_NIL_TOKEN\r\n }\r\n\r\n return result\r\n }\r\n\r\n return {\r\n middleware,\r\n startListening,\r\n stopListening,\r\n clearListeners: clearListenerMiddleware,\r\n } as ListenerMiddlewareInstance\r\n}\r\n","import type { StoreEnhancer } from 'redux'\r\n\r\nexport const SHOULD_AUTOBATCH = 'RTK_autoBatch'\r\n\r\nexport const prepareAutoBatched =\r\n () =>\r\n (payload: T): { payload: T; meta: unknown } => ({\r\n payload,\r\n meta: { 
[SHOULD_AUTOBATCH]: true },\r\n })\r\n\r\n// TODO Remove this in 2.0\r\n// Copied from https://github.com/feross/queue-microtask\r\nlet promise: Promise\r\nconst queueMicrotaskShim =\r\n typeof queueMicrotask === 'function'\r\n ? queueMicrotask.bind(\r\n typeof window !== 'undefined'\r\n ? window\r\n : typeof global !== 'undefined'\r\n ? global\r\n : globalThis\r\n )\r\n : // reuse resolved promise, and allocate it lazily\r\n (cb: () => void) =>\r\n (promise || (promise = Promise.resolve())).then(cb).catch((err: any) =>\r\n setTimeout(() => {\r\n throw err\r\n }, 0)\r\n )\r\n\r\nconst createQueueWithTimer = (timeout: number) => {\r\n return (notify: () => void) => {\r\n setTimeout(notify, timeout)\r\n }\r\n}\r\n\r\n// requestAnimationFrame won't exist in SSR environments.\r\n// Fall back to a vague approximation just to keep from erroring.\r\nconst rAF =\r\n typeof window !== 'undefined' && window.requestAnimationFrame\r\n ? window.requestAnimationFrame\r\n : createQueueWithTimer(10)\r\n\r\nexport type AutoBatchOptions =\r\n | { type: 'tick' }\r\n | { type: 'timer'; timeout: number }\r\n | { type: 'raf' }\r\n | { type: 'callback'; queueNotification: (notify: () => void) => void }\r\n\r\n/**\r\n * A Redux store enhancer that watches for \"low-priority\" actions, and delays\r\n * notifying subscribers until either the queued callback executes or the\r\n * next \"standard-priority\" action is dispatched.\r\n *\r\n * This allows dispatching multiple \"low-priority\" actions in a row with only\r\n * a single subscriber notification to the UI after the sequence of actions\r\n * is finished, thus improving UI re-render performance.\r\n *\r\n * Watches for actions with the `action.meta[SHOULD_AUTOBATCH]` attribute.\r\n * This can be added to `action.meta` manually, or by using the\r\n * `prepareAutoBatched` helper.\r\n *\r\n * By default, it will queue a notification for the end of the event loop tick.\r\n * However, you can pass several other options to configure the behavior:\r\n * - `{type: 'tick'}: queues using `queueMicrotask` (default)\r\n * - `{type: 'timer, timeout: number}`: queues using `setTimeout`\r\n * - `{type: 'raf'}`: queues using `requestAnimationFrame`\r\n * - `{type: 'callback', queueNotification: (notify: () => void) => void}: lets you provide your own callback\r\n *\r\n *\r\n */\r\nexport const autoBatchEnhancer =\r\n (options: AutoBatchOptions = { type: 'raf' }): StoreEnhancer =>\r\n (next) =>\r\n (...args) => {\r\n const store = next(...args)\r\n\r\n let notifying = true\r\n let shouldNotifyAtEndOfTick = false\r\n let notificationQueued = false\r\n\r\n const listeners = new Set<() => void>()\r\n\r\n const queueCallback =\r\n options.type === 'tick'\r\n ? queueMicrotaskShim\r\n : options.type === 'raf'\r\n ? rAF\r\n : options.type === 'callback'\r\n ? 
options.queueNotification\r\n : createQueueWithTimer(options.timeout)\r\n\r\n const notifyListeners = () => {\r\n // We're running at the end of the event loop tick.\r\n // Run the real listener callbacks to actually update the UI.\r\n notificationQueued = false\r\n if (shouldNotifyAtEndOfTick) {\r\n shouldNotifyAtEndOfTick = false\r\n listeners.forEach((l) => l())\r\n }\r\n }\r\n\r\n return Object.assign({}, store, {\r\n // Override the base `store.subscribe` method to keep original listeners\r\n // from running if we're delaying notifications\r\n subscribe(listener: () => void) {\r\n // Each wrapped listener will only call the real listener if\r\n // the `notifying` flag is currently active when it's called.\r\n // This lets the base store work as normal, while the actual UI\r\n // update becomes controlled by this enhancer.\r\n const wrappedListener: typeof listener = () => notifying && listener()\r\n const unsubscribe = store.subscribe(wrappedListener)\r\n listeners.add(listener)\r\n return () => {\r\n unsubscribe()\r\n listeners.delete(listener)\r\n }\r\n },\r\n // Override the base `store.dispatch` method so that we can check actions\r\n // for the `shouldAutoBatch` flag and determine if batching is active\r\n dispatch(action: any) {\r\n try {\r\n // If the action does _not_ have the `shouldAutoBatch` flag,\r\n // we resume/continue normal notify-after-each-dispatch behavior\r\n notifying = !action?.meta?.[SHOULD_AUTOBATCH]\r\n // If a `notifyListeners` microtask was queued, you can't cancel it.\r\n // Instead, we set a flag so that it's a no-op when it does run\r\n shouldNotifyAtEndOfTick = !notifying\r\n if (shouldNotifyAtEndOfTick) {\r\n // We've seen at least 1 action with `SHOULD_AUTOBATCH`. Try to queue\r\n // a microtask to notify listeners at the end of the event loop tick.\r\n // Make sure we only enqueue this _once_ per tick.\r\n if (!notificationQueued) {\r\n notificationQueued = true\r\n queueCallback(notifyListeners)\r\n }\r\n }\r\n // Go ahead and process the action as usual, including reducers.\r\n // If normal notification behavior is enabled, the store will notify\r\n // all of its own listeners, and the wrapper callbacks above will\r\n // see `notifying` is true and pass on to the real listener callbacks.\r\n // If we're \"batching\" behavior, then the wrapped callbacks will\r\n // bail out, causing the base store notification behavior to be no-ops.\r\n return store.dispatch(action)\r\n } finally {\r\n // Assume we're back to normal behavior after each action\r\n notifying = true\r\n }\r\n },\r\n })\r\n }\r\n","import { enableES5 } from 'immer'\r\nexport * from 'redux'\r\nexport {\r\n default as createNextState,\r\n current,\r\n freeze,\r\n original,\r\n isDraft,\r\n} from 'immer'\r\nexport type { Draft } from 'immer'\r\nexport { createSelector } from 'reselect'\r\nexport type {\r\n Selector,\r\n OutputParametricSelector,\r\n OutputSelector,\r\n ParametricSelector,\r\n} from 'reselect'\r\nexport { createDraftSafeSelector } from './createDraftSafeSelector'\r\nexport type { ThunkAction, ThunkDispatch, ThunkMiddleware } from 'redux-thunk'\r\n\r\n// We deliberately enable Immer's ES5 support, on the grounds that\r\n// we assume RTK will be used with React Native and other Proxy-less\r\n// environments. 
In addition, that's how Immer 4 behaved, and since\r\n// we want to ship this in an RTK minor, we should keep the same behavior.\r\nenableES5()\r\n\r\nexport {\r\n // js\r\n configureStore,\r\n} from './configureStore'\r\nexport type {\r\n // types\r\n ConfigureEnhancersCallback,\r\n ConfigureStoreOptions,\r\n EnhancedStore,\r\n} from './configureStore'\r\nexport type { DevToolsEnhancerOptions } from './devtoolsExtension'\r\nexport {\r\n // js\r\n createAction,\r\n getType,\r\n isAction,\r\n isFSA as isFluxStandardAction,\r\n} from './createAction'\r\nexport type {\r\n // types\r\n PayloadAction,\r\n PayloadActionCreator,\r\n ActionCreatorWithNonInferrablePayload,\r\n ActionCreatorWithOptionalPayload,\r\n ActionCreatorWithPayload,\r\n ActionCreatorWithoutPayload,\r\n ActionCreatorWithPreparedPayload,\r\n PrepareAction,\r\n} from './createAction'\r\nexport {\r\n // js\r\n createReducer,\r\n} from './createReducer'\r\nexport type {\r\n // types\r\n Actions,\r\n CaseReducer,\r\n CaseReducers,\r\n} from './createReducer'\r\nexport {\r\n // js\r\n createSlice,\r\n} from './createSlice'\r\n\r\nexport type {\r\n // types\r\n CreateSliceOptions,\r\n Slice,\r\n CaseReducerActions,\r\n SliceCaseReducers,\r\n ValidateSliceCaseReducers,\r\n CaseReducerWithPrepare,\r\n SliceActionCreator,\r\n} from './createSlice'\r\nexport {\r\n // js\r\n createImmutableStateInvariantMiddleware,\r\n isImmutableDefault,\r\n} from './immutableStateInvariantMiddleware'\r\nexport type {\r\n // types\r\n ImmutableStateInvariantMiddlewareOptions,\r\n} from './immutableStateInvariantMiddleware'\r\nexport {\r\n // js\r\n createSerializableStateInvariantMiddleware,\r\n findNonSerializableValue,\r\n isPlain,\r\n} from './serializableStateInvariantMiddleware'\r\nexport type {\r\n // types\r\n SerializableStateInvariantMiddlewareOptions,\r\n} from './serializableStateInvariantMiddleware'\r\nexport {\r\n // js\r\n getDefaultMiddleware,\r\n} from './getDefaultMiddleware'\r\nexport type {\r\n // types\r\n ActionReducerMapBuilder,\r\n} from './mapBuilders'\r\nexport { MiddlewareArray, EnhancerArray } from './utils'\r\n\r\nexport { createEntityAdapter } from './entities/create_adapter'\r\nexport type {\r\n Dictionary,\r\n EntityState,\r\n EntityAdapter,\r\n EntitySelectors,\r\n EntityStateAdapter,\r\n EntityId,\r\n Update,\r\n IdSelector,\r\n Comparer,\r\n} from './entities/models'\r\n\r\nexport {\r\n createAsyncThunk,\r\n unwrapResult,\r\n miniSerializeError,\r\n} from './createAsyncThunk'\r\nexport type {\r\n AsyncThunk,\r\n AsyncThunkOptions,\r\n AsyncThunkAction,\r\n AsyncThunkPayloadCreatorReturnValue,\r\n AsyncThunkPayloadCreator,\r\n SerializedError,\r\n} from './createAsyncThunk'\r\n\r\nexport {\r\n // js\r\n isAllOf,\r\n isAnyOf,\r\n isPending,\r\n isRejected,\r\n isFulfilled,\r\n isAsyncThunkAction,\r\n isRejectedWithValue,\r\n} from './matchers'\r\nexport type {\r\n // types\r\n ActionMatchingAllOf,\r\n ActionMatchingAnyOf,\r\n} from './matchers'\r\n\r\nexport { nanoid } from './nanoid'\r\n\r\nexport { default as isPlainObject } from './isPlainObject'\r\n\r\nexport type {\r\n ListenerEffect,\r\n ListenerMiddleware,\r\n ListenerEffectAPI,\r\n ListenerMiddlewareInstance,\r\n CreateListenerMiddlewareOptions,\r\n ListenerErrorHandler,\r\n TypedStartListening,\r\n TypedAddListener,\r\n TypedStopListening,\r\n TypedRemoveListener,\r\n UnsubscribeListener,\r\n UnsubscribeListenerOptions,\r\n ForkedTaskExecutor,\r\n ForkedTask,\r\n ForkedTaskAPI,\r\n AsyncTaskExecutor,\r\n SyncTaskExecutor,\r\n TaskCancelled,\r\n TaskRejected,\r\n 
TaskResolved,\r\n TaskResult,\r\n} from './listenerMiddleware/index'\r\nexport type { AnyListenerPredicate } from './listenerMiddleware/types'\r\n\r\nexport {\r\n createListenerMiddleware,\r\n addListener,\r\n removeListener,\r\n clearAllListeners,\r\n TaskAbortError,\r\n} from './listenerMiddleware/index'\r\n\r\nexport {\r\n SHOULD_AUTOBATCH,\r\n prepareAutoBatched,\r\n autoBatchEnhancer,\r\n} from './autoBatchEnhancer'\r\nexport type { AutoBatchOptions } from './autoBatchEnhancer'\r\n","import type {\n EntityId,\n DeferredEntityState,\n Deferred,\n IdSelector,\n Changes,\n DeferredEntity,\n} from \"./models\";\n\nexport function selectIdValue(entity: T, selectId: IdSelector) {\n const key = selectId(entity);\n\n if (process.env.NODE_ENV !== \"production\" && key === undefined) {\n console.warn(\n \"The entity passed to the `selectId` implementation returned undefined.\",\n \"You should probably provide your own `selectId` implementation.\",\n \"The entity that was passed:\",\n entity,\n \"The `selectId` implementation:\",\n selectId.toString()\n );\n }\n\n return key;\n}\n\nexport function ensureEntitiesArray(\n entities: readonly T[] | Record\n): readonly T[] {\n if (!Array.isArray(entities)) {\n entities = Object.values(entities);\n }\n\n return entities;\n}\n\nexport function getProperty(\n entity: T,\n property: S\n): T[S] {\n return entity[property];\n}\n\n/* Deferred */\n\nexport function getCompleteEntity(entity: DeferredEntity): T | undefined {\n if (entity.changes.deleted) return;\n const { added, deleted, ...completeEntity } = {\n ...entity.saved,\n ...entity.changes,\n };\n return completeEntity as T;\n}\nexport function getDeferredProperty(\n entity: DeferredEntity,\n property: S\n): T[S] | NonNullable[S]> {\n return entity.changes[property] ?? 
entity.saved[property];\n}\n\nexport function splitAddedDeferredEntities(\n newEntities: readonly T[] | Record,\n selectId: IdSelector,\n state: DeferredEntityState\n): [T[], Changes[], T[]] {\n newEntities = ensureEntitiesArray(newEntities);\n\n const added: T[] = [];\n const updated: Changes[] = [];\n const reset: T[] = [];\n\n for (const entity of newEntities) {\n const id = selectDeferredIdValue(entity, selectId);\n if (id in state.entities) {\n if (state.entities[id].changes.deleted) {\n reset.push(entity as T);\n } else {\n const { id, ...withoutId } = entity as any;\n\n updated.push({ id, changes: withoutId as Deferred });\n }\n } else {\n added.push(entity);\n }\n }\n return [added, updated, reset];\n}\n\nexport function selectDeferredIdValue(entity: T, selectId: IdSelector) {\n const key = selectId(entity);\n\n if (process.env.NODE_ENV !== \"production\" && key === undefined) {\n console.warn(\n \"The entity passed to the `selectId` implementation returned undefined.\",\n \"You should probably provide your own `selectId` implementation.\",\n \"The entity that was passed:\",\n entity,\n \"The `selectId` implementation:\",\n selectId.toString()\n );\n }\n\n return key;\n}\n","import createNextState, { isDraft } from \"immer\";\nimport type { DeferredEntityState, DeferredPreventAny } from \"./models\";\nimport { PayloadAction } from \"@reduxjs/toolkit\";\nfunction isPlainObject(value: unknown): value is object {\n if (typeof value !== \"object\" || value === null) return false;\n\n let proto = Object.getPrototypeOf(value);\n if (proto === null) return true;\n\n let baseProto = proto;\n while (Object.getPrototypeOf(baseProto) !== null) {\n baseProto = Object.getPrototypeOf(baseProto);\n }\n\n return proto === baseProto;\n}\nfunction isValidKey(key: string) {\n return [\"type\", \"payload\", \"error\", \"meta\"].indexOf(key) > -1;\n}\n\nexport function isFSA(action: unknown): action is {\n type: string;\n payload?: unknown;\n error?: unknown;\n meta?: unknown;\n} {\n return (\n isPlainObject(action) &&\n typeof (action as any).type === \"string\" &&\n Object.keys(action).every(isValidKey)\n );\n}\n\nexport function createSingleArgumentDeferredStateOperator(\n mutator: (state: DeferredEntityState) => void\n) {\n const operator = createDeferredStateOperator(\n (_: undefined, state: DeferredEntityState) => mutator(state)\n );\n\n return function operation>(\n state: DeferredPreventAny\n ): S {\n return operator(state as S, undefined);\n };\n}\n\nexport function createDeferredStateOperator(\n mutator: (arg: R, state: DeferredEntityState) => void\n) {\n return function operation>(\n state: S,\n arg: R | PayloadAction\n ): S {\n function isPayloadActionArgument(\n arg: R | PayloadAction\n ): arg is PayloadAction {\n return isFSA(arg);\n }\n\n const runMutator = (draft: DeferredEntityState) => {\n if (isPayloadActionArgument(arg)) {\n mutator(arg.payload, draft);\n } else {\n mutator(arg, draft);\n }\n };\n\n if (isDraft(state)) {\n // we must already be inside a `createNextState` call, likely because\n // this is being wrapped in `createReducer` or `createSlice`.\n // It's safe to just pass the draft to the mutator.\n runMutator(state);\n\n // since it's a draft, we'll just return it\n return state;\n } else {\n // @ts-ignore createNextState() produces an Immutable> rather\n // than an Immutable, and TypeScript cannot find out how to reconcile\n // these two types.\n return createNextState(state, runMutator);\n }\n };\n}\n","import type {\n EntityId,\n DeferredEntityStateAdapter,\n 
DeferredEntityState,\n Deferred,\n IdSelector,\n Changes,\n} from \"./models\";\nimport {\n createDeferredStateOperator,\n createSingleArgumentDeferredStateOperator,\n} from \"./deferred_state_adapter\";\nimport {\n selectIdValue,\n ensureEntitiesArray,\n splitAddedDeferredEntities,\n selectDeferredIdValue,\n} from \"./utils\";\n\nexport function createUnsortedDeferredStateAdapter(\n selectId: IdSelector\n): DeferredEntityStateAdapter {\n type R = DeferredEntityState;\n\n function addOneMutably(entity: T, state: R): void {\n const key = selectIdValue(entity, selectId);\n if (key in state.entities) {\n if (state.entities[key].changes.deleted) {\n state.entities[key].saved = entity;\n state.entities[key].changes = { added: true } as Deferred;\n }\n return;\n }\n state.ids.push(key);\n state.entities[key] = {\n saved: entity,\n changes: { added: true } as Deferred,\n };\n }\n\n function addManyMutably(\n newEntities: readonly T[] | Record,\n state: R\n ): void {\n newEntities = ensureEntitiesArray(newEntities);\n\n for (const entity of newEntities) {\n addOneMutably(entity, state);\n }\n }\n\n function setOneMutably(entity: T, state: R): void {\n const key = selectIdValue(entity, selectId);\n if (!(key in state.entities)) {\n state.ids.push(key);\n state.entities[key] = {\n saved: entity,\n changes: { added: true } as Deferred,\n };\n return;\n }\n if (state.entities[key].changes?.deleted) {\n state.entities[key] = {\n saved: entity,\n changes: { added: true } as Deferred,\n };\n return;\n }\n state.entities[key].changes = { ...entity, added: true } as Deferred;\n }\n\n function setManyMutably(\n newEntities: readonly T[] | Record,\n state: R\n ): void {\n newEntities = ensureEntitiesArray(newEntities);\n for (const entity of newEntities) {\n setOneMutably(entity, state);\n }\n }\n\n function setAllMutably(\n newEntities: readonly T[] | Record,\n state: R\n ): void {\n newEntities = ensureEntitiesArray(newEntities);\n\n state.ids = [];\n state.entities = {};\n\n addManyMutably(newEntities, state);\n }\n\n function removeOneMutably(key: EntityId, state: R): void {\n return removeManyMutably([key], state);\n }\n\n function removeManyMutably(keys: readonly EntityId[], state: R): void {\n keys.forEach((key) => {\n if (key in state.entities) {\n const added = state.entities[key].changes.added ? { added: true } : {};\n state.entities[key].changes = {\n ...added,\n deleted: true,\n } as Deferred;\n }\n });\n }\n\n function removeAllMutably(state: R): void {\n for (const key of state.ids) {\n const added = state.entities[key].changes.added ? 
{ added: true } : {};\n state.entities[key].changes = { ...added, deleted: true } as Deferred;\n }\n }\n\n function takeNewKey(\n keys: { [id: string]: EntityId },\n update: Changes,\n state: R\n ): boolean {\n const original = state.entities[update.id];\n const updatedDeferred = {\n ...state.entities[update.id].changes,\n ...update.changes,\n };\n const updated: { saved: T; changes: Deferred } = Object.assign(\n {},\n original,\n\n { changes: updatedDeferred }\n );\n const newKey = selectDeferredIdValue(updated.saved, selectId);\n const hasNewKey = newKey !== update.id;\n\n if (hasNewKey) {\n keys[update.id] = newKey;\n delete state.entities[update.id];\n }\n\n state.entities[newKey] = updated;\n\n return hasNewKey;\n }\n\n function updateOneMutably(update: Changes, state: R): void {\n return updateManyMutably([update], state);\n }\n\n //XXX: Test this\n function updateManyMutably(\n updates: ReadonlyArray>,\n state: R\n ): void {\n const newKeys: { [id: string]: EntityId } = {};\n\n const updatesPerEntity: { [id: string]: Changes } = {};\n updates.forEach((update) => {\n // Only apply updates to entities that currently exist\n\n if (update.id in state.entities) {\n // If there are multiple updates to one entity, merge them together\n updatesPerEntity[update.id] = {\n id: update.id,\n changes: {\n ...(updatesPerEntity[update.id]\n ? updatesPerEntity[update.id].changes\n : null),\n ...update.changes,\n },\n };\n }\n });\n\n updates = Object.values(updatesPerEntity);\n\n const didMutateEntities = updates.length > 0;\n\n if (didMutateEntities) {\n const didMutateIds =\n updates.filter((update) => takeNewKey(newKeys, update, state)).length >\n 0;\n\n if (didMutateIds) {\n state.ids = Object.keys(state.entities);\n }\n }\n }\n\n function upsertOneMutably(entity: T, state: R): void {\n return upsertManyMutably([entity], state);\n }\n\n function upsertManyMutably(\n newEntities: readonly T[] | Record,\n state: R\n ): void {\n const [added, updated, reset] = splitAddedDeferredEntities(\n newEntities,\n selectId,\n state\n );\n\n updateManyMutably(updated, state);\n addManyMutably(added, state);\n setManyMutably(reset, state);\n }\n function reconcile(keep: boolean, state: R): void {\n let hasModifiedIds = false;\n if (keep) {\n for (const id of state.ids) {\n const changes = state.entities[id].changes;\n if (changes) {\n if (changes.deleted) {\n delete state.entities[id];\n hasModifiedIds = true;\n } else {\n let { added, deleted, ...preparedDeferred } = changes;\n Object.assign(state.entities[id].saved!, preparedDeferred);\n state.entities[id].changes = {};\n }\n }\n }\n } else {\n for (const id of state.ids) {\n const changes = state.entities[id].changes;\n if (changes) {\n if (changes.added) {\n delete state.entities[id];\n hasModifiedIds = true;\n } else {\n state.entities[id].changes = {};\n }\n }\n }\n }\n if (hasModifiedIds) {\n state.ids = Object.keys(state.entities);\n }\n }\n\n return {\n //@ts-ignore\n removeAll: createSingleArgumentDeferredStateOperator(removeAllMutably),\n addOne: createDeferredStateOperator(addOneMutably),\n addMany: createDeferredStateOperator(addManyMutably),\n setOne: createDeferredStateOperator(setOneMutably),\n setMany: createDeferredStateOperator(setManyMutably),\n setAll: createDeferredStateOperator(setAllMutably),\n updateOne: createDeferredStateOperator(updateOneMutably),\n updateMany: createDeferredStateOperator(updateManyMutably),\n upsertOne: createDeferredStateOperator(upsertOneMutably),\n upsertMany: createDeferredStateOperator(upsertManyMutably),\n 
removeOne: createDeferredStateOperator(removeOneMutably),\n removeMany: createDeferredStateOperator(removeManyMutably),\n reconcile: createDeferredStateOperator(reconcile),\n };\n}\n","import type {\n Comparer,\n DeferredEntityAdapter,\n IdSelector,\n DeferredEntityDefinition,\n} from \"./models\";\nimport { createInitialDeferredStateFactory } from \"./deferred_entity_state\";\nimport { createDeferredSelectorsFactory } from \"./deferred_state_selectors\";\nimport { createSortedDeferredStateAdapter } from \"./deferred_sorted_state_adapter\";\nimport { createUnsortedDeferredStateAdapter } from \"./deferred_unsorted_state_adapter\";\n\n/**\n *\n * @param options\n *\n * @public\n */\nexport function createDeferredEntityAdapter(\n options: {\n selectId?: IdSelector;\n sortComparer?: false | Comparer;\n } = {}\n): DeferredEntityAdapter {\n const { selectId, sortComparer }: DeferredEntityDefinition = {\n sortComparer: false,\n selectId: (instance: any) => instance.id,\n ...options,\n };\n\n const deferredStateFactory = createInitialDeferredStateFactory();\n\n const deferredSelectorsFactory = createDeferredSelectorsFactory();\n\n const deferredStateAdapter = sortComparer\n ? createSortedDeferredStateAdapter(selectId, sortComparer)\n : createUnsortedDeferredStateAdapter(selectId);\n\n return {\n selectId,\n sortComparer,\n ...deferredStateFactory,\n ...deferredSelectorsFactory,\n ...deferredStateAdapter,\n };\n}\n","import type { DeferredEntityState } from \"./models\";\n\nfunction getInitialEntityState(): DeferredEntityState {\n return {\n ids: [],\n entities: {},\n };\n}\n\nexport function createInitialDeferredStateFactory() {\n function getInitialState(): DeferredEntityState;\n function getInitialState(\n additionalState: S\n ): DeferredEntityState & S;\n function getInitialState(additionalState: any = {}): any {\n return Object.assign(getInitialEntityState(), additionalState);\n }\n\n return { getInitialState };\n}\n","import type { Selector } from \"reselect\";\nimport type {\n EntityId,\n DeferredEntitySelectors,\n DeferredEntityState,\n Dictionary,\n} from \"./models\";\nimport { createDraftSafeSelector } from \"@reduxjs/toolkit\";\nimport { getCompleteEntity } from \"./utils\";\n\nexport function createDeferredSelectorsFactory() {\n function getSelectors(): DeferredEntitySelectors>;\n function getSelectors(\n selectState: (state: V) => DeferredEntityState\n ): DeferredEntitySelectors;\n function getSelectors(\n selectState?: (state: V) => DeferredEntityState\n ): DeferredEntitySelectors {\n // Select unmerged, unfilterd entities and ids\n const selectIds = (state: DeferredEntityState) => state.ids;\n const selectEntities = (state: DeferredEntityState) => state.entities;\n\n // Select all but deleted entities (draft safe because new object returnd)\n const selectAvailableEntities = createDraftSafeSelector(\n selectEntities,\n (entities): Record => {\n const availible: Record = {};\n for (const id of Object.keys(entities)) {\n if (!entities[id].changes.deleted) {\n availible[id] = getCompleteEntity(entities[id])!;\n }\n }\n return availible;\n }\n );\n\n // Select all but deleted ids (draft safe because new object returnd)\n const selectAvailableIds = createDraftSafeSelector(\n [selectIds, selectEntities],\n (ids, entities) => {\n return ids.filter((id) => !entities[id].changes.deleted);\n }\n );\n\n // Select All available entity objects\n const selectAllAvailable = createDraftSafeSelector(\n selectAvailableEntities,\n (entities): T[] => Object.values(entities)\n );\n\n const selectId = (_: 
unknown, id: EntityId) => id;\n\n const selectById = (entities: Dictionary, id: EntityId) => {\n if (Object.keys(entities)) {\n return entities[id];\n }\n };\n\n const selectTotal = createDraftSafeSelector(\n [selectAvailableIds],\n (ids) => ids.length\n );\n\n if (!selectState) {\n return {\n selectIds: selectAvailableIds,\n selectEntities: selectAvailableEntities,\n selectAll: selectAllAvailable,\n selectTotal,\n selectById: createDraftSafeSelector(\n selectAvailableEntities,\n selectId,\n selectById\n ),\n };\n }\n\n const selectGlobalizedEntities = createDraftSafeSelector(\n selectState as Selector>,\n selectAvailableEntities\n );\n\n return {\n selectIds: createDraftSafeSelector(selectState, selectAvailableIds),\n selectEntities: selectGlobalizedEntities,\n selectAll: createDraftSafeSelector(selectState, selectAllAvailable),\n selectTotal: createDraftSafeSelector(selectState, selectTotal),\n selectById: createDraftSafeSelector(\n selectGlobalizedEntities,\n selectId,\n selectById\n ),\n };\n }\n\n return { getSelectors };\n}\n","import type {\n Comparer,\n EntityId,\n DeferredEntityStateAdapter,\n DeferredEntityState,\n Deferred,\n IdSelector,\n Changes,\n DeferredEntity,\n} from \"./models\";\nimport { createDeferredStateOperator } from \"./deferred_state_adapter\";\nimport { createUnsortedDeferredStateAdapter } from \"./deferred_unsorted_state_adapter\";\nimport {\n selectIdValue,\n ensureEntitiesArray,\n splitAddedDeferredEntities,\n} from \"./utils\";\n\nexport function createSortedDeferredStateAdapter(\n selectId: IdSelector,\n sort: Comparer\n): DeferredEntityStateAdapter {\n type R = DeferredEntityState;\n\n const { removeOne, removeMany, removeAll } =\n createUnsortedDeferredStateAdapter(selectId);\n\n function addOneMutably(entity: T, state: R): void {\n return addManyMutably([entity], state);\n }\n\n function addManyMutably(\n newEntities: readonly T[] | Record,\n state: R\n ): void {\n newEntities = ensureEntitiesArray(newEntities);\n\n const models = newEntities.filter(\n (model) =>\n !(selectIdValue(model, selectId) in state.entities) ||\n state.entities[selectIdValue(model, selectId)].changes.deleted\n );\n\n if (models.length !== 0) {\n merge(models, state);\n }\n }\n\n function setOneMutably(entity: T, state: R): void {\n return setManyMutably([entity], state);\n }\n\n function setManyMutably(\n newEntities: readonly T[] | Record,\n state: R\n ): void {\n newEntities = ensureEntitiesArray(newEntities);\n if (newEntities.length !== 0) {\n merge(newEntities, state);\n }\n }\n\n function setAllMutably(\n newEntities: readonly T[] | Record,\n state: R\n ): void {\n newEntities = ensureEntitiesArray(newEntities);\n state.entities = {};\n state.ids = [];\n\n addManyMutably(newEntities, state);\n }\n\n function updateOneMutably(update: Changes, state: R): void {\n return updateManyMutably([update], state);\n }\n\n function updateManyMutably(\n updates: ReadonlyArray>,\n state: R\n ): void {\n for (let update of updates) {\n const entityObject = state.entities[update.id];\n if (!entityObject) {\n continue;\n }\n\n const changes = state.entities[update.id].changes;\n\n Object.assign(changes, update.changes);\n state.entities[update.id].changes = changes;\n }\n }\n\n function upsertOneMutably(entity: T, state: R): void {\n return upsertManyMutably([entity], state);\n }\n\n function upsertManyMutably(\n newEntities: readonly T[] | Record,\n state: R\n ): void {\n const [added, updated] = splitAddedDeferredEntities(\n newEntities,\n selectId,\n state\n );\n\n 
updateManyMutably(updated, state);\n addManyMutably(added, state);\n }\n\n function areArraysEqual(a: readonly unknown[], b: readonly unknown[]) {\n if (a.length !== b.length) {\n return false;\n }\n\n for (let i = 0; i < a.length && i < b.length; i++) {\n if (a[i] === b[i]) {\n continue;\n }\n return false;\n }\n return true;\n }\n\n function merge(models: readonly T[], state: R): void {\n // Insert/overwrite all new/updated\n models.forEach((model) => {\n const id = selectId(model);\n if (!state.ids.includes(id)) {\n state.entities[id] = {\n saved: model,\n changes: { added: true } as Deferred,\n };\n\n return;\n }\n state.entities[selectId(model)].changes = {\n ...state.entities[id].changes,\n ...model,\n };\n });\n\n resortEntities(state);\n }\n\n function resortEntities(state: R) {\n const allEntities = Object.values(state.entities).map(\n (entity: DeferredEntity) => {\n return { ...entity.saved, ...entity.changes };\n }\n ) as T[];\n allEntities.sort(sort);\n\n const newSortedIds = allEntities.map((entity) => selectId(entity));\n const { ids } = state;\n\n if (!areArraysEqual(ids, newSortedIds)) {\n state.ids = newSortedIds;\n }\n }\n\n function reconcile(keep: boolean, state: R): void {\n let hasModifiedIds = false;\n if (keep) {\n for (const id of state.ids) {\n const changes = state.entities[id].changes;\n\n if (changes) {\n if (changes.deleted) {\n delete state.entities[id];\n hasModifiedIds = true;\n } else {\n const { deleted, added, ...filteredChanges } = changes;\n Object.assign(state.entities[id].saved!, filteredChanges);\n state.entities[id].changes = {};\n }\n }\n }\n } else {\n for (const id of state.ids) {\n const changes = state.entities[id].changes;\n\n if (changes) {\n if (changes.added) {\n delete state.entities[id];\n hasModifiedIds = true;\n } else {\n state.entities[id].changes = {};\n }\n }\n }\n }\n if (hasModifiedIds) {\n state.ids = Object.keys(state.entities);\n resortEntities(state);\n }\n }\n\n return {\n removeOne,\n removeMany,\n removeAll,\n addOne: createDeferredStateOperator(addOneMutably),\n updateOne: createDeferredStateOperator(updateOneMutably),\n upsertOne: createDeferredStateOperator(upsertOneMutably),\n setOne: createDeferredStateOperator(setOneMutably),\n setMany: createDeferredStateOperator(setManyMutably),\n setAll: createDeferredStateOperator(setAllMutably),\n addMany: createDeferredStateOperator(addManyMutably),\n updateMany: createDeferredStateOperator(updateManyMutably),\n upsertMany: createDeferredStateOperator(upsertManyMutably),\n reconcile: createDeferredStateOperator(reconcile),\n };\n}\n","import { createSlice, PayloadAction } from \"@reduxjs/toolkit\";\n\nimport { createDeferredEntityAdapter } from \"store/entities/create_deferred_adapter\";\n\nimport { getCompleteEntity, getDeferredProperty } from \"store/entities/utils\";\nimport { intersection, union } from \"lodash\";\n\nimport {\n generateUUID,\n isUnknownCategory,\n mutatingFilter,\n newReplaceDuplicateName,\n} from \"utils/common/helpers\";\nimport { dispose, TensorContainer } from \"@tensorflow/tfjs\";\nimport { DataState } from \"store/types\";\nimport { DeferredEntity, DeferredEntityState } from \"store/entities/models\";\nimport { encode } from \"utils/annotator/rle\";\nimport { Partition } from \"utils/models/enums\";\nimport { UNKNOWN_IMAGE_CATEGORY_COLOR } from \"utils/common/constants\";\nimport { PartialBy } from \"utils/common/types\";\nimport {\n Kind,\n AnnotationObject,\n Category,\n DecodedAnnotationObject,\n ImageObject,\n ThingsUpdates,\n CategoryUpdates,\n} 
from \"./types\";\nimport { UNKNOWN_CATEGORY_NAME } from \"./constants\";\n\nexport const kindsAdapter = createDeferredEntityAdapter();\nexport const categoriesAdapter = createDeferredEntityAdapter();\nexport const thingsAdapter = createDeferredEntityAdapter<\n ImageObject | AnnotationObject\n>();\n\nexport const initialState = (): DataState => {\n return {\n kinds: kindsAdapter.getInitialState(),\n categories: categoriesAdapter.getInitialState(),\n things: thingsAdapter.getInitialState(),\n };\n};\n\nconst updateContents = (\n previousContents: string[],\n contents: string[],\n updateType: \"add\" | \"remove\" | \"replace\"\n) => {\n var newContents: string[];\n\n switch (updateType) {\n case \"add\":\n newContents = union(previousContents, contents);\n break;\n case \"remove\":\n newContents = previousContents.filter((a) => !contents.includes(a));\n break;\n case \"replace\":\n newContents = contents;\n }\n return newContents;\n};\n\nexport const dataSlice = createSlice({\n name: \"new-data\",\n initialState: initialState,\n reducers: {\n resetData: (state) => initialState(),\n initializeState(\n state,\n action: PayloadAction<{\n data: {\n kinds: DeferredEntityState;\n categories: DeferredEntityState;\n things: DeferredEntityState;\n };\n }>\n ) {\n Object.values(state.things.entities).forEach((entity) => {\n dispose(entity as unknown as TensorContainer);\n });\n\n state.kinds = action.payload.data.kinds;\n state.categories = action.payload.data.categories;\n state.things = action.payload.data.things;\n },\n addKinds(\n state,\n action: PayloadAction<{\n kinds: Array>;\n isPermanent?: boolean;\n }>\n ) {\n const { kinds, isPermanent } = action.payload;\n for (const kind of kinds) {\n if (state.kinds.entities[kind.id]) continue;\n if (!kind.containing) kind.containing = [];\n\n if (isPermanent) {\n state.kinds.entities[kind.id] = { saved: kind as Kind, changes: {} };\n state.kinds.ids.push(kind.id);\n } else {\n kindsAdapter.addOne(state.kinds, kind as Kind);\n }\n }\n },\n updateKindContents(\n state,\n action: PayloadAction<{\n changes: Array<{\n kindId: string;\n updateType: \"add\" | \"remove\" | \"replace\";\n contents: string[];\n }>;\n isPermanent?: boolean;\n }>\n ) {\n const { changes, isPermanent } = action.payload;\n for (const { kindId, contents, updateType } of changes) {\n const previousContents = getDeferredProperty(\n state.kinds.entities[kindId],\n \"containing\"\n );\n\n if (!state.kinds.entities[kindId]) continue;\n\n const newContents = updateContents(\n previousContents,\n contents,\n updateType\n );\n if (isPermanent) {\n state.kinds.entities[kindId].saved.containing = newContents;\n state.kinds.entities[kindId].changes = {};\n } else {\n kindsAdapter.updateOne(state.kinds, {\n id: kindId,\n changes: { containing: newContents },\n });\n }\n }\n },\n updateKindCategories(\n state,\n action: PayloadAction<{\n changes: Array<{\n kindId: string;\n updateType: \"add\" | \"remove\" | \"replace\";\n categories: string[];\n }>;\n isPermanent?: boolean;\n }>\n ) {\n const { changes, isPermanent } = action.payload;\n\n for (const { kindId, categories, updateType } of changes) {\n if (!state.kinds.entities[kindId]) continue;\n const previousCategories = getDeferredProperty(\n state.kinds.entities[kindId],\n \"categories\"\n );\n\n const newCategories = updateContents(\n previousCategories,\n categories,\n updateType\n );\n if (isPermanent) {\n state.kinds.entities[kindId].saved.categories = newCategories;\n state.kinds.entities[kindId].changes = {};\n } else {\n 
kindsAdapter.updateOne(state.kinds, {\n id: kindId,\n changes: { categories: newCategories },\n });\n }\n }\n },\n deleteKind(\n state,\n action: PayloadAction<{\n deletedKindId: string;\n isPermanent?: boolean;\n }>\n ) {\n const { deletedKindId, isPermanent } = action.payload;\n const deletedKind = getCompleteEntity(\n state.kinds.entities[deletedKindId]\n );\n if (!deletedKind) return;\n const kindThings = deletedKind.containing;\n const kindCats = deletedKind.categories;\n\n dataSlice.caseReducers.deleteThings(state, {\n type: \"deleteThings\",\n payload: {\n thingIds: kindThings,\n isPermanent,\n disposeColorTensors: true,\n preparedByListener: true,\n },\n });\n dataSlice.caseReducers.deleteCategories(state, {\n type: \"deleteCategories\",\n payload: {\n categoryIds: kindCats,\n isPermanent,\n },\n });\n\n if (isPermanent) {\n mutatingFilter(state.kinds.ids, (id) => id !== deletedKindId);\n delete state.kinds.entities[deletedKindId];\n } else {\n kindsAdapter.removeOne(state.kinds, deletedKindId);\n }\n },\n addCategories(\n state,\n action: PayloadAction<{\n categories: Array;\n isPermanent?: boolean;\n }>\n ) {\n const { categories, isPermanent } = action.payload;\n for (const category of categories) {\n if (state.categories.ids.includes(category.id)) continue;\n\n dataSlice.caseReducers.updateKindCategories(state, {\n type: \"updateKindCategories\",\n payload: {\n changes: [\n {\n kindId: category.kind,\n updateType: \"add\",\n categories: [category.id],\n },\n ],\n isPermanent,\n },\n });\n\n categoriesAdapter.addOne(state.categories, category);\n if (isPermanent) {\n state.categories.entities[category.id].changes = {};\n }\n }\n },\n createCategory(\n state,\n action: PayloadAction<{\n name: string;\n color: string;\n kind: string;\n isPermanent?: boolean;\n }>\n ) {\n const { name, color, isPermanent, kind } = action.payload;\n\n let kindsToUpdate = [];\n\n if (kind === \"all\") {\n kindsToUpdate = state.kinds.ids;\n } else {\n kindsToUpdate.push(kind);\n }\n\n let id = generateUUID();\n let idIsUnique = !state.categories.ids.includes(id);\n\n while (!idIsUnique) {\n id = generateUUID();\n idIsUnique = !state.categories.ids.includes(id);\n }\n if (isPermanent) {\n state.categories.entities[id] = {\n saved: {\n id: id,\n name: name,\n color: color,\n visible: true,\n containing: [],\n kind: kind,\n } as Category,\n changes: {},\n };\n } else {\n categoriesAdapter.addOne(state.categories, {\n id: id,\n name: name,\n color: color,\n visible: true,\n containing: [],\n kind: kind,\n } as Category);\n }\n\n kindsToUpdate.forEach((kind) =>\n dataSlice.caseReducers.updateKindCategories(state, {\n type: \"updateKindCategories\",\n payload: {\n changes: [\n {\n kindId: kind as string,\n updateType: \"add\",\n categories: [id],\n },\n ],\n isPermanent,\n },\n })\n );\n },\n updateCategory(\n state,\n action: PayloadAction<{\n updates: CategoryUpdates;\n isPermanent?: boolean;\n }>\n ) {\n let { updates, isPermanent } = action.payload;\n\n const id = updates.id;\n\n if (isPermanent) {\n state.categories.entities[id].saved = {\n ...state.categories.entities[id].saved,\n ...updates.changes,\n };\n }\n categoriesAdapter.updateOne(state.categories, {\n id: id,\n changes: updates,\n });\n },\n updateCategoryContents(\n state,\n action: PayloadAction<{\n changes: Array<{\n categoryId: string;\n updateType: \"add\" | \"remove\" | \"replace\";\n contents: string[];\n }>;\n isPermanent?: boolean;\n }>\n ) {\n const { changes, isPermanent } = action.payload;\n for (const { categoryId, contents, 
updateType } of changes) {\n if (!state.categories.entities[categoryId]) continue;\n const previousContents = getDeferredProperty(\n state.categories.entities[categoryId],\n \"containing\"\n );\n\n const newContents = updateContents(\n previousContents,\n contents,\n updateType\n );\n if (isPermanent) {\n state.categories.entities[categoryId].saved.containing = newContents;\n state.categories.entities[categoryId].changes = {};\n } else {\n categoriesAdapter.updateOne(state.categories, {\n id: categoryId,\n changes: { containing: newContents },\n });\n }\n }\n },\n\n setCategories(\n state,\n action: PayloadAction<{\n categories: Array;\n isPermanent?: boolean;\n }>\n ) {\n const { categories, isPermanent } = action.payload;\n\n dataSlice.caseReducers.deleteCategories(state, {\n type: \"deleteCategories\",\n payload: { categoryIds: \"all\", isPermanent },\n });\n dataSlice.caseReducers.addCategories(state, {\n type: \"addCategories\",\n payload: {\n categories: categories,\n isPermanent: isPermanent,\n },\n });\n },\n\n deleteCategories(\n state,\n action: PayloadAction<{\n categoryIds: string[] | \"all\";\n isPermanent?: boolean;\n }>\n ) {\n let { categoryIds, isPermanent } = action.payload;\n if (categoryIds === \"all\") {\n categoryIds = state.categories.ids as string[];\n }\n for (const categoryId of categoryIds) {\n if (isUnknownCategory(categoryId)) continue;\n if (isPermanent) {\n delete state.categories.entities[categoryId];\n mutatingFilter(state.categories.ids, (catId) => catId !== categoryId);\n } else {\n categoriesAdapter.removeOne(state.categories, categoryId);\n }\n }\n },\n removeCategoriesFromKind(\n state,\n action: PayloadAction<{\n categoryIds: string[] | \"all\";\n kind: string;\n isPermanent?: boolean;\n }>\n ) {\n //HACK: Should check for empty category. 
if category empty, delete completely\n let { categoryIds, kind, isPermanent } = action.payload;\n if (categoryIds === \"all\") {\n categoryIds = state.categories.ids as string[];\n }\n\n for (const categoryId of categoryIds) {\n if (isUnknownCategory(categoryId)) continue;\n\n dataSlice.caseReducers.updateKindCategories(state, {\n type: \"updateKindCategories\",\n payload: {\n changes: [\n { kindId: kind, updateType: \"remove\", categories: [categoryId] },\n ],\n isPermanent,\n },\n });\n const thingsOfKind = getDeferredProperty(\n state.kinds.entities[kind],\n \"containing\"\n );\n const thingsOfCategory = getDeferredProperty(\n state.categories.entities[categoryId],\n \"containing\"\n );\n const thingsToRemove = intersection(thingsOfKind, thingsOfCategory);\n\n dataSlice.caseReducers.updateCategoryContents(state, {\n type: \"updateCategoryContents\",\n payload: {\n changes: [\n {\n categoryId: categoryId,\n updateType: \"remove\",\n contents: thingsToRemove,\n },\n ],\n isPermanent,\n },\n });\n dataSlice.caseReducers.updateCategoryContents(state, {\n type: \"updateCategoryContents\",\n payload: {\n changes: [\n {\n categoryId: state.kinds.entities[kind].saved.unknownCategoryId,\n updateType: \"add\",\n contents: thingsToRemove,\n },\n ],\n isPermanent,\n },\n });\n const thingUpdates = thingsToRemove.map((thing) => ({\n id: thing,\n categoryId: state.kinds.entities[kind].saved.unknownCategoryId,\n }));\n\n dataSlice.caseReducers.updateThings(state, {\n type: \"updateThings\",\n payload: { updates: thingUpdates, isPermanent },\n });\n }\n },\n\n addThings(\n state,\n action: PayloadAction<{\n things: Array;\n isPermanent?: boolean;\n }>\n ) {\n const { things, isPermanent } = action.payload;\n for (const thing of things) {\n const [name, ext] = thing.name!.split(\".\");\n\n const existingImageIds =\n state.kinds.entities[thing.kind]?.saved.containing ?? 
[];\n\n const existingPrefixes = Object.values(existingImageIds).map(\n (id) =>\n (\n getDeferredProperty(\n state.things.entities[id] as DeferredEntity,\n \"name\"\n ) as string\n ).split(\".\")[0]\n );\n\n let updatedNamePrefix = newReplaceDuplicateName(name, existingPrefixes);\n\n if (ext) {\n updatedNamePrefix += `.${ext}`;\n }\n\n thing.name = updatedNamePrefix;\n if (state.kinds.entities[thing.kind]) {\n dataSlice.caseReducers.updateKindContents(state, {\n type: \"updateKindContents\",\n payload: {\n changes: [\n { kindId: thing.kind, contents: [thing.id], updateType: \"add\" },\n ],\n isPermanent,\n },\n });\n } else {\n const unknownCategoryId = generateUUID({ definesUnknown: true });\n const unknownCategory: Category = {\n id: unknownCategoryId,\n name: UNKNOWN_CATEGORY_NAME,\n color: UNKNOWN_IMAGE_CATEGORY_COLOR,\n containing: [],\n kind: thing.kind,\n visible: true,\n };\n dataSlice.caseReducers.addCategories(state, {\n type: \"addCategories\",\n payload: { categories: [unknownCategory] },\n });\n dataSlice.caseReducers.addKinds(state, {\n type: \"addKinds\",\n payload: {\n kinds: [\n {\n id: thing.kind,\n containing: [thing.id],\n categories: [unknownCategoryId],\n unknownCategoryId,\n },\n ],\n isPermanent,\n },\n });\n }\n if (\"imageId\" in thing) {\n dataSlice.caseReducers.updateThingContents(state, {\n type: \"updateThingContents\",\n payload: {\n changes: [\n {\n thingId: thing.imageId,\n contents: [thing.id],\n updateType: \"add\",\n },\n ],\n isPermanent,\n },\n });\n }\n\n dataSlice.caseReducers.updateCategoryContents(state, {\n type: \"updateThingContents\",\n payload: {\n changes: [\n {\n categoryId: thing.categoryId,\n contents: [thing.id],\n updateType: \"add\",\n },\n ],\n isPermanent,\n },\n });\n\n thingsAdapter.addOne(state.things, thing);\n if (isPermanent) {\n state.things.entities[thing.id].changes = {};\n }\n }\n },\n addAnnotations(\n state,\n action: PayloadAction<{\n annotations: Array;\n isPermanent?: boolean;\n }>\n ) {\n const { annotations, isPermanent } = action.payload;\n const encodedAnnotations: AnnotationObject[] = [];\n for (const annotation of annotations) {\n if (state.things.ids.includes(annotation.id)) continue;\n\n if (annotation.decodedMask) {\n (annotation as AnnotationObject).encodedMask = encode(\n annotation.decodedMask\n );\n delete annotation.decodedMask;\n }\n encodedAnnotations.push(annotation as AnnotationObject);\n }\n dataSlice.caseReducers.addThings(state, {\n type: \"addThings\",\n payload: { things: encodedAnnotations, isPermanent },\n });\n },\n // Sets the category for the inference images back to Unknown\n clearPredictions(\n state,\n action: PayloadAction<{ kind: string; isPermanent?: boolean }>\n ) {\n const { isPermanent, kind } = action.payload;\n if (!(kind in state.kinds.entities)) return;\n\n const updates: Array<\n { id: string } & (Partial | Partial)\n > = [];\n\n const thingIds = getDeferredProperty(\n state.kinds.entities[kind],\n \"containing\"\n );\n\n thingIds.forEach((id) => {\n const thing = getCompleteEntity(state.things.entities[id]);\n if (!thing) return;\n if (thing.partition === Partition.Inference) {\n updates.push({\n id: id as string,\n categoryId: state.kinds.entities[kind].saved.unknownCategoryId,\n });\n }\n });\n\n dataSlice.caseReducers.updateThings(state, {\n type: \"updateThings\",\n payload: {\n updates: updates,\n isPermanent: isPermanent,\n },\n });\n },\n acceptPredictions(\n state,\n action: PayloadAction<{ kind: string; isPermanent?: boolean }>\n ) {\n const { isPermanent, kind } = 
action.payload;\n if (!(kind in state.kinds.entities)) return;\n const thingIds = getDeferredProperty(\n state.kinds.entities[kind],\n \"containing\"\n );\n const updates: Array<\n { id: string } & (Partial | Partial)\n > = [];\n thingIds.forEach((id) => {\n const thing = getCompleteEntity(state.things.entities[id]);\n if (!thing) return;\n const imagePartition = thing.partition;\n const categoryId = thing.categoryId;\n if (\n imagePartition === Partition.Inference &&\n !isUnknownCategory(categoryId)\n ) {\n updates.push({\n id: id as string,\n partition: Partition.Unassigned,\n });\n }\n });\n dataSlice.caseReducers.updateThings(state, {\n type: \"updateThings\",\n payload: {\n updates: updates,\n isPermanent: isPermanent,\n },\n });\n },\n\n updateThings(\n state,\n action: PayloadAction<{\n updates: ThingsUpdates;\n isPermanent?: boolean;\n }>\n ) {\n const { updates, isPermanent } = action.payload;\n\n for (const update of updates) {\n const { id, ...changes } = update;\n\n if (!state.things.ids.includes(id)) continue;\n\n if (\"categoryId\" in changes) {\n const oldCategory = getDeferredProperty(\n state.things.entities[id],\n \"categoryId\"\n );\n dataSlice.caseReducers.updateCategoryContents(state, {\n type: \"updateCategoryContents\",\n payload: {\n changes: [\n {\n categoryId: oldCategory,\n updateType: \"remove\",\n contents: [id],\n },\n ],\n isPermanent,\n },\n });\n dataSlice.caseReducers.updateCategoryContents(state, {\n type: \"updateCategoryContents\",\n payload: {\n changes: [\n {\n categoryId: changes.categoryId!,\n updateType: \"add\",\n contents: [id],\n },\n ],\n isPermanent,\n },\n });\n }\n\n if (isPermanent) {\n Object.assign(state.things.entities[id].saved, changes);\n } else {\n thingsAdapter.updateOne(state.things, { id, changes });\n }\n }\n },\n updateThingName(\n state,\n action: PayloadAction<{ id: string; name: string; isPermanent: boolean }>\n ) {\n const { id, name, isPermanent } = action.payload;\n const changes: Array<{ id: string; name: string }> = [{ id, name }];\n const thing = getCompleteEntity(state.things.entities[id]);\n if (thing) {\n if (\"containing\" in thing) {\n const containedThingIds = thing.containing;\n containedThingIds.forEach((containedId) => {\n const containedThing = getCompleteEntity(\n state.things.entities[containedId]\n );\n if (containedThing) {\n const containedThingName = containedThing.name;\n if (containedThing.name.includes(thing.name)) {\n changes.push({\n id: containedId,\n name: containedThingName.replace(thing.name, name),\n });\n }\n }\n });\n }\n }\n dataSlice.caseReducers.updateThings(state, {\n type: \"updateThings\",\n payload: { updates: changes, isPermanent },\n });\n },\n updateThingContents(\n state,\n action: PayloadAction<{\n changes: Array<{\n thingId: string;\n updateType: \"add\" | \"remove\" | \"replace\";\n contents: string[];\n }>;\n isPermanent?: boolean;\n }>\n ) {\n const { changes, isPermanent } = action.payload;\n for (const { thingId, contents, updateType } of changes) {\n const thing = state.things.entities[\n thingId\n ] as DeferredEntity;\n if (!(\"containing\" in state.things.entities[thingId].saved)) continue;\n const previousContents = getDeferredProperty(thing, \"containing\");\n\n if (!state.things.entities[thingId]) continue;\n\n const newContents = updateContents(\n previousContents,\n contents,\n updateType\n );\n if (isPermanent) {\n thing.saved.containing = newContents;\n //TODO: Change so entire changes object isnt removed\n thing.changes = {};\n } else {\n 
thingsAdapter.updateOne(state.things, {\n id: thingId,\n changes: { containing: newContents },\n });\n }\n }\n },\n deleteThings(\n state,\n action: PayloadAction<\n | {\n thingIds: Array | \"all\" | \"annotations\";\n activeKind?: string;\n disposeColorTensors: boolean;\n isPermanent?: boolean;\n preparedByListener?: boolean;\n }\n | {\n ofKinds: Array;\n activeKind?: string;\n disposeColorTensors: boolean;\n isPermanent?: boolean;\n preparedByListener?: boolean;\n }\n | {\n ofCategories: Array;\n activeKind: string;\n disposeColorTensors: boolean;\n isPermanent?: boolean;\n preparedByListener?: boolean;\n }\n >\n ) {\n if (!action.payload.preparedByListener) return;\n if (!(\"thingIds\" in action.payload)) return;\n const { thingIds, disposeColorTensors, isPermanent } = action.payload;\n const imageChanges: Record<\n string,\n {\n thingId: string;\n updateType: \"add\" | \"remove\" | \"replace\";\n contents: string[];\n }\n > = {};\n const imageChangesArray: Array<{\n thingId: string;\n updateType: \"add\" | \"remove\" | \"replace\";\n contents: string[];\n }> = [];\n for (const thingId of [...thingIds]) {\n const thingEntity = state.things.entities[thingId];\n const thing = getCompleteEntity(state.things.entities[thingId]);\n\n if (!thing) continue;\n\n if (\"containing\" in thing) {\n const thingContents = thing.containing;\n\n if (thingContents) {\n for (const containedThingId of thingContents) {\n const containedThing = state.things.entities[containedThingId];\n if (!containedThing) continue;\n\n const thingKind = getDeferredProperty(containedThing, \"kind\");\n const thingCategoryId = getDeferredProperty(\n containedThing,\n \"categoryId\"\n );\n const kind = state.kinds.entities[thingKind];\n const category = state.categories.entities[thingCategoryId];\n if (isPermanent) {\n if (disposeColorTensors) {\n dispose(containedThing.saved.data as TensorContainer);\n dispose(containedThing.changes as TensorContainer);\n }\n\n /* UPDATE KIND'S CONTAINING LIST */\n mutatingFilter(\n kind.saved.containing,\n (containedId) => containedId !== containedThingId\n );\n if (kind.changes.containing) {\n mutatingFilter(\n kind.changes.containing,\n (containedId) => containedId !== containedThingId\n );\n }\n /* UPDATE CATEGORY'S CONTAINING LIST */\n mutatingFilter(\n category.saved.containing,\n (thingId) => thingId !== containedThingId\n );\n if (category.changes.containing) {\n mutatingFilter(\n category.changes.containing,\n (thingId) => thingId !== containedThingId\n );\n }\n\n /* REMOVE THING */\n delete state.things.entities[containedThingId];\n mutatingFilter(\n state.things.ids,\n (thingId) => thingId !== containedThingId\n );\n } else {\n kind.changes.containing = getDeferredProperty(\n kind,\n \"containing\"\n ).filter((thingId) => thingId !== containedThingId);\n category.changes.containing = getDeferredProperty(\n category,\n \"containing\"\n ).filter((thingId) => thingId !== containedThingId);\n\n thingsAdapter.removeOne(state.things, containedThingId);\n }\n }\n }\n } else {\n const imageId = thing.imageId;\n\n if (imageId in imageChanges) {\n imageChanges[imageId].contents.push(thingId);\n } else {\n imageChanges[imageId] = {\n thingId: imageId,\n updateType: \"remove\",\n contents: [thingId],\n };\n }\n }\n const thingKind = thing.kind;\n const thingCategoryId = thing.categoryId;\n\n const kind = state.kinds.entities[thingKind];\n const category = state.categories.entities[thingCategoryId];\n if (isPermanent) {\n if (disposeColorTensors) {\n dispose(thingEntity.saved.data as 
TensorContainer);\n dispose(thingEntity.changes as TensorContainer);\n }\n\n /* UPDATE KIND'S CONTAINING LIST */\n\n mutatingFilter(\n kind.saved.containing,\n (containedId) => containedId !== thingId\n );\n if (kind.changes.containing) {\n mutatingFilter(\n kind.changes.containing,\n (containedId) => containedId !== thingId\n );\n }\n\n /* UPDATE CATEGORY'S CONTAINING LIST */\n mutatingFilter(\n category.saved.containing,\n (_thingId) => _thingId !== thingId\n );\n if (category.changes.containing) {\n mutatingFilter(\n category.changes.containing,\n (_thingId) => _thingId !== thingId\n );\n }\n\n /* REMOVE THING */\n delete state.things.entities[thingId];\n mutatingFilter(state.things.ids, (_thingId) => _thingId !== thingId);\n } else {\n kind.changes.containing = getDeferredProperty(\n kind,\n \"containing\"\n ).filter((_thingId) => _thingId !== thingId);\n category.changes.containing = getDeferredProperty(\n category,\n \"containing\"\n ).filter((_thingId) => _thingId !== thingId);\n\n thingsAdapter.removeOne(state.things, thingId);\n }\n }\n for (let [imageId, changes] of Object.entries(imageChanges)) {\n if (!thingIds.includes(imageId)) {\n imageChangesArray.push(changes);\n }\n }\n dataSlice.caseReducers.updateThingContents(state, {\n type: \"updateThingContents\",\n payload: { changes: imageChangesArray, isPermanent },\n });\n },\n\n reconcile(\n state,\n action: PayloadAction<{\n keepChanges: boolean;\n }>\n ) {\n if (action.payload.keepChanges) {\n dataSlice.caseReducers.keepChanges(state);\n } else {\n dataSlice.caseReducers.revertChanges(state);\n }\n },\n\n revertChanges(state) {\n const kindsToRemove = [];\n const categoriesToRemove = [];\n const thingsToRemove = [];\n for (const id of state.kinds.ids) {\n const kind = state.kinds.entities[id];\n if (!kind.changes) continue;\n if (\"added\" in kind.changes) {\n kindsToRemove.push(id);\n delete state.kinds.entities[id];\n } else {\n kind.changes = {};\n }\n }\n state.kinds.ids = updateContents(\n [...state.kinds.ids] as string[],\n kindsToRemove as string[],\n \"remove\"\n );\n for (const id of state.categories.ids) {\n const category = state.categories.entities[id];\n if (!category.changes) continue;\n if (\"added\" in category.changes) {\n categoriesToRemove.push(id);\n delete state.categories.entities[id];\n } else {\n category.changes = {};\n }\n }\n state.categories.ids = updateContents(\n [...state.categories.ids] as string[],\n categoriesToRemove as string[],\n \"remove\"\n );\n for (const id of state.things.ids) {\n const thing = state.things.entities[id];\n if (!thing.changes) continue;\n if (\"added\" in thing.changes) {\n dispose(thing.saved.data as TensorContainer);\n dispose(thing.changes.data as TensorContainer);\n thingsToRemove.push(id);\n delete state.things.entities[id];\n } else {\n dispose(thing.changes.data as TensorContainer);\n thing.changes = {};\n }\n }\n state.things.ids = updateContents(\n [...state.things.ids] as string[],\n thingsToRemove as string[],\n \"remove\"\n );\n },\n keepChanges(state) {\n const kindsToRemove = [];\n const categoriesToRemove = [];\n const thingsToRemove = [];\n for (const id of state.kinds.ids) {\n const kind = state.kinds.entities[id];\n\n if (!kind.changes) continue;\n if (\"deleted\" in kind.changes) {\n kindsToRemove.push(id);\n continue;\n } else {\n let { added, deleted, ...preparedDeferred } = kind.changes;\n Object.assign(kind.saved, preparedDeferred);\n kind.changes = {};\n }\n }\n state.kinds.ids = updateContents(\n [...state.kinds.ids] as string[],\n 
kindsToRemove as string[],\n \"remove\"\n );\n for (const id of state.categories.ids) {\n const category = state.categories.entities[id];\n if (!category.changes) continue;\n if (\"deleted\" in category.changes) {\n categoriesToRemove.push(id);\n delete state.categories.entities[id];\n } else {\n let { added, deleted, ...preparedDeferred } = category.changes;\n Object.assign(category.saved, preparedDeferred);\n category.changes = {};\n }\n }\n state.categories.ids = updateContents(\n [...state.categories.ids] as string[],\n categoriesToRemove as string[],\n \"remove\"\n );\n for (const id of state.things.ids) {\n const thing = state.things.entities[id];\n if (!thing.changes) continue;\n if (\"deleted\" in thing.changes) {\n dispose(thing.saved.data as TensorContainer);\n dispose(thing.changes.data as TensorContainer);\n thingsToRemove.push(id);\n delete state.things.entities[id];\n } else {\n let { added, deleted, ...preparedDeferred } = thing.changes;\n Object.assign(thing.saved, preparedDeferred);\n thing.changes = {};\n }\n }\n state.things.ids = updateContents(\n [...state.things.ids] as string[],\n thingsToRemove as string[],\n \"remove\"\n );\n },\n },\n});\n","import { createSelector } from \"@reduxjs/toolkit\";\nimport { kindsAdapter, categoriesAdapter, thingsAdapter } from \"./dataSlice\";\nimport { RootState } from \"store/rootReducer\";\nimport { AnnotationObject, Category, ImageObject, Kind } from \"./types\";\nimport { intersection } from \"lodash\";\n\nconst kindsSelectors = kindsAdapter.getSelectors(\n (state: RootState) => state.newData.kinds\n);\nconst categorySelectors = categoriesAdapter.getSelectors(\n (state: RootState) => state.newData.categories\n);\nconst thingsSelectors = thingsAdapter.getSelectors(\n (state: RootState) => state.newData.things\n);\n\nexport const selectKindDictionary = kindsSelectors.selectEntities; // returns kinds dict\nexport const selectAllKinds = kindsSelectors.selectAll; // returns an array\nexport const selectAllKindIds = kindsSelectors.selectIds;\nexport const selectTotalKindCount = kindsSelectors.selectTotal;\n\nexport const selectCategoriesDictionary = categorySelectors.selectEntities; // returns dict\nexport const selectAllCategories = categorySelectors.selectAll; // returns an array\nexport const selectAllCategoryIds = categorySelectors.selectIds;\nexport const selectTotalCategoryCount = categorySelectors.selectTotal;\nexport const selectCategoryById = categorySelectors.selectById;\n\nexport const selectThingsDictionary = thingsSelectors.selectEntities; // returns dict\nexport const selectAllThings = thingsSelectors.selectAll; // returns an array\nexport const selectAllThingIds = thingsSelectors.selectIds;\nexport const selectTotalThingCount = thingsSelectors.selectTotal;\n\nexport const selectDataProject = createSelector(\n selectAllKinds,\n selectAllCategories,\n selectAllThings,\n (kinds, categories, things) => {\n return { kinds, categories, things };\n }\n);\n\n/*\n KINDS\n*/\n\nexport const selectAllObjectKinds = createSelector(selectAllKinds, (kinds) => {\n return kinds.filter((kind) => kind.id !== \"Image\");\n});\n\nexport const selectObjectKindDict = createSelector(\n selectAllObjectKinds,\n (kinds) => {\n return kinds.reduce((kindDict: Record, kind) => {\n kindDict[kind.id] = kind;\n return kindDict;\n }, {});\n }\n);\n\n/*\n THINGS\n*/\n\nexport const selectAllImages = createSelector(selectAllThings, (things) => {\n return things.filter((thing) => thing.kind === \"Image\") as ImageObject[];\n});\n\nexport const 
selectAllObjects = createSelector(selectAllThings, (things) => {\n return things.filter((thing) => thing.kind !== \"Image\") as AnnotationObject[];\n});\n\nexport const selectSplitThingDict = createSelector(\n selectAllThings,\n (things) => {\n return things.reduce(\n (\n splitDict: {\n images: Record;\n objects: Record;\n },\n thing\n ) => {\n if (thing.kind === \"Image\") {\n splitDict.images[thing.id] = thing as ImageObject;\n } else {\n splitDict.objects[thing.id] = thing as AnnotationObject;\n }\n return splitDict;\n },\n { images: {}, objects: {} }\n );\n }\n);\n\nexport const selectThingsOfKind = createSelector(\n [selectKindDictionary, selectThingsDictionary],\n (kindDict, thingDict) => {\n return (kind: string) => {\n const thingsOfKind = kindDict[kind].containing;\n return thingsOfKind.map((thingId) => thingDict[thingId]);\n };\n }\n);\n\nexport const selectNumThingsByCatAndKind = createSelector(\n selectKindDictionary,\n selectCategoriesDictionary,\n (kindDict, catDict) => (catId: string, kind: string) => {\n const thingsOfKind = kindDict[kind].containing;\n const thingsOfCat = catDict[catId].containing;\n\n return intersection(thingsOfCat, thingsOfKind).length;\n }\n);\n\nexport const selectAnnotatedImages = createSelector(\n selectThingsOfKind,\n (thingsByKind) => {\n const images = thingsByKind(\"Image\");\n return images.filter((image) => {\n if (\"containing\" in image) {\n return image.containing.length > 0;\n }\n return false;\n }) as ImageObject[];\n }\n);\n\n/*\n CATEGORIES\n*/\n\nexport const selectUnknownCategoryByKind = createSelector(\n selectKindDictionary,\n selectCategoriesDictionary,\n (kindDict, catDict) => {\n return (kind: string) => {\n const unknownCatId = kindDict[kind].unknownCategoryId;\n return catDict[unknownCatId];\n };\n }\n);\n\nexport const selectCategoriesByKind = createSelector(\n [selectKindDictionary, selectCategoriesDictionary],\n (kindDict, categoriesDict) => {\n return (kind: string) => {\n const categoriesOfKind = kindDict[kind].categories;\n return categoriesOfKind.map((catId) => categoriesDict[catId]);\n };\n }\n);\n\nexport const selectAllImageCategories = createSelector(\n selectAllCategories,\n (categories) => {\n return categories.filter((category) => category.kind !== \"Image\");\n }\n);\n\nexport const selectUnknownImageCategory = createSelector(\n selectKindDictionary,\n (kinds) => {\n return kinds[\"Image\"]?.unknownCategoryId;\n }\n);\n\nexport const selectAllObjectCategories = createSelector(\n selectAllCategories,\n (categories) => {\n return categories.filter((category) => category.kind !== \"Image\");\n }\n);\n\nexport const selectObjectCategoryDict = createSelector(\n selectAllObjectCategories,\n (categories) => {\n return categories.reduce((catDict: Record, c) => {\n catDict[c.id] = c;\n return catDict;\n }, {});\n }\n);\n\nexport const selectCategoryProperty = createSelector(\n selectCategoriesDictionary,\n (entities) =>\n (id: string, property: S) => {\n const category = entities[id];\n if (!category) return;\n return category[property];\n }\n);\n\nexport const selectFirstUnknownCategory = createSelector(\n selectAllKinds,\n selectCategoriesDictionary,\n (kinds, catDict) => {\n if (kinds.length < 2) return;\n const unknownCatId = kinds[1].unknownCategoryId;\n return catDict[unknownCatId];\n }\n);\n","export enum AnnotationExportType {\n LabeledSemanticMasks,\n Matrix,\n BinarySemanticMasks,\n BinaryInstances,\n LabeledInstances,\n COCO,\n PIXIMI,\n}\nexport enum ImageShapeEnum {\n DicomImage,\n GreyScale,\n 
SingleRGBImage,\n HyperStackImage,\n InvalidImage,\n}\n","import {\n dispose,\n fill,\n scalar,\n Tensor1D,\n tensor1d,\n tensor2d,\n Tensor2D,\n Tensor3D,\n tensor4d,\n Tensor4D,\n tidy,\n} from \"@tensorflow/tfjs\";\nimport * as ImageJS from \"image-js\";\nimport { ImageShapeEnum } from \"utils/file-io/enums\";\nimport { BitDepth, ImageShapeInfo } from \"utils/file-io/types\";\nimport { Partition } from \"utils/models/enums\";\nimport { generateUUID } from \"./helpers\";\nimport { DEFAULT_COLORS } from \"./constants\";\nimport { Colors } from \"./types\";\nimport { ImageObject } from \"store/data/types\";\nimport { UNKNOWN_IMAGE_CATEGORY_ID } from \"store/data/constants\";\n\nexport const getImageInformation = (\n image: ImageJS.Image | ImageJS.Stack\n): ImageShapeInfo => {\n // a \"proper\" RGB will be an ImageJS.Image object with 3 components\n if (!Array.isArray(image) && image.components === 3) {\n return {\n shape: ImageShapeEnum.SingleRGBImage,\n components: image.components,\n bitDepth: image.bitDepth,\n alpha: image.alpha === 1,\n };\n // 1 channel (greyscale) image will also be an ImageJs.Image object\n } else if (!Array.isArray(image) && image.components === 1) {\n return {\n shape: ImageShapeEnum.GreyScale,\n components: image.components,\n bitDepth: image.bitDepth,\n alpha: image.alpha === 1,\n };\n // should not happen\n } else if (!Array.isArray(image)) {\n process.env.NODE_ENV !== \"production\" &&\n console.error(\"Unrecognized Image.JS.Image type, channels not in [1,3]\");\n return {\n shape: ImageShapeEnum.InvalidImage,\n };\n }\n // else RGBstack, or multi-channel, or multi-z-stack image as an ImageJS.Stack object\n else {\n return {\n shape: ImageShapeEnum.HyperStackImage,\n components: image.length,\n bitDepth: image[0].bitDepth,\n alpha: image[0].alpha === 1,\n };\n }\n};\n\n/*\n ========================================\n ImageJS <-> ImageType Conversion Helpers\n ========================================\n */\n\n/*\n Receives an image stack, where each elem is an ImageJS.Image object,\n representing an image \"frame\", in the following order:\n [slice_1_channel_1, slice1_channel2, ..., slice_numSlices_channel_numChannels]\n\n Each frame of an image stack has a data array\n which is 1D, in row major format\n e.g [r1c1, r1c2, r1c3, r2c1, r2c2, r2c3]\n representing an image of rows = height = 2, cols = width = 3\n\n March through and form a 2d imageData array of shape [frames, pixels]\n e.g:\n\n [[ 0, 1, 2, 3, 4, 5],\n [10,11,12,13,14,15],\n [20,21,22,23,24,25],\n [30,31,32,33,34,35],\n [40,41,42,43,44,45],\n [50,51,52,53,54,55]]\n\n Tensorflow prefers image data to have a shape of [height,width,channels],\n but we cannot simply reshape the 2d matrix above in that way, since the\n data is not ordered such. 
Instead create 4d tensor of shape:\n [slices,channels,height,width]\n and then transpose into the preffered shape\n\n The image tensor is of type \"float32\", wich tensorflow expects to be\n in the range of 0-1, so normalize the tensor with the bitdepth of the\n image, if necessary\n\n Return the resulting imageTensor\n */\nexport const convertToTensor = (\n imageStack: ImageJS.Stack,\n numSlices: number,\n numChannels: number\n): Tensor4D => {\n const { bitDepth, width, height } = imageStack[0];\n\n const numPixels = height * width;\n\n // create empty 2d array of expected size\n const imageData = new Float32Array(numSlices * numChannels * numPixels);\n\n // fill in 2d array with image stack data\n // shape: [numFrames, numPixels]\n for (let i = 0; i < imageStack.length; i++) {\n imageData.set(Float32Array.from(imageStack[i].data), i * numPixels);\n }\n\n return tidy(\"stackToTensor\", () => {\n // convert to 4d tensor\n // shape: [Z, C, H, W]\n // then permute dims\n // shape: [Z, H, W, C]\n let imageTensor: Tensor4D = tensor4d(imageData, [\n numSlices,\n numChannels,\n height,\n width,\n ]).transpose([0, 2, 3, 1]);\n\n // normalize in range of 0-1, if not already\n if (!(imageStack[0].data instanceof Float32Array)) {\n const normScalar = scalar(2 ** bitDepth - 1);\n imageTensor = imageTensor.div(normScalar);\n }\n\n return imageTensor;\n });\n};\n\n/*\n receive image of dims: [Z, H, W, C]\n get slice corresponding to given index\n return image slice with dims: [H, W, C]\n */\nexport const getImageSlice = (\n imageTensor: Tensor4D,\n sliceIdx: number,\n opts: { disposeImageTensor: boolean } = { disposeImageTensor: false }\n): Tensor3D => {\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n const [_, height, width, numChannels] = imageTensor.shape;\n\n return tidy(\"getImageSlice\", () => {\n const res = imageTensor\n .slice([sliceIdx], [1, height, width, numChannels])\n .reshape([height, width, numChannels]) as Tensor3D;\n // gc input tensor\n opts.disposeImageTensor && imageTensor.dispose();\n return res;\n });\n};\n\n// filter out channels with visibility true in image colors\nexport const filterVisibleChannels = (colors: Colors): Array => {\n /*\n colors.visible has shape { [channel: boolean]: boolean; }\n Object.entries(colors.visible) produces [string, boolean][],\n e.g. [ ['0', true], ['1', false], ['2', true] ]\n filter out the ones that have true as the value in index 1\n map remaining tuples to the int of the channel number in index 0\n resulting in channel nums that are tagged visible,\n e.g. 
[0, 2]\n */\n return Object.entries(colors.visible)\n .filter((channelVisible) => channelVisible[1])\n .map((channelVisible) => parseInt(channelVisible[0]));\n};\n\n/*\n for input of Tensor3D:\n\n receive image slice of dims:\n [H, W, C]\n and filter array with channels to include in result\n VC = num_visible_channels = filter.length\n\n return color filtered image slice of dims:\n [H, W, VC]\n\n for input of Tensor4D:\n\n receive image slice of dims:\n [Z, H, W, C]\n filter and return image slice of dims:\n [Z, H, W, VC]\n */\nexport const sliceVisibleChannels = (\n imageSlice: T,\n filter: Array,\n opts: { disposeImageSlice: boolean } = { disposeImageSlice: true }\n): T => {\n // channel axis is innermost\n const channelAxis = imageSlice.rank - 1;\n\n return tidy(\"sliceVisibleChannels\", () => {\n const indices = tensor1d(filter, \"int32\");\n\n // form a new 3D/4D tensor, gathering only channels in the indices matching the filter\n // channel axis is innermost, 2\n const res = (imageSlice as T).gather(indices, channelAxis);\n // gc input tensor\n opts.disposeImageSlice && imageSlice.dispose();\n return res;\n });\n};\n\n/*\n receive image colors containing color matrix of dims:\n [C, 3]\n and filter array with idx of color triples to include in result\n VC = num_visible_channels = filter.length\n\n return filtered color matrix of dims:\n [VC, 3]\n */\nexport const sliceVisibleColors = (\n colors: Colors,\n filter: Array\n): Tensor2D => {\n // channel axis is outermost\n const channelAxis = 0;\n\n return tidy(\"sliceVisibleColors\", () => {\n const indices = tensor1d(filter, \"int32\");\n\n // form a new 2D tensor, gathering only triples in indices matching filter\n return colors.color.gather(indices, channelAxis);\n });\n};\n\n/*\n for input of Tensor3D:\n\n receive image slice (with channels filtered for visibility) of shape:\n [H, W, VC]\n reshape input image slice to shape:\n [pixels, VC]\n\n e.g. if input image slice of shape [3, 2, 4] is:\n [ [[a, b, c, d],\n [e, f, g, h]],\n\n [[i, j, k, l],\n [m, n, o, p]],\n \n [[q, r, s, t],\n [u, v, w, x]] ]\n\n reshape to be of shape [6, 4]:\n [[a, b, c, d],\n [e, f, g, h],\n [i, j, k, l],\n [m, n, o, p],\n [q, r, s, t],\n [u, v, w, x]]\n\n apply colors to image slice, with colors.color of shape:\n [VC, 3]\n\n e.g. [4, 3]\n\n [[r1, g1, b1],\n [r2, g2, b2],\n [r3, g3, b3],\n [r4, g4, b4]]\n\n resulting in shape [pixels, 3]\n\n which is reshaped to [height, width, 3] and returned\n\n for input of Tensor4D, all of the above happens, however\n receive image of shape [Z, H, W, VC],\n reshape to [Z, pixels, VC]\n\n take colors of shape [VC, 3] and broad cast to shape [Z, VC, 3],\n where each element of the Z dim is just a copy,\n and apply, resulting in [Z, pixels, 3],\n which is then reshaped to [Z, H, W, 3] and returned\n\n if opts.scaleMinMax is set to true, each channel will be\n normalized from the range [min_channel_value, max_channel_value]\n to the range [0, 2**bitDepth-1]\n */\nexport const generateColoredTensor = (\n imageSlice: T,\n colors: Tensor2D,\n opts: {\n disposeImageSlice?: boolean;\n disposeColors?: boolean;\n } = {}\n): T => {\n opts.disposeImageSlice = opts.disposeImageSlice ?? true;\n opts.disposeColors = opts.disposeColors ?? true;\n\n if (imageSlice.rank === 3) {\n const [height, width, numVisibleChannels] = (imageSlice as Tensor3D).shape;\n\n return tidy(\"generateColoredTensor\", () => {\n const res: T =\n numVisibleChannels > 0\n ? 
imageSlice\n // [pixels, VC]\n .reshape([height * width, numVisibleChannels])\n // [pixels, VC] * [VC, 3] = [pixels, 3]\n .matMul(colors)\n // make sure composite is clamped to proper range for float32\n .clipByValue(0, 1)\n // [H, W, 3]\n .reshape([height, width, 3])\n : // if no visible channels, return tensor of all 0s: [H, W, 3]\n (fill([height, width, 1], 0) as T);\n\n opts.disposeImageSlice && imageSlice.dispose();\n opts.disposeColors && colors.dispose();\n\n return res;\n });\n } else {\n const [slices, height, width, numVisibleChannels] = (imageSlice as Tensor4D)\n .shape;\n\n return tidy(\"generateColoredTensor\", () => {\n const res: T =\n numVisibleChannels > 0\n ? imageSlice\n // [Z, pixels, VC]\n .reshape([slices, height * width, numVisibleChannels])\n // [Z, pixels, VC] * broadcast_on_Z( [VC, 3] ) = [Z, pixels, 3]\n .matMul(colors)\n // make sure composite is clamped to proper range for float32\n .clipByValue(0, 1)\n // [Z, H, W, 3]\n .reshape([slices, height, width, 3])\n : // if no visible channels, return tensor of all 0s: [H, W, 3]\n (fill([slices, height, width, 3], 0) as T);\n\n opts.disposeImageSlice && imageSlice.dispose();\n opts.disposeColors && colors.dispose();\n\n return res;\n });\n }\n};\n\n/* \n Gets a normalized tensor, where values are in the float range [0, 1],\n and returns a denormalized one, where values are in the integer range\n determined by the given bitdepth, [0, 2**bitDepth - 1]\n\n note: wrt \"integer range\" above, the tensor data type is not converted\n from \"float32\" to \"int32\"; it remains as \"float32\" but the values are all \"n.0\",\n where \"n\" is in the integer range [0, 2**bitDepth - 1]\n */\nexport const denormalizeTensor = (\n normalTensor: T,\n bitDepth: BitDepth,\n opts: { disposeNormalTensor: boolean } = { disposeNormalTensor: true }\n) => {\n const denormalizedTensor = tidy(() =>\n normalTensor.mul(2 ** bitDepth - 1).round()\n ) as T;\n\n opts.disposeNormalTensor && normalTensor.dispose();\n\n return denormalizedTensor;\n};\n\n/*\n Receives a tensor of shape [H,W,C] or [Z,H,W,C]\n normalizes it based on the bit depth\n and returns the correct TypedArray view on the buffer data\n */\nconst getImageTensorData = async (\n imageTensor: Tensor3D | Tensor4D,\n bitDepth: BitDepth,\n opts: { disposeImageTensor: boolean } = { disposeImageTensor: true }\n) => {\n //NOTE: Split the following into two steps. 
Previously created a tensor and called data() in one step, but the tensor was never disposed of\n const denormalizedImageTensor = denormalizeTensor(imageTensor, bitDepth, {\n disposeNormalTensor: opts.disposeImageTensor,\n });\n const imageData = await denormalizedImageTensor.data();\n denormalizedImageTensor.dispose();\n\n // DO NOT USE \"imageData instanceof Float32Array\" here\n // tensorflow sublcasses typed arrays, so it will always return false\n if (imageData.constructor.name !== \"Float32Array\") {\n throw Error(\"Tensor data should be stored as Float32Array\");\n }\n\n return imageData as Float32Array;\n};\n\n/*\n Receives an imageTensor of shape [H, W, C] or [Z, H, W, C]\n Returns an array of size 2, where the first element is the min values\n for each of the channels; and the second element contains\n the max values.\n\n For input of [Z, H, W, C], it is the min and max of each channel\n across the entire stack, eg the min and max values for channel 0\n are those that occur for every possible indexing of [Z, H, W, 0]\n */\nexport const findMinMaxs = async (\n imageTensor: T,\n opts: { disposeImageTensor: boolean } = { disposeImageTensor: false }\n): Promise<[number[], number[]]> => {\n let mins: number[];\n let maxs: number[];\n\n if (imageTensor.rank === 3) {\n mins = await tidy(\n () => (imageTensor as Tensor3D).min([0, 1]) as Tensor1D\n ).array();\n maxs = await tidy(\n () => (imageTensor as Tensor3D).max([0, 1]) as Tensor1D\n ).array();\n } else {\n mins = await tidy(\n () => (imageTensor as Tensor4D).min([0, 1, 2]) as Tensor1D\n ).array();\n maxs = await tidy(\n () => (imageTensor as Tensor4D).max([0, 1, 2]) as Tensor1D\n ).array();\n }\n\n opts.disposeImageTensor && imageTensor.dispose();\n return [mins, maxs];\n};\n\n/*\n Receives an image tensor of shape [H, W, C] or [Z, H, W, C],\n along with its Colors,\n and scales it by the ranges defined in Colors.\n\n Returns scaled image tensor of same shape as input.\n */\nexport const scaleImageTensor = (\n imageTensor: T,\n colors: Colors,\n opts: { disposeImageTensor: boolean } = { disposeImageTensor: true }\n): T => {\n const numChannels = imageTensor.shape[imageTensor.rank - 1];\n\n const mins: number[] = [];\n const ranges: number[] = [];\n for (let i = 0; i < numChannels; i++) {\n const [min, max] = colors.range[i];\n const range = max - min;\n mins.push(min);\n ranges.push(range);\n }\n\n const scaledImageTensor: T = tidy(() =>\n imageTensor.sub(tensor1d(mins)).div(tensor1d(ranges))\n );\n\n opts.disposeImageTensor && imageTensor.dispose();\n\n return scaledImageTensor;\n};\n\n/*\n Receives a tensor of shape [H, W, 3] or [Z, H, W, 3]\n returns its base64 data url, or array of urls if Z present\n */\nexport async function renderTensor(\n compositeTensor: T,\n bitDepth: BitDepth,\n opts?: { disposeCompositeTensor?: boolean; useCanvas?: boolean }\n): Promise;\n\nexport async function renderTensor(\n compositeTensor: Tensor3D | Tensor4D,\n bitDepth: BitDepth,\n opts?: { disposeCompositeTensor?: boolean; useCanvas?: boolean }\n): Promise {\n opts = opts ?? {};\n opts.disposeCompositeTensor = opts.disposeCompositeTensor ?? true;\n // using canvas will result in an rgba image where each channel is\n // 0-255, regardless of the value of bitDepth\n opts.useCanvas = opts.useCanvas ?? 
true;\n\n /*\n tf.browser.toPixels has 2 quirks:\n - it will convert the tensor to the range 0-255,\n which is what we usually want (bc less memory),\n but we can't override it to return 16 bit instead\n - it will insert alpha values (255) when the C dim is 3\n\n leaving here as reminder of why we're not using it\n */\n // const imageData = await browser.toPixels(compositeTensor);\n const imageData = await getImageTensorData(compositeTensor, bitDepth, {\n disposeImageTensor: opts.disposeCompositeTensor,\n });\n\n if (compositeTensor.rank === 3) {\n const [height, width, components] = (compositeTensor as Tensor3D).shape;\n\n const image = new ImageJS.Image({\n width,\n height,\n data: imageData,\n kind: \"RGB\" as ImageJS.ImageKind,\n bitDepth: bitDepth,\n components,\n alpha: 0,\n colorModel: \"RGB\" as ImageJS.ColorModel,\n });\n return image.toDataURL(\"image/png\", { useCanvas: opts.useCanvas });\n } else {\n let imageURLs: string[] = [];\n\n const [slices, height, width, components] = (compositeTensor as Tensor4D)\n .shape;\n\n const strideLength = height * width * components;\n\n for (let i = 0; i < slices; i++) {\n const sliceStart = i * strideLength;\n const sliceEnd = sliceStart + strideLength;\n\n const image = new ImageJS.Image({\n width,\n height,\n data: imageData.slice(sliceStart, sliceEnd),\n kind: \"RGB\" as ImageJS.ImageKind,\n bitDepth: bitDepth,\n components,\n alpha: 0,\n colorModel: \"RGB\" as ImageJS.ColorModel,\n });\n\n imageURLs.push(\n image.toDataURL(\"image/png\", { useCanvas: opts.useCanvas })\n );\n }\n\n return imageURLs;\n }\n}\n\n/*\n Receives a tensor of shape [Z, H, W, C], colors to apply, and a bitDepth,\n applies the colors generating a [H, W, 3] tensor,\n and returns the corresponding data url\n */\nexport async function createRenderedTensor(\n imageTensor: Tensor4D,\n colors: Colors,\n bitDepth: BitDepth,\n plane: T\n): Promise;\n\nexport async function createRenderedTensor(\n imageTensor: Tensor4D,\n colors: Colors,\n bitDepth: BitDepth,\n plane: number | undefined\n) {\n const compositeImage = tidy(() => {\n let operandTensor: Tensor4D | Tensor3D;\n let disposeOperandTensor: boolean;\n\n if (plane === undefined) {\n operandTensor = imageTensor;\n disposeOperandTensor = false;\n } else {\n // image slice := get z idx 0 of image with dims: [H, W, C]\n operandTensor = getImageSlice(imageTensor, plane);\n disposeOperandTensor = true;\n }\n\n // scale each channel by its range\n const scaledImageSlice = scaleImageTensor(operandTensor, colors, {\n disposeImageTensor: disposeOperandTensor,\n });\n\n // get indices of visible channels, VC\n const visibleChannels = filterVisibleChannels(colors);\n\n // image slice filtered by visible channels: [H, W, VC] or [Z, H, W, VC]\n const filteredSlice = sliceVisibleChannels(\n scaledImageSlice,\n visibleChannels\n );\n\n // color matrix filtered by visible channels: [VC, 3]\n const filteredColors = sliceVisibleColors(colors, visibleChannels);\n\n // composite image slice: [H, W, 3] or [Z, H, W, 3]\n const compositeImage = generateColoredTensor(filteredSlice, filteredColors);\n\n return compositeImage;\n });\n const src = await renderTensor(compositeImage, bitDepth);\n\n dispose(compositeImage);\n\n return src;\n}\n\nexport const convertToImage = async (\n imageStack: ImageJS.Stack,\n filename: string,\n currentColors: Colors | undefined,\n numSlices: number,\n numChannels: number\n): Promise => {\n if (!imageStack.length) {\n throw Error(\"Expected image stack\");\n }\n\n const activePlane = 0;\n\n const { bitDepth } 
= imageStack[activePlane];\n\n // image data := create image of dims: [Z, H, W, C]\n const imageTensor = convertToTensor(imageStack, numSlices, numChannels);\n\n const colors = currentColors\n ? currentColors\n : await generateDefaultColors(imageTensor);\n\n const coloredSliceURL = await createRenderedTensor(\n imageTensor,\n colors,\n bitDepth,\n activePlane\n );\n\n const [planes, height, width, channels] = imageTensor.shape;\n\n return {\n kind: \"Image\",\n activePlane: activePlane,\n colors: colors,\n bitDepth,\n categoryId: UNKNOWN_IMAGE_CATEGORY_ID,\n id: generateUUID(),\n name: filename,\n shape: { planes, height, width, channels },\n containing: [],\n data: imageTensor,\n partition: Partition.Inference,\n src: coloredSliceURL,\n visible: true,\n } as ImageObject;\n};\n\n/*\n ================================\n Image Color Manipulation Helpers\n ================================\n */\n\nexport const generateDefaultColors = async (\n imageTensor: T\n): Promise => {\n const range: { [channel: number]: [number, number] } = {};\n const visible: { [channel: number]: boolean } = {};\n let color: Array<[number, number, number]> = [];\n\n const numChannels =\n imageTensor.rank === 3\n ? (imageTensor as Tensor3D).shape[2]\n : (imageTensor as Tensor4D).shape[3];\n\n const [mins, maxs] = await findMinMaxs(imageTensor);\n\n if (mins.length !== numChannels || maxs.length !== numChannels) {\n throw Error(\n `Expected num channels, min values, and max values to all be ${numChannels}`\n );\n }\n\n for (let i = 0; i < numChannels; i++) {\n color.push(\n numChannels > 1 && i < DEFAULT_COLORS.length\n ? DEFAULT_COLORS[i]\n : [1, 1, 1]\n );\n\n range[i] = [mins[i], maxs[i]];\n\n // if image has more than 3 channels,\n // only show the first channel as default\n // (user can then toggle / untoggle the other channels if desired)\n visible[i] = !(numChannels > 3 && i > 0);\n }\n\n return {\n range,\n visible,\n color: tensor2d(color, [numChannels, 3], \"float32\"),\n };\n};\n\nexport const generateBlankColors = (numChannels: number): Colors => {\n const range: { [channel: number]: [number, number] } = {};\n const visible: { [channel: number]: boolean } = {};\n let color: Array<[number, number, number]> = [];\n\n for (let i = 0; i < numChannels; i++) {\n color.push(\n numChannels > 1 && i < DEFAULT_COLORS.length\n ? 
DEFAULT_COLORS[i]\n : [1, 1, 1]\n );\n\n range[i] = [0, 1];\n\n // if image has more than 3 channels,\n // only show the first channel as default\n // (user can then toggle / untoggle the other channels if desired)\n visible[i] = !(numChannels > 3 && i > 0);\n }\n\n return {\n range,\n visible,\n color: tensor2d(color, [numChannels, 3], \"float32\"),\n };\n};\n\n/*\n Set color ranges to provided mins and maxs\n */\nexport const scaleColors = (\n colors: Colors,\n minMax: { mins: number[]; maxs: number[] }\n) => {\n const { mins, maxs } = minMax;\n\n if (mins.length !== maxs.length) {\n throw Error(\"Number of min and max values must be identical\");\n }\n\n if (colors.color.shape[0] !== mins.length) {\n throw Error(\"Number of min and max values must match number of channels\");\n }\n\n for (let i = 0; i < mins.length; i++) {\n colors.range[i] = [mins[i], maxs[i]];\n }\n};\n","import { ImageViewerState } from \"store/types\";\nimport {\n ColorAdjustmentOptionsType,\n ZoomToolOptionsType,\n} from \"utils/annotator/types\";\n\nexport const selectZoomToolOptions = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}): ZoomToolOptionsType => {\n return imageViewer.zoomOptions;\n};\n\nexport const selectZoomSelection = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}): {\n dragging: boolean;\n minimum: { x: number; y: number } | undefined;\n maximum: { x: number; y: number } | undefined;\n selecting: boolean;\n centerPoint: { x: number; y: number } | undefined;\n} => {\n return imageViewer.zoomSelection;\n};\n\nexport const selectWorkingAnnotationId = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}): string | undefined => {\n return imageViewer.workingAnnotationId;\n};\nexport const selectWorkingAnnotation = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}) => {\n return imageViewer.workingAnnotation;\n};\n\nexport const selectStageWidth = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}): number => {\n return imageViewer.stageWidth;\n};\n\nexport const selectStageScale = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}): number => {\n return imageViewer.zoomOptions.scale;\n};\n\nexport const selectStagePosition = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}): { x: number; y: number } => {\n return imageViewer.stagePosition;\n};\n\nexport const selectStageHeight = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}): number => {\n return imageViewer.stageHeight;\n};\n\nexport const selectSelectedAnnotationIds = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}): string[] => {\n return imageViewer.selectedAnnotationIds;\n};\n\nexport const selectSelectedIVCategoryId = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}) => {\n return imageViewer.selectedCategoryId;\n};\n\nexport const selectImageViewerFilters = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}) => {\n return imageViewer.filters;\n};\n\nexport const selectImageStackImageIds = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}) => {\n return imageViewer.imageStack;\n};\n\nexport const selectImageOrigin = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}) => {\n return imageViewer.imageOrigin;\n};\n\nexport const selectImageIsloading = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}) => {\n return imageViewer.imageIsLoading;\n};\n\nexport const selectHighligtedIVCatogory = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}) => {\n return 
imageViewer.highlightedCategory;\n};\n\nexport const selectFilteredImageViewerCategoryIds = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}) => {\n return imageViewer.filters.categoryId;\n};\n\nexport const selectCursor = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}): string => {\n return imageViewer.cursor;\n};\n\nexport const selectColorAdjustments = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}): ColorAdjustmentOptionsType => {\n return imageViewer.colorAdjustment;\n};\n\nexport const selectActiveImageRenderedSrcs = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}): Array => {\n return imageViewer.activeImageRenderedSrcs;\n};\n\nexport const selectActiveImageId = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}): string | undefined => {\n return imageViewer.activeImageId;\n};\n\nexport const selectActiveAnnotationIds = ({\n imageViewer,\n}: {\n imageViewer: ImageViewerState;\n}) => {\n return imageViewer.activeAnnotationIds;\n};\n","import { createSelector } from \"@reduxjs/toolkit\";\nimport {\n selectAllKinds,\n selectCategoriesDictionary,\n selectKindDictionary,\n selectThingsDictionary,\n} from \"store/data/selectors\";\nimport { generateBlankColors } from \"utils/common/tensorHelpers\";\nimport { Colors, ColorsRaw } from \"utils/common/types\";\nimport {\n AnnotationObject,\n Category,\n DecodedAnnotationObject,\n ImageObject,\n Kind,\n KindWithCategories,\n Shape,\n} from \"store/data/types\";\nimport {\n selectActiveAnnotationIds,\n selectActiveImageId,\n selectImageStackImageIds,\n selectSelectedAnnotationIds,\n selectWorkingAnnotation,\n} from \"./selectors\";\nimport { decodeAnnotation } from \"utils/annotator/rle\";\nimport { getCompleteEntity } from \"store/entities/utils\";\nimport { intersection } from \"lodash\";\n\nexport const selectActiveImage = createSelector(\n selectActiveImageId,\n selectThingsDictionary,\n (activeImageId, thingDict) => {\n if (!activeImageId) return undefined;\n return thingDict[activeImageId] as ImageObject | undefined;\n }\n);\n\nexport const selectImageViewerImages = createSelector(\n selectImageStackImageIds,\n selectThingsDictionary,\n (imageStackIds, thingDict) => {\n const imageViewerImages = imageStackIds.reduce(\n (images: ImageObject[], id) => {\n const image = thingDict[id];\n if (image) {\n images.push(image as ImageObject);\n }\n return images;\n },\n []\n );\n return imageViewerImages;\n }\n);\n\nexport const selectActiveImageRawColor = createSelector(\n selectActiveImage,\n (image): ColorsRaw => {\n let colors: Colors;\n if (!image) {\n colors = generateBlankColors(3);\n } else {\n colors = image.colors;\n }\n\n return {\n // is sync appropriate? if so we may need to dispose??\n color: colors.color.arraySync() as [number, number, number][],\n range: colors.range,\n visible: colors.visible,\n };\n }\n);\n\nexport const selectSelectedAnnotations = createSelector(\n selectSelectedAnnotationIds,\n selectThingsDictionary,\n (selectedAnnotationIds, thingsDict) => {\n return selectedAnnotationIds.reduce(\n (anns: DecodedAnnotationObject[], id) => {\n const ann = thingsDict[id] as AnnotationObject;\n if (ann) {\n const decodedAnn =\n ann.decodedMask === undefined\n ? 
decodeAnnotation(ann)\n : (ann as DecodedAnnotationObject);\n anns.push(decodedAnn);\n }\n return anns;\n },\n []\n );\n }\n);\n\nexport const selectActiveAnnotations = createSelector(\n [selectActiveAnnotationIds, selectThingsDictionary],\n (annotationIds, thingsDict): Array => {\n if (!annotationIds.length) return [];\n\n return annotationIds.map((annotationId) => {\n const annotation = thingsDict[annotationId] as AnnotationObject;\n const decodedAnnotation = !annotation.decodedMask\n ? decodeAnnotation(annotation)\n : (annotation as DecodedAnnotationObject);\n return decodedAnnotation;\n });\n }\n);\n\nexport const selectActiveAnnotationsViews = createSelector(\n selectActiveImage,\n selectActiveAnnotationIds,\n selectThingsDictionary,\n selectCategoriesDictionary,\n (activeImage, activeAnnotationIds, thingDict, catDict) => {\n if (!activeImage) return [];\n const imageShape = activeImage.shape;\n const activePlane = activeImage.activePlane;\n const annotationObjects: Array<{\n annotation: DecodedAnnotationObject;\n fillColor: string;\n imageShape: Shape;\n }> = [];\n\n for (const annotationId of activeAnnotationIds) {\n const annotation = thingDict[annotationId] as AnnotationObject;\n\n const decodedAnnotation = !annotation.decodedMask\n ? decodeAnnotation(annotation)\n : (annotation as DecodedAnnotationObject);\n\n if (\n annotation.plane === activePlane ||\n annotation.activePlane === activePlane\n ) {\n const fillColor = catDict[annotation.categoryId].color;\n annotationObjects.push({\n annotation: decodedAnnotation,\n fillColor,\n imageShape: imageShape,\n });\n }\n }\n return annotationObjects;\n }\n);\n\nexport const selectWorkingAnnotationView = createSelector(\n selectWorkingAnnotation,\n selectActiveImage,\n selectCategoriesDictionary,\n (workingAnnotationEntity, activeImage, catDict) => {\n if (!workingAnnotationEntity.saved || !activeImage) return;\n const workingAnnotation = getCompleteEntity(\n workingAnnotationEntity\n ) as AnnotationObject;\n const annotation = !workingAnnotation.decodedMask\n ? decodeAnnotation(workingAnnotation)\n : (workingAnnotation as DecodedAnnotationObject);\n const fillColor = catDict[workingAnnotation.categoryId].color;\n return {\n annotation: annotation,\n fillColor: fillColor,\n imageShape: activeImage.shape,\n };\n }\n);\nexport const selectSelectedActiveAnnotations = createSelector(\n [selectSelectedAnnotationIds, selectThingsDictionary],\n (annotationIds, thingsDict): Array => {\n if (!annotationIds.length) return [];\n\n return annotationIds.map((annotationId) => {\n const annotation = thingsDict[annotationId] as AnnotationObject;\n const decodedAnnotation = !annotation.decodedMask\n ? 
decodeAnnotation(annotation)\n : (annotation as DecodedAnnotationObject);\n return decodedAnnotation;\n });\n }\n);\n\nexport const selectImageViewerActiveKinds = createSelector(\n selectActiveImage,\n selectAllKinds,\n (activeImage, allKinds) => {\n if (!activeImage) return [];\n const activeKinds: Kind[] = [];\n const activeAnnotationIds = activeImage.containing;\n\n allKinds.forEach((kind) => {\n const intersect = intersection(activeAnnotationIds, kind.containing);\n if (intersect.length > 0) {\n activeKinds.push(kind);\n }\n });\n\n return activeKinds;\n }\n);\n\nexport const selectImageViewerActiveKindsWithFullCat = createSelector(\n selectAllKinds,\n selectCategoriesDictionary,\n (allKinds, catDict) => {\n const activeKinds: Array = [];\n\n allKinds.forEach((kind) => {\n if (kind.id === \"Image\") return;\n activeKinds.push({\n ...kind,\n categories: kind.categories.map((id) => catDict[id]),\n });\n });\n\n return activeKinds;\n }\n);\n\nexport const selectActiveImageCategoryObjectCount = createSelector(\n selectActiveImage,\n selectKindDictionary,\n (activeImage, kindDict) => (category: Category, kindIfUnknown?: string) => {\n if (!activeImage) return 0;\n\n const objectsInImage = activeImage.containing;\n let objectsInCategory;\n if (kindIfUnknown) {\n const kind = kindDict[kindIfUnknown];\n if (!kind) return 0;\n const objectsInKind = kind.containing;\n const unknownObjects = category.containing;\n objectsInCategory = intersection(objectsInKind, unknownObjects);\n } else {\n objectsInCategory = category.containing;\n }\n\n const objectsInBoth = intersection(objectsInCategory, objectsInImage);\n return objectsInBoth.length;\n }\n);\n","import { useContext, useEffect, useState } from \"react\";\nimport * as ImageJS from \"image-js\";\nimport { useSelector } from \"react-redux\";\n\nimport { StageContext } from \"contexts\";\nimport {\n selectPenSelectionBrushSize,\n selectQuickSelectionRegionSize,\n selectToolType,\n selectThresholdAnnotationValue,\n} from \"store/annotator/selectors\";\n\nimport {\n AnnotationTool,\n ColorAnnotationTool,\n EllipticalAnnotationTool,\n LassoAnnotationTool,\n MagneticAnnotationTool,\n ObjectAnnotationTool,\n PenAnnotationTool,\n PolygonalAnnotationTool,\n QuickAnnotationTool,\n RectangularAnnotationTool,\n ThresholdAnnotationTool,\n SelectionTool,\n BlankAnnotationTool,\n} from \"utils/annotator/tools\";\nimport { ToolType } from \"utils/annotator/enums\";\nimport { selectActiveImage } from \"store/imageViewer/reselectors\";\n\nexport const useAnnotationTool = () => {\n const [image, setImage] = useState();\n const [operator, setOperator] = useState(\n new BlankAnnotationTool()\n );\n\n const activeImage = useSelector(selectActiveImage);\n const operation = useSelector(selectToolType);\n const stageScale = useContext(StageContext)?.current?.scaleX() ?? 
1;\n const penSelectionBrushSize = useSelector(selectPenSelectionBrushSize);\n const quickSelectionRegionSize = useSelector(selectQuickSelectionRegionSize);\n const threshold = useSelector(selectThresholdAnnotationValue);\n\n useEffect(() => {\n if (!activeImage) return;\n const loadImage = async () => {\n const image = await ImageJS.Image.load(activeImage.src, {\n ignorePalette: true,\n });\n setImage(image);\n };\n loadImage();\n }, [activeImage]);\n\n useEffect(() => {\n if (!image) return;\n\n switch (operation) {\n case ToolType.ColorAnnotation:\n setOperator(new ColorAnnotationTool(image));\n\n return;\n case ToolType.EllipticalAnnotation:\n setOperator(new EllipticalAnnotationTool(image));\n\n return;\n case ToolType.LassoAnnotation:\n setOperator(new LassoAnnotationTool(image));\n\n return;\n case ToolType.MagneticAnnotation:\n setOperator(new MagneticAnnotationTool(image, 0.5));\n\n return;\n case ToolType.ObjectAnnotation:\n ObjectAnnotationTool.compile(image).then(\n (operator: ObjectAnnotationTool) => {\n setOperator(operator);\n }\n );\n\n return;\n case ToolType.PenAnnotation:\n setOperator(new PenAnnotationTool(image));\n\n return;\n case ToolType.PolygonalAnnotation:\n setOperator(new PolygonalAnnotationTool(image));\n\n return;\n case ToolType.QuickAnnotation:\n setOperator(new QuickAnnotationTool(image));\n\n return;\n case ToolType.ThresholdAnnotation:\n setOperator(new ThresholdAnnotationTool(image));\n\n return;\n case ToolType.RectangularAnnotation:\n setOperator(new RectangularAnnotationTool(image));\n\n return;\n case ToolType.Pointer:\n setOperator(new SelectionTool(image));\n\n return;\n default:\n setOperator(new BlankAnnotationTool(image));\n\n return;\n }\n }, [operation, image]);\n\n useEffect(() => {\n if (operator instanceof ThresholdAnnotationTool) {\n operator.updateMask(threshold);\n }\n }, [operator, threshold]);\n\n useEffect(() => {\n if (operator instanceof QuickAnnotationTool) {\n const regionSize =\n quickSelectionRegionSize / Math.round(stageScale ? stageScale : 1);\n operator.initializeSuperpixels(regionSize);\n } else if (operator instanceof PenAnnotationTool) {\n const brushSize = penSelectionBrushSize / (stageScale ? 
stageScale : 1);\n operator.brushSize = Math.round(brushSize);\n }\n }, [operator, quickSelectionRegionSize, penSelectionBrushSize, stageScale]);\n\n return {\n annotationTool: operator,\n ToolSelecton: {\n /*!(\n annotationState !== AnnotationStateType.Annotating &&\n toolType !== ToolType.QuickAnnotation\n ) && }\n \n {/* })\n */\n },\n };\n};\n","import { useEffect, useRef } from \"react\";\n\ntype PassedFunc = (...args: any[]) => void;\ntype Timer = ReturnType;\n\nexport function useDebounce(\n func: Func,\n delay: number\n) {\n // State and setters for debounced value\n const timer = useRef();\n\n useEffect(() => {\n return () => {\n if (!timer.current) return;\n clearTimeout(timer.current);\n };\n }, []);\n\n const debouncedFunction = ((...args) => {\n const newTimer = setTimeout(() => {\n func(...args);\n }, delay);\n clearTimeout(timer.current);\n timer.current = newTimer;\n }) as Func;\n\n return debouncedFunction;\n}\n","export enum ThemeMode {\n Light = \"light\",\n Dark = \"dark\",\n}\n","import { createSlice, PayloadAction } from \"@reduxjs/toolkit\";\nimport { defaultAlert } from \"utils/common/constants\";\nimport { AlertState } from \"utils/common/types\";\nimport { HotkeyView, Languages } from \"utils/common/enums\";\nimport { ThemeMode } from \"themes/enums\";\nimport { AppSettingsState } from \"store/types\";\n\nconst initialState: AppSettingsState = {\n init: false,\n tileSize: 1,\n themeMode: ThemeMode.Light,\n language: Languages.English,\n soundEnabled: true,\n imageSelectionColor: \"#FF6DB6\",\n selectedImageBorderWidth: 5,\n alertState: defaultAlert,\n hotkeyStack: [],\n};\n\nexport const applicationSettingsSlice = createSlice({\n name: \"application\",\n initialState: initialState,\n reducers: {\n initialized(state: AppSettingsState) {\n state.init = true;\n },\n resetApplicationSettingsSetings(state: AppSettingsState) {\n return initialState;\n },\n\n hideAlertState(state, action: PayloadAction<{}>) {\n state.alertState.visible = false;\n },\n registerHotkeyView(\n state,\n action: PayloadAction<{ hotkeyView: HotkeyView }>\n ) {\n state.hotkeyStack.push(action.payload.hotkeyView);\n },\n setLanguage(state, action: PayloadAction<{ language: Languages }>) {\n state.language = action.payload.language;\n },\n setImageSelectionColor(\n state: AppSettingsState,\n action: PayloadAction<{ selectionColor: string }>\n ) {\n state.imageSelectionColor = action.payload.selectionColor;\n },\n setSelectedImageBorderWidth(\n state: AppSettingsState,\n action: PayloadAction<{ selectionSize: number }>\n ) {\n state.selectedImageBorderWidth = action.payload.selectionSize;\n },\n setThemeMode(\n state: AppSettingsState,\n action: PayloadAction<{ mode: ThemeMode }>\n ) {\n state.themeMode = action.payload.mode;\n },\n setSoundEnabled(state, action: PayloadAction<{ soundEnabled: boolean }>) {\n state.soundEnabled = action.payload.soundEnabled;\n },\n unregisterHotkeyView(state, action: PayloadAction<{}>) {\n state.hotkeyStack.pop();\n },\n updateAlertState(state, action: PayloadAction<{ alertState: AlertState }>) {\n state.alertState = action.payload.alertState;\n state.alertState.visible = true;\n },\n updateTileSize(\n state: AppSettingsState,\n action: PayloadAction<{ newValue: number }>\n ) {\n state.tileSize = action.payload.newValue!;\n },\n },\n});\n","import { AppSettingsState } from \"store/types\";\nimport { ThemeMode } from \"themes/enums\";\nimport { HotkeyView } from \"utils/common/enums\";\nimport { AlertState } from \"utils/common/types\";\n\nexport const 
selectAlertState = ({\n applicationSettings,\n}: {\n applicationSettings: AppSettingsState;\n}): AlertState => {\n return applicationSettings.alertState;\n};\n\nexport const selectHotkeyView = ({\n applicationSettings,\n}: {\n applicationSettings: AppSettingsState;\n}): HotkeyView => {\n return applicationSettings.hotkeyStack.at(-1)!;\n};\n\nexport const selectImageSelectionColor = ({\n applicationSettings,\n}: {\n applicationSettings: AppSettingsState;\n}): string => {\n return applicationSettings.imageSelectionColor;\n};\n\nexport const selectInitSettings = ({\n applicationSettings,\n}: {\n applicationSettings: AppSettingsState;\n}) => {\n return applicationSettings.init;\n};\n\nexport const selectLanguageType = ({\n applicationSettings,\n}: {\n applicationSettings: AppSettingsState;\n}) => {\n return applicationSettings.language;\n};\n\nexport const selectSelectedImageBorderWidth = ({\n applicationSettings,\n}: {\n applicationSettings: AppSettingsState;\n}): number => {\n return applicationSettings.selectedImageBorderWidth;\n};\n\nexport const selectSoundEnabled = ({\n applicationSettings,\n}: {\n applicationSettings: AppSettingsState;\n}) => {\n return applicationSettings.soundEnabled;\n};\n\nexport const selectThemeMode = ({\n applicationSettings,\n}: {\n applicationSettings: AppSettingsState;\n}): ThemeMode => {\n return applicationSettings.themeMode;\n};\n\nexport const selectTileSize = ({\n applicationSettings,\n}: {\n applicationSettings: AppSettingsState;\n}): number => {\n return applicationSettings.tileSize;\n};\n","import React, { useCallback, useState } from \"react\";\nimport { useDispatch, useSelector } from \"react-redux\";\n\nimport { applicationSettingsSlice } from \"store/applicationSettings\";\nimport { selectAlertState } from \"store/applicationSettings/selectors\";\nimport { AlertType, HotkeyView } from \"utils/common/enums\";\n\nexport const useDialog = (closeOnError: boolean = true) => {\n const [open, setOpen] = useState(false);\n\n const alertState = useSelector(selectAlertState);\n\n React.useEffect(() => {\n if (\n alertState.visible &&\n alertState.alertType === AlertType.Error &&\n closeOnError\n ) {\n setOpen(false);\n }\n }, [alertState, closeOnError]);\n\n const onClose = useCallback(() => {\n setOpen(false);\n }, []);\n\n const onOpen = useCallback(() => {\n setOpen(true);\n }, []);\n\n return { onClose, onOpen, open };\n};\n\nexport const useDialogHotkey = (\n view?: HotkeyView,\n closeOnError: boolean = true\n) => {\n const dispatch = useDispatch();\n const {\n onClose: onDialogClose,\n onOpen: onDialogOpen,\n open: dialogOpen,\n } = useDialog(closeOnError);\n\n const onOpen = () => {\n view &&\n dispatch(\n applicationSettingsSlice.actions.registerHotkeyView({\n hotkeyView: view,\n })\n );\n onDialogOpen();\n };\n\n const onClose = () => {\n view && dispatch(applicationSettingsSlice.actions.unregisterHotkeyView({}));\n onDialogClose();\n };\n\n return { onClose, onOpen, open: dialogOpen };\n};\n","import { useContext } from 'react'\nimport type { DragDropManager } from 'dnd-core'\nimport { invariant } from '@react-dnd/invariant'\nimport { DndContext } from '../core/index.js'\n\n/**\n * A hook to retrieve the DragDropManager from Context\n */\nexport function useDragDropManager(): DragDropManager {\n\tconst { dragDropManager } = useContext(DndContext)\n\tinvariant(dragDropManager != null, 'Expected drag drop context')\n\treturn dragDropManager as DragDropManager\n}\n","import { useLayoutEffect, useEffect } from 'react'\n\n// suppress the useLayoutEffect 
warning on server side.\nexport const useIsomorphicLayoutEffect =\n\ttypeof window !== 'undefined' ? useLayoutEffect : useEffect\n","import type { DropTarget } from 'dnd-core'\nimport type { DropTargetMonitor } from '../../types/index.js'\nimport type { DropTargetHookSpec } from '../types.js'\n\nexport class DropTargetImpl implements DropTarget {\n\tpublic constructor(\n\t\tpublic spec: DropTargetHookSpec,\n\t\tprivate monitor: DropTargetMonitor,\n\t) {}\n\n\tpublic canDrop() {\n\t\tconst spec = this.spec\n\t\tconst monitor = this.monitor\n\t\treturn spec.canDrop ? spec.canDrop(monitor.getItem(), monitor) : true\n\t}\n\n\tpublic hover() {\n\t\tconst spec = this.spec\n\t\tconst monitor = this.monitor\n\t\tif (spec.hover) {\n\t\t\tspec.hover(monitor.getItem(), monitor)\n\t\t}\n\t}\n\n\tpublic drop() {\n\t\tconst spec = this.spec\n\t\tconst monitor = this.monitor\n\t\tif (spec.drop) {\n\t\t\treturn spec.drop(monitor.getItem(), monitor)\n\t\t}\n\t\treturn\n\t}\n}\n","import { registerTarget, TargetConnector } from '../../internals/index.js'\nimport type { DropTargetMonitor } from '../../types/index.js'\nimport type { DropTargetHookSpec } from '../types.js'\nimport { useDragDropManager } from '../useDragDropManager.js'\nimport { useIsomorphicLayoutEffect } from '../useIsomorphicLayoutEffect.js'\nimport { useAccept } from './useAccept.js'\nimport { useDropTarget } from './useDropTarget.js'\n\nexport function useRegisteredDropTarget(\n\tspec: DropTargetHookSpec,\n\tmonitor: DropTargetMonitor,\n\tconnector: TargetConnector,\n): void {\n\tconst manager = useDragDropManager()\n\tconst dropTarget = useDropTarget(spec, monitor)\n\tconst accept = useAccept(spec)\n\n\tuseIsomorphicLayoutEffect(\n\t\tfunction registerDropTarget() {\n\t\t\tconst [handlerId, unregister] = registerTarget(\n\t\t\t\taccept,\n\t\t\t\tdropTarget,\n\t\t\t\tmanager,\n\t\t\t)\n\t\t\tmonitor.receiveHandlerId(handlerId)\n\t\t\tconnector.receiveHandlerId(handlerId)\n\t\t\treturn unregister\n\t\t},\n\t\t[\n\t\t\tmanager,\n\t\t\tmonitor,\n\t\t\tdropTarget,\n\t\t\tconnector,\n\t\t\taccept.map((a) => a.toString()).join('|'),\n\t\t],\n\t)\n}\n","import { useEffect, useMemo } from 'react'\nimport type { DropTargetMonitor } from '../../types/index.js'\nimport type { DropTargetHookSpec } from '../types.js'\nimport { DropTargetImpl } from './DropTargetImpl.js'\n\nexport function useDropTarget(\n\tspec: DropTargetHookSpec,\n\tmonitor: DropTargetMonitor,\n) {\n\tconst dropTarget = useMemo(() => new DropTargetImpl(spec, monitor), [monitor])\n\tuseEffect(() => {\n\t\tdropTarget.spec = spec\n\t}, [spec])\n\treturn dropTarget\n}\n","import { invariant } from '@react-dnd/invariant'\nimport type { Identifier } from 'dnd-core'\nimport { useMemo } from 'react'\nimport type { DropTargetHookSpec } from '../types.js'\n\n/**\n * Internal utility hook to get an array-version of spec.accept.\n * The main utility here is that we aren't creating a new array on every render if a non-array spec.accept is passed in.\n * @param spec\n */\nexport function useAccept(\n\tspec: DropTargetHookSpec,\n): Identifier[] {\n\tconst { accept } = spec\n\treturn useMemo(() => {\n\t\tinvariant(spec.accept != null, 'accept must be defined')\n\t\treturn Array.isArray(accept) ? 
accept : [accept]\n\t}, [accept])\n}\n","import type {\n\tDragDropManager,\n\tDropTarget,\n\tUnsubscribe,\n\tIdentifier,\n\tTargetType,\n\tSourceType,\n\tDragSource,\n} from 'dnd-core'\n\nexport function registerTarget(\n\ttype: TargetType,\n\ttarget: DropTarget,\n\tmanager: DragDropManager,\n): [Identifier, Unsubscribe] {\n\tconst registry = manager.getRegistry()\n\tconst targetId = registry.addTarget(type, target)\n\n\treturn [targetId, () => registry.removeTarget(targetId)]\n}\n\nexport function registerSource(\n\ttype: SourceType,\n\tsource: DragSource,\n\tmanager: DragDropManager,\n): [Identifier, Unsubscribe] {\n\tconst registry = manager.getRegistry()\n\tconst sourceId = registry.addSource(type, source)\n\n\treturn [sourceId, () => registry.removeSource(sourceId)]\n}\n","import type {\n\tDragDropManager,\n\tDragDropMonitor,\n\tUnsubscribe,\n\tListener,\n\tIdentifier,\n\tXYCoord,\n} from 'dnd-core'\nimport { invariant } from '@react-dnd/invariant'\nimport type { DropTargetMonitor } from '../types/index.js'\n\nlet isCallingCanDrop = false\n\nexport class DropTargetMonitorImpl implements DropTargetMonitor {\n\tprivate internalMonitor: DragDropMonitor\n\tprivate targetId: Identifier | null = null\n\n\tpublic constructor(manager: DragDropManager) {\n\t\tthis.internalMonitor = manager.getMonitor()\n\t}\n\n\tpublic receiveHandlerId(targetId: Identifier | null): void {\n\t\tthis.targetId = targetId\n\t}\n\n\tpublic getHandlerId(): Identifier | null {\n\t\treturn this.targetId\n\t}\n\n\tpublic subscribeToStateChange(\n\t\tlistener: Listener,\n\t\toptions?: { handlerIds?: Identifier[] },\n\t): Unsubscribe {\n\t\treturn this.internalMonitor.subscribeToStateChange(listener, options)\n\t}\n\n\tpublic canDrop(): boolean {\n\t\t// Cut out early if the target id has not been set. This should prevent errors\n\t\t// where the user has an older version of dnd-core like in\n\t\t// https://github.com/react-dnd/react-dnd/issues/1310\n\t\tif (!this.targetId) {\n\t\t\treturn false\n\t\t}\n\t\tinvariant(\n\t\t\t!isCallingCanDrop,\n\t\t\t'You may not call monitor.canDrop() inside your canDrop() implementation. 
' +\n\t\t\t\t'Read more: http://react-dnd.github.io/react-dnd/docs/api/drop-target-monitor',\n\t\t)\n\n\t\ttry {\n\t\t\tisCallingCanDrop = true\n\t\t\treturn this.internalMonitor.canDropOnTarget(this.targetId)\n\t\t} finally {\n\t\t\tisCallingCanDrop = false\n\t\t}\n\t}\n\n\tpublic isOver(options?: { shallow?: boolean }): boolean {\n\t\tif (!this.targetId) {\n\t\t\treturn false\n\t\t}\n\t\treturn this.internalMonitor.isOverTarget(this.targetId, options)\n\t}\n\n\tpublic getItemType(): Identifier | null {\n\t\treturn this.internalMonitor.getItemType()\n\t}\n\n\tpublic getItem(): any {\n\t\treturn this.internalMonitor.getItem()\n\t}\n\n\tpublic getDropResult(): any {\n\t\treturn this.internalMonitor.getDropResult()\n\t}\n\n\tpublic didDrop(): boolean {\n\t\treturn this.internalMonitor.didDrop()\n\t}\n\n\tpublic getInitialClientOffset(): XYCoord | null {\n\t\treturn this.internalMonitor.getInitialClientOffset()\n\t}\n\n\tpublic getInitialSourceClientOffset(): XYCoord | null {\n\t\treturn this.internalMonitor.getInitialSourceClientOffset()\n\t}\n\n\tpublic getSourceClientOffset(): XYCoord | null {\n\t\treturn this.internalMonitor.getSourceClientOffset()\n\t}\n\n\tpublic getClientOffset(): XYCoord | null {\n\t\treturn this.internalMonitor.getClientOffset()\n\t}\n\n\tpublic getDifferenceFromInitialOffset(): XYCoord | null {\n\t\treturn this.internalMonitor.getDifferenceFromInitialOffset()\n\t}\n}\n","import { invariant } from '@react-dnd/invariant'\nimport { cloneElement, isValidElement, ReactElement } from 'react'\n\nfunction throwIfCompositeComponentElement(element: ReactElement) {\n\t// Custom components can no longer be wrapped directly in React DnD 2.0\n\t// so that we don't need to depend on findDOMNode() from react-dom.\n\tif (typeof element.type === 'string') {\n\t\treturn\n\t}\n\n\tconst displayName =\n\t\t(element.type as any).displayName || element.type.name || 'the component'\n\n\tthrow new Error(\n\t\t'Only native element nodes can now be passed to React DnD connectors.' +\n\t\t\t`You can either wrap ${displayName} into a

, or turn it into a ` +\n\t\t\t'drag source or a drop target itself.',\n\t)\n}\n\nfunction wrapHookToRecognizeElement(hook: (node: any, options: any) => void) {\n\treturn (elementOrNode = null, options = null) => {\n\t\t// When passed a node, call the hook straight away.\n\t\tif (!isValidElement(elementOrNode)) {\n\t\t\tconst node = elementOrNode\n\t\t\thook(node, options)\n\t\t\t// return the node so it can be chained (e.g. when within callback refs\n\t\t\t//
connectDragSource(connectDropTarget(node))}/>\n\t\t\treturn node\n\t\t}\n\n\t\t// If passed a ReactElement, clone it and attach this function as a ref.\n\t\t// This helps us achieve a neat API where user doesn't even know that refs\n\t\t// are being used under the hood.\n\t\tconst element: ReactElement | null = elementOrNode\n\t\tthrowIfCompositeComponentElement(element as any)\n\n\t\t// When no options are passed, use the hook directly\n\t\tconst ref = options ? (node: Element) => hook(node, options) : hook\n\t\treturn cloneWithRef(element, ref)\n\t}\n}\n\nexport function wrapConnectorHooks(hooks: any) {\n\tconst wrappedHooks: any = {}\n\n\tObject.keys(hooks).forEach((key) => {\n\t\tconst hook = hooks[key]\n\n\t\t// ref objects should be passed straight through without wrapping\n\t\tif (key.endsWith('Ref')) {\n\t\t\twrappedHooks[key] = hooks[key]\n\t\t} else {\n\t\t\tconst wrappedHook = wrapHookToRecognizeElement(hook)\n\t\t\twrappedHooks[key] = () => wrappedHook\n\t\t}\n\t})\n\n\treturn wrappedHooks\n}\n\nfunction setRef(ref: any, node: any) {\n\tif (typeof ref === 'function') {\n\t\tref(node)\n\t} else {\n\t\tref.current = node\n\t}\n}\n\nfunction cloneWithRef(element: any, newRef: any): ReactElement {\n\tconst previousRef = element.ref\n\tinvariant(\n\t\ttypeof previousRef !== 'string',\n\t\t'Cannot connect React DnD to an element with an existing string ref. ' +\n\t\t\t'Please convert it to use a callback ref instead, or wrap it into a or
. ' +\n\t\t\t'Read more: https://reactjs.org/docs/refs-and-the-dom.html#callback-refs',\n\t)\n\n\tif (!previousRef) {\n\t\t// When there is no ref on the element, use the new ref directly\n\t\treturn cloneElement(element, {\n\t\t\tref: newRef,\n\t\t})\n\t} else {\n\t\treturn cloneElement(element, {\n\t\t\tref: (node: any) => {\n\t\t\t\tsetRef(previousRef, node)\n\t\t\t\tsetRef(newRef, node)\n\t\t\t},\n\t\t})\n\t}\n}\n","import type { RefObject } from 'react'\nimport { shallowEqual } from '@react-dnd/shallowequal'\nimport { wrapConnectorHooks } from './wrapConnectorHooks.js'\nimport type { Backend, Unsubscribe, Identifier } from 'dnd-core'\nimport { isRef } from './isRef.js'\nimport type { Connector } from './SourceConnector.js'\nimport type { DropTargetOptions } from '../types/index.js'\n\nexport class TargetConnector implements Connector {\n\tpublic hooks = wrapConnectorHooks({\n\t\tdropTarget: (node: any, options: DropTargetOptions) => {\n\t\t\tthis.clearDropTarget()\n\t\t\tthis.dropTargetOptions = options\n\t\t\tif (isRef(node)) {\n\t\t\t\tthis.dropTargetRef = node\n\t\t\t} else {\n\t\t\t\tthis.dropTargetNode = node\n\t\t\t}\n\t\t\tthis.reconnect()\n\t\t},\n\t})\n\n\tprivate handlerId: Identifier | null = null\n\t// The drop target may either be attached via ref or connect function\n\tprivate dropTargetRef: RefObject | null = null\n\tprivate dropTargetNode: any\n\tprivate dropTargetOptionsInternal: DropTargetOptions | null = null\n\tprivate unsubscribeDropTarget: Unsubscribe | undefined\n\n\tprivate lastConnectedHandlerId: Identifier | null = null\n\tprivate lastConnectedDropTarget: any = null\n\tprivate lastConnectedDropTargetOptions: DropTargetOptions | null = null\n\tprivate readonly backend: Backend\n\n\tpublic constructor(backend: Backend) {\n\t\tthis.backend = backend\n\t}\n\n\tpublic get connectTarget(): any {\n\t\treturn this.dropTarget\n\t}\n\n\tpublic reconnect(): void {\n\t\t// if nothing has changed then don't resubscribe\n\t\tconst didChange =\n\t\t\tthis.didHandlerIdChange() ||\n\t\t\tthis.didDropTargetChange() ||\n\t\t\tthis.didOptionsChange()\n\n\t\tif (didChange) {\n\t\t\tthis.disconnectDropTarget()\n\t\t}\n\n\t\tconst dropTarget = this.dropTarget\n\t\tif (!this.handlerId) {\n\t\t\treturn\n\t\t}\n\t\tif (!dropTarget) {\n\t\t\tthis.lastConnectedDropTarget = dropTarget\n\t\t\treturn\n\t\t}\n\n\t\tif (didChange) {\n\t\t\tthis.lastConnectedHandlerId = this.handlerId\n\t\t\tthis.lastConnectedDropTarget = dropTarget\n\t\t\tthis.lastConnectedDropTargetOptions = this.dropTargetOptions\n\n\t\t\tthis.unsubscribeDropTarget = this.backend.connectDropTarget(\n\t\t\t\tthis.handlerId,\n\t\t\t\tdropTarget,\n\t\t\t\tthis.dropTargetOptions,\n\t\t\t)\n\t\t}\n\t}\n\n\tpublic receiveHandlerId(newHandlerId: Identifier | null): void {\n\t\tif (newHandlerId === this.handlerId) {\n\t\t\treturn\n\t\t}\n\n\t\tthis.handlerId = newHandlerId\n\t\tthis.reconnect()\n\t}\n\n\tpublic get dropTargetOptions(): DropTargetOptions {\n\t\treturn this.dropTargetOptionsInternal\n\t}\n\tpublic set dropTargetOptions(options: DropTargetOptions) {\n\t\tthis.dropTargetOptionsInternal = options\n\t}\n\n\tprivate didHandlerIdChange(): boolean {\n\t\treturn this.lastConnectedHandlerId !== this.handlerId\n\t}\n\n\tprivate didDropTargetChange(): boolean {\n\t\treturn this.lastConnectedDropTarget !== this.dropTarget\n\t}\n\n\tprivate didOptionsChange(): boolean {\n\t\treturn !shallowEqual(\n\t\t\tthis.lastConnectedDropTargetOptions,\n\t\t\tthis.dropTargetOptions,\n\t\t)\n\t}\n\n\tpublic disconnectDropTarget() {\n\t\tif 
(this.unsubscribeDropTarget) {\n\t\t\tthis.unsubscribeDropTarget()\n\t\t\tthis.unsubscribeDropTarget = undefined\n\t\t}\n\t}\n\n\tprivate get dropTarget() {\n\t\treturn (\n\t\t\tthis.dropTargetNode || (this.dropTargetRef && this.dropTargetRef.current)\n\t\t)\n\t}\n\n\tprivate clearDropTarget() {\n\t\tthis.dropTargetRef = null\n\t\tthis.dropTargetNode = null\n\t}\n}\n","export interface Ref {\n\tcurrent: T\n}\n\nexport function isRef(obj: unknown): boolean {\n\treturn (\n\t\t// eslint-disable-next-line no-prototype-builtins\n\t\tobj !== null &&\n\t\ttypeof obj === 'object' &&\n\t\tObject.prototype.hasOwnProperty.call(obj, 'current')\n\t)\n}\n","export function shallowEqual(\n\tobjA: T,\n\tobjB: T,\n\tcompare?: (a: T, b: T, key?: string) => boolean | void,\n\tcompareContext?: any,\n) {\n\tlet compareResult = compare\n\t\t? compare.call(compareContext, objA, objB)\n\t\t: void 0\n\tif (compareResult !== void 0) {\n\t\treturn !!compareResult\n\t}\n\n\tif (objA === objB) {\n\t\treturn true\n\t}\n\n\tif (typeof objA !== 'object' || !objA || typeof objB !== 'object' || !objB) {\n\t\treturn false\n\t}\n\n\tconst keysA = Object.keys(objA)\n\tconst keysB = Object.keys(objB)\n\n\tif (keysA.length !== keysB.length) {\n\t\treturn false\n\t}\n\n\tconst bHasOwnProperty = Object.prototype.hasOwnProperty.bind(objB)\n\n\t// Test for A's keys different from B.\n\tfor (let idx = 0; idx < keysA.length; idx++) {\n\t\tconst key = keysA[idx] as string\n\n\t\tif (!bHasOwnProperty(key)) {\n\t\t\treturn false\n\t\t}\n\n\t\tconst valueA = (objA as any)[key]\n\t\tconst valueB = (objB as any)[key]\n\n\t\tcompareResult = compare\n\t\t\t? compare.call(compareContext, valueA, valueB, key)\n\t\t\t: void 0\n\n\t\tif (\n\t\t\tcompareResult === false ||\n\t\t\t(compareResult === void 0 && valueA !== valueB)\n\t\t) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n","import type { HandlerManager, MonitorEventEmitter } from '../types/index.js'\nimport { useIsomorphicLayoutEffect } from './useIsomorphicLayoutEffect.js'\nimport { useCollector } from './useCollector.js'\n\nexport function useMonitorOutput(\n\tmonitor: Monitor & MonitorEventEmitter,\n\tcollect: (monitor: Monitor) => Collected,\n\tonCollect?: () => void,\n): Collected {\n\tconst [collected, updateCollected] = useCollector(monitor, collect, onCollect)\n\n\tuseIsomorphicLayoutEffect(\n\t\tfunction subscribeToMonitorStateChange() {\n\t\t\tconst handlerId = monitor.getHandlerId()\n\t\t\tif (handlerId == null) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\treturn monitor.subscribeToStateChange(updateCollected, {\n\t\t\t\thandlerIds: [handlerId],\n\t\t\t})\n\t\t},\n\t\t[monitor, updateCollected],\n\t)\n\n\treturn collected\n}\n","import equal from 'fast-deep-equal'\nimport { useState, useCallback } from 'react'\nimport { useIsomorphicLayoutEffect } from './useIsomorphicLayoutEffect.js'\n\n/**\n *\n * @param monitor The monitor to collect state from\n * @param collect The collecting function\n * @param onUpdate A method to invoke when updates occur\n */\nexport function useCollector(\n\tmonitor: T,\n\tcollect: (monitor: T) => S,\n\tonUpdate?: () => void,\n): [S, () => void] {\n\tconst [collected, setCollected] = useState(() => collect(monitor))\n\n\tconst updateCollected = useCallback(() => {\n\t\tconst nextValue = collect(monitor)\n\t\t// This needs to be a deep-equality check because some monitor-collected values\n\t\t// include XYCoord objects that may be equivalent, but do not have instance equality.\n\t\tif (!equal(collected, nextValue)) 
{\n\t\t\tsetCollected(nextValue)\n\t\t\tif (onUpdate) {\n\t\t\t\tonUpdate()\n\t\t\t}\n\t\t}\n\t}, [collected, monitor, onUpdate])\n\n\t// update the collected properties after react renders.\n\t// Note that the \"Dustbin Stress Test\" fails if this is not\n\t// done when the component updates\n\tuseIsomorphicLayoutEffect(updateCollected)\n\n\treturn [collected, updateCollected]\n}\n","import type { Connector } from '../internals/index.js'\nimport type { HandlerManager, MonitorEventEmitter } from '../types/index.js'\nimport { useMonitorOutput } from './useMonitorOutput.js'\n\nexport function useCollectedProps(\n\tcollector: ((monitor: Monitor) => Collected) | undefined,\n\tmonitor: Monitor & MonitorEventEmitter,\n\tconnector: Connector,\n) {\n\treturn useMonitorOutput(monitor, collector || (() => ({} as Collected)), () =>\n\t\tconnector.reconnect(),\n\t)\n}\n","import { useMemo } from 'react'\nimport type { TargetConnector } from '../../internals/index.js'\n\nexport function useConnectDropTarget(connector: TargetConnector) {\n\treturn useMemo(() => connector.hooks.dropTarget(), [connector])\n}\n","import type { ConnectDropTarget } from '../../types/index.js'\nimport type { DropTargetHookSpec, FactoryOrInstance } from '../types.js'\nimport { useRegisteredDropTarget } from './useRegisteredDropTarget.js'\nimport { useOptionalFactory } from '../useOptionalFactory.js'\nimport { useDropTargetMonitor } from './useDropTargetMonitor.js'\nimport { useDropTargetConnector } from './useDropTargetConnector.js'\nimport { useCollectedProps } from '../useCollectedProps.js'\nimport { useConnectDropTarget } from './connectors.js'\n\n/**\n * useDropTarget Hook\n * @param spec The drop target specification (object or function, function preferred)\n * @param deps The memoization deps array to use when evaluating spec changes\n */\nexport function useDrop<\n\tDragObject = unknown,\n\tDropResult = unknown,\n\tCollectedProps = unknown,\n>(\n\tspecArg: FactoryOrInstance<\n\t\tDropTargetHookSpec\n\t>,\n\tdeps?: unknown[],\n): [CollectedProps, ConnectDropTarget] {\n\tconst spec = useOptionalFactory(specArg, deps)\n\tconst monitor = useDropTargetMonitor()\n\tconst connector = useDropTargetConnector(spec.options)\n\tuseRegisteredDropTarget(spec, monitor, connector)\n\n\treturn [\n\t\tuseCollectedProps(spec.collect, monitor, connector),\n\t\tuseConnectDropTarget(connector),\n\t]\n}\n","import { useMemo } from 'react'\nimport type { FactoryOrInstance } from './types.js'\n\nexport function useOptionalFactory(\n\targ: FactoryOrInstance,\n\tdeps?: unknown[],\n): T {\n\tconst memoDeps = [...(deps || [])]\n\tif (deps == null && typeof arg !== 'function') {\n\t\tmemoDeps.push(arg)\n\t}\n\treturn useMemo(() => {\n\t\treturn typeof arg === 'function' ? 
(arg as () => T)() : (arg as T)\n\t}, memoDeps)\n}\n","import { useMemo } from 'react'\nimport { DropTargetMonitorImpl } from '../../internals/index.js'\nimport type { DropTargetMonitor } from '../../types/index.js'\nimport { useDragDropManager } from '../useDragDropManager.js'\n\nexport function useDropTargetMonitor(): DropTargetMonitor {\n\tconst manager = useDragDropManager()\n\treturn useMemo(() => new DropTargetMonitorImpl(manager), [manager])\n}\n","import { useMemo } from 'react'\nimport { TargetConnector } from '../../internals/index.js'\nimport type { DropTargetOptions } from '../../types/index.js'\nimport { useDragDropManager } from '../useDragDropManager.js'\nimport { useIsomorphicLayoutEffect } from '../useIsomorphicLayoutEffect.js'\n\nexport function useDropTargetConnector(\n\toptions: DropTargetOptions,\n): TargetConnector {\n\tconst manager = useDragDropManager()\n\tconst connector = useMemo(\n\t\t() => new TargetConnector(manager.getBackend()),\n\t\t[manager],\n\t)\n\tuseIsomorphicLayoutEffect(() => {\n\t\tconnector.dropTargetOptions = options || null\n\t\tconnector.reconnect()\n\t\treturn () => connector.disconnectDropTarget()\n\t}, [options])\n\treturn connector\n}\n","import { DropTargetMonitor, useDrop } from \"react-dnd\";\nimport { NativeTypes } from \"react-dnd-html5-backend\";\n\ntype DndFileDropItem = {\n files: FileList;\n items: DataTransferItemList;\n dataTransfer: DataTransfer;\n};\n\nexport const useDndFileDrop = (onDrop: (files: FileList) => void) => {\n return useDrop(\n () => ({\n accept: [NativeTypes.FILE],\n drop({ files }: DndFileDropItem) {\n if (onDrop) {\n onDrop(files);\n }\n },\n collect: (monitor: DropTargetMonitor) => ({\n isOver: monitor.isOver(),\n }),\n }),\n [onDrop]\n );\n};\n","import { createSlice, PayloadAction } from \"@reduxjs/toolkit\";\nimport { UNKNOWN_ANNOTATION_CATEGORY_ID } from \"store/data/constants\";\nimport { DecodedAnnotationObject } from \"store/data/types\";\nimport { ImageViewerState } from \"store/types\";\n\nimport { ZoomMode } from \"utils/annotator/enums\";\nimport {\n ColorAdjustmentOptionsType,\n ZoomToolOptionsType,\n} from \"utils/annotator/types\";\n\nimport { distinctFilter, mutatingFilter } from \"utils/common/helpers\";\n\nconst initialState: ImageViewerState = {\n imageStack: [],\n\n colorAdjustment: {\n blackPoint: 0,\n brightness: 0,\n contrast: 0,\n exposure: 0,\n highlights: 0,\n hue: 0,\n saturation: 0,\n shadows: 0,\n vibrance: 0,\n },\n cursor: \"default\",\n activeImageId: undefined,\n activeAnnotationIds: [],\n activeImageRenderedSrcs: [],\n imageOrigin: { x: 0, y: 0 },\n filters: { categoryId: [] },\n workingAnnotationId: undefined,\n\n workingAnnotation: { saved: undefined, changes: {} },\n selectedAnnotationIds: [],\n selectedCategoryId: UNKNOWN_ANNOTATION_CATEGORY_ID,\n stageHeight: 1000,\n stageScale: 1,\n stageWidth: 1000,\n stagePosition: { x: 0, y: 0 },\n zoomSelection: {\n dragging: false,\n minimum: undefined,\n maximum: undefined,\n selecting: false,\n centerPoint: undefined,\n },\n zoomOptions: {\n automaticCentering: true,\n mode: ZoomMode.In,\n scale: 1.0,\n toActualSize: false,\n toFit: false,\n },\n imageIsLoading: false,\n highlightedCategory: undefined,\n};\n\nexport const imageViewerSlice = createSlice({\n initialState: initialState,\n name: \"image-viewer\",\n reducers: {\n resetImageViewer: () => initialState,\n prepareImageViewer: (\n state,\n action: PayloadAction<{ selectedThingIds: string[] }>\n ) => {},\n setImageStack(state, action: PayloadAction<{ imageIds: string[] }>) {\n 
state.imageStack = action.payload.imageIds;\n },\n addActiveAnnotationId(\n state,\n action: PayloadAction<{ annotationId: string }>\n ) {\n state.activeAnnotationIds.push(action.payload.annotationId);\n },\n addActiveAnnotationIds(\n state,\n action: PayloadAction<{ annotationIds: Array }>\n ) {\n for (const annotationId of action.payload.annotationIds) {\n imageViewerSlice.caseReducers.addActiveAnnotationId(state, {\n type: \"addActiveAnnotationId\",\n payload: { annotationId },\n });\n }\n },\n setActiveAnnotationIds(\n state,\n action: PayloadAction<{\n annotationIds: Array;\n }>\n ) {\n state.activeAnnotationIds = [];\n imageViewerSlice.caseReducers.addActiveAnnotationIds(state, {\n type: \"addActiveAnnotationIds\",\n payload: { annotationIds: action.payload.annotationIds },\n });\n },\n removeActiveAnnotationId(\n state,\n action: PayloadAction<{\n annotationId: string;\n }>\n ) {\n mutatingFilter(\n state.activeAnnotationIds,\n (annotationId) => annotationId !== action.payload.annotationId\n );\n },\n removeActiveAnnotationIds(\n state,\n action: PayloadAction<{\n annotationIds: Array;\n }>\n ) {\n for (const annotationId of action.payload.annotationIds) {\n imageViewerSlice.caseReducers.removeActiveAnnotationId(state, {\n type: \"removeActiveAnnotationId\",\n payload: { annotationId },\n });\n }\n },\n addSelectedAnnotationId(\n state,\n action: PayloadAction<{ annotationId: string }>\n ) {\n state.selectedAnnotationIds.push(action.payload.annotationId);\n },\n addSelectedAnnotationIds(\n state,\n action: PayloadAction<{ annotationIds: Array }>\n ) {\n for (const annotationId of action.payload.annotationIds) {\n imageViewerSlice.caseReducers.addSelectedAnnotationId(state, {\n type: \"addSelectedAnnotationId\",\n payload: { annotationId },\n });\n }\n },\n setSelectedAnnotationIds(\n state,\n action: PayloadAction<{\n annotationIds: Array;\n workingAnnotationId?: string;\n }>\n ) {\n const { annotationIds, workingAnnotationId } = action.payload;\n state.selectedAnnotationIds = [];\n state.workingAnnotationId = workingAnnotationId\n ? workingAnnotationId\n : annotationIds[0];\n imageViewerSlice.caseReducers.addSelectedAnnotationIds(state, {\n type: \"addSelectedAnnotationIds\",\n payload: { annotationIds: action.payload.annotationIds },\n });\n },\n setAllSelectedAnnotationIds(state, action: PayloadAction<{}>) {\n state.selectedAnnotationIds = [];\n imageViewerSlice.caseReducers.addSelectedAnnotationIds(state, {\n type: \"addSelectedAnnotationIds\",\n payload: { annotationIds: state.activeAnnotationIds },\n });\n\n state.workingAnnotationId =\n state.workingAnnotationId ?? 
state.activeAnnotationIds[0];\n },\n removeSelectedAnnotationId(\n state,\n action: PayloadAction<{\n annotationId: string;\n }>\n ) {\n if (state.workingAnnotationId === action.payload.annotationId) {\n state.workingAnnotationId = undefined;\n }\n mutatingFilter(\n state.selectedAnnotationIds,\n (annotationId) => annotationId !== action.payload.annotationId\n );\n },\n removeSelectedAnnotationIds(\n state,\n action: PayloadAction<{\n annotationIds: string[];\n }>\n ) {\n for (const annotationId of action.payload.annotationIds) {\n imageViewerSlice.caseReducers.removeSelectedAnnotationId(state, {\n type: \"removeSelectedAnnotationId\",\n payload: { annotationId },\n });\n }\n },\n setWorkingAnnotation(\n state,\n action: PayloadAction<{\n annotation: DecodedAnnotationObject | string | undefined;\n preparedByListener?: boolean;\n }>\n ) {\n const { annotation, preparedByListener } = action.payload;\n if (!preparedByListener) return;\n\n state.workingAnnotation.saved = annotation as\n | DecodedAnnotationObject\n | undefined;\n state.workingAnnotation.changes = {};\n },\n updateWorkingAnnotation(\n state,\n action: PayloadAction<{ changes: Partial }>\n ) {\n if (state.workingAnnotation.saved) {\n state.workingAnnotation.changes = action.payload.changes;\n }\n },\n setSelectedCategoryId(\n state,\n action: PayloadAction<{ selectedCategoryId: string }>\n ) {\n state.selectedCategoryId = action.payload.selectedCategoryId;\n },\n setActiveImageId(\n state,\n action: PayloadAction<{\n imageId: string | undefined;\n prevImageId: string | undefined;\n }>\n ) {\n state.activeImageId = action.payload.imageId;\n // reset selected annotations\n },\n setActiveImageRenderedSrcs(\n state,\n action: PayloadAction<{\n renderedSrcs: Array;\n }>\n ) {\n state.activeImageRenderedSrcs = action.payload.renderedSrcs;\n },\n setImageOrigin(\n state,\n action: PayloadAction<{ origin: { x: number; y: number } }>\n ) {\n state.imageOrigin = action.payload.origin;\n },\n updateColorAdjustments(\n state,\n action: PayloadAction<{\n changes: Partial;\n }>\n ) {\n Object.assign(state.colorAdjustment, action.payload.changes);\n },\n setCursor(\n state,\n action: PayloadAction<{\n cursor: string;\n }>\n ) {\n state.cursor = action.payload.cursor;\n },\n setStageHeight(state, action: PayloadAction<{ stageHeight: number }>) {\n state.stageHeight = action.payload.stageHeight;\n },\n setStagePosition(\n state,\n action: PayloadAction<{ stagePosition: { x: number; y: number } }>\n ) {\n state.stagePosition = action.payload.stagePosition;\n },\n setStageScale(state, action: PayloadAction<{ stageScale: number }>) {\n state.stageScale = action.payload.stageScale;\n },\n setStageWidth(state, action: PayloadAction<{ stageWidth: number }>) {\n state.stageWidth = action.payload.stageWidth;\n },\n setZoomSelection(\n state,\n action: PayloadAction<{\n zoomSelection: {\n dragging: boolean;\n minimum: { x: number; y: number } | undefined;\n maximum: { x: number; y: number } | undefined;\n selecting: boolean;\n centerPoint: { x: number; y: number } | undefined;\n };\n }>\n ) {\n state.zoomSelection = action.payload.zoomSelection;\n },\n updateZoomSelection(\n state,\n action: PayloadAction<{\n changes: Partial<{\n dragging: boolean;\n minimum: { x: number; y: number } | undefined;\n maximum: { x: number; y: number } | undefined;\n selecting: boolean;\n centerPoint: { x: number; y: number } | undefined;\n }>;\n }>\n ) {\n Object.assign(state.zoomSelection, action.payload.changes);\n },\n setZoomToolOptions(\n state,\n action: 
PayloadAction<{ options: Partial }>\n ) {\n state.zoomOptions = { ...state.zoomOptions, ...action.payload.options };\n },\n setImageIsLoading(state, action: PayloadAction<{ isLoading: boolean }>) {\n state.imageIsLoading = action.payload.isLoading;\n },\n updateHighlightedAnnotationCategory(\n state,\n action: PayloadAction<{ categoryId: string | undefined }>\n ) {\n state.highlightedCategory = action.payload.categoryId;\n },\n addFilters(\n state,\n action: PayloadAction<{\n categoryIds: string[];\n }>\n ) {\n const newFilters = [\n ...state.filters[\"categoryId\"],\n ...action.payload.categoryIds,\n ].filter(distinctFilter);\n state.filters[\"categoryId\"] = newFilters;\n },\n removeFilters(\n state,\n action: PayloadAction<{\n categoryIds?: string[];\n all?: boolean;\n }>\n ) {\n if (action.payload.all) {\n state.filters[\"categoryId\"] = [];\n return;\n }\n if (action.payload.categoryIds) {\n mutatingFilter(\n state.filters[\"categoryId\"],\n (id) => !action.payload.categoryIds!.includes(id)\n );\n }\n },\n },\n});\n","import { createSlice, PayloadAction } from \"@reduxjs/toolkit\";\n\nimport { AnnotatorState } from \"store/types\";\n\nimport {\n AnnotationMode,\n AnnotationState,\n ToolType,\n} from \"utils/annotator/enums\";\nimport { AnnotationTool } from \"utils/annotator/tools/AnnotationTool\";\n\nconst initialState: AnnotatorState = {\n annotationState: AnnotationState.Blank,\n penSelectionBrushSize: 10,\n quickSelectionRegionSize: 40,\n thresholdAnnotationValue: 150,\n selectionMode: AnnotationMode.New,\n toolType: ToolType.RectangularAnnotation,\n};\n\nexport const annotatorSlice = createSlice({\n initialState: initialState,\n name: \"annotator\",\n reducers: {\n resetAnnotator: () => initialState,\n\n setAnnotationState(\n state,\n action: PayloadAction<{\n annotationState: AnnotationState;\n kind?: string;\n annotationTool: AnnotationTool;\n }>\n ) {\n state.annotationState = action.payload.annotationState;\n },\n\n setToolType(state, action: PayloadAction<{ operation: ToolType }>) {\n state.toolType = action.payload.operation;\n },\n setPenSelectionBrushSize(\n state,\n action: PayloadAction<{ penSelectionBrushSize: number }>\n ) {\n state.penSelectionBrushSize = action.payload.penSelectionBrushSize;\n },\n setQuickSelectionRegionSize(\n state,\n action: PayloadAction<{ quickSelectionRegionSize: number }>\n ) {\n state.quickSelectionRegionSize = action.payload.quickSelectionRegionSize;\n },\n\n setSelectionMode(\n state,\n action: PayloadAction<{ selectionMode: AnnotationMode }>\n ) {\n state.selectionMode = action.payload.selectionMode;\n },\n\n setThresholdAnnotationValue(\n state,\n action: PayloadAction<{ thresholdAnnotationValue: number }>\n ) {\n state.thresholdAnnotationValue = action.payload.thresholdAnnotationValue;\n },\n },\n});\n","import { StageContext } from \"contexts\";\nimport { useContext, useLayoutEffect, useState } from \"react\";\n\nexport const useMarchingAnts = () => {\n const [dashOffset, setDashOffset] = useState(0);\n const stageScale = useContext(StageContext)?.current?.scaleX() ?? 
1;\n useLayoutEffect(() => {\n let timerId: number;\n const f = () => {\n timerId = requestAnimationFrame(f);\n setDashOffset((prev) => (prev + 5 / stageScale) % 32);\n };\n\n timerId = requestAnimationFrame(f);\n\n return () => cancelAnimationFrame(timerId);\n });\n\n return dashOffset;\n};\n","import React, { useCallback, useState } from \"react\";\n\nexport const useMenu = () => {\n const [anchorEl, setAnchorEl] = React.useState(null);\n\n const [open, setOpen] = useState(false);\n\n const onClose = useCallback(() => {\n setOpen(false);\n setAnchorEl(null);\n }, []);\n\n const onOpen = useCallback((event: React.MouseEvent) => {\n setOpen(true);\n setAnchorEl(event.currentTarget);\n }, []);\n\n return {\n anchorEl,\n onClose,\n onOpen,\n open,\n };\n};\n","import { HotkeyHandlerItem } from \"./types\";\n\nconst isff =\n typeof navigator !== \"undefined\"\n ? navigator.userAgent.toLowerCase().indexOf(\"firefox\") > 0\n : false;\n\n// Convert modifier keys to corresponding key codes\nfunction getMods(modifier: Record, key: string[]) {\n const mod_names = key.slice(0, key.length - 1);\n let mod_keys = [];\n for (let i = 0; i < mod_names.length; i++)\n mod_keys.push(modifier[mod_names[i].toLowerCase()]);\n return mod_keys;\n}\n\n// Convert the passed key string to an array\nfunction getKeys(key: string) {\n key = key.replace(/\\s/g, \"\"); // matches any whitespace character, including spaces, tabs, form feeds, etc.\n const keys = key.split(\",\"); // Set multiple shortcut keys at the same time, separated by ','\n let index = keys.lastIndexOf(\"\");\n\n // Shortcut keys may contain ',', special handling is required\n for (; index >= 0; ) {\n keys[index - 1] += \",\";\n keys.splice(index, 1);\n index = keys.lastIndexOf(\"\");\n }\n\n return keys;\n}\n\n// Compare arrays of modifier keys\nfunction compareArray(a1: number[], a2: number[]) {\n const arr1 = a1.length >= a2.length ? a1 : a2;\n const arr2 = a1.length >= a2.length ? a2 : a1;\n let isIndex = true;\n\n for (let i = 0; i < arr1.length; i++) {\n if (arr2.indexOf(arr1[i]) === -1) isIndex = false;\n }\n return isIndex;\n}\n\nconst _keyMap: Record = {\n backspace: 8,\n \"⌫\": 8,\n tab: 9,\n clear: 12,\n enter: 13,\n \"↩\": 13,\n return: 13,\n\n esc: 27,\n escape: 27,\n space: 32,\n left: 37,\n up: 38,\n right: 39,\n down: 40,\n del: 46,\n delete: 46,\n ins: 45,\n insert: 45,\n home: 36,\n end: 35,\n pageup: 33,\n pagedown: 34,\n capslock: 20,\n num_0: 96,\n num_1: 97,\n num_2: 98,\n num_3: 99,\n num_4: 100,\n num_5: 101,\n num_6: 102,\n num_7: 103,\n num_8: 104,\n num_9: 105,\n num_multiply: 106,\n num_add: 107,\n num_enter: 108,\n num_subtract: 109,\n num_decimal: 110,\n num_divide: 111,\n \"⇪\": 20,\n \",\": 188,\n \".\": 190,\n \"/\": 191,\n \"`\": 192,\n \"-\": isff ? 173 : 189,\n \"=\": isff ? 61 : 187,\n \";\": isff ? 
59 : 186,\n \"'\": 222,\n \"[\": 219,\n \"]\": 221,\n \"\\\\\": 220,\n};\n\n// Modifier Keys\nconst _modifier: Record = {\n \"⇧\": 16,\n shift: 16,\n // altKey\n \"⌥\": 18,\n alt: 18,\n option: 18,\n // ctrlKey\n \"⌃\": 17,\n ctrl: 17,\n control: 17,\n // metaKey\n \"⌘\": 91,\n cmd: 91,\n command: 91,\n};\nconst modifierMap: Record = {\n 16: \"shiftKey\",\n 18: \"altKey\",\n 17: \"ctrlKey\",\n 91: \"metaKey\",\n\n shiftKey: 16,\n ctrlKey: 17,\n altKey: 18,\n metaKey: 91,\n};\nconst _mods: Record = {\n 18: false,\n 17: false,\n 91: false,\n};\n\nconst _handlers: Record = {};\n\nconst getCode = (x: string) =>\n _keyMap[x.toLowerCase()] ||\n _modifier[x.toLowerCase()] ||\n x.toUpperCase().charCodeAt(0);\n\n// F1~F12 special key\nfor (let k = 1; k < 20; k++) {\n _keyMap[`f${k}`] = 111 + k;\n}\n\nexport {\n _keyMap,\n _modifier,\n modifierMap,\n _mods,\n _handlers,\n isff,\n getMods,\n getKeys,\n compareArray,\n getCode,\n};\n","import { HotkeyHandlerItem } from \"./types\";\nimport {\n getMods,\n getKeys,\n getCode,\n compareArray,\n _keyMap,\n _modifier,\n modifierMap,\n _mods,\n _handlers,\n} from \"./hotkeyUtils\";\n\nlet _downKeys: number[] = []; // record the binding key pressed\nlet winListendFocus = false; // Whether the window has listened to the focus event\nconst elementHasBindEvent: Document[] = []; // Node records for bound events\n\n// Clear modifier keys\nfunction clearModifier(event: KeyboardEvent) {\n let key = event.keyCode || event.which || event.charCode;\n const i = _downKeys.indexOf(key);\n\n // Clear pressed key from list\n if (i >= 0) {\n _downKeys.splice(i, 1);\n }\n // Special treatment cmmand key, the problem that the combination\n // of shortcut keys keyup in cmmand is only executed once\n if (event.key && event.key.toLowerCase() === \"meta\") {\n _downKeys.splice(0, _downKeys.length);\n }\n\n // Modifier key shiftKey altKey ctrlKey (command||metaKey) clear\n if (key === 93 || key === 224) key = 91;\n if (key in _mods) {\n _mods[key] = false;\n\n // reset modifier keys to false\n for (const k in _modifier) if (_modifier[k] === key) setModifier(k, false);\n }\n}\n\nfunction unbind(keysInfo?: string, method?: Function): void {\n // unbind(), unbind all keys\n if (typeof keysInfo === \"undefined\") {\n Object.keys(_handlers).forEach((key) => delete _handlers[key]);\n } else if (typeof keysInfo === \"string\") {\n // support old method\n if (method) {\n eachUnbind({\n key: keysInfo,\n method,\n splitKey: \"+\",\n });\n }\n }\n}\n\n// Unbind a range of shortcut keys\nconst eachUnbind = ({\n key,\n method,\n splitKey = \"+\",\n}: {\n key: string;\n method: Function;\n splitKey: string;\n}) => {\n const multipleKeys = getKeys(key);\n multipleKeys.forEach((originKey) => {\n const unbindKeys = originKey.split(splitKey);\n const len = unbindKeys.length;\n const lastKey = unbindKeys[len - 1];\n const keyCode = lastKey === \"*\" ? \"*\" : getCode(lastKey);\n if (!_handlers[keyCode]) return;\n // Determine whether the range is passed in, if not, get the range\n\n const mods = len > 1 ? getMods(_modifier, unbindKeys) : [];\n _handlers[keyCode] = _handlers[keyCode].filter((record) => {\n // Judging by the function, whether to unbind, the function is equal and returns directly\n const isMatchingMethod = method ? 
record.method === method : true;\n return !(isMatchingMethod && compareArray(record.mods, mods));\n });\n });\n};\n\n// Process the callback function that monitors the corresponding shortcut key\nfunction eventHandler(\n event: KeyboardEvent,\n handler: HotkeyHandlerItem,\n element: Document\n) {\n if (handler.element !== element) {\n return;\n }\n let modifiersMatch;\n\n // Check if the modifier matches (return true if any)\n modifiersMatch = handler.mods.length > 0;\n\n for (const mod in _mods) {\n if (Object.prototype.hasOwnProperty.call(_mods, mod)) {\n if (\n (!_mods[mod] && handler.mods.indexOf(+mod) > -1) ||\n (_mods[mod] && handler.mods.indexOf(+mod) === -1)\n ) {\n modifiersMatch = false;\n }\n }\n }\n\n // Call the handler, if it is a modifier key, no processing\n if (\n (handler.mods.length === 0 && !_mods[17] && !_mods[91]) ||\n modifiersMatch ||\n handler.shortcut === \"*\"\n ) {\n if (handler.method(event, handler) === false) {\n if (event.preventDefault) event.preventDefault();\n else event.returnValue = false;\n if (event.stopPropagation) event.stopPropagation();\n if (event.cancelBubble) event.cancelBubble = true;\n }\n }\n}\n\n// Handling keydown events\nfunction dispatch(event: any, element: Document) {\n const asterisk = _handlers[\"*\"];\n let key = event.keyCode || event.which || event.charCode;\n // The command key value of Gecko (Firefox) is 224, which is consistent in Webkit (Chrome)\n // The left and right command keys of Webkit are different\n if (key === 93 || key === 224) key = 91;\n /**\n * Collect bound keys\n * If an Input Method Editor is processing key input and the event is keydown, return 229.\n * https://stackoverflow.com/questions/25043934/is-it-ok-to-ignore-keydown-events-with-keycode-229\n * http://lists.w3.org/Archives/Public/www-dom/2010JulSep/att-0182/keyCode-spec.html\n */\n if (_downKeys.indexOf(key) === -1 && key !== 229) _downKeys.push(key);\n\n [\"ctrlKey\", \"altKey\", \"metaKey\", \"shiftKey\"].forEach((keyName) => {\n const keyNum = modifierMap[keyName] as number;\n\n if (event[keyName] && _downKeys.indexOf(keyNum) === -1) {\n //modifier key is held and not in keydown list\n _downKeys.push(keyNum);\n } else if (!event[keyName] && _downKeys.indexOf(keyNum) > -1) {\n //modifier key isnt held down and is in keydown list\n if (keyName !== \"shiftKey\" && keyName !== \"altKey\") {\n //remove key from keydown list unless its the shift key\n _downKeys.splice(_downKeys.indexOf(keyNum), 1);\n }\n } else if (\n keyName === \"metaKey\" &&\n event[keyName] &&\n _downKeys.length === 3\n ) {\n // meta key is held down and there are three keys pressed\n //Fix if Command is pressed:\n if (!(event.ctrlKey || event.altKey)) {\n //if the control key and alt key arent pressed then remove meta key\n _downKeys = _downKeys.slice(_downKeys.indexOf(keyNum));\n }\n }\n });\n\n if (key in _mods && key !== 18) {\n _mods[key] = true;\n // Register keys with special characters to hotkeys\n for (const k in _modifier) {\n if (_modifier[k] === key) setModifier(k, true);\n }\n if (!asterisk) return;\n }\n\n // Bind modifier keys in modifierMap to event\n for (const e in _mods) {\n if (Object.prototype.hasOwnProperty.call(_mods, e)) {\n _mods[e] = event[modifierMap[e] as string];\n }\n }\n /**\n * https://github.com/jaywcjlove/hotkeys/pull/129\n * This solves the issue in Firefox on Windows where hotkeys corresponding to special characters would not trigger.\n * An example of this is ctrl+alt+m on a Swedish keyboard which is used to type μ.\n * Browser support: 
https://caniuse.com/#feat=keyboardevent-getmodifierstate\n */\n if (\n event.getModifierState &&\n !(event.altKey && !event.ctrlKey) &&\n event.getModifierState(\"AltGraph\")\n ) {\n if (_downKeys.indexOf(17) === -1) {\n _downKeys.push(17);\n }\n\n if (_downKeys.indexOf(18) === -1) {\n _downKeys.push(18);\n }\n\n _mods[17] = true;\n _mods[18] = true;\n }\n\n // What to do with any shortcut keys\n if (asterisk) {\n for (let i = 0; i < asterisk.length; i++) {\n if (\n (event.type === \"keydown\" && asterisk[i].keydown) ||\n (event.type === \"keyup\" && asterisk[i].keyup)\n ) {\n eventHandler(event, asterisk[i], element);\n }\n }\n }\n // key is not returned in _handlers\n if (!(key in _handlers)) return;\n\n for (let i = 0; i < _handlers[key].length; i++) {\n if (\n (event.type === \"keydown\" && _handlers[key][i].keydown) ||\n (event.type === \"keyup\" && _handlers[key][i].keyup)\n ) {\n if (_handlers[key][i].key) {\n const record = _handlers[key][i];\n const { splitKey } = record;\n const keyShortcut = record.key.split(splitKey);\n const _downKeysCurrent = []; // record the current key value\n for (let j = 0; j < keyShortcut.length; j++) {\n _downKeysCurrent.push(getCode(keyShortcut[j]));\n }\n if (_downKeysCurrent.sort().join(\"\") === _downKeys.sort().join(\"\")) {\n // find processing content\n eventHandler(event, record, element);\n }\n }\n }\n }\n}\n\n// Determine if element has bound events\nfunction isElementBind(element: Document) {\n return elementHasBindEvent.indexOf(element) > -1;\n}\ntype Option = {\n element: Document;\n keyup: boolean;\n keydown: boolean;\n capture: boolean;\n splitKey: string;\n};\nfunction hotkeys(hotkeys: string, option: Option, method: Function) {\n _downKeys = [];\n const keyList = getKeys(hotkeys); // Determine if element has bound events\n let mods: number[] = [];\n let element = document; // Shortcut key event binding node\n\n let keyup = false;\n let keydown = true;\n let splitKey = \"+\";\n let capture = false;\n\n // Judgment for the set range\n if (method === undefined && typeof option === \"function\") {\n method = option;\n }\n\n if (Object.prototype.toString.call(option) === \"[object Object]\") {\n if (option.element) element = option.element; // eslint-disable-line\n if (option.keyup) keyup = option.keyup; // eslint-disable-line\n if (option.keydown !== undefined) keydown = option.keydown; // eslint-disable-line\n if (option.capture !== undefined) capture = option.capture; // eslint-disable-line\n if (typeof option.splitKey === \"string\") splitKey = option.splitKey; // eslint-disable-line\n }\n\n // for each shortcut key\n for (let i = 0; i < keyList.length; i++) {\n let key = keyList[i].split(splitKey); // key list\n\n mods = [];\n\n // If it is a combination shortcut key, get the combination shortcut key\n if (key.length > 1) mods = getMods(_modifier, key);\n\n // Convert non-modifier keys to keycodes\n let hotkey = key.at(-1)!;\n let keyCode = hotkey === \"*\" ? 
\"*\" : getCode(hotkey); // * means match all shortcut keys\n // Determine whether the key is in _handlers, if not, assign an empty array\n if (!(keyCode in _handlers)) _handlers[keyCode] = [];\n _handlers[keyCode].push({\n keyup,\n keydown,\n mods,\n shortcut: keyList[i],\n method,\n key: keyList[i],\n splitKey,\n element,\n });\n }\n // Set shortcut keys on the global document\n if (typeof element !== \"undefined\" && !isElementBind(element) && window) {\n elementHasBindEvent.push(element);\n\n element.addEventListener(\n \"keydown\",\n (e) => {\n dispatch(e, element);\n },\n capture\n );\n if (!winListendFocus) {\n winListendFocus = true;\n\n window.addEventListener(\n \"focus\",\n () => {\n _downKeys = [];\n },\n capture\n );\n }\n element.addEventListener(\n \"keyup\",\n (e) => {\n dispatch(e, element);\n clearModifier(e);\n },\n capture\n );\n }\n}\n\nfunction trigger(shortcut: string) {\n Object.keys(_handlers).forEach((key) => {\n const data = _handlers[key].find((item) => item.shortcut === shortcut);\n if (data && data.method) {\n data.method();\n }\n });\n}\n\nfunction setModifier(modifier: string, value: boolean) {\n switch (modifier) {\n case \"shift\" || \"⇧\":\n hotkeys.shift = value;\n return;\n case \"⌥\" || \"alt\" || \"option\":\n hotkeys.alt = value;\n return;\n case \"⌃\" || \"ctrl\" || \"control\":\n hotkeys.control = value;\n return;\n case \"⌘\" || \"cmd\" || \"command\":\n hotkeys.command = value;\n return;\n }\n}\n\nhotkeys.trigger = trigger;\nhotkeys.unbind = unbind;\nhotkeys.keyMap = _keyMap;\nhotkeys.modifier = _modifier;\nhotkeys.modifierMap = modifierMap;\nhotkeys.command = false;\nhotkeys.shift = false;\nhotkeys.alt = false;\nhotkeys.control = false;\n\nexport default hotkeys;\n","//@ts-nocheck\nimport hotkeys from \"utils/common/hotkeys\"; //{ HotkeysEvent, KeyHandler }\nimport { useCallback, useEffect } from \"react\";\nimport { useSelector } from \"react-redux\";\nimport {\n HotkeyAvailableTags,\n HotkeyOptions,\n HotkeysEvent,\n HotkeyKeyHandler,\n} from \"utils/common/types\";\nimport { selectHotkeyView } from \"store/applicationSettings/selectors\";\nimport { HotkeyView } from \"utils/common/enums\";\n\n// We implement our own custom filter system.\n\nconst tagFilter = (\n { target }: KeyboardEvent,\n enableOnTags?: HotkeyAvailableTags[]\n) => {\n const targetTagName = target && (target as HTMLElement).tagName;\n\n return Boolean(\n targetTagName &&\n enableOnTags &&\n enableOnTags.includes(targetTagName as HotkeyAvailableTags)\n );\n};\n\nconst isKeyboardEventTriggeredByInput = (ev: KeyboardEvent) => {\n return tagFilter(ev, [\"INPUT\", \"TEXTAREA\", \"SELECT\"]);\n};\n\nexport function useHotkeys(\n keys: string,\n callback: HotkeyKeyHandler,\n hotkeyView: HotkeyView | Array,\n options?: HotkeyOptions\n): void;\nexport function useHotkeys(\n keys: string,\n callback: HotkeyKeyHandler,\n hotkeyView: HotkeyView | Array,\n deps?: any[]\n): void;\nexport function useHotkeys(\n keys: string,\n callback: HotkeyKeyHandler,\n hotkeyView: HotkeyView | Array,\n options?: HotkeyOptions,\n deps?: any[]\n): void;\nexport function useHotkeys(\n keys: string,\n callback: () => void,\n hotkeyView: HotkeyView | Array,\n options?: any[] | HotkeyOptions,\n deps?: any[]\n): void {\n if (options instanceof Array) {\n deps = options;\n options = undefined;\n }\n\n const {\n enableOnTags,\n filter,\n keyup,\n keydown,\n filterPreventDefault = true,\n enabled = true,\n enableOnContentEditable = false,\n } = (options as HotkeyOptions) || {};\n const currentHotkeyView = 
useSelector(selectHotkeyView);\n // The return value of this callback determines if the browsers default behavior is prevented.\n\n const memoisedCallback = useCallback(\n (keyboardEvent: KeyboardEvent, hotkeysEvent: HotkeysEvent) => {\n if (filter && !filter(keyboardEvent)) {\n return !filterPreventDefault;\n }\n\n // Check whether the hotkeys was triggered inside an input and that input is enabled or if it was triggered by a content editable tag and it is enabled.\n if (\n (isKeyboardEventTriggeredByInput(keyboardEvent) &&\n !tagFilter(keyboardEvent, enableOnTags)) ||\n ((keyboardEvent.target as HTMLElement)?.isContentEditable &&\n !enableOnContentEditable)\n ) {\n return true;\n }\n\n if (\n (Array.isArray(hotkeyView) && hotkeyView.includes(currentHotkeyView)) ||\n (!Array.isArray(hotkeyView) && hotkeyView === currentHotkeyView)\n ) {\n callback(keyboardEvent, hotkeysEvent);\n return true;\n }\n\n return false;\n }, //eslint-disable-next-line react-hooks/exhaustive-deps\n deps\n ? [hotkeyView, currentHotkeyView, enableOnTags, filter, ...deps]\n : [hotkeyView, currentHotkeyView, enableOnTags, filter]\n );\n\n useEffect(() => {\n if (!enabled) {\n hotkeys.unbind(keys, memoisedCallback);\n\n return;\n }\n\n // In this case keydown is likely undefined, so we set it to false,\n // since hotkeys sets `keydown` to true in absense of explicit setting.\n if (keyup && keydown !== true) {\n (options as HotkeyOptions).keydown = false;\n }\n\n hotkeys(keys, (options as HotkeyOptions) || {}, memoisedCallback);\n\n return () => hotkeys.unbind(keys, memoisedCallback);\n }, [keyup, keydown, options, memoisedCallback, keys, enabled]);\n}\n","import { useCallback, useState } from \"react\";\nimport { batch, useDispatch, useSelector } from \"react-redux\";\nimport { useHotkeys } from \"hooks/useHotkeys\";\nimport { imageViewerSlice } from \"store/imageViewer\";\n\nimport { getOverlappingAnnotations } from \"utils/annotator\";\nimport { getAnnotationsInBox } from \"utils/annotator/imageHelper\";\nimport { Point } from \"utils/annotator/types\";\nimport { ToolType } from \"utils/annotator/enums\";\nimport { HotkeyView } from \"utils/common/enums\";\nimport { DecodedAnnotationObject } from \"store/data/types\";\nimport { selectActiveImageId } from \"store/imageViewer/selectors\";\nimport { selectActiveAnnotations } from \"store/imageViewer/reselectors\";\n\nconst delta = 10;\n\nexport const usePointerTool = (\n absolutePosition: any,\n deselectAllAnnotations: any,\n selectedAnnotationsIds: any,\n toolType: any\n) => {\n const dispatch = useDispatch();\n const activeImageId = useSelector(selectActiveImageId);\n const activeAnnotations = useSelector(selectActiveAnnotations);\n const [currentIndex, setCurrentIndex] = useState(0);\n const [shift, setShift] = useState(false);\n const [dragging, setDragging] = useState(false);\n const [minimum, setMinimum] = useState();\n const [maximum, setMaximum] = useState();\n const [selecting, setSelecting] = useState(false);\n\n useHotkeys(\n \"shift\",\n (event) => {\n if (event.type === \"keydown\") {\n setShift(true);\n } else {\n setShift(false);\n }\n },\n HotkeyView.Annotator,\n { keyup: true, keydown: true }\n );\n\n /*\n * * HANDLE POINTER FUNCTIONS * *\n */\n\n const onPointerMouseDown = useCallback(\n (position: { x: number; y: number }) => {\n setDragging(false);\n setMinimum(position);\n setSelecting(true);\n },\n []\n );\n\n const handlePointerMouseMove = useCallback(\n (position: { x: number; y: number }) => {\n if (!position || !selecting || !minimum) 
return;\n\n setDragging(Math.abs(position.x - minimum.x) >= delta);\n setMaximum(position);\n },\n [minimum, selecting]\n );\n\n const selectEnclosedAnnotations = useCallback(\n (position: { x: number; y: number }) => {\n if (!position || !selecting || !minimum) return;\n // correct minimum or maximum in the case where user may have selected rectangle from right to left\n\n const minimumNew: { x: number; y: number } = {\n x: minimum.x > position.x ? position.x : minimum.x,\n y: minimum.y > position.y ? position.y : minimum.y,\n };\n const maximumNew: { x: number; y: number } = {\n x: minimum.x > position.x ? minimum.x : position.x,\n y: minimum.y > position.y ? minimum.y : position.y,\n };\n\n if (!minimumNew || !activeAnnotations.length) {\n setSelecting(false);\n return;\n }\n\n const annotationsInBox = getAnnotationsInBox(\n minimumNew,\n maximumNew,\n activeAnnotations\n );\n\n if (annotationsInBox.length) {\n let newSelectedAnnotations: string[] = annotationsInBox.map(\n (an) => an.id\n );\n if (shift) {\n newSelectedAnnotations = [\n ...selectedAnnotationsIds,\n ...newSelectedAnnotations,\n ];\n } else {\n //only include if not already selected\n const additionalAnnotations = newSelectedAnnotations.filter(\n (id: string) => {\n return !selectedAnnotationsIds.includes(id);\n }\n );\n newSelectedAnnotations = [\n ...selectedAnnotationsIds,\n ...additionalAnnotations,\n ];\n }\n batch(() => {\n dispatch(\n imageViewerSlice.actions.setSelectedAnnotationIds({\n annotationIds: newSelectedAnnotations,\n workingAnnotationId: newSelectedAnnotations[0],\n })\n );\n dispatch(\n imageViewerSlice.actions.setWorkingAnnotation({\n annotation: activeAnnotations.filter(\n (annotation) => annotation.id === newSelectedAnnotations[0]\n )[0],\n })\n );\n });\n }\n\n setSelecting(false);\n },\n [\n activeAnnotations,\n dispatch,\n minimum,\n selectedAnnotationsIds,\n selecting,\n shift,\n ]\n );\n\n const handleClick = useCallback(() => {\n if (\n toolType !== ToolType.Pointer ||\n !absolutePosition ||\n !activeAnnotations.length ||\n !activeImageId\n )\n return;\n let currentAnnotation: DecodedAnnotationObject | undefined;\n\n const overlappingAnnotationIds = getOverlappingAnnotations(\n absolutePosition,\n activeAnnotations as DecodedAnnotationObject[]\n );\n\n if (overlappingAnnotationIds.length === 0) {\n deselectAllAnnotations();\n dispatch(\n imageViewerSlice.actions.setWorkingAnnotation({\n annotation: undefined,\n })\n );\n } else if (overlappingAnnotationIds.length > 1) {\n setCurrentIndex((currentIndex) => {\n return currentIndex + 1 === overlappingAnnotationIds.length\n ? 
0\n : currentIndex + 1;\n });\n\n const nextAnnotationId = overlappingAnnotationIds[currentIndex];\n\n currentAnnotation = activeAnnotations.find(\n (annotation: DecodedAnnotationObject) => {\n return annotation.id === nextAnnotationId;\n }\n );\n } else {\n currentAnnotation = activeAnnotations.find(\n (annotation: DecodedAnnotationObject) => {\n return annotation.id === overlappingAnnotationIds[0];\n }\n );\n setCurrentIndex(0);\n }\n\n if (!currentAnnotation) return;\n\n if (!shift) {\n batch(() => {\n dispatch(\n imageViewerSlice.actions.setSelectedAnnotationIds({\n annotationIds: [currentAnnotation!.id],\n workingAnnotationId: currentAnnotation?.id,\n })\n );\n dispatch(\n imageViewerSlice.actions.setWorkingAnnotation({\n annotation: currentAnnotation!,\n })\n );\n dispatch(\n imageViewerSlice.actions.setSelectedCategoryId({\n selectedCategoryId: currentAnnotation!.categoryId,\n })\n );\n });\n }\n\n if (shift && !selectedAnnotationsIds.includes(currentAnnotation.id)) {\n //include newly selected annotation if not already selected\n dispatch(\n imageViewerSlice.actions.setSelectedAnnotationIds({\n annotationIds: [...selectedAnnotationsIds, currentAnnotation.id],\n workingAnnotationId: currentAnnotation.id,\n })\n );\n dispatch(\n imageViewerSlice.actions.setWorkingAnnotation({\n annotation: currentAnnotation,\n })\n );\n }\n }, [\n activeAnnotations,\n currentIndex,\n dispatch,\n activeImageId,\n selectedAnnotationsIds,\n shift,\n toolType,\n deselectAllAnnotations,\n absolutePosition,\n ]);\n\n const handlePointerMouseUp = useCallback(\n (position: { x: number; y: number }) => {\n if (!position || !selecting || !minimum) return;\n if (dragging) {\n // correct minimum or maximum in the case where user may have selected rectangle from right to left\n selectEnclosedAnnotations(position);\n } else {\n handleClick();\n }\n setDragging(false);\n setSelecting(false);\n },\n [dragging, minimum, selecting, selectEnclosedAnnotations, handleClick]\n );\n\n return {\n onPointerMouseDown,\n handlePointerMouseMove,\n handlePointerMouseUp,\n dragging,\n minimum,\n maximum,\n selecting,\n };\n};\n","import * as React from 'react';\nimport { getThemeProps, useThemeWithoutDefault as useTheme } from '@mui/system';\nimport useEnhancedEffect from '../utils/useEnhancedEffect';\n\n/**\n * @deprecated Not used internally. Use `MediaQueryListEvent` from lib.dom.d.ts instead.\n */\n\n/**\n * @deprecated Not used internally. Use `MediaQueryList` from lib.dom.d.ts instead.\n */\n\n/**\n * @deprecated Not used internally. 
Use `(event: MediaQueryListEvent) => void` instead.\n */\n\nfunction useMediaQueryOld(query, defaultMatches, matchMedia, ssrMatchMedia, noSsr) {\n const [match, setMatch] = React.useState(() => {\n if (noSsr && matchMedia) {\n return matchMedia(query).matches;\n }\n if (ssrMatchMedia) {\n return ssrMatchMedia(query).matches;\n }\n\n // Once the component is mounted, we rely on the\n // event listeners to return the correct matches value.\n return defaultMatches;\n });\n useEnhancedEffect(() => {\n let active = true;\n if (!matchMedia) {\n return undefined;\n }\n const queryList = matchMedia(query);\n const updateMatch = () => {\n // Workaround Safari wrong implementation of matchMedia\n // TODO can we remove it?\n // https://github.com/mui/material-ui/pull/17315#issuecomment-528286677\n if (active) {\n setMatch(queryList.matches);\n }\n };\n updateMatch();\n // TODO: Use `addEventListener` once support for Safari < 14 is dropped\n queryList.addListener(updateMatch);\n return () => {\n active = false;\n queryList.removeListener(updateMatch);\n };\n }, [query, matchMedia]);\n return match;\n}\n\n// eslint-disable-next-line no-useless-concat -- Workaround for https://github.com/webpack/webpack/issues/14814\nconst maybeReactUseSyncExternalStore = React['useSyncExternalStore' + ''];\nfunction useMediaQueryNew(query, defaultMatches, matchMedia, ssrMatchMedia, noSsr) {\n const getDefaultSnapshot = React.useCallback(() => defaultMatches, [defaultMatches]);\n const getServerSnapshot = React.useMemo(() => {\n if (noSsr && matchMedia) {\n return () => matchMedia(query).matches;\n }\n if (ssrMatchMedia !== null) {\n const {\n matches\n } = ssrMatchMedia(query);\n return () => matches;\n }\n return getDefaultSnapshot;\n }, [getDefaultSnapshot, query, ssrMatchMedia, noSsr, matchMedia]);\n const [getSnapshot, subscribe] = React.useMemo(() => {\n if (matchMedia === null) {\n return [getDefaultSnapshot, () => () => {}];\n }\n const mediaQueryList = matchMedia(query);\n return [() => mediaQueryList.matches, notify => {\n // TODO: Use `addEventListener` once support for Safari < 14 is dropped\n mediaQueryList.addListener(notify);\n return () => {\n mediaQueryList.removeListener(notify);\n };\n }];\n }, [getDefaultSnapshot, matchMedia, query]);\n const match = maybeReactUseSyncExternalStore(subscribe, getSnapshot, getServerSnapshot);\n return match;\n}\nexport default function useMediaQuery(queryInput, options = {}) {\n const theme = useTheme();\n // Wait for jsdom to support the match media feature.\n // All the browsers MUI support have this built-in.\n // This defensive check is here for simplicity.\n // Most of the time, the match media logic isn't central to people tests.\n const supportMatchMedia = typeof window !== 'undefined' && typeof window.matchMedia !== 'undefined';\n const {\n defaultMatches = false,\n matchMedia = supportMatchMedia ? window.matchMedia : null,\n ssrMatchMedia = null,\n noSsr = false\n } = getThemeProps({\n name: 'MuiUseMediaQuery',\n props: options,\n theme\n });\n if (process.env.NODE_ENV !== 'production') {\n if (typeof queryInput === 'function' && theme === null) {\n console.error(['MUI: The `query` argument provided is invalid.', 'You are providing a function without a theme in the context.', 'One of the parent elements needs to use a ThemeProvider.'].join('\\n'));\n }\n }\n let query = typeof queryInput === 'function' ? 
queryInput(theme) : queryInput;\n query = query.replace(/^@media( ?)/m, '');\n\n // TODO: Drop `useMediaQueryOld` and use `use-sync-external-store` shim in `useMediaQueryNew` once the package is stable\n const useMediaQueryImplementation = maybeReactUseSyncExternalStore !== undefined ? useMediaQueryNew : useMediaQueryOld;\n const match = useMediaQueryImplementation(query, defaultMatches, matchMedia, ssrMatchMedia, noSsr);\n if (process.env.NODE_ENV !== 'production') {\n // eslint-disable-next-line react-hooks/rules-of-hooks\n React.useDebugValue({\n query,\n match\n });\n }\n return match;\n}","import { createTheme } from \"@mui/material/styles\";\n\nexport const lightTheme = createTheme({\n palette: {\n contrastThreshold: 4.5, // contrast ration needs to be 4.5:1 for accessibility\n },\n components: {\n MuiButtonBase: {\n defaultProps: {\n disableRipple: true,\n },\n },\n MuiListItemIcon: {\n styleOverrides: {\n root: {\n minWidth: 36,\n },\n },\n },\n },\n});\n\nexport const darkTheme = createTheme({\n components: {\n MuiButtonBase: {\n defaultProps: {\n disableRipple: true,\n },\n },\n MuiDialog: {\n styleOverrides: {\n paper: {\n backgroundImage: \"none\",\n },\n },\n },\n MuiDrawer: {\n styleOverrides: {\n paperAnchorDockedLeft: {\n borderRight: \"1px solid rgba(16, 16, 16)\",\n },\n paperAnchorDockedRight: {\n borderLeft: \"1px solid rgba(16, 16, 16)\",\n },\n paper: {\n // boxShadow: \"inset 0 0 16px #000000\",\n },\n },\n },\n MuiListItem: {\n styleOverrides: {\n root: {\n \"&.Mui-selected\": {\n backgroundColor: \"rgba(60, 61, 62)\",\n },\n },\n },\n },\n MuiListItemIcon: {\n styleOverrides: {\n root: {\n minWidth: 36,\n },\n },\n },\n MuiSlider: {\n styleOverrides: {\n rail: {\n color: \"rgba(73, 73, 73)\",\n },\n thumb: {\n color: \"rgba(201, 201, 201)\",\n },\n track: {\n color: \"rgba(159, 159, 159)\",\n },\n },\n },\n },\n palette: {\n contrastThreshold: 4.5, // contrast ration needs to be 4.5:1 for accessibility\n background: {\n paper: \"rgba(40, 40, 40)\",\n default: \"rgba(50, 50, 50)\",\n },\n divider: \"rgba(72, 72, 72)\",\n text: {\n primary: \"rgba(200, 200, 200)\",\n },\n mode: \"dark\",\n },\n});\n","import { Theme } from \"@nivo/core\";\nimport { lightTheme, darkTheme } from \"./muiTheme\";\nimport { ThemeMode } from \"./enums\";\n\nexport const getNivoTheme = (themeMode: ThemeMode) => {\n const theme = themeMode === ThemeMode.Light ? 
lightTheme : darkTheme;\n\n const nivoTheme: Theme = {\n fontSize: 14,\n axis: {\n legend: {\n text: {\n fontSize: 16,\n fill: theme.palette.text.primary,\n },\n },\n ticks: {\n line: {\n stroke: theme.palette.text.primary,\n strokeWidth: 1,\n },\n text: {\n fontSize: 14,\n fill: theme.palette.text.primary,\n },\n },\n },\n legends: {\n text: {\n fill: theme.palette.text.primary,\n },\n },\n grid: {\n line: {\n stroke: theme.palette.grey[100],\n },\n },\n };\n\n return nivoTheme;\n};\n","import { useEffect, useState } from \"react\";\nimport { useSelector } from \"react-redux\";\nimport { Theme } from \"@nivo/core\";\n\nimport { getNivoTheme } from \"themes/nivoTheme\";\nimport { selectThemeMode } from \"store/applicationSettings/selectors\";\n\nexport const usePreferredNivoTheme = () => {\n const themeMode = useSelector(selectThemeMode);\n const [theme, setTheme] = useState(getNivoTheme(themeMode));\n\n useEffect(() => {\n setTheme(getNivoTheme(themeMode));\n }, [themeMode]);\n\n return theme;\n};\n","// prettier-ignore\nimport {language} from \"./language\";\n\nconst de: language = {\n translation: {\n Cancel: \"Abbrechen\",\n Categories: \"Kategorien\",\n \"Create category\": \"Neue Kategorie\",\n \"Create new classifier\": \"Neuer Classifier\",\n \"Delete category\": \"Lösche Kategorie\",\n \"Delete images\": \"Lösche Bilder\",\n Description: \"Beschreibung\",\n Edit: \"Edit\",\n \"Edit category\": \"Bearbeite Kategorie\",\n Help: \"Hilfe\",\n \"Hide other categories\": \"Blende andere Kategorien aus\",\n \"Hide sidebar\": \"Blende Sidebar aus\",\n \"Import images\": \"Bilder hinzufügen\",\n Logo: \"Logo\",\n Model: \"Modell\",\n \"Open classifier\": \"Öffne Classifier\",\n \"Open example classifier\": \"Öffne Beispiel Classifier\",\n \"Open weights\": \"Öffne Gewichte\",\n Open: \"Öffnen\",\n \"Run classifier\": \"Starte Classfier\",\n \"Save annotations and predictions\": \"Speicher Labels und Predictions\",\n \"Save classifier\": \"Speicher Classifier\",\n \"Save weights\": \"Speicher Gewichte\",\n Save: \"Speichern\",\n \"Search images\": \"Durchsuche Bilder\",\n \"Send feedback\": \"Sende Feedback\",\n Settings: \"Einstellungen\",\n \"Show sidebar\": \"Blende Sidebar ein\",\n Unknown: \"Unbekannt\",\n },\n};\n\nexport default de;\n","import { language } from \"./language\";\n\nconst en: language = {\n translation: {\n Cancel: \"Cancel\",\n Categories: \"Categories\",\n \"Create category\": \"Create category\",\n \"Create new classifier\": \"Create new classifier\",\n \"Delete category\": \"Delete category\",\n \"Delete images\": \"Delete images\",\n Description: \"Description\",\n Edit: \"Edit\",\n \"Edit category\": \"Edit category\",\n Help: \"Help\",\n \"Hide other categories\": \"Hide other categories\",\n \"Hide sidebar\": \"Hide sidebar\",\n \"Import images\": \"Import images\",\n Logo: \"Logo\",\n Model: \"Model\",\n \"Open classifier\": \"Open classifier\",\n \"Open example classifier\": \"Open example classifier\",\n \"Open weights\": \"Open weights\",\n Open: \"Open\",\n \"Run classifier\": \"Run classifier\",\n \"Save annotations and predictions\": \"Save annotations and predictions\",\n \"Save classifier\": \"Save classifier\",\n \"Save weights\": \"Save weights\",\n Save: \"Save\",\n \"Search images\": \"Search images\",\n \"Send feedback\": \"Send feedback\",\n Settings: \"Settings\",\n \"Show sidebar\": \"Show sidebar\",\n Unknown: \"Unknown\",\n },\n};\n\nexport default en;\n","import { language } from \"./language\";\n\nconst fas: language = {\n translation: {\n 
\"Actual size\": \"سایز اصلی\",\n \"Add area\": \"ناحیه اضافه کنید\",\n \"Annotation mode\": \"حالت گروه توضیحی\",\n Cancel: \"لفو کردن\",\n Categories: \"دسته بندی کردن\",\n \"Center image automatically\":\n \"تصویر را به طور اتوماتیک در مرکز قرار می دهد\",\n \"Color annotation\": \"توضیح رنگ ها\",\n \"Create category\": \"ایجاد دسته بندی\",\n \"Create new classifier\": \"ایجاد طبقه بندی جدید\",\n Create: \"ایجاد\",\n \"Delete category\": \"پاک کردن دسته بندی\",\n \"Delete images\": \"پاک کردن تصویر\",\n Description: \"توضیحات\",\n Edit: \"ویرایش\",\n \"Edit category\": \"ویرایش دسته بندی\",\n \"Elliptical annotation\": \"گروه توضیحی بیضوی\",\n Export: \"فرستادن\",\n \"Export annotations\": \"فرستادن گروه توضیحی\",\n \"Freehand annotation\": \"گروه توضیحی آزاد\",\n Help: \"کمک یابی\",\n \"Hide category\": \"پنهان سازی طبقه بندی\",\n \"Hide other categories\": \"پنهان سازی سایر طبقه بندی ها\",\n \"Hide sidebar\": \"پنهان سازی منوی کناری\",\n \"Import images\": \"آوردن تصاویر\",\n Intersection: \"تلاقی کردن\",\n \"Invert annotation\": \"معکوس کردن گروه توضیحی\",\n \"Lasso annotation\": \"گروه توضیحی کمند\",\n Logo: \"علامت\",\n \"Magnetic annotation\": \"گروه توضیحی آهنربایی\",\n Model: \"نمونه\",\n \"Object annotation\": \"گروه توضیحی هدف\",\n \"Open classifier\": \"باز کردن طبقه بندی\",\n \"Open example classifier\": \"باز کردن طبقه بندی نمونه\",\n \"Open image\": \"باز کردن تصویر\",\n \"Open weights\": \"باز کردن وزن ها\",\n Open: \"باز کردن\",\n \"Pen annotation\": \"گروه توضیحی خودکار\",\n Pointer: \"نشانه گر\",\n \"Polygonal annotation\": \"گروه توضیحی چند ظلعی\",\n Name: \"نام\",\n \"New annotation\": \"گروه توضیحی جدید\",\n \"Quick annotation\": \"گروه توضیحی سریع\",\n \"Rectangular annotation\": \"گروه توضیحی مستطیلی\",\n \"Run classifier\": \"اجرا کردن طبقه بندی\",\n \"Save annotations and predictions\":\n \"ذخیره کردن گروه های توضیحی و پیش بینی ها\",\n \"Save classifier\": \"ذخیره کردن طبقه بندی\",\n \"Save weights\": \"ذخیره کردن وزن ها\",\n Save: \"ذخیره کردن\",\n \"Search images\": \"جستجوی تصاویر\",\n \"Send feedback\": \"فرستادن پیشنهادات\",\n Settings: \"تنظیمات\",\n \"Show category\": \"نشان دادن گروه ها\",\n \"Show sidebar\": \"نشان دادن منوی کناری\",\n \"Subtract area\": \"کم کردن ناحیه\",\n Unknown: \"تعریف نشده\",\n Zoom: \"بزرگ نمایی\",\n \"Zoom in\": \"بزرگ نمایی کردن\",\n \"Zoom mode\": \"حالت بزرگ نمایی\",\n \"Zoom out\": \"کوچک نمایی کردن\",\n },\n};\n\nexport default fas;\n","// prettier-ignore\nimport {language} from \"./language\";\n\nconst fi: language = {\n translation: {\n Cancel: \"Peruuta\",\n Categories: \"Kategoriat\",\n \"Create category\": \"Luo kategoria\",\n \"Create new classifier\": \"Luo uusi luokittelija\",\n \"Delete category\": \"Poista kategoria\",\n \"Delete images\": \"Poista kuvat\",\n Description: \"Kuvaus\",\n Edit: \"Edit\",\n \"Edit category\": \"Muokkaa kategoriaa\",\n Help: \"Apua\",\n \"Hide other categories\": \"Piilota muut kategoriat\",\n \"Hide sidebar\": \"Piilota sivupalkki\",\n \"Import images\": \"Tuo kuvat\",\n Logo: \"Logo\",\n Model: \"Malli\",\n \"Open classifier\": \"Avaa luokittelija\",\n \"Open example classifier\": \"Avaa esimerkkiluokittelija\",\n \"Open weights\": \"Avaa painot\",\n Open: \"Avaa\",\n \"Run classifier\": \"Suorita luokittelija\",\n \"Save annotations and predictions\":\n \"Tallenna annotaatiot ja luokittelun tulos\",\n \"Save classifier\": \"Tallenna luokittelija\",\n \"Save weights\": \"Tallenna painot\",\n Save: \"Tallenna\",\n \"Search images\": \"Etsi kuvia\",\n \"Send feedback\": \"Lähetä 
palautetta\",\n Settings: \"Asetukset\",\n \"Show sidebar\": \"Näytä sivupalkki\",\n Unknown: \"Tuntematon\",\n },\n};\n\nexport default fi;\n","import { language } from \"./language\";\n\nconst fr: language = {\n translation: {\n \"Actual size\": \"Taille réelle\",\n \"Add area\": \"Ajouter région\",\n \"Annotation mode\": \"Mode d'annotation\",\n Cancel: \"Annuler\",\n Categories: \"Catégories\",\n \"Center image automatically\": \"Centrer image automatiquement\",\n \"Color annotation\": \"Annotation couleur\",\n \"Create category\": \"Nouvelle catégorie\",\n \"Create new classifier\": \"Nouveau classifieur\",\n Create: \"Ajouter\",\n \"Delete category\": \"Supprimer catégorie\",\n \"Delete images\": \"Supprimer images\",\n Description: \"Description\",\n Edit: \"Modifier\",\n \"Edit category\": \"Modifier catégorie\",\n \"Elliptical annotation\": \"Annotation elliptique\",\n Export: \"Exporter\",\n \"Export annotations\": \"Exporter annotations\",\n \"Freehand annotation\": \"Annotation main-libres\",\n Help: \"Aide\",\n \"Hide category\": \"Cacher catégorie\",\n \"Hide other categories\": \"Cacher autres catégories\",\n \"Hide sidebar\": \"Cacher sidebar\",\n \"Import images\": \"Importer images\",\n Intersection: \"Intersection\",\n \"Invert annotation\": \"Inverser annotation\",\n \"Lasso annotation\": \"Annotation lasso\",\n Logo: \"Logo\",\n \"Magnetic annotation\": \"Annotation magnétique\",\n Model: \"Modèle\",\n \"Object annotation\": \"Annotation objet\",\n \"Open classifier\": \"Ouvrir classifieur\",\n \"Open example classifier\": \"Ouvrir example classifieur\",\n \"Open image\": \"Ouvrir image\",\n \"Open weights\": \"Ouvrir poids\",\n Open: \"Ouvrir\",\n \"Pen annotation\": \"Annotation pinceau\",\n Pointer: \"Pointeur\",\n \"Polygonal annotation\": \"Annotation polygonale\",\n Name: \"Nom\",\n \"New annotation\": \"Nouvelle annotation\",\n \"Quick annotation\": \"Annotation rapide\",\n \"Rectangular annotation\": \"Annotation rectangulaire\",\n \"Run classifier\": \"Initier classifieur\",\n \"Save annotations and predictions\": \"Sauver annotations et predictions\",\n \"Save classifier\": \"Sauver classifieur\",\n \"Save weights\": \"Sauver poids\",\n Save: \"Sauver\",\n \"Search images\": \"Rechercher images\",\n \"Send feedback\": \"Envoyer feedback\",\n Settings: \"Préférences\",\n \"Show category\": \"Montrer catégorie\",\n \"Show sidebar\": \"Montrer sidebar\",\n \"Subtract area\": \"Soustraire région\",\n Unknown: \"Inconnu\",\n Zoom: \"Zoom\",\n \"Zoom in\": \"Zoom intérieur\",\n \"Zoom mode\": \"Mode zoom\",\n \"Zoom out\": \"Zoom extérieur\",\n },\n};\n\nexport default fr;\n","// prettier-ignore\nimport {language} from \"./language\";\n\nconst gr: language = {\n translation: {\n Cancel: \"Άκυρο\",\n Categories: \"Κατηγορίες\",\n \"Create category\": \"Δημιουργία κατηγορίας\",\n \"Create new classifier\": \"Νέος ταξινομητής\",\n \"Delete category\": \"Διαγραφή κατηγορίας\",\n \"Delete images\": \"Διαγραφή εικόνων\",\n Description: \"Περιγραφή\",\n Edit: \"Επεξεργασία\",\n \"Edit category\": \"Επεξεργασία κατηγορίας\",\n Help: \"Βοήθεια\",\n \"Hide other categories\": \"Απόκρυψη άλλων κατηγοριών\",\n \"Hide sidebar\": \"Απόκρυψη πλευρικής γραμμής\",\n \"Import images\": \"Εισαγωγή εικόνων\",\n Logo: \"Λογότυπο\",\n Model: \"Mοντέλο\",\n \"Open classifier\": \"Άνοιγμα ταξινομητή\",\n \"Open example classifier\": \"Παράδειγμα ταξινομητή\",\n \"Open weights\": \"Άνοιγμα παραμέτρων\",\n Open: \"Άνοιγμα\",\n \"Run classifier\": \"Εκτέλεση ταξινομητή\",\n \"Save 
annotations and predictions\": \"Αποθήκευση ετικετών και προβλέψεων\",\n \"Save classifier\": \"Αποθήκευση ταξινομητή\",\n \"Save weights\": \"Αποθήκευση παραμέτρων\",\n Save: \"Αποθήκευση\",\n \"Search images\": \"Αναζήτηση εικόνων\",\n \"Send feedback\": \"Eπικοινωνια\",\n Settings: \"Ρυθμίσεις\",\n \"Show sidebar\": \"Εμφάνιση πλευρικής γραμμής\",\n Unknown: \"Άγνωστο\",\n },\n};\n\nexport default gr;\n","// prettier-ignore\nimport {language} from \"./language\";\n\nconst hi: language = {\n translation: {\n Cancel: \"रद्द करें\",\n Categories: \"सारे वर्ग\",\n \"Create category\": \"वर्ग बनाएँ\",\n \"Create new classifier\": \"नया वर्गीकर्त्ता बनाएं\", // वर्गीकर्त्ता = classifier but is rarely used; consider transliterating instead\n \"Delete category\": \"वर्ग हटाएँ\",\n \"Delete images\": \"चित्रों को हटाएँ\",\n Description: \"विवरण\",\n \"Edit category\": \"वर्ग का बदलाव करें\",\n Help: \"सहायता\",\n \"Hide other categories\": \"बाकि वर्गों को छिपाएं\",\n \"Hide sidebar\": \"साइडबार छिपाएं\",\n \"Import images\": \"चित्रों को आयात करें\",\n Logo: \"लोगो\", // transliterated (Logo) but that's ok because it is a loanword\n Model: \"मॉडल\", // transliterated (Model) but should figure out an appropriate abstraction and translate that\n \"Open classifier\": \"वर्गीकर्त्ता खोलें\",\n \"Open example classifier\": \"उदाहरण का वर्गीकर्त्ता खोलें\",\n \"Open weights\": \"वेइट्स खोलें\",\n Open: \"खोलें\",\n \"Run classifier\": \"वर्गीकर्त्ता को चलाएं\",\n \"Save annotations and predictions\": \"टिप्पणी तथा अनुमान सहेजें\", // व्याख्या is an alternative to टिप्पणी\n \"Save classifier\": \"वर्गीकर्त्ता सहेजें\",\n \"Save weights\": \"वेइट्स सहेजें\",\n Save: \"सहेजें\",\n \"Search images\": \"चित्र खोजें\",\n \"Send feedback\": \"प्रतिक्रिया भेजें\",\n Settings: \"सेटिंग्स\",\n \"Show sidebar\": \"साइडबार दिखाएं\",\n Unknown: \"अज्ञात\", // should be used as an adjective before a noun\n },\n};\n\nexport default hi;\n","// prettier-ignore\nimport {language} from \"./language\";\n\nconst hu: language = {\n translation: {\n Cancel: \"Mégse\",\n Categories: \"Osztályok\",\n \"Create category\": \"Új osztály létrehozása\",\n \"Create new classifier\": \"Új osztályozó létrehozása\",\n \"Delete category\": \"Osztály törlése\",\n \"Delete images\": \"Képek törlése\",\n Description: \"Leírás\",\n Edit: \"Edit\",\n \"Edit category\": \"Kategória szerkesztése\",\n Help: \"Súgó\",\n \"Hide other categories\": \"Többi osztály elrejtése\",\n \"Hide sidebar\": \"Oldalsáv elrejtése\",\n \"Import images\": \"Képek importálása\",\n Logo: \"Logó\",\n Model: \"Modell\",\n \"Open classifier\": \"Osztályozó betöltése\",\n \"Open example classifier\": \"Példa osztályozó betöltése\",\n \"Open weights\": \"Súlyok betöltése\",\n Open: \"Megnyitás\",\n \"Run classifier\": \"Osztályozó futtatása\",\n \"Save annotations and predictions\": \"Címkék és predikciók mentése\",\n \"Save classifier\": \"Osztályozó mentése\",\n \"Save weights\": \"Súlyok mentése\",\n Save: \"Mentés\",\n \"Search images\": \"Képek keresése\",\n \"Send feedback\": \"Visszajelzés küldése\",\n Settings: \"Beállítások\",\n \"Show sidebar\": \"Oldalsáv megjelenítése\",\n Unknown: \"Ismeretlen\",\n },\n};\n\nexport default hu;\n","import { useSelector } from \"react-redux\";\nimport { selectLanguageType } from \"store/applicationSettings/selectors\";\nimport { de, en, fas, fi, fr, gr, hi, hu } from \"translations\";\nimport { Languages } from \"utils/common/enums\";\n\nexport const useTranslation = () => {\n const language = 
useSelector(selectLanguageType);\n\n const t = (word: string) => {\n switch (language) {\n case Languages.English:\n if (!en.translation[word]) return word;\n return en.translation[word];\n case Languages.Farsi:\n if (!fas.translation[word]) return word;\n return fas.translation[word];\n case Languages.Finnish:\n if (!fi.translation[word]) return word;\n return fi.translation[word];\n case Languages.French:\n if (!fr.translation[word]) return word;\n return fr.translation[word];\n case Languages.German:\n if (!de.translation[word]) return word;\n return de.translation[word];\n case Languages.Greek:\n if (!gr.translation[word]) return word;\n return gr.translation[word];\n case Languages.Hindi:\n if (!hi.translation[word]) return word;\n return hi.translation[word];\n case Languages.Hungarian:\n if (!hu.translation[word]) return word;\n return hu.translation[word];\n default:\n return word;\n }\n };\n\n return t;\n};\n","import { v4 as uuidv4 } from \"uuid\";\nimport { Category, Kind } from \"./types\";\nimport { UNKNOWN_CATEGORY_NAME } from \"./constants\";\nimport { UNKNOWN_IMAGE_CATEGORY_COLOR } from \"utils/common/constants\";\n\nexport const generateUUID = (options?: { definesUnknown: boolean }) => {\n let id = uuidv4();\n let unknownFlag: string;\n if (options?.definesUnknown) {\n unknownFlag = \"0\";\n } else {\n unknownFlag = \"1\";\n }\n return unknownFlag + id.slice(1);\n};\n\nexport const isUnknownCategory = (categoryId: string) => {\n return categoryId[0] === \"0\";\n};\n\nexport const generateUnknownCategory = (kind: string) => {\n const unknownCategoryId = generateUUID({ definesUnknown: true });\n const unknownCategory: Category = {\n id: unknownCategoryId,\n name: UNKNOWN_CATEGORY_NAME,\n color: UNKNOWN_IMAGE_CATEGORY_COLOR,\n containing: [],\n kind: kind,\n visible: true,\n };\n return unknownCategory;\n};\n\nexport const generateNewKind = (id: string) => {\n const unknownCategory = generateUnknownCategory(id);\n const newKind: Kind = {\n id,\n categories: [unknownCategory.id],\n unknownCategoryId: unknownCategory.id,\n containing: [],\n };\n return { newKind, unknownCategory };\n};\n","export const MIMETYPES = [\n \"image/png\",\n \"image/jpeg\",\n \"image/tiff\",\n \"image/dicom\",\n] as const;\n","import * as ImageJS from \"image-js\";\nimport * as DicomParser from \"dicom-parser\";\n\nimport {\n ImageFileError,\n ImageFileShapeInfo,\n ImageFileType,\n ImageShapeInfo,\n MIMEType,\n} from \"./types\";\nimport { MIMETYPES } from \"./constants\";\nimport {\n convertToImage,\n getImageInformation,\n} from \"utils/common/tensorHelpers\";\nimport { ImageShapeEnum } from \"./enums\";\nimport { getStackTraceFromError } from \"utils/common/helpers\";\nimport { AlertState } from \"utils/common/types\";\nimport { AlertType } from \"utils/common/enums\";\nimport { ImageObject } from \"store/data/types\";\n\nasync function decodeImageFile(imageFile: File, imageTypeEnum: ImageShapeEnum) {\n let imageStack: ImageJS.Stack;\n if (imageTypeEnum === ImageShapeEnum.DicomImage) {\n const imgArrayBuffer = await imageFile.arrayBuffer();\n\n const imgArray = new Uint8Array(imgArrayBuffer);\n\n var dicomImgData = DicomParser.parseDicom(imgArray);\n var pixelDataElement = dicomImgData.elements.x7fe00010;\n\n const samplesPerPixel = dicomImgData.int16(\"x00280002\");\n const rows = dicomImgData.int16(\"x00280010\");\n const columns = dicomImgData.int16(\"x00280011\");\n const bitsAllocated = dicomImgData.int16(\"x00280100\");\n\n if (!samplesPerPixel || !rows || !columns || !bitsAllocated) {\n 
throw Error(\"Failed to parse dicom image tags\");\n }\n\n var pixelData = new Uint16Array(\n dicomImgData.byteArray.buffer,\n pixelDataElement.dataOffset,\n pixelDataElement.length / 2\n );\n\n const img = new ImageJS.Image(rows, columns, pixelData, {\n components: samplesPerPixel,\n bitDepth: bitsAllocated,\n alpha: 0,\n });\n\n const channels: ImageJS.Image[] = [];\n for (let i = 0; i < samplesPerPixel; i++) {\n channels.push(img.getChannel(i));\n }\n imageStack = new ImageJS.Stack(channels);\n } else {\n imageStack = await loadImageFileAsStack(imageFile);\n }\n\n return {\n imageStack,\n fileName: imageFile.name,\n } as ImageFileType;\n}\n\nfunction isImageShapeValid(\n imageStack: Array,\n channels: number,\n slices: number,\n imageShape: ImageShapeEnum\n) {\n if (imageShape === ImageShapeEnum.GreyScale) {\n return channels === 1 && imageStack.length === 1;\n } else if (imageShape === ImageShapeEnum.SingleRGBImage) {\n return channels === 3 && imageStack.length === 3;\n } else {\n return channels * slices === imageStack.length;\n }\n}\n\nexport const uploadImages = async (\n files: FileList,\n channels: number,\n slices: number,\n referenceShape: ImageShapeInfo,\n categoryId: string\n): Promise<{\n imagesToUpload: ImageObject[];\n warning: any;\n errors: AlertState[];\n}> => {\n const invalidImageFiles: Array = [];\n const imagesToUpload: Array = [];\n const errors: Array = [];\n let warning: AlertState | undefined;\n\n for (const file of files) {\n try {\n const { imageStack, fileName } = await decodeImageFile(\n file,\n referenceShape.shape\n );\n if (\n !isImageShapeValid(imageStack, channels, slices, referenceShape.shape)\n ) {\n invalidImageFiles.push({\n fileName: fileName,\n error: `Could not match image to shape ${channels} (c) x ${slices} (z)`,\n });\n } else if (\n !(imageStack[0].bitDepth === 8 || imageStack[0].bitDepth === 16)\n ) {\n invalidImageFiles.push({\n fileName,\n error: `Unsupported bit depth of ${imageStack[0].bitDepth}`,\n });\n } else {\n try {\n const imageToUpload = await convertToImage(\n imageStack,\n fileName,\n undefined,\n slices,\n channels\n );\n imageToUpload.kind = \"Image\";\n imageToUpload.categoryId = categoryId;\n imageToUpload.containing = [];\n\n imagesToUpload.push(imageToUpload as ImageObject);\n } catch (err) {\n const error = err as Error;\n const stackTrace = await getStackTraceFromError(error);\n errors.push({\n alertType: AlertType.Error,\n name: \"Could not convert file to image\",\n description: error.message,\n stackTrace: stackTrace,\n });\n }\n }\n } catch (err) {\n process.env.NODE_ENV !== \"production\" && console.error(err);\n invalidImageFiles.push({\n fileName: file.name,\n error: \"Could not decode\",\n });\n }\n }\n\n if (invalidImageFiles.length) {\n warning = {\n alertType: AlertType.Warning,\n name: \"Could not draw image from files\",\n description: `Could not load or resolve images from the following files: ${invalidImageFiles.reduce(\n (prev, curr) => prev + \"\\n\" + curr.fileName + \": (\" + curr.error + \")\",\n \"\"\n )}`,\n };\n }\n return { imagesToUpload, warning, errors };\n};\n\n/*\n ----------------------------\n File blob & data url helpers\n ----------------------------\n */\n\nconst forceStack = async (image: ImageJS.Image | ImageJS.Stack) => {\n const imageShapeInfo = getImageInformation(image);\n\n if (imageShapeInfo.shape !== ImageShapeEnum.HyperStackImage) {\n image = (image as ImageJS.Image).split({ preserveAlpha: false });\n // preserveAlpha removes the alpha data from each ImageJS.Image\n // but its 
still present as its own ImageJS.Image as the final\n // element of the stack, so remove it\n if (imageShapeInfo.alpha) {\n image = new ImageJS.Stack(image.splice(0, image.length - 1));\n }\n return image;\n } else {\n return image as ImageJS.Stack;\n }\n};\n\n/*\n Receives a path to an image file, retrieved via import, eg:\n\n import myImage from \"path/to/myImage.png\"\n\n or via url, eg:\n\n \"https://piximi.photos/path/to/img\"\n\n and optionally a name, which is inffered from path if not provided.\n\n A File object is generated, identical to a File\n object retrived via html:\n \n \n\n This should only be used browser side, \n If you want to generate files node side, use the analogous function defined in\n \"nodeImageHelper.ts\".\n*/\nexport const fileFromPath = async (\n imPath: string,\n name: string | undefined = undefined\n) => {\n let imName: string;\n\n if (!name) {\n const pathParts = imPath.split(\"/\");\n imName = pathParts[pathParts.length - 1];\n } else {\n imName = name;\n }\n\n return fetch(imPath)\n .then((res) => res.blob())\n .then((blob) => new File([blob], imName, blob));\n};\n\n/*\n Receives a File blob and returns an ImageJS.Stack\n \n If the file is a greyscale, rgb, rgba, ImageJS will return a single\n ImageJS.Image object, where the data field has the pixel data interleaved\n (including alpha, if present).\n\n e.g. for rgba: [r1, g1, b1, a1, r2, g2, b2, a2, ...]\n\n Otherwise ImageJS will return an ImageJS.Stack object, which is a sublcass\n of a simple array, where each element is a single channel ImageJS.Image object.\n\n Instead we want to always return a stack, regardless of filetype.\n Alpha channel is discarded, if present.\n BitDepth and datat type is preserved.\n\n ---\n\n The File object, may come from an HTML ,\n \n or generated via \"fileFromPath\" either here or in \"nodeImageHelper.ts\"\n*/\nexport const loadImageFileAsStack = async (file: File) => {\n try {\n const buffer = await file.arrayBuffer();\n\n const image = (await ImageJS.Image.load(buffer, {\n ignorePalette: true,\n })) as ImageJS.Image | ImageJS.Stack;\n\n return forceStack(image);\n } catch (err) {\n process.env.NODE_ENV !== \"production\" &&\n console.error(`Error loading image file ${file.name}`);\n throw err;\n }\n};\n\n/*\n Converts a base64 dataURL encoded image into an ImageJS stack\n\n If the encoded image is a greyscale, rgb, or rgba, ImageJS will return a single\n ImageJS.Image object, where the data field has the pixel data interleaved\n (including alpha, if present).\n\n e.g. 
for rgba: [r1, g1, b1, a1, r2, g2, b2, a2, ...]\n\n Otherwise ImageJS will return an ImageJS.Stack object, which is a subclass\n of a simple array, where each element is a single channel ImageJS.Image object.\n\n Instead we want to always return a stack, regardless of filetype.\n Alpha channel is discarded, if present.\n BitDepth and data type are preserved.\n */\nexport const loadDataUrlAsStack = async (dataURL: string) => {\n try {\n const image = await ImageJS.Image.load(dataURL, {\n ignorePalette: true,\n });\n\n return forceStack(image);\n } catch (err) {\n process.env.NODE_ENV !== \"production\" &&\n console.error(\"Error loading dataURL\");\n throw err;\n }\n};\n\nexport const getImageFileInformation = async (\n file: File\n): Promise => {\n const ext = file.type as MIMEType;\n try {\n // https://stackoverflow.com/questions/56565528/typescript-const-assertions-how-to-use-array-prototype-includes\n if (!(MIMETYPES as ReadonlyArray).includes(file.type)) {\n process.env.NODE_ENV !== \"production\" &&\n console.error(\"Invalid MIME Type:\", ext);\n return { shape: ImageShapeEnum.InvalidImage, ext };\n }\n\n if (file.name.endsWith(\"dcm\") || file.name.endsWith(\"DICOM\")) {\n return { shape: ImageShapeEnum.DicomImage, ext: \"image/dicom\" };\n }\n\n const buffer = await file.arrayBuffer();\n const image: ImageJS.Image | ImageJS.Stack = await ImageJS.Image.load(\n buffer,\n {\n ignorePalette: true,\n }\n );\n\n return { ...getImageInformation(image), ext };\n } catch (err) {\n return { shape: ImageShapeEnum.InvalidImage, ext };\n }\n};\n","import { useCallback } from \"react\";\nimport { useDispatch, useSelector } from \"react-redux\";\nimport { applicationSettingsSlice } from \"store/applicationSettings\";\n\nimport { dataSlice } from \"store/data/dataSlice\";\nimport { generateNewKind } from \"store/data/helpers\";\nimport { selectUnknownImageCategory } from \"store/data/selectors\";\nimport { ImageShapeEnum } from \"utils/file-io/enums\";\nimport { getImageFileInformation, uploadImages } from \"utils/file-io/helpers\";\n\nexport const useUpload = (\n setOpenDimensionsDialogBox: (flag: boolean) => void\n) => {\n const dispatch = useDispatch();\n const unknownImageCategory = useSelector(selectUnknownImageCategory);\n\n return useCallback(\n async (files: FileList) => {\n const imageShapeInfo = await getImageFileInformation(files[0]);\n let imageCategory = unknownImageCategory;\n switch (imageShapeInfo.shape) {\n case ImageShapeEnum.SingleRGBImage:\n case ImageShapeEnum.GreyScale: {\n const channels =\n imageShapeInfo.shape === ImageShapeEnum.GreyScale ? 
1 : 3;\n if (!imageCategory) {\n const { newKind, unknownCategory } = generateNewKind(\"Image\");\n imageCategory = unknownCategory.id;\n dispatch(dataSlice.actions.addKinds({ kinds: [newKind] }));\n dispatch(\n dataSlice.actions.addCategories({ categories: [unknownCategory] })\n );\n }\n const res = await uploadImages(\n files,\n channels,\n 1,\n imageShapeInfo,\n imageCategory\n );\n //HACK: Future plans to re-work error messages\n if (res.warning) {\n dispatch(\n applicationSettingsSlice.actions.updateAlertState({\n alertState: res.warning,\n })\n );\n } else if (res.errors.length) {\n dispatch(\n applicationSettingsSlice.actions.updateAlertState({\n alertState: res.errors[0],\n })\n );\n } else {\n dispatch(\n dataSlice.actions.addThings({\n things: res.imagesToUpload,\n isPermanent: true,\n })\n );\n }\n break;\n }\n case ImageShapeEnum.DicomImage: {\n const res = await uploadImages(\n files,\n 1,\n 1,\n imageShapeInfo,\n unknownImageCategory\n );\n //HACK: Future plans to re-work error messages\n if (res.warning) {\n dispatch(\n applicationSettingsSlice.actions.updateAlertState({\n alertState: res.warning,\n })\n );\n } else if (res.errors.length) {\n dispatch(\n applicationSettingsSlice.actions.updateAlertState({\n alertState: res.errors[0],\n })\n );\n } else {\n dispatch(\n dataSlice.actions.addThings({\n things: res.imagesToUpload,\n isPermanent: true,\n })\n );\n }\n break;\n }\n case ImageShapeEnum.HyperStackImage:\n setOpenDimensionsDialogBox(true);\n break;\n case ImageShapeEnum.InvalidImage:\n process.env.NODE_ENV !== \"production\" &&\n console.warn(\n \"Could not get shape information from first image in file list\"\n );\n break;\n default:\n process.env.NODE_ENV !== \"production\" &&\n console.warn(\"Unrecognized ImageShapeEnum value\");\n }\n\n return imageShapeInfo;\n },\n [dispatch, setOpenDimensionsDialogBox, unknownImageCategory]\n );\n};\n","import { useState } from \"react\";\nimport Konva from \"konva\";\nimport { useDispatch, useSelector } from \"react-redux\";\nimport { KonvaEventObject } from \"konva/lib/Node\";\n\nimport { imageViewerSlice } from \"store/imageViewer\";\n\nimport { selectToolType } from \"store/annotator/selectors\";\n\nimport { useDebounce } from \"hooks/useDebounce\";\nimport { Point } from \"utils/annotator/types\";\nimport { ToolType, ZoomMode } from \"utils/annotator/enums\";\nimport {\n selectActiveImageId,\n selectStageWidth,\n selectZoomSelection,\n selectZoomToolOptions,\n} from \"store/imageViewer/selectors\";\n\nexport const useZoom = (stage?: Konva.Stage | null) => {\n const delta = 10;\n const [selectStart, setSelectStart] = useState<{ x: number; y: number }>();\n\n const dispatch = useDispatch();\n const stageWidth = useSelector(selectStageWidth);\n const toolType = useSelector(selectToolType);\n const { automaticCentering, mode } = useSelector(selectZoomToolOptions);\n const zoomSelection = useSelector(selectZoomSelection);\n const activeImageId = useSelector(selectActiveImageId);\n const updateZoomScale = useDebounce((scale: number) => {\n dispatch(\n imageViewerSlice.actions.setZoomToolOptions({ options: { scale } })\n );\n }, 300);\n\n const zoomAndOffset = (newScale: number, center: Point) => {\n if (!center || !stage) return;\n\n const stageX = stage.x();\n const stageY = stage.y();\n const stageScale = stage.scaleX();\n\n const mousePointTo = {\n x: (center.x - stageX!) / stageScale,\n y: (center.y - stageY!) 
/ stageScale,\n };\n\n var newPos = {\n x: center.x - mousePointTo.x * newScale,\n y: center.y - mousePointTo.y * newScale,\n };\n\n stage.position(newPos);\n stage.scale({ x: newScale, y: newScale });\n };\n\n const resetZoomSelection = () => {\n dispatch(\n imageViewerSlice.actions.setZoomSelection({\n zoomSelection: {\n maximum: undefined,\n minimum: undefined,\n selecting: false,\n dragging: false,\n centerPoint: undefined,\n },\n })\n );\n };\n\n const handleZoomMouseDown = (\n position: { x: number; y: number },\n event: KonvaEventObject\n ) => {\n if (toolType !== ToolType.Zoom) return;\n const stage = event.target.getStage()!;\n setSelectStart(stage.getPointerPosition()!);\n dispatch(\n imageViewerSlice.actions.setZoomSelection({\n zoomSelection: {\n ...zoomSelection,\n dragging: false,\n minimum: position,\n selecting: true,\n },\n })\n );\n };\n\n const handleZoomMouseMove = (\n position: { x: number; y: number },\n event: KonvaEventObject\n ) => {\n const stage = event.target.getStage()!;\n const _position = stage.getPointerPosition()!;\n if (\n mode === ZoomMode.Out ||\n !zoomSelection.selecting ||\n !position ||\n !zoomSelection.minimum ||\n !selectStart\n )\n return;\n\n dispatch(\n imageViewerSlice.actions.setZoomSelection({\n zoomSelection: {\n ...zoomSelection,\n dragging: Math.abs(_position.x - selectStart.x) >= delta,\n maximum: position,\n },\n })\n );\n };\n\n const handleZoomMouseUp = (\n position: { x: number; y: number },\n event: KonvaEventObject\n ) => {\n if (!activeImageId || !zoomSelection.selecting || !stage) return;\n if (zoomSelection.dragging) {\n const stage = event.target.getStage()!;\n const _position = stage.getPointerPosition()!;\n if (!_position || !position || !selectStart) return;\n\n dispatch(\n imageViewerSlice.actions.setZoomSelection({\n zoomSelection: { ...zoomSelection, maximum: position },\n })\n );\n\n if (!zoomSelection.minimum) return;\n\n const selectedWidth = Math.abs(_position.x - selectStart.x);\n const newScale = Math.max(\n Math.min(stageWidth / selectedWidth, 5),\n stage.scaleX()\n );\n let topLeft;\n if (selectStart.x < _position.x) {\n if (selectStart.y < _position.y) {\n topLeft = selectStart;\n } else {\n topLeft = { x: selectStart.x, y: _position.y };\n }\n } else {\n if (selectStart.y < _position.y) {\n topLeft = { x: _position.x, y: selectStart.y };\n } else {\n topLeft = _position;\n }\n }\n zoomAndOffset(newScale, {\n x: topLeft.x + selectedWidth / 2,\n y: topLeft.y + selectedWidth / 2,\n });\n }\n\n dispatch(\n imageViewerSlice.actions.setZoomSelection({\n zoomSelection: { ...zoomSelection, dragging: false, selecting: false },\n })\n );\n };\n\n const handleZoomScroll = (event: KonvaEventObject) => {\n event.evt.preventDefault();\n if (!activeImageId) return;\n const stage = event.target.getStage()!;\n const scaleBy = 1.035;\n\n const oldScale = stage.scaleX();\n\n const newScale =\n event.evt.deltaY < 0\n ? 
Math.min(5, oldScale * scaleBy)\n : Math.max(0.25, oldScale / scaleBy);\n\n let center;\n\n if (automaticCentering) {\n center = {\n x: (stage.width() / 2) * stage.scaleX() + stage.x(),\n y: (stage.height() / 2) * stage.scaleX() + stage.y(),\n };\n } else {\n center = stage.getPointerPosition() as Point;\n }\n if (!center) return;\n zoomAndOffset(newScale, center);\n\n updateZoomScale(newScale);\n\n const labelGroup = stage.find(`#label-group`)[0];\n if (!labelGroup) return;\n const labelPosition = labelGroup.position();\n const labelPointTo = {\n x: labelPosition.x / oldScale - stage.x() / oldScale,\n y: labelPosition.y / oldScale - stage.y() / oldScale,\n };\n labelGroup.setAttrs({\n scaleX: 1 / stage.scaleX(),\n scaleY: 1 / stage.scaleY(),\n });\n\n var newLabelPos = {\n x: labelPosition.x - labelPointTo.x * newScale,\n y: labelPosition.y - labelPointTo.y * newScale,\n };\n\n labelGroup.setAttrs({\n position: newLabelPos,\n });\n };\n\n const handleZoomDblClick = (event: KonvaEventObject) => {\n event.evt.preventDefault();\n if (!activeImageId) return;\n const stage = event.target.getStage()!;\n const scaleBy = 1.2;\n\n const oldScale = stage.scaleX();\n\n const newScale =\n mode === ZoomMode.In\n ? Math.min(5, oldScale * scaleBy)\n : Math.max(0.25, oldScale / scaleBy);\n\n let center;\n\n if (automaticCentering) {\n center = zoomSelection.centerPoint;\n } else {\n center = stage.getPointerPosition() as Point;\n }\n\n if (!center) return;\n zoomAndOffset(newScale, center);\n\n const labelGroup = stage.find(`#label-group`)[0];\n if (!labelGroup) return;\n const labelPosition = labelGroup.position();\n const labelPointTo = {\n x: labelPosition.x / oldScale - stage.x() / oldScale,\n y: labelPosition.y / oldScale - stage.y() / oldScale,\n };\n labelGroup.setAttrs({\n scaleX: 1 / stage.scaleX(),\n scaleY: 1 / stage.scaleY(),\n });\n\n var newLabelPos = {\n x: labelPosition.x - labelPointTo.x * newScale,\n y: labelPosition.y - labelPointTo.y * newScale,\n };\n\n labelGroup.setAttrs({\n position: newLabelPos,\n });\n };\n\n return {\n resetZoomSelection,\n handleZoomMouseDown,\n handleZoomMouseMove,\n handleZoomMouseUp,\n handleZoomScroll,\n zoomAndOffset,\n handleZoomDblClick,\n };\n};\n","import { useCallback, useEffect, useState } from \"react\";\nimport Konva from \"konva\";\nimport * as ImageJS from \"image-js\";\nimport { Point } from \"utils/annotator/types\";\n\nexport const usePointerLocation = (\n imageRef: React.MutableRefObject,\n stageRef: React.RefObject,\n originalImage: ImageJS.Image\n) => {\n const [absolutePosition, setAbsolutePosition] = useState();\n const [positionByStage, setPositionByStage] = useState();\n const [outOfBounds, setOutOfBounds] = useState(false);\n const [pixelColor, setPixelColor] = useState();\n\n const getPositionFromImage = useCallback(\n (position: Point): Point | undefined => {\n if (!imageRef || !imageRef.current) return;\n\n const transform = imageRef.current.getAbsoluteTransform().copy();\n\n transform.invert();\n\n const imageOffset = transform.point(position);\n\n return {\n x: (imageOffset.x / imageRef.current.width()) * originalImage.width,\n y: (imageOffset.y / imageRef.current.height()) * originalImage.height,\n };\n },\n [imageRef, originalImage]\n );\n const getRelativePosition = useCallback(\n (position: Point, ref: Konva.Node | null): Point | undefined => {\n if (!ref) return;\n\n const transform = ref.getAbsoluteTransform().copy();\n\n transform.invert();\n return transform.point(position);\n },\n []\n );\n const setCurrentMousePosition 
= useCallback(() => {\n if (!stageRef.current) return;\n const position = stageRef.current.getPointerPosition();\n\n if (!position) return;\n\n const relative = getRelativePosition(position, imageRef.current);\n\n if (!relative) return;\n\n setPositionByStage(getRelativePosition(position, stageRef.current));\n\n let adjustedX: number;\n let adjustedY: number;\n let xOut: boolean;\n let yOut: boolean;\n\n if (relative.x < 0) {\n adjustedX = 0;\n xOut = true;\n } else if (relative.x > originalImage.width) {\n adjustedX = originalImage.width;\n xOut = true;\n } else {\n adjustedX = relative.x;\n xOut = false;\n }\n\n if (relative.y < 0) {\n adjustedY = 0;\n yOut = true;\n } else if (relative.y > originalImage.height) {\n adjustedY = originalImage.height;\n yOut = true;\n } else {\n adjustedY = relative.y;\n yOut = false;\n }\n\n relative.x = Math.round(adjustedX);\n relative.y = Math.round(adjustedY);\n\n setAbsolutePosition(relative);\n setOutOfBounds(xOut || yOut);\n }, [stageRef, getRelativePosition, originalImage, imageRef]);\n\n useEffect(() => {\n if (!absolutePosition?.x || outOfBounds) return;\n\n let y: number;\n /* For some reason the full range of x values work, but only y < height work\n and when x >= width - 1, only y < height - 1 works in getPixelXY\n */\n if (absolutePosition.x >= originalImage.width - 1) {\n y = Math.min(originalImage.height - 2, absolutePosition.y);\n } else {\n y = Math.min(originalImage.height - 1, absolutePosition.y);\n }\n\n const pixelColor = originalImage\n .getPixelXY(absolutePosition.x, y)\n .slice(0, -1);\n setPixelColor(pixelColor.join(\", \"));\n }, [originalImage, absolutePosition?.x, absolutePosition?.y, outOfBounds]);\n\n return {\n absolutePosition,\n positionByStage,\n outOfBounds,\n pixelColor,\n getPositionFromImage,\n setCurrentMousePosition,\n };\n};\n","import * as React from 'react';\nimport { useTheme as useThemeSystem } from '@mui/system';\nimport defaultTheme from './defaultTheme';\nimport THEME_ID from './identifier';\nexport default function useTheme() {\n const theme = useThemeSystem(defaultTheme);\n if (process.env.NODE_ENV !== 'production') {\n // eslint-disable-next-line react-hooks/rules-of-hooks\n React.useDebugValue(theme);\n }\n return theme[THEME_ID] || theme;\n}","import { Breakpoint, useMediaQuery, useTheme } from \"@mui/material\";\ntype BreakpointOrNull = Breakpoint | null;\nexport const useBreakpointObserver = () => {\n const theme = useTheme();\n const keys: readonly Breakpoint[] = [...theme.breakpoints.keys].reverse();\n return (\n keys.reduce((output: BreakpointOrNull, key: Breakpoint) => {\n // eslint-disable-next-line react-hooks/rules-of-hooks\n const matches = useMediaQuery(theme.breakpoints.up(key));\n return !output && matches ? 
key : output;\n }, null) || \"xs\"\n );\n};\n","import { useBreakpointObserver } from \"hooks/useBreakpointObserver/useBreakpointObserver\";\nimport { useEffect, useState } from \"react\";\nimport { mobileBreakpoints } from \"utils/common/constants\";\n\nexport const useMobileView = () => {\n const breakpoint = useBreakpointObserver();\n const [isMobile, setIsMobile] = useState(\n mobileBreakpoints.includes(breakpoint)\n );\n useEffect(() => {\n setIsMobile(mobileBreakpoints.includes(breakpoint));\n }, [breakpoint]);\n\n return isMobile;\n};\n","import _extends from \"@babel/runtime/helpers/esm/extends\";\nimport _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nconst _excluded = [\"sx\"];\nimport { isPlainObject } from '@mui/utils';\nimport defaultSxConfig from './defaultSxConfig';\nconst splitProps = props => {\n var _props$theme$unstable, _props$theme;\n const result = {\n systemProps: {},\n otherProps: {}\n };\n const config = (_props$theme$unstable = props == null ? void 0 : (_props$theme = props.theme) == null ? void 0 : _props$theme.unstable_sxConfig) != null ? _props$theme$unstable : defaultSxConfig;\n Object.keys(props).forEach(prop => {\n if (config[prop]) {\n result.systemProps[prop] = props[prop];\n } else {\n result.otherProps[prop] = props[prop];\n }\n });\n return result;\n};\nexport default function extendSxProp(props) {\n const {\n sx: inSx\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const {\n systemProps,\n otherProps\n } = splitProps(other);\n let finalSx;\n if (Array.isArray(inSx)) {\n finalSx = [systemProps, ...inSx];\n } else if (typeof inSx === 'function') {\n finalSx = (...args) => {\n const result = inSx(...args);\n if (!isPlainObject(result)) {\n return systemProps;\n }\n return _extends({}, systemProps, result);\n };\n } else {\n finalSx = _extends({}, systemProps, inSx);\n }\n return _extends({}, otherProps, {\n sx: finalSx\n });\n}","import _extends from \"@babel/runtime/helpers/esm/extends\";\nimport _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nconst _excluded = [\"className\", \"component\"];\nimport * as React from 'react';\nimport clsx from 'clsx';\nimport styled from '@mui/styled-engine';\nimport styleFunctionSx, { extendSxProp } from './styleFunctionSx';\nimport useTheme from './useTheme';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nexport default function createBox(options = {}) {\n const {\n themeId,\n defaultTheme,\n defaultClassName = 'MuiBox-root',\n generateClassName\n } = options;\n const BoxRoot = styled('div', {\n shouldForwardProp: prop => prop !== 'theme' && prop !== 'sx' && prop !== 'as'\n })(styleFunctionSx);\n const Box = /*#__PURE__*/React.forwardRef(function Box(inProps, ref) {\n const theme = useTheme(defaultTheme);\n const _extendSxProp = extendSxProp(inProps),\n {\n className,\n component = 'div'\n } = _extendSxProp,\n other = _objectWithoutPropertiesLoose(_extendSxProp, _excluded);\n return /*#__PURE__*/_jsx(BoxRoot, _extends({\n as: component,\n ref: ref,\n className: clsx(className, generateClassName ? generateClassName(defaultClassName) : defaultClassName),\n theme: themeId ? 
theme[themeId] || theme : theme\n }, other));\n });\n return Box;\n}","import { createBox } from '@mui/system';\nimport PropTypes from 'prop-types';\nimport { unstable_ClassNameGenerator as ClassNameGenerator } from '../className';\nimport { createTheme } from '../styles';\nimport THEME_ID from '../styles/identifier';\nconst defaultTheme = createTheme();\nconst Box = createBox({\n themeId: THEME_ID,\n defaultTheme,\n defaultClassName: 'MuiBox-root',\n generateClassName: ClassNameGenerator.generate\n});\nprocess.env.NODE_ENV !== \"production\" ? Box.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * @ignore\n */\n children: PropTypes.node,\n /**\n * The component used for the root node.\n * Either a string to use a HTML element or a component.\n */\n component: PropTypes.elementType,\n /**\n * The system prop that allows defining system overrides as well as additional CSS styles.\n */\n sx: PropTypes.oneOfType([PropTypes.arrayOf(PropTypes.oneOfType([PropTypes.func, PropTypes.object, PropTypes.bool])), PropTypes.func, PropTypes.object])\n} : void 0;\nexport default Box;","import { h as hasOwnProperty, E as Emotion, c as createEmotionProps, w as withEmotionCache, T as ThemeContext, i as isBrowser$1 } from './emotion-element-c39617d8.browser.esm.js';\nexport { C as CacheProvider, T as ThemeContext, a as ThemeProvider, _ as __unsafe_useEmotionCache, u as useTheme, w as withEmotionCache, b as withTheme } from './emotion-element-c39617d8.browser.esm.js';\nimport * as React from 'react';\nimport { insertStyles, registerStyles, getRegisteredStyles } from '@emotion/utils';\nimport { useInsertionEffectWithLayoutFallback, useInsertionEffectAlwaysWithSyncFallback } from '@emotion/use-insertion-effect-with-fallbacks';\nimport { serializeStyles } from '@emotion/serialize';\nimport '@emotion/cache';\nimport '@babel/runtime/helpers/extends';\nimport '@emotion/weak-memoize';\nimport '../_isolated-hnrs/dist/emotion-react-_isolated-hnrs.browser.esm.js';\nimport 'hoist-non-react-statics';\n\nvar pkg = {\n\tname: \"@emotion/react\",\n\tversion: \"11.11.1\",\n\tmain: \"dist/emotion-react.cjs.js\",\n\tmodule: \"dist/emotion-react.esm.js\",\n\tbrowser: {\n\t\t\"./dist/emotion-react.esm.js\": \"./dist/emotion-react.browser.esm.js\"\n\t},\n\texports: {\n\t\t\".\": {\n\t\t\tmodule: {\n\t\t\t\tworker: \"./dist/emotion-react.worker.esm.js\",\n\t\t\t\tbrowser: \"./dist/emotion-react.browser.esm.js\",\n\t\t\t\t\"default\": \"./dist/emotion-react.esm.js\"\n\t\t\t},\n\t\t\t\"import\": \"./dist/emotion-react.cjs.mjs\",\n\t\t\t\"default\": \"./dist/emotion-react.cjs.js\"\n\t\t},\n\t\t\"./jsx-runtime\": {\n\t\t\tmodule: {\n\t\t\t\tworker: \"./jsx-runtime/dist/emotion-react-jsx-runtime.worker.esm.js\",\n\t\t\t\tbrowser: \"./jsx-runtime/dist/emotion-react-jsx-runtime.browser.esm.js\",\n\t\t\t\t\"default\": \"./jsx-runtime/dist/emotion-react-jsx-runtime.esm.js\"\n\t\t\t},\n\t\t\t\"import\": \"./jsx-runtime/dist/emotion-react-jsx-runtime.cjs.mjs\",\n\t\t\t\"default\": \"./jsx-runtime/dist/emotion-react-jsx-runtime.cjs.js\"\n\t\t},\n\t\t\"./_isolated-hnrs\": {\n\t\t\tmodule: {\n\t\t\t\tworker: \"./_isolated-hnrs/dist/emotion-react-_isolated-hnrs.worker.esm.js\",\n\t\t\t\tbrowser: 
\"./_isolated-hnrs/dist/emotion-react-_isolated-hnrs.browser.esm.js\",\n\t\t\t\t\"default\": \"./_isolated-hnrs/dist/emotion-react-_isolated-hnrs.esm.js\"\n\t\t\t},\n\t\t\t\"import\": \"./_isolated-hnrs/dist/emotion-react-_isolated-hnrs.cjs.mjs\",\n\t\t\t\"default\": \"./_isolated-hnrs/dist/emotion-react-_isolated-hnrs.cjs.js\"\n\t\t},\n\t\t\"./jsx-dev-runtime\": {\n\t\t\tmodule: {\n\t\t\t\tworker: \"./jsx-dev-runtime/dist/emotion-react-jsx-dev-runtime.worker.esm.js\",\n\t\t\t\tbrowser: \"./jsx-dev-runtime/dist/emotion-react-jsx-dev-runtime.browser.esm.js\",\n\t\t\t\t\"default\": \"./jsx-dev-runtime/dist/emotion-react-jsx-dev-runtime.esm.js\"\n\t\t\t},\n\t\t\t\"import\": \"./jsx-dev-runtime/dist/emotion-react-jsx-dev-runtime.cjs.mjs\",\n\t\t\t\"default\": \"./jsx-dev-runtime/dist/emotion-react-jsx-dev-runtime.cjs.js\"\n\t\t},\n\t\t\"./package.json\": \"./package.json\",\n\t\t\"./types/css-prop\": \"./types/css-prop.d.ts\",\n\t\t\"./macro\": {\n\t\t\ttypes: {\n\t\t\t\t\"import\": \"./macro.d.mts\",\n\t\t\t\t\"default\": \"./macro.d.ts\"\n\t\t\t},\n\t\t\t\"default\": \"./macro.js\"\n\t\t}\n\t},\n\ttypes: \"types/index.d.ts\",\n\tfiles: [\n\t\t\"src\",\n\t\t\"dist\",\n\t\t\"jsx-runtime\",\n\t\t\"jsx-dev-runtime\",\n\t\t\"_isolated-hnrs\",\n\t\t\"types/*.d.ts\",\n\t\t\"macro.*\"\n\t],\n\tsideEffects: false,\n\tauthor: \"Emotion Contributors\",\n\tlicense: \"MIT\",\n\tscripts: {\n\t\t\"test:typescript\": \"dtslint types\"\n\t},\n\tdependencies: {\n\t\t\"@babel/runtime\": \"^7.18.3\",\n\t\t\"@emotion/babel-plugin\": \"^11.11.0\",\n\t\t\"@emotion/cache\": \"^11.11.0\",\n\t\t\"@emotion/serialize\": \"^1.1.2\",\n\t\t\"@emotion/use-insertion-effect-with-fallbacks\": \"^1.0.1\",\n\t\t\"@emotion/utils\": \"^1.2.1\",\n\t\t\"@emotion/weak-memoize\": \"^0.3.1\",\n\t\t\"hoist-non-react-statics\": \"^3.3.1\"\n\t},\n\tpeerDependencies: {\n\t\treact: \">=16.8.0\"\n\t},\n\tpeerDependenciesMeta: {\n\t\t\"@types/react\": {\n\t\t\toptional: true\n\t\t}\n\t},\n\tdevDependencies: {\n\t\t\"@definitelytyped/dtslint\": \"0.0.112\",\n\t\t\"@emotion/css\": \"11.11.0\",\n\t\t\"@emotion/css-prettifier\": \"1.1.3\",\n\t\t\"@emotion/server\": \"11.11.0\",\n\t\t\"@emotion/styled\": \"11.11.0\",\n\t\t\"html-tag-names\": \"^1.1.2\",\n\t\treact: \"16.14.0\",\n\t\t\"svg-tag-names\": \"^1.1.1\",\n\t\ttypescript: \"^4.5.5\"\n\t},\n\trepository: \"https://github.com/emotion-js/emotion/tree/main/packages/react\",\n\tpublishConfig: {\n\t\taccess: \"public\"\n\t},\n\t\"umd:main\": \"dist/emotion-react.umd.min.js\",\n\tpreconstruct: {\n\t\tentrypoints: [\n\t\t\t\"./index.js\",\n\t\t\t\"./jsx-runtime.js\",\n\t\t\t\"./jsx-dev-runtime.js\",\n\t\t\t\"./_isolated-hnrs.js\"\n\t\t],\n\t\tumdName: \"emotionReact\",\n\t\texports: {\n\t\t\tenvConditions: [\n\t\t\t\t\"browser\",\n\t\t\t\t\"worker\"\n\t\t\t],\n\t\t\textra: {\n\t\t\t\t\"./types/css-prop\": \"./types/css-prop.d.ts\",\n\t\t\t\t\"./macro\": {\n\t\t\t\t\ttypes: {\n\t\t\t\t\t\t\"import\": \"./macro.d.mts\",\n\t\t\t\t\t\t\"default\": \"./macro.d.ts\"\n\t\t\t\t\t},\n\t\t\t\t\t\"default\": \"./macro.js\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n};\n\nvar jsx = function jsx(type, props) {\n var args = arguments;\n\n if (props == null || !hasOwnProperty.call(props, 'css')) {\n // $FlowFixMe\n return React.createElement.apply(undefined, args);\n }\n\n var argsLength = args.length;\n var createElementArgArray = new Array(argsLength);\n createElementArgArray[0] = Emotion;\n createElementArgArray[1] = createEmotionProps(type, props);\n\n for (var i = 2; i < argsLength; i++) {\n createElementArgArray[i] 
= args[i];\n } // $FlowFixMe\n\n\n return React.createElement.apply(null, createElementArgArray);\n};\n\nvar warnedAboutCssPropForGlobal = false; // maintain place over rerenders.\n// initial render from browser, insertBefore context.sheet.tags[0] or if a style hasn't been inserted there yet, appendChild\n// initial client-side render from SSR, use place of hydrating tag\n\nvar Global = /* #__PURE__ */withEmotionCache(function (props, cache) {\n if (process.env.NODE_ENV !== 'production' && !warnedAboutCssPropForGlobal && ( // check for className as well since the user is\n // probably using the custom createElement which\n // means it will be turned into a className prop\n // $FlowFixMe I don't really want to add it to the type since it shouldn't be used\n props.className || props.css)) {\n console.error(\"It looks like you're using the css prop on Global, did you mean to use the styles prop instead?\");\n warnedAboutCssPropForGlobal = true;\n }\n\n var styles = props.styles;\n var serialized = serializeStyles([styles], undefined, React.useContext(ThemeContext));\n\n if (!isBrowser$1) {\n var _ref;\n\n var serializedNames = serialized.name;\n var serializedStyles = serialized.styles;\n var next = serialized.next;\n\n while (next !== undefined) {\n serializedNames += ' ' + next.name;\n serializedStyles += next.styles;\n next = next.next;\n }\n\n var shouldCache = cache.compat === true;\n var rules = cache.insert(\"\", {\n name: serializedNames,\n styles: serializedStyles\n }, cache.sheet, shouldCache);\n\n if (shouldCache) {\n return null;\n }\n\n return /*#__PURE__*/React.createElement(\"style\", (_ref = {}, _ref[\"data-emotion\"] = cache.key + \"-global \" + serializedNames, _ref.dangerouslySetInnerHTML = {\n __html: rules\n }, _ref.nonce = cache.sheet.nonce, _ref));\n } // yes, i know these hooks are used conditionally\n // but it is based on a constant that will never change at runtime\n // it's effectively like having two implementations and switching them out\n // so it's not actually breaking anything\n\n\n var sheetRef = React.useRef();\n useInsertionEffectWithLayoutFallback(function () {\n var key = cache.key + \"-global\"; // use case of https://github.com/emotion-js/emotion/issues/2675\n\n var sheet = new cache.sheet.constructor({\n key: key,\n nonce: cache.sheet.nonce,\n container: cache.sheet.container,\n speedy: cache.sheet.isSpeedy\n });\n var rehydrating = false; // $FlowFixMe\n\n var node = document.querySelector(\"style[data-emotion=\\\"\" + key + \" \" + serialized.name + \"\\\"]\");\n\n if (cache.sheet.tags.length) {\n sheet.before = cache.sheet.tags[0];\n }\n\n if (node !== null) {\n rehydrating = true; // clear the hash so this node won't be recognizable as rehydratable by other s\n\n node.setAttribute('data-emotion', key);\n sheet.hydrate([node]);\n }\n\n sheetRef.current = [sheet, rehydrating];\n return function () {\n sheet.flush();\n };\n }, [cache]);\n useInsertionEffectWithLayoutFallback(function () {\n var sheetRefCurrent = sheetRef.current;\n var sheet = sheetRefCurrent[0],\n rehydrating = sheetRefCurrent[1];\n\n if (rehydrating) {\n sheetRefCurrent[1] = false;\n return;\n }\n\n if (serialized.next !== undefined) {\n // insert keyframes\n insertStyles(cache, serialized.next, true);\n }\n\n if (sheet.tags.length) {\n // if this doesn't exist then it will be null so the style element will be appended\n var element = sheet.tags[sheet.tags.length - 1].nextElementSibling;\n sheet.before = element;\n sheet.flush();\n }\n\n cache.insert(\"\", serialized, sheet, 
false);\n }, [cache, serialized.name]);\n return null;\n});\n\nif (process.env.NODE_ENV !== 'production') {\n Global.displayName = 'EmotionGlobal';\n}\n\nfunction css() {\n for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {\n args[_key] = arguments[_key];\n }\n\n return serializeStyles(args);\n}\n\nvar keyframes = function keyframes() {\n var insertable = css.apply(void 0, arguments);\n var name = \"animation-\" + insertable.name; // $FlowFixMe\n\n return {\n name: name,\n styles: \"@keyframes \" + name + \"{\" + insertable.styles + \"}\",\n anim: 1,\n toString: function toString() {\n return \"_EMO_\" + this.name + \"_\" + this.styles + \"_EMO_\";\n }\n };\n};\n\nvar classnames = function classnames(args) {\n var len = args.length;\n var i = 0;\n var cls = '';\n\n for (; i < len; i++) {\n var arg = args[i];\n if (arg == null) continue;\n var toAdd = void 0;\n\n switch (typeof arg) {\n case 'boolean':\n break;\n\n case 'object':\n {\n if (Array.isArray(arg)) {\n toAdd = classnames(arg);\n } else {\n if (process.env.NODE_ENV !== 'production' && arg.styles !== undefined && arg.name !== undefined) {\n console.error('You have passed styles created with `css` from `@emotion/react` package to the `cx`.\\n' + '`cx` is meant to compose class names (strings) so you should convert those styles to a class name by passing them to the `css` received from component.');\n }\n\n toAdd = '';\n\n for (var k in arg) {\n if (arg[k] && k) {\n toAdd && (toAdd += ' ');\n toAdd += k;\n }\n }\n }\n\n break;\n }\n\n default:\n {\n toAdd = arg;\n }\n }\n\n if (toAdd) {\n cls && (cls += ' ');\n cls += toAdd;\n }\n }\n\n return cls;\n};\n\nfunction merge(registered, css, className) {\n var registeredStyles = [];\n var rawClassName = getRegisteredStyles(registered, registeredStyles, className);\n\n if (registeredStyles.length < 2) {\n return className;\n }\n\n return rawClassName + css(registeredStyles);\n}\n\nvar Insertion = function Insertion(_ref) {\n var cache = _ref.cache,\n serializedArr = _ref.serializedArr;\n useInsertionEffectAlwaysWithSyncFallback(function () {\n\n for (var i = 0; i < serializedArr.length; i++) {\n insertStyles(cache, serializedArr[i], false);\n }\n });\n\n return null;\n};\n\nvar ClassNames = /* #__PURE__ */withEmotionCache(function (props, cache) {\n var hasRendered = false;\n var serializedArr = [];\n\n var css = function css() {\n if (hasRendered && process.env.NODE_ENV !== 'production') {\n throw new Error('css can only be used during render');\n }\n\n for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {\n args[_key] = arguments[_key];\n }\n\n var serialized = serializeStyles(args, cache.registered);\n serializedArr.push(serialized); // registration has to happen here as the result of this might get consumed by `cx`\n\n registerStyles(cache, serialized, false);\n return cache.key + \"-\" + serialized.name;\n };\n\n var cx = function cx() {\n if (hasRendered && process.env.NODE_ENV !== 'production') {\n throw new Error('cx can only be used during render');\n }\n\n for (var _len2 = arguments.length, args = new Array(_len2), _key2 = 0; _key2 < _len2; _key2++) {\n args[_key2] = arguments[_key2];\n }\n\n return merge(cache.registered, css, classnames(args));\n };\n\n var content = {\n css: css,\n cx: cx,\n theme: React.useContext(ThemeContext)\n };\n var ele = props.children(content);\n hasRendered = true;\n return /*#__PURE__*/React.createElement(React.Fragment, null, /*#__PURE__*/React.createElement(Insertion, {\n 
cache: cache,\n serializedArr: serializedArr\n }), ele);\n});\n\nif (process.env.NODE_ENV !== 'production') {\n ClassNames.displayName = 'EmotionClassNames';\n}\n\nif (process.env.NODE_ENV !== 'production') {\n var isBrowser = \"object\" !== 'undefined'; // #1727, #2905 for some reason Jest and Vitest evaluate modules twice if some consuming module gets mocked\n\n var isTestEnv = typeof jest !== 'undefined' || typeof vi !== 'undefined';\n\n if (isBrowser && !isTestEnv) {\n // globalThis has wide browser support - https://caniuse.com/?search=globalThis, Node.js 12 and later\n var globalContext = // $FlowIgnore\n typeof globalThis !== 'undefined' ? globalThis // eslint-disable-line no-undef\n : isBrowser ? window : global;\n var globalKey = \"__EMOTION_REACT_\" + pkg.version.split('.')[0] + \"__\";\n\n if (globalContext[globalKey]) {\n console.warn('You are loading @emotion/react when it is already loaded. Running ' + 'multiple instances may cause problems. This can happen if multiple ' + 'versions are used, or if multiple builds of the same version are ' + 'used.');\n }\n\n globalContext[globalKey] = true;\n }\n}\n\nexport { ClassNames, Global, jsx as createElement, css, jsx, keyframes };\n","import * as React from 'react';\nimport PropTypes from 'prop-types';\nimport { Global } from '@emotion/react';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nfunction isEmpty(obj) {\n return obj === undefined || obj === null || Object.keys(obj).length === 0;\n}\nexport default function GlobalStyles(props) {\n const {\n styles,\n defaultTheme = {}\n } = props;\n const globalStyles = typeof styles === 'function' ? themeInput => styles(isEmpty(themeInput) ? defaultTheme : themeInput) : styles;\n return /*#__PURE__*/_jsx(Global, {\n styles: globalStyles\n });\n}\nprocess.env.NODE_ENV !== \"production\" ? GlobalStyles.propTypes = {\n defaultTheme: PropTypes.object,\n styles: PropTypes.oneOfType([PropTypes.array, PropTypes.string, PropTypes.object, PropTypes.func])\n} : void 0;","import * as React from 'react';\nimport PropTypes from 'prop-types';\nimport { GlobalStyles as MuiGlobalStyles } from '@mui/styled-engine';\nimport useTheme from '../useTheme';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nfunction GlobalStyles({\n styles,\n themeId,\n defaultTheme = {}\n}) {\n const upperTheme = useTheme(defaultTheme);\n const globalStyles = typeof styles === 'function' ? styles(themeId ? upperTheme[themeId] || upperTheme : upperTheme) : styles;\n return /*#__PURE__*/_jsx(MuiGlobalStyles, {\n styles: globalStyles\n });\n}\nprocess.env.NODE_ENV !== \"production\" ? 
GlobalStyles.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit TypeScript types and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * @ignore\n */\n defaultTheme: PropTypes.object,\n /**\n * @ignore\n */\n styles: PropTypes /* @typescript-to-proptypes-ignore */.oneOfType([PropTypes.array, PropTypes.func, PropTypes.number, PropTypes.object, PropTypes.string, PropTypes.bool]),\n /**\n * @ignore\n */\n themeId: PropTypes.string\n} : void 0;\nexport default GlobalStyles;","import _extends from \"@babel/runtime/helpers/esm/extends\";\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport { GlobalStyles as SystemGlobalStyles } from '@mui/system';\nimport defaultTheme from '../styles/defaultTheme';\nimport THEME_ID from '../styles/identifier';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nfunction GlobalStyles(props) {\n return /*#__PURE__*/_jsx(SystemGlobalStyles, _extends({}, props, {\n defaultTheme: defaultTheme,\n themeId: THEME_ID\n }));\n}\nprocess.env.NODE_ENV !== \"production\" ? GlobalStyles.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * The styles you want to apply globally.\n */\n styles: PropTypes /* @typescript-to-proptypes-ignore */.oneOfType([PropTypes.array, PropTypes.func, PropTypes.number, PropTypes.object, PropTypes.string, PropTypes.bool])\n} : void 0;\nexport default GlobalStyles;","import _extends from \"@babel/runtime/helpers/esm/extends\";\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport useThemeProps from '../styles/useThemeProps';\nimport GlobalStyles from '../GlobalStyles';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nimport { jsxs as _jsxs } from \"react/jsx-runtime\";\nexport const html = (theme, enableColorScheme) => _extends({\n WebkitFontSmoothing: 'antialiased',\n // Antialiasing.\n MozOsxFontSmoothing: 'grayscale',\n // Antialiasing.\n // Change from `box-sizing: content-box` so that `width`\n // is not affected by `padding` or `border`.\n boxSizing: 'border-box',\n // Fix font resize problem in iOS\n WebkitTextSizeAdjust: '100%'\n}, enableColorScheme && !theme.vars && {\n colorScheme: theme.palette.mode\n});\nexport const body = theme => _extends({\n color: (theme.vars || theme).palette.text.primary\n}, theme.typography.body1, {\n backgroundColor: (theme.vars || theme).palette.background.default,\n '@media print': {\n // Save printer ink.\n backgroundColor: (theme.vars || theme).palette.common.white\n }\n});\nexport const styles = (theme, enableColorScheme = false) => {\n var _theme$components, _theme$components$Mui;\n const colorSchemeStyles = {};\n if (enableColorScheme && theme.colorSchemes) {\n Object.entries(theme.colorSchemes).forEach(([key, scheme]) => {\n var _scheme$palette;\n colorSchemeStyles[theme.getColorSchemeSelector(key).replace(/\\s*&/, '')] = {\n colorScheme: (_scheme$palette = scheme.palette) == null ? 
void 0 : _scheme$palette.mode\n };\n });\n }\n let defaultStyles = _extends({\n html: html(theme, enableColorScheme),\n '*, *::before, *::after': {\n boxSizing: 'inherit'\n },\n 'strong, b': {\n fontWeight: theme.typography.fontWeightBold\n },\n body: _extends({\n margin: 0\n }, body(theme), {\n // Add support for document.body.requestFullScreen().\n // Other elements, if background transparent, are not supported.\n '&::backdrop': {\n backgroundColor: (theme.vars || theme).palette.background.default\n }\n })\n }, colorSchemeStyles);\n const themeOverrides = (_theme$components = theme.components) == null ? void 0 : (_theme$components$Mui = _theme$components.MuiCssBaseline) == null ? void 0 : _theme$components$Mui.styleOverrides;\n if (themeOverrides) {\n defaultStyles = [defaultStyles, themeOverrides];\n }\n return defaultStyles;\n};\n\n/**\n * Kickstart an elegant, consistent, and simple baseline to build upon.\n */\nfunction CssBaseline(inProps) {\n const props = useThemeProps({\n props: inProps,\n name: 'MuiCssBaseline'\n });\n const {\n children,\n enableColorScheme = false\n } = props;\n return /*#__PURE__*/_jsxs(React.Fragment, {\n children: [/*#__PURE__*/_jsx(GlobalStyles, {\n styles: theme => styles(theme, enableColorScheme)\n }), children]\n });\n}\nprocess.env.NODE_ENV !== \"production\" ? CssBaseline.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * You can wrap a node.\n */\n children: PropTypes.node,\n /**\n * Enable `color-scheme` CSS property to use `theme.palette.mode`.\n * For more details, check out https://developer.mozilla.org/en-US/docs/Web/CSS/color-scheme\n * For browser support, check out https://caniuse.com/?search=color-scheme\n * @default false\n */\n enableColorScheme: PropTypes.bool\n} : void 0;\nexport default CssBaseline;","import { createSlice, PayloadAction } from \"@reduxjs/toolkit\";\n\nimport { ProjectState } from \"store/types\";\nimport { ThingSortKey } from \"utils/common/enums\";\nimport { mutatingFilter, toUnique } from \"utils/common/helpers\";\nimport { Partition } from \"utils/models/enums\";\n\nexport const initialState: ProjectState = {\n name: \"Untitled project\",\n selectedThingIds: [],\n sortType: ThingSortKey.None,\n activeKind: \"Image\",\n thingFilters: {},\n\n highlightedCategory: undefined,\n loadPercent: 1,\n loadMessage: \"\",\n kindTabFilters: [],\n};\n\nexport const projectSlice = createSlice({\n name: \"project\",\n initialState: initialState,\n reducers: {\n resetProject: () => initialState,\n\n createNewProject(state, action: PayloadAction<{ name: string }>) {\n state.name = action.payload.name;\n state.sortType = ThingSortKey.None;\n },\n setProject(state, action: PayloadAction<{ project: ProjectState }>) {\n // WARNING, don't do below (overwrites draft object)\n // state = action.payload.project;\n return action.payload.project;\n },\n setActiveKind(state, action: PayloadAction<{ kind: string }>) {\n state.activeKind = action.payload.kind;\n },\n selectThings(\n state,\n action: PayloadAction<{ ids: Array | string }>\n ) {\n const ids =\n typeof action.payload.ids === \"string\"\n ? 
[action.payload.ids]\n : action.payload.ids;\n const allSelectedThings = [\n ...new Set([...state.selectedThingIds, ...ids]),\n ];\n\n state.selectedThingIds = allSelectedThings;\n },\n deselectThings(\n state,\n action: PayloadAction<{ ids: Array | string }>\n ) {\n const ids =\n typeof action.payload.ids === \"string\"\n ? [action.payload.ids]\n : action.payload.ids;\n state.selectedThingIds = state.selectedThingIds.filter(\n (id: string) => !ids.includes(id)\n );\n },\n setSortType_new(state, action: PayloadAction<{ sortType: ThingSortKey }>) {\n state.sortType = action.payload.sortType;\n },\n setProjectName(state, action: PayloadAction<{ name: string }>) {\n state.name = action.payload.name;\n },\n setLoadPercent(\n state,\n action: PayloadAction<{ loadPercent?: number; loadMessage?: string }>\n ) {\n const { loadPercent, loadMessage } = action.payload;\n\n if (!loadPercent) {\n state.loadPercent = 1; // not / done loading\n state.loadMessage = \"\";\n } else if (loadPercent < 0) {\n state.loadPercent = -1; // indefinite loading\n state.loadMessage = loadMessage ?? \"Loading...\";\n } else if (loadPercent >= 1) {\n state.loadPercent = 1; // default to not loading if invalid\n state.loadMessage = \"\";\n } else {\n state.loadPercent = loadPercent; // loading [0, 1]\n state.loadMessage = loadMessage ?? \"\";\n }\n },\n sendLoadPercent(\n state,\n action: PayloadAction<{ loadPercent?: number; loadMessage?: string }>\n ) {},\n setLoadMessage(state, action: PayloadAction<{ message: string }>) {\n state.loadMessage = action.payload.message;\n },\n updateHighlightedCategory(\n state,\n action: PayloadAction<{ categoryId: string | undefined }>\n ) {\n state.highlightedCategory = action.payload.categoryId;\n },\n addThingCategoryFilters(\n state,\n action: PayloadAction<{\n categoryIds: string[];\n kinds?: string[];\n }>\n ) {\n const { categoryIds, kinds } = {\n kinds: [state.activeKind],\n ...action.payload,\n };\n\n for (const kind of kinds) {\n if (kind in state.thingFilters) {\n const existingFilters = state.thingFilters[kind].categoryId ?? [];\n const newFilters = toUnique([...categoryIds, ...existingFilters]);\n state.thingFilters[kind].categoryId = newFilters;\n } else {\n state.thingFilters[kind] = { categoryId: categoryIds, partition: [] };\n }\n }\n },\n removeThingCategoryFilters(\n state,\n action: PayloadAction<{\n categoryIds: string[] | \"all\";\n kinds?: string[];\n }>\n ) {\n const { categoryIds, kinds } = {\n kinds: [state.activeKind],\n ...action.payload,\n };\n\n for (const kind of kinds) {\n if (!(kind in state.thingFilters)) continue;\n if (categoryIds === \"all\") {\n state.thingFilters[kind].categoryId = [];\n } else {\n mutatingFilter(\n state.thingFilters[kind].categoryId,\n (id) => !categoryIds!.includes(id)\n );\n }\n if (\n state.thingFilters[kind].categoryId.length === 0 &&\n state.thingFilters[kind].partition.length === 0\n ) {\n delete state.thingFilters[kind];\n }\n }\n },\n addThingPartitionFilters(\n state,\n action: PayloadAction<{\n partitions: Partition[];\n kinds?: string[];\n }>\n ) {\n const { partitions, kinds } = {\n kinds: [state.activeKind],\n ...action.payload,\n };\n\n for (const kind of kinds) {\n if (kind in state.thingFilters) {\n const existingFilters = state.thingFilters[kind].partition ?? 
[];\n const newFilters = toUnique([...partitions, ...existingFilters]);\n state.thingFilters[kind].partition = newFilters;\n } else {\n state.thingFilters[kind] = { categoryId: [], partition: partitions };\n }\n }\n },\n removeThingPartitionFilters(\n state,\n action: PayloadAction<{\n partitions: string[] | \"all\";\n kinds?: string[];\n }>\n ) {\n const { partitions, kinds } = {\n kinds: [state.activeKind],\n ...action.payload,\n };\n for (const kind of kinds) {\n if (!(kind in state.thingFilters)) continue;\n if (partitions === \"all\") {\n state.thingFilters[kind].partition = [];\n } else {\n mutatingFilter(\n state.thingFilters[kind].partition,\n (id) => !partitions.includes(id)\n );\n }\n if (\n state.thingFilters[kind].partition.length === 0 &&\n state.thingFilters[kind].categoryId.length === 0\n ) {\n delete state.thingFilters[kind];\n }\n }\n },\n addKindTabFilter(state, action: PayloadAction<{ kindId: string }>) {\n state.kindTabFilters.push(action.payload.kindId);\n },\n removeKindTabFilter(state, action: PayloadAction<{ kindId: string }>) {\n mutatingFilter(\n state.kindTabFilters,\n (id) => id !== action.payload.kindId\n );\n },\n },\n});\n","import { unstable_generateUtilityClasses as generateUtilityClasses } from '@mui/utils';\nimport generateUtilityClass from '../generateUtilityClass';\nexport function getDividerUtilityClass(slot) {\n return generateUtilityClass('MuiDivider', slot);\n}\nconst dividerClasses = generateUtilityClasses('MuiDivider', ['root', 'absolute', 'fullWidth', 'inset', 'middle', 'flexItem', 'light', 'vertical', 'withChildren', 'withChildrenVertical', 'textAlignRight', 'textAlignLeft', 'wrapper', 'wrapperVertical']);\nexport default dividerClasses;","import _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nimport _extends from \"@babel/runtime/helpers/esm/extends\";\nconst _excluded = [\"absolute\", \"children\", \"className\", \"component\", \"flexItem\", \"light\", \"orientation\", \"role\", \"textAlign\", \"variant\"];\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport clsx from 'clsx';\nimport { unstable_composeClasses as composeClasses } from '@mui/base';\nimport { alpha } from '@mui/system';\nimport styled from '../styles/styled';\nimport useThemeProps from '../styles/useThemeProps';\nimport { getDividerUtilityClass } from './dividerClasses';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst useUtilityClasses = ownerState => {\n const {\n absolute,\n children,\n classes,\n flexItem,\n light,\n orientation,\n textAlign,\n variant\n } = ownerState;\n const slots = {\n root: ['root', absolute && 'absolute', variant, light && 'light', orientation === 'vertical' && 'vertical', flexItem && 'flexItem', children && 'withChildren', children && orientation === 'vertical' && 'withChildrenVertical', textAlign === 'right' && orientation !== 'vertical' && 'textAlignRight', textAlign === 'left' && orientation !== 'vertical' && 'textAlignLeft'],\n wrapper: ['wrapper', orientation === 'vertical' && 'wrapperVertical']\n };\n return composeClasses(slots, getDividerUtilityClass, classes);\n};\nconst DividerRoot = styled('div', {\n name: 'MuiDivider',\n slot: 'Root',\n overridesResolver: (props, styles) => {\n const {\n ownerState\n } = props;\n return [styles.root, ownerState.absolute && styles.absolute, styles[ownerState.variant], ownerState.light && styles.light, ownerState.orientation === 'vertical' && styles.vertical, ownerState.flexItem && styles.flexItem, ownerState.children && 
styles.withChildren, ownerState.children && ownerState.orientation === 'vertical' && styles.withChildrenVertical, ownerState.textAlign === 'right' && ownerState.orientation !== 'vertical' && styles.textAlignRight, ownerState.textAlign === 'left' && ownerState.orientation !== 'vertical' && styles.textAlignLeft];\n }\n})(({\n theme,\n ownerState\n}) => _extends({\n margin: 0,\n // Reset browser default style.\n flexShrink: 0,\n borderWidth: 0,\n borderStyle: 'solid',\n borderColor: (theme.vars || theme).palette.divider,\n borderBottomWidth: 'thin'\n}, ownerState.absolute && {\n position: 'absolute',\n bottom: 0,\n left: 0,\n width: '100%'\n}, ownerState.light && {\n borderColor: theme.vars ? `rgba(${theme.vars.palette.dividerChannel} / 0.08)` : alpha(theme.palette.divider, 0.08)\n}, ownerState.variant === 'inset' && {\n marginLeft: 72\n}, ownerState.variant === 'middle' && ownerState.orientation === 'horizontal' && {\n marginLeft: theme.spacing(2),\n marginRight: theme.spacing(2)\n}, ownerState.variant === 'middle' && ownerState.orientation === 'vertical' && {\n marginTop: theme.spacing(1),\n marginBottom: theme.spacing(1)\n}, ownerState.orientation === 'vertical' && {\n height: '100%',\n borderBottomWidth: 0,\n borderRightWidth: 'thin'\n}, ownerState.flexItem && {\n alignSelf: 'stretch',\n height: 'auto'\n}), ({\n ownerState\n}) => _extends({}, ownerState.children && {\n display: 'flex',\n whiteSpace: 'nowrap',\n textAlign: 'center',\n border: 0,\n '&::before, &::after': {\n content: '\"\"',\n alignSelf: 'center'\n }\n}), ({\n theme,\n ownerState\n}) => _extends({}, ownerState.children && ownerState.orientation !== 'vertical' && {\n '&::before, &::after': {\n width: '100%',\n borderTop: `thin solid ${(theme.vars || theme).palette.divider}`\n }\n}), ({\n theme,\n ownerState\n}) => _extends({}, ownerState.children && ownerState.orientation === 'vertical' && {\n flexDirection: 'column',\n '&::before, &::after': {\n height: '100%',\n borderLeft: `thin solid ${(theme.vars || theme).palette.divider}`\n }\n}), ({\n ownerState\n}) => _extends({}, ownerState.textAlign === 'right' && ownerState.orientation !== 'vertical' && {\n '&::before': {\n width: '90%'\n },\n '&::after': {\n width: '10%'\n }\n}, ownerState.textAlign === 'left' && ownerState.orientation !== 'vertical' && {\n '&::before': {\n width: '10%'\n },\n '&::after': {\n width: '90%'\n }\n}));\nconst DividerWrapper = styled('span', {\n name: 'MuiDivider',\n slot: 'Wrapper',\n overridesResolver: (props, styles) => {\n const {\n ownerState\n } = props;\n return [styles.wrapper, ownerState.orientation === 'vertical' && styles.wrapperVertical];\n }\n})(({\n theme,\n ownerState\n}) => _extends({\n display: 'inline-block',\n paddingLeft: `calc(${theme.spacing(1)} * 1.2)`,\n paddingRight: `calc(${theme.spacing(1)} * 1.2)`\n}, ownerState.orientation === 'vertical' && {\n paddingTop: `calc(${theme.spacing(1)} * 1.2)`,\n paddingBottom: `calc(${theme.spacing(1)} * 1.2)`\n}));\nconst Divider = /*#__PURE__*/React.forwardRef(function Divider(inProps, ref) {\n const props = useThemeProps({\n props: inProps,\n name: 'MuiDivider'\n });\n const {\n absolute = false,\n children,\n className,\n component = children ? 'div' : 'hr',\n flexItem = false,\n light = false,\n orientation = 'horizontal',\n role = component !== 'hr' ? 
'separator' : undefined,\n textAlign = 'center',\n variant = 'fullWidth'\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const ownerState = _extends({}, props, {\n absolute,\n component,\n flexItem,\n light,\n orientation,\n role,\n textAlign,\n variant\n });\n const classes = useUtilityClasses(ownerState);\n return /*#__PURE__*/_jsx(DividerRoot, _extends({\n as: component,\n className: clsx(classes.root, className),\n role: role,\n ref: ref,\n ownerState: ownerState\n }, other, {\n children: children ? /*#__PURE__*/_jsx(DividerWrapper, {\n className: classes.wrapper,\n ownerState: ownerState,\n children: children\n }) : null\n }));\n});\nprocess.env.NODE_ENV !== \"production\" ? Divider.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * Absolutely position the element.\n * @default false\n */\n absolute: PropTypes.bool,\n /**\n * The content of the component.\n */\n children: PropTypes.node,\n /**\n * Override or extend the styles applied to the component.\n */\n classes: PropTypes.object,\n /**\n * @ignore\n */\n className: PropTypes.string,\n /**\n * The component used for the root node.\n * Either a string to use a HTML element or a component.\n */\n component: PropTypes.elementType,\n /**\n * If `true`, a vertical divider will have the correct height when used in flex container.\n * (By default, a vertical divider will have a calculated height of `0px` if it is the child of a flex container.)\n * @default false\n */\n flexItem: PropTypes.bool,\n /**\n * If `true`, the divider will have a lighter color.\n * @default false\n */\n light: PropTypes.bool,\n /**\n * The component orientation.\n * @default 'horizontal'\n */\n orientation: PropTypes.oneOf(['horizontal', 'vertical']),\n /**\n * @ignore\n */\n role: PropTypes /* @typescript-to-proptypes-ignore */.string,\n /**\n * The system prop that allows defining system overrides as well as additional CSS styles.\n */\n sx: PropTypes.oneOfType([PropTypes.arrayOf(PropTypes.oneOfType([PropTypes.func, PropTypes.object, PropTypes.bool])), PropTypes.func, PropTypes.object]),\n /**\n * The text alignment.\n * @default 'center'\n */\n textAlign: PropTypes.oneOf(['center', 'left', 'right']),\n /**\n * The variant to use.\n * @default 'fullWidth'\n */\n variant: PropTypes /* @typescript-to-proptypes-ignore */.oneOfType([PropTypes.oneOf(['fullWidth', 'inset', 'middle']), PropTypes.string])\n} : void 0;\nexport default Divider;","import * as React from 'react';\n\n/**\n * @ignore - internal component.\n */\nconst ListContext = /*#__PURE__*/React.createContext({});\nif (process.env.NODE_ENV !== 'production') {\n ListContext.displayName = 'ListContext';\n}\nexport default ListContext;","import { unstable_generateUtilityClasses as generateUtilityClasses } from '@mui/utils';\nimport generateUtilityClass from '../generateUtilityClass';\nexport function getListUtilityClass(slot) {\n return generateUtilityClass('MuiList', slot);\n}\nconst listClasses = generateUtilityClasses('MuiList', ['root', 'padding', 'dense', 'subheader']);\nexport default listClasses;","import _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nimport _extends from \"@babel/runtime/helpers/esm/extends\";\nconst _excluded = 
[\"children\", \"className\", \"component\", \"dense\", \"disablePadding\", \"subheader\"];\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport clsx from 'clsx';\nimport { unstable_composeClasses as composeClasses } from '@mui/base';\nimport styled from '../styles/styled';\nimport useThemeProps from '../styles/useThemeProps';\nimport ListContext from './ListContext';\nimport { getListUtilityClass } from './listClasses';\nimport { jsxs as _jsxs } from \"react/jsx-runtime\";\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst useUtilityClasses = ownerState => {\n const {\n classes,\n disablePadding,\n dense,\n subheader\n } = ownerState;\n const slots = {\n root: ['root', !disablePadding && 'padding', dense && 'dense', subheader && 'subheader']\n };\n return composeClasses(slots, getListUtilityClass, classes);\n};\nconst ListRoot = styled('ul', {\n name: 'MuiList',\n slot: 'Root',\n overridesResolver: (props, styles) => {\n const {\n ownerState\n } = props;\n return [styles.root, !ownerState.disablePadding && styles.padding, ownerState.dense && styles.dense, ownerState.subheader && styles.subheader];\n }\n})(({\n ownerState\n}) => _extends({\n listStyle: 'none',\n margin: 0,\n padding: 0,\n position: 'relative'\n}, !ownerState.disablePadding && {\n paddingTop: 8,\n paddingBottom: 8\n}, ownerState.subheader && {\n paddingTop: 0\n}));\nconst List = /*#__PURE__*/React.forwardRef(function List(inProps, ref) {\n const props = useThemeProps({\n props: inProps,\n name: 'MuiList'\n });\n const {\n children,\n className,\n component = 'ul',\n dense = false,\n disablePadding = false,\n subheader\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const context = React.useMemo(() => ({\n dense\n }), [dense]);\n const ownerState = _extends({}, props, {\n component,\n dense,\n disablePadding\n });\n const classes = useUtilityClasses(ownerState);\n return /*#__PURE__*/_jsx(ListContext.Provider, {\n value: context,\n children: /*#__PURE__*/_jsxs(ListRoot, _extends({\n as: component,\n className: clsx(classes.root, className),\n ref: ref,\n ownerState: ownerState\n }, other, {\n children: [subheader, children]\n }))\n });\n});\nprocess.env.NODE_ENV !== \"production\" ? 
List.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * The content of the component.\n */\n children: PropTypes.node,\n /**\n * Override or extend the styles applied to the component.\n */\n classes: PropTypes.object,\n /**\n * @ignore\n */\n className: PropTypes.string,\n /**\n * The component used for the root node.\n * Either a string to use a HTML element or a component.\n */\n component: PropTypes.elementType,\n /**\n * If `true`, compact vertical padding designed for keyboard and mouse input is used for\n * the list and list items.\n * The prop is available to descendant components as the `dense` context.\n * @default false\n */\n dense: PropTypes.bool,\n /**\n * If `true`, vertical padding is removed from the list.\n * @default false\n */\n disablePadding: PropTypes.bool,\n /**\n * The content of the subheader, normally `ListSubheader`.\n */\n subheader: PropTypes.node,\n /**\n * The system prop that allows defining system overrides as well as additional CSS styles.\n */\n sx: PropTypes.oneOfType([PropTypes.arrayOf(PropTypes.oneOfType([PropTypes.func, PropTypes.object, PropTypes.bool])), PropTypes.func, PropTypes.object])\n} : void 0;\nexport default List;","import { unstable_generateUtilityClasses as generateUtilityClasses } from '@mui/utils';\nimport generateUtilityClass from '../generateUtilityClass';\nexport function getTypographyUtilityClass(slot) {\n return generateUtilityClass('MuiTypography', slot);\n}\nconst typographyClasses = generateUtilityClasses('MuiTypography', ['root', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'subtitle1', 'subtitle2', 'body1', 'body2', 'inherit', 'button', 'caption', 'overline', 'alignLeft', 'alignRight', 'alignCenter', 'alignJustify', 'noWrap', 'gutterBottom', 'paragraph']);\nexport default typographyClasses;","import _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nimport _extends from \"@babel/runtime/helpers/esm/extends\";\nconst _excluded = [\"align\", \"className\", \"component\", \"gutterBottom\", \"noWrap\", \"paragraph\", \"variant\", \"variantMapping\"];\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport clsx from 'clsx';\nimport { unstable_extendSxProp as extendSxProp } from '@mui/system';\nimport { unstable_composeClasses as composeClasses } from '@mui/base';\nimport styled from '../styles/styled';\nimport useThemeProps from '../styles/useThemeProps';\nimport capitalize from '../utils/capitalize';\nimport { getTypographyUtilityClass } from './typographyClasses';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst useUtilityClasses = ownerState => {\n const {\n align,\n gutterBottom,\n noWrap,\n paragraph,\n variant,\n classes\n } = ownerState;\n const slots = {\n root: ['root', variant, ownerState.align !== 'inherit' && `align${capitalize(align)}`, gutterBottom && 'gutterBottom', noWrap && 'noWrap', paragraph && 'paragraph']\n };\n return composeClasses(slots, getTypographyUtilityClass, classes);\n};\nexport const TypographyRoot = styled('span', {\n name: 'MuiTypography',\n slot: 'Root',\n overridesResolver: (props, styles) => {\n const {\n ownerState\n } = props;\n return [styles.root, ownerState.variant && styles[ownerState.variant], ownerState.align !== 'inherit' && 
styles[`align${capitalize(ownerState.align)}`], ownerState.noWrap && styles.noWrap, ownerState.gutterBottom && styles.gutterBottom, ownerState.paragraph && styles.paragraph];\n }\n})(({\n theme,\n ownerState\n}) => _extends({\n margin: 0\n}, ownerState.variant && theme.typography[ownerState.variant], ownerState.align !== 'inherit' && {\n textAlign: ownerState.align\n}, ownerState.noWrap && {\n overflow: 'hidden',\n textOverflow: 'ellipsis',\n whiteSpace: 'nowrap'\n}, ownerState.gutterBottom && {\n marginBottom: '0.35em'\n}, ownerState.paragraph && {\n marginBottom: 16\n}));\nconst defaultVariantMapping = {\n h1: 'h1',\n h2: 'h2',\n h3: 'h3',\n h4: 'h4',\n h5: 'h5',\n h6: 'h6',\n subtitle1: 'h6',\n subtitle2: 'h6',\n body1: 'p',\n body2: 'p',\n inherit: 'p'\n};\n\n// TODO v6: deprecate these color values in v5.x and remove the transformation in v6\nconst colorTransformations = {\n primary: 'primary.main',\n textPrimary: 'text.primary',\n secondary: 'secondary.main',\n textSecondary: 'text.secondary',\n error: 'error.main'\n};\nconst transformDeprecatedColors = color => {\n return colorTransformations[color] || color;\n};\nconst Typography = /*#__PURE__*/React.forwardRef(function Typography(inProps, ref) {\n const themeProps = useThemeProps({\n props: inProps,\n name: 'MuiTypography'\n });\n const color = transformDeprecatedColors(themeProps.color);\n const props = extendSxProp(_extends({}, themeProps, {\n color\n }));\n const {\n align = 'inherit',\n className,\n component,\n gutterBottom = false,\n noWrap = false,\n paragraph = false,\n variant = 'body1',\n variantMapping = defaultVariantMapping\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const ownerState = _extends({}, props, {\n align,\n color,\n className,\n component,\n gutterBottom,\n noWrap,\n paragraph,\n variant,\n variantMapping\n });\n const Component = component || (paragraph ? 'p' : variantMapping[variant] || defaultVariantMapping[variant]) || 'span';\n const classes = useUtilityClasses(ownerState);\n return /*#__PURE__*/_jsx(TypographyRoot, _extends({\n as: Component,\n ref: ref,\n ownerState: ownerState,\n className: clsx(classes.root, className)\n }, other));\n});\nprocess.env.NODE_ENV !== \"production\" ? 
Typography.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * Set the text-align on the component.\n * @default 'inherit'\n */\n align: PropTypes.oneOf(['center', 'inherit', 'justify', 'left', 'right']),\n /**\n * The content of the component.\n */\n children: PropTypes.node,\n /**\n * Override or extend the styles applied to the component.\n */\n classes: PropTypes.object,\n /**\n * @ignore\n */\n className: PropTypes.string,\n /**\n * The component used for the root node.\n * Either a string to use a HTML element or a component.\n */\n component: PropTypes.elementType,\n /**\n * If `true`, the text will have a bottom margin.\n * @default false\n */\n gutterBottom: PropTypes.bool,\n /**\n * If `true`, the text will not wrap, but instead will truncate with a text overflow ellipsis.\n *\n * Note that text overflow can only happen with block or inline-block level elements\n * (the element needs to have a width in order to overflow).\n * @default false\n */\n noWrap: PropTypes.bool,\n /**\n * If `true`, the element will be a paragraph element.\n * @default false\n */\n paragraph: PropTypes.bool,\n /**\n * The system prop that allows defining system overrides as well as additional CSS styles.\n */\n sx: PropTypes.oneOfType([PropTypes.arrayOf(PropTypes.oneOfType([PropTypes.func, PropTypes.object, PropTypes.bool])), PropTypes.func, PropTypes.object]),\n /**\n * Applies the theme typography styles.\n * @default 'body1'\n */\n variant: PropTypes /* @typescript-to-proptypes-ignore */.oneOfType([PropTypes.oneOf(['body1', 'body2', 'button', 'caption', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'inherit', 'overline', 'subtitle1', 'subtitle2']), PropTypes.string]),\n /**\n * The component maps the variant prop to a range of different HTML element types.\n * For instance, subtitle1 to `
`.\n * If you wish to change that mapping, you can provide your own.\n * Alternatively, you can use the `component` prop.\n * @default {\n * h1: 'h1',\n * h2: 'h2',\n * h3: 'h3',\n * h4: 'h4',\n * h5: 'h5',\n * h6: 'h6',\n * subtitle1: 'h6',\n * subtitle2: 'h6',\n * body1: 'p',\n * body2: 'p',\n * inherit: 'p',\n * }\n */\n variantMapping: PropTypes /* @typescript-to-proptypes-ignore */.object\n} : void 0;\nexport default Typography;","export default function _taggedTemplateLiteral(strings, raw) {\n if (!raw) {\n raw = strings.slice(0);\n }\n return Object.freeze(Object.defineProperties(strings, {\n raw: {\n value: Object.freeze(raw)\n }\n }));\n}","import setPrototypeOf from \"./setPrototypeOf.js\";\nexport default function _inheritsLoose(subClass, superClass) {\n subClass.prototype = Object.create(superClass.prototype);\n subClass.prototype.constructor = subClass;\n setPrototypeOf(subClass, superClass);\n}","import React from 'react';\nexport default React.createContext(null);","import { Children, cloneElement, isValidElement } from 'react';\n/**\n * Given `this.props.children`, return an object mapping key to child.\n *\n * @param {*} children `this.props.children`\n * @return {object} Mapping of key to child\n */\n\nexport function getChildMapping(children, mapFn) {\n var mapper = function mapper(child) {\n return mapFn && isValidElement(child) ? mapFn(child) : child;\n };\n\n var result = Object.create(null);\n if (children) Children.map(children, function (c) {\n return c;\n }).forEach(function (child) {\n // run the map function here instead so that the key is the computed one\n result[child.key] = mapper(child);\n });\n return result;\n}\n/**\n * When you're adding or removing children some may be added or removed in the\n * same render pass. We want to show *both* since we want to simultaneously\n * animate elements in and out. This function takes a previous set of keys\n * and a new set of keys and merges them with its best guess of the correct\n * ordering. In the future we may expose some of the utilities in\n * ReactMultiChild to make this easy, but for now React itself does not\n * directly have this concept of the union of prevChildren and nextChildren\n * so we implement it here.\n *\n * @param {object} prev prev children as returned from\n * `ReactTransitionChildMapping.getChildMapping()`.\n * @param {object} next next children as returned from\n * `ReactTransitionChildMapping.getChildMapping()`.\n * @return {object} a key set that contains all keys in `prev` and all keys\n * in `next` in a reasonable order.\n */\n\nexport function mergeChildMappings(prev, next) {\n prev = prev || {};\n next = next || {};\n\n function getValueForKey(key) {\n return key in next ? 
next[key] : prev[key];\n } // For each key of `next`, the list of keys to insert before that key in\n // the combined list\n\n\n var nextKeysPending = Object.create(null);\n var pendingKeys = [];\n\n for (var prevKey in prev) {\n if (prevKey in next) {\n if (pendingKeys.length) {\n nextKeysPending[prevKey] = pendingKeys;\n pendingKeys = [];\n }\n } else {\n pendingKeys.push(prevKey);\n }\n }\n\n var i;\n var childMapping = {};\n\n for (var nextKey in next) {\n if (nextKeysPending[nextKey]) {\n for (i = 0; i < nextKeysPending[nextKey].length; i++) {\n var pendingNextKey = nextKeysPending[nextKey][i];\n childMapping[nextKeysPending[nextKey][i]] = getValueForKey(pendingNextKey);\n }\n }\n\n childMapping[nextKey] = getValueForKey(nextKey);\n } // Finally, add the keys which didn't appear before any key in `next`\n\n\n for (i = 0; i < pendingKeys.length; i++) {\n childMapping[pendingKeys[i]] = getValueForKey(pendingKeys[i]);\n }\n\n return childMapping;\n}\n\nfunction getProp(child, prop, props) {\n return props[prop] != null ? props[prop] : child.props[prop];\n}\n\nexport function getInitialChildMapping(props, onExited) {\n return getChildMapping(props.children, function (child) {\n return cloneElement(child, {\n onExited: onExited.bind(null, child),\n in: true,\n appear: getProp(child, 'appear', props),\n enter: getProp(child, 'enter', props),\n exit: getProp(child, 'exit', props)\n });\n });\n}\nexport function getNextChildMapping(nextProps, prevChildMapping, onExited) {\n var nextChildMapping = getChildMapping(nextProps.children);\n var children = mergeChildMappings(prevChildMapping, nextChildMapping);\n Object.keys(children).forEach(function (key) {\n var child = children[key];\n if (!isValidElement(child)) return;\n var hasPrev = (key in prevChildMapping);\n var hasNext = (key in nextChildMapping);\n var prevChild = prevChildMapping[key];\n var isLeaving = isValidElement(prevChild) && !prevChild.props.in; // item is new (entering)\n\n if (hasNext && (!hasPrev || isLeaving)) {\n // console.log('entering', key)\n children[key] = cloneElement(child, {\n onExited: onExited.bind(null, child),\n in: true,\n exit: getProp(child, 'exit', nextProps),\n enter: getProp(child, 'enter', nextProps)\n });\n } else if (!hasNext && hasPrev && !isLeaving) {\n // item is old (exiting)\n // console.log('leaving', key)\n children[key] = cloneElement(child, {\n in: false\n });\n } else if (hasNext && hasPrev && isValidElement(prevChild)) {\n // item hasn't changed transition states\n // copy over the last transition props;\n // console.log('unchanged', key)\n children[key] = cloneElement(child, {\n onExited: onExited.bind(null, child),\n in: prevChild.props.in,\n exit: getProp(child, 'exit', nextProps),\n enter: getProp(child, 'enter', nextProps)\n });\n }\n });\n return children;\n}","import _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nimport _extends from \"@babel/runtime/helpers/esm/extends\";\nimport _assertThisInitialized from \"@babel/runtime/helpers/esm/assertThisInitialized\";\nimport _inheritsLoose from \"@babel/runtime/helpers/esm/inheritsLoose\";\nimport PropTypes from 'prop-types';\nimport React from 'react';\nimport TransitionGroupContext from './TransitionGroupContext';\nimport { getChildMapping, getInitialChildMapping, getNextChildMapping } from './utils/ChildMapping';\n\nvar values = Object.values || function (obj) {\n return Object.keys(obj).map(function (k) {\n return obj[k];\n });\n};\n\nvar defaultProps = {\n component: 'div',\n 
childFactory: function childFactory(child) {\n return child;\n }\n};\n/**\n * The `` component manages a set of transition components\n * (`` and ``) in a list. Like with the transition\n * components, `` is a state machine for managing the mounting\n * and unmounting of components over time.\n *\n * Consider the example below. As items are removed or added to the TodoList the\n * `in` prop is toggled automatically by the ``.\n *\n * Note that `` does not define any animation behavior!\n * Exactly _how_ a list item animates is up to the individual transition\n * component. This means you can mix and match animations across different list\n * items.\n */\n\nvar TransitionGroup = /*#__PURE__*/function (_React$Component) {\n _inheritsLoose(TransitionGroup, _React$Component);\n\n function TransitionGroup(props, context) {\n var _this;\n\n _this = _React$Component.call(this, props, context) || this;\n\n var handleExited = _this.handleExited.bind(_assertThisInitialized(_this)); // Initial children should all be entering, dependent on appear\n\n\n _this.state = {\n contextValue: {\n isMounting: true\n },\n handleExited: handleExited,\n firstRender: true\n };\n return _this;\n }\n\n var _proto = TransitionGroup.prototype;\n\n _proto.componentDidMount = function componentDidMount() {\n this.mounted = true;\n this.setState({\n contextValue: {\n isMounting: false\n }\n });\n };\n\n _proto.componentWillUnmount = function componentWillUnmount() {\n this.mounted = false;\n };\n\n TransitionGroup.getDerivedStateFromProps = function getDerivedStateFromProps(nextProps, _ref) {\n var prevChildMapping = _ref.children,\n handleExited = _ref.handleExited,\n firstRender = _ref.firstRender;\n return {\n children: firstRender ? getInitialChildMapping(nextProps, handleExited) : getNextChildMapping(nextProps, prevChildMapping, handleExited),\n firstRender: false\n };\n } // node is `undefined` when user provided `nodeRef` prop\n ;\n\n _proto.handleExited = function handleExited(child, node) {\n var currentChildMapping = getChildMapping(this.props.children);\n if (child.key in currentChildMapping) return;\n\n if (child.props.onExited) {\n child.props.onExited(node);\n }\n\n if (this.mounted) {\n this.setState(function (state) {\n var children = _extends({}, state.children);\n\n delete children[child.key];\n return {\n children: children\n };\n });\n }\n };\n\n _proto.render = function render() {\n var _this$props = this.props,\n Component = _this$props.component,\n childFactory = _this$props.childFactory,\n props = _objectWithoutPropertiesLoose(_this$props, [\"component\", \"childFactory\"]);\n\n var contextValue = this.state.contextValue;\n var children = values(this.state.children).map(childFactory);\n delete props.appear;\n delete props.enter;\n delete props.exit;\n\n if (Component === null) {\n return /*#__PURE__*/React.createElement(TransitionGroupContext.Provider, {\n value: contextValue\n }, children);\n }\n\n return /*#__PURE__*/React.createElement(TransitionGroupContext.Provider, {\n value: contextValue\n }, /*#__PURE__*/React.createElement(Component, props, children));\n };\n\n return TransitionGroup;\n}(React.Component);\n\nTransitionGroup.propTypes = process.env.NODE_ENV !== \"production\" ? {\n /**\n * `` renders a `
` by default. You can change this\n * behavior by providing a `component` prop.\n * If you use React v16+ and would like to avoid a wrapping `
` element\n * you can pass in `component={null}`. This is useful if the wrapping div\n * borks your css styles.\n */\n component: PropTypes.any,\n\n /**\n * A set of `` components, that are toggled `in` and out as they\n * leave. the `` will inject specific transition props, so\n * remember to spread them through if you are wrapping the `` as\n * with our `` example.\n *\n * While this component is meant for multiple `Transition` or `CSSTransition`\n * children, sometimes you may want to have a single transition child with\n * content that you want to be transitioned out and in when you change it\n * (e.g. routes, images etc.) In that case you can change the `key` prop of\n * the transition child as you change its content, this will cause\n * `TransitionGroup` to transition the child out and back in.\n */\n children: PropTypes.node,\n\n /**\n * A convenience prop that enables or disables appear animations\n * for all children. Note that specifying this will override any defaults set\n * on individual children Transitions.\n */\n appear: PropTypes.bool,\n\n /**\n * A convenience prop that enables or disables enter animations\n * for all children. Note that specifying this will override any defaults set\n * on individual children Transitions.\n */\n enter: PropTypes.bool,\n\n /**\n * A convenience prop that enables or disables exit animations\n * for all children. Note that specifying this will override any defaults set\n * on individual children Transitions.\n */\n exit: PropTypes.bool,\n\n /**\n * You may need to apply reactive updates to a child as it is exiting.\n * This is generally done by using `cloneElement` however in the case of an exiting\n * child the element has already been removed and not accessible to the consumer.\n *\n * If you do need to update a child as it leaves you can provide a `childFactory`\n * to wrap every child, even the ones that are leaving.\n *\n * @type Function(child: ReactElement) -> ReactElement\n */\n childFactory: PropTypes.func\n} : {};\nTransitionGroup.defaultProps = defaultProps;\nexport default TransitionGroup;","import * as React from 'react';\nimport PropTypes from 'prop-types';\nimport clsx from 'clsx';\n\n/**\n * @ignore - internal component.\n */\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nfunction Ripple(props) {\n const {\n className,\n classes,\n pulsate = false,\n rippleX,\n rippleY,\n rippleSize,\n in: inProp,\n onExited,\n timeout\n } = props;\n const [leaving, setLeaving] = React.useState(false);\n const rippleClassName = clsx(className, classes.ripple, classes.rippleVisible, pulsate && classes.ripplePulsate);\n const rippleStyles = {\n width: rippleSize,\n height: rippleSize,\n top: -(rippleSize / 2) + rippleY,\n left: -(rippleSize / 2) + rippleX\n };\n const childClassName = clsx(classes.child, leaving && classes.childLeaving, pulsate && classes.childPulsate);\n if (!inProp && !leaving) {\n setLeaving(true);\n }\n React.useEffect(() => {\n if (!inProp && onExited != null) {\n // react-transition-group#onExited\n const timeoutId = setTimeout(onExited, timeout);\n return () => {\n clearTimeout(timeoutId);\n };\n }\n return undefined;\n }, [onExited, inProp, timeout]);\n return /*#__PURE__*/_jsx(\"span\", {\n className: rippleClassName,\n style: rippleStyles,\n children: /*#__PURE__*/_jsx(\"span\", {\n className: childClassName\n })\n });\n}\nprocess.env.NODE_ENV !== \"production\" ? 
Ripple.propTypes = {\n /**\n * Override or extend the styles applied to the component.\n * See [CSS API](#css) below for more details.\n */\n classes: PropTypes.object.isRequired,\n className: PropTypes.string,\n /**\n * @ignore - injected from TransitionGroup\n */\n in: PropTypes.bool,\n /**\n * @ignore - injected from TransitionGroup\n */\n onExited: PropTypes.func,\n /**\n * If `true`, the ripple pulsates, typically indicating the keyboard focus state of an element.\n */\n pulsate: PropTypes.bool,\n /**\n * Diameter of the ripple.\n */\n rippleSize: PropTypes.number,\n /**\n * Horizontal position of the ripple center.\n */\n rippleX: PropTypes.number,\n /**\n * Vertical position of the ripple center.\n */\n rippleY: PropTypes.number,\n /**\n * exit delay\n */\n timeout: PropTypes.number.isRequired\n} : void 0;\nexport default Ripple;","import { unstable_generateUtilityClasses as generateUtilityClasses } from '@mui/utils';\nimport generateUtilityClass from '../generateUtilityClass';\nexport function getTouchRippleUtilityClass(slot) {\n return generateUtilityClass('MuiTouchRipple', slot);\n}\nconst touchRippleClasses = generateUtilityClasses('MuiTouchRipple', ['root', 'ripple', 'rippleVisible', 'ripplePulsate', 'child', 'childLeaving', 'childPulsate']);\nexport default touchRippleClasses;","import _extends from \"@babel/runtime/helpers/esm/extends\";\nimport _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nconst _excluded = [\"center\", \"classes\", \"className\"];\nlet _ = t => t,\n _t,\n _t2,\n _t3,\n _t4;\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport { TransitionGroup } from 'react-transition-group';\nimport clsx from 'clsx';\nimport { keyframes } from '@mui/system';\nimport styled from '../styles/styled';\nimport useThemeProps from '../styles/useThemeProps';\nimport Ripple from './Ripple';\nimport touchRippleClasses from './touchRippleClasses';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst DURATION = 550;\nexport const DELAY_RIPPLE = 80;\nconst enterKeyframe = keyframes(_t || (_t = _`\n 0% {\n transform: scale(0);\n opacity: 0.1;\n }\n\n 100% {\n transform: scale(1);\n opacity: 0.3;\n }\n`));\nconst exitKeyframe = keyframes(_t2 || (_t2 = _`\n 0% {\n opacity: 1;\n }\n\n 100% {\n opacity: 0;\n }\n`));\nconst pulsateKeyframe = keyframes(_t3 || (_t3 = _`\n 0% {\n transform: scale(1);\n }\n\n 50% {\n transform: scale(0.92);\n }\n\n 100% {\n transform: scale(1);\n }\n`));\nexport const TouchRippleRoot = styled('span', {\n name: 'MuiTouchRipple',\n slot: 'Root'\n})({\n overflow: 'hidden',\n pointerEvents: 'none',\n position: 'absolute',\n zIndex: 0,\n top: 0,\n right: 0,\n bottom: 0,\n left: 0,\n borderRadius: 'inherit'\n});\n\n// This `styled()` function invokes keyframes. `styled-components` only supports keyframes\n// in string templates. 
Do not convert these styles in JS object as it will break.\nexport const TouchRippleRipple = styled(Ripple, {\n name: 'MuiTouchRipple',\n slot: 'Ripple'\n})(_t4 || (_t4 = _`\n opacity: 0;\n position: absolute;\n\n &.${0} {\n opacity: 0.3;\n transform: scale(1);\n animation-name: ${0};\n animation-duration: ${0}ms;\n animation-timing-function: ${0};\n }\n\n &.${0} {\n animation-duration: ${0}ms;\n }\n\n & .${0} {\n opacity: 1;\n display: block;\n width: 100%;\n height: 100%;\n border-radius: 50%;\n background-color: currentColor;\n }\n\n & .${0} {\n opacity: 0;\n animation-name: ${0};\n animation-duration: ${0}ms;\n animation-timing-function: ${0};\n }\n\n & .${0} {\n position: absolute;\n /* @noflip */\n left: 0px;\n top: 0;\n animation-name: ${0};\n animation-duration: 2500ms;\n animation-timing-function: ${0};\n animation-iteration-count: infinite;\n animation-delay: 200ms;\n }\n`), touchRippleClasses.rippleVisible, enterKeyframe, DURATION, ({\n theme\n}) => theme.transitions.easing.easeInOut, touchRippleClasses.ripplePulsate, ({\n theme\n}) => theme.transitions.duration.shorter, touchRippleClasses.child, touchRippleClasses.childLeaving, exitKeyframe, DURATION, ({\n theme\n}) => theme.transitions.easing.easeInOut, touchRippleClasses.childPulsate, pulsateKeyframe, ({\n theme\n}) => theme.transitions.easing.easeInOut);\n\n/**\n * @ignore - internal component.\n *\n * TODO v5: Make private\n */\nconst TouchRipple = /*#__PURE__*/React.forwardRef(function TouchRipple(inProps, ref) {\n const props = useThemeProps({\n props: inProps,\n name: 'MuiTouchRipple'\n });\n const {\n center: centerProp = false,\n classes = {},\n className\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const [ripples, setRipples] = React.useState([]);\n const nextKey = React.useRef(0);\n const rippleCallback = React.useRef(null);\n React.useEffect(() => {\n if (rippleCallback.current) {\n rippleCallback.current();\n rippleCallback.current = null;\n }\n }, [ripples]);\n\n // Used to filter out mouse emulated events on mobile.\n const ignoringMouseDown = React.useRef(false);\n // We use a timer in order to only show the ripples for touch \"click\" like events.\n // We don't want to display the ripple for touch scroll events.\n const startTimer = React.useRef(null);\n\n // This is the hook called once the previous timeout is ready.\n const startTimerCommit = React.useRef(null);\n const container = React.useRef(null);\n React.useEffect(() => {\n return () => {\n clearTimeout(startTimer.current);\n };\n }, []);\n const startCommit = React.useCallback(params => {\n const {\n pulsate,\n rippleX,\n rippleY,\n rippleSize,\n cb\n } = params;\n setRipples(oldRipples => [...oldRipples, /*#__PURE__*/_jsx(TouchRippleRipple, {\n classes: {\n ripple: clsx(classes.ripple, touchRippleClasses.ripple),\n rippleVisible: clsx(classes.rippleVisible, touchRippleClasses.rippleVisible),\n ripplePulsate: clsx(classes.ripplePulsate, touchRippleClasses.ripplePulsate),\n child: clsx(classes.child, touchRippleClasses.child),\n childLeaving: clsx(classes.childLeaving, touchRippleClasses.childLeaving),\n childPulsate: clsx(classes.childPulsate, touchRippleClasses.childPulsate)\n },\n timeout: DURATION,\n pulsate: pulsate,\n rippleX: rippleX,\n rippleY: rippleY,\n rippleSize: rippleSize\n }, nextKey.current)]);\n nextKey.current += 1;\n rippleCallback.current = cb;\n }, [classes]);\n const start = React.useCallback((event = {}, options = {}, cb = () => {}) => {\n const {\n pulsate = false,\n center = centerProp || 
options.pulsate,\n fakeElement = false // For test purposes\n } = options;\n if ((event == null ? void 0 : event.type) === 'mousedown' && ignoringMouseDown.current) {\n ignoringMouseDown.current = false;\n return;\n }\n if ((event == null ? void 0 : event.type) === 'touchstart') {\n ignoringMouseDown.current = true;\n }\n const element = fakeElement ? null : container.current;\n const rect = element ? element.getBoundingClientRect() : {\n width: 0,\n height: 0,\n left: 0,\n top: 0\n };\n\n // Get the size of the ripple\n let rippleX;\n let rippleY;\n let rippleSize;\n if (center || event === undefined || event.clientX === 0 && event.clientY === 0 || !event.clientX && !event.touches) {\n rippleX = Math.round(rect.width / 2);\n rippleY = Math.round(rect.height / 2);\n } else {\n const {\n clientX,\n clientY\n } = event.touches && event.touches.length > 0 ? event.touches[0] : event;\n rippleX = Math.round(clientX - rect.left);\n rippleY = Math.round(clientY - rect.top);\n }\n if (center) {\n rippleSize = Math.sqrt((2 * rect.width ** 2 + rect.height ** 2) / 3);\n\n // For some reason the animation is broken on Mobile Chrome if the size is even.\n if (rippleSize % 2 === 0) {\n rippleSize += 1;\n }\n } else {\n const sizeX = Math.max(Math.abs((element ? element.clientWidth : 0) - rippleX), rippleX) * 2 + 2;\n const sizeY = Math.max(Math.abs((element ? element.clientHeight : 0) - rippleY), rippleY) * 2 + 2;\n rippleSize = Math.sqrt(sizeX ** 2 + sizeY ** 2);\n }\n\n // Touche devices\n if (event != null && event.touches) {\n // check that this isn't another touchstart due to multitouch\n // otherwise we will only clear a single timer when unmounting while two\n // are running\n if (startTimerCommit.current === null) {\n // Prepare the ripple effect.\n startTimerCommit.current = () => {\n startCommit({\n pulsate,\n rippleX,\n rippleY,\n rippleSize,\n cb\n });\n };\n // Delay the execution of the ripple effect.\n startTimer.current = setTimeout(() => {\n if (startTimerCommit.current) {\n startTimerCommit.current();\n startTimerCommit.current = null;\n }\n }, DELAY_RIPPLE); // We have to make a tradeoff with this value.\n }\n } else {\n startCommit({\n pulsate,\n rippleX,\n rippleY,\n rippleSize,\n cb\n });\n }\n }, [centerProp, startCommit]);\n const pulsate = React.useCallback(() => {\n start({}, {\n pulsate: true\n });\n }, [start]);\n const stop = React.useCallback((event, cb) => {\n clearTimeout(startTimer.current);\n\n // The touch interaction occurs too quickly.\n // We still want to show ripple effect.\n if ((event == null ? void 0 : event.type) === 'touchend' && startTimerCommit.current) {\n startTimerCommit.current();\n startTimerCommit.current = null;\n startTimer.current = setTimeout(() => {\n stop(event, cb);\n });\n return;\n }\n startTimerCommit.current = null;\n setRipples(oldRipples => {\n if (oldRipples.length > 0) {\n return oldRipples.slice(1);\n }\n return oldRipples;\n });\n rippleCallback.current = cb;\n }, []);\n React.useImperativeHandle(ref, () => ({\n pulsate,\n start,\n stop\n }), [pulsate, start, stop]);\n return /*#__PURE__*/_jsx(TouchRippleRoot, _extends({\n className: clsx(touchRippleClasses.root, classes.root, className),\n ref: container\n }, other, {\n children: /*#__PURE__*/_jsx(TransitionGroup, {\n component: null,\n exit: true,\n children: ripples\n })\n }));\n});\nprocess.env.NODE_ENV !== \"production\" ? 
TouchRipple.propTypes = {\n /**\n * If `true`, the ripple starts at the center of the component\n * rather than at the point of interaction.\n */\n center: PropTypes.bool,\n /**\n * Override or extend the styles applied to the component.\n * See [CSS API](#css) below for more details.\n */\n classes: PropTypes.object,\n /**\n * @ignore\n */\n className: PropTypes.string\n} : void 0;\nexport default TouchRipple;","import { unstable_generateUtilityClasses as generateUtilityClasses } from '@mui/utils';\nimport generateUtilityClass from '../generateUtilityClass';\nexport function getButtonBaseUtilityClass(slot) {\n return generateUtilityClass('MuiButtonBase', slot);\n}\nconst buttonBaseClasses = generateUtilityClasses('MuiButtonBase', ['root', 'disabled', 'focusVisible']);\nexport default buttonBaseClasses;","import _extends from \"@babel/runtime/helpers/esm/extends\";\nimport _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nconst _excluded = [\"action\", \"centerRipple\", \"children\", \"className\", \"component\", \"disabled\", \"disableRipple\", \"disableTouchRipple\", \"focusRipple\", \"focusVisibleClassName\", \"LinkComponent\", \"onBlur\", \"onClick\", \"onContextMenu\", \"onDragLeave\", \"onFocus\", \"onFocusVisible\", \"onKeyDown\", \"onKeyUp\", \"onMouseDown\", \"onMouseLeave\", \"onMouseUp\", \"onTouchEnd\", \"onTouchMove\", \"onTouchStart\", \"tabIndex\", \"TouchRippleProps\", \"touchRippleRef\", \"type\"];\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport clsx from 'clsx';\nimport { elementTypeAcceptingRef, refType } from '@mui/utils';\nimport composeClasses from '@mui/base/composeClasses';\nimport styled from '../styles/styled';\nimport useThemeProps from '../styles/useThemeProps';\nimport useForkRef from '../utils/useForkRef';\nimport useEventCallback from '../utils/useEventCallback';\nimport useIsFocusVisible from '../utils/useIsFocusVisible';\nimport TouchRipple from './TouchRipple';\nimport buttonBaseClasses, { getButtonBaseUtilityClass } from './buttonBaseClasses';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nimport { jsxs as _jsxs } from \"react/jsx-runtime\";\nconst useUtilityClasses = ownerState => {\n const {\n disabled,\n focusVisible,\n focusVisibleClassName,\n classes\n } = ownerState;\n const slots = {\n root: ['root', disabled && 'disabled', focusVisible && 'focusVisible']\n };\n const composedClasses = composeClasses(slots, getButtonBaseUtilityClass, classes);\n if (focusVisible && focusVisibleClassName) {\n composedClasses.root += ` ${focusVisibleClassName}`;\n }\n return composedClasses;\n};\nexport const ButtonBaseRoot = styled('button', {\n name: 'MuiButtonBase',\n slot: 'Root',\n overridesResolver: (props, styles) => styles.root\n})({\n display: 'inline-flex',\n alignItems: 'center',\n justifyContent: 'center',\n position: 'relative',\n boxSizing: 'border-box',\n WebkitTapHighlightColor: 'transparent',\n backgroundColor: 'transparent',\n // Reset default value\n // We disable the focus ring for mouse, touch and keyboard users.\n outline: 0,\n border: 0,\n margin: 0,\n // Remove the margin in Safari\n borderRadius: 0,\n padding: 0,\n // Remove the padding in Firefox\n cursor: 'pointer',\n userSelect: 'none',\n verticalAlign: 'middle',\n MozAppearance: 'none',\n // Reset\n WebkitAppearance: 'none',\n // Reset\n textDecoration: 'none',\n // So we take precedent over the style of a native element.\n color: 'inherit',\n '&::-moz-focus-inner': {\n borderStyle: 'none' // Remove Firefox 
dotted outline.\n },\n\n [`&.${buttonBaseClasses.disabled}`]: {\n pointerEvents: 'none',\n // Disable link interactions\n cursor: 'default'\n },\n '@media print': {\n colorAdjust: 'exact'\n }\n});\n\n/**\n * `ButtonBase` contains as few styles as possible.\n * It aims to be a simple building block for creating a button.\n * It contains a load of style reset and some focus/ripple logic.\n */\nconst ButtonBase = /*#__PURE__*/React.forwardRef(function ButtonBase(inProps, ref) {\n const props = useThemeProps({\n props: inProps,\n name: 'MuiButtonBase'\n });\n const {\n action,\n centerRipple = false,\n children,\n className,\n component = 'button',\n disabled = false,\n disableRipple = false,\n disableTouchRipple = false,\n focusRipple = false,\n LinkComponent = 'a',\n onBlur,\n onClick,\n onContextMenu,\n onDragLeave,\n onFocus,\n onFocusVisible,\n onKeyDown,\n onKeyUp,\n onMouseDown,\n onMouseLeave,\n onMouseUp,\n onTouchEnd,\n onTouchMove,\n onTouchStart,\n tabIndex = 0,\n TouchRippleProps,\n touchRippleRef,\n type\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const buttonRef = React.useRef(null);\n const rippleRef = React.useRef(null);\n const handleRippleRef = useForkRef(rippleRef, touchRippleRef);\n const {\n isFocusVisibleRef,\n onFocus: handleFocusVisible,\n onBlur: handleBlurVisible,\n ref: focusVisibleRef\n } = useIsFocusVisible();\n const [focusVisible, setFocusVisible] = React.useState(false);\n if (disabled && focusVisible) {\n setFocusVisible(false);\n }\n React.useImperativeHandle(action, () => ({\n focusVisible: () => {\n setFocusVisible(true);\n buttonRef.current.focus();\n }\n }), []);\n const [mountedState, setMountedState] = React.useState(false);\n React.useEffect(() => {\n setMountedState(true);\n }, []);\n const enableTouchRipple = mountedState && !disableRipple && !disabled;\n React.useEffect(() => {\n if (focusVisible && focusRipple && !disableRipple && mountedState) {\n rippleRef.current.pulsate();\n }\n }, [disableRipple, focusRipple, focusVisible, mountedState]);\n function useRippleHandler(rippleAction, eventCallback, skipRippleAction = disableTouchRipple) {\n return useEventCallback(event => {\n if (eventCallback) {\n eventCallback(event);\n }\n const ignore = skipRippleAction;\n if (!ignore && rippleRef.current) {\n rippleRef.current[rippleAction](event);\n }\n return true;\n });\n }\n const handleMouseDown = useRippleHandler('start', onMouseDown);\n const handleContextMenu = useRippleHandler('stop', onContextMenu);\n const handleDragLeave = useRippleHandler('stop', onDragLeave);\n const handleMouseUp = useRippleHandler('stop', onMouseUp);\n const handleMouseLeave = useRippleHandler('stop', event => {\n if (focusVisible) {\n event.preventDefault();\n }\n if (onMouseLeave) {\n onMouseLeave(event);\n }\n });\n const handleTouchStart = useRippleHandler('start', onTouchStart);\n const handleTouchEnd = useRippleHandler('stop', onTouchEnd);\n const handleTouchMove = useRippleHandler('stop', onTouchMove);\n const handleBlur = useRippleHandler('stop', event => {\n handleBlurVisible(event);\n if (isFocusVisibleRef.current === false) {\n setFocusVisible(false);\n }\n if (onBlur) {\n onBlur(event);\n }\n }, false);\n const handleFocus = useEventCallback(event => {\n // Fix for https://github.com/facebook/react/issues/7769\n if (!buttonRef.current) {\n buttonRef.current = event.currentTarget;\n }\n handleFocusVisible(event);\n if (isFocusVisibleRef.current === true) {\n setFocusVisible(true);\n if (onFocusVisible) {\n onFocusVisible(event);\n }\n 
}\n if (onFocus) {\n onFocus(event);\n }\n });\n const isNonNativeButton = () => {\n const button = buttonRef.current;\n return component && component !== 'button' && !(button.tagName === 'A' && button.href);\n };\n\n /**\n * IE11 shim for https://developer.mozilla.org/en-US/docs/Web/API/KeyboardEvent/repeat\n */\n const keydownRef = React.useRef(false);\n const handleKeyDown = useEventCallback(event => {\n // Check if key is already down to avoid repeats being counted as multiple activations\n if (focusRipple && !keydownRef.current && focusVisible && rippleRef.current && event.key === ' ') {\n keydownRef.current = true;\n rippleRef.current.stop(event, () => {\n rippleRef.current.start(event);\n });\n }\n if (event.target === event.currentTarget && isNonNativeButton() && event.key === ' ') {\n event.preventDefault();\n }\n if (onKeyDown) {\n onKeyDown(event);\n }\n\n // Keyboard accessibility for non interactive elements\n if (event.target === event.currentTarget && isNonNativeButton() && event.key === 'Enter' && !disabled) {\n event.preventDefault();\n if (onClick) {\n onClick(event);\n }\n }\n });\n const handleKeyUp = useEventCallback(event => {\n // calling preventDefault in keyUp on a \n *
\n * );\n * }\n * ```\n *\n * When the button is clicked the component will shift to the `'entering'` state\n * and stay there for 500ms (the value of `timeout`) before it finally switches\n * to `'entered'`.\n *\n * When `in` is `false` the same thing happens except the state moves from\n * `'exiting'` to `'exited'`.\n */\n\nvar Transition = /*#__PURE__*/function (_React$Component) {\n _inheritsLoose(Transition, _React$Component);\n\n function Transition(props, context) {\n var _this;\n\n _this = _React$Component.call(this, props, context) || this;\n var parentGroup = context; // In the context of a TransitionGroup all enters are really appears\n\n var appear = parentGroup && !parentGroup.isMounting ? props.enter : props.appear;\n var initialStatus;\n _this.appearStatus = null;\n\n if (props.in) {\n if (appear) {\n initialStatus = EXITED;\n _this.appearStatus = ENTERING;\n } else {\n initialStatus = ENTERED;\n }\n } else {\n if (props.unmountOnExit || props.mountOnEnter) {\n initialStatus = UNMOUNTED;\n } else {\n initialStatus = EXITED;\n }\n }\n\n _this.state = {\n status: initialStatus\n };\n _this.nextCallback = null;\n return _this;\n }\n\n Transition.getDerivedStateFromProps = function getDerivedStateFromProps(_ref, prevState) {\n var nextIn = _ref.in;\n\n if (nextIn && prevState.status === UNMOUNTED) {\n return {\n status: EXITED\n };\n }\n\n return null;\n } // getSnapshotBeforeUpdate(prevProps) {\n // let nextStatus = null\n // if (prevProps !== this.props) {\n // const { status } = this.state\n // if (this.props.in) {\n // if (status !== ENTERING && status !== ENTERED) {\n // nextStatus = ENTERING\n // }\n // } else {\n // if (status === ENTERING || status === ENTERED) {\n // nextStatus = EXITING\n // }\n // }\n // }\n // return { nextStatus }\n // }\n ;\n\n var _proto = Transition.prototype;\n\n _proto.componentDidMount = function componentDidMount() {\n this.updateStatus(true, this.appearStatus);\n };\n\n _proto.componentDidUpdate = function componentDidUpdate(prevProps) {\n var nextStatus = null;\n\n if (prevProps !== this.props) {\n var status = this.state.status;\n\n if (this.props.in) {\n if (status !== ENTERING && status !== ENTERED) {\n nextStatus = ENTERING;\n }\n } else {\n if (status === ENTERING || status === ENTERED) {\n nextStatus = EXITING;\n }\n }\n }\n\n this.updateStatus(false, nextStatus);\n };\n\n _proto.componentWillUnmount = function componentWillUnmount() {\n this.cancelNextCallback();\n };\n\n _proto.getTimeouts = function getTimeouts() {\n var timeout = this.props.timeout;\n var exit, enter, appear;\n exit = enter = appear = timeout;\n\n if (timeout != null && typeof timeout !== 'number') {\n exit = timeout.exit;\n enter = timeout.enter; // TODO: remove fallback for next major\n\n appear = timeout.appear !== undefined ? timeout.appear : enter;\n }\n\n return {\n exit: exit,\n enter: enter,\n appear: appear\n };\n };\n\n _proto.updateStatus = function updateStatus(mounting, nextStatus) {\n if (mounting === void 0) {\n mounting = false;\n }\n\n if (nextStatus !== null) {\n // nextStatus will always be ENTERING or EXITING.\n this.cancelNextCallback();\n\n if (nextStatus === ENTERING) {\n if (this.props.unmountOnExit || this.props.mountOnEnter) {\n var node = this.props.nodeRef ? 
this.props.nodeRef.current : ReactDOM.findDOMNode(this); // https://github.com/reactjs/react-transition-group/pull/749\n // With unmountOnExit or mountOnEnter, the enter animation should happen at the transition between `exited` and `entering`.\n // To make the animation happen, we have to separate each rendering and avoid being processed as batched.\n\n if (node) forceReflow(node);\n }\n\n this.performEnter(mounting);\n } else {\n this.performExit();\n }\n } else if (this.props.unmountOnExit && this.state.status === EXITED) {\n this.setState({\n status: UNMOUNTED\n });\n }\n };\n\n _proto.performEnter = function performEnter(mounting) {\n var _this2 = this;\n\n var enter = this.props.enter;\n var appearing = this.context ? this.context.isMounting : mounting;\n\n var _ref2 = this.props.nodeRef ? [appearing] : [ReactDOM.findDOMNode(this), appearing],\n maybeNode = _ref2[0],\n maybeAppearing = _ref2[1];\n\n var timeouts = this.getTimeouts();\n var enterTimeout = appearing ? timeouts.appear : timeouts.enter; // no enter animation skip right to ENTERED\n // if we are mounting and running this it means appear _must_ be set\n\n if (!mounting && !enter || config.disabled) {\n this.safeSetState({\n status: ENTERED\n }, function () {\n _this2.props.onEntered(maybeNode);\n });\n return;\n }\n\n this.props.onEnter(maybeNode, maybeAppearing);\n this.safeSetState({\n status: ENTERING\n }, function () {\n _this2.props.onEntering(maybeNode, maybeAppearing);\n\n _this2.onTransitionEnd(enterTimeout, function () {\n _this2.safeSetState({\n status: ENTERED\n }, function () {\n _this2.props.onEntered(maybeNode, maybeAppearing);\n });\n });\n });\n };\n\n _proto.performExit = function performExit() {\n var _this3 = this;\n\n var exit = this.props.exit;\n var timeouts = this.getTimeouts();\n var maybeNode = this.props.nodeRef ? undefined : ReactDOM.findDOMNode(this); // no exit animation skip right to EXITED\n\n if (!exit || config.disabled) {\n this.safeSetState({\n status: EXITED\n }, function () {\n _this3.props.onExited(maybeNode);\n });\n return;\n }\n\n this.props.onExit(maybeNode);\n this.safeSetState({\n status: EXITING\n }, function () {\n _this3.props.onExiting(maybeNode);\n\n _this3.onTransitionEnd(timeouts.exit, function () {\n _this3.safeSetState({\n status: EXITED\n }, function () {\n _this3.props.onExited(maybeNode);\n });\n });\n });\n };\n\n _proto.cancelNextCallback = function cancelNextCallback() {\n if (this.nextCallback !== null) {\n this.nextCallback.cancel();\n this.nextCallback = null;\n }\n };\n\n _proto.safeSetState = function safeSetState(nextState, callback) {\n // This shouldn't be necessary, but there are weird race conditions with\n // setState callbacks and unmounting in testing, so always make sure that\n // we can cancel any pending setState callbacks after we unmount.\n callback = this.setNextCallback(callback);\n this.setState(nextState, callback);\n };\n\n _proto.setNextCallback = function setNextCallback(callback) {\n var _this4 = this;\n\n var active = true;\n\n this.nextCallback = function (event) {\n if (active) {\n active = false;\n _this4.nextCallback = null;\n callback(event);\n }\n };\n\n this.nextCallback.cancel = function () {\n active = false;\n };\n\n return this.nextCallback;\n };\n\n _proto.onTransitionEnd = function onTransitionEnd(timeout, handler) {\n this.setNextCallback(handler);\n var node = this.props.nodeRef ? 
this.props.nodeRef.current : ReactDOM.findDOMNode(this);\n var doesNotHaveTimeoutOrListener = timeout == null && !this.props.addEndListener;\n\n if (!node || doesNotHaveTimeoutOrListener) {\n setTimeout(this.nextCallback, 0);\n return;\n }\n\n if (this.props.addEndListener) {\n var _ref3 = this.props.nodeRef ? [this.nextCallback] : [node, this.nextCallback],\n maybeNode = _ref3[0],\n maybeNextCallback = _ref3[1];\n\n this.props.addEndListener(maybeNode, maybeNextCallback);\n }\n\n if (timeout != null) {\n setTimeout(this.nextCallback, timeout);\n }\n };\n\n _proto.render = function render() {\n var status = this.state.status;\n\n if (status === UNMOUNTED) {\n return null;\n }\n\n var _this$props = this.props,\n children = _this$props.children,\n _in = _this$props.in,\n _mountOnEnter = _this$props.mountOnEnter,\n _unmountOnExit = _this$props.unmountOnExit,\n _appear = _this$props.appear,\n _enter = _this$props.enter,\n _exit = _this$props.exit,\n _timeout = _this$props.timeout,\n _addEndListener = _this$props.addEndListener,\n _onEnter = _this$props.onEnter,\n _onEntering = _this$props.onEntering,\n _onEntered = _this$props.onEntered,\n _onExit = _this$props.onExit,\n _onExiting = _this$props.onExiting,\n _onExited = _this$props.onExited,\n _nodeRef = _this$props.nodeRef,\n childProps = _objectWithoutPropertiesLoose(_this$props, [\"children\", \"in\", \"mountOnEnter\", \"unmountOnExit\", \"appear\", \"enter\", \"exit\", \"timeout\", \"addEndListener\", \"onEnter\", \"onEntering\", \"onEntered\", \"onExit\", \"onExiting\", \"onExited\", \"nodeRef\"]);\n\n return (\n /*#__PURE__*/\n // allows for nested Transitions\n React.createElement(TransitionGroupContext.Provider, {\n value: null\n }, typeof children === 'function' ? children(status, childProps) : React.cloneElement(React.Children.only(children), childProps))\n );\n };\n\n return Transition;\n}(React.Component);\n\nTransition.contextType = TransitionGroupContext;\nTransition.propTypes = process.env.NODE_ENV !== \"production\" ? {\n /**\n * A React reference to DOM element that need to transition:\n * https://stackoverflow.com/a/51127130/4671932\n *\n * - When `nodeRef` prop is used, `node` is not passed to callback functions\n * (e.g. `onEnter`) because user already has direct access to the node.\n * - When changing `key` prop of `Transition` in a `TransitionGroup` a new\n * `nodeRef` need to be provided to `Transition` with changed `key` prop\n * (see\n * [test/CSSTransition-test.js](https://github.com/reactjs/react-transition-group/blob/13435f897b3ab71f6e19d724f145596f5910581c/test/CSSTransition-test.js#L362-L437)).\n */\n nodeRef: PropTypes.shape({\n current: typeof Element === 'undefined' ? PropTypes.any : function (propValue, key, componentName, location, propFullName, secret) {\n var value = propValue[key];\n return PropTypes.instanceOf(value && 'ownerDocument' in value ? value.ownerDocument.defaultView.Element : Element)(propValue, key, componentName, location, propFullName, secret);\n }\n }),\n\n /**\n * A `function` child can be used instead of a React element. 
This function is\n * called with the current transition status (`'entering'`, `'entered'`,\n * `'exiting'`, `'exited'`), which can be used to apply context\n * specific props to a component.\n *\n * ```jsx\n * \n * {state => (\n * \n * )}\n * \n * ```\n */\n children: PropTypes.oneOfType([PropTypes.func.isRequired, PropTypes.element.isRequired]).isRequired,\n\n /**\n * Show the component; triggers the enter or exit states\n */\n in: PropTypes.bool,\n\n /**\n * By default the child component is mounted immediately along with\n * the parent `Transition` component. If you want to \"lazy mount\" the component on the\n * first `in={true}` you can set `mountOnEnter`. After the first enter transition the component will stay\n * mounted, even on \"exited\", unless you also specify `unmountOnExit`.\n */\n mountOnEnter: PropTypes.bool,\n\n /**\n * By default the child component stays mounted after it reaches the `'exited'` state.\n * Set `unmountOnExit` if you'd prefer to unmount the component after it finishes exiting.\n */\n unmountOnExit: PropTypes.bool,\n\n /**\n * By default the child component does not perform the enter transition when\n * it first mounts, regardless of the value of `in`. If you want this\n * behavior, set both `appear` and `in` to `true`.\n *\n * > **Note**: there are no special appear states like `appearing`/`appeared`, this prop\n * > only adds an additional enter transition. However, in the\n * > `` component that first enter transition does result in\n * > additional `.appear-*` classes, that way you can choose to style it\n * > differently.\n */\n appear: PropTypes.bool,\n\n /**\n * Enable or disable enter transitions.\n */\n enter: PropTypes.bool,\n\n /**\n * Enable or disable exit transitions.\n */\n exit: PropTypes.bool,\n\n /**\n * The duration of the transition, in milliseconds.\n * Required unless `addEndListener` is provided.\n *\n * You may specify a single timeout for all transitions:\n *\n * ```jsx\n * timeout={500}\n * ```\n *\n * or individually:\n *\n * ```jsx\n * timeout={{\n * appear: 500,\n * enter: 300,\n * exit: 500,\n * }}\n * ```\n *\n * - `appear` defaults to the value of `enter`\n * - `enter` defaults to `0`\n * - `exit` defaults to `0`\n *\n * @type {number | { enter?: number, exit?: number, appear?: number }}\n */\n timeout: function timeout(props) {\n var pt = timeoutsShape;\n if (!props.addEndListener) pt = pt.isRequired;\n\n for (var _len = arguments.length, args = new Array(_len > 1 ? _len - 1 : 0), _key = 1; _key < _len; _key++) {\n args[_key - 1] = arguments[_key];\n }\n\n return pt.apply(void 0, [props].concat(args));\n },\n\n /**\n * Add a custom transition end trigger. Called with the transitioning\n * DOM node and a `done` callback. Allows for more fine grained transition end\n * logic. Timeouts are still used as a fallback if provided.\n *\n * **Note**: when `nodeRef` prop is passed, `node` is not passed.\n *\n * ```jsx\n * addEndListener={(node, done) => {\n * // use the css transitionend event to mark the finish of a transition\n * node.addEventListener('transitionend', done, false);\n * }}\n * ```\n */\n addEndListener: PropTypes.func,\n\n /**\n * Callback fired before the \"entering\" status is applied. 
An extra parameter\n * `isAppearing` is supplied to indicate if the enter stage is occurring on the initial mount\n *\n * **Note**: when `nodeRef` prop is passed, `node` is not passed.\n *\n * @type Function(node: HtmlElement, isAppearing: bool) -> void\n */\n onEnter: PropTypes.func,\n\n /**\n * Callback fired after the \"entering\" status is applied. An extra parameter\n * `isAppearing` is supplied to indicate if the enter stage is occurring on the initial mount\n *\n * **Note**: when `nodeRef` prop is passed, `node` is not passed.\n *\n * @type Function(node: HtmlElement, isAppearing: bool)\n */\n onEntering: PropTypes.func,\n\n /**\n * Callback fired after the \"entered\" status is applied. An extra parameter\n * `isAppearing` is supplied to indicate if the enter stage is occurring on the initial mount\n *\n * **Note**: when `nodeRef` prop is passed, `node` is not passed.\n *\n * @type Function(node: HtmlElement, isAppearing: bool) -> void\n */\n onEntered: PropTypes.func,\n\n /**\n * Callback fired before the \"exiting\" status is applied.\n *\n * **Note**: when `nodeRef` prop is passed, `node` is not passed.\n *\n * @type Function(node: HtmlElement) -> void\n */\n onExit: PropTypes.func,\n\n /**\n * Callback fired after the \"exiting\" status is applied.\n *\n * **Note**: when `nodeRef` prop is passed, `node` is not passed.\n *\n * @type Function(node: HtmlElement) -> void\n */\n onExiting: PropTypes.func,\n\n /**\n * Callback fired after the \"exited\" status is applied.\n *\n * **Note**: when `nodeRef` prop is passed, `node` is not passed\n *\n * @type Function(node: HtmlElement) -> void\n */\n onExited: PropTypes.func\n} : {}; // Name the function so it is clearer in the documentation\n\nfunction noop() {}\n\nTransition.defaultProps = {\n in: false,\n mountOnEnter: false,\n unmountOnExit: false,\n appear: false,\n enter: true,\n exit: true,\n onEnter: noop,\n onEntering: noop,\n onEntered: noop,\n onExit: noop,\n onExiting: noop,\n onExited: noop\n};\nTransition.UNMOUNTED = UNMOUNTED;\nTransition.EXITED = EXITED;\nTransition.ENTERING = ENTERING;\nTransition.ENTERED = ENTERED;\nTransition.EXITING = EXITING;\nexport default Transition;","export var forceReflow = function forceReflow(node) {\n return node.scrollTop;\n};","export const reflow = node => node.scrollTop;\nexport function getTransitionProps(props, options) {\n var _style$transitionDura, _style$transitionTimi;\n const {\n timeout,\n easing,\n style = {}\n } = props;\n return {\n duration: (_style$transitionDura = style.transitionDuration) != null ? _style$transitionDura : typeof timeout === 'number' ? timeout : timeout[options.mode] || 0,\n easing: (_style$transitionTimi = style.transitionTimingFunction) != null ? _style$transitionTimi : typeof easing === 'object' ? 
easing[options.mode] : easing,\n delay: style.transitionDelay\n };\n}","import _extends from \"@babel/runtime/helpers/esm/extends\";\nimport _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nconst _excluded = [\"addEndListener\", \"appear\", \"children\", \"easing\", \"in\", \"onEnter\", \"onEntered\", \"onEntering\", \"onExit\", \"onExited\", \"onExiting\", \"style\", \"timeout\", \"TransitionComponent\"];\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport { elementAcceptingRef } from '@mui/utils';\nimport { Transition } from 'react-transition-group';\nimport useTheme from '../styles/useTheme';\nimport { getTransitionProps, reflow } from '../transitions/utils';\nimport useForkRef from '../utils/useForkRef';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nfunction getScale(value) {\n return `scale(${value}, ${value ** 2})`;\n}\nconst styles = {\n entering: {\n opacity: 1,\n transform: getScale(1)\n },\n entered: {\n opacity: 1,\n transform: 'none'\n }\n};\n\n/*\n TODO v6: remove\n Conditionally apply a workaround for the CSS transition bug in Safari 15.4 / WebKit browsers.\n */\nconst isWebKit154 = typeof navigator !== 'undefined' && /^((?!chrome|android).)*(safari|mobile)/i.test(navigator.userAgent) && /(os |version\\/)15(.|_)4/i.test(navigator.userAgent);\n\n/**\n * The Grow transition is used by the [Tooltip](/material-ui/react-tooltip/) and\n * [Popover](/material-ui/react-popover/) components.\n * It uses [react-transition-group](https://github.com/reactjs/react-transition-group) internally.\n */\nconst Grow = /*#__PURE__*/React.forwardRef(function Grow(props, ref) {\n const {\n addEndListener,\n appear = true,\n children,\n easing,\n in: inProp,\n onEnter,\n onEntered,\n onEntering,\n onExit,\n onExited,\n onExiting,\n style,\n timeout = 'auto',\n // eslint-disable-next-line react/prop-types\n TransitionComponent = Transition\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const timer = React.useRef();\n const autoTimeout = React.useRef();\n const theme = useTheme();\n const nodeRef = React.useRef(null);\n const handleRef = useForkRef(nodeRef, children.ref, ref);\n const normalizedTransitionCallback = callback => maybeIsAppearing => {\n if (callback) {\n const node = nodeRef.current;\n\n // onEnterXxx and onExitXxx callbacks have a different arguments.length value.\n if (maybeIsAppearing === undefined) {\n callback(node);\n } else {\n callback(node, maybeIsAppearing);\n }\n }\n };\n const handleEntering = normalizedTransitionCallback(onEntering);\n const handleEnter = normalizedTransitionCallback((node, isAppearing) => {\n reflow(node); // So the animation always start from the start.\n\n const {\n duration: transitionDuration,\n delay,\n easing: transitionTimingFunction\n } = getTransitionProps({\n style,\n timeout,\n easing\n }, {\n mode: 'enter'\n });\n let duration;\n if (timeout === 'auto') {\n duration = theme.transitions.getAutoHeightDuration(node.clientHeight);\n autoTimeout.current = duration;\n } else {\n duration = transitionDuration;\n }\n node.style.transition = [theme.transitions.create('opacity', {\n duration,\n delay\n }), theme.transitions.create('transform', {\n duration: isWebKit154 ? 
duration : duration * 0.666,\n delay,\n easing: transitionTimingFunction\n })].join(',');\n if (onEnter) {\n onEnter(node, isAppearing);\n }\n });\n const handleEntered = normalizedTransitionCallback(onEntered);\n const handleExiting = normalizedTransitionCallback(onExiting);\n const handleExit = normalizedTransitionCallback(node => {\n const {\n duration: transitionDuration,\n delay,\n easing: transitionTimingFunction\n } = getTransitionProps({\n style,\n timeout,\n easing\n }, {\n mode: 'exit'\n });\n let duration;\n if (timeout === 'auto') {\n duration = theme.transitions.getAutoHeightDuration(node.clientHeight);\n autoTimeout.current = duration;\n } else {\n duration = transitionDuration;\n }\n node.style.transition = [theme.transitions.create('opacity', {\n duration,\n delay\n }), theme.transitions.create('transform', {\n duration: isWebKit154 ? duration : duration * 0.666,\n delay: isWebKit154 ? delay : delay || duration * 0.333,\n easing: transitionTimingFunction\n })].join(',');\n node.style.opacity = 0;\n node.style.transform = getScale(0.75);\n if (onExit) {\n onExit(node);\n }\n });\n const handleExited = normalizedTransitionCallback(onExited);\n const handleAddEndListener = next => {\n if (timeout === 'auto') {\n timer.current = setTimeout(next, autoTimeout.current || 0);\n }\n if (addEndListener) {\n // Old call signature before `react-transition-group` implemented `nodeRef`\n addEndListener(nodeRef.current, next);\n }\n };\n React.useEffect(() => {\n return () => {\n clearTimeout(timer.current);\n };\n }, []);\n return /*#__PURE__*/_jsx(TransitionComponent, _extends({\n appear: appear,\n in: inProp,\n nodeRef: nodeRef,\n onEnter: handleEnter,\n onEntered: handleEntered,\n onEntering: handleEntering,\n onExit: handleExit,\n onExited: handleExited,\n onExiting: handleExiting,\n addEndListener: handleAddEndListener,\n timeout: timeout === 'auto' ? null : timeout\n }, other, {\n children: (state, childProps) => {\n return /*#__PURE__*/React.cloneElement(children, _extends({\n style: _extends({\n opacity: 0,\n transform: getScale(0.75),\n visibility: state === 'exited' && !inProp ? 'hidden' : undefined\n }, styles[state], style, children.props.style),\n ref: handleRef\n }, childProps));\n }\n }));\n});\nprocess.env.NODE_ENV !== \"production\" ? Grow.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * Add a custom transition end trigger. Called with the transitioning DOM\n * node and a done callback. Allows for more fine grained transition end\n * logic. 
Note: Timeouts are still used as a fallback if provided.\n */\n addEndListener: PropTypes.func,\n /**\n * Perform the enter transition when it first mounts if `in` is also `true`.\n * Set this to `false` to disable this behavior.\n * @default true\n */\n appear: PropTypes.bool,\n /**\n * A single child content element.\n */\n children: elementAcceptingRef.isRequired,\n /**\n * The transition timing function.\n * You may specify a single easing or a object containing enter and exit values.\n */\n easing: PropTypes.oneOfType([PropTypes.shape({\n enter: PropTypes.string,\n exit: PropTypes.string\n }), PropTypes.string]),\n /**\n * If `true`, the component will transition in.\n */\n in: PropTypes.bool,\n /**\n * @ignore\n */\n onEnter: PropTypes.func,\n /**\n * @ignore\n */\n onEntered: PropTypes.func,\n /**\n * @ignore\n */\n onEntering: PropTypes.func,\n /**\n * @ignore\n */\n onExit: PropTypes.func,\n /**\n * @ignore\n */\n onExited: PropTypes.func,\n /**\n * @ignore\n */\n onExiting: PropTypes.func,\n /**\n * @ignore\n */\n style: PropTypes.object,\n /**\n * The duration for the transition, in milliseconds.\n * You may specify a single timeout for all transitions, or individually with an object.\n *\n * Set to 'auto' to automatically calculate transition time based on height.\n * @default 'auto'\n */\n timeout: PropTypes.oneOfType([PropTypes.oneOf(['auto']), PropTypes.number, PropTypes.shape({\n appear: PropTypes.number,\n enter: PropTypes.number,\n exit: PropTypes.number\n })])\n} : void 0;\nGrow.muiSupportAuto = true;\nexport default Grow;","import * as React from 'react';\nimport * as ReactDOM from 'react-dom';\nimport PropTypes from 'prop-types';\nimport { exactProp, HTMLElementType, unstable_useEnhancedEffect as useEnhancedEffect, unstable_useForkRef as useForkRef, unstable_setRef as setRef } from '@mui/utils';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nfunction getContainer(container) {\n return typeof container === 'function' ? container() : container;\n}\n\n/**\n * Portals provide a first-class way to render children into a DOM node\n * that exists outside the DOM hierarchy of the parent component.\n *\n * Demos:\n *\n * - [Portal](https://mui.com/base/react-portal/)\n *\n * API:\n *\n * - [Portal API](https://mui.com/base/react-portal/components-api/#portal)\n */\nconst Portal = /*#__PURE__*/React.forwardRef(function Portal(props, forwardedRef) {\n const {\n children,\n container,\n disablePortal = false\n } = props;\n const [mountNode, setMountNode] = React.useState(null);\n // @ts-expect-error TODO upstream fix\n const handleRef = useForkRef( /*#__PURE__*/React.isValidElement(children) ? children.ref : null, forwardedRef);\n useEnhancedEffect(() => {\n if (!disablePortal) {\n setMountNode(getContainer(container) || document.body);\n }\n }, [container, disablePortal]);\n useEnhancedEffect(() => {\n if (mountNode && !disablePortal) {\n setRef(forwardedRef, mountNode);\n return () => {\n setRef(forwardedRef, null);\n };\n }\n return undefined;\n }, [forwardedRef, mountNode, disablePortal]);\n if (disablePortal) {\n if ( /*#__PURE__*/React.isValidElement(children)) {\n const newProps = {\n ref: handleRef\n };\n return /*#__PURE__*/React.cloneElement(children, newProps);\n }\n return /*#__PURE__*/_jsx(React.Fragment, {\n children: children\n });\n }\n return /*#__PURE__*/_jsx(React.Fragment, {\n children: mountNode ? /*#__PURE__*/ReactDOM.createPortal(children, mountNode) : mountNode\n });\n});\nprocess.env.NODE_ENV !== \"production\" ? 
Portal.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit TypeScript types and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * The children to render into the `container`.\n */\n children: PropTypes.node,\n /**\n * An HTML element or function that returns one.\n * The `container` will have the portal children appended to it.\n *\n * By default, it uses the body of the top-level document object,\n * so it's simply `document.body` most of the time.\n */\n container: PropTypes /* @typescript-to-proptypes-ignore */.oneOfType([HTMLElementType, PropTypes.func]),\n /**\n * The `children` will be under the DOM hierarchy of the parent component.\n * @default false\n */\n disablePortal: PropTypes.bool\n} : void 0;\nif (process.env.NODE_ENV !== 'production') {\n // eslint-disable-next-line\n Portal['propTypes' + ''] = exactProp(Portal.propTypes);\n}\nexport default Portal;","import { unstable_ownerWindow as ownerWindow, unstable_ownerDocument as ownerDocument, unstable_getScrollbarSize as getScrollbarSize } from '@mui/utils';\n// Is a vertical scrollbar displayed?\nfunction isOverflowing(container) {\n const doc = ownerDocument(container);\n if (doc.body === container) {\n return ownerWindow(container).innerWidth > doc.documentElement.clientWidth;\n }\n return container.scrollHeight > container.clientHeight;\n}\nexport function ariaHidden(element, show) {\n if (show) {\n element.setAttribute('aria-hidden', 'true');\n } else {\n element.removeAttribute('aria-hidden');\n }\n}\nfunction getPaddingRight(element) {\n return parseInt(ownerWindow(element).getComputedStyle(element).paddingRight, 10) || 0;\n}\nfunction isAriaHiddenForbiddenOnElement(element) {\n // The forbidden HTML tags are the ones from ARIA specification that\n // can be children of body and can't have aria-hidden attribute.\n // cf. 
https://www.w3.org/TR/html-aria/#docconformance\n const forbiddenTagNames = ['TEMPLATE', 'SCRIPT', 'STYLE', 'LINK', 'MAP', 'META', 'NOSCRIPT', 'PICTURE', 'COL', 'COLGROUP', 'PARAM', 'SLOT', 'SOURCE', 'TRACK'];\n const isForbiddenTagName = forbiddenTagNames.indexOf(element.tagName) !== -1;\n const isInputHidden = element.tagName === 'INPUT' && element.getAttribute('type') === 'hidden';\n return isForbiddenTagName || isInputHidden;\n}\nfunction ariaHiddenSiblings(container, mountElement, currentElement, elementsToExclude, show) {\n const blacklist = [mountElement, currentElement, ...elementsToExclude];\n [].forEach.call(container.children, element => {\n const isNotExcludedElement = blacklist.indexOf(element) === -1;\n const isNotForbiddenElement = !isAriaHiddenForbiddenOnElement(element);\n if (isNotExcludedElement && isNotForbiddenElement) {\n ariaHidden(element, show);\n }\n });\n}\nfunction findIndexOf(items, callback) {\n let idx = -1;\n items.some((item, index) => {\n if (callback(item)) {\n idx = index;\n return true;\n }\n return false;\n });\n return idx;\n}\nfunction handleContainer(containerInfo, props) {\n const restoreStyle = [];\n const container = containerInfo.container;\n if (!props.disableScrollLock) {\n if (isOverflowing(container)) {\n // Compute the size before applying overflow hidden to avoid any scroll jumps.\n const scrollbarSize = getScrollbarSize(ownerDocument(container));\n restoreStyle.push({\n value: container.style.paddingRight,\n property: 'padding-right',\n el: container\n });\n // Use computed style, here to get the real padding to add our scrollbar width.\n container.style.paddingRight = `${getPaddingRight(container) + scrollbarSize}px`;\n\n // .mui-fixed is a global helper.\n const fixedElements = ownerDocument(container).querySelectorAll('.mui-fixed');\n [].forEach.call(fixedElements, element => {\n restoreStyle.push({\n value: element.style.paddingRight,\n property: 'padding-right',\n el: element\n });\n element.style.paddingRight = `${getPaddingRight(element) + scrollbarSize}px`;\n });\n }\n let scrollContainer;\n if (container.parentNode instanceof DocumentFragment) {\n scrollContainer = ownerDocument(container).body;\n } else {\n // Improve Gatsby support\n // https://css-tricks.com/snippets/css/force-vertical-scrollbar/\n const parent = container.parentElement;\n const containerWindow = ownerWindow(container);\n scrollContainer = (parent == null ? void 0 : parent.nodeName) === 'HTML' && containerWindow.getComputedStyle(parent).overflowY === 'scroll' ? 
parent : container;\n }\n\n // Block the scroll even if no scrollbar is visible to account for mobile keyboard\n // screensize shrink.\n restoreStyle.push({\n value: scrollContainer.style.overflow,\n property: 'overflow',\n el: scrollContainer\n }, {\n value: scrollContainer.style.overflowX,\n property: 'overflow-x',\n el: scrollContainer\n }, {\n value: scrollContainer.style.overflowY,\n property: 'overflow-y',\n el: scrollContainer\n });\n scrollContainer.style.overflow = 'hidden';\n }\n const restore = () => {\n restoreStyle.forEach(({\n value,\n el,\n property\n }) => {\n if (value) {\n el.style.setProperty(property, value);\n } else {\n el.style.removeProperty(property);\n }\n });\n };\n return restore;\n}\nfunction getHiddenSiblings(container) {\n const hiddenSiblings = [];\n [].forEach.call(container.children, element => {\n if (element.getAttribute('aria-hidden') === 'true') {\n hiddenSiblings.push(element);\n }\n });\n return hiddenSiblings;\n}\n/**\n * @ignore - do not document.\n *\n * Proper state management for containers and the modals in those containers.\n * Simplified, but inspired by react-overlay's ModalManager class.\n * Used by the Modal to ensure proper styling of containers.\n */\nexport default class ModalManager {\n constructor() {\n this.containers = void 0;\n this.modals = void 0;\n this.modals = [];\n this.containers = [];\n }\n add(modal, container) {\n let modalIndex = this.modals.indexOf(modal);\n if (modalIndex !== -1) {\n return modalIndex;\n }\n modalIndex = this.modals.length;\n this.modals.push(modal);\n\n // If the modal we are adding is already in the DOM.\n if (modal.modalRef) {\n ariaHidden(modal.modalRef, false);\n }\n const hiddenSiblings = getHiddenSiblings(container);\n ariaHiddenSiblings(container, modal.mount, modal.modalRef, hiddenSiblings, true);\n const containerIndex = findIndexOf(this.containers, item => item.container === container);\n if (containerIndex !== -1) {\n this.containers[containerIndex].modals.push(modal);\n return modalIndex;\n }\n this.containers.push({\n modals: [modal],\n container,\n restore: null,\n hiddenSiblings\n });\n return modalIndex;\n }\n mount(modal, props) {\n const containerIndex = findIndexOf(this.containers, item => item.modals.indexOf(modal) !== -1);\n const containerInfo = this.containers[containerIndex];\n if (!containerInfo.restore) {\n containerInfo.restore = handleContainer(containerInfo, props);\n }\n }\n remove(modal, ariaHiddenState = true) {\n const modalIndex = this.modals.indexOf(modal);\n if (modalIndex === -1) {\n return modalIndex;\n }\n const containerIndex = findIndexOf(this.containers, item => item.modals.indexOf(modal) !== -1);\n const containerInfo = this.containers[containerIndex];\n containerInfo.modals.splice(containerInfo.modals.indexOf(modal), 1);\n this.modals.splice(modalIndex, 1);\n\n // If that was the last modal in a container, clean up the container.\n if (containerInfo.modals.length === 0) {\n // The modal might be closed before it had the chance to be mounted in the DOM.\n if (containerInfo.restore) {\n containerInfo.restore();\n }\n if (modal.modalRef) {\n // In case the modal wasn't in the DOM yet.\n ariaHidden(modal.modalRef, ariaHiddenState);\n }\n ariaHiddenSiblings(containerInfo.container, modal.mount, modal.modalRef, containerInfo.hiddenSiblings, false);\n this.containers.splice(containerIndex, 1);\n } else {\n // Otherwise make sure the next top modal is visible to a screen reader.\n const nextTop = containerInfo.modals[containerInfo.modals.length - 1];\n // as soon as 
a modal is adding its modalRef is undefined. it can't set\n // aria-hidden because the dom element doesn't exist either\n // when modal was unmounted before modalRef gets null\n if (nextTop.modalRef) {\n ariaHidden(nextTop.modalRef, false);\n }\n }\n return modalIndex;\n }\n isTopModal(modal) {\n return this.modals.length > 0 && this.modals[this.modals.length - 1] === modal;\n }\n}","/* eslint-disable consistent-return, jsx-a11y/no-noninteractive-tabindex */\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport { exactProp, elementAcceptingRef, unstable_useForkRef as useForkRef, unstable_ownerDocument as ownerDocument } from '@mui/utils';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nimport { jsxs as _jsxs } from \"react/jsx-runtime\";\n// Inspired by https://github.com/focus-trap/tabbable\nconst candidatesSelector = ['input', 'select', 'textarea', 'a[href]', 'button', '[tabindex]', 'audio[controls]', 'video[controls]', '[contenteditable]:not([contenteditable=\"false\"])'].join(',');\nfunction getTabIndex(node) {\n const tabindexAttr = parseInt(node.getAttribute('tabindex') || '', 10);\n if (!Number.isNaN(tabindexAttr)) {\n return tabindexAttr;\n }\n\n // Browsers do not return `tabIndex` correctly for contentEditable nodes;\n // https://bugs.chromium.org/p/chromium/issues/detail?id=661108&q=contenteditable%20tabindex&can=2\n // so if they don't have a tabindex attribute specifically set, assume it's 0.\n // in Chrome,
, \n open a GitHub issue\n \n {\" or visit \"}\n \n forum.image.sc/tag/piximi\n \n {\" to report this error.\"}\n

\n

\n {\"Save your data before reloading Piximi:\"}\n

\n

\n \n\n \n \n {classifierModelStatus === ModelStatus.Trained && (\n \n )}\n {segmenterModelStatus === ModelStatus.Trained && (\n \n )}\n \n\n \n\n \n\n \n \n \n );\n};\n","import React, { useState } from \"react\";\nimport { useDispatch, useSelector } from \"react-redux\";\nimport { useHotkeys } from \"hooks\";\n\nimport { Alert, Box } from \"@mui/material\";\n\nimport { CustomNumberTextField } from \"components/forms/CustomNumberTextField\";\n\nimport { HotkeyView } from \"utils/common/enums\";\nimport { DialogWithAction } from \"../DialogWithAction\";\nimport { applicationSettingsSlice } from \"store/applicationSettings\";\nimport { dataSlice } from \"store/data/dataSlice\";\nimport { uploadImages } from \"utils/file-io/helpers\";\nimport { ImageShapeInfo } from \"utils/file-io/types\";\nimport { ImageShapeEnum } from \"utils/file-io/enums\";\nimport { selectUnknownImageCategory } from \"store/data/selectors\";\nimport { generateNewKind } from \"store/data/helpers\";\n\ntype ImageShapeDialogProps = {\n files: FileList;\n open: boolean;\n onClose: () => void;\n referenceImageShape: ImageShapeInfo;\n};\n\nexport const ImageShapeDialog = ({\n files,\n open,\n onClose,\n referenceImageShape,\n}: ImageShapeDialogProps) => {\n const dispatch = useDispatch();\n\n const [channels, setChannels] = useState(\n referenceImageShape.components && referenceImageShape.components % 3 !== 0\n ? referenceImageShape.components\n : 3 // assume 3 if no compnents key set or if cleanly divisible by 3\n );\n\n const [frames, setFrames] = useState(-1);\n const [invalidImageShape, setInvalidImageShape] = useState(false);\n\n const unknownImageCategory = useSelector(selectUnknownImageCategory);\n\n const handleChannelsChange = async (channels: number) => {\n setChannels(channels);\n };\n\n const handleUploadImages = async () => {\n var imageFrames = frames;\n if (imageFrames === -1) {\n imageFrames =\n referenceImageShape.shape === ImageShapeEnum.HyperStackImage\n ? referenceImageShape.components! 
// components always set on HyperStackImage\n : 1;\n setFrames(imageFrames);\n }\n let imageCategory = unknownImageCategory;\n const slices = imageFrames / channels;\n\n // check if user-supplied channels cleanly divides known num frames\n if (!Number.isInteger(slices)) {\n setInvalidImageShape(true);\n return;\n }\n setInvalidImageShape(false);\n if (!imageCategory) {\n const { newKind, unknownCategory } = generateNewKind(\"Image\");\n imageCategory = unknownCategory.id;\n dispatch(dataSlice.actions.addKinds({ kinds: [newKind] }));\n dispatch(\n dataSlice.actions.addCategories({ categories: [unknownCategory] })\n );\n }\n\n const res = await uploadImages(\n files,\n channels,\n slices,\n referenceImageShape,\n imageCategory\n );\n //HACK: Future plans to re-work error messages\n if (res.warning) {\n dispatch(\n applicationSettingsSlice.actions.updateAlertState({\n alertState: res.warning,\n })\n );\n } else if (res.errors.length) {\n dispatch(\n applicationSettingsSlice.actions.updateAlertState({\n alertState: res.errors[0],\n })\n );\n } else {\n dispatch(\n dataSlice.actions.addThings({\n things: res.imagesToUpload,\n isPermanent: true,\n })\n );\n }\n\n closeDialog();\n };\n\n const closeDialog = () => {\n setInvalidImageShape(false);\n setFrames(-1);\n onClose();\n };\n\n useHotkeys(\n \"enter\",\n () => {\n handleUploadImages();\n },\n HotkeyView.ImageShapeDialog,\n { enableOnTags: [\"INPUT\"] },\n [handleUploadImages]\n );\n\n return (\n \n \n {invalidImageShape && (\n {`Invalid image shape: Cannot create a ${channels} (c) x ${(\n frames / channels\n ).toFixed(2)} (z) image from file.`}\n )}\n \n }\n onConfirm={handleUploadImages}\n confirmText=\"OK\"\n />\n );\n};\n","import { unstable_generateUtilityClasses as generateUtilityClasses } from '@mui/utils';\nimport generateUtilityClass from '../generateUtilityClass';\nexport function getTabUtilityClass(slot) {\n return generateUtilityClass('MuiTab', slot);\n}\nconst tabClasses = generateUtilityClasses('MuiTab', ['root', 'labelIcon', 'textColorInherit', 'textColorPrimary', 'textColorSecondary', 'selected', 'disabled', 'fullWidth', 'wrapped', 'iconWrapper']);\nexport default tabClasses;","// Source from https://github.com/alitaheri/normalize-scroll-left\nlet cachedType;\n\n/**\n * Based on the jquery plugin https://github.com/othree/jquery.rtl-scroll-type\n *\n * Types of scrollLeft, assuming scrollWidth=100 and direction is rtl.\n *\n * Type | <- Most Left | Most Right -> | Initial\n * ---------------- | ------------ | ------------- | -------\n * default | 0 | 100 | 100\n * negative (spec*) | -100 | 0 | 0\n * reverse | 100 | 0 | 0\n *\n * Edge 85: default\n * Safari 14: negative\n * Chrome 85: negative\n * Firefox 81: negative\n * IE11: reverse\n *\n * spec* https://drafts.csswg.org/cssom-view/#dom-window-scroll\n */\nexport function detectScrollType() {\n if (cachedType) {\n return cachedType;\n }\n const dummy = document.createElement('div');\n const container = document.createElement('div');\n container.style.width = '10px';\n container.style.height = '1px';\n dummy.appendChild(container);\n dummy.dir = 'rtl';\n dummy.style.fontSize = '14px';\n dummy.style.width = '4px';\n dummy.style.height = '1px';\n dummy.style.position = 'absolute';\n dummy.style.top = '-1000px';\n dummy.style.overflow = 'scroll';\n document.body.appendChild(dummy);\n cachedType = 'reverse';\n if (dummy.scrollLeft > 0) {\n cachedType = 'default';\n } else {\n dummy.scrollLeft = 1;\n if (dummy.scrollLeft === 0) {\n cachedType = 'negative';\n }\n }\n 
document.body.removeChild(dummy);\n return cachedType;\n}\n\n// Based on https://stackoverflow.com/a/24394376\nexport function getNormalizedScrollLeft(element, direction) {\n const scrollLeft = element.scrollLeft;\n\n // Perform the calculations only when direction is rtl to avoid messing up the ltr behavior\n if (direction !== 'rtl') {\n return scrollLeft;\n }\n const type = detectScrollType();\n switch (type) {\n case 'negative':\n return element.scrollWidth - element.clientWidth + scrollLeft;\n case 'reverse':\n return element.scrollWidth - element.clientWidth - scrollLeft;\n default:\n return scrollLeft;\n }\n}","import _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nimport _extends from \"@babel/runtime/helpers/esm/extends\";\nconst _excluded = [\"className\", \"disabled\", \"disableFocusRipple\", \"fullWidth\", \"icon\", \"iconPosition\", \"indicator\", \"label\", \"onChange\", \"onClick\", \"onFocus\", \"selected\", \"selectionFollowsFocus\", \"textColor\", \"value\", \"wrapped\"];\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport clsx from 'clsx';\nimport { unstable_composeClasses as composeClasses } from '@mui/base';\nimport ButtonBase from '../ButtonBase';\nimport capitalize from '../utils/capitalize';\nimport useThemeProps from '../styles/useThemeProps';\nimport styled from '../styles/styled';\nimport unsupportedProp from '../utils/unsupportedProp';\nimport tabClasses, { getTabUtilityClass } from './tabClasses';\nimport { jsxs as _jsxs } from \"react/jsx-runtime\";\nconst useUtilityClasses = ownerState => {\n const {\n classes,\n textColor,\n fullWidth,\n wrapped,\n icon,\n label,\n selected,\n disabled\n } = ownerState;\n const slots = {\n root: ['root', icon && label && 'labelIcon', `textColor${capitalize(textColor)}`, fullWidth && 'fullWidth', wrapped && 'wrapped', selected && 'selected', disabled && 'disabled'],\n iconWrapper: ['iconWrapper']\n };\n return composeClasses(slots, getTabUtilityClass, classes);\n};\nconst TabRoot = styled(ButtonBase, {\n name: 'MuiTab',\n slot: 'Root',\n overridesResolver: (props, styles) => {\n const {\n ownerState\n } = props;\n return [styles.root, ownerState.label && ownerState.icon && styles.labelIcon, styles[`textColor${capitalize(ownerState.textColor)}`], ownerState.fullWidth && styles.fullWidth, ownerState.wrapped && styles.wrapped];\n }\n})(({\n theme,\n ownerState\n}) => _extends({}, theme.typography.button, {\n maxWidth: 360,\n minWidth: 90,\n position: 'relative',\n minHeight: 48,\n flexShrink: 0,\n padding: '12px 16px',\n overflow: 'hidden',\n whiteSpace: 'normal',\n textAlign: 'center'\n}, ownerState.label && {\n flexDirection: ownerState.iconPosition === 'top' || ownerState.iconPosition === 'bottom' ? 
'column' : 'row'\n}, {\n lineHeight: 1.25\n}, ownerState.icon && ownerState.label && {\n minHeight: 72,\n paddingTop: 9,\n paddingBottom: 9,\n [`& > .${tabClasses.iconWrapper}`]: _extends({}, ownerState.iconPosition === 'top' && {\n marginBottom: 6\n }, ownerState.iconPosition === 'bottom' && {\n marginTop: 6\n }, ownerState.iconPosition === 'start' && {\n marginRight: theme.spacing(1)\n }, ownerState.iconPosition === 'end' && {\n marginLeft: theme.spacing(1)\n })\n}, ownerState.textColor === 'inherit' && {\n color: 'inherit',\n opacity: 0.6,\n // same opacity as theme.palette.text.secondary\n [`&.${tabClasses.selected}`]: {\n opacity: 1\n },\n [`&.${tabClasses.disabled}`]: {\n opacity: (theme.vars || theme).palette.action.disabledOpacity\n }\n}, ownerState.textColor === 'primary' && {\n color: (theme.vars || theme).palette.text.secondary,\n [`&.${tabClasses.selected}`]: {\n color: (theme.vars || theme).palette.primary.main\n },\n [`&.${tabClasses.disabled}`]: {\n color: (theme.vars || theme).palette.text.disabled\n }\n}, ownerState.textColor === 'secondary' && {\n color: (theme.vars || theme).palette.text.secondary,\n [`&.${tabClasses.selected}`]: {\n color: (theme.vars || theme).palette.secondary.main\n },\n [`&.${tabClasses.disabled}`]: {\n color: (theme.vars || theme).palette.text.disabled\n }\n}, ownerState.fullWidth && {\n flexShrink: 1,\n flexGrow: 1,\n flexBasis: 0,\n maxWidth: 'none'\n}, ownerState.wrapped && {\n fontSize: theme.typography.pxToRem(12)\n}));\nconst Tab = /*#__PURE__*/React.forwardRef(function Tab(inProps, ref) {\n const props = useThemeProps({\n props: inProps,\n name: 'MuiTab'\n });\n const {\n className,\n disabled = false,\n disableFocusRipple = false,\n // eslint-disable-next-line react/prop-types\n fullWidth,\n icon: iconProp,\n iconPosition = 'top',\n // eslint-disable-next-line react/prop-types\n indicator,\n label,\n onChange,\n onClick,\n onFocus,\n // eslint-disable-next-line react/prop-types\n selected,\n // eslint-disable-next-line react/prop-types\n selectionFollowsFocus,\n // eslint-disable-next-line react/prop-types\n textColor = 'inherit',\n value,\n wrapped = false\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const ownerState = _extends({}, props, {\n disabled,\n disableFocusRipple,\n selected,\n icon: !!iconProp,\n iconPosition,\n label: !!label,\n fullWidth,\n textColor,\n wrapped\n });\n const classes = useUtilityClasses(ownerState);\n const icon = iconProp && label && /*#__PURE__*/React.isValidElement(iconProp) ? /*#__PURE__*/React.cloneElement(iconProp, {\n className: clsx(classes.iconWrapper, iconProp.props.className)\n }) : iconProp;\n const handleClick = event => {\n if (!selected && onChange) {\n onChange(event, value);\n }\n if (onClick) {\n onClick(event);\n }\n };\n const handleFocus = event => {\n if (selectionFollowsFocus && !selected && onChange) {\n onChange(event, value);\n }\n if (onFocus) {\n onFocus(event);\n }\n };\n return /*#__PURE__*/_jsxs(TabRoot, _extends({\n focusRipple: !disableFocusRipple,\n className: clsx(classes.root, className),\n ref: ref,\n role: \"tab\",\n \"aria-selected\": selected,\n disabled: disabled,\n onClick: handleClick,\n onFocus: handleFocus,\n ownerState: ownerState,\n tabIndex: selected ? 0 : -1\n }, other, {\n children: [iconPosition === 'top' || iconPosition === 'start' ? 
/*#__PURE__*/_jsxs(React.Fragment, {\n children: [icon, label]\n }) : /*#__PURE__*/_jsxs(React.Fragment, {\n children: [label, icon]\n }), indicator]\n }));\n});\nprocess.env.NODE_ENV !== \"production\" ? Tab.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * This prop isn't supported.\n * Use the `component` prop if you need to change the children structure.\n */\n children: unsupportedProp,\n /**\n * Override or extend the styles applied to the component.\n */\n classes: PropTypes.object,\n /**\n * @ignore\n */\n className: PropTypes.string,\n /**\n * If `true`, the component is disabled.\n * @default false\n */\n disabled: PropTypes.bool,\n /**\n * If `true`, the keyboard focus ripple is disabled.\n * @default false\n */\n disableFocusRipple: PropTypes.bool,\n /**\n * If `true`, the ripple effect is disabled.\n *\n * ⚠️ Without a ripple there is no styling for :focus-visible by default. Be sure\n * to highlight the element by applying separate styles with the `.Mui-focusVisible` class.\n * @default false\n */\n disableRipple: PropTypes.bool,\n /**\n * The icon to display.\n */\n icon: PropTypes.oneOfType([PropTypes.element, PropTypes.string]),\n /**\n * The position of the icon relative to the label.\n * @default 'top'\n */\n iconPosition: PropTypes.oneOf(['bottom', 'end', 'start', 'top']),\n /**\n * The label element.\n */\n label: PropTypes.node,\n /**\n * @ignore\n */\n onChange: PropTypes.func,\n /**\n * @ignore\n */\n onClick: PropTypes.func,\n /**\n * @ignore\n */\n onFocus: PropTypes.func,\n /**\n * The system prop that allows defining system overrides as well as additional CSS styles.\n */\n sx: PropTypes.oneOfType([PropTypes.arrayOf(PropTypes.oneOfType([PropTypes.func, PropTypes.object, PropTypes.bool])), PropTypes.func, PropTypes.object]),\n /**\n * You can provide your own value. 
Otherwise, we fallback to the child position index.\n */\n value: PropTypes.any,\n /**\n * Tab labels appear in a single row.\n * They can use a second line if needed.\n * @default false\n */\n wrapped: PropTypes.bool\n} : void 0;\nexport default Tab;","function easeInOutSin(time) {\n return (1 + Math.sin(Math.PI * time - Math.PI / 2)) / 2;\n}\nexport default function animate(property, element, to, options = {}, cb = () => {}) {\n const {\n ease = easeInOutSin,\n duration = 300 // standard\n } = options;\n let start = null;\n const from = element[property];\n let cancelled = false;\n const cancel = () => {\n cancelled = true;\n };\n const step = timestamp => {\n if (cancelled) {\n cb(new Error('Animation cancelled'));\n return;\n }\n if (start === null) {\n start = timestamp;\n }\n const time = Math.min(1, (timestamp - start) / duration);\n element[property] = ease(time) * (to - from) + from;\n if (time >= 1) {\n requestAnimationFrame(() => {\n cb(null);\n });\n return;\n }\n requestAnimationFrame(step);\n };\n if (from === to) {\n cb(new Error('Element already at target position'));\n return cancel;\n }\n requestAnimationFrame(step);\n return cancel;\n}","import _extends from \"@babel/runtime/helpers/esm/extends\";\nimport _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nconst _excluded = [\"onChange\"];\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport debounce from '../utils/debounce';\nimport { ownerWindow, unstable_useEnhancedEffect as useEnhancedEffect } from '../utils';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst styles = {\n width: 99,\n height: 99,\n position: 'absolute',\n top: -9999,\n overflow: 'scroll'\n};\n\n/**\n * @ignore - internal component.\n * The component originates from https://github.com/STORIS/react-scrollbar-size.\n * It has been moved into the core in order to minimize the bundle size.\n */\nexport default function ScrollbarSize(props) {\n const {\n onChange\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const scrollbarHeight = React.useRef();\n const nodeRef = React.useRef(null);\n const setMeasurements = () => {\n scrollbarHeight.current = nodeRef.current.offsetHeight - nodeRef.current.clientHeight;\n };\n useEnhancedEffect(() => {\n const handleResize = debounce(() => {\n const prevHeight = scrollbarHeight.current;\n setMeasurements();\n if (prevHeight !== scrollbarHeight.current) {\n onChange(scrollbarHeight.current);\n }\n });\n const containerWindow = ownerWindow(nodeRef.current);\n containerWindow.addEventListener('resize', handleResize);\n return () => {\n handleResize.clear();\n containerWindow.removeEventListener('resize', handleResize);\n };\n }, [onChange]);\n React.useEffect(() => {\n setMeasurements();\n onChange(scrollbarHeight.current);\n }, [onChange]);\n return /*#__PURE__*/_jsx(\"div\", _extends({\n style: styles,\n ref: nodeRef\n }, other));\n}\nprocess.env.NODE_ENV !== \"production\" ? 
ScrollbarSize.propTypes = {\n onChange: PropTypes.func.isRequired\n} : void 0;","import * as React from 'react';\nimport createSvgIcon from '../../utils/createSvgIcon';\n\n/**\n * @ignore - internal component.\n */\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nexport default createSvgIcon( /*#__PURE__*/_jsx(\"path\", {\n d: \"M15.41 16.09l-4.58-4.59 4.58-4.59L14 5.5l-6 6 6 6z\"\n}), 'KeyboardArrowLeft');","import * as React from 'react';\nimport createSvgIcon from '../../utils/createSvgIcon';\n\n/**\n * @ignore - internal component.\n */\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nexport default createSvgIcon( /*#__PURE__*/_jsx(\"path\", {\n d: \"M8.59 16.34l4.58-4.59-4.58-4.59L10 5.75l6 6-6 6z\"\n}), 'KeyboardArrowRight');","import { unstable_generateUtilityClasses as generateUtilityClasses } from '@mui/utils';\nimport generateUtilityClass from '../generateUtilityClass';\nexport function getTabScrollButtonUtilityClass(slot) {\n return generateUtilityClass('MuiTabScrollButton', slot);\n}\nconst tabScrollButtonClasses = generateUtilityClasses('MuiTabScrollButton', ['root', 'vertical', 'horizontal', 'disabled']);\nexport default tabScrollButtonClasses;","import _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nimport _extends from \"@babel/runtime/helpers/esm/extends\";\nconst _excluded = [\"className\", \"slots\", \"slotProps\", \"direction\", \"orientation\", \"disabled\"];\n/* eslint-disable jsx-a11y/aria-role */\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport clsx from 'clsx';\nimport { unstable_composeClasses as composeClasses, useSlotProps } from '@mui/base';\nimport KeyboardArrowLeft from '../internal/svg-icons/KeyboardArrowLeft';\nimport KeyboardArrowRight from '../internal/svg-icons/KeyboardArrowRight';\nimport ButtonBase from '../ButtonBase';\nimport useTheme from '../styles/useTheme';\nimport useThemeProps from '../styles/useThemeProps';\nimport styled from '../styles/styled';\nimport tabScrollButtonClasses, { getTabScrollButtonUtilityClass } from './tabScrollButtonClasses';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst useUtilityClasses = ownerState => {\n const {\n classes,\n orientation,\n disabled\n } = ownerState;\n const slots = {\n root: ['root', orientation, disabled && 'disabled']\n };\n return composeClasses(slots, getTabScrollButtonUtilityClass, classes);\n};\nconst TabScrollButtonRoot = styled(ButtonBase, {\n name: 'MuiTabScrollButton',\n slot: 'Root',\n overridesResolver: (props, styles) => {\n const {\n ownerState\n } = props;\n return [styles.root, ownerState.orientation && styles[ownerState.orientation]];\n }\n})(({\n ownerState\n}) => _extends({\n width: 40,\n flexShrink: 0,\n opacity: 0.8,\n [`&.${tabScrollButtonClasses.disabled}`]: {\n opacity: 0\n }\n}, ownerState.orientation === 'vertical' && {\n width: '100%',\n height: 40,\n '& svg': {\n transform: `rotate(${ownerState.isRtl ? 
-90 : 90}deg)`\n }\n}));\nconst TabScrollButton = /*#__PURE__*/React.forwardRef(function TabScrollButton(inProps, ref) {\n var _slots$StartScrollBut, _slots$EndScrollButto;\n const props = useThemeProps({\n props: inProps,\n name: 'MuiTabScrollButton'\n });\n const {\n className,\n slots = {},\n slotProps = {},\n direction\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const theme = useTheme();\n const isRtl = theme.direction === 'rtl';\n const ownerState = _extends({\n isRtl\n }, props);\n const classes = useUtilityClasses(ownerState);\n const StartButtonIcon = (_slots$StartScrollBut = slots.StartScrollButtonIcon) != null ? _slots$StartScrollBut : KeyboardArrowLeft;\n const EndButtonIcon = (_slots$EndScrollButto = slots.EndScrollButtonIcon) != null ? _slots$EndScrollButto : KeyboardArrowRight;\n const startButtonIconProps = useSlotProps({\n elementType: StartButtonIcon,\n externalSlotProps: slotProps.startScrollButtonIcon,\n additionalProps: {\n fontSize: 'small'\n },\n ownerState\n });\n const endButtonIconProps = useSlotProps({\n elementType: EndButtonIcon,\n externalSlotProps: slotProps.endScrollButtonIcon,\n additionalProps: {\n fontSize: 'small'\n },\n ownerState\n });\n return /*#__PURE__*/_jsx(TabScrollButtonRoot, _extends({\n component: \"div\",\n className: clsx(classes.root, className),\n ref: ref,\n role: null,\n ownerState: ownerState,\n tabIndex: null\n }, other, {\n children: direction === 'left' ? /*#__PURE__*/_jsx(StartButtonIcon, _extends({}, startButtonIconProps)) : /*#__PURE__*/_jsx(EndButtonIcon, _extends({}, endButtonIconProps))\n }));\n});\nprocess.env.NODE_ENV !== \"production\" ? TabScrollButton.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * The content of the component.\n */\n children: PropTypes.node,\n /**\n * Override or extend the styles applied to the component.\n */\n classes: PropTypes.object,\n /**\n * @ignore\n */\n className: PropTypes.string,\n /**\n * The direction the button should indicate.\n */\n direction: PropTypes.oneOf(['left', 'right']).isRequired,\n /**\n * If `true`, the component is disabled.\n * @default false\n */\n disabled: PropTypes.bool,\n /**\n * The component orientation (layout flow direction).\n */\n orientation: PropTypes.oneOf(['horizontal', 'vertical']).isRequired,\n /**\n * The extra props for the slot components.\n * You can override the existing props or add new ones.\n * @default {}\n */\n slotProps: PropTypes.shape({\n endScrollButtonIcon: PropTypes.oneOfType([PropTypes.func, PropTypes.object]),\n startScrollButtonIcon: PropTypes.oneOfType([PropTypes.func, PropTypes.object])\n }),\n /**\n * The components used for each slot inside.\n * @default {}\n */\n slots: PropTypes.shape({\n EndScrollButtonIcon: PropTypes.elementType,\n StartScrollButtonIcon: PropTypes.elementType\n }),\n /**\n * The system prop that allows defining system overrides as well as additional CSS styles.\n */\n sx: PropTypes.oneOfType([PropTypes.arrayOf(PropTypes.oneOfType([PropTypes.func, PropTypes.object, PropTypes.bool])), PropTypes.func, PropTypes.object])\n} : void 0;\nexport default TabScrollButton;","import { unstable_generateUtilityClasses as generateUtilityClasses } from '@mui/utils';\nimport generateUtilityClass from 
'../generateUtilityClass';\nexport function getTabsUtilityClass(slot) {\n return generateUtilityClass('MuiTabs', slot);\n}\nconst tabsClasses = generateUtilityClasses('MuiTabs', ['root', 'vertical', 'flexContainer', 'flexContainerVertical', 'centered', 'scroller', 'fixed', 'scrollableX', 'scrollableY', 'hideScrollbar', 'scrollButtons', 'scrollButtonsHideMobile', 'indicator']);\nexport default tabsClasses;","import _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nimport _extends from \"@babel/runtime/helpers/esm/extends\";\nconst _excluded = [\"aria-label\", \"aria-labelledby\", \"action\", \"centered\", \"children\", \"className\", \"component\", \"allowScrollButtonsMobile\", \"indicatorColor\", \"onChange\", \"orientation\", \"ScrollButtonComponent\", \"scrollButtons\", \"selectionFollowsFocus\", \"slots\", \"slotProps\", \"TabIndicatorProps\", \"TabScrollButtonProps\", \"textColor\", \"value\", \"variant\", \"visibleScrollbar\"];\nimport * as React from 'react';\nimport { isFragment } from 'react-is';\nimport PropTypes from 'prop-types';\nimport clsx from 'clsx';\nimport { refType } from '@mui/utils';\nimport { unstable_composeClasses as composeClasses, useSlotProps } from '@mui/base';\nimport styled from '../styles/styled';\nimport useThemeProps from '../styles/useThemeProps';\nimport useTheme from '../styles/useTheme';\nimport debounce from '../utils/debounce';\nimport { getNormalizedScrollLeft, detectScrollType } from '../utils/scrollLeft';\nimport animate from '../internal/animate';\nimport ScrollbarSize from './ScrollbarSize';\nimport TabScrollButton from '../TabScrollButton';\nimport useEventCallback from '../utils/useEventCallback';\nimport tabsClasses, { getTabsUtilityClass } from './tabsClasses';\nimport ownerDocument from '../utils/ownerDocument';\nimport ownerWindow from '../utils/ownerWindow';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nimport { jsxs as _jsxs } from \"react/jsx-runtime\";\nconst nextItem = (list, item) => {\n if (list === item) {\n return list.firstChild;\n }\n if (item && item.nextElementSibling) {\n return item.nextElementSibling;\n }\n return list.firstChild;\n};\nconst previousItem = (list, item) => {\n if (list === item) {\n return list.lastChild;\n }\n if (item && item.previousElementSibling) {\n return item.previousElementSibling;\n }\n return list.lastChild;\n};\nconst moveFocus = (list, currentFocus, traversalFunction) => {\n let wrappedOnce = false;\n let nextFocus = traversalFunction(list, currentFocus);\n while (nextFocus) {\n // Prevent infinite loop.\n if (nextFocus === list.firstChild) {\n if (wrappedOnce) {\n return;\n }\n wrappedOnce = true;\n }\n\n // Same logic as useAutocomplete.js\n const nextFocusDisabled = nextFocus.disabled || nextFocus.getAttribute('aria-disabled') === 'true';\n if (!nextFocus.hasAttribute('tabindex') || nextFocusDisabled) {\n // Move to the next element.\n nextFocus = traversalFunction(list, nextFocus);\n } else {\n nextFocus.focus();\n return;\n }\n }\n};\nconst useUtilityClasses = ownerState => {\n const {\n vertical,\n fixed,\n hideScrollbar,\n scrollableX,\n scrollableY,\n centered,\n scrollButtonsHideMobile,\n classes\n } = ownerState;\n const slots = {\n root: ['root', vertical && 'vertical'],\n scroller: ['scroller', fixed && 'fixed', hideScrollbar && 'hideScrollbar', scrollableX && 'scrollableX', scrollableY && 'scrollableY'],\n flexContainer: ['flexContainer', vertical && 'flexContainerVertical', centered && 'centered'],\n indicator: ['indicator'],\n 
scrollButtons: ['scrollButtons', scrollButtonsHideMobile && 'scrollButtonsHideMobile'],\n scrollableX: [scrollableX && 'scrollableX'],\n hideScrollbar: [hideScrollbar && 'hideScrollbar']\n };\n return composeClasses(slots, getTabsUtilityClass, classes);\n};\nconst TabsRoot = styled('div', {\n name: 'MuiTabs',\n slot: 'Root',\n overridesResolver: (props, styles) => {\n const {\n ownerState\n } = props;\n return [{\n [`& .${tabsClasses.scrollButtons}`]: styles.scrollButtons\n }, {\n [`& .${tabsClasses.scrollButtons}`]: ownerState.scrollButtonsHideMobile && styles.scrollButtonsHideMobile\n }, styles.root, ownerState.vertical && styles.vertical];\n }\n})(({\n ownerState,\n theme\n}) => _extends({\n overflow: 'hidden',\n minHeight: 48,\n // Add iOS momentum scrolling for iOS < 13.0\n WebkitOverflowScrolling: 'touch',\n display: 'flex'\n}, ownerState.vertical && {\n flexDirection: 'column'\n}, ownerState.scrollButtonsHideMobile && {\n [`& .${tabsClasses.scrollButtons}`]: {\n [theme.breakpoints.down('sm')]: {\n display: 'none'\n }\n }\n}));\nconst TabsScroller = styled('div', {\n name: 'MuiTabs',\n slot: 'Scroller',\n overridesResolver: (props, styles) => {\n const {\n ownerState\n } = props;\n return [styles.scroller, ownerState.fixed && styles.fixed, ownerState.hideScrollbar && styles.hideScrollbar, ownerState.scrollableX && styles.scrollableX, ownerState.scrollableY && styles.scrollableY];\n }\n})(({\n ownerState\n}) => _extends({\n position: 'relative',\n display: 'inline-block',\n flex: '1 1 auto',\n whiteSpace: 'nowrap'\n}, ownerState.fixed && {\n overflowX: 'hidden',\n width: '100%'\n}, ownerState.hideScrollbar && {\n // Hide dimensionless scrollbar on macOS\n scrollbarWidth: 'none',\n // Firefox\n '&::-webkit-scrollbar': {\n display: 'none' // Safari + Chrome\n }\n}, ownerState.scrollableX && {\n overflowX: 'auto',\n overflowY: 'hidden'\n}, ownerState.scrollableY && {\n overflowY: 'auto',\n overflowX: 'hidden'\n}));\nconst FlexContainer = styled('div', {\n name: 'MuiTabs',\n slot: 'FlexContainer',\n overridesResolver: (props, styles) => {\n const {\n ownerState\n } = props;\n return [styles.flexContainer, ownerState.vertical && styles.flexContainerVertical, ownerState.centered && styles.centered];\n }\n})(({\n ownerState\n}) => _extends({\n display: 'flex'\n}, ownerState.vertical && {\n flexDirection: 'column'\n}, ownerState.centered && {\n justifyContent: 'center'\n}));\nconst TabsIndicator = styled('span', {\n name: 'MuiTabs',\n slot: 'Indicator',\n overridesResolver: (props, styles) => styles.indicator\n})(({\n ownerState,\n theme\n}) => _extends({\n position: 'absolute',\n height: 2,\n bottom: 0,\n width: '100%',\n transition: theme.transitions.create()\n}, ownerState.indicatorColor === 'primary' && {\n backgroundColor: (theme.vars || theme).palette.primary.main\n}, ownerState.indicatorColor === 'secondary' && {\n backgroundColor: (theme.vars || theme).palette.secondary.main\n}, ownerState.vertical && {\n height: '100%',\n width: 2,\n right: 0\n}));\nconst TabsScrollbarSize = styled(ScrollbarSize, {\n name: 'MuiTabs',\n slot: 'ScrollbarSize'\n})({\n overflowX: 'auto',\n overflowY: 'hidden',\n // Hide dimensionless scrollbar on macOS\n scrollbarWidth: 'none',\n // Firefox\n '&::-webkit-scrollbar': {\n display: 'none' // Safari + Chrome\n }\n});\n\nconst defaultIndicatorStyle = {};\nlet warnedOnceTabPresent = false;\nconst Tabs = /*#__PURE__*/React.forwardRef(function Tabs(inProps, ref) {\n const props = useThemeProps({\n props: inProps,\n name: 'MuiTabs'\n });\n const theme = 
useTheme();\n const isRtl = theme.direction === 'rtl';\n const {\n 'aria-label': ariaLabel,\n 'aria-labelledby': ariaLabelledBy,\n action,\n centered = false,\n children: childrenProp,\n className,\n component = 'div',\n allowScrollButtonsMobile = false,\n indicatorColor = 'primary',\n onChange,\n orientation = 'horizontal',\n ScrollButtonComponent = TabScrollButton,\n scrollButtons = 'auto',\n selectionFollowsFocus,\n slots = {},\n slotProps = {},\n TabIndicatorProps = {},\n TabScrollButtonProps = {},\n textColor = 'primary',\n value,\n variant = 'standard',\n visibleScrollbar = false\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const scrollable = variant === 'scrollable';\n const vertical = orientation === 'vertical';\n const scrollStart = vertical ? 'scrollTop' : 'scrollLeft';\n const start = vertical ? 'top' : 'left';\n const end = vertical ? 'bottom' : 'right';\n const clientSize = vertical ? 'clientHeight' : 'clientWidth';\n const size = vertical ? 'height' : 'width';\n const ownerState = _extends({}, props, {\n component,\n allowScrollButtonsMobile,\n indicatorColor,\n orientation,\n vertical,\n scrollButtons,\n textColor,\n variant,\n visibleScrollbar,\n fixed: !scrollable,\n hideScrollbar: scrollable && !visibleScrollbar,\n scrollableX: scrollable && !vertical,\n scrollableY: scrollable && vertical,\n centered: centered && !scrollable,\n scrollButtonsHideMobile: !allowScrollButtonsMobile\n });\n const classes = useUtilityClasses(ownerState);\n const startScrollButtonIconProps = useSlotProps({\n elementType: slots.StartScrollButtonIcon,\n externalSlotProps: slotProps.startScrollButtonIcon,\n ownerState\n });\n const endScrollButtonIconProps = useSlotProps({\n elementType: slots.EndScrollButtonIcon,\n externalSlotProps: slotProps.endScrollButtonIcon,\n ownerState\n });\n if (process.env.NODE_ENV !== 'production') {\n if (centered && scrollable) {\n console.error('MUI: You can not use the `centered={true}` and `variant=\"scrollable\"` properties ' + 'at the same time on a `Tabs` component.');\n }\n }\n const [mounted, setMounted] = React.useState(false);\n const [indicatorStyle, setIndicatorStyle] = React.useState(defaultIndicatorStyle);\n const [displayScroll, setDisplayScroll] = React.useState({\n start: false,\n end: false\n });\n const [scrollerStyle, setScrollerStyle] = React.useState({\n overflow: 'hidden',\n scrollbarWidth: 0\n });\n const valueToIndex = new Map();\n const tabsRef = React.useRef(null);\n const tabListRef = React.useRef(null);\n const getTabsMeta = () => {\n const tabsNode = tabsRef.current;\n let tabsMeta;\n if (tabsNode) {\n const rect = tabsNode.getBoundingClientRect();\n // create a new object with ClientRect class props + scrollLeft\n tabsMeta = {\n clientWidth: tabsNode.clientWidth,\n scrollLeft: tabsNode.scrollLeft,\n scrollTop: tabsNode.scrollTop,\n scrollLeftNormalized: getNormalizedScrollLeft(tabsNode, theme.direction),\n scrollWidth: tabsNode.scrollWidth,\n top: rect.top,\n bottom: rect.bottom,\n left: rect.left,\n right: rect.right\n };\n }\n let tabMeta;\n if (tabsNode && value !== false) {\n const children = tabListRef.current.children;\n if (children.length > 0) {\n const tab = children[valueToIndex.get(value)];\n if (process.env.NODE_ENV !== 'production') {\n if (!tab) {\n console.error([`MUI: The \\`value\\` provided to the Tabs component is invalid.`, `None of the Tabs' children match with \"${value}\".`, valueToIndex.keys ? 
`You can provide one of the following values: ${Array.from(valueToIndex.keys()).join(', ')}.` : null].join('\\n'));\n }\n }\n tabMeta = tab ? tab.getBoundingClientRect() : null;\n if (process.env.NODE_ENV !== 'production') {\n if (process.env.NODE_ENV !== 'test' && !warnedOnceTabPresent && tabMeta && tabMeta.width === 0 && tabMeta.height === 0 &&\n // if the whole Tabs component is hidden, don't warn\n tabsMeta.clientWidth !== 0) {\n tabsMeta = null;\n console.error(['MUI: The `value` provided to the Tabs component is invalid.', `The Tab with this \\`value\\` (\"${value}\") is not part of the document layout.`, \"Make sure the tab item is present in the document or that it's not `display: none`.\"].join('\\n'));\n warnedOnceTabPresent = true;\n }\n }\n }\n }\n return {\n tabsMeta,\n tabMeta\n };\n };\n const updateIndicatorState = useEventCallback(() => {\n const {\n tabsMeta,\n tabMeta\n } = getTabsMeta();\n let startValue = 0;\n let startIndicator;\n if (vertical) {\n startIndicator = 'top';\n if (tabMeta && tabsMeta) {\n startValue = tabMeta.top - tabsMeta.top + tabsMeta.scrollTop;\n }\n } else {\n startIndicator = isRtl ? 'right' : 'left';\n if (tabMeta && tabsMeta) {\n const correction = isRtl ? tabsMeta.scrollLeftNormalized + tabsMeta.clientWidth - tabsMeta.scrollWidth : tabsMeta.scrollLeft;\n startValue = (isRtl ? -1 : 1) * (tabMeta[startIndicator] - tabsMeta[startIndicator] + correction);\n }\n }\n const newIndicatorStyle = {\n [startIndicator]: startValue,\n // May be wrong until the font is loaded.\n [size]: tabMeta ? tabMeta[size] : 0\n };\n\n // IE11 support, replace with Number.isNaN\n // eslint-disable-next-line no-restricted-globals\n if (isNaN(indicatorStyle[startIndicator]) || isNaN(indicatorStyle[size])) {\n setIndicatorStyle(newIndicatorStyle);\n } else {\n const dStart = Math.abs(indicatorStyle[startIndicator] - newIndicatorStyle[startIndicator]);\n const dSize = Math.abs(indicatorStyle[size] - newIndicatorStyle[size]);\n if (dStart >= 1 || dSize >= 1) {\n setIndicatorStyle(newIndicatorStyle);\n }\n }\n });\n const scroll = (scrollValue, {\n animation = true\n } = {}) => {\n if (animation) {\n animate(scrollStart, tabsRef.current, scrollValue, {\n duration: theme.transitions.duration.standard\n });\n } else {\n tabsRef.current[scrollStart] = scrollValue;\n }\n };\n const moveTabsScroll = delta => {\n let scrollValue = tabsRef.current[scrollStart];\n if (vertical) {\n scrollValue += delta;\n } else {\n scrollValue += delta * (isRtl ? -1 : 1);\n // Fix for Edge\n scrollValue *= isRtl && detectScrollType() === 'reverse' ? 
-1 : 1;\n }\n scroll(scrollValue);\n };\n const getScrollSize = () => {\n const containerSize = tabsRef.current[clientSize];\n let totalSize = 0;\n const children = Array.from(tabListRef.current.children);\n for (let i = 0; i < children.length; i += 1) {\n const tab = children[i];\n if (totalSize + tab[clientSize] > containerSize) {\n // If the first item is longer than the container size, then only scroll\n // by the container size.\n if (i === 0) {\n totalSize = containerSize;\n }\n break;\n }\n totalSize += tab[clientSize];\n }\n return totalSize;\n };\n const handleStartScrollClick = () => {\n moveTabsScroll(-1 * getScrollSize());\n };\n const handleEndScrollClick = () => {\n moveTabsScroll(getScrollSize());\n };\n\n // TODO Remove as browser support for hiding the scrollbar\n // with CSS improves.\n const handleScrollbarSizeChange = React.useCallback(scrollbarWidth => {\n setScrollerStyle({\n overflow: null,\n scrollbarWidth\n });\n }, []);\n const getConditionalElements = () => {\n const conditionalElements = {};\n conditionalElements.scrollbarSizeListener = scrollable ? /*#__PURE__*/_jsx(TabsScrollbarSize, {\n onChange: handleScrollbarSizeChange,\n className: clsx(classes.scrollableX, classes.hideScrollbar)\n }) : null;\n const scrollButtonsActive = displayScroll.start || displayScroll.end;\n const showScrollButtons = scrollable && (scrollButtons === 'auto' && scrollButtonsActive || scrollButtons === true);\n conditionalElements.scrollButtonStart = showScrollButtons ? /*#__PURE__*/_jsx(ScrollButtonComponent, _extends({\n slots: {\n StartScrollButtonIcon: slots.StartScrollButtonIcon\n },\n slotProps: {\n startScrollButtonIcon: startScrollButtonIconProps\n },\n orientation: orientation,\n direction: isRtl ? 'right' : 'left',\n onClick: handleStartScrollClick,\n disabled: !displayScroll.start\n }, TabScrollButtonProps, {\n className: clsx(classes.scrollButtons, TabScrollButtonProps.className)\n })) : null;\n conditionalElements.scrollButtonEnd = showScrollButtons ? /*#__PURE__*/_jsx(ScrollButtonComponent, _extends({\n slots: {\n EndScrollButtonIcon: slots.EndScrollButtonIcon\n },\n slotProps: {\n endScrollButtonIcon: endScrollButtonIconProps\n },\n orientation: orientation,\n direction: isRtl ? 
'left' : 'right',\n onClick: handleEndScrollClick,\n disabled: !displayScroll.end\n }, TabScrollButtonProps, {\n className: clsx(classes.scrollButtons, TabScrollButtonProps.className)\n })) : null;\n return conditionalElements;\n };\n const scrollSelectedIntoView = useEventCallback(animation => {\n const {\n tabsMeta,\n tabMeta\n } = getTabsMeta();\n if (!tabMeta || !tabsMeta) {\n return;\n }\n if (tabMeta[start] < tabsMeta[start]) {\n // left side of button is out of view\n const nextScrollStart = tabsMeta[scrollStart] + (tabMeta[start] - tabsMeta[start]);\n scroll(nextScrollStart, {\n animation\n });\n } else if (tabMeta[end] > tabsMeta[end]) {\n // right side of button is out of view\n const nextScrollStart = tabsMeta[scrollStart] + (tabMeta[end] - tabsMeta[end]);\n scroll(nextScrollStart, {\n animation\n });\n }\n });\n const updateScrollButtonState = useEventCallback(() => {\n if (scrollable && scrollButtons !== false) {\n const {\n scrollTop,\n scrollHeight,\n clientHeight,\n scrollWidth,\n clientWidth\n } = tabsRef.current;\n let showStartScroll;\n let showEndScroll;\n if (vertical) {\n showStartScroll = scrollTop > 1;\n showEndScroll = scrollTop < scrollHeight - clientHeight - 1;\n } else {\n const scrollLeft = getNormalizedScrollLeft(tabsRef.current, theme.direction);\n // use 1 for the potential rounding error with browser zooms.\n showStartScroll = isRtl ? scrollLeft < scrollWidth - clientWidth - 1 : scrollLeft > 1;\n showEndScroll = !isRtl ? scrollLeft < scrollWidth - clientWidth - 1 : scrollLeft > 1;\n }\n if (showStartScroll !== displayScroll.start || showEndScroll !== displayScroll.end) {\n setDisplayScroll({\n start: showStartScroll,\n end: showEndScroll\n });\n }\n }\n });\n React.useEffect(() => {\n const handleResize = debounce(() => {\n // If the Tabs component is replaced by Suspense with a fallback, the last\n // ResizeObserver's handler that runs because of the change in the layout is trying to\n // access a dom node that is no longer there (as the fallback component is being shown instead).\n // See https://github.com/mui/material-ui/issues/33276\n // TODO: Add tests that will ensure the component is not failing when\n // replaced by Suspense with a fallback, once React is updated to version 18\n if (tabsRef.current) {\n updateIndicatorState();\n updateScrollButtonState();\n }\n });\n const win = ownerWindow(tabsRef.current);\n win.addEventListener('resize', handleResize);\n let resizeObserver;\n if (typeof ResizeObserver !== 'undefined') {\n resizeObserver = new ResizeObserver(handleResize);\n Array.from(tabListRef.current.children).forEach(child => {\n resizeObserver.observe(child);\n });\n }\n return () => {\n handleResize.clear();\n win.removeEventListener('resize', handleResize);\n if (resizeObserver) {\n resizeObserver.disconnect();\n }\n };\n }, [updateIndicatorState, updateScrollButtonState]);\n const handleTabsScroll = React.useMemo(() => debounce(() => {\n updateScrollButtonState();\n }), [updateScrollButtonState]);\n React.useEffect(() => {\n return () => {\n handleTabsScroll.clear();\n };\n }, [handleTabsScroll]);\n React.useEffect(() => {\n setMounted(true);\n }, []);\n React.useEffect(() => {\n updateIndicatorState();\n updateScrollButtonState();\n });\n React.useEffect(() => {\n // Don't animate on the first render.\n scrollSelectedIntoView(defaultIndicatorStyle !== indicatorStyle);\n }, [scrollSelectedIntoView, indicatorStyle]);\n React.useImperativeHandle(action, () => ({\n updateIndicator: updateIndicatorState,\n updateScrollButtons: 
updateScrollButtonState\n }), [updateIndicatorState, updateScrollButtonState]);\n const indicator = /*#__PURE__*/_jsx(TabsIndicator, _extends({}, TabIndicatorProps, {\n className: clsx(classes.indicator, TabIndicatorProps.className),\n ownerState: ownerState,\n style: _extends({}, indicatorStyle, TabIndicatorProps.style)\n }));\n let childIndex = 0;\n const children = React.Children.map(childrenProp, child => {\n if (! /*#__PURE__*/React.isValidElement(child)) {\n return null;\n }\n if (process.env.NODE_ENV !== 'production') {\n if (isFragment(child)) {\n console.error([\"MUI: The Tabs component doesn't accept a Fragment as a child.\", 'Consider providing an array instead.'].join('\\n'));\n }\n }\n const childValue = child.props.value === undefined ? childIndex : child.props.value;\n valueToIndex.set(childValue, childIndex);\n const selected = childValue === value;\n childIndex += 1;\n return /*#__PURE__*/React.cloneElement(child, _extends({\n fullWidth: variant === 'fullWidth',\n indicator: selected && !mounted && indicator,\n selected,\n selectionFollowsFocus,\n onChange,\n textColor,\n value: childValue\n }, childIndex === 1 && value === false && !child.props.tabIndex ? {\n tabIndex: 0\n } : {}));\n });\n const handleKeyDown = event => {\n const list = tabListRef.current;\n const currentFocus = ownerDocument(list).activeElement;\n // Keyboard navigation assumes that [role=\"tab\"] are siblings\n // though we might warn in the future about nested, interactive elements\n // as a a11y violation\n const role = currentFocus.getAttribute('role');\n if (role !== 'tab') {\n return;\n }\n let previousItemKey = orientation === 'horizontal' ? 'ArrowLeft' : 'ArrowUp';\n let nextItemKey = orientation === 'horizontal' ? 'ArrowRight' : 'ArrowDown';\n if (orientation === 'horizontal' && isRtl) {\n // swap previousItemKey with nextItemKey\n previousItemKey = 'ArrowRight';\n nextItemKey = 'ArrowLeft';\n }\n switch (event.key) {\n case previousItemKey:\n event.preventDefault();\n moveFocus(list, currentFocus, previousItem);\n break;\n case nextItemKey:\n event.preventDefault();\n moveFocus(list, currentFocus, nextItem);\n break;\n case 'Home':\n event.preventDefault();\n moveFocus(list, null, nextItem);\n break;\n case 'End':\n event.preventDefault();\n moveFocus(list, null, previousItem);\n break;\n default:\n break;\n }\n };\n const conditionalElements = getConditionalElements();\n return /*#__PURE__*/_jsxs(TabsRoot, _extends({\n className: clsx(classes.root, className),\n ownerState: ownerState,\n ref: ref,\n as: component\n }, other, {\n children: [conditionalElements.scrollButtonStart, conditionalElements.scrollbarSizeListener, /*#__PURE__*/_jsxs(TabsScroller, {\n className: classes.scroller,\n ownerState: ownerState,\n style: {\n overflow: scrollerStyle.overflow,\n [vertical ? `margin${isRtl ? 'Left' : 'Right'}` : 'marginBottom']: visibleScrollbar ? undefined : -scrollerStyle.scrollbarWidth\n },\n ref: tabsRef,\n onScroll: handleTabsScroll,\n children: [/*#__PURE__*/_jsx(FlexContainer, {\n \"aria-label\": ariaLabel,\n \"aria-labelledby\": ariaLabelledBy,\n \"aria-orientation\": orientation === 'vertical' ? 'vertical' : null,\n className: classes.flexContainer,\n ownerState: ownerState,\n onKeyDown: handleKeyDown,\n ref: tabListRef,\n role: \"tablist\",\n children: children\n }), mounted && indicator]\n }), conditionalElements.scrollButtonEnd]\n }));\n});\nprocess.env.NODE_ENV !== \"production\" ? 
Tabs.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * Callback fired when the component mounts.\n * This is useful when you want to trigger an action programmatically.\n * It supports two actions: `updateIndicator()` and `updateScrollButtons()`\n *\n * @param {object} actions This object contains all possible actions\n * that can be triggered programmatically.\n */\n action: refType,\n /**\n * If `true`, the scroll buttons aren't forced hidden on mobile.\n * By default the scroll buttons are hidden on mobile and takes precedence over `scrollButtons`.\n * @default false\n */\n allowScrollButtonsMobile: PropTypes.bool,\n /**\n * The label for the Tabs as a string.\n */\n 'aria-label': PropTypes.string,\n /**\n * An id or list of ids separated by a space that label the Tabs.\n */\n 'aria-labelledby': PropTypes.string,\n /**\n * If `true`, the tabs are centered.\n * This prop is intended for large views.\n * @default false\n */\n centered: PropTypes.bool,\n /**\n * The content of the component.\n */\n children: PropTypes.node,\n /**\n * Override or extend the styles applied to the component.\n */\n classes: PropTypes.object,\n /**\n * @ignore\n */\n className: PropTypes.string,\n /**\n * The component used for the root node.\n * Either a string to use a HTML element or a component.\n */\n component: PropTypes.elementType,\n /**\n * Determines the color of the indicator.\n * @default 'primary'\n */\n indicatorColor: PropTypes /* @typescript-to-proptypes-ignore */.oneOfType([PropTypes.oneOf(['primary', 'secondary']), PropTypes.string]),\n /**\n * Callback fired when the value changes.\n *\n * @param {React.SyntheticEvent} event The event source of the callback. **Warning**: This is a generic event not a change event.\n * @param {any} value We default to the index of the child (number)\n */\n onChange: PropTypes.func,\n /**\n * The component orientation (layout flow direction).\n * @default 'horizontal'\n */\n orientation: PropTypes.oneOf(['horizontal', 'vertical']),\n /**\n * The component used to render the scroll buttons.\n * @default TabScrollButton\n */\n ScrollButtonComponent: PropTypes.elementType,\n /**\n * Determine behavior of scroll buttons when tabs are set to scroll:\n *\n * - `auto` will only present them when not all the items are visible.\n * - `true` will always present them.\n * - `false` will never present them.\n *\n * By default the scroll buttons are hidden on mobile.\n * This behavior can be disabled with `allowScrollButtonsMobile`.\n * @default 'auto'\n */\n scrollButtons: PropTypes /* @typescript-to-proptypes-ignore */.oneOf(['auto', false, true]),\n /**\n * If `true` the selected tab changes on focus. 
Otherwise it only\n * changes on activation.\n */\n selectionFollowsFocus: PropTypes.bool,\n /**\n * The extra props for the slot components.\n * You can override the existing props or add new ones.\n * @default {}\n */\n slotProps: PropTypes.shape({\n endScrollButtonIcon: PropTypes.oneOfType([PropTypes.func, PropTypes.object]),\n startScrollButtonIcon: PropTypes.oneOfType([PropTypes.func, PropTypes.object])\n }),\n /**\n * The components used for each slot inside.\n * @default {}\n */\n slots: PropTypes.shape({\n EndScrollButtonIcon: PropTypes.elementType,\n StartScrollButtonIcon: PropTypes.elementType\n }),\n /**\n * The system prop that allows defining system overrides as well as additional CSS styles.\n */\n sx: PropTypes.oneOfType([PropTypes.arrayOf(PropTypes.oneOfType([PropTypes.func, PropTypes.object, PropTypes.bool])), PropTypes.func, PropTypes.object]),\n /**\n * Props applied to the tab indicator element.\n * @default {}\n */\n TabIndicatorProps: PropTypes.object,\n /**\n * Props applied to the [`TabScrollButton`](/material-ui/api/tab-scroll-button/) element.\n * @default {}\n */\n TabScrollButtonProps: PropTypes.object,\n /**\n * Determines the color of the `Tab`.\n * @default 'primary'\n */\n textColor: PropTypes.oneOf(['inherit', 'primary', 'secondary']),\n /**\n * The value of the currently selected `Tab`.\n * If you don't want any selected `Tab`, you can set this prop to `false`.\n */\n value: PropTypes.any,\n /**\n * Determines additional display behavior of the tabs:\n *\n * - `scrollable` will invoke scrolling properties and allow for horizontally\n * scrolling (or swiping) of the tab bar.\n * -`fullWidth` will make the tabs grow to use all the available space,\n * which should be used for small views, like on mobile.\n * - `standard` will render the default state.\n * @default 'standard'\n */\n variant: PropTypes.oneOf(['fullWidth', 'scrollable', 'standard']),\n /**\n * If `true`, the scrollbar is visible. 
It can be useful when displaying\n * a long vertical list of tabs.\n * @default false\n */\n visibleScrollbar: PropTypes.bool\n} : void 0;\nexport default Tabs;","import _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nimport _extends from \"@babel/runtime/helpers/esm/extends\";\nconst _excluded = [\"className\"];\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport clsx from 'clsx';\nimport { unstable_composeClasses as composeClasses } from '@mui/base';\nimport styled from '../styles/styled';\nimport useThemeProps from '../styles/useThemeProps';\nimport { getListItemIconUtilityClass } from './listItemIconClasses';\nimport ListContext from '../List/ListContext';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst useUtilityClasses = ownerState => {\n const {\n alignItems,\n classes\n } = ownerState;\n const slots = {\n root: ['root', alignItems === 'flex-start' && 'alignItemsFlexStart']\n };\n return composeClasses(slots, getListItemIconUtilityClass, classes);\n};\nconst ListItemIconRoot = styled('div', {\n name: 'MuiListItemIcon',\n slot: 'Root',\n overridesResolver: (props, styles) => {\n const {\n ownerState\n } = props;\n return [styles.root, ownerState.alignItems === 'flex-start' && styles.alignItemsFlexStart];\n }\n})(({\n theme,\n ownerState\n}) => _extends({\n minWidth: 56,\n color: (theme.vars || theme).palette.action.active,\n flexShrink: 0,\n display: 'inline-flex'\n}, ownerState.alignItems === 'flex-start' && {\n marginTop: 8\n}));\n\n/**\n * A simple wrapper to apply `List` styles to an `Icon` or `SvgIcon`.\n */\nconst ListItemIcon = /*#__PURE__*/React.forwardRef(function ListItemIcon(inProps, ref) {\n const props = useThemeProps({\n props: inProps,\n name: 'MuiListItemIcon'\n });\n const {\n className\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const context = React.useContext(ListContext);\n const ownerState = _extends({}, props, {\n alignItems: context.alignItems\n });\n const classes = useUtilityClasses(ownerState);\n return /*#__PURE__*/_jsx(ListItemIconRoot, _extends({\n className: clsx(classes.root, className),\n ownerState: ownerState,\n ref: ref\n }, other));\n});\nprocess.env.NODE_ENV !== \"production\" ? 
ListItemIcon.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * The content of the component, normally `Icon`, `SvgIcon`,\n * or a `@mui/icons-material` SVG icon element.\n */\n children: PropTypes.node,\n /**\n * Override or extend the styles applied to the component.\n */\n classes: PropTypes.object,\n /**\n * @ignore\n */\n className: PropTypes.string,\n /**\n * The system prop that allows defining system overrides as well as additional CSS styles.\n */\n sx: PropTypes.oneOfType([PropTypes.arrayOf(PropTypes.oneOfType([PropTypes.func, PropTypes.object, PropTypes.bool])), PropTypes.func, PropTypes.object])\n} : void 0;\nexport default ListItemIcon;","import _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nimport _extends from \"@babel/runtime/helpers/esm/extends\";\nconst _excluded = [\"children\", \"className\", \"disableTypography\", \"inset\", \"primary\", \"primaryTypographyProps\", \"secondary\", \"secondaryTypographyProps\"];\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport clsx from 'clsx';\nimport { unstable_composeClasses as composeClasses } from '@mui/base';\nimport Typography from '../Typography';\nimport ListContext from '../List/ListContext';\nimport useThemeProps from '../styles/useThemeProps';\nimport styled from '../styles/styled';\nimport listItemTextClasses, { getListItemTextUtilityClass } from './listItemTextClasses';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nimport { jsxs as _jsxs } from \"react/jsx-runtime\";\nconst useUtilityClasses = ownerState => {\n const {\n classes,\n inset,\n primary,\n secondary,\n dense\n } = ownerState;\n const slots = {\n root: ['root', inset && 'inset', dense && 'dense', primary && secondary && 'multiline'],\n primary: ['primary'],\n secondary: ['secondary']\n };\n return composeClasses(slots, getListItemTextUtilityClass, classes);\n};\nconst ListItemTextRoot = styled('div', {\n name: 'MuiListItemText',\n slot: 'Root',\n overridesResolver: (props, styles) => {\n const {\n ownerState\n } = props;\n return [{\n [`& .${listItemTextClasses.primary}`]: styles.primary\n }, {\n [`& .${listItemTextClasses.secondary}`]: styles.secondary\n }, styles.root, ownerState.inset && styles.inset, ownerState.primary && ownerState.secondary && styles.multiline, ownerState.dense && styles.dense];\n }\n})(({\n ownerState\n}) => _extends({\n flex: '1 1 auto',\n minWidth: 0,\n marginTop: 4,\n marginBottom: 4\n}, ownerState.primary && ownerState.secondary && {\n marginTop: 6,\n marginBottom: 6\n}, ownerState.inset && {\n paddingLeft: 56\n}));\nconst ListItemText = /*#__PURE__*/React.forwardRef(function ListItemText(inProps, ref) {\n const props = useThemeProps({\n props: inProps,\n name: 'MuiListItemText'\n });\n const {\n children,\n className,\n disableTypography = false,\n inset = false,\n primary: primaryProp,\n primaryTypographyProps,\n secondary: secondaryProp,\n secondaryTypographyProps\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const {\n dense\n } = React.useContext(ListContext);\n let primary = primaryProp != null ? 
primaryProp : children;\n let secondary = secondaryProp;\n const ownerState = _extends({}, props, {\n disableTypography,\n inset,\n primary: !!primary,\n secondary: !!secondary,\n dense\n });\n const classes = useUtilityClasses(ownerState);\n if (primary != null && primary.type !== Typography && !disableTypography) {\n primary = /*#__PURE__*/_jsx(Typography, _extends({\n variant: dense ? 'body2' : 'body1',\n className: classes.primary,\n component: primaryTypographyProps != null && primaryTypographyProps.variant ? undefined : 'span',\n display: \"block\"\n }, primaryTypographyProps, {\n children: primary\n }));\n }\n if (secondary != null && secondary.type !== Typography && !disableTypography) {\n secondary = /*#__PURE__*/_jsx(Typography, _extends({\n variant: \"body2\",\n className: classes.secondary,\n color: \"text.secondary\",\n display: \"block\"\n }, secondaryTypographyProps, {\n children: secondary\n }));\n }\n return /*#__PURE__*/_jsxs(ListItemTextRoot, _extends({\n className: clsx(classes.root, className),\n ownerState: ownerState,\n ref: ref\n }, other, {\n children: [primary, secondary]\n }));\n});\nprocess.env.NODE_ENV !== \"production\" ? ListItemText.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * Alias for the `primary` prop.\n */\n children: PropTypes.node,\n /**\n * Override or extend the styles applied to the component.\n */\n classes: PropTypes.object,\n /**\n * @ignore\n */\n className: PropTypes.string,\n /**\n * If `true`, the children won't be wrapped by a Typography component.\n * This can be useful to render an alternative Typography variant by wrapping\n * the `children` (or `primary`) text, and optional `secondary` text\n * with the Typography component.\n * @default false\n */\n disableTypography: PropTypes.bool,\n /**\n * If `true`, the children are indented.\n * This should be used if there is no left avatar or left icon.\n * @default false\n */\n inset: PropTypes.bool,\n /**\n * The main content element.\n */\n primary: PropTypes.node,\n /**\n * These props will be forwarded to the primary typography component\n * (as long as disableTypography is not `true`).\n */\n primaryTypographyProps: PropTypes.object,\n /**\n * The secondary content element.\n */\n secondary: PropTypes.node,\n /**\n * These props will be forwarded to the secondary typography component\n * (as long as disableTypography is not `true`).\n */\n secondaryTypographyProps: PropTypes.object,\n /**\n * The system prop that allows defining system overrides as well as additional CSS styles.\n */\n sx: PropTypes.oneOfType([PropTypes.arrayOf(PropTypes.oneOfType([PropTypes.func, PropTypes.object, PropTypes.bool])), PropTypes.func, PropTypes.object])\n} : void 0;\nexport default ListItemText;","import { SequentialClassifier } from \"../AbstractClassifier/AbstractClassifier\";\nimport { Model } from \"../Model/Model\";\nimport { io, loadGraphModel, loadLayersModel } from \"@tensorflow/tfjs\";\n\nenum LoadState {\n Unloaded,\n Uploaded,\n Loaded,\n}\n\nexport class UploadedClassifier extends SequentialClassifier {\n readonly TFHub: boolean;\n protected _ioHandler?: ReturnType;\n protected _loadState: LoadState;\n\n /*\n * whether from src, or the descFile, the JSON file should contain 'modelTopology' and 
'weightsManifest\n *\n * 'modelTopology': A JSON object that can be either of:\n * 1) a model architecture JSON consistent with the format of the return value of keras.Model.to_json()\n * 2) a full model JSON in the format of keras.models.save_model().\n *\n * 'weightsManifest': A TensorFlow.js weights manifest.\n * See the Python converter function save_model() for more details.\n * It is also assumed that model weights (.bin files) can be accessed\n * from relative paths described by the paths fields in weights manifest.\n */\n constructor({\n TFHub,\n descFile, // Model description json file\n weightsFiles, // Model weights bin files\n ...modelArgs\n }: {\n TFHub: boolean;\n descFile?: File;\n weightsFiles?: Array;\n } & ConstructorParameters[0]) {\n super(modelArgs);\n\n this._loadState = LoadState.Unloaded;\n\n this.TFHub = TFHub;\n\n if (descFile && weightsFiles) {\n this._ioHandler = io.browserFiles([descFile, ...weightsFiles]);\n } else if (descFile) {\n throw Error(\"No weights files (.bin) provided\");\n } else if (weightsFiles) {\n throw Error(\"No description file (.json) provided\");\n }\n\n if (this.TFHub && !this.src) {\n throw Error(\"A TFHub url was not provided\");\n }\n\n if ((this.TFHub || this.src) && this._ioHandler) {\n throw Error(\n \"A model cannot have both a url source and uploaded model files\"\n );\n }\n }\n\n public async upload(): Promise {\n if (this.src && this.TFHub && !Model.verifyTFHubUrl(this.src)) {\n throw new Error(`Expected TFHub Url: ${this.src}`);\n }\n\n if (this.src && this.graph) {\n this._model = await loadGraphModel(this.src, {\n fromTFHub: this.TFHub,\n });\n } else if (this.src) {\n this._model = await loadLayersModel(this.src, {\n fromTFHub: this.TFHub,\n });\n } else if (this._ioHandler && this.graph) {\n this._model = await loadGraphModel(this._ioHandler);\n } else if (this._ioHandler) {\n this._model = await loadLayersModel(this._ioHandler);\n } else {\n throw Error(\"Could not load model, no source available\");\n }\n\n this._loadState = LoadState.Uploaded;\n }\n\n public async loadModel() {\n if (this._loadState === LoadState.Unloaded) {\n await this.upload();\n }\n\n this._loadState = LoadState.Loaded;\n }\n}\n","import React, { useState } from \"react\";\n\nimport {\n DialogContent,\n ListItemIcon,\n ListItemText,\n MenuItem,\n Typography,\n} from \"@mui/material\";\n\nimport FileOpenIcon from \"@mui/icons-material/FileOpen\";\n\nimport { Model } from \"utils/models/Model/Model\";\nimport { UploadedClassifier } from \"utils/models/UploadedClassifier/UploadedClassifier\";\nimport { ModelTask } from \"utils/models/enums\";\nimport { Shape } from \"store/data/types\";\n\n//TODO: MenuItem??\n\nexport const LocalFileUpload = ({\n modelTask,\n isGraph,\n setModel,\n setInputShape,\n}: {\n modelTask: ModelTask;\n isGraph: boolean;\n setModel: (model: Model) => void;\n setInputShape: React.Dispatch>;\n}) => {\n const [errMessage, setErrMessage] = useState(\"\");\n const [successMessage, setSuccessMessage] = useState(\"\");\n\n const loadModel = async (descFile: File, weightsFiles: Array) => {\n setErrMessage(\"\");\n setSuccessMessage(\"\");\n\n // remove the file extension from the model name\n const modelName = descFile.name.replace(/\\..+$/, \"\");\n\n if (modelTask === ModelTask.Classification) {\n const model = new UploadedClassifier({\n TFHub: false,\n descFile,\n weightsFiles,\n name: modelName,\n task: modelTask,\n graph: isGraph,\n pretrained: true,\n trainable: isGraph,\n });\n\n try {\n await model.upload();\n } catch (err) {\n 
setErrMessage(`Model upload failed: ${err}`);\n return;\n }\n\n const inputShape = model.defaultInputShape;\n\n setInputShape((prevShape) => ({\n ...prevShape,\n height: inputShape[0],\n width: inputShape[1],\n channels: inputShape[2],\n }));\n\n setModel(model);\n\n setSuccessMessage(\n `Successfully uploaded Classification ${\n isGraph ? \"Graph\" : \"Layers\"\n } Model (\"${model.name}\")`\n );\n } else {\n // TODO - segmenter\n setErrMessage(\"Uploaded segmenter not yet supported\");\n }\n };\n\n const handleFilesSelected = async (\n event: React.ChangeEvent\n ) => {\n event.persist();\n\n if (!event.currentTarget.files) {\n setErrMessage(\"No files selected\");\n return;\n } else if (event.currentTarget.files.length < 2) {\n setErrMessage(\n \"Must include model description (.json) and at least one weights file (.bin)\"\n );\n return;\n } else {\n setErrMessage(\"\");\n }\n\n let weightsFiles: Array = [];\n let jsonFile = event.currentTarget.files[0];\n for (let i = 0; i < event.currentTarget.files.length; i++) {\n const file = event.currentTarget.files[i];\n if (file.name.endsWith(\".json\")) {\n jsonFile = file;\n // jsonFile.type === \"application/json\"\n } else {\n weightsFiles.push(file);\n // file.type === \"application/macbinary\"\n }\n }\n\n await loadModel(jsonFile, weightsFiles);\n };\n\n return (\n <>\n \n Upload model files from your computer.\n \n Tensorflow requires a .json files containing the model description as\n well as the corresponding model weights (.bin file(s)).\n \n \n\n \n ) =>\n handleFilesSelected(event)\n }\n />\n \n {errMessage}\n \n \n {successMessage}\n \n \n );\n};\n","import React, { useEffect, useState } from \"react\";\n\nimport {\n DialogContent,\n FormControl,\n InputLabel,\n MenuItem,\n Select,\n SelectChangeEvent,\n Typography,\n} from \"@mui/material\";\n\nimport { Model } from \"utils/models/Model/Model\";\nimport { range } from \"lodash\";\n\nexport const PretrainedModelSelector = ({\n values,\n setModel,\n}: {\n values: Array;\n setModel: (model: Model | undefined) => void;\n}) => {\n // const [errMessage, setErrMessage] = useState(\"\");\n const [modelIdxs, setModelIdxs] = useState(\n range(-1, values.length)\n );\n const [selectedIdxVal, setSelectedIdxVal] = useState(\"-1\");\n\n useEffect(() => {\n setModelIdxs(range(-1, values.length));\n }, [values]);\n\n const handlePreTrainedModelChange = async (event: SelectChangeEvent) => {\n const idxVal = event.target.value;\n setSelectedIdxVal(idxVal);\n const idx = Number(idxVal);\n setModel(idx >= 0 ? values[idx] : undefined);\n };\n\n return (\n <>\n \n \n Choose from a provided pre-trained model\n \n \n \n \n \n Pre-trained Models\n \n \n {modelIdxs.map((idx) => (\n \n {idx === -1 ? 
\"None\" : values[idx].name}\n \n ))}\n \n {/* \n {errMessage}\n */}\n \n \n \n );\n};\n","import { unstable_generateUtilityClasses as generateUtilityClasses } from '@mui/utils';\nimport generateUtilityClass from '../generateUtilityClass';\nexport function getInputAdornmentUtilityClass(slot) {\n return generateUtilityClass('MuiInputAdornment', slot);\n}\nconst inputAdornmentClasses = generateUtilityClasses('MuiInputAdornment', ['root', 'filled', 'standard', 'outlined', 'positionStart', 'positionEnd', 'disablePointerEvents', 'hiddenLabel', 'sizeSmall']);\nexport default inputAdornmentClasses;","import _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nimport _extends from \"@babel/runtime/helpers/esm/extends\";\nvar _span;\nconst _excluded = [\"children\", \"className\", \"component\", \"disablePointerEvents\", \"disableTypography\", \"position\", \"variant\"];\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport clsx from 'clsx';\nimport { unstable_composeClasses as composeClasses } from '@mui/base';\nimport capitalize from '../utils/capitalize';\nimport Typography from '../Typography';\nimport FormControlContext from '../FormControl/FormControlContext';\nimport useFormControl from '../FormControl/useFormControl';\nimport styled from '../styles/styled';\nimport inputAdornmentClasses, { getInputAdornmentUtilityClass } from './inputAdornmentClasses';\nimport useThemeProps from '../styles/useThemeProps';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nimport { jsxs as _jsxs } from \"react/jsx-runtime\";\nconst overridesResolver = (props, styles) => {\n const {\n ownerState\n } = props;\n return [styles.root, styles[`position${capitalize(ownerState.position)}`], ownerState.disablePointerEvents === true && styles.disablePointerEvents, styles[ownerState.variant]];\n};\nconst useUtilityClasses = ownerState => {\n const {\n classes,\n disablePointerEvents,\n hiddenLabel,\n position,\n size,\n variant\n } = ownerState;\n const slots = {\n root: ['root', disablePointerEvents && 'disablePointerEvents', position && `position${capitalize(position)}`, variant, hiddenLabel && 'hiddenLabel', size && `size${capitalize(size)}`]\n };\n return composeClasses(slots, getInputAdornmentUtilityClass, classes);\n};\nconst InputAdornmentRoot = styled('div', {\n name: 'MuiInputAdornment',\n slot: 'Root',\n overridesResolver\n})(({\n theme,\n ownerState\n}) => _extends({\n display: 'flex',\n height: '0.01em',\n // Fix IE11 flexbox alignment. 
To remove at some point.\n maxHeight: '2em',\n alignItems: 'center',\n whiteSpace: 'nowrap',\n color: (theme.vars || theme).palette.action.active\n}, ownerState.variant === 'filled' && {\n // Styles applied to the root element if `variant=\"filled\"`.\n [`&.${inputAdornmentClasses.positionStart}&:not(.${inputAdornmentClasses.hiddenLabel})`]: {\n marginTop: 16\n }\n}, ownerState.position === 'start' && {\n // Styles applied to the root element if `position=\"start\"`.\n marginRight: 8\n}, ownerState.position === 'end' && {\n // Styles applied to the root element if `position=\"end\"`.\n marginLeft: 8\n}, ownerState.disablePointerEvents === true && {\n // Styles applied to the root element if `disablePointerEvents={true}`.\n pointerEvents: 'none'\n}));\nconst InputAdornment = /*#__PURE__*/React.forwardRef(function InputAdornment(inProps, ref) {\n const props = useThemeProps({\n props: inProps,\n name: 'MuiInputAdornment'\n });\n const {\n children,\n className,\n component = 'div',\n disablePointerEvents = false,\n disableTypography = false,\n position,\n variant: variantProp\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const muiFormControl = useFormControl() || {};\n let variant = variantProp;\n if (variantProp && muiFormControl.variant) {\n if (process.env.NODE_ENV !== 'production') {\n if (variantProp === muiFormControl.variant) {\n console.error('MUI: The `InputAdornment` variant infers the variant prop ' + 'you do not have to provide one.');\n }\n }\n }\n if (muiFormControl && !variant) {\n variant = muiFormControl.variant;\n }\n const ownerState = _extends({}, props, {\n hiddenLabel: muiFormControl.hiddenLabel,\n size: muiFormControl.size,\n disablePointerEvents,\n position,\n variant\n });\n const classes = useUtilityClasses(ownerState);\n return /*#__PURE__*/_jsx(FormControlContext.Provider, {\n value: null,\n children: /*#__PURE__*/_jsx(InputAdornmentRoot, _extends({\n as: component,\n ownerState: ownerState,\n className: clsx(classes.root, className),\n ref: ref\n }, other, {\n children: typeof children === 'string' && !disableTypography ? /*#__PURE__*/_jsx(Typography, {\n color: \"text.secondary\",\n children: children\n }) : /*#__PURE__*/_jsxs(React.Fragment, {\n children: [position === 'start' ? /* notranslate needed while Google Translate will not fix zero-width space issue */_span || (_span = /*#__PURE__*/_jsx(\"span\", {\n className: \"notranslate\",\n children: \"\\u200B\"\n })) : null, children]\n })\n }))\n });\n});\nprocess.env.NODE_ENV !== \"production\" ? 
InputAdornment.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * The content of the component, normally an `IconButton` or string.\n */\n children: PropTypes.node,\n /**\n * Override or extend the styles applied to the component.\n */\n classes: PropTypes.object,\n /**\n * @ignore\n */\n className: PropTypes.string,\n /**\n * The component used for the root node.\n * Either a string to use a HTML element or a component.\n */\n component: PropTypes.elementType,\n /**\n * Disable pointer events on the root.\n * This allows for the content of the adornment to focus the `input` on click.\n * @default false\n */\n disablePointerEvents: PropTypes.bool,\n /**\n * If children is a string then disable wrapping in a Typography component.\n * @default false\n */\n disableTypography: PropTypes.bool,\n /**\n * The position this adornment should appear relative to the `Input`.\n */\n position: PropTypes.oneOf(['end', 'start']).isRequired,\n /**\n * The system prop that allows defining system overrides as well as additional CSS styles.\n */\n sx: PropTypes.oneOfType([PropTypes.arrayOf(PropTypes.oneOfType([PropTypes.func, PropTypes.object, PropTypes.bool])), PropTypes.func, PropTypes.object]),\n /**\n * The variant to use.\n * Note: If you are using the `TextField` component or the `FormControl` component\n * you do not have to set this manually.\n */\n variant: PropTypes.oneOf(['filled', 'outlined', 'standard'])\n} : void 0;\nexport default InputAdornment;","import { unstable_generateUtilityClasses as generateUtilityClasses } from '@mui/utils';\nimport generateUtilityClass from '../generateUtilityClass';\nexport function getFormControlLabelUtilityClasses(slot) {\n return generateUtilityClass('MuiFormControlLabel', slot);\n}\nconst formControlLabelClasses = generateUtilityClasses('MuiFormControlLabel', ['root', 'labelPlacementStart', 'labelPlacementTop', 'labelPlacementBottom', 'disabled', 'label', 'error', 'required', 'asterisk']);\nexport default formControlLabelClasses;","import _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nimport _extends from \"@babel/runtime/helpers/esm/extends\";\nconst _excluded = [\"checked\", \"className\", \"componentsProps\", \"control\", \"disabled\", \"disableTypography\", \"inputRef\", \"label\", \"labelPlacement\", \"name\", \"onChange\", \"required\", \"slotProps\", \"value\"];\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport clsx from 'clsx';\nimport { refType } from '@mui/utils';\nimport { unstable_composeClasses as composeClasses } from '@mui/base';\nimport { useFormControl } from '../FormControl';\nimport Typography from '../Typography';\nimport capitalize from '../utils/capitalize';\nimport styled from '../styles/styled';\nimport useThemeProps from '../styles/useThemeProps';\nimport formControlLabelClasses, { getFormControlLabelUtilityClasses } from './formControlLabelClasses';\nimport formControlState from '../FormControl/formControlState';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nimport { jsxs as _jsxs } from \"react/jsx-runtime\";\nconst useUtilityClasses = ownerState => {\n const {\n classes,\n disabled,\n labelPlacement,\n error,\n required\n } = ownerState;\n const slots = {\n 
root: ['root', disabled && 'disabled', `labelPlacement${capitalize(labelPlacement)}`, error && 'error', required && 'required'],\n label: ['label', disabled && 'disabled'],\n asterisk: ['asterisk', error && 'error']\n };\n return composeClasses(slots, getFormControlLabelUtilityClasses, classes);\n};\nexport const FormControlLabelRoot = styled('label', {\n name: 'MuiFormControlLabel',\n slot: 'Root',\n overridesResolver: (props, styles) => {\n const {\n ownerState\n } = props;\n return [{\n [`& .${formControlLabelClasses.label}`]: styles.label\n }, styles.root, styles[`labelPlacement${capitalize(ownerState.labelPlacement)}`]];\n }\n})(({\n theme,\n ownerState\n}) => _extends({\n display: 'inline-flex',\n alignItems: 'center',\n cursor: 'pointer',\n // For correct alignment with the text.\n verticalAlign: 'middle',\n WebkitTapHighlightColor: 'transparent',\n marginLeft: -11,\n marginRight: 16,\n // used for row presentation of radio/checkbox\n [`&.${formControlLabelClasses.disabled}`]: {\n cursor: 'default'\n }\n}, ownerState.labelPlacement === 'start' && {\n flexDirection: 'row-reverse',\n marginLeft: 16,\n // used for row presentation of radio/checkbox\n marginRight: -11\n}, ownerState.labelPlacement === 'top' && {\n flexDirection: 'column-reverse',\n marginLeft: 16\n}, ownerState.labelPlacement === 'bottom' && {\n flexDirection: 'column',\n marginLeft: 16\n}, {\n [`& .${formControlLabelClasses.label}`]: {\n [`&.${formControlLabelClasses.disabled}`]: {\n color: (theme.vars || theme).palette.text.disabled\n }\n }\n}));\nconst AsteriskComponent = styled('span', {\n name: 'MuiFormControlLabel',\n slot: 'Asterisk',\n overridesResolver: (props, styles) => styles.asterisk\n})(({\n theme\n}) => ({\n [`&.${formControlLabelClasses.error}`]: {\n color: (theme.vars || theme).palette.error.main\n }\n}));\n\n/**\n * Drop-in replacement of the `Radio`, `Switch` and `Checkbox` component.\n * Use this component if you want to display an extra label.\n */\nconst FormControlLabel = /*#__PURE__*/React.forwardRef(function FormControlLabel(inProps, ref) {\n var _ref, _slotProps$typography;\n const props = useThemeProps({\n props: inProps,\n name: 'MuiFormControlLabel'\n });\n const {\n className,\n componentsProps = {},\n control,\n disabled: disabledProp,\n disableTypography,\n label: labelProp,\n labelPlacement = 'end',\n required: requiredProp,\n slotProps = {}\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const muiFormControl = useFormControl();\n const disabled = (_ref = disabledProp != null ? disabledProp : control.props.disabled) != null ? _ref : muiFormControl == null ? void 0 : muiFormControl.disabled;\n const required = requiredProp != null ? requiredProp : control.props.required;\n const controlProps = {\n disabled,\n required\n };\n ['checked', 'name', 'onChange', 'value', 'inputRef'].forEach(key => {\n if (typeof control.props[key] === 'undefined' && typeof props[key] !== 'undefined') {\n controlProps[key] = props[key];\n }\n });\n const fcs = formControlState({\n props,\n muiFormControl,\n states: ['error']\n });\n const ownerState = _extends({}, props, {\n disabled,\n labelPlacement,\n required,\n error: fcs.error\n });\n const classes = useUtilityClasses(ownerState);\n const typographySlotProps = (_slotProps$typography = slotProps.typography) != null ? 
_slotProps$typography : componentsProps.typography;\n let label = labelProp;\n if (label != null && label.type !== Typography && !disableTypography) {\n label = /*#__PURE__*/_jsx(Typography, _extends({\n component: \"span\"\n }, typographySlotProps, {\n className: clsx(classes.label, typographySlotProps == null ? void 0 : typographySlotProps.className),\n children: label\n }));\n }\n return /*#__PURE__*/_jsxs(FormControlLabelRoot, _extends({\n className: clsx(classes.root, className),\n ownerState: ownerState,\n ref: ref\n }, other, {\n children: [/*#__PURE__*/React.cloneElement(control, controlProps), label, required && /*#__PURE__*/_jsxs(AsteriskComponent, {\n ownerState: ownerState,\n \"aria-hidden\": true,\n className: classes.asterisk,\n children: [\"\\u2009\", '*']\n })]\n }));\n});\nprocess.env.NODE_ENV !== \"production\" ? FormControlLabel.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * If `true`, the component appears selected.\n */\n checked: PropTypes.bool,\n /**\n * Override or extend the styles applied to the component.\n */\n classes: PropTypes.object,\n /**\n * @ignore\n */\n className: PropTypes.string,\n /**\n * The props used for each slot inside.\n * @default {}\n */\n componentsProps: PropTypes.shape({\n typography: PropTypes.object\n }),\n /**\n * A control element. For instance, it can be a `Radio`, a `Switch` or a `Checkbox`.\n */\n control: PropTypes.element.isRequired,\n /**\n * If `true`, the control is disabled.\n */\n disabled: PropTypes.bool,\n /**\n * If `true`, the label is rendered as it is passed without an additional typography node.\n */\n disableTypography: PropTypes.bool,\n /**\n * Pass a ref to the `input` element.\n */\n inputRef: refType,\n /**\n * A text or an element to be used in an enclosing label element.\n */\n label: PropTypes.node,\n /**\n * The position of the label.\n * @default 'end'\n */\n labelPlacement: PropTypes.oneOf(['bottom', 'end', 'start', 'top']),\n /**\n * @ignore\n */\n name: PropTypes.string,\n /**\n * Callback fired when the state is changed.\n *\n * @param {React.SyntheticEvent} event The event source of the callback.\n * You can pull out the new checked state by accessing `event.target.checked` (boolean).\n */\n onChange: PropTypes.func,\n /**\n * If `true`, the label will indicate that the `input` is required.\n */\n required: PropTypes.bool,\n /**\n * The props used for each slot inside.\n * @default {}\n */\n slotProps: PropTypes.shape({\n typography: PropTypes.object\n }),\n /**\n * The system prop that allows defining system overrides as well as additional CSS styles.\n */\n sx: PropTypes.oneOfType([PropTypes.arrayOf(PropTypes.oneOfType([PropTypes.func, PropTypes.object, PropTypes.bool])), PropTypes.func, PropTypes.object]),\n /**\n * The value of the component.\n */\n value: PropTypes.any\n} : void 0;\nexport default FormControlLabel;","import { unstable_generateUtilityClasses as generateUtilityClasses } from '@mui/utils';\nimport generateUtilityClass from '../generateUtilityClass';\nexport function getSwitchBaseUtilityClass(slot) {\n return generateUtilityClass('PrivateSwitchBase', slot);\n}\nconst switchBaseClasses = generateUtilityClasses('PrivateSwitchBase', ['root', 'checked', 'disabled', 'input', 'edgeStart', 
'edgeEnd']);\nexport default switchBaseClasses;","import _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nimport _extends from \"@babel/runtime/helpers/esm/extends\";\nconst _excluded = [\"autoFocus\", \"checked\", \"checkedIcon\", \"className\", \"defaultChecked\", \"disabled\", \"disableFocusRipple\", \"edge\", \"icon\", \"id\", \"inputProps\", \"inputRef\", \"name\", \"onBlur\", \"onChange\", \"onFocus\", \"readOnly\", \"required\", \"tabIndex\", \"type\", \"value\"];\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport clsx from 'clsx';\nimport { refType } from '@mui/utils';\nimport { unstable_composeClasses as composeClasses } from '@mui/base';\nimport capitalize from '../utils/capitalize';\nimport styled from '../styles/styled';\nimport useControlled from '../utils/useControlled';\nimport useFormControl from '../FormControl/useFormControl';\nimport ButtonBase from '../ButtonBase';\nimport { getSwitchBaseUtilityClass } from './switchBaseClasses';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nimport { jsxs as _jsxs } from \"react/jsx-runtime\";\nconst useUtilityClasses = ownerState => {\n const {\n classes,\n checked,\n disabled,\n edge\n } = ownerState;\n const slots = {\n root: ['root', checked && 'checked', disabled && 'disabled', edge && `edge${capitalize(edge)}`],\n input: ['input']\n };\n return composeClasses(slots, getSwitchBaseUtilityClass, classes);\n};\nconst SwitchBaseRoot = styled(ButtonBase)(({\n ownerState\n}) => _extends({\n padding: 9,\n borderRadius: '50%'\n}, ownerState.edge === 'start' && {\n marginLeft: ownerState.size === 'small' ? -3 : -12\n}, ownerState.edge === 'end' && {\n marginRight: ownerState.size === 'small' ? -3 : -12\n}));\nconst SwitchBaseInput = styled('input')({\n cursor: 'inherit',\n position: 'absolute',\n opacity: 0,\n width: '100%',\n height: '100%',\n top: 0,\n left: 0,\n margin: 0,\n padding: 0,\n zIndex: 1\n});\n\n/**\n * @ignore - internal component.\n */\nconst SwitchBase = /*#__PURE__*/React.forwardRef(function SwitchBase(props, ref) {\n const {\n autoFocus,\n checked: checkedProp,\n checkedIcon,\n className,\n defaultChecked,\n disabled: disabledProp,\n disableFocusRipple = false,\n edge = false,\n icon,\n id,\n inputProps,\n inputRef,\n name,\n onBlur,\n onChange,\n onFocus,\n readOnly,\n required = false,\n tabIndex,\n type,\n value\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const [checked, setCheckedState] = useControlled({\n controlled: checkedProp,\n default: Boolean(defaultChecked),\n name: 'SwitchBase',\n state: 'checked'\n });\n const muiFormControl = useFormControl();\n const handleFocus = event => {\n if (onFocus) {\n onFocus(event);\n }\n if (muiFormControl && muiFormControl.onFocus) {\n muiFormControl.onFocus(event);\n }\n };\n const handleBlur = event => {\n if (onBlur) {\n onBlur(event);\n }\n if (muiFormControl && muiFormControl.onBlur) {\n muiFormControl.onBlur(event);\n }\n };\n const handleInputChange = event => {\n // Workaround for https://github.com/facebook/react/issues/9023\n if (event.nativeEvent.defaultPrevented) {\n return;\n }\n const newChecked = event.target.checked;\n setCheckedState(newChecked);\n if (onChange) {\n // TODO v6: remove the second argument.\n onChange(event, newChecked);\n }\n };\n let disabled = disabledProp;\n if (muiFormControl) {\n if (typeof disabled === 'undefined') {\n disabled = muiFormControl.disabled;\n }\n }\n const hasLabelFor = type === 'checkbox' || type === 'radio';\n 
const ownerState = _extends({}, props, {\n checked,\n disabled,\n disableFocusRipple,\n edge\n });\n const classes = useUtilityClasses(ownerState);\n return /*#__PURE__*/_jsxs(SwitchBaseRoot, _extends({\n component: \"span\",\n className: clsx(classes.root, className),\n centerRipple: true,\n focusRipple: !disableFocusRipple,\n disabled: disabled,\n tabIndex: null,\n role: undefined,\n onFocus: handleFocus,\n onBlur: handleBlur,\n ownerState: ownerState,\n ref: ref\n }, other, {\n children: [/*#__PURE__*/_jsx(SwitchBaseInput, _extends({\n autoFocus: autoFocus,\n checked: checkedProp,\n defaultChecked: defaultChecked,\n className: classes.input,\n disabled: disabled,\n id: hasLabelFor ? id : undefined,\n name: name,\n onChange: handleInputChange,\n readOnly: readOnly,\n ref: inputRef,\n required: required,\n ownerState: ownerState,\n tabIndex: tabIndex,\n type: type\n }, type === 'checkbox' && value === undefined ? {} : {\n value\n }, inputProps)), checked ? checkedIcon : icon]\n }));\n});\n\n// NB: If changed, please update Checkbox, Switch and Radio\n// so that the API documentation is updated.\nprocess.env.NODE_ENV !== \"production\" ? SwitchBase.propTypes = {\n /**\n * If `true`, the `input` element is focused during the first mount.\n */\n autoFocus: PropTypes.bool,\n /**\n * If `true`, the component is checked.\n */\n checked: PropTypes.bool,\n /**\n * The icon to display when the component is checked.\n */\n checkedIcon: PropTypes.node.isRequired,\n /**\n * Override or extend the styles applied to the component.\n * See [CSS API](#css) below for more details.\n */\n classes: PropTypes.object,\n /**\n * @ignore\n */\n className: PropTypes.string,\n /**\n * @ignore\n */\n defaultChecked: PropTypes.bool,\n /**\n * If `true`, the component is disabled.\n */\n disabled: PropTypes.bool,\n /**\n * If `true`, the keyboard focus ripple is disabled.\n * @default false\n */\n disableFocusRipple: PropTypes.bool,\n /**\n * If given, uses a negative margin to counteract the padding on one\n * side (this is often helpful for aligning the left or right\n * side of the icon with content above or below, without ruining the border\n * size and shape).\n * @default false\n */\n edge: PropTypes.oneOf(['end', 'start', false]),\n /**\n * The icon to display when the component is unchecked.\n */\n icon: PropTypes.node.isRequired,\n /**\n * The id of the `input` element.\n */\n id: PropTypes.string,\n /**\n * [Attributes](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input#Attributes) applied to the `input` element.\n */\n inputProps: PropTypes.object,\n /**\n * Pass a ref to the `input` element.\n */\n inputRef: refType,\n /*\n * @ignore\n */\n name: PropTypes.string,\n /**\n * @ignore\n */\n onBlur: PropTypes.func,\n /**\n * Callback fired when the state is changed.\n *\n * @param {object} event The event source of the callback.\n * You can pull out the new checked state by accessing `event.target.checked` (boolean).\n */\n onChange: PropTypes.func,\n /**\n * @ignore\n */\n onFocus: PropTypes.func,\n /**\n * It prevents the user from changing the value of the field\n * (not from interacting with the field).\n */\n readOnly: PropTypes.bool,\n /**\n * If `true`, the `input` element is required.\n */\n required: PropTypes.bool,\n /**\n * The system prop that allows defining system overrides as well as additional CSS styles.\n */\n sx: PropTypes.object,\n /**\n * @ignore\n */\n tabIndex: PropTypes.oneOfType([PropTypes.number, PropTypes.string]),\n /**\n * The input component prop `type`.\n */\n 
type: PropTypes.string.isRequired,\n /**\n * The value of the component.\n */\n value: PropTypes.any\n} : void 0;\nexport default SwitchBase;","import * as React from 'react';\nimport createSvgIcon from '../../utils/createSvgIcon';\n\n/**\n * @ignore - internal component.\n */\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nexport default createSvgIcon( /*#__PURE__*/_jsx(\"path\", {\n d: \"M19 5v14H5V5h14m0-2H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2z\"\n}), 'CheckBoxOutlineBlank');","import * as React from 'react';\nimport createSvgIcon from '../../utils/createSvgIcon';\n\n/**\n * @ignore - internal component.\n */\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nexport default createSvgIcon( /*#__PURE__*/_jsx(\"path\", {\n d: \"M19 3H5c-1.11 0-2 .9-2 2v14c0 1.1.89 2 2 2h14c1.11 0 2-.9 2-2V5c0-1.1-.89-2-2-2zm-9 14l-5-5 1.41-1.41L10 14.17l7.59-7.59L19 8l-9 9z\"\n}), 'CheckBox');","import * as React from 'react';\nimport createSvgIcon from '../../utils/createSvgIcon';\n\n/**\n * @ignore - internal component.\n */\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nexport default createSvgIcon( /*#__PURE__*/_jsx(\"path\", {\n d: \"M19 3H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm-2 10H7v-2h10v2z\"\n}), 'IndeterminateCheckBox');","import { unstable_generateUtilityClasses as generateUtilityClasses } from '@mui/utils';\nimport generateUtilityClass from '../generateUtilityClass';\nexport function getCheckboxUtilityClass(slot) {\n return generateUtilityClass('MuiCheckbox', slot);\n}\nconst checkboxClasses = generateUtilityClasses('MuiCheckbox', ['root', 'checked', 'disabled', 'indeterminate', 'colorPrimary', 'colorSecondary']);\nexport default checkboxClasses;","import _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nimport _extends from \"@babel/runtime/helpers/esm/extends\";\nconst _excluded = [\"checkedIcon\", \"color\", \"icon\", \"indeterminate\", \"indeterminateIcon\", \"inputProps\", \"size\", \"className\"];\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport clsx from 'clsx';\nimport { refType } from '@mui/utils';\nimport { unstable_composeClasses as composeClasses } from '@mui/base';\nimport { alpha } from '@mui/system';\nimport SwitchBase from '../internal/SwitchBase';\nimport CheckBoxOutlineBlankIcon from '../internal/svg-icons/CheckBoxOutlineBlank';\nimport CheckBoxIcon from '../internal/svg-icons/CheckBox';\nimport IndeterminateCheckBoxIcon from '../internal/svg-icons/IndeterminateCheckBox';\nimport capitalize from '../utils/capitalize';\nimport useThemeProps from '../styles/useThemeProps';\nimport styled, { rootShouldForwardProp } from '../styles/styled';\nimport checkboxClasses, { getCheckboxUtilityClass } from './checkboxClasses';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst useUtilityClasses = ownerState => {\n const {\n classes,\n indeterminate,\n color\n } = ownerState;\n const slots = {\n root: ['root', indeterminate && 'indeterminate', `color${capitalize(color)}`]\n };\n const composedClasses = composeClasses(slots, getCheckboxUtilityClass, classes);\n return _extends({}, classes, composedClasses);\n};\nconst CheckboxRoot = styled(SwitchBase, {\n shouldForwardProp: prop => rootShouldForwardProp(prop) || prop === 'classes',\n name: 'MuiCheckbox',\n slot: 'Root',\n overridesResolver: (props, styles) => {\n const {\n ownerState\n } = props;\n return [styles.root, ownerState.indeterminate && styles.indeterminate, 
ownerState.color !== 'default' && styles[`color${capitalize(ownerState.color)}`]];\n }\n})(({\n theme,\n ownerState\n}) => _extends({\n color: (theme.vars || theme).palette.text.secondary\n}, !ownerState.disableRipple && {\n '&:hover': {\n backgroundColor: theme.vars ? `rgba(${ownerState.color === 'default' ? theme.vars.palette.action.activeChannel : theme.vars.palette.primary.mainChannel} / ${theme.vars.palette.action.hoverOpacity})` : alpha(ownerState.color === 'default' ? theme.palette.action.active : theme.palette[ownerState.color].main, theme.palette.action.hoverOpacity),\n // Reset on touch devices, it doesn't add specificity\n '@media (hover: none)': {\n backgroundColor: 'transparent'\n }\n }\n}, ownerState.color !== 'default' && {\n [`&.${checkboxClasses.checked}, &.${checkboxClasses.indeterminate}`]: {\n color: (theme.vars || theme).palette[ownerState.color].main\n },\n [`&.${checkboxClasses.disabled}`]: {\n color: (theme.vars || theme).palette.action.disabled\n }\n}));\nconst defaultCheckedIcon = /*#__PURE__*/_jsx(CheckBoxIcon, {});\nconst defaultIcon = /*#__PURE__*/_jsx(CheckBoxOutlineBlankIcon, {});\nconst defaultIndeterminateIcon = /*#__PURE__*/_jsx(IndeterminateCheckBoxIcon, {});\nconst Checkbox = /*#__PURE__*/React.forwardRef(function Checkbox(inProps, ref) {\n var _icon$props$fontSize, _indeterminateIcon$pr;\n const props = useThemeProps({\n props: inProps,\n name: 'MuiCheckbox'\n });\n const {\n checkedIcon = defaultCheckedIcon,\n color = 'primary',\n icon: iconProp = defaultIcon,\n indeterminate = false,\n indeterminateIcon: indeterminateIconProp = defaultIndeterminateIcon,\n inputProps,\n size = 'medium',\n className\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const icon = indeterminate ? indeterminateIconProp : iconProp;\n const indeterminateIcon = indeterminate ? indeterminateIconProp : checkedIcon;\n const ownerState = _extends({}, props, {\n color,\n indeterminate,\n size\n });\n const classes = useUtilityClasses(ownerState);\n return /*#__PURE__*/_jsx(CheckboxRoot, _extends({\n type: \"checkbox\",\n inputProps: _extends({\n 'data-indeterminate': indeterminate\n }, inputProps),\n icon: /*#__PURE__*/React.cloneElement(icon, {\n fontSize: (_icon$props$fontSize = icon.props.fontSize) != null ? _icon$props$fontSize : size\n }),\n checkedIcon: /*#__PURE__*/React.cloneElement(indeterminateIcon, {\n fontSize: (_indeterminateIcon$pr = indeterminateIcon.props.fontSize) != null ? _indeterminateIcon$pr : size\n }),\n ownerState: ownerState,\n ref: ref,\n className: clsx(classes.root, className)\n }, other, {\n classes: classes\n }));\n});\nprocess.env.NODE_ENV !== \"production\" ? 
Checkbox.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * If `true`, the component is checked.\n */\n checked: PropTypes.bool,\n /**\n * The icon to display when the component is checked.\n * @default \n */\n checkedIcon: PropTypes.node,\n /**\n * Override or extend the styles applied to the component.\n */\n classes: PropTypes.object,\n /**\n * @ignore\n */\n className: PropTypes.string,\n /**\n * The color of the component.\n * It supports both default and custom theme colors, which can be added as shown in the\n * [palette customization guide](https://mui.com/material-ui/customization/palette/#adding-new-colors).\n * @default 'primary'\n */\n color: PropTypes /* @typescript-to-proptypes-ignore */.oneOfType([PropTypes.oneOf(['default', 'primary', 'secondary', 'error', 'info', 'success', 'warning']), PropTypes.string]),\n /**\n * The default checked state. Use when the component is not controlled.\n */\n defaultChecked: PropTypes.bool,\n /**\n * If `true`, the component is disabled.\n * @default false\n */\n disabled: PropTypes.bool,\n /**\n * If `true`, the ripple effect is disabled.\n * @default false\n */\n disableRipple: PropTypes.bool,\n /**\n * The icon to display when the component is unchecked.\n * @default \n */\n icon: PropTypes.node,\n /**\n * The id of the `input` element.\n */\n id: PropTypes.string,\n /**\n * If `true`, the component appears indeterminate.\n * This does not set the native input element to indeterminate due\n * to inconsistent behavior across browsers.\n * However, we set a `data-indeterminate` attribute on the `input`.\n * @default false\n */\n indeterminate: PropTypes.bool,\n /**\n * The icon to display when the component is indeterminate.\n * @default \n */\n indeterminateIcon: PropTypes.node,\n /**\n * [Attributes](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input#Attributes) applied to the `input` element.\n */\n inputProps: PropTypes.object,\n /**\n * Pass a ref to the `input` element.\n */\n inputRef: refType,\n /**\n * Callback fired when the state is changed.\n *\n * @param {React.ChangeEvent} event The event source of the callback.\n * You can pull out the new checked state by accessing `event.target.checked` (boolean).\n */\n onChange: PropTypes.func,\n /**\n * If `true`, the `input` element is required.\n * @default false\n */\n required: PropTypes.bool,\n /**\n * The size of the component.\n * `small` is equivalent to the dense checkbox styling.\n * @default 'medium'\n */\n size: PropTypes /* @typescript-to-proptypes-ignore */.oneOfType([PropTypes.oneOf(['medium', 'small']), PropTypes.string]),\n /**\n * The system prop that allows defining system overrides as well as additional CSS styles.\n */\n sx: PropTypes.oneOfType([PropTypes.arrayOf(PropTypes.oneOfType([PropTypes.func, PropTypes.object, PropTypes.bool])), PropTypes.func, PropTypes.object]),\n /**\n * The value of the component. 
The DOM API casts this to a string.\n * The browser uses \"on\" as the default value.\n */\n value: PropTypes.any\n} : void 0;\nexport default Checkbox;","import React, { useState } from \"react\";\n\nimport {\n Button,\n Checkbox,\n DialogContent,\n FormControl,\n FormControlLabel,\n InputAdornment,\n MenuItem,\n TextField,\n Typography,\n} from \"@mui/material\";\nimport LanguageIcon from \"@mui/icons-material/Language\";\n\nimport { useDebounce } from \"hooks\";\nimport { Model } from \"utils/models/Model\";\nimport { UploadedClassifier } from \"utils/models/UploadedClassifier/UploadedClassifier\";\nimport { ModelTask } from \"utils/models/enums\";\nimport { Shape } from \"store/data/types\";\n\nexport const CloudUpload = ({\n modelTask,\n isGraph,\n setModel,\n setInputShape,\n}: {\n modelTask: ModelTask;\n isGraph: boolean;\n setModel: (model: Model) => void;\n setInputShape: React.Dispatch>;\n}) => {\n const [errMessage, setErrMessage] = useState(\"\");\n const [successMessage, setSuccessMessage] = useState(\"\");\n const [modelUrl, setModelUrl] = useState(\"\");\n const [isFromTFHub, setIsFromTFHub] = useState(false);\n\n const verifySourceMatch = (url: string, isFromTFHub: boolean) => {\n if (isFromTFHub && !UploadedClassifier.verifyTFHubUrl(url)) {\n setErrMessage(\"URL must point to TFHub\");\n return;\n }\n\n setErrMessage(\"\");\n return;\n };\n\n const verifySourceMatchDebounced = useDebounce(verifySourceMatch, 1000);\n\n const handleSourceChange = (event: React.ChangeEvent) => {\n setIsFromTFHub(event.target.checked);\n verifySourceMatch(modelUrl, event.target.checked);\n };\n\n const handleModelUrlChange = (event: React.ChangeEvent) => {\n setModelUrl(event.target.value);\n verifySourceMatchDebounced(event.target.value, isFromTFHub);\n };\n\n const loadModel = async () => {\n setErrMessage(\"\");\n setSuccessMessage(\"\");\n\n if (modelTask === ModelTask.Classification) {\n const model = new UploadedClassifier({\n name: \"User Uploaded Classifier\",\n task: modelTask,\n pretrained: true,\n trainable: isGraph,\n TFHub: isFromTFHub,\n graph: isGraph,\n src: modelUrl,\n });\n\n try {\n await model.upload();\n } catch (err) {\n setErrMessage(`Failed to download model: ${err}`);\n return;\n }\n\n const inputShape = model.defaultInputShape;\n\n setInputShape((prevShape) => ({\n ...prevShape,\n height: inputShape[0],\n width: inputShape[1],\n channels: inputShape[2],\n }));\n\n setModel(model);\n\n setSuccessMessage(\n `Successfully uploaded Classification ${\n isGraph ? 
\"Graph\" : \"Layers\"\n } Model (\"${model.name}\")`\n );\n } else {\n // TODO - segmenter\n setErrMessage(\"Segmenter loading by url not yet supported\");\n }\n };\n\n return (\n <>\n \n {\"Upload a model from the internet.\"}\n \n \n \n \n \n \n ),\n }}\n size={\"small\"}\n value={modelUrl}\n onChange={handleModelUrlChange}\n error={errMessage.length > 0}\n />\n \n {errMessage}\n \n \n {successMessage}\n \n \n }\n label=\"From TF Hub?\"\n />\n \n loadModel()}\n color=\"primary\"\n disabled={errMessage.length !== 0 || modelUrl.length === 0}\n >\n Load Model\n \n \n \n );\n};\n","import { unstable_generateUtilityClasses as generateUtilityClasses } from '@mui/utils';\nimport generateUtilityClass from '../generateUtilityClass';\nexport function getFormGroupUtilityClass(slot) {\n return generateUtilityClass('MuiFormGroup', slot);\n}\nconst formGroupClasses = generateUtilityClasses('MuiFormGroup', ['root', 'row', 'error']);\nexport default formGroupClasses;","import _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nimport _extends from \"@babel/runtime/helpers/esm/extends\";\nconst _excluded = [\"className\", \"row\"];\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport clsx from 'clsx';\nimport { unstable_composeClasses as composeClasses } from '@mui/base';\nimport styled from '../styles/styled';\nimport useThemeProps from '../styles/useThemeProps';\nimport { getFormGroupUtilityClass } from './formGroupClasses';\nimport useFormControl from '../FormControl/useFormControl';\nimport formControlState from '../FormControl/formControlState';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst useUtilityClasses = ownerState => {\n const {\n classes,\n row,\n error\n } = ownerState;\n const slots = {\n root: ['root', row && 'row', error && 'error']\n };\n return composeClasses(slots, getFormGroupUtilityClass, classes);\n};\nconst FormGroupRoot = styled('div', {\n name: 'MuiFormGroup',\n slot: 'Root',\n overridesResolver: (props, styles) => {\n const {\n ownerState\n } = props;\n return [styles.root, ownerState.row && styles.row];\n }\n})(({\n ownerState\n}) => _extends({\n display: 'flex',\n flexDirection: 'column',\n flexWrap: 'wrap'\n}, ownerState.row && {\n flexDirection: 'row'\n}));\n\n/**\n * `FormGroup` wraps controls such as `Checkbox` and `Switch`.\n * It provides compact row layout.\n * For the `Radio`, you should be using the `RadioGroup` component instead of this one.\n */\nconst FormGroup = /*#__PURE__*/React.forwardRef(function FormGroup(inProps, ref) {\n const props = useThemeProps({\n props: inProps,\n name: 'MuiFormGroup'\n });\n const {\n className,\n row = false\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const muiFormControl = useFormControl();\n const fcs = formControlState({\n props,\n muiFormControl,\n states: ['error']\n });\n const ownerState = _extends({}, props, {\n row,\n error: fcs.error\n });\n const classes = useUtilityClasses(ownerState);\n return /*#__PURE__*/_jsx(FormGroupRoot, _extends({\n className: clsx(classes.root, className),\n ownerState: ownerState,\n ref: ref\n }, other));\n});\nprocess.env.NODE_ENV !== \"production\" ? 
FormGroup.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * The content of the component.\n */\n children: PropTypes.node,\n /**\n * Override or extend the styles applied to the component.\n */\n classes: PropTypes.object,\n /**\n * @ignore\n */\n className: PropTypes.string,\n /**\n * Display group of elements in a compact row.\n * @default false\n */\n row: PropTypes.bool,\n /**\n * The system prop that allows defining system overrides as well as additional CSS styles.\n */\n sx: PropTypes.oneOfType([PropTypes.arrayOf(PropTypes.oneOfType([PropTypes.func, PropTypes.object, PropTypes.bool])), PropTypes.func, PropTypes.object])\n} : void 0;\nexport default FormGroup;","import * as React from 'react';\n/**\n * @ignore - internal component.\n */\nconst RadioGroupContext = /*#__PURE__*/React.createContext(undefined);\nif (process.env.NODE_ENV !== 'production') {\n RadioGroupContext.displayName = 'RadioGroupContext';\n}\nexport default RadioGroupContext;","import _extends from \"@babel/runtime/helpers/esm/extends\";\nimport _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nconst _excluded = [\"actions\", \"children\", \"defaultValue\", \"name\", \"onChange\", \"value\"];\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport FormGroup from '../FormGroup';\nimport useForkRef from '../utils/useForkRef';\nimport useControlled from '../utils/useControlled';\nimport RadioGroupContext from './RadioGroupContext';\nimport useId from '../utils/useId';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst RadioGroup = /*#__PURE__*/React.forwardRef(function RadioGroup(props, ref) {\n const {\n // private\n // eslint-disable-next-line react/prop-types\n actions,\n children,\n defaultValue,\n name: nameProp,\n onChange,\n value: valueProp\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const rootRef = React.useRef(null);\n const [value, setValueState] = useControlled({\n controlled: valueProp,\n default: defaultValue,\n name: 'RadioGroup'\n });\n React.useImperativeHandle(actions, () => ({\n focus: () => {\n let input = rootRef.current.querySelector('input:not(:disabled):checked');\n if (!input) {\n input = rootRef.current.querySelector('input:not(:disabled)');\n }\n if (input) {\n input.focus();\n }\n }\n }), []);\n const handleRef = useForkRef(ref, rootRef);\n const name = useId(nameProp);\n const contextValue = React.useMemo(() => ({\n name,\n onChange(event) {\n setValueState(event.target.value);\n if (onChange) {\n onChange(event, event.target.value);\n }\n },\n value\n }), [name, onChange, setValueState, value]);\n return /*#__PURE__*/_jsx(RadioGroupContext.Provider, {\n value: contextValue,\n children: /*#__PURE__*/_jsx(FormGroup, _extends({\n role: \"radiogroup\",\n ref: handleRef\n }, other, {\n children: children\n }))\n });\n});\nprocess.env.NODE_ENV !== \"production\" ? 
RadioGroup.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * The content of the component.\n */\n children: PropTypes.node,\n /**\n * The default value. Use when the component is not controlled.\n */\n defaultValue: PropTypes.any,\n /**\n * The name used to reference the value of the control.\n * If you don't provide this prop, it falls back to a randomly generated name.\n */\n name: PropTypes.string,\n /**\n * Callback fired when a radio button is selected.\n *\n * @param {React.ChangeEvent} event The event source of the callback.\n * @param {string} value The value of the selected radio button.\n * You can pull out the new value by accessing `event.target.value` (string).\n */\n onChange: PropTypes.func,\n /**\n * Value of the selected radio button. The DOM API casts this to a string.\n */\n value: PropTypes.any\n} : void 0;\nexport default RadioGroup;","import * as React from 'react';\nimport createSvgIcon from '../../utils/createSvgIcon';\n\n/**\n * @ignore - internal component.\n */\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nexport default createSvgIcon( /*#__PURE__*/_jsx(\"path\", {\n d: \"M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm0 18c-4.42 0-8-3.58-8-8s3.58-8 8-8 8 3.58 8 8-3.58 8-8 8z\"\n}), 'RadioButtonUnchecked');","import * as React from 'react';\nimport createSvgIcon from '../../utils/createSvgIcon';\n\n/**\n * @ignore - internal component.\n */\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nexport default createSvgIcon( /*#__PURE__*/_jsx(\"path\", {\n d: \"M8.465 8.465C9.37 7.56 10.62 7 12 7C14.76 7 17 9.24 17 12C17 13.38 16.44 14.63 15.535 15.535C14.63 16.44 13.38 17 12 17C9.24 17 7 14.76 7 12C7 10.62 7.56 9.37 8.465 8.465Z\"\n}), 'RadioButtonChecked');","import _extends from \"@babel/runtime/helpers/esm/extends\";\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport RadioButtonUncheckedIcon from '../internal/svg-icons/RadioButtonUnchecked';\nimport RadioButtonCheckedIcon from '../internal/svg-icons/RadioButtonChecked';\nimport styled from '../styles/styled';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nimport { jsxs as _jsxs } from \"react/jsx-runtime\";\nconst RadioButtonIconRoot = styled('span')({\n position: 'relative',\n display: 'flex'\n});\nconst RadioButtonIconBackground = styled(RadioButtonUncheckedIcon)({\n // Scale applied to prevent dot misalignment in Safari\n transform: 'scale(1)'\n});\nconst RadioButtonIconDot = styled(RadioButtonCheckedIcon)(({\n theme,\n ownerState\n}) => _extends({\n left: 0,\n position: 'absolute',\n transform: 'scale(0)',\n transition: theme.transitions.create('transform', {\n easing: theme.transitions.easing.easeIn,\n duration: theme.transitions.duration.shortest\n })\n}, ownerState.checked && {\n transform: 'scale(1)',\n transition: theme.transitions.create('transform', {\n easing: theme.transitions.easing.easeOut,\n duration: theme.transitions.duration.shortest\n })\n}));\n\n/**\n * @ignore - internal component.\n */\nfunction RadioButtonIcon(props) {\n const {\n checked = false,\n classes = {},\n fontSize\n } = props;\n const ownerState = _extends({}, props, {\n checked\n });\n return /*#__PURE__*/_jsxs(RadioButtonIconRoot, {\n className: classes.root,\n ownerState: 
ownerState,\n children: [/*#__PURE__*/_jsx(RadioButtonIconBackground, {\n fontSize: fontSize,\n className: classes.background,\n ownerState: ownerState\n }), /*#__PURE__*/_jsx(RadioButtonIconDot, {\n fontSize: fontSize,\n className: classes.dot,\n ownerState: ownerState\n })]\n });\n}\nprocess.env.NODE_ENV !== \"production\" ? RadioButtonIcon.propTypes = {\n /**\n * If `true`, the component is checked.\n */\n checked: PropTypes.bool,\n /**\n * Override or extend the styles applied to the component.\n * See [CSS API](#css) below for more details.\n */\n classes: PropTypes.object,\n /**\n * The size of the component.\n * `small` is equivalent to the dense radio styling.\n */\n fontSize: PropTypes.oneOf(['small', 'medium'])\n} : void 0;\nexport default RadioButtonIcon;","import { unstable_generateUtilityClasses as generateUtilityClasses } from '@mui/utils';\nimport generateUtilityClass from '../generateUtilityClass';\nexport function getRadioUtilityClass(slot) {\n return generateUtilityClass('MuiRadio', slot);\n}\nconst radioClasses = generateUtilityClasses('MuiRadio', ['root', 'checked', 'disabled', 'colorPrimary', 'colorSecondary']);\nexport default radioClasses;","import _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nimport _extends from \"@babel/runtime/helpers/esm/extends\";\nconst _excluded = [\"checked\", \"checkedIcon\", \"color\", \"icon\", \"name\", \"onChange\", \"size\", \"className\"];\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport clsx from 'clsx';\nimport { refType } from '@mui/utils';\nimport { unstable_composeClasses as composeClasses } from '@mui/base';\nimport { alpha } from '@mui/system';\nimport SwitchBase from '../internal/SwitchBase';\nimport useThemeProps from '../styles/useThemeProps';\nimport RadioButtonIcon from './RadioButtonIcon';\nimport capitalize from '../utils/capitalize';\nimport createChainedFunction from '../utils/createChainedFunction';\nimport useRadioGroup from '../RadioGroup/useRadioGroup';\nimport radioClasses, { getRadioUtilityClass } from './radioClasses';\nimport styled, { rootShouldForwardProp } from '../styles/styled';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst useUtilityClasses = ownerState => {\n const {\n classes,\n color\n } = ownerState;\n const slots = {\n root: ['root', `color${capitalize(color)}`]\n };\n return _extends({}, classes, composeClasses(slots, getRadioUtilityClass, classes));\n};\nconst RadioRoot = styled(SwitchBase, {\n shouldForwardProp: prop => rootShouldForwardProp(prop) || prop === 'classes',\n name: 'MuiRadio',\n slot: 'Root',\n overridesResolver: (props, styles) => {\n const {\n ownerState\n } = props;\n return [styles.root, styles[`color${capitalize(ownerState.color)}`]];\n }\n})(({\n theme,\n ownerState\n}) => _extends({\n color: (theme.vars || theme).palette.text.secondary\n}, !ownerState.disableRipple && {\n '&:hover': {\n backgroundColor: theme.vars ? `rgba(${ownerState.color === 'default' ? theme.vars.palette.action.activeChannel : theme.vars.palette[ownerState.color].mainChannel} / ${theme.vars.palette.action.hoverOpacity})` : alpha(ownerState.color === 'default' ? 
theme.palette.action.active : theme.palette[ownerState.color].main, theme.palette.action.hoverOpacity),\n // Reset on touch devices, it doesn't add specificity\n '@media (hover: none)': {\n backgroundColor: 'transparent'\n }\n }\n}, ownerState.color !== 'default' && {\n [`&.${radioClasses.checked}`]: {\n color: (theme.vars || theme).palette[ownerState.color].main\n }\n}, {\n [`&.${radioClasses.disabled}`]: {\n color: (theme.vars || theme).palette.action.disabled\n }\n}));\nfunction areEqualValues(a, b) {\n if (typeof b === 'object' && b !== null) {\n return a === b;\n }\n\n // The value could be a number, the DOM will stringify it anyway.\n return String(a) === String(b);\n}\nconst defaultCheckedIcon = /*#__PURE__*/_jsx(RadioButtonIcon, {\n checked: true\n});\nconst defaultIcon = /*#__PURE__*/_jsx(RadioButtonIcon, {});\nconst Radio = /*#__PURE__*/React.forwardRef(function Radio(inProps, ref) {\n var _defaultIcon$props$fo, _defaultCheckedIcon$p;\n const props = useThemeProps({\n props: inProps,\n name: 'MuiRadio'\n });\n const {\n checked: checkedProp,\n checkedIcon = defaultCheckedIcon,\n color = 'primary',\n icon = defaultIcon,\n name: nameProp,\n onChange: onChangeProp,\n size = 'medium',\n className\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const ownerState = _extends({}, props, {\n color,\n size\n });\n const classes = useUtilityClasses(ownerState);\n const radioGroup = useRadioGroup();\n let checked = checkedProp;\n const onChange = createChainedFunction(onChangeProp, radioGroup && radioGroup.onChange);\n let name = nameProp;\n if (radioGroup) {\n if (typeof checked === 'undefined') {\n checked = areEqualValues(radioGroup.value, props.value);\n }\n if (typeof name === 'undefined') {\n name = radioGroup.name;\n }\n }\n return /*#__PURE__*/_jsx(RadioRoot, _extends({\n type: \"radio\",\n icon: /*#__PURE__*/React.cloneElement(icon, {\n fontSize: (_defaultIcon$props$fo = defaultIcon.props.fontSize) != null ? _defaultIcon$props$fo : size\n }),\n checkedIcon: /*#__PURE__*/React.cloneElement(checkedIcon, {\n fontSize: (_defaultCheckedIcon$p = defaultCheckedIcon.props.fontSize) != null ? _defaultCheckedIcon$p : size\n }),\n ownerState: ownerState,\n classes: classes,\n name: name,\n checked: checked,\n onChange: onChange,\n ref: ref,\n className: clsx(classes.root, className)\n }, other));\n});\nprocess.env.NODE_ENV !== \"production\" ? 
Radio.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * If `true`, the component is checked.\n */\n checked: PropTypes.bool,\n /**\n * The icon to display when the component is checked.\n * @default \n */\n checkedIcon: PropTypes.node,\n /**\n * Override or extend the styles applied to the component.\n */\n classes: PropTypes.object,\n /**\n * @ignore\n */\n className: PropTypes.string,\n /**\n * The color of the component.\n * It supports both default and custom theme colors, which can be added as shown in the\n * [palette customization guide](https://mui.com/material-ui/customization/palette/#adding-new-colors).\n * @default 'primary'\n */\n color: PropTypes /* @typescript-to-proptypes-ignore */.oneOfType([PropTypes.oneOf(['default', 'primary', 'secondary', 'error', 'info', 'success', 'warning']), PropTypes.string]),\n /**\n * If `true`, the component is disabled.\n */\n disabled: PropTypes.bool,\n /**\n * If `true`, the ripple effect is disabled.\n * @default false\n */\n disableRipple: PropTypes.bool,\n /**\n * The icon to display when the component is unchecked.\n * @default \n */\n icon: PropTypes.node,\n /**\n * The id of the `input` element.\n */\n id: PropTypes.string,\n /**\n * [Attributes](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input#Attributes) applied to the `input` element.\n */\n inputProps: PropTypes.object,\n /**\n * Pass a ref to the `input` element.\n */\n inputRef: refType,\n /**\n * Name attribute of the `input` element.\n */\n name: PropTypes.string,\n /**\n * Callback fired when the state is changed.\n *\n * @param {React.ChangeEvent} event The event source of the callback.\n * You can pull out the new value by accessing `event.target.value` (string).\n * You can pull out the new checked state by accessing `event.target.checked` (boolean).\n */\n onChange: PropTypes.func,\n /**\n * If `true`, the `input` element is required.\n * @default false\n */\n required: PropTypes.bool,\n /**\n * The size of the component.\n * `small` is equivalent to the dense radio styling.\n * @default 'medium'\n */\n size: PropTypes /* @typescript-to-proptypes-ignore */.oneOfType([PropTypes.oneOf(['medium', 'small']), PropTypes.string]),\n /**\n * The system prop that allows defining system overrides as well as additional CSS styles.\n */\n sx: PropTypes.oneOfType([PropTypes.arrayOf(PropTypes.oneOfType([PropTypes.func, PropTypes.object, PropTypes.bool])), PropTypes.func, PropTypes.object]),\n /**\n * The value of the component. 
The DOM API casts this to a string.\n */\n value: PropTypes.any\n} : void 0;\nexport default Radio;","import * as React from 'react';\nimport RadioGroupContext from './RadioGroupContext';\nexport default function useRadioGroup() {\n return React.useContext(RadioGroupContext);\n}","import {\n FormControl,\n FormControlLabel,\n FormLabel,\n MenuItem,\n Radio,\n RadioGroup,\n} from \"@mui/material\";\n\nexport const ModelFormatSelection = ({\n isGraph,\n setIsGraph,\n}: {\n isGraph: boolean;\n setIsGraph: React.Dispatch>;\n}) => {\n const handleModelFormatChange = (\n event: React.ChangeEvent\n ) => setIsGraph(event.target.value === \"Graph\");\n\n return (\n \n \n \n Model Format\n \n \n }\n label=\"Graph Model\"\n />\n }\n label=\"Layers Model\"\n />\n \n \n \n );\n};\n","import React, { useCallback, useEffect, useState } from \"react\";\n\nimport {\n Alert,\n Box,\n Button,\n Collapse,\n Dialog,\n DialogActions,\n DialogTitle,\n IconButton,\n Tab,\n TabProps,\n Tabs,\n Tooltip,\n TooltipProps,\n} from \"@mui/material\";\n\nimport CloseIcon from \"@mui/icons-material/Close\";\n\nimport { useHotkeys } from \"hooks\";\n\nimport { LocalFileUpload } from \"./LocalFileUpload\";\nimport { PretrainedModelSelector } from \"./PretrainedModelSelector\";\nimport { CloudUpload } from \"./CloudUpload\";\n\nimport { Model } from \"utils/models/Model/Model\";\nimport { ModelFormatSelection } from \"./ModelFormatSelection\";\nimport { Cellpose } from \"utils/models/Cellpose\";\nimport { ModelTask } from \"utils/models/enums\";\nimport { availableClassifierModels } from \"utils/models/availableClassificationModels\";\nimport { availableSegmenterModels } from \"utils/models/availableSegmentationModels\";\nimport { HotkeyView } from \"utils/common/enums\";\nimport { Shape } from \"store/data/types\";\n\nconst ToolTipTab = (\n props: TabProps & {\n disabledMessage: string;\n placement: TooltipProps[\"placement\"];\n }\n) => {\n const {\n label,\n disabled,\n onChange,\n value,\n placement,\n disabledMessage,\n ...rest\n } = props;\n\n return (\n \n {label}\n \n }\n disabled={disabled}\n onChange={onChange}\n {...rest}\n />\n );\n};\n\ntype ImportTensorflowModelDialogProps = {\n onClose: () => void;\n open: boolean;\n modelTask: ModelTask;\n dispatchFunction: (model: Model, inputShape: Shape) => void;\n};\n\nexport const ImportTensorflowModelDialog = ({\n onClose,\n open,\n modelTask,\n dispatchFunction,\n}: ImportTensorflowModelDialogProps) => {\n const [selectedModel, setSelectedModel] = useState();\n const [inputShape, setInputShape] = useState({\n height: 256,\n width: 256,\n channels: 3,\n planes: 1,\n });\n const [isGraph, setIsGraph] = useState(false);\n\n const [pretrainedModels, setPretrainedModels] = useState>([]);\n\n const [cloudWarning, setCloudWarning] = useState(false);\n\n const [tabVal, setTabVal] = useState(\"1\");\n\n const onModelChange = useCallback((model: Model | undefined) => {\n setSelectedModel(model);\n // TODO - segmenter: generalize to model.cloud\n if (model instanceof Cellpose) {\n setCloudWarning(true);\n } else {\n setCloudWarning(false);\n }\n }, []);\n\n const dispatchModelToStore = () => {\n if (!selectedModel) {\n process.env.NODE_ENV !== \"production\" &&\n console.warn(\"Attempting to dispatch undefined model\");\n return;\n }\n\n dispatchFunction(selectedModel, inputShape);\n\n closeDialog();\n };\n\n const closeDialog = () => {\n setCloudWarning(false);\n onClose();\n };\n\n const onTabSelect = (event: React.SyntheticEvent, newValue: string) => {\n setTabVal(newValue);\n 
};\n\n useHotkeys(\n \"enter\",\n () => dispatchModelToStore(),\n HotkeyView.ImportTensorflowModelDialog,\n { enableOnTags: [\"INPUT\"] },\n [dispatchModelToStore]\n );\n\n useEffect(() => {\n const allModels =\n modelTask === ModelTask.Classification\n ? availableClassifierModels\n : availableSegmenterModels;\n\n const _pretrainedModels = (allModels as Model[]).filter(\n (m) => m.pretrained\n );\n\n setPretrainedModels(_pretrainedModels);\n // if no pretrained models, make sure not on tab 1\n setTabVal((curr) =>\n _pretrainedModels.length === 0 && curr === \"1\" ? \"2\" : curr\n );\n }, [modelTask]);\n\n return (\n \n \n {\n setCloudWarning(false);\n }}\n >\n \n \n }\n sx={{ mb: 2 }}\n >\n This model performs inference in the cloud ☁️\n \n \n \n Load{\" \"}\n {modelTask === ModelTask.Classification\n ? \"Classification\"\n : \"Segmentation\"}{\" \"}\n model\n \n\n \n \n\n \n\n \n \n\n \n\n \n\n \n\n \n\n \n \n\n \n Open{\" \"}\n {modelTask === ModelTask.Classification\n ? \"Classification\"\n : \"Segmentation\"}{\" \"}\n model\n \n \n \n );\n};\n","import { useState } from \"react\";\n\nimport { DialogContentText, TextField } from \"@mui/material\";\n\nimport { useTranslation } from \"hooks\";\n\nimport { DialogWithAction } from \"../DialogWithAction\";\nimport { createGitHubIssue } from \"utils/common/helpers\";\nimport { AlertType } from \"utils/common/enums\";\n\ntype SendFeedbackDialogProps = {\n onClose: () => void;\n open: boolean;\n};\n\nexport const SendFeedbackDialog = ({\n onClose,\n open,\n}: SendFeedbackDialogProps) => {\n const t = useTranslation();\n\n const [issueTitle, setIssueTitle] = useState(\"\");\n const [issueComment, setIssueComment] = useState(\"\");\n\n const openGitHubIssue = () => {\n createGitHubIssue(issueTitle, issueComment, AlertType.Warning);\n\n setIssueTitle(\"\");\n setIssueComment(\"\");\n\n onClose();\n };\n\n return (\n \n {\" \"}\n \n {t(\n \"Use this form to report issues with Piximi via our GitHub page, or visit\"\n )}{\" \"}\n \n forum.image.sc/tag/piximi\n \n .\n \n setIssueTitle(e.target.value)}\n multiline\n rows={1}\n fullWidth\n />\n setIssueComment(e.target.value)}\n multiline\n rows={10}\n fullWidth\n />\n \n }\n onConfirm={openGitHubIssue}\n confirmText=\"Create Github Issue\"\n />\n );\n};\n","var checkboardCache = {};\n\nexport var render = function render(c1, c2, size, serverCanvas) {\n if (typeof document === 'undefined' && !serverCanvas) {\n return null;\n }\n var canvas = serverCanvas ? new serverCanvas() : document.createElement('canvas');\n canvas.width = size * 2;\n canvas.height = size * 2;\n var ctx = canvas.getContext('2d');\n if (!ctx) {\n return null;\n } // If no context can be found, return early.\n ctx.fillStyle = c1;\n ctx.fillRect(0, 0, canvas.width, canvas.height);\n ctx.fillStyle = c2;\n ctx.fillRect(0, 0, size, size);\n ctx.translate(size, size);\n ctx.fillRect(0, 0, size, size);\n return canvas.toDataURL();\n};\n\nexport var get = function get(c1, c2, size, serverCanvas) {\n var key = c1 + '-' + c2 + '-' + size + (serverCanvas ? 
'-server' : '');\n\n if (checkboardCache[key]) {\n return checkboardCache[key];\n }\n\n var checkboard = render(c1, c2, size, serverCanvas);\n checkboardCache[key] = checkboard;\n return checkboard;\n};","var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };\n\nimport React, { isValidElement } from 'react';\nimport reactCSS from 'reactcss';\nimport * as checkboard from '../../helpers/checkboard';\n\nexport var Checkboard = function Checkboard(_ref) {\n var white = _ref.white,\n grey = _ref.grey,\n size = _ref.size,\n renderers = _ref.renderers,\n borderRadius = _ref.borderRadius,\n boxShadow = _ref.boxShadow,\n children = _ref.children;\n\n var styles = reactCSS({\n 'default': {\n grid: {\n borderRadius: borderRadius,\n boxShadow: boxShadow,\n absolute: '0px 0px 0px 0px',\n background: 'url(' + checkboard.get(white, grey, size, renderers.canvas) + ') center left'\n }\n }\n });\n return isValidElement(children) ? React.cloneElement(children, _extends({}, children.props, { style: _extends({}, children.props.style, styles.grid) })) : React.createElement('div', { style: styles.grid });\n};\n\nCheckboard.defaultProps = {\n size: 8,\n white: 'transparent',\n grey: 'rgba(0,0,0,.08)',\n renderers: {}\n};\n\nexport default Checkboard;","var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }\n\nimport React, { Component, PureComponent } from 'react';\nimport reactCSS from 'reactcss';\nimport * as alpha from '../../helpers/alpha';\n\nimport Checkboard from './Checkboard';\n\nexport var Alpha = function (_ref) {\n _inherits(Alpha, _ref);\n\n function Alpha() {\n var _ref2;\n\n var _temp, _this, _ret;\n\n _classCallCheck(this, Alpha);\n\n for (var _len = arguments.length, args = Array(_len), _key = 0; _key < _len; _key++) {\n args[_key] = arguments[_key];\n }\n\n return _ret = (_temp = (_this = _possibleConstructorReturn(this, (_ref2 = Alpha.__proto__ || Object.getPrototypeOf(Alpha)).call.apply(_ref2, [this].concat(args))), _this), _this.handleChange = function (e) {\n var change = alpha.calculateChange(e, _this.props.hsl, _this.props.direction, _this.props.a, _this.container);\n change && typeof _this.props.onChange === 'function' && _this.props.onChange(change, e);\n }, _this.handleMouseDown = function (e) {\n _this.handleChange(e);\n window.addEventListener('mousemove', _this.handleChange);\n window.addEventListener('mouseup', _this.handleMouseUp);\n }, _this.handleMouseUp = function () {\n _this.unbindEventListeners();\n }, _this.unbindEventListeners = function () {\n window.removeEventListener('mousemove', _this.handleChange);\n window.removeEventListener('mouseup', _this.handleMouseUp);\n }, _temp), _possibleConstructorReturn(_this, _ret);\n }\n\n _createClass(Alpha, [{\n key: 'componentWillUnmount',\n value: function componentWillUnmount() {\n this.unbindEventListeners();\n }\n }, {\n key: 'render',\n value: function render() {\n var _this2 = this;\n\n var rgb = this.props.rgb;\n var styles = reactCSS({\n 'default': {\n alpha: {\n absolute: '0px 0px 0px 0px',\n borderRadius: this.props.radius\n },\n checkboard: {\n absolute: '0px 0px 0px 0px',\n overflow: 'hidden',\n borderRadius: this.props.radius\n },\n gradient: {\n absolute: '0px 0px 0px 0px',\n background: 'linear-gradient(to right, rgba(' + rgb.r + ',' + rgb.g + ',' + rgb.b + ', 0) 0%,\\n rgba(' + rgb.r + ',' + rgb.g + ',' + rgb.b + ', 1) 100%)',\n boxShadow: this.props.shadow,\n borderRadius: this.props.radius\n },\n container: {\n position: 'relative',\n height: '100%',\n margin: '0 3px'\n },\n pointer: {\n position: 'absolute',\n left: rgb.a * 100 + '%'\n },\n slider: {\n width: '4px',\n borderRadius: '1px',\n height: '8px',\n boxShadow: '0 0 2px rgba(0, 0, 0, .6)',\n background: '#fff',\n marginTop: '1px',\n transform: 'translateX(-2px)'\n }\n },\n 'vertical': {\n gradient: {\n background: 'linear-gradient(to bottom, rgba(' + rgb.r + ',' + rgb.g + ',' + rgb.b + ', 0) 0%,\\n rgba(' + rgb.r + ',' + rgb.g + ',' + rgb.b + ', 1) 100%)'\n },\n pointer: {\n left: 0,\n top: rgb.a * 100 + '%'\n }\n },\n 'overwrite': _extends({}, this.props.style)\n }, {\n vertical: this.props.direction === 'vertical',\n overwrite: true\n });\n\n return React.createElement(\n 'div',\n { style: styles.alpha },\n React.createElement(\n 'div',\n { style: styles.checkboard },\n React.createElement(Checkboard, { renderers: this.props.renderers })\n ),\n React.createElement('div', { style: styles.gradient }),\n React.createElement(\n 'div',\n {\n style: styles.container,\n ref: function ref(container) {\n return _this2.container = container;\n },\n onMouseDown: this.handleMouseDown,\n onTouchMove: this.handleChange,\n onTouchStart: this.handleChange\n },\n React.createElement(\n 'div',\n { style: styles.pointer },\n this.props.pointer ? 
React.createElement(this.props.pointer, this.props) : React.createElement('div', { style: styles.slider })\n )\n )\n );\n }\n }]);\n\n return Alpha;\n}(PureComponent || Component);\n\nexport default Alpha;","export var calculateChange = function calculateChange(e, hsl, direction, initialA, container) {\n var containerWidth = container.clientWidth;\n var containerHeight = container.clientHeight;\n var x = typeof e.pageX === 'number' ? e.pageX : e.touches[0].pageX;\n var y = typeof e.pageY === 'number' ? e.pageY : e.touches[0].pageY;\n var left = x - (container.getBoundingClientRect().left + window.pageXOffset);\n var top = y - (container.getBoundingClientRect().top + window.pageYOffset);\n\n if (direction === 'vertical') {\n var a = void 0;\n if (top < 0) {\n a = 0;\n } else if (top > containerHeight) {\n a = 1;\n } else {\n a = Math.round(top * 100 / containerHeight) / 100;\n }\n\n if (hsl.a !== a) {\n return {\n h: hsl.h,\n s: hsl.s,\n l: hsl.l,\n a: a,\n source: 'rgb'\n };\n }\n } else {\n var _a = void 0;\n if (left < 0) {\n _a = 0;\n } else if (left > containerWidth) {\n _a = 1;\n } else {\n _a = Math.round(left * 100 / containerWidth) / 100;\n }\n\n if (initialA !== _a) {\n return {\n h: hsl.h,\n s: hsl.s,\n l: hsl.l,\n a: _a,\n source: 'rgb'\n };\n }\n }\n return null;\n};","var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nfunction _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }\n\nimport React, { Component, PureComponent } from 'react';\nimport reactCSS from 'reactcss';\n\nvar DEFAULT_ARROW_OFFSET = 1;\n\nvar UP_KEY_CODE = 38;\nvar DOWN_KEY_CODE = 40;\nvar VALID_KEY_CODES = [UP_KEY_CODE, DOWN_KEY_CODE];\nvar isValidKeyCode = function isValidKeyCode(keyCode) {\n return VALID_KEY_CODES.indexOf(keyCode) > -1;\n};\nvar getNumberValue = function getNumberValue(value) {\n return Number(String(value).replace(/%/g, ''));\n};\n\nvar idCounter = 1;\n\nexport var EditableInput = function (_ref) {\n _inherits(EditableInput, _ref);\n\n function EditableInput(props) {\n _classCallCheck(this, EditableInput);\n\n var _this = _possibleConstructorReturn(this, (EditableInput.__proto__ || Object.getPrototypeOf(EditableInput)).call(this));\n\n _this.handleBlur = function () {\n if (_this.state.blurValue) {\n _this.setState({ value: _this.state.blurValue, blurValue: null });\n }\n };\n\n _this.handleChange = function (e) {\n _this.setUpdatedValue(e.target.value, e);\n };\n\n _this.handleKeyDown = function (e) {\n // In case `e.target.value` is a percentage remove the `%` character\n // and update accordingly with a percentage\n // https://github.com/casesandberg/react-color/issues/383\n var value = getNumberValue(e.target.value);\n if (!isNaN(value) && isValidKeyCode(e.keyCode)) {\n var offset = _this.getArrowOffset();\n var updatedValue = e.keyCode === UP_KEY_CODE ? value + offset : value - offset;\n\n _this.setUpdatedValue(updatedValue, e);\n }\n };\n\n _this.handleDrag = function (e) {\n if (_this.props.dragLabel) {\n var newValue = Math.round(_this.props.value + e.movementX);\n if (newValue >= 0 && newValue <= _this.props.dragMax) {\n _this.props.onChange && _this.props.onChange(_this.getValueObjectWithLabel(newValue), e);\n }\n }\n };\n\n _this.handleMouseDown = function (e) {\n if (_this.props.dragLabel) {\n e.preventDefault();\n _this.handleDrag(e);\n window.addEventListener('mousemove', _this.handleDrag);\n window.addEventListener('mouseup', _this.handleMouseUp);\n }\n };\n\n _this.handleMouseUp = function () {\n _this.unbindEventListeners();\n };\n\n _this.unbindEventListeners = function () {\n window.removeEventListener('mousemove', _this.handleDrag);\n window.removeEventListener('mouseup', _this.handleMouseUp);\n };\n\n _this.state = {\n value: String(props.value).toUpperCase(),\n blurValue: String(props.value).toUpperCase()\n };\n\n _this.inputId = 'rc-editable-input-' + idCounter++;\n return _this;\n }\n\n _createClass(EditableInput, [{\n key: 'componentDidUpdate',\n value: function componentDidUpdate(prevProps, prevState) {\n if (this.props.value !== this.state.value && (prevProps.value !== this.props.value || prevState.value !== this.state.value)) {\n if (this.input === document.activeElement) {\n this.setState({ blurValue: String(this.props.value).toUpperCase() });\n } else {\n this.setState({ value: String(this.props.value).toUpperCase(), blurValue: !this.state.blurValue && String(this.props.value).toUpperCase() });\n }\n }\n }\n }, {\n key: 'componentWillUnmount',\n value: function componentWillUnmount() {\n this.unbindEventListeners();\n }\n }, {\n key: 'getValueObjectWithLabel',\n value: function getValueObjectWithLabel(value) {\n return _defineProperty({}, this.props.label, value);\n }\n }, {\n key: 'getArrowOffset',\n value: function getArrowOffset() {\n return this.props.arrowOffset || DEFAULT_ARROW_OFFSET;\n }\n }, {\n key: 'setUpdatedValue',\n value: function 
setUpdatedValue(value, e) {\n var onChangeValue = this.props.label ? this.getValueObjectWithLabel(value) : value;\n this.props.onChange && this.props.onChange(onChangeValue, e);\n\n this.setState({ value: value });\n }\n }, {\n key: 'render',\n value: function render() {\n var _this2 = this;\n\n var styles = reactCSS({\n 'default': {\n wrap: {\n position: 'relative'\n }\n },\n 'user-override': {\n wrap: this.props.style && this.props.style.wrap ? this.props.style.wrap : {},\n input: this.props.style && this.props.style.input ? this.props.style.input : {},\n label: this.props.style && this.props.style.label ? this.props.style.label : {}\n },\n 'dragLabel-true': {\n label: {\n cursor: 'ew-resize'\n }\n }\n }, {\n 'user-override': true\n }, this.props);\n\n return React.createElement(\n 'div',\n { style: styles.wrap },\n React.createElement('input', {\n id: this.inputId,\n style: styles.input,\n ref: function ref(input) {\n return _this2.input = input;\n },\n value: this.state.value,\n onKeyDown: this.handleKeyDown,\n onChange: this.handleChange,\n onBlur: this.handleBlur,\n placeholder: this.props.placeholder,\n spellCheck: 'false'\n }),\n this.props.label && !this.props.hideLabel ? React.createElement(\n 'label',\n {\n htmlFor: this.inputId,\n style: styles.label,\n onMouseDown: this.handleMouseDown\n },\n this.props.label\n ) : null\n );\n }\n }]);\n\n return EditableInput;\n}(PureComponent || Component);\n\nexport default EditableInput;","var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }\n\nimport React, { Component, PureComponent } from 'react';\nimport reactCSS from 'reactcss';\nimport * as hue from '../../helpers/hue';\n\nexport var Hue = function (_ref) {\n _inherits(Hue, _ref);\n\n function Hue() {\n var _ref2;\n\n var _temp, _this, _ret;\n\n _classCallCheck(this, Hue);\n\n for (var _len = arguments.length, args = Array(_len), _key = 0; _key < _len; _key++) {\n args[_key] = arguments[_key];\n }\n\n return _ret = (_temp = (_this = _possibleConstructorReturn(this, (_ref2 = Hue.__proto__ || Object.getPrototypeOf(Hue)).call.apply(_ref2, [this].concat(args))), _this), _this.handleChange = function (e) {\n var change = hue.calculateChange(e, _this.props.direction, _this.props.hsl, _this.container);\n change && typeof _this.props.onChange === 'function' && _this.props.onChange(change, e);\n }, _this.handleMouseDown = function (e) {\n _this.handleChange(e);\n window.addEventListener('mousemove', _this.handleChange);\n window.addEventListener('mouseup', _this.handleMouseUp);\n }, _this.handleMouseUp = function () {\n _this.unbindEventListeners();\n }, _temp), _possibleConstructorReturn(_this, _ret);\n }\n\n _createClass(Hue, [{\n key: 'componentWillUnmount',\n value: function componentWillUnmount() {\n this.unbindEventListeners();\n }\n }, {\n key: 'unbindEventListeners',\n value: function unbindEventListeners() {\n window.removeEventListener('mousemove', this.handleChange);\n window.removeEventListener('mouseup', this.handleMouseUp);\n }\n }, {\n key: 'render',\n value: function render() {\n var _this2 = this;\n\n var _props$direction = this.props.direction,\n direction = _props$direction === undefined ? 'horizontal' : _props$direction;\n\n\n var styles = reactCSS({\n 'default': {\n hue: {\n absolute: '0px 0px 0px 0px',\n borderRadius: this.props.radius,\n boxShadow: this.props.shadow\n },\n container: {\n padding: '0 2px',\n position: 'relative',\n height: '100%',\n borderRadius: this.props.radius\n },\n pointer: {\n position: 'absolute',\n left: this.props.hsl.h * 100 / 360 + '%'\n },\n slider: {\n marginTop: '1px',\n width: '4px',\n borderRadius: '1px',\n height: '8px',\n boxShadow: '0 0 2px rgba(0, 0, 0, .6)',\n background: '#fff',\n transform: 'translateX(-2px)'\n }\n },\n 'vertical': {\n pointer: {\n left: '0px',\n top: -(this.props.hsl.h * 100 / 360) + 100 + '%'\n }\n }\n }, { vertical: direction === 'vertical' });\n\n return React.createElement(\n 'div',\n { style: styles.hue },\n React.createElement(\n 'div',\n {\n className: 'hue-' + direction,\n style: styles.container,\n ref: function ref(container) {\n return _this2.container = container;\n },\n onMouseDown: this.handleMouseDown,\n onTouchMove: this.handleChange,\n onTouchStart: this.handleChange\n },\n React.createElement(\n 'style',\n null,\n '\\n .hue-horizontal {\\n background: linear-gradient(to right, #f00 0%, #ff0 17%, #0f0\\n 33%, #0ff 50%, #00f 67%, #f0f 83%, #f00 100%);\\n background: -webkit-linear-gradient(to right, #f00 0%, #ff0\\n 17%, #0f0 33%, #0ff 50%, #00f 67%, #f0f 83%, #f00 100%);\\n }\\n\\n .hue-vertical {\\n background: linear-gradient(to top, #f00 0%, #ff0 17%, #0f0 33%,\\n #0ff 50%, #00f 67%, #f0f 83%, #f00 100%);\\n background: -webkit-linear-gradient(to top, #f00 0%, #ff0 17%,\\n #0f0 33%, #0ff 50%, #00f 67%, #f0f 83%, #f00 100%);\\n }\\n '\n ),\n React.createElement(\n 'div',\n { style: styles.pointer },\n this.props.pointer ? 
React.createElement(this.props.pointer, this.props) : React.createElement('div', { style: styles.slider })\n )\n )\n );\n }\n }]);\n\n return Hue;\n}(PureComponent || Component);\n\nexport default Hue;","export var calculateChange = function calculateChange(e, direction, hsl, container) {\n var containerWidth = container.clientWidth;\n var containerHeight = container.clientHeight;\n var x = typeof e.pageX === 'number' ? e.pageX : e.touches[0].pageX;\n var y = typeof e.pageY === 'number' ? e.pageY : e.touches[0].pageY;\n var left = x - (container.getBoundingClientRect().left + window.pageXOffset);\n var top = y - (container.getBoundingClientRect().top + window.pageYOffset);\n\n if (direction === 'vertical') {\n var h = void 0;\n if (top < 0) {\n h = 359;\n } else if (top > containerHeight) {\n h = 0;\n } else {\n var percent = -(top * 100 / containerHeight) + 100;\n h = 360 * percent / 100;\n }\n\n if (hsl.h !== h) {\n return {\n h: h,\n s: hsl.s,\n l: hsl.l,\n a: hsl.a,\n source: 'hsl'\n };\n }\n } else {\n var _h = void 0;\n if (left < 0) {\n _h = 0;\n } else if (left > containerWidth) {\n _h = 359;\n } else {\n var _percent = left * 100 / containerWidth;\n _h = 360 * _percent / 100;\n }\n\n if (hsl.h !== _h) {\n return {\n h: _h,\n s: hsl.s,\n l: hsl.l,\n a: hsl.a,\n source: 'hsl'\n };\n }\n }\n return null;\n};","/**\n * Removes all key-value entries from the list cache.\n *\n * @private\n * @name clear\n * @memberOf ListCache\n */\nfunction listCacheClear() {\n this.__data__ = [];\n this.size = 0;\n}\n\nexport default listCacheClear;\n","/**\n * Performs a\n * [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)\n * comparison between two values to determine if they are equivalent.\n *\n * @static\n * @memberOf _\n * @since 4.0.0\n * @category Lang\n * @param {*} value The value to compare.\n * @param {*} other The other value to compare.\n * @returns {boolean} Returns `true` if the values are equivalent, else `false`.\n * @example\n *\n * var object = { 'a': 1 };\n * var other = { 'a': 1 };\n *\n * _.eq(object, object);\n * // => true\n *\n * _.eq(object, other);\n * // => false\n *\n * _.eq('a', 'a');\n * // => true\n *\n * _.eq('a', Object('a'));\n * // => false\n *\n * _.eq(NaN, NaN);\n * // => true\n */\nfunction eq(value, other) {\n return value === other || (value !== value && other !== other);\n}\n\nexport default eq;\n","import eq from './eq.js';\n\n/**\n * Gets the index at which the `key` is found in `array` of key-value pairs.\n *\n * @private\n * @param {Array} array The array to inspect.\n * @param {*} key The key to search for.\n * @returns {number} Returns the index of the matched value, else `-1`.\n */\nfunction assocIndexOf(array, key) {\n var length = array.length;\n while (length--) {\n if (eq(array[length][0], key)) {\n return length;\n }\n }\n return -1;\n}\n\nexport default assocIndexOf;\n","import assocIndexOf from './_assocIndexOf.js';\n\n/** Used for built-in method references. */\nvar arrayProto = Array.prototype;\n\n/** Built-in value references. 
*/\nvar splice = arrayProto.splice;\n\n/**\n * Removes `key` and its value from the list cache.\n *\n * @private\n * @name delete\n * @memberOf ListCache\n * @param {string} key The key of the value to remove.\n * @returns {boolean} Returns `true` if the entry was removed, else `false`.\n */\nfunction listCacheDelete(key) {\n var data = this.__data__,\n index = assocIndexOf(data, key);\n\n if (index < 0) {\n return false;\n }\n var lastIndex = data.length - 1;\n if (index == lastIndex) {\n data.pop();\n } else {\n splice.call(data, index, 1);\n }\n --this.size;\n return true;\n}\n\nexport default listCacheDelete;\n","import assocIndexOf from './_assocIndexOf.js';\n\n/**\n * Gets the list cache value for `key`.\n *\n * @private\n * @name get\n * @memberOf ListCache\n * @param {string} key The key of the value to get.\n * @returns {*} Returns the entry value.\n */\nfunction listCacheGet(key) {\n var data = this.__data__,\n index = assocIndexOf(data, key);\n\n return index < 0 ? undefined : data[index][1];\n}\n\nexport default listCacheGet;\n","import assocIndexOf from './_assocIndexOf.js';\n\n/**\n * Checks if a list cache value for `key` exists.\n *\n * @private\n * @name has\n * @memberOf ListCache\n * @param {string} key The key of the entry to check.\n * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.\n */\nfunction listCacheHas(key) {\n return assocIndexOf(this.__data__, key) > -1;\n}\n\nexport default listCacheHas;\n","import assocIndexOf from './_assocIndexOf.js';\n\n/**\n * Sets the list cache `key` to `value`.\n *\n * @private\n * @name set\n * @memberOf ListCache\n * @param {string} key The key of the value to set.\n * @param {*} value The value to set.\n * @returns {Object} Returns the list cache instance.\n */\nfunction listCacheSet(key, value) {\n var data = this.__data__,\n index = assocIndexOf(data, key);\n\n if (index < 0) {\n ++this.size;\n data.push([key, value]);\n } else {\n data[index][1] = value;\n }\n return this;\n}\n\nexport default listCacheSet;\n","import listCacheClear from './_listCacheClear.js';\nimport listCacheDelete from './_listCacheDelete.js';\nimport listCacheGet from './_listCacheGet.js';\nimport listCacheHas from './_listCacheHas.js';\nimport listCacheSet from './_listCacheSet.js';\n\n/**\n * Creates an list cache object.\n *\n * @private\n * @constructor\n * @param {Array} [entries] The key-value pairs to cache.\n */\nfunction ListCache(entries) {\n var index = -1,\n length = entries == null ? 
0 : entries.length;\n\n this.clear();\n while (++index < length) {\n var entry = entries[index];\n this.set(entry[0], entry[1]);\n }\n}\n\n// Add methods to `ListCache`.\nListCache.prototype.clear = listCacheClear;\nListCache.prototype['delete'] = listCacheDelete;\nListCache.prototype.get = listCacheGet;\nListCache.prototype.has = listCacheHas;\nListCache.prototype.set = listCacheSet;\n\nexport default ListCache;\n","import ListCache from './_ListCache.js';\n\n/**\n * Removes all key-value entries from the stack.\n *\n * @private\n * @name clear\n * @memberOf Stack\n */\nfunction stackClear() {\n this.__data__ = new ListCache;\n this.size = 0;\n}\n\nexport default stackClear;\n","/**\n * Removes `key` and its value from the stack.\n *\n * @private\n * @name delete\n * @memberOf Stack\n * @param {string} key The key of the value to remove.\n * @returns {boolean} Returns `true` if the entry was removed, else `false`.\n */\nfunction stackDelete(key) {\n var data = this.__data__,\n result = data['delete'](key);\n\n this.size = data.size;\n return result;\n}\n\nexport default stackDelete;\n","/**\n * Gets the stack value for `key`.\n *\n * @private\n * @name get\n * @memberOf Stack\n * @param {string} key The key of the value to get.\n * @returns {*} Returns the entry value.\n */\nfunction stackGet(key) {\n return this.__data__.get(key);\n}\n\nexport default stackGet;\n","/**\n * Checks if a stack value for `key` exists.\n *\n * @private\n * @name has\n * @memberOf Stack\n * @param {string} key The key of the entry to check.\n * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.\n */\nfunction stackHas(key) {\n return this.__data__.has(key);\n}\n\nexport default stackHas;\n","/** Detect free variable `global` from Node.js. */\nvar freeGlobal = typeof global == 'object' && global && global.Object === Object && global;\n\nexport default freeGlobal;\n","import freeGlobal from './_freeGlobal.js';\n\n/** Detect free variable `self`. */\nvar freeSelf = typeof self == 'object' && self && self.Object === Object && self;\n\n/** Used as a reference to the global object. */\nvar root = freeGlobal || freeSelf || Function('return this')();\n\nexport default root;\n","import root from './_root.js';\n\n/** Built-in value references. */\nvar Symbol = root.Symbol;\n\nexport default Symbol;\n","import Symbol from './_Symbol.js';\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Used to check objects for own properties. */\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/**\n * Used to resolve the\n * [`toStringTag`](http://ecma-international.org/ecma-262/7.0/#sec-object.prototype.tostring)\n * of values.\n */\nvar nativeObjectToString = objectProto.toString;\n\n/** Built-in value references. */\nvar symToStringTag = Symbol ? Symbol.toStringTag : undefined;\n\n/**\n * A specialized version of `baseGetTag` which ignores `Symbol.toStringTag` values.\n *\n * @private\n * @param {*} value The value to query.\n * @returns {string} Returns the raw `toStringTag`.\n */\nfunction getRawTag(value) {\n var isOwn = hasOwnProperty.call(value, symToStringTag),\n tag = value[symToStringTag];\n\n try {\n value[symToStringTag] = undefined;\n var unmasked = true;\n } catch (e) {}\n\n var result = nativeObjectToString.call(value);\n if (unmasked) {\n if (isOwn) {\n value[symToStringTag] = tag;\n } else {\n delete value[symToStringTag];\n }\n }\n return result;\n}\n\nexport default getRawTag;\n","/** Used for built-in method references. 
*/\nvar objectProto = Object.prototype;\n\n/**\n * Used to resolve the\n * [`toStringTag`](http://ecma-international.org/ecma-262/7.0/#sec-object.prototype.tostring)\n * of values.\n */\nvar nativeObjectToString = objectProto.toString;\n\n/**\n * Converts `value` to a string using `Object.prototype.toString`.\n *\n * @private\n * @param {*} value The value to convert.\n * @returns {string} Returns the converted string.\n */\nfunction objectToString(value) {\n return nativeObjectToString.call(value);\n}\n\nexport default objectToString;\n","import Symbol from './_Symbol.js';\nimport getRawTag from './_getRawTag.js';\nimport objectToString from './_objectToString.js';\n\n/** `Object#toString` result references. */\nvar nullTag = '[object Null]',\n undefinedTag = '[object Undefined]';\n\n/** Built-in value references. */\nvar symToStringTag = Symbol ? Symbol.toStringTag : undefined;\n\n/**\n * The base implementation of `getTag` without fallbacks for buggy environments.\n *\n * @private\n * @param {*} value The value to query.\n * @returns {string} Returns the `toStringTag`.\n */\nfunction baseGetTag(value) {\n if (value == null) {\n return value === undefined ? undefinedTag : nullTag;\n }\n return (symToStringTag && symToStringTag in Object(value))\n ? getRawTag(value)\n : objectToString(value);\n}\n\nexport default baseGetTag;\n","/**\n * Checks if `value` is the\n * [language type](http://www.ecma-international.org/ecma-262/7.0/#sec-ecmascript-language-types)\n * of `Object`. (e.g. arrays, functions, objects, regexes, `new Number(0)`, and `new String('')`)\n *\n * @static\n * @memberOf _\n * @since 0.1.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is an object, else `false`.\n * @example\n *\n * _.isObject({});\n * // => true\n *\n * _.isObject([1, 2, 3]);\n * // => true\n *\n * _.isObject(_.noop);\n * // => true\n *\n * _.isObject(null);\n * // => false\n */\nfunction isObject(value) {\n var type = typeof value;\n return value != null && (type == 'object' || type == 'function');\n}\n\nexport default isObject;\n","import baseGetTag from './_baseGetTag.js';\nimport isObject from './isObject.js';\n\n/** `Object#toString` result references. */\nvar asyncTag = '[object AsyncFunction]',\n funcTag = '[object Function]',\n genTag = '[object GeneratorFunction]',\n proxyTag = '[object Proxy]';\n\n/**\n * Checks if `value` is classified as a `Function` object.\n *\n * @static\n * @memberOf _\n * @since 0.1.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is a function, else `false`.\n * @example\n *\n * _.isFunction(_);\n * // => true\n *\n * _.isFunction(/abc/);\n * // => false\n */\nfunction isFunction(value) {\n if (!isObject(value)) {\n return false;\n }\n // The use of `Object#toString` avoids issues with the `typeof` operator\n // in Safari 9 which returns 'object' for typed arrays and other constructors.\n var tag = baseGetTag(value);\n return tag == funcTag || tag == genTag || tag == asyncTag || tag == proxyTag;\n}\n\nexport default isFunction;\n","import root from './_root.js';\n\n/** Used to detect overreaching core-js shims. */\nvar coreJsData = root['__core-js_shared__'];\n\nexport default coreJsData;\n","import coreJsData from './_coreJsData.js';\n\n/** Used to detect methods masquerading as native. */\nvar maskSrcKey = (function() {\n var uid = /[^.]+$/.exec(coreJsData && coreJsData.keys && coreJsData.keys.IE_PROTO || '');\n return uid ? ('Symbol(src)_1.' 
+ uid) : '';\n}());\n\n/**\n * Checks if `func` has its source masked.\n *\n * @private\n * @param {Function} func The function to check.\n * @returns {boolean} Returns `true` if `func` is masked, else `false`.\n */\nfunction isMasked(func) {\n return !!maskSrcKey && (maskSrcKey in func);\n}\n\nexport default isMasked;\n","/** Used for built-in method references. */\nvar funcProto = Function.prototype;\n\n/** Used to resolve the decompiled source of functions. */\nvar funcToString = funcProto.toString;\n\n/**\n * Converts `func` to its source code.\n *\n * @private\n * @param {Function} func The function to convert.\n * @returns {string} Returns the source code.\n */\nfunction toSource(func) {\n if (func != null) {\n try {\n return funcToString.call(func);\n } catch (e) {}\n try {\n return (func + '');\n } catch (e) {}\n }\n return '';\n}\n\nexport default toSource;\n","import isFunction from './isFunction.js';\nimport isMasked from './_isMasked.js';\nimport isObject from './isObject.js';\nimport toSource from './_toSource.js';\n\n/**\n * Used to match `RegExp`\n * [syntax characters](http://ecma-international.org/ecma-262/7.0/#sec-patterns).\n */\nvar reRegExpChar = /[\\\\^$.*+?()[\\]{}|]/g;\n\n/** Used to detect host constructors (Safari). */\nvar reIsHostCtor = /^\\[object .+?Constructor\\]$/;\n\n/** Used for built-in method references. */\nvar funcProto = Function.prototype,\n objectProto = Object.prototype;\n\n/** Used to resolve the decompiled source of functions. */\nvar funcToString = funcProto.toString;\n\n/** Used to check objects for own properties. */\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/** Used to detect if a method is native. */\nvar reIsNative = RegExp('^' +\n funcToString.call(hasOwnProperty).replace(reRegExpChar, '\\\\$&')\n .replace(/hasOwnProperty|(function).*?(?=\\\\\\()| for .+?(?=\\\\\\])/g, '$1.*?') + '$'\n);\n\n/**\n * The base implementation of `_.isNative` without bad shim checks.\n *\n * @private\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is a native function,\n * else `false`.\n */\nfunction baseIsNative(value) {\n if (!isObject(value) || isMasked(value)) {\n return false;\n }\n var pattern = isFunction(value) ? reIsNative : reIsHostCtor;\n return pattern.test(toSource(value));\n}\n\nexport default baseIsNative;\n","/**\n * Gets the value at `key` of `object`.\n *\n * @private\n * @param {Object} [object] The object to query.\n * @param {string} key The key of the property to get.\n * @returns {*} Returns the property value.\n */\nfunction getValue(object, key) {\n return object == null ? undefined : object[key];\n}\n\nexport default getValue;\n","import baseIsNative from './_baseIsNative.js';\nimport getValue from './_getValue.js';\n\n/**\n * Gets the native function at `key` of `object`.\n *\n * @private\n * @param {Object} object The object to query.\n * @param {string} key The key of the method to get.\n * @returns {*} Returns the function if it's native, else `undefined`.\n */\nfunction getNative(object, key) {\n var value = getValue(object, key);\n return baseIsNative(value) ? value : undefined;\n}\n\nexport default getNative;\n","import getNative from './_getNative.js';\nimport root from './_root.js';\n\n/* Built-in method references that are verified to be native. */\nvar Map = getNative(root, 'Map');\n\nexport default Map;\n","import getNative from './_getNative.js';\n\n/* Built-in method references that are verified to be native. 
*/\nvar nativeCreate = getNative(Object, 'create');\n\nexport default nativeCreate;\n","import nativeCreate from './_nativeCreate.js';\n\n/**\n * Removes all key-value entries from the hash.\n *\n * @private\n * @name clear\n * @memberOf Hash\n */\nfunction hashClear() {\n this.__data__ = nativeCreate ? nativeCreate(null) : {};\n this.size = 0;\n}\n\nexport default hashClear;\n","/**\n * Removes `key` and its value from the hash.\n *\n * @private\n * @name delete\n * @memberOf Hash\n * @param {Object} hash The hash to modify.\n * @param {string} key The key of the value to remove.\n * @returns {boolean} Returns `true` if the entry was removed, else `false`.\n */\nfunction hashDelete(key) {\n var result = this.has(key) && delete this.__data__[key];\n this.size -= result ? 1 : 0;\n return result;\n}\n\nexport default hashDelete;\n","import nativeCreate from './_nativeCreate.js';\n\n/** Used to stand-in for `undefined` hash values. */\nvar HASH_UNDEFINED = '__lodash_hash_undefined__';\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Used to check objects for own properties. */\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/**\n * Gets the hash value for `key`.\n *\n * @private\n * @name get\n * @memberOf Hash\n * @param {string} key The key of the value to get.\n * @returns {*} Returns the entry value.\n */\nfunction hashGet(key) {\n var data = this.__data__;\n if (nativeCreate) {\n var result = data[key];\n return result === HASH_UNDEFINED ? undefined : result;\n }\n return hasOwnProperty.call(data, key) ? data[key] : undefined;\n}\n\nexport default hashGet;\n","import nativeCreate from './_nativeCreate.js';\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Used to check objects for own properties. */\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/**\n * Checks if a hash value for `key` exists.\n *\n * @private\n * @name has\n * @memberOf Hash\n * @param {string} key The key of the entry to check.\n * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.\n */\nfunction hashHas(key) {\n var data = this.__data__;\n return nativeCreate ? (data[key] !== undefined) : hasOwnProperty.call(data, key);\n}\n\nexport default hashHas;\n","import nativeCreate from './_nativeCreate.js';\n\n/** Used to stand-in for `undefined` hash values. */\nvar HASH_UNDEFINED = '__lodash_hash_undefined__';\n\n/**\n * Sets the hash `key` to `value`.\n *\n * @private\n * @name set\n * @memberOf Hash\n * @param {string} key The key of the value to set.\n * @param {*} value The value to set.\n * @returns {Object} Returns the hash instance.\n */\nfunction hashSet(key, value) {\n var data = this.__data__;\n this.size += this.has(key) ? 0 : 1;\n data[key] = (nativeCreate && value === undefined) ? HASH_UNDEFINED : value;\n return this;\n}\n\nexport default hashSet;\n","import hashClear from './_hashClear.js';\nimport hashDelete from './_hashDelete.js';\nimport hashGet from './_hashGet.js';\nimport hashHas from './_hashHas.js';\nimport hashSet from './_hashSet.js';\n\n/**\n * Creates a hash object.\n *\n * @private\n * @constructor\n * @param {Array} [entries] The key-value pairs to cache.\n */\nfunction Hash(entries) {\n var index = -1,\n length = entries == null ? 
0 : entries.length;\n\n this.clear();\n while (++index < length) {\n var entry = entries[index];\n this.set(entry[0], entry[1]);\n }\n}\n\n// Add methods to `Hash`.\nHash.prototype.clear = hashClear;\nHash.prototype['delete'] = hashDelete;\nHash.prototype.get = hashGet;\nHash.prototype.has = hashHas;\nHash.prototype.set = hashSet;\n\nexport default Hash;\n","import Hash from './_Hash.js';\nimport ListCache from './_ListCache.js';\nimport Map from './_Map.js';\n\n/**\n * Removes all key-value entries from the map.\n *\n * @private\n * @name clear\n * @memberOf MapCache\n */\nfunction mapCacheClear() {\n this.size = 0;\n this.__data__ = {\n 'hash': new Hash,\n 'map': new (Map || ListCache),\n 'string': new Hash\n };\n}\n\nexport default mapCacheClear;\n","/**\n * Checks if `value` is suitable for use as unique object key.\n *\n * @private\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is suitable, else `false`.\n */\nfunction isKeyable(value) {\n var type = typeof value;\n return (type == 'string' || type == 'number' || type == 'symbol' || type == 'boolean')\n ? (value !== '__proto__')\n : (value === null);\n}\n\nexport default isKeyable;\n","import isKeyable from './_isKeyable.js';\n\n/**\n * Gets the data for `map`.\n *\n * @private\n * @param {Object} map The map to query.\n * @param {string} key The reference key.\n * @returns {*} Returns the map data.\n */\nfunction getMapData(map, key) {\n var data = map.__data__;\n return isKeyable(key)\n ? data[typeof key == 'string' ? 'string' : 'hash']\n : data.map;\n}\n\nexport default getMapData;\n","import getMapData from './_getMapData.js';\n\n/**\n * Removes `key` and its value from the map.\n *\n * @private\n * @name delete\n * @memberOf MapCache\n * @param {string} key The key of the value to remove.\n * @returns {boolean} Returns `true` if the entry was removed, else `false`.\n */\nfunction mapCacheDelete(key) {\n var result = getMapData(this, key)['delete'](key);\n this.size -= result ? 1 : 0;\n return result;\n}\n\nexport default mapCacheDelete;\n","import getMapData from './_getMapData.js';\n\n/**\n * Gets the map value for `key`.\n *\n * @private\n * @name get\n * @memberOf MapCache\n * @param {string} key The key of the value to get.\n * @returns {*} Returns the entry value.\n */\nfunction mapCacheGet(key) {\n return getMapData(this, key).get(key);\n}\n\nexport default mapCacheGet;\n","import getMapData from './_getMapData.js';\n\n/**\n * Checks if a map value for `key` exists.\n *\n * @private\n * @name has\n * @memberOf MapCache\n * @param {string} key The key of the entry to check.\n * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.\n */\nfunction mapCacheHas(key) {\n return getMapData(this, key).has(key);\n}\n\nexport default mapCacheHas;\n","import getMapData from './_getMapData.js';\n\n/**\n * Sets the map `key` to `value`.\n *\n * @private\n * @name set\n * @memberOf MapCache\n * @param {string} key The key of the value to set.\n * @param {*} value The value to set.\n * @returns {Object} Returns the map cache instance.\n */\nfunction mapCacheSet(key, value) {\n var data = getMapData(this, key),\n size = data.size;\n\n data.set(key, value);\n this.size += data.size == size ? 
0 : 1;\n return this;\n}\n\nexport default mapCacheSet;\n","import mapCacheClear from './_mapCacheClear.js';\nimport mapCacheDelete from './_mapCacheDelete.js';\nimport mapCacheGet from './_mapCacheGet.js';\nimport mapCacheHas from './_mapCacheHas.js';\nimport mapCacheSet from './_mapCacheSet.js';\n\n/**\n * Creates a map cache object to store key-value pairs.\n *\n * @private\n * @constructor\n * @param {Array} [entries] The key-value pairs to cache.\n */\nfunction MapCache(entries) {\n var index = -1,\n length = entries == null ? 0 : entries.length;\n\n this.clear();\n while (++index < length) {\n var entry = entries[index];\n this.set(entry[0], entry[1]);\n }\n}\n\n// Add methods to `MapCache`.\nMapCache.prototype.clear = mapCacheClear;\nMapCache.prototype['delete'] = mapCacheDelete;\nMapCache.prototype.get = mapCacheGet;\nMapCache.prototype.has = mapCacheHas;\nMapCache.prototype.set = mapCacheSet;\n\nexport default MapCache;\n","import ListCache from './_ListCache.js';\nimport Map from './_Map.js';\nimport MapCache from './_MapCache.js';\n\n/** Used as the size to enable large array optimizations. */\nvar LARGE_ARRAY_SIZE = 200;\n\n/**\n * Sets the stack `key` to `value`.\n *\n * @private\n * @name set\n * @memberOf Stack\n * @param {string} key The key of the value to set.\n * @param {*} value The value to set.\n * @returns {Object} Returns the stack cache instance.\n */\nfunction stackSet(key, value) {\n var data = this.__data__;\n if (data instanceof ListCache) {\n var pairs = data.__data__;\n if (!Map || (pairs.length < LARGE_ARRAY_SIZE - 1)) {\n pairs.push([key, value]);\n this.size = ++data.size;\n return this;\n }\n data = this.__data__ = new MapCache(pairs);\n }\n data.set(key, value);\n this.size = data.size;\n return this;\n}\n\nexport default stackSet;\n","import ListCache from './_ListCache.js';\nimport stackClear from './_stackClear.js';\nimport stackDelete from './_stackDelete.js';\nimport stackGet from './_stackGet.js';\nimport stackHas from './_stackHas.js';\nimport stackSet from './_stackSet.js';\n\n/**\n * Creates a stack cache object to store key-value pairs.\n *\n * @private\n * @constructor\n * @param {Array} [entries] The key-value pairs to cache.\n */\nfunction Stack(entries) {\n var data = this.__data__ = new ListCache(entries);\n this.size = data.size;\n}\n\n// Add methods to `Stack`.\nStack.prototype.clear = stackClear;\nStack.prototype['delete'] = stackDelete;\nStack.prototype.get = stackGet;\nStack.prototype.has = stackHas;\nStack.prototype.set = stackSet;\n\nexport default Stack;\n","import getNative from './_getNative.js';\n\nvar defineProperty = (function() {\n try {\n var func = getNative(Object, 'defineProperty');\n func({}, '', {});\n return func;\n } catch (e) {}\n}());\n\nexport default defineProperty;\n","import defineProperty from './_defineProperty.js';\n\n/**\n * The base implementation of `assignValue` and `assignMergeValue` without\n * value checks.\n *\n * @private\n * @param {Object} object The object to modify.\n * @param {string} key The key of the property to assign.\n * @param {*} value The value to assign.\n */\nfunction baseAssignValue(object, key, value) {\n if (key == '__proto__' && defineProperty) {\n defineProperty(object, key, {\n 'configurable': true,\n 'enumerable': true,\n 'value': value,\n 'writable': true\n });\n } else {\n object[key] = value;\n }\n}\n\nexport default baseAssignValue;\n","import baseAssignValue from './_baseAssignValue.js';\nimport eq from './eq.js';\n\n/**\n * This function is like `assignValue` except that 
it doesn't assign\n * `undefined` values.\n *\n * @private\n * @param {Object} object The object to modify.\n * @param {string} key The key of the property to assign.\n * @param {*} value The value to assign.\n */\nfunction assignMergeValue(object, key, value) {\n if ((value !== undefined && !eq(object[key], value)) ||\n (value === undefined && !(key in object))) {\n baseAssignValue(object, key, value);\n }\n}\n\nexport default assignMergeValue;\n","/**\n * Creates a base function for methods like `_.forIn` and `_.forOwn`.\n *\n * @private\n * @param {boolean} [fromRight] Specify iterating from right to left.\n * @returns {Function} Returns the new base function.\n */\nfunction createBaseFor(fromRight) {\n return function(object, iteratee, keysFunc) {\n var index = -1,\n iterable = Object(object),\n props = keysFunc(object),\n length = props.length;\n\n while (length--) {\n var key = props[fromRight ? length : ++index];\n if (iteratee(iterable[key], key, iterable) === false) {\n break;\n }\n }\n return object;\n };\n}\n\nexport default createBaseFor;\n","import createBaseFor from './_createBaseFor.js';\n\n/**\n * The base implementation of `baseForOwn` which iterates over `object`\n * properties returned by `keysFunc` and invokes `iteratee` for each property.\n * Iteratee functions may exit iteration early by explicitly returning `false`.\n *\n * @private\n * @param {Object} object The object to iterate over.\n * @param {Function} iteratee The function invoked per iteration.\n * @param {Function} keysFunc The function to get the keys of `object`.\n * @returns {Object} Returns `object`.\n */\nvar baseFor = createBaseFor();\n\nexport default baseFor;\n","import root from './_root.js';\n\n/** Detect free variable `exports`. */\nvar freeExports = typeof exports == 'object' && exports && !exports.nodeType && exports;\n\n/** Detect free variable `module`. */\nvar freeModule = freeExports && typeof module == 'object' && module && !module.nodeType && module;\n\n/** Detect the popular CommonJS extension `module.exports`. */\nvar moduleExports = freeModule && freeModule.exports === freeExports;\n\n/** Built-in value references. */\nvar Buffer = moduleExports ? root.Buffer : undefined,\n allocUnsafe = Buffer ? Buffer.allocUnsafe : undefined;\n\n/**\n * Creates a clone of `buffer`.\n *\n * @private\n * @param {Buffer} buffer The buffer to clone.\n * @param {boolean} [isDeep] Specify a deep clone.\n * @returns {Buffer} Returns the cloned buffer.\n */\nfunction cloneBuffer(buffer, isDeep) {\n if (isDeep) {\n return buffer.slice();\n }\n var length = buffer.length,\n result = allocUnsafe ? allocUnsafe(length) : new buffer.constructor(length);\n\n buffer.copy(result);\n return result;\n}\n\nexport default cloneBuffer;\n","import root from './_root.js';\n\n/** Built-in value references. 
*/\nvar Uint8Array = root.Uint8Array;\n\nexport default Uint8Array;\n","import Uint8Array from './_Uint8Array.js';\n\n/**\n * Creates a clone of `arrayBuffer`.\n *\n * @private\n * @param {ArrayBuffer} arrayBuffer The array buffer to clone.\n * @returns {ArrayBuffer} Returns the cloned array buffer.\n */\nfunction cloneArrayBuffer(arrayBuffer) {\n var result = new arrayBuffer.constructor(arrayBuffer.byteLength);\n new Uint8Array(result).set(new Uint8Array(arrayBuffer));\n return result;\n}\n\nexport default cloneArrayBuffer;\n","import cloneArrayBuffer from './_cloneArrayBuffer.js';\n\n/**\n * Creates a clone of `typedArray`.\n *\n * @private\n * @param {Object} typedArray The typed array to clone.\n * @param {boolean} [isDeep] Specify a deep clone.\n * @returns {Object} Returns the cloned typed array.\n */\nfunction cloneTypedArray(typedArray, isDeep) {\n var buffer = isDeep ? cloneArrayBuffer(typedArray.buffer) : typedArray.buffer;\n return new typedArray.constructor(buffer, typedArray.byteOffset, typedArray.length);\n}\n\nexport default cloneTypedArray;\n","/**\n * Copies the values of `source` to `array`.\n *\n * @private\n * @param {Array} source The array to copy values from.\n * @param {Array} [array=[]] The array to copy values to.\n * @returns {Array} Returns `array`.\n */\nfunction copyArray(source, array) {\n var index = -1,\n length = source.length;\n\n array || (array = Array(length));\n while (++index < length) {\n array[index] = source[index];\n }\n return array;\n}\n\nexport default copyArray;\n","import isObject from './isObject.js';\n\n/** Built-in value references. */\nvar objectCreate = Object.create;\n\n/**\n * The base implementation of `_.create` without support for assigning\n * properties to the created object.\n *\n * @private\n * @param {Object} proto The object to inherit from.\n * @returns {Object} Returns the new object.\n */\nvar baseCreate = (function() {\n function object() {}\n return function(proto) {\n if (!isObject(proto)) {\n return {};\n }\n if (objectCreate) {\n return objectCreate(proto);\n }\n object.prototype = proto;\n var result = new object;\n object.prototype = undefined;\n return result;\n };\n}());\n\nexport default baseCreate;\n","/**\n * Creates a unary function that invokes `func` with its argument transformed.\n *\n * @private\n * @param {Function} func The function to wrap.\n * @param {Function} transform The argument transform.\n * @returns {Function} Returns the new function.\n */\nfunction overArg(func, transform) {\n return function(arg) {\n return func(transform(arg));\n };\n}\n\nexport default overArg;\n","import overArg from './_overArg.js';\n\n/** Built-in value references. */\nvar getPrototype = overArg(Object.getPrototypeOf, Object);\n\nexport default getPrototype;\n","/** Used for built-in method references. 
*/\nvar objectProto = Object.prototype;\n\n/**\n * Checks if `value` is likely a prototype object.\n *\n * @private\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is a prototype, else `false`.\n */\nfunction isPrototype(value) {\n var Ctor = value && value.constructor,\n proto = (typeof Ctor == 'function' && Ctor.prototype) || objectProto;\n\n return value === proto;\n}\n\nexport default isPrototype;\n","import baseCreate from './_baseCreate.js';\nimport getPrototype from './_getPrototype.js';\nimport isPrototype from './_isPrototype.js';\n\n/**\n * Initializes an object clone.\n *\n * @private\n * @param {Object} object The object to clone.\n * @returns {Object} Returns the initialized clone.\n */\nfunction initCloneObject(object) {\n return (typeof object.constructor == 'function' && !isPrototype(object))\n ? baseCreate(getPrototype(object))\n : {};\n}\n\nexport default initCloneObject;\n","/**\n * Checks if `value` is object-like. A value is object-like if it's not `null`\n * and has a `typeof` result of \"object\".\n *\n * @static\n * @memberOf _\n * @since 4.0.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is object-like, else `false`.\n * @example\n *\n * _.isObjectLike({});\n * // => true\n *\n * _.isObjectLike([1, 2, 3]);\n * // => true\n *\n * _.isObjectLike(_.noop);\n * // => false\n *\n * _.isObjectLike(null);\n * // => false\n */\nfunction isObjectLike(value) {\n return value != null && typeof value == 'object';\n}\n\nexport default isObjectLike;\n","import baseGetTag from './_baseGetTag.js';\nimport isObjectLike from './isObjectLike.js';\n\n/** `Object#toString` result references. */\nvar argsTag = '[object Arguments]';\n\n/**\n * The base implementation of `_.isArguments`.\n *\n * @private\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is an `arguments` object,\n */\nfunction baseIsArguments(value) {\n return isObjectLike(value) && baseGetTag(value) == argsTag;\n}\n\nexport default baseIsArguments;\n","import baseIsArguments from './_baseIsArguments.js';\nimport isObjectLike from './isObjectLike.js';\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Used to check objects for own properties. */\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/** Built-in value references. */\nvar propertyIsEnumerable = objectProto.propertyIsEnumerable;\n\n/**\n * Checks if `value` is likely an `arguments` object.\n *\n * @static\n * @memberOf _\n * @since 0.1.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is an `arguments` object,\n * else `false`.\n * @example\n *\n * _.isArguments(function() { return arguments; }());\n * // => true\n *\n * _.isArguments([1, 2, 3]);\n * // => false\n */\nvar isArguments = baseIsArguments(function() { return arguments; }()) ? 
baseIsArguments : function(value) {\n return isObjectLike(value) && hasOwnProperty.call(value, 'callee') &&\n !propertyIsEnumerable.call(value, 'callee');\n};\n\nexport default isArguments;\n","/**\n * Checks if `value` is classified as an `Array` object.\n *\n * @static\n * @memberOf _\n * @since 0.1.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is an array, else `false`.\n * @example\n *\n * _.isArray([1, 2, 3]);\n * // => true\n *\n * _.isArray(document.body.children);\n * // => false\n *\n * _.isArray('abc');\n * // => false\n *\n * _.isArray(_.noop);\n * // => false\n */\nvar isArray = Array.isArray;\n\nexport default isArray;\n","/** Used as references for various `Number` constants. */\nvar MAX_SAFE_INTEGER = 9007199254740991;\n\n/**\n * Checks if `value` is a valid array-like length.\n *\n * **Note:** This method is loosely based on\n * [`ToLength`](http://ecma-international.org/ecma-262/7.0/#sec-tolength).\n *\n * @static\n * @memberOf _\n * @since 4.0.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is a valid length, else `false`.\n * @example\n *\n * _.isLength(3);\n * // => true\n *\n * _.isLength(Number.MIN_VALUE);\n * // => false\n *\n * _.isLength(Infinity);\n * // => false\n *\n * _.isLength('3');\n * // => false\n */\nfunction isLength(value) {\n return typeof value == 'number' &&\n value > -1 && value % 1 == 0 && value <= MAX_SAFE_INTEGER;\n}\n\nexport default isLength;\n","import isFunction from './isFunction.js';\nimport isLength from './isLength.js';\n\n/**\n * Checks if `value` is array-like. A value is considered array-like if it's\n * not a function and has a `value.length` that's an integer greater than or\n * equal to `0` and less than or equal to `Number.MAX_SAFE_INTEGER`.\n *\n * @static\n * @memberOf _\n * @since 4.0.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is array-like, else `false`.\n * @example\n *\n * _.isArrayLike([1, 2, 3]);\n * // => true\n *\n * _.isArrayLike(document.body.children);\n * // => true\n *\n * _.isArrayLike('abc');\n * // => true\n *\n * _.isArrayLike(_.noop);\n * // => false\n */\nfunction isArrayLike(value) {\n return value != null && isLength(value.length) && !isFunction(value);\n}\n\nexport default isArrayLike;\n","import isArrayLike from './isArrayLike.js';\nimport isObjectLike from './isObjectLike.js';\n\n/**\n * This method is like `_.isArrayLike` except that it also checks if `value`\n * is an object.\n *\n * @static\n * @memberOf _\n * @since 4.0.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is an array-like object,\n * else `false`.\n * @example\n *\n * _.isArrayLikeObject([1, 2, 3]);\n * // => true\n *\n * _.isArrayLikeObject(document.body.children);\n * // => true\n *\n * _.isArrayLikeObject('abc');\n * // => false\n *\n * _.isArrayLikeObject(_.noop);\n * // => false\n */\nfunction isArrayLikeObject(value) {\n return isObjectLike(value) && isArrayLike(value);\n}\n\nexport default isArrayLikeObject;\n","/**\n * This method returns `false`.\n *\n * @static\n * @memberOf _\n * @since 4.13.0\n * @category Util\n * @returns {boolean} Returns `false`.\n * @example\n *\n * _.times(2, _.stubFalse);\n * // => [false, false]\n */\nfunction stubFalse() {\n return false;\n}\n\nexport default stubFalse;\n","import root from './_root.js';\nimport stubFalse from 
'./stubFalse.js';\n\n/** Detect free variable `exports`. */\nvar freeExports = typeof exports == 'object' && exports && !exports.nodeType && exports;\n\n/** Detect free variable `module`. */\nvar freeModule = freeExports && typeof module == 'object' && module && !module.nodeType && module;\n\n/** Detect the popular CommonJS extension `module.exports`. */\nvar moduleExports = freeModule && freeModule.exports === freeExports;\n\n/** Built-in value references. */\nvar Buffer = moduleExports ? root.Buffer : undefined;\n\n/* Built-in method references for those with the same name as other `lodash` methods. */\nvar nativeIsBuffer = Buffer ? Buffer.isBuffer : undefined;\n\n/**\n * Checks if `value` is a buffer.\n *\n * @static\n * @memberOf _\n * @since 4.3.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is a buffer, else `false`.\n * @example\n *\n * _.isBuffer(new Buffer(2));\n * // => true\n *\n * _.isBuffer(new Uint8Array(2));\n * // => false\n */\nvar isBuffer = nativeIsBuffer || stubFalse;\n\nexport default isBuffer;\n","import baseGetTag from './_baseGetTag.js';\nimport getPrototype from './_getPrototype.js';\nimport isObjectLike from './isObjectLike.js';\n\n/** `Object#toString` result references. */\nvar objectTag = '[object Object]';\n\n/** Used for built-in method references. */\nvar funcProto = Function.prototype,\n objectProto = Object.prototype;\n\n/** Used to resolve the decompiled source of functions. */\nvar funcToString = funcProto.toString;\n\n/** Used to check objects for own properties. */\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/** Used to infer the `Object` constructor. */\nvar objectCtorString = funcToString.call(Object);\n\n/**\n * Checks if `value` is a plain object, that is, an object created by the\n * `Object` constructor or one with a `[[Prototype]]` of `null`.\n *\n * @static\n * @memberOf _\n * @since 0.8.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is a plain object, else `false`.\n * @example\n *\n * function Foo() {\n * this.a = 1;\n * }\n *\n * _.isPlainObject(new Foo);\n * // => false\n *\n * _.isPlainObject([1, 2, 3]);\n * // => false\n *\n * _.isPlainObject({ 'x': 0, 'y': 0 });\n * // => true\n *\n * _.isPlainObject(Object.create(null));\n * // => true\n */\nfunction isPlainObject(value) {\n if (!isObjectLike(value) || baseGetTag(value) != objectTag) {\n return false;\n }\n var proto = getPrototype(value);\n if (proto === null) {\n return true;\n }\n var Ctor = hasOwnProperty.call(proto, 'constructor') && proto.constructor;\n return typeof Ctor == 'function' && Ctor instanceof Ctor &&\n funcToString.call(Ctor) == objectCtorString;\n}\n\nexport default isPlainObject;\n","import baseGetTag from './_baseGetTag.js';\nimport isLength from './isLength.js';\nimport isObjectLike from './isObjectLike.js';\n\n/** `Object#toString` result references. 
*/\nvar argsTag = '[object Arguments]',\n arrayTag = '[object Array]',\n boolTag = '[object Boolean]',\n dateTag = '[object Date]',\n errorTag = '[object Error]',\n funcTag = '[object Function]',\n mapTag = '[object Map]',\n numberTag = '[object Number]',\n objectTag = '[object Object]',\n regexpTag = '[object RegExp]',\n setTag = '[object Set]',\n stringTag = '[object String]',\n weakMapTag = '[object WeakMap]';\n\nvar arrayBufferTag = '[object ArrayBuffer]',\n dataViewTag = '[object DataView]',\n float32Tag = '[object Float32Array]',\n float64Tag = '[object Float64Array]',\n int8Tag = '[object Int8Array]',\n int16Tag = '[object Int16Array]',\n int32Tag = '[object Int32Array]',\n uint8Tag = '[object Uint8Array]',\n uint8ClampedTag = '[object Uint8ClampedArray]',\n uint16Tag = '[object Uint16Array]',\n uint32Tag = '[object Uint32Array]';\n\n/** Used to identify `toStringTag` values of typed arrays. */\nvar typedArrayTags = {};\ntypedArrayTags[float32Tag] = typedArrayTags[float64Tag] =\ntypedArrayTags[int8Tag] = typedArrayTags[int16Tag] =\ntypedArrayTags[int32Tag] = typedArrayTags[uint8Tag] =\ntypedArrayTags[uint8ClampedTag] = typedArrayTags[uint16Tag] =\ntypedArrayTags[uint32Tag] = true;\ntypedArrayTags[argsTag] = typedArrayTags[arrayTag] =\ntypedArrayTags[arrayBufferTag] = typedArrayTags[boolTag] =\ntypedArrayTags[dataViewTag] = typedArrayTags[dateTag] =\ntypedArrayTags[errorTag] = typedArrayTags[funcTag] =\ntypedArrayTags[mapTag] = typedArrayTags[numberTag] =\ntypedArrayTags[objectTag] = typedArrayTags[regexpTag] =\ntypedArrayTags[setTag] = typedArrayTags[stringTag] =\ntypedArrayTags[weakMapTag] = false;\n\n/**\n * The base implementation of `_.isTypedArray` without Node.js optimizations.\n *\n * @private\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is a typed array, else `false`.\n */\nfunction baseIsTypedArray(value) {\n return isObjectLike(value) &&\n isLength(value.length) && !!typedArrayTags[baseGetTag(value)];\n}\n\nexport default baseIsTypedArray;\n","/**\n * The base implementation of `_.unary` without support for storing metadata.\n *\n * @private\n * @param {Function} func The function to cap arguments for.\n * @returns {Function} Returns the new capped function.\n */\nfunction baseUnary(func) {\n return function(value) {\n return func(value);\n };\n}\n\nexport default baseUnary;\n","import freeGlobal from './_freeGlobal.js';\n\n/** Detect free variable `exports`. */\nvar freeExports = typeof exports == 'object' && exports && !exports.nodeType && exports;\n\n/** Detect free variable `module`. */\nvar freeModule = freeExports && typeof module == 'object' && module && !module.nodeType && module;\n\n/** Detect the popular CommonJS extension `module.exports`. */\nvar moduleExports = freeModule && freeModule.exports === freeExports;\n\n/** Detect free variable `process` from Node.js. */\nvar freeProcess = moduleExports && freeGlobal.process;\n\n/** Used to access faster Node.js helpers. */\nvar nodeUtil = (function() {\n try {\n // Use `util.types` for Node.js 10+.\n var types = freeModule && freeModule.require && freeModule.require('util').types;\n\n if (types) {\n return types;\n }\n\n // Legacy `process.binding('util')` for Node.js < 10.\n return freeProcess && freeProcess.binding && freeProcess.binding('util');\n } catch (e) {}\n}());\n\nexport default nodeUtil;\n","import baseIsTypedArray from './_baseIsTypedArray.js';\nimport baseUnary from './_baseUnary.js';\nimport nodeUtil from './_nodeUtil.js';\n\n/* Node.js helper references. 
*/\nvar nodeIsTypedArray = nodeUtil && nodeUtil.isTypedArray;\n\n/**\n * Checks if `value` is classified as a typed array.\n *\n * @static\n * @memberOf _\n * @since 3.0.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is a typed array, else `false`.\n * @example\n *\n * _.isTypedArray(new Uint8Array);\n * // => true\n *\n * _.isTypedArray([]);\n * // => false\n */\nvar isTypedArray = nodeIsTypedArray ? baseUnary(nodeIsTypedArray) : baseIsTypedArray;\n\nexport default isTypedArray;\n","/**\n * Gets the value at `key`, unless `key` is \"__proto__\" or \"constructor\".\n *\n * @private\n * @param {Object} object The object to query.\n * @param {string} key The key of the property to get.\n * @returns {*} Returns the property value.\n */\nfunction safeGet(object, key) {\n if (key === 'constructor' && typeof object[key] === 'function') {\n return;\n }\n\n if (key == '__proto__') {\n return;\n }\n\n return object[key];\n}\n\nexport default safeGet;\n","import baseAssignValue from './_baseAssignValue.js';\nimport eq from './eq.js';\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Used to check objects for own properties. */\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/**\n * Assigns `value` to `key` of `object` if the existing value is not equivalent\n * using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)\n * for equality comparisons.\n *\n * @private\n * @param {Object} object The object to modify.\n * @param {string} key The key of the property to assign.\n * @param {*} value The value to assign.\n */\nfunction assignValue(object, key, value) {\n var objValue = object[key];\n if (!(hasOwnProperty.call(object, key) && eq(objValue, value)) ||\n (value === undefined && !(key in object))) {\n baseAssignValue(object, key, value);\n }\n}\n\nexport default assignValue;\n","import assignValue from './_assignValue.js';\nimport baseAssignValue from './_baseAssignValue.js';\n\n/**\n * Copies properties of `source` to `object`.\n *\n * @private\n * @param {Object} source The object to copy properties from.\n * @param {Array} props The property identifiers to copy.\n * @param {Object} [object={}] The object to copy properties to.\n * @param {Function} [customizer] The function to customize copied values.\n * @returns {Object} Returns `object`.\n */\nfunction copyObject(source, props, object, customizer) {\n var isNew = !object;\n object || (object = {});\n\n var index = -1,\n length = props.length;\n\n while (++index < length) {\n var key = props[index];\n\n var newValue = customizer\n ? customizer(object[key], source[key], key, object, source)\n : undefined;\n\n if (newValue === undefined) {\n newValue = source[key];\n }\n if (isNew) {\n baseAssignValue(object, key, newValue);\n } else {\n assignValue(object, key, newValue);\n }\n }\n return object;\n}\n\nexport default copyObject;\n","/**\n * The base implementation of `_.times` without support for iteratee shorthands\n * or max array length checks.\n *\n * @private\n * @param {number} n The number of times to invoke `iteratee`.\n * @param {Function} iteratee The function invoked per iteration.\n * @returns {Array} Returns the array of results.\n */\nfunction baseTimes(n, iteratee) {\n var index = -1,\n result = Array(n);\n\n while (++index < n) {\n result[index] = iteratee(index);\n }\n return result;\n}\n\nexport default baseTimes;\n","/** Used as references for various `Number` constants. 
*/\nvar MAX_SAFE_INTEGER = 9007199254740991;\n\n/** Used to detect unsigned integer values. */\nvar reIsUint = /^(?:0|[1-9]\\d*)$/;\n\n/**\n * Checks if `value` is a valid array-like index.\n *\n * @private\n * @param {*} value The value to check.\n * @param {number} [length=MAX_SAFE_INTEGER] The upper bounds of a valid index.\n * @returns {boolean} Returns `true` if `value` is a valid index, else `false`.\n */\nfunction isIndex(value, length) {\n var type = typeof value;\n length = length == null ? MAX_SAFE_INTEGER : length;\n\n return !!length &&\n (type == 'number' ||\n (type != 'symbol' && reIsUint.test(value))) &&\n (value > -1 && value % 1 == 0 && value < length);\n}\n\nexport default isIndex;\n","import baseTimes from './_baseTimes.js';\nimport isArguments from './isArguments.js';\nimport isArray from './isArray.js';\nimport isBuffer from './isBuffer.js';\nimport isIndex from './_isIndex.js';\nimport isTypedArray from './isTypedArray.js';\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Used to check objects for own properties. */\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/**\n * Creates an array of the enumerable property names of the array-like `value`.\n *\n * @private\n * @param {*} value The value to query.\n * @param {boolean} inherited Specify returning inherited property names.\n * @returns {Array} Returns the array of property names.\n */\nfunction arrayLikeKeys(value, inherited) {\n var isArr = isArray(value),\n isArg = !isArr && isArguments(value),\n isBuff = !isArr && !isArg && isBuffer(value),\n isType = !isArr && !isArg && !isBuff && isTypedArray(value),\n skipIndexes = isArr || isArg || isBuff || isType,\n result = skipIndexes ? baseTimes(value.length, String) : [],\n length = result.length;\n\n for (var key in value) {\n if ((inherited || hasOwnProperty.call(value, key)) &&\n !(skipIndexes && (\n // Safari 9 has enumerable `arguments.length` in strict mode.\n key == 'length' ||\n // Node.js 0.10 has enumerable non-index properties on buffers.\n (isBuff && (key == 'offset' || key == 'parent')) ||\n // PhantomJS 2 has enumerable non-index properties on typed arrays.\n (isType && (key == 'buffer' || key == 'byteLength' || key == 'byteOffset')) ||\n // Skip index properties.\n isIndex(key, length)\n ))) {\n result.push(key);\n }\n }\n return result;\n}\n\nexport default arrayLikeKeys;\n","/**\n * This function is like\n * [`Object.keys`](http://ecma-international.org/ecma-262/7.0/#sec-object.keys)\n * except that it includes inherited enumerable properties.\n *\n * @private\n * @param {Object} object The object to query.\n * @returns {Array} Returns the array of property names.\n */\nfunction nativeKeysIn(object) {\n var result = [];\n if (object != null) {\n for (var key in Object(object)) {\n result.push(key);\n }\n }\n return result;\n}\n\nexport default nativeKeysIn;\n","import isObject from './isObject.js';\nimport isPrototype from './_isPrototype.js';\nimport nativeKeysIn from './_nativeKeysIn.js';\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Used to check objects for own properties. 
*/\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/**\n * The base implementation of `_.keysIn` which doesn't treat sparse arrays as dense.\n *\n * @private\n * @param {Object} object The object to query.\n * @returns {Array} Returns the array of property names.\n */\nfunction baseKeysIn(object) {\n if (!isObject(object)) {\n return nativeKeysIn(object);\n }\n var isProto = isPrototype(object),\n result = [];\n\n for (var key in object) {\n if (!(key == 'constructor' && (isProto || !hasOwnProperty.call(object, key)))) {\n result.push(key);\n }\n }\n return result;\n}\n\nexport default baseKeysIn;\n","import arrayLikeKeys from './_arrayLikeKeys.js';\nimport baseKeysIn from './_baseKeysIn.js';\nimport isArrayLike from './isArrayLike.js';\n\n/**\n * Creates an array of the own and inherited enumerable property names of `object`.\n *\n * **Note:** Non-object values are coerced to objects.\n *\n * @static\n * @memberOf _\n * @since 3.0.0\n * @category Object\n * @param {Object} object The object to query.\n * @returns {Array} Returns the array of property names.\n * @example\n *\n * function Foo() {\n * this.a = 1;\n * this.b = 2;\n * }\n *\n * Foo.prototype.c = 3;\n *\n * _.keysIn(new Foo);\n * // => ['a', 'b', 'c'] (iteration order is not guaranteed)\n */\nfunction keysIn(object) {\n return isArrayLike(object) ? arrayLikeKeys(object, true) : baseKeysIn(object);\n}\n\nexport default keysIn;\n","import copyObject from './_copyObject.js';\nimport keysIn from './keysIn.js';\n\n/**\n * Converts `value` to a plain object flattening inherited enumerable string\n * keyed properties of `value` to own properties of the plain object.\n *\n * @static\n * @memberOf _\n * @since 3.0.0\n * @category Lang\n * @param {*} value The value to convert.\n * @returns {Object} Returns the converted plain object.\n * @example\n *\n * function Foo() {\n * this.b = 2;\n * }\n *\n * Foo.prototype.c = 3;\n *\n * _.assign({ 'a': 1 }, new Foo);\n * // => { 'a': 1, 'b': 2 }\n *\n * _.assign({ 'a': 1 }, _.toPlainObject(new Foo));\n * // => { 'a': 1, 'b': 2, 'c': 3 }\n */\nfunction toPlainObject(value) {\n return copyObject(value, keysIn(value));\n}\n\nexport default toPlainObject;\n","import assignMergeValue from './_assignMergeValue.js';\nimport cloneBuffer from './_cloneBuffer.js';\nimport cloneTypedArray from './_cloneTypedArray.js';\nimport copyArray from './_copyArray.js';\nimport initCloneObject from './_initCloneObject.js';\nimport isArguments from './isArguments.js';\nimport isArray from './isArray.js';\nimport isArrayLikeObject from './isArrayLikeObject.js';\nimport isBuffer from './isBuffer.js';\nimport isFunction from './isFunction.js';\nimport isObject from './isObject.js';\nimport isPlainObject from './isPlainObject.js';\nimport isTypedArray from './isTypedArray.js';\nimport safeGet from './_safeGet.js';\nimport toPlainObject from './toPlainObject.js';\n\n/**\n * A specialized version of `baseMerge` for arrays and objects which performs\n * deep merges and tracks traversed objects enabling objects with circular\n * references to be merged.\n *\n * @private\n * @param {Object} object The destination object.\n * @param {Object} source The source object.\n * @param {string} key The key of the value to merge.\n * @param {number} srcIndex The index of `source`.\n * @param {Function} mergeFunc The function to merge values.\n * @param {Function} [customizer] The function to customize assigned values.\n * @param {Object} [stack] Tracks traversed source values and their merged\n * counterparts.\n */\nfunction 
baseMergeDeep(object, source, key, srcIndex, mergeFunc, customizer, stack) {\n var objValue = safeGet(object, key),\n srcValue = safeGet(source, key),\n stacked = stack.get(srcValue);\n\n if (stacked) {\n assignMergeValue(object, key, stacked);\n return;\n }\n var newValue = customizer\n ? customizer(objValue, srcValue, (key + ''), object, source, stack)\n : undefined;\n\n var isCommon = newValue === undefined;\n\n if (isCommon) {\n var isArr = isArray(srcValue),\n isBuff = !isArr && isBuffer(srcValue),\n isTyped = !isArr && !isBuff && isTypedArray(srcValue);\n\n newValue = srcValue;\n if (isArr || isBuff || isTyped) {\n if (isArray(objValue)) {\n newValue = objValue;\n }\n else if (isArrayLikeObject(objValue)) {\n newValue = copyArray(objValue);\n }\n else if (isBuff) {\n isCommon = false;\n newValue = cloneBuffer(srcValue, true);\n }\n else if (isTyped) {\n isCommon = false;\n newValue = cloneTypedArray(srcValue, true);\n }\n else {\n newValue = [];\n }\n }\n else if (isPlainObject(srcValue) || isArguments(srcValue)) {\n newValue = objValue;\n if (isArguments(objValue)) {\n newValue = toPlainObject(objValue);\n }\n else if (!isObject(objValue) || isFunction(objValue)) {\n newValue = initCloneObject(srcValue);\n }\n }\n else {\n isCommon = false;\n }\n }\n if (isCommon) {\n // Recursively merge objects and arrays (susceptible to call stack limits).\n stack.set(srcValue, newValue);\n mergeFunc(newValue, srcValue, srcIndex, customizer, stack);\n stack['delete'](srcValue);\n }\n assignMergeValue(object, key, newValue);\n}\n\nexport default baseMergeDeep;\n","import Stack from './_Stack.js';\nimport assignMergeValue from './_assignMergeValue.js';\nimport baseFor from './_baseFor.js';\nimport baseMergeDeep from './_baseMergeDeep.js';\nimport isObject from './isObject.js';\nimport keysIn from './keysIn.js';\nimport safeGet from './_safeGet.js';\n\n/**\n * The base implementation of `_.merge` without support for multiple sources.\n *\n * @private\n * @param {Object} object The destination object.\n * @param {Object} source The source object.\n * @param {number} srcIndex The index of `source`.\n * @param {Function} [customizer] The function to customize merged values.\n * @param {Object} [stack] Tracks traversed source values and their merged\n * counterparts.\n */\nfunction baseMerge(object, source, srcIndex, customizer, stack) {\n if (object === source) {\n return;\n }\n baseFor(source, function(srcValue, key) {\n stack || (stack = new Stack);\n if (isObject(srcValue)) {\n baseMergeDeep(object, source, key, srcIndex, baseMerge, customizer, stack);\n }\n else {\n var newValue = customizer\n ? 
customizer(safeGet(object, key), srcValue, (key + ''), object, source, stack)\n : undefined;\n\n if (newValue === undefined) {\n newValue = srcValue;\n }\n assignMergeValue(object, key, newValue);\n }\n }, keysIn);\n}\n\nexport default baseMerge;\n","/**\n * This method returns the first argument it receives.\n *\n * @static\n * @since 0.1.0\n * @memberOf _\n * @category Util\n * @param {*} value Any value.\n * @returns {*} Returns `value`.\n * @example\n *\n * var object = { 'a': 1 };\n *\n * console.log(_.identity(object) === object);\n * // => true\n */\nfunction identity(value) {\n return value;\n}\n\nexport default identity;\n","/**\n * A faster alternative to `Function#apply`, this function invokes `func`\n * with the `this` binding of `thisArg` and the arguments of `args`.\n *\n * @private\n * @param {Function} func The function to invoke.\n * @param {*} thisArg The `this` binding of `func`.\n * @param {Array} args The arguments to invoke `func` with.\n * @returns {*} Returns the result of `func`.\n */\nfunction apply(func, thisArg, args) {\n switch (args.length) {\n case 0: return func.call(thisArg);\n case 1: return func.call(thisArg, args[0]);\n case 2: return func.call(thisArg, args[0], args[1]);\n case 3: return func.call(thisArg, args[0], args[1], args[2]);\n }\n return func.apply(thisArg, args);\n}\n\nexport default apply;\n","import apply from './_apply.js';\n\n/* Built-in method references for those with the same name as other `lodash` methods. */\nvar nativeMax = Math.max;\n\n/**\n * A specialized version of `baseRest` which transforms the rest array.\n *\n * @private\n * @param {Function} func The function to apply a rest parameter to.\n * @param {number} [start=func.length-1] The start position of the rest parameter.\n * @param {Function} transform The rest array transform.\n * @returns {Function} Returns the new function.\n */\nfunction overRest(func, start, transform) {\n start = nativeMax(start === undefined ? (func.length - 1) : start, 0);\n return function() {\n var args = arguments,\n index = -1,\n length = nativeMax(args.length - start, 0),\n array = Array(length);\n\n while (++index < length) {\n array[index] = args[start + index];\n }\n index = -1;\n var otherArgs = Array(start + 1);\n while (++index < start) {\n otherArgs[index] = args[index];\n }\n otherArgs[start] = transform(array);\n return apply(func, this, otherArgs);\n };\n}\n\nexport default overRest;\n","/**\n * Creates a function that returns `value`.\n *\n * @static\n * @memberOf _\n * @since 2.4.0\n * @category Util\n * @param {*} value The value to return from the new function.\n * @returns {Function} Returns the new constant function.\n * @example\n *\n * var objects = _.times(2, _.constant({ 'a': 1 }));\n *\n * console.log(objects);\n * // => [{ 'a': 1 }, { 'a': 1 }]\n *\n * console.log(objects[0] === objects[1]);\n * // => true\n */\nfunction constant(value) {\n return function() {\n return value;\n };\n}\n\nexport default constant;\n","import constant from './constant.js';\nimport defineProperty from './_defineProperty.js';\nimport identity from './identity.js';\n\n/**\n * The base implementation of `setToString` without support for hot loop shorting.\n *\n * @private\n * @param {Function} func The function to modify.\n * @param {Function} string The `toString` result.\n * @returns {Function} Returns `func`.\n */\nvar baseSetToString = !defineProperty ? 
identity : function(func, string) {\n return defineProperty(func, 'toString', {\n 'configurable': true,\n 'enumerable': false,\n 'value': constant(string),\n 'writable': true\n });\n};\n\nexport default baseSetToString;\n","/** Used to detect hot functions by number of calls within a span of milliseconds. */\nvar HOT_COUNT = 800,\n HOT_SPAN = 16;\n\n/* Built-in method references for those with the same name as other `lodash` methods. */\nvar nativeNow = Date.now;\n\n/**\n * Creates a function that'll short out and invoke `identity` instead\n * of `func` when it's called `HOT_COUNT` or more times in `HOT_SPAN`\n * milliseconds.\n *\n * @private\n * @param {Function} func The function to restrict.\n * @returns {Function} Returns the new shortable function.\n */\nfunction shortOut(func) {\n var count = 0,\n lastCalled = 0;\n\n return function() {\n var stamp = nativeNow(),\n remaining = HOT_SPAN - (stamp - lastCalled);\n\n lastCalled = stamp;\n if (remaining > 0) {\n if (++count >= HOT_COUNT) {\n return arguments[0];\n }\n } else {\n count = 0;\n }\n return func.apply(undefined, arguments);\n };\n}\n\nexport default shortOut;\n","import baseSetToString from './_baseSetToString.js';\nimport shortOut from './_shortOut.js';\n\n/**\n * Sets the `toString` method of `func` to return `string`.\n *\n * @private\n * @param {Function} func The function to modify.\n * @param {Function} string The `toString` result.\n * @returns {Function} Returns `func`.\n */\nvar setToString = shortOut(baseSetToString);\n\nexport default setToString;\n","import identity from './identity.js';\nimport overRest from './_overRest.js';\nimport setToString from './_setToString.js';\n\n/**\n * The base implementation of `_.rest` which doesn't validate or coerce arguments.\n *\n * @private\n * @param {Function} func The function to apply a rest parameter to.\n * @param {number} [start=func.length-1] The start position of the rest parameter.\n * @returns {Function} Returns the new function.\n */\nfunction baseRest(func, start) {\n return setToString(overRest(func, start, identity), func + '');\n}\n\nexport default baseRest;\n","import eq from './eq.js';\nimport isArrayLike from './isArrayLike.js';\nimport isIndex from './_isIndex.js';\nimport isObject from './isObject.js';\n\n/**\n * Checks if the given arguments are from an iteratee call.\n *\n * @private\n * @param {*} value The potential iteratee value argument.\n * @param {*} index The potential iteratee index or key argument.\n * @param {*} object The potential iteratee object argument.\n * @returns {boolean} Returns `true` if the arguments are from an iteratee call,\n * else `false`.\n */\nfunction isIterateeCall(value, index, object) {\n if (!isObject(object)) {\n return false;\n }\n var type = typeof index;\n if (type == 'number'\n ? (isArrayLike(object) && isIndex(index, object.length))\n : (type == 'string' && index in object)\n ) {\n return eq(object[index], value);\n }\n return false;\n}\n\nexport default isIterateeCall;\n","import baseRest from './_baseRest.js';\nimport isIterateeCall from './_isIterateeCall.js';\n\n/**\n * Creates a function like `_.assign`.\n *\n * @private\n * @param {Function} assigner The function to assign values.\n * @returns {Function} Returns the new assigner function.\n */\nfunction createAssigner(assigner) {\n return baseRest(function(object, sources) {\n var index = -1,\n length = sources.length,\n customizer = length > 1 ? sources[length - 1] : undefined,\n guard = length > 2 ? 
sources[2] : undefined;\n\n customizer = (assigner.length > 3 && typeof customizer == 'function')\n ? (length--, customizer)\n : undefined;\n\n if (guard && isIterateeCall(sources[0], sources[1], guard)) {\n customizer = length < 3 ? undefined : customizer;\n length = 1;\n }\n object = Object(object);\n while (++index < length) {\n var source = sources[index];\n if (source) {\n assigner(object, source, index, customizer);\n }\n }\n return object;\n });\n}\n\nexport default createAssigner;\n","import baseMerge from './_baseMerge.js';\nimport createAssigner from './_createAssigner.js';\n\n/**\n * This method is like `_.assign` except that it recursively merges own and\n * inherited enumerable string keyed properties of source objects into the\n * destination object. Source properties that resolve to `undefined` are\n * skipped if a destination value exists. Array and plain object properties\n * are merged recursively. Other objects and value types are overridden by\n * assignment. Source objects are applied from left to right. Subsequent\n * sources overwrite property assignments of previous sources.\n *\n * **Note:** This method mutates `object`.\n *\n * @static\n * @memberOf _\n * @since 0.5.0\n * @category Object\n * @param {Object} object The destination object.\n * @param {...Object} [sources] The source objects.\n * @returns {Object} Returns `object`.\n * @example\n *\n * var object = {\n * 'a': [{ 'b': 2 }, { 'd': 4 }]\n * };\n *\n * var other = {\n * 'a': [{ 'c': 3 }, { 'e': 5 }]\n * };\n *\n * _.merge(object, other);\n * // => { 'a': [{ 'b': 2, 'c': 3 }, { 'd': 4, 'e': 5 }] }\n */\nvar merge = createAssigner(function(object, source, srcIndex) {\n baseMerge(object, source, srcIndex);\n});\n\nexport default merge;\n","import React from 'react';\nimport PropTypes from 'prop-types';\nimport reactCSS from 'reactcss';\nimport merge from 'lodash-es/merge';\n\nexport var Raised = function Raised(_ref) {\n var zDepth = _ref.zDepth,\n radius = _ref.radius,\n background = _ref.background,\n children = _ref.children,\n _ref$styles = _ref.styles,\n passedStyles = _ref$styles === undefined ? 
{} : _ref$styles;\n\n var styles = reactCSS(merge({\n 'default': {\n wrap: {\n position: 'relative',\n display: 'inline-block'\n },\n content: {\n position: 'relative'\n },\n bg: {\n absolute: '0px 0px 0px 0px',\n boxShadow: '0 ' + zDepth + 'px ' + zDepth * 4 + 'px rgba(0,0,0,.24)',\n borderRadius: radius,\n background: background\n }\n },\n 'zDepth-0': {\n bg: {\n boxShadow: 'none'\n }\n },\n\n 'zDepth-1': {\n bg: {\n boxShadow: '0 2px 10px rgba(0,0,0,.12), 0 2px 5px rgba(0,0,0,.16)'\n }\n },\n 'zDepth-2': {\n bg: {\n boxShadow: '0 6px 20px rgba(0,0,0,.19), 0 8px 17px rgba(0,0,0,.2)'\n }\n },\n 'zDepth-3': {\n bg: {\n boxShadow: '0 17px 50px rgba(0,0,0,.19), 0 12px 15px rgba(0,0,0,.24)'\n }\n },\n 'zDepth-4': {\n bg: {\n boxShadow: '0 25px 55px rgba(0,0,0,.21), 0 16px 28px rgba(0,0,0,.22)'\n }\n },\n 'zDepth-5': {\n bg: {\n boxShadow: '0 40px 77px rgba(0,0,0,.22), 0 27px 24px rgba(0,0,0,.2)'\n }\n },\n 'square': {\n bg: {\n borderRadius: '0'\n }\n },\n 'circle': {\n bg: {\n borderRadius: '50%'\n }\n }\n }, passedStyles), { 'zDepth-1': zDepth === 1 });\n\n return React.createElement(\n 'div',\n { style: styles.wrap },\n React.createElement('div', { style: styles.bg }),\n React.createElement(\n 'div',\n { style: styles.content },\n children\n )\n );\n};\n\nRaised.propTypes = {\n background: PropTypes.string,\n zDepth: PropTypes.oneOf([0, 1, 2, 3, 4, 5]),\n radius: PropTypes.number,\n styles: PropTypes.object\n};\n\nRaised.defaultProps = {\n background: '#fff',\n zDepth: 1,\n radius: 2,\n styles: {}\n};\n\nexport default Raised;","import root from './_root.js';\n\n/**\n * Gets the timestamp of the number of milliseconds that have elapsed since\n * the Unix epoch (1 January 1970 00:00:00 UTC).\n *\n * @static\n * @memberOf _\n * @since 2.4.0\n * @category Date\n * @returns {number} Returns the timestamp.\n * @example\n *\n * _.defer(function(stamp) {\n * console.log(_.now() - stamp);\n * }, _.now());\n * // => Logs the number of milliseconds it took for the deferred invocation.\n */\nvar now = function() {\n return root.Date.now();\n};\n\nexport default now;\n","/** Used to match a single whitespace character. */\nvar reWhitespace = /\\s/;\n\n/**\n * Used by `_.trim` and `_.trimEnd` to get the index of the last non-whitespace\n * character of `string`.\n *\n * @private\n * @param {string} string The string to inspect.\n * @returns {number} Returns the index of the last non-whitespace character.\n */\nfunction trimmedEndIndex(string) {\n var index = string.length;\n\n while (index-- && reWhitespace.test(string.charAt(index))) {}\n return index;\n}\n\nexport default trimmedEndIndex;\n","import trimmedEndIndex from './_trimmedEndIndex.js';\n\n/** Used to match leading whitespace. */\nvar reTrimStart = /^\\s+/;\n\n/**\n * The base implementation of `_.trim`.\n *\n * @private\n * @param {string} string The string to trim.\n * @returns {string} Returns the trimmed string.\n */\nfunction baseTrim(string) {\n return string\n ? string.slice(0, trimmedEndIndex(string) + 1).replace(reTrimStart, '')\n : string;\n}\n\nexport default baseTrim;\n","import baseGetTag from './_baseGetTag.js';\nimport isObjectLike from './isObjectLike.js';\n\n/** `Object#toString` result references. 
*/\nvar symbolTag = '[object Symbol]';\n\n/**\n * Checks if `value` is classified as a `Symbol` primitive or object.\n *\n * @static\n * @memberOf _\n * @since 4.0.0\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is a symbol, else `false`.\n * @example\n *\n * _.isSymbol(Symbol.iterator);\n * // => true\n *\n * _.isSymbol('abc');\n * // => false\n */\nfunction isSymbol(value) {\n return typeof value == 'symbol' ||\n (isObjectLike(value) && baseGetTag(value) == symbolTag);\n}\n\nexport default isSymbol;\n","import baseTrim from './_baseTrim.js';\nimport isObject from './isObject.js';\nimport isSymbol from './isSymbol.js';\n\n/** Used as references for various `Number` constants. */\nvar NAN = 0 / 0;\n\n/** Used to detect bad signed hexadecimal string values. */\nvar reIsBadHex = /^[-+]0x[0-9a-f]+$/i;\n\n/** Used to detect binary string values. */\nvar reIsBinary = /^0b[01]+$/i;\n\n/** Used to detect octal string values. */\nvar reIsOctal = /^0o[0-7]+$/i;\n\n/** Built-in method references without a dependency on `root`. */\nvar freeParseInt = parseInt;\n\n/**\n * Converts `value` to a number.\n *\n * @static\n * @memberOf _\n * @since 4.0.0\n * @category Lang\n * @param {*} value The value to process.\n * @returns {number} Returns the number.\n * @example\n *\n * _.toNumber(3.2);\n * // => 3.2\n *\n * _.toNumber(Number.MIN_VALUE);\n * // => 5e-324\n *\n * _.toNumber(Infinity);\n * // => Infinity\n *\n * _.toNumber('3.2');\n * // => 3.2\n */\nfunction toNumber(value) {\n if (typeof value == 'number') {\n return value;\n }\n if (isSymbol(value)) {\n return NAN;\n }\n if (isObject(value)) {\n var other = typeof value.valueOf == 'function' ? value.valueOf() : value;\n value = isObject(other) ? (other + '') : other;\n }\n if (typeof value != 'string') {\n return value === 0 ? value : +value;\n }\n value = baseTrim(value);\n var isBinary = reIsBinary.test(value);\n return (isBinary || reIsOctal.test(value))\n ? freeParseInt(value.slice(2), isBinary ? 2 : 8)\n : (reIsBadHex.test(value) ? NAN : +value);\n}\n\nexport default toNumber;\n","import isObject from './isObject.js';\nimport now from './now.js';\nimport toNumber from './toNumber.js';\n\n/** Error message constants. */\nvar FUNC_ERROR_TEXT = 'Expected a function';\n\n/* Built-in method references for those with the same name as other `lodash` methods. */\nvar nativeMax = Math.max,\n nativeMin = Math.min;\n\n/**\n * Creates a debounced function that delays invoking `func` until after `wait`\n * milliseconds have elapsed since the last time the debounced function was\n * invoked. The debounced function comes with a `cancel` method to cancel\n * delayed `func` invocations and a `flush` method to immediately invoke them.\n * Provide `options` to indicate whether `func` should be invoked on the\n * leading and/or trailing edge of the `wait` timeout. The `func` is invoked\n * with the last arguments provided to the debounced function. 
Subsequent\n * calls to the debounced function return the result of the last `func`\n * invocation.\n *\n * **Note:** If `leading` and `trailing` options are `true`, `func` is\n * invoked on the trailing edge of the timeout only if the debounced function\n * is invoked more than once during the `wait` timeout.\n *\n * If `wait` is `0` and `leading` is `false`, `func` invocation is deferred\n * until to the next tick, similar to `setTimeout` with a timeout of `0`.\n *\n * See [David Corbacho's article](https://css-tricks.com/debouncing-throttling-explained-examples/)\n * for details over the differences between `_.debounce` and `_.throttle`.\n *\n * @static\n * @memberOf _\n * @since 0.1.0\n * @category Function\n * @param {Function} func The function to debounce.\n * @param {number} [wait=0] The number of milliseconds to delay.\n * @param {Object} [options={}] The options object.\n * @param {boolean} [options.leading=false]\n * Specify invoking on the leading edge of the timeout.\n * @param {number} [options.maxWait]\n * The maximum time `func` is allowed to be delayed before it's invoked.\n * @param {boolean} [options.trailing=true]\n * Specify invoking on the trailing edge of the timeout.\n * @returns {Function} Returns the new debounced function.\n * @example\n *\n * // Avoid costly calculations while the window size is in flux.\n * jQuery(window).on('resize', _.debounce(calculateLayout, 150));\n *\n * // Invoke `sendMail` when clicked, debouncing subsequent calls.\n * jQuery(element).on('click', _.debounce(sendMail, 300, {\n * 'leading': true,\n * 'trailing': false\n * }));\n *\n * // Ensure `batchLog` is invoked once after 1 second of debounced calls.\n * var debounced = _.debounce(batchLog, 250, { 'maxWait': 1000 });\n * var source = new EventSource('/stream');\n * jQuery(source).on('message', debounced);\n *\n * // Cancel the trailing debounced invocation.\n * jQuery(window).on('popstate', debounced.cancel);\n */\nfunction debounce(func, wait, options) {\n var lastArgs,\n lastThis,\n maxWait,\n result,\n timerId,\n lastCallTime,\n lastInvokeTime = 0,\n leading = false,\n maxing = false,\n trailing = true;\n\n if (typeof func != 'function') {\n throw new TypeError(FUNC_ERROR_TEXT);\n }\n wait = toNumber(wait) || 0;\n if (isObject(options)) {\n leading = !!options.leading;\n maxing = 'maxWait' in options;\n maxWait = maxing ? nativeMax(toNumber(options.maxWait) || 0, wait) : maxWait;\n trailing = 'trailing' in options ? !!options.trailing : trailing;\n }\n\n function invokeFunc(time) {\n var args = lastArgs,\n thisArg = lastThis;\n\n lastArgs = lastThis = undefined;\n lastInvokeTime = time;\n result = func.apply(thisArg, args);\n return result;\n }\n\n function leadingEdge(time) {\n // Reset any `maxWait` timer.\n lastInvokeTime = time;\n // Start the timer for the trailing edge.\n timerId = setTimeout(timerExpired, wait);\n // Invoke the leading edge.\n return leading ? invokeFunc(time) : result;\n }\n\n function remainingWait(time) {\n var timeSinceLastCall = time - lastCallTime,\n timeSinceLastInvoke = time - lastInvokeTime,\n timeWaiting = wait - timeSinceLastCall;\n\n return maxing\n ? 
nativeMin(timeWaiting, maxWait - timeSinceLastInvoke)\n : timeWaiting;\n }\n\n function shouldInvoke(time) {\n var timeSinceLastCall = time - lastCallTime,\n timeSinceLastInvoke = time - lastInvokeTime;\n\n // Either this is the first call, activity has stopped and we're at the\n // trailing edge, the system time has gone backwards and we're treating\n // it as the trailing edge, or we've hit the `maxWait` limit.\n return (lastCallTime === undefined || (timeSinceLastCall >= wait) ||\n (timeSinceLastCall < 0) || (maxing && timeSinceLastInvoke >= maxWait));\n }\n\n function timerExpired() {\n var time = now();\n if (shouldInvoke(time)) {\n return trailingEdge(time);\n }\n // Restart the timer.\n timerId = setTimeout(timerExpired, remainingWait(time));\n }\n\n function trailingEdge(time) {\n timerId = undefined;\n\n // Only invoke if we have `lastArgs` which means `func` has been\n // debounced at least once.\n if (trailing && lastArgs) {\n return invokeFunc(time);\n }\n lastArgs = lastThis = undefined;\n return result;\n }\n\n function cancel() {\n if (timerId !== undefined) {\n clearTimeout(timerId);\n }\n lastInvokeTime = 0;\n lastArgs = lastCallTime = lastThis = timerId = undefined;\n }\n\n function flush() {\n return timerId === undefined ? result : trailingEdge(now());\n }\n\n function debounced() {\n var time = now(),\n isInvoking = shouldInvoke(time);\n\n lastArgs = arguments;\n lastThis = this;\n lastCallTime = time;\n\n if (isInvoking) {\n if (timerId === undefined) {\n return leadingEdge(lastCallTime);\n }\n if (maxing) {\n // Handle invocations in a tight loop.\n clearTimeout(timerId);\n timerId = setTimeout(timerExpired, wait);\n return invokeFunc(lastCallTime);\n }\n }\n if (timerId === undefined) {\n timerId = setTimeout(timerExpired, wait);\n }\n return result;\n }\n debounced.cancel = cancel;\n debounced.flush = flush;\n return debounced;\n}\n\nexport default debounce;\n","import debounce from './debounce.js';\nimport isObject from './isObject.js';\n\n/** Error message constants. */\nvar FUNC_ERROR_TEXT = 'Expected a function';\n\n/**\n * Creates a throttled function that only invokes `func` at most once per\n * every `wait` milliseconds. The throttled function comes with a `cancel`\n * method to cancel delayed `func` invocations and a `flush` method to\n * immediately invoke them. Provide `options` to indicate whether `func`\n * should be invoked on the leading and/or trailing edge of the `wait`\n * timeout. The `func` is invoked with the last arguments provided to the\n * throttled function. 
Subsequent calls to the throttled function return the\n * result of the last `func` invocation.\n *\n * **Note:** If `leading` and `trailing` options are `true`, `func` is\n * invoked on the trailing edge of the timeout only if the throttled function\n * is invoked more than once during the `wait` timeout.\n *\n * If `wait` is `0` and `leading` is `false`, `func` invocation is deferred\n * until to the next tick, similar to `setTimeout` with a timeout of `0`.\n *\n * See [David Corbacho's article](https://css-tricks.com/debouncing-throttling-explained-examples/)\n * for details over the differences between `_.throttle` and `_.debounce`.\n *\n * @static\n * @memberOf _\n * @since 0.1.0\n * @category Function\n * @param {Function} func The function to throttle.\n * @param {number} [wait=0] The number of milliseconds to throttle invocations to.\n * @param {Object} [options={}] The options object.\n * @param {boolean} [options.leading=true]\n * Specify invoking on the leading edge of the timeout.\n * @param {boolean} [options.trailing=true]\n * Specify invoking on the trailing edge of the timeout.\n * @returns {Function} Returns the new throttled function.\n * @example\n *\n * // Avoid excessively updating the position while scrolling.\n * jQuery(window).on('scroll', _.throttle(updatePosition, 100));\n *\n * // Invoke `renewToken` when the click event is fired, but not more than once every 5 minutes.\n * var throttled = _.throttle(renewToken, 300000, { 'trailing': false });\n * jQuery(element).on('click', throttled);\n *\n * // Cancel the trailing throttled invocation.\n * jQuery(window).on('popstate', throttled.cancel);\n */\nfunction throttle(func, wait, options) {\n var leading = true,\n trailing = true;\n\n if (typeof func != 'function') {\n throw new TypeError(FUNC_ERROR_TEXT);\n }\n if (isObject(options)) {\n leading = 'leading' in options ? !!options.leading : leading;\n trailing = 'trailing' in options ? !!options.trailing : trailing;\n }\n return debounce(func, wait, {\n 'leading': leading,\n 'maxWait': wait,\n 'trailing': trailing\n });\n}\n\nexport default throttle;\n","var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }\n\nimport React, { Component, PureComponent } from 'react';\nimport reactCSS from 'reactcss';\nimport throttle from 'lodash-es/throttle';\nimport * as saturation from '../../helpers/saturation';\n\nexport var Saturation = function (_ref) {\n _inherits(Saturation, _ref);\n\n function Saturation(props) {\n _classCallCheck(this, Saturation);\n\n var _this = _possibleConstructorReturn(this, (Saturation.__proto__ || Object.getPrototypeOf(Saturation)).call(this, props));\n\n _this.handleChange = function (e) {\n typeof _this.props.onChange === 'function' && _this.throttle(_this.props.onChange, saturation.calculateChange(e, _this.props.hsl, _this.container), e);\n };\n\n _this.handleMouseDown = function (e) {\n _this.handleChange(e);\n var renderWindow = _this.getContainerRenderWindow();\n renderWindow.addEventListener('mousemove', _this.handleChange);\n renderWindow.addEventListener('mouseup', _this.handleMouseUp);\n };\n\n _this.handleMouseUp = function () {\n _this.unbindEventListeners();\n };\n\n _this.throttle = throttle(function (fn, data, e) {\n fn(data, e);\n }, 50);\n return _this;\n }\n\n _createClass(Saturation, [{\n key: 'componentWillUnmount',\n value: function componentWillUnmount() {\n this.throttle.cancel();\n this.unbindEventListeners();\n }\n }, {\n key: 'getContainerRenderWindow',\n value: function getContainerRenderWindow() {\n var container = this.container;\n\n var renderWindow = window;\n while (!renderWindow.document.contains(container) && renderWindow.parent !== renderWindow) {\n renderWindow = renderWindow.parent;\n }\n return renderWindow;\n }\n }, {\n key: 'unbindEventListeners',\n value: function unbindEventListeners() {\n var renderWindow = this.getContainerRenderWindow();\n renderWindow.removeEventListener('mousemove', this.handleChange);\n renderWindow.removeEventListener('mouseup', this.handleMouseUp);\n }\n }, {\n key: 'render',\n value: function render() {\n var _this2 = this;\n\n var _ref2 = this.props.style || {},\n color = _ref2.color,\n white = _ref2.white,\n black = _ref2.black,\n pointer = _ref2.pointer,\n circle = _ref2.circle;\n\n var styles = reactCSS({\n 'default': {\n color: {\n absolute: '0px 0px 0px 0px',\n background: 'hsl(' + this.props.hsl.h + ',100%, 50%)',\n borderRadius: this.props.radius\n },\n white: {\n absolute: '0px 0px 0px 0px',\n borderRadius: this.props.radius\n },\n black: {\n absolute: '0px 0px 0px 0px',\n boxShadow: this.props.shadow,\n borderRadius: this.props.radius\n },\n pointer: {\n position: 'absolute',\n top: -(this.props.hsv.v * 100) + 100 + '%',\n left: this.props.hsv.s * 100 + '%',\n cursor: 'default'\n },\n circle: {\n width: '4px',\n height: '4px',\n boxShadow: '0 0 0 1.5px #fff, inset 0 0 1px 1px rgba(0,0,0,.3),\\n 0 0 1px 2px rgba(0,0,0,.4)',\n borderRadius: '50%',\n cursor: 'hand',\n transform: 'translate(-2px, -2px)'\n }\n },\n 'custom': {\n color: color,\n white: white,\n black: black,\n pointer: pointer,\n circle: circle\n }\n }, { 'custom': !!this.props.style });\n\n return React.createElement(\n 'div',\n {\n style: styles.color,\n ref: function ref(container) {\n return _this2.container = container;\n },\n onMouseDown: this.handleMouseDown,\n onTouchMove: this.handleChange,\n onTouchStart: this.handleChange\n },\n React.createElement(\n 'style',\n null,\n '\\n .saturation-white {\\n background: -webkit-linear-gradient(to right, #fff, rgba(255,255,255,0));\\n background: linear-gradient(to right, #fff, rgba(255,255,255,0));\\n }\\n 
.saturation-black {\\n background: -webkit-linear-gradient(to top, #000, rgba(0,0,0,0));\\n background: linear-gradient(to top, #000, rgba(0,0,0,0));\\n }\\n '\n ),\n React.createElement(\n 'div',\n { style: styles.white, className: 'saturation-white' },\n React.createElement('div', { style: styles.black, className: 'saturation-black' }),\n React.createElement(\n 'div',\n { style: styles.pointer },\n this.props.pointer ? React.createElement(this.props.pointer, this.props) : React.createElement('div', { style: styles.circle })\n )\n )\n );\n }\n }]);\n\n return Saturation;\n}(PureComponent || Component);\n\nexport default Saturation;","export var calculateChange = function calculateChange(e, hsl, container) {\n var _container$getBoundin = container.getBoundingClientRect(),\n containerWidth = _container$getBoundin.width,\n containerHeight = _container$getBoundin.height;\n\n var x = typeof e.pageX === 'number' ? e.pageX : e.touches[0].pageX;\n var y = typeof e.pageY === 'number' ? e.pageY : e.touches[0].pageY;\n var left = x - (container.getBoundingClientRect().left + window.pageXOffset);\n var top = y - (container.getBoundingClientRect().top + window.pageYOffset);\n\n if (left < 0) {\n left = 0;\n } else if (left > containerWidth) {\n left = containerWidth;\n }\n\n if (top < 0) {\n top = 0;\n } else if (top > containerHeight) {\n top = containerHeight;\n }\n\n var saturation = left / containerWidth;\n var bright = 1 - top / containerHeight;\n\n return {\n h: hsl.h,\n s: saturation,\n v: bright,\n a: hsl.a,\n source: 'hsv'\n };\n};","/**\n * A specialized version of `_.forEach` for arrays without support for\n * iteratee shorthands.\n *\n * @private\n * @param {Array} [array] The array to iterate over.\n * @param {Function} iteratee The function invoked per iteration.\n * @returns {Array} Returns `array`.\n */\nfunction arrayEach(array, iteratee) {\n var index = -1,\n length = array == null ? 0 : array.length;\n\n while (++index < length) {\n if (iteratee(array[index], index, array) === false) {\n break;\n }\n }\n return array;\n}\n\nexport default arrayEach;\n","import overArg from './_overArg.js';\n\n/* Built-in method references for those with the same name as other `lodash` methods. */\nvar nativeKeys = overArg(Object.keys, Object);\n\nexport default nativeKeys;\n","import isPrototype from './_isPrototype.js';\nimport nativeKeys from './_nativeKeys.js';\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Used to check objects for own properties. */\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/**\n * The base implementation of `_.keys` which doesn't treat sparse arrays as dense.\n *\n * @private\n * @param {Object} object The object to query.\n * @returns {Array} Returns the array of property names.\n */\nfunction baseKeys(object) {\n if (!isPrototype(object)) {\n return nativeKeys(object);\n }\n var result = [];\n for (var key in Object(object)) {\n if (hasOwnProperty.call(object, key) && key != 'constructor') {\n result.push(key);\n }\n }\n return result;\n}\n\nexport default baseKeys;\n","import arrayLikeKeys from './_arrayLikeKeys.js';\nimport baseKeys from './_baseKeys.js';\nimport isArrayLike from './isArrayLike.js';\n\n/**\n * Creates an array of the own enumerable property names of `object`.\n *\n * **Note:** Non-object values are coerced to objects. 
See the\n * [ES spec](http://ecma-international.org/ecma-262/7.0/#sec-object.keys)\n * for more details.\n *\n * @static\n * @since 0.1.0\n * @memberOf _\n * @category Object\n * @param {Object} object The object to query.\n * @returns {Array} Returns the array of property names.\n * @example\n *\n * function Foo() {\n * this.a = 1;\n * this.b = 2;\n * }\n *\n * Foo.prototype.c = 3;\n *\n * _.keys(new Foo);\n * // => ['a', 'b'] (iteration order is not guaranteed)\n *\n * _.keys('hi');\n * // => ['0', '1']\n */\nfunction keys(object) {\n return isArrayLike(object) ? arrayLikeKeys(object) : baseKeys(object);\n}\n\nexport default keys;\n","import baseFor from './_baseFor.js';\nimport keys from './keys.js';\n\n/**\n * The base implementation of `_.forOwn` without support for iteratee shorthands.\n *\n * @private\n * @param {Object} object The object to iterate over.\n * @param {Function} iteratee The function invoked per iteration.\n * @returns {Object} Returns `object`.\n */\nfunction baseForOwn(object, iteratee) {\n return object && baseFor(object, iteratee, keys);\n}\n\nexport default baseForOwn;\n","import isArrayLike from './isArrayLike.js';\n\n/**\n * Creates a `baseEach` or `baseEachRight` function.\n *\n * @private\n * @param {Function} eachFunc The function to iterate over a collection.\n * @param {boolean} [fromRight] Specify iterating from right to left.\n * @returns {Function} Returns the new base function.\n */\nfunction createBaseEach(eachFunc, fromRight) {\n return function(collection, iteratee) {\n if (collection == null) {\n return collection;\n }\n if (!isArrayLike(collection)) {\n return eachFunc(collection, iteratee);\n }\n var length = collection.length,\n index = fromRight ? length : -1,\n iterable = Object(collection);\n\n while ((fromRight ? index-- : ++index < length)) {\n if (iteratee(iterable[index], index, iterable) === false) {\n break;\n }\n }\n return collection;\n };\n}\n\nexport default createBaseEach;\n","import baseForOwn from './_baseForOwn.js';\nimport createBaseEach from './_createBaseEach.js';\n\n/**\n * The base implementation of `_.forEach` without support for iteratee shorthands.\n *\n * @private\n * @param {Array|Object} collection The collection to iterate over.\n * @param {Function} iteratee The function invoked per iteration.\n * @returns {Array|Object} Returns `collection`.\n */\nvar baseEach = createBaseEach(baseForOwn);\n\nexport default baseEach;\n","import identity from './identity.js';\n\n/**\n * Casts `value` to `identity` if it's not a function.\n *\n * @private\n * @param {*} value The value to inspect.\n * @returns {Function} Returns cast function.\n */\nfunction castFunction(value) {\n return typeof value == 'function' ? value : identity;\n}\n\nexport default castFunction;\n","import arrayEach from './_arrayEach.js';\nimport baseEach from './_baseEach.js';\nimport castFunction from './_castFunction.js';\nimport isArray from './isArray.js';\n\n/**\n * Iterates over elements of `collection` and invokes `iteratee` for each element.\n * The iteratee is invoked with three arguments: (value, index|key, collection).\n * Iteratee functions may exit iteration early by explicitly returning `false`.\n *\n * **Note:** As with other \"Collections\" methods, objects with a \"length\"\n * property are iterated like arrays. 
To avoid this behavior use `_.forIn`\n * or `_.forOwn` for object iteration.\n *\n * @static\n * @memberOf _\n * @since 0.1.0\n * @alias each\n * @category Collection\n * @param {Array|Object} collection The collection to iterate over.\n * @param {Function} [iteratee=_.identity] The function invoked per iteration.\n * @returns {Array|Object} Returns `collection`.\n * @see _.forEachRight\n * @example\n *\n * _.forEach([1, 2], function(value) {\n * console.log(value);\n * });\n * // => Logs `1` then `2`.\n *\n * _.forEach({ 'a': 1, 'b': 2 }, function(value, key) {\n * console.log(key);\n * });\n * // => Logs 'a' then 'b' (iteration order is not guaranteed).\n */\nfunction forEach(collection, iteratee) {\n var func = isArray(collection) ? arrayEach : baseEach;\n return func(collection, castFunction(iteratee));\n}\n\nexport default forEach;\n","// This file is autogenerated. It's used to publish ESM to npm.\nfunction _typeof(obj) {\n \"@babel/helpers - typeof\";\n\n return _typeof = \"function\" == typeof Symbol && \"symbol\" == typeof Symbol.iterator ? function (obj) {\n return typeof obj;\n } : function (obj) {\n return obj && \"function\" == typeof Symbol && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj;\n }, _typeof(obj);\n}\n\n// https://github.com/bgrins/TinyColor\n// Brian Grinstead, MIT License\n\nvar trimLeft = /^\\s+/;\nvar trimRight = /\\s+$/;\nfunction tinycolor(color, opts) {\n color = color ? color : \"\";\n opts = opts || {};\n\n // If input is already a tinycolor, return itself\n if (color instanceof tinycolor) {\n return color;\n }\n // If we are called as a function, call using new instead\n if (!(this instanceof tinycolor)) {\n return new tinycolor(color, opts);\n }\n var rgb = inputToRGB(color);\n this._originalInput = color, this._r = rgb.r, this._g = rgb.g, this._b = rgb.b, this._a = rgb.a, this._roundA = Math.round(100 * this._a) / 100, this._format = opts.format || rgb.format;\n this._gradientType = opts.gradientType;\n\n // Don't let the range of [0,255] come back in [0,1].\n // Potentially lose a little bit of precision here, but will fix issues where\n // .5 gets interpreted as half of the total, instead of half of 1\n // If it was supposed to be 128, this was already taken care of by `inputToRgb`\n if (this._r < 1) this._r = Math.round(this._r);\n if (this._g < 1) this._g = Math.round(this._g);\n if (this._b < 1) this._b = Math.round(this._b);\n this._ok = rgb.ok;\n}\ntinycolor.prototype = {\n isDark: function isDark() {\n return this.getBrightness() < 128;\n },\n isLight: function isLight() {\n return !this.isDark();\n },\n isValid: function isValid() {\n return this._ok;\n },\n getOriginalInput: function getOriginalInput() {\n return this._originalInput;\n },\n getFormat: function getFormat() {\n return this._format;\n },\n getAlpha: function getAlpha() {\n return this._a;\n },\n getBrightness: function getBrightness() {\n //http://www.w3.org/TR/AERT#color-contrast\n var rgb = this.toRgb();\n return (rgb.r * 299 + rgb.g * 587 + rgb.b * 114) / 1000;\n },\n getLuminance: function getLuminance() {\n //http://www.w3.org/TR/2008/REC-WCAG20-20081211/#relativeluminancedef\n var rgb = this.toRgb();\n var RsRGB, GsRGB, BsRGB, R, G, B;\n RsRGB = rgb.r / 255;\n GsRGB = rgb.g / 255;\n BsRGB = rgb.b / 255;\n if (RsRGB <= 0.03928) R = RsRGB / 12.92;else R = Math.pow((RsRGB + 0.055) / 1.055, 2.4);\n if (GsRGB <= 0.03928) G = GsRGB / 12.92;else G = Math.pow((GsRGB + 0.055) / 1.055, 2.4);\n if (BsRGB <= 0.03928) B = BsRGB / 12.92;else B = 
Math.pow((BsRGB + 0.055) / 1.055, 2.4);\n return 0.2126 * R + 0.7152 * G + 0.0722 * B;\n },\n setAlpha: function setAlpha(value) {\n this._a = boundAlpha(value);\n this._roundA = Math.round(100 * this._a) / 100;\n return this;\n },\n toHsv: function toHsv() {\n var hsv = rgbToHsv(this._r, this._g, this._b);\n return {\n h: hsv.h * 360,\n s: hsv.s,\n v: hsv.v,\n a: this._a\n };\n },\n toHsvString: function toHsvString() {\n var hsv = rgbToHsv(this._r, this._g, this._b);\n var h = Math.round(hsv.h * 360),\n s = Math.round(hsv.s * 100),\n v = Math.round(hsv.v * 100);\n return this._a == 1 ? \"hsv(\" + h + \", \" + s + \"%, \" + v + \"%)\" : \"hsva(\" + h + \", \" + s + \"%, \" + v + \"%, \" + this._roundA + \")\";\n },\n toHsl: function toHsl() {\n var hsl = rgbToHsl(this._r, this._g, this._b);\n return {\n h: hsl.h * 360,\n s: hsl.s,\n l: hsl.l,\n a: this._a\n };\n },\n toHslString: function toHslString() {\n var hsl = rgbToHsl(this._r, this._g, this._b);\n var h = Math.round(hsl.h * 360),\n s = Math.round(hsl.s * 100),\n l = Math.round(hsl.l * 100);\n return this._a == 1 ? \"hsl(\" + h + \", \" + s + \"%, \" + l + \"%)\" : \"hsla(\" + h + \", \" + s + \"%, \" + l + \"%, \" + this._roundA + \")\";\n },\n toHex: function toHex(allow3Char) {\n return rgbToHex(this._r, this._g, this._b, allow3Char);\n },\n toHexString: function toHexString(allow3Char) {\n return \"#\" + this.toHex(allow3Char);\n },\n toHex8: function toHex8(allow4Char) {\n return rgbaToHex(this._r, this._g, this._b, this._a, allow4Char);\n },\n toHex8String: function toHex8String(allow4Char) {\n return \"#\" + this.toHex8(allow4Char);\n },\n toRgb: function toRgb() {\n return {\n r: Math.round(this._r),\n g: Math.round(this._g),\n b: Math.round(this._b),\n a: this._a\n };\n },\n toRgbString: function toRgbString() {\n return this._a == 1 ? \"rgb(\" + Math.round(this._r) + \", \" + Math.round(this._g) + \", \" + Math.round(this._b) + \")\" : \"rgba(\" + Math.round(this._r) + \", \" + Math.round(this._g) + \", \" + Math.round(this._b) + \", \" + this._roundA + \")\";\n },\n toPercentageRgb: function toPercentageRgb() {\n return {\n r: Math.round(bound01(this._r, 255) * 100) + \"%\",\n g: Math.round(bound01(this._g, 255) * 100) + \"%\",\n b: Math.round(bound01(this._b, 255) * 100) + \"%\",\n a: this._a\n };\n },\n toPercentageRgbString: function toPercentageRgbString() {\n return this._a == 1 ? \"rgb(\" + Math.round(bound01(this._r, 255) * 100) + \"%, \" + Math.round(bound01(this._g, 255) * 100) + \"%, \" + Math.round(bound01(this._b, 255) * 100) + \"%)\" : \"rgba(\" + Math.round(bound01(this._r, 255) * 100) + \"%, \" + Math.round(bound01(this._g, 255) * 100) + \"%, \" + Math.round(bound01(this._b, 255) * 100) + \"%, \" + this._roundA + \")\";\n },\n toName: function toName() {\n if (this._a === 0) {\n return \"transparent\";\n }\n if (this._a < 1) {\n return false;\n }\n return hexNames[rgbToHex(this._r, this._g, this._b, true)] || false;\n },\n toFilter: function toFilter(secondColor) {\n var hex8String = \"#\" + rgbaToArgbHex(this._r, this._g, this._b, this._a);\n var secondHex8String = hex8String;\n var gradientType = this._gradientType ? 
\"GradientType = 1, \" : \"\";\n if (secondColor) {\n var s = tinycolor(secondColor);\n secondHex8String = \"#\" + rgbaToArgbHex(s._r, s._g, s._b, s._a);\n }\n return \"progid:DXImageTransform.Microsoft.gradient(\" + gradientType + \"startColorstr=\" + hex8String + \",endColorstr=\" + secondHex8String + \")\";\n },\n toString: function toString(format) {\n var formatSet = !!format;\n format = format || this._format;\n var formattedString = false;\n var hasAlpha = this._a < 1 && this._a >= 0;\n var needsAlphaFormat = !formatSet && hasAlpha && (format === \"hex\" || format === \"hex6\" || format === \"hex3\" || format === \"hex4\" || format === \"hex8\" || format === \"name\");\n if (needsAlphaFormat) {\n // Special case for \"transparent\", all other non-alpha formats\n // will return rgba when there is transparency.\n if (format === \"name\" && this._a === 0) {\n return this.toName();\n }\n return this.toRgbString();\n }\n if (format === \"rgb\") {\n formattedString = this.toRgbString();\n }\n if (format === \"prgb\") {\n formattedString = this.toPercentageRgbString();\n }\n if (format === \"hex\" || format === \"hex6\") {\n formattedString = this.toHexString();\n }\n if (format === \"hex3\") {\n formattedString = this.toHexString(true);\n }\n if (format === \"hex4\") {\n formattedString = this.toHex8String(true);\n }\n if (format === \"hex8\") {\n formattedString = this.toHex8String();\n }\n if (format === \"name\") {\n formattedString = this.toName();\n }\n if (format === \"hsl\") {\n formattedString = this.toHslString();\n }\n if (format === \"hsv\") {\n formattedString = this.toHsvString();\n }\n return formattedString || this.toHexString();\n },\n clone: function clone() {\n return tinycolor(this.toString());\n },\n _applyModification: function _applyModification(fn, args) {\n var color = fn.apply(null, [this].concat([].slice.call(args)));\n this._r = color._r;\n this._g = color._g;\n this._b = color._b;\n this.setAlpha(color._a);\n return this;\n },\n lighten: function lighten() {\n return this._applyModification(_lighten, arguments);\n },\n brighten: function brighten() {\n return this._applyModification(_brighten, arguments);\n },\n darken: function darken() {\n return this._applyModification(_darken, arguments);\n },\n desaturate: function desaturate() {\n return this._applyModification(_desaturate, arguments);\n },\n saturate: function saturate() {\n return this._applyModification(_saturate, arguments);\n },\n greyscale: function greyscale() {\n return this._applyModification(_greyscale, arguments);\n },\n spin: function spin() {\n return this._applyModification(_spin, arguments);\n },\n _applyCombination: function _applyCombination(fn, args) {\n return fn.apply(null, [this].concat([].slice.call(args)));\n },\n analogous: function analogous() {\n return this._applyCombination(_analogous, arguments);\n },\n complement: function complement() {\n return this._applyCombination(_complement, arguments);\n },\n monochromatic: function monochromatic() {\n return this._applyCombination(_monochromatic, arguments);\n },\n splitcomplement: function splitcomplement() {\n return this._applyCombination(_splitcomplement, arguments);\n },\n // Disabled until https://github.com/bgrins/TinyColor/issues/254\n // polyad: function (number) {\n // return this._applyCombination(polyad, [number]);\n // },\n triad: function triad() {\n return this._applyCombination(polyad, [3]);\n },\n tetrad: function tetrad() {\n return this._applyCombination(polyad, [4]);\n }\n};\n\n// If input is an object, force 1 
into \"1.0\" to handle ratios properly\n// String input requires \"1.0\" as input, so 1 will be treated as 1\ntinycolor.fromRatio = function (color, opts) {\n if (_typeof(color) == \"object\") {\n var newColor = {};\n for (var i in color) {\n if (color.hasOwnProperty(i)) {\n if (i === \"a\") {\n newColor[i] = color[i];\n } else {\n newColor[i] = convertToPercentage(color[i]);\n }\n }\n }\n color = newColor;\n }\n return tinycolor(color, opts);\n};\n\n// Given a string or object, convert that input to RGB\n// Possible string inputs:\n//\n// \"red\"\n// \"#f00\" or \"f00\"\n// \"#ff0000\" or \"ff0000\"\n// \"#ff000000\" or \"ff000000\"\n// \"rgb 255 0 0\" or \"rgb (255, 0, 0)\"\n// \"rgb 1.0 0 0\" or \"rgb (1, 0, 0)\"\n// \"rgba (255, 0, 0, 1)\" or \"rgba 255, 0, 0, 1\"\n// \"rgba (1.0, 0, 0, 1)\" or \"rgba 1.0, 0, 0, 1\"\n// \"hsl(0, 100%, 50%)\" or \"hsl 0 100% 50%\"\n// \"hsla(0, 100%, 50%, 1)\" or \"hsla 0 100% 50%, 1\"\n// \"hsv(0, 100%, 100%)\" or \"hsv 0 100% 100%\"\n//\nfunction inputToRGB(color) {\n var rgb = {\n r: 0,\n g: 0,\n b: 0\n };\n var a = 1;\n var s = null;\n var v = null;\n var l = null;\n var ok = false;\n var format = false;\n if (typeof color == \"string\") {\n color = stringInputToObject(color);\n }\n if (_typeof(color) == \"object\") {\n if (isValidCSSUnit(color.r) && isValidCSSUnit(color.g) && isValidCSSUnit(color.b)) {\n rgb = rgbToRgb(color.r, color.g, color.b);\n ok = true;\n format = String(color.r).substr(-1) === \"%\" ? \"prgb\" : \"rgb\";\n } else if (isValidCSSUnit(color.h) && isValidCSSUnit(color.s) && isValidCSSUnit(color.v)) {\n s = convertToPercentage(color.s);\n v = convertToPercentage(color.v);\n rgb = hsvToRgb(color.h, s, v);\n ok = true;\n format = \"hsv\";\n } else if (isValidCSSUnit(color.h) && isValidCSSUnit(color.s) && isValidCSSUnit(color.l)) {\n s = convertToPercentage(color.s);\n l = convertToPercentage(color.l);\n rgb = hslToRgb(color.h, s, l);\n ok = true;\n format = \"hsl\";\n }\n if (color.hasOwnProperty(\"a\")) {\n a = color.a;\n }\n }\n a = boundAlpha(a);\n return {\n ok: ok,\n format: color.format || format,\n r: Math.min(255, Math.max(rgb.r, 0)),\n g: Math.min(255, Math.max(rgb.g, 0)),\n b: Math.min(255, Math.max(rgb.b, 0)),\n a: a\n };\n}\n\n// Conversion Functions\n// --------------------\n\n// `rgbToHsl`, `rgbToHsv`, `hslToRgb`, `hsvToRgb` modified from:\n// \n\n// `rgbToRgb`\n// Handle bounds / percentage checking to conform to CSS color spec\n// \n// *Assumes:* r, g, b in [0, 255] or [0, 1]\n// *Returns:* { r, g, b } in [0, 255]\nfunction rgbToRgb(r, g, b) {\n return {\n r: bound01(r, 255) * 255,\n g: bound01(g, 255) * 255,\n b: bound01(b, 255) * 255\n };\n}\n\n// `rgbToHsl`\n// Converts an RGB color value to HSL.\n// *Assumes:* r, g, and b are contained in [0, 255] or [0, 1]\n// *Returns:* { h, s, l } in [0,1]\nfunction rgbToHsl(r, g, b) {\n r = bound01(r, 255);\n g = bound01(g, 255);\n b = bound01(b, 255);\n var max = Math.max(r, g, b),\n min = Math.min(r, g, b);\n var h,\n s,\n l = (max + min) / 2;\n if (max == min) {\n h = s = 0; // achromatic\n } else {\n var d = max - min;\n s = l > 0.5 ? d / (2 - max - min) : d / (max + min);\n switch (max) {\n case r:\n h = (g - b) / d + (g < b ? 
6 : 0);\n break;\n case g:\n h = (b - r) / d + 2;\n break;\n case b:\n h = (r - g) / d + 4;\n break;\n }\n h /= 6;\n }\n return {\n h: h,\n s: s,\n l: l\n };\n}\n\n// `hslToRgb`\n// Converts an HSL color value to RGB.\n// *Assumes:* h is contained in [0, 1] or [0, 360] and s and l are contained [0, 1] or [0, 100]\n// *Returns:* { r, g, b } in the set [0, 255]\nfunction hslToRgb(h, s, l) {\n var r, g, b;\n h = bound01(h, 360);\n s = bound01(s, 100);\n l = bound01(l, 100);\n function hue2rgb(p, q, t) {\n if (t < 0) t += 1;\n if (t > 1) t -= 1;\n if (t < 1 / 6) return p + (q - p) * 6 * t;\n if (t < 1 / 2) return q;\n if (t < 2 / 3) return p + (q - p) * (2 / 3 - t) * 6;\n return p;\n }\n if (s === 0) {\n r = g = b = l; // achromatic\n } else {\n var q = l < 0.5 ? l * (1 + s) : l + s - l * s;\n var p = 2 * l - q;\n r = hue2rgb(p, q, h + 1 / 3);\n g = hue2rgb(p, q, h);\n b = hue2rgb(p, q, h - 1 / 3);\n }\n return {\n r: r * 255,\n g: g * 255,\n b: b * 255\n };\n}\n\n// `rgbToHsv`\n// Converts an RGB color value to HSV\n// *Assumes:* r, g, and b are contained in the set [0, 255] or [0, 1]\n// *Returns:* { h, s, v } in [0,1]\nfunction rgbToHsv(r, g, b) {\n r = bound01(r, 255);\n g = bound01(g, 255);\n b = bound01(b, 255);\n var max = Math.max(r, g, b),\n min = Math.min(r, g, b);\n var h,\n s,\n v = max;\n var d = max - min;\n s = max === 0 ? 0 : d / max;\n if (max == min) {\n h = 0; // achromatic\n } else {\n switch (max) {\n case r:\n h = (g - b) / d + (g < b ? 6 : 0);\n break;\n case g:\n h = (b - r) / d + 2;\n break;\n case b:\n h = (r - g) / d + 4;\n break;\n }\n h /= 6;\n }\n return {\n h: h,\n s: s,\n v: v\n };\n}\n\n// `hsvToRgb`\n// Converts an HSV color value to RGB.\n// *Assumes:* h is contained in [0, 1] or [0, 360] and s and v are contained in [0, 1] or [0, 100]\n// *Returns:* { r, g, b } in the set [0, 255]\nfunction hsvToRgb(h, s, v) {\n h = bound01(h, 360) * 6;\n s = bound01(s, 100);\n v = bound01(v, 100);\n var i = Math.floor(h),\n f = h - i,\n p = v * (1 - s),\n q = v * (1 - f * s),\n t = v * (1 - (1 - f) * s),\n mod = i % 6,\n r = [v, q, p, p, t, v][mod],\n g = [t, v, v, q, p, p][mod],\n b = [p, p, t, v, v, q][mod];\n return {\n r: r * 255,\n g: g * 255,\n b: b * 255\n };\n}\n\n// `rgbToHex`\n// Converts an RGB color to hex\n// Assumes r, g, and b are contained in the set [0, 255]\n// Returns a 3 or 6 character hex\nfunction rgbToHex(r, g, b, allow3Char) {\n var hex = [pad2(Math.round(r).toString(16)), pad2(Math.round(g).toString(16)), pad2(Math.round(b).toString(16))];\n\n // Return a 3 character hex if possible\n if (allow3Char && hex[0].charAt(0) == hex[0].charAt(1) && hex[1].charAt(0) == hex[1].charAt(1) && hex[2].charAt(0) == hex[2].charAt(1)) {\n return hex[0].charAt(0) + hex[1].charAt(0) + hex[2].charAt(0);\n }\n return hex.join(\"\");\n}\n\n// `rgbaToHex`\n// Converts an RGBA color plus alpha transparency to hex\n// Assumes r, g, b are contained in the set [0, 255] and\n// a in [0, 1]. 
Returns a 4 or 8 character rgba hex\nfunction rgbaToHex(r, g, b, a, allow4Char) {\n var hex = [pad2(Math.round(r).toString(16)), pad2(Math.round(g).toString(16)), pad2(Math.round(b).toString(16)), pad2(convertDecimalToHex(a))];\n\n // Return a 4 character hex if possible\n if (allow4Char && hex[0].charAt(0) == hex[0].charAt(1) && hex[1].charAt(0) == hex[1].charAt(1) && hex[2].charAt(0) == hex[2].charAt(1) && hex[3].charAt(0) == hex[3].charAt(1)) {\n return hex[0].charAt(0) + hex[1].charAt(0) + hex[2].charAt(0) + hex[3].charAt(0);\n }\n return hex.join(\"\");\n}\n\n// `rgbaToArgbHex`\n// Converts an RGBA color to an ARGB Hex8 string\n// Rarely used, but required for \"toFilter()\"\nfunction rgbaToArgbHex(r, g, b, a) {\n var hex = [pad2(convertDecimalToHex(a)), pad2(Math.round(r).toString(16)), pad2(Math.round(g).toString(16)), pad2(Math.round(b).toString(16))];\n return hex.join(\"\");\n}\n\n// `equals`\n// Can be called with any tinycolor input\ntinycolor.equals = function (color1, color2) {\n if (!color1 || !color2) return false;\n return tinycolor(color1).toRgbString() == tinycolor(color2).toRgbString();\n};\ntinycolor.random = function () {\n return tinycolor.fromRatio({\n r: Math.random(),\n g: Math.random(),\n b: Math.random()\n });\n};\n\n// Modification Functions\n// ----------------------\n// Thanks to less.js for some of the basics here\n// \n\nfunction _desaturate(color, amount) {\n amount = amount === 0 ? 0 : amount || 10;\n var hsl = tinycolor(color).toHsl();\n hsl.s -= amount / 100;\n hsl.s = clamp01(hsl.s);\n return tinycolor(hsl);\n}\nfunction _saturate(color, amount) {\n amount = amount === 0 ? 0 : amount || 10;\n var hsl = tinycolor(color).toHsl();\n hsl.s += amount / 100;\n hsl.s = clamp01(hsl.s);\n return tinycolor(hsl);\n}\nfunction _greyscale(color) {\n return tinycolor(color).desaturate(100);\n}\nfunction _lighten(color, amount) {\n amount = amount === 0 ? 0 : amount || 10;\n var hsl = tinycolor(color).toHsl();\n hsl.l += amount / 100;\n hsl.l = clamp01(hsl.l);\n return tinycolor(hsl);\n}\nfunction _brighten(color, amount) {\n amount = amount === 0 ? 0 : amount || 10;\n var rgb = tinycolor(color).toRgb();\n rgb.r = Math.max(0, Math.min(255, rgb.r - Math.round(255 * -(amount / 100))));\n rgb.g = Math.max(0, Math.min(255, rgb.g - Math.round(255 * -(amount / 100))));\n rgb.b = Math.max(0, Math.min(255, rgb.b - Math.round(255 * -(amount / 100))));\n return tinycolor(rgb);\n}\nfunction _darken(color, amount) {\n amount = amount === 0 ? 0 : amount || 10;\n var hsl = tinycolor(color).toHsl();\n hsl.l -= amount / 100;\n hsl.l = clamp01(hsl.l);\n return tinycolor(hsl);\n}\n\n// Spin takes a positive or negative amount within [-360, 360] indicating the change of hue.\n// Values outside of this range will be wrapped into this range.\nfunction _spin(color, amount) {\n var hsl = tinycolor(color).toHsl();\n var hue = (hsl.h + amount) % 360;\n hsl.h = hue < 0 ? 
360 + hue : hue;\n return tinycolor(hsl);\n}\n\n// Combination Functions\n// ---------------------\n// Thanks to jQuery xColor for some of the ideas behind these\n// \n\nfunction _complement(color) {\n var hsl = tinycolor(color).toHsl();\n hsl.h = (hsl.h + 180) % 360;\n return tinycolor(hsl);\n}\nfunction polyad(color, number) {\n if (isNaN(number) || number <= 0) {\n throw new Error(\"Argument to polyad must be a positive number\");\n }\n var hsl = tinycolor(color).toHsl();\n var result = [tinycolor(color)];\n var step = 360 / number;\n for (var i = 1; i < number; i++) {\n result.push(tinycolor({\n h: (hsl.h + i * step) % 360,\n s: hsl.s,\n l: hsl.l\n }));\n }\n return result;\n}\nfunction _splitcomplement(color) {\n var hsl = tinycolor(color).toHsl();\n var h = hsl.h;\n return [tinycolor(color), tinycolor({\n h: (h + 72) % 360,\n s: hsl.s,\n l: hsl.l\n }), tinycolor({\n h: (h + 216) % 360,\n s: hsl.s,\n l: hsl.l\n })];\n}\nfunction _analogous(color, results, slices) {\n results = results || 6;\n slices = slices || 30;\n var hsl = tinycolor(color).toHsl();\n var part = 360 / slices;\n var ret = [tinycolor(color)];\n for (hsl.h = (hsl.h - (part * results >> 1) + 720) % 360; --results;) {\n hsl.h = (hsl.h + part) % 360;\n ret.push(tinycolor(hsl));\n }\n return ret;\n}\nfunction _monochromatic(color, results) {\n results = results || 6;\n var hsv = tinycolor(color).toHsv();\n var h = hsv.h,\n s = hsv.s,\n v = hsv.v;\n var ret = [];\n var modification = 1 / results;\n while (results--) {\n ret.push(tinycolor({\n h: h,\n s: s,\n v: v\n }));\n v = (v + modification) % 1;\n }\n return ret;\n}\n\n// Utility Functions\n// ---------------------\n\ntinycolor.mix = function (color1, color2, amount) {\n amount = amount === 0 ? 0 : amount || 50;\n var rgb1 = tinycolor(color1).toRgb();\n var rgb2 = tinycolor(color2).toRgb();\n var p = amount / 100;\n var rgba = {\n r: (rgb2.r - rgb1.r) * p + rgb1.r,\n g: (rgb2.g - rgb1.g) * p + rgb1.g,\n b: (rgb2.b - rgb1.b) * p + rgb1.b,\n a: (rgb2.a - rgb1.a) * p + rgb1.a\n };\n return tinycolor(rgba);\n};\n\n// Readability Functions\n// ---------------------\n// false\n// tinycolor.isReadable(\"#000\", \"#111\",{level:\"AA\",size:\"large\"}) => false\ntinycolor.isReadable = function (color1, color2, wcag2) {\n var readability = tinycolor.readability(color1, color2);\n var wcag2Parms, out;\n out = false;\n wcag2Parms = validateWCAG2Parms(wcag2);\n switch (wcag2Parms.level + wcag2Parms.size) {\n case \"AAsmall\":\n case \"AAAlarge\":\n out = readability >= 4.5;\n break;\n case \"AAlarge\":\n out = readability >= 3;\n break;\n case \"AAAsmall\":\n out = readability >= 7;\n break;\n }\n return out;\n};\n\n// `mostReadable`\n// Given a base color and a list of possible foreground or background\n// colors for that base, returns the most readable color.\n// Optionally returns Black or White if the most readable color is unreadable.\n// *Example*\n// tinycolor.mostReadable(tinycolor.mostReadable(\"#123\", [\"#124\", \"#125\"],{includeFallbackColors:false}).toHexString(); // \"#112255\"\n// tinycolor.mostReadable(tinycolor.mostReadable(\"#123\", [\"#124\", \"#125\"],{includeFallbackColors:true}).toHexString(); // \"#ffffff\"\n// tinycolor.mostReadable(\"#a8015a\", [\"#faf3f3\"],{includeFallbackColors:true,level:\"AAA\",size:\"large\"}).toHexString(); // \"#faf3f3\"\n// tinycolor.mostReadable(\"#a8015a\", [\"#faf3f3\"],{includeFallbackColors:true,level:\"AAA\",size:\"small\"}).toHexString(); // \"#ffffff\"\ntinycolor.mostReadable = function (baseColor, colorList, args) {\n var 
bestColor = null;\n var bestScore = 0;\n var readability;\n var includeFallbackColors, level, size;\n args = args || {};\n includeFallbackColors = args.includeFallbackColors;\n level = args.level;\n size = args.size;\n for (var i = 0; i < colorList.length; i++) {\n readability = tinycolor.readability(baseColor, colorList[i]);\n if (readability > bestScore) {\n bestScore = readability;\n bestColor = tinycolor(colorList[i]);\n }\n }\n if (tinycolor.isReadable(baseColor, bestColor, {\n level: level,\n size: size\n }) || !includeFallbackColors) {\n return bestColor;\n } else {\n args.includeFallbackColors = false;\n return tinycolor.mostReadable(baseColor, [\"#fff\", \"#000\"], args);\n }\n};\n\n// Big List of Colors\n// ------------------\n// \nvar names = tinycolor.names = {\n aliceblue: \"f0f8ff\",\n antiquewhite: \"faebd7\",\n aqua: \"0ff\",\n aquamarine: \"7fffd4\",\n azure: \"f0ffff\",\n beige: \"f5f5dc\",\n bisque: \"ffe4c4\",\n black: \"000\",\n blanchedalmond: \"ffebcd\",\n blue: \"00f\",\n blueviolet: \"8a2be2\",\n brown: \"a52a2a\",\n burlywood: \"deb887\",\n burntsienna: \"ea7e5d\",\n cadetblue: \"5f9ea0\",\n chartreuse: \"7fff00\",\n chocolate: \"d2691e\",\n coral: \"ff7f50\",\n cornflowerblue: \"6495ed\",\n cornsilk: \"fff8dc\",\n crimson: \"dc143c\",\n cyan: \"0ff\",\n darkblue: \"00008b\",\n darkcyan: \"008b8b\",\n darkgoldenrod: \"b8860b\",\n darkgray: \"a9a9a9\",\n darkgreen: \"006400\",\n darkgrey: \"a9a9a9\",\n darkkhaki: \"bdb76b\",\n darkmagenta: \"8b008b\",\n darkolivegreen: \"556b2f\",\n darkorange: \"ff8c00\",\n darkorchid: \"9932cc\",\n darkred: \"8b0000\",\n darksalmon: \"e9967a\",\n darkseagreen: \"8fbc8f\",\n darkslateblue: \"483d8b\",\n darkslategray: \"2f4f4f\",\n darkslategrey: \"2f4f4f\",\n darkturquoise: \"00ced1\",\n darkviolet: \"9400d3\",\n deeppink: \"ff1493\",\n deepskyblue: \"00bfff\",\n dimgray: \"696969\",\n dimgrey: \"696969\",\n dodgerblue: \"1e90ff\",\n firebrick: \"b22222\",\n floralwhite: \"fffaf0\",\n forestgreen: \"228b22\",\n fuchsia: \"f0f\",\n gainsboro: \"dcdcdc\",\n ghostwhite: \"f8f8ff\",\n gold: \"ffd700\",\n goldenrod: \"daa520\",\n gray: \"808080\",\n green: \"008000\",\n greenyellow: \"adff2f\",\n grey: \"808080\",\n honeydew: \"f0fff0\",\n hotpink: \"ff69b4\",\n indianred: \"cd5c5c\",\n indigo: \"4b0082\",\n ivory: \"fffff0\",\n khaki: \"f0e68c\",\n lavender: \"e6e6fa\",\n lavenderblush: \"fff0f5\",\n lawngreen: \"7cfc00\",\n lemonchiffon: \"fffacd\",\n lightblue: \"add8e6\",\n lightcoral: \"f08080\",\n lightcyan: \"e0ffff\",\n lightgoldenrodyellow: \"fafad2\",\n lightgray: \"d3d3d3\",\n lightgreen: \"90ee90\",\n lightgrey: \"d3d3d3\",\n lightpink: \"ffb6c1\",\n lightsalmon: \"ffa07a\",\n lightseagreen: \"20b2aa\",\n lightskyblue: \"87cefa\",\n lightslategray: \"789\",\n lightslategrey: \"789\",\n lightsteelblue: \"b0c4de\",\n lightyellow: \"ffffe0\",\n lime: \"0f0\",\n limegreen: \"32cd32\",\n linen: \"faf0e6\",\n magenta: \"f0f\",\n maroon: \"800000\",\n mediumaquamarine: \"66cdaa\",\n mediumblue: \"0000cd\",\n mediumorchid: \"ba55d3\",\n mediumpurple: \"9370db\",\n mediumseagreen: \"3cb371\",\n mediumslateblue: \"7b68ee\",\n mediumspringgreen: \"00fa9a\",\n mediumturquoise: \"48d1cc\",\n mediumvioletred: \"c71585\",\n midnightblue: \"191970\",\n mintcream: \"f5fffa\",\n mistyrose: \"ffe4e1\",\n moccasin: \"ffe4b5\",\n navajowhite: \"ffdead\",\n navy: \"000080\",\n oldlace: \"fdf5e6\",\n olive: \"808000\",\n olivedrab: \"6b8e23\",\n orange: \"ffa500\",\n orangered: \"ff4500\",\n orchid: \"da70d6\",\n palegoldenrod: \"eee8aa\",\n 
palegreen: \"98fb98\",\n paleturquoise: \"afeeee\",\n palevioletred: \"db7093\",\n papayawhip: \"ffefd5\",\n peachpuff: \"ffdab9\",\n peru: \"cd853f\",\n pink: \"ffc0cb\",\n plum: \"dda0dd\",\n powderblue: \"b0e0e6\",\n purple: \"800080\",\n rebeccapurple: \"663399\",\n red: \"f00\",\n rosybrown: \"bc8f8f\",\n royalblue: \"4169e1\",\n saddlebrown: \"8b4513\",\n salmon: \"fa8072\",\n sandybrown: \"f4a460\",\n seagreen: \"2e8b57\",\n seashell: \"fff5ee\",\n sienna: \"a0522d\",\n silver: \"c0c0c0\",\n skyblue: \"87ceeb\",\n slateblue: \"6a5acd\",\n slategray: \"708090\",\n slategrey: \"708090\",\n snow: \"fffafa\",\n springgreen: \"00ff7f\",\n steelblue: \"4682b4\",\n tan: \"d2b48c\",\n teal: \"008080\",\n thistle: \"d8bfd8\",\n tomato: \"ff6347\",\n turquoise: \"40e0d0\",\n violet: \"ee82ee\",\n wheat: \"f5deb3\",\n white: \"fff\",\n whitesmoke: \"f5f5f5\",\n yellow: \"ff0\",\n yellowgreen: \"9acd32\"\n};\n\n// Make it easy to access colors via `hexNames[hex]`\nvar hexNames = tinycolor.hexNames = flip(names);\n\n// Utilities\n// ---------\n\n// `{ 'name1': 'val1' }` becomes `{ 'val1': 'name1' }`\nfunction flip(o) {\n var flipped = {};\n for (var i in o) {\n if (o.hasOwnProperty(i)) {\n flipped[o[i]] = i;\n }\n }\n return flipped;\n}\n\n// Return a valid alpha value [0,1] with all invalid values being set to 1\nfunction boundAlpha(a) {\n a = parseFloat(a);\n if (isNaN(a) || a < 0 || a > 1) {\n a = 1;\n }\n return a;\n}\n\n// Take input from [0, n] and return it as [0, 1]\nfunction bound01(n, max) {\n if (isOnePointZero(n)) n = \"100%\";\n var processPercent = isPercentage(n);\n n = Math.min(max, Math.max(0, parseFloat(n)));\n\n // Automatically convert percentage into number\n if (processPercent) {\n n = parseInt(n * max, 10) / 100;\n }\n\n // Handle floating point rounding errors\n if (Math.abs(n - max) < 0.000001) {\n return 1;\n }\n\n // Convert into [0, 1] range if it isn't already\n return n % max / parseFloat(max);\n}\n\n// Force a number between 0 and 1\nfunction clamp01(val) {\n return Math.min(1, Math.max(0, val));\n}\n\n// Parse a base-16 hex value into a base-10 integer\nfunction parseIntFromHex(val) {\n return parseInt(val, 16);\n}\n\n// Need to handle 1.0 as 100%, since once it is a number, there is no difference between it and 1\n// \nfunction isOnePointZero(n) {\n return typeof n == \"string\" && n.indexOf(\".\") != -1 && parseFloat(n) === 1;\n}\n\n// Check to see if string passed in is a percentage\nfunction isPercentage(n) {\n return typeof n === \"string\" && n.indexOf(\"%\") != -1;\n}\n\n// Force a hex value to have 2 characters\nfunction pad2(c) {\n return c.length == 1 ? \"0\" + c : \"\" + c;\n}\n\n// Replace a decimal with it's percentage value\nfunction convertToPercentage(n) {\n if (n <= 1) {\n n = n * 100 + \"%\";\n }\n return n;\n}\n\n// Converts a decimal to a hex value\nfunction convertDecimalToHex(d) {\n return Math.round(parseFloat(d) * 255).toString(16);\n}\n// Converts a hex value to a decimal\nfunction convertHexToDecimal(h) {\n return parseIntFromHex(h) / 255;\n}\nvar matchers = function () {\n // \n var CSS_INTEGER = \"[-\\\\+]?\\\\d+%?\";\n\n // \n var CSS_NUMBER = \"[-\\\\+]?\\\\d*\\\\.\\\\d+%?\";\n\n // Allow positive/negative integer/number. 
Don't capture the either/or, just the entire outcome.\n var CSS_UNIT = \"(?:\" + CSS_NUMBER + \")|(?:\" + CSS_INTEGER + \")\";\n\n // Actual matching.\n // Parentheses and commas are optional, but not required.\n // Whitespace can take the place of commas or opening paren\n var PERMISSIVE_MATCH3 = \"[\\\\s|\\\\(]+(\" + CSS_UNIT + \")[,|\\\\s]+(\" + CSS_UNIT + \")[,|\\\\s]+(\" + CSS_UNIT + \")\\\\s*\\\\)?\";\n var PERMISSIVE_MATCH4 = \"[\\\\s|\\\\(]+(\" + CSS_UNIT + \")[,|\\\\s]+(\" + CSS_UNIT + \")[,|\\\\s]+(\" + CSS_UNIT + \")[,|\\\\s]+(\" + CSS_UNIT + \")\\\\s*\\\\)?\";\n return {\n CSS_UNIT: new RegExp(CSS_UNIT),\n rgb: new RegExp(\"rgb\" + PERMISSIVE_MATCH3),\n rgba: new RegExp(\"rgba\" + PERMISSIVE_MATCH4),\n hsl: new RegExp(\"hsl\" + PERMISSIVE_MATCH3),\n hsla: new RegExp(\"hsla\" + PERMISSIVE_MATCH4),\n hsv: new RegExp(\"hsv\" + PERMISSIVE_MATCH3),\n hsva: new RegExp(\"hsva\" + PERMISSIVE_MATCH4),\n hex3: /^#?([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,\n hex6: /^#?([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/,\n hex4: /^#?([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/,\n hex8: /^#?([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/\n };\n}();\n\n// `isValidCSSUnit`\n// Take in a single string / number and check to see if it looks like a CSS unit\n// (see `matchers` above for definition).\nfunction isValidCSSUnit(color) {\n return !!matchers.CSS_UNIT.exec(color);\n}\n\n// `stringInputToObject`\n// Permissive string parsing. Take in a number of formats, and output an object\n// based on detected format. Returns `{ r, g, b }` or `{ h, s, l }` or `{ h, s, v}`\nfunction stringInputToObject(color) {\n color = color.replace(trimLeft, \"\").replace(trimRight, \"\").toLowerCase();\n var named = false;\n if (names[color]) {\n color = names[color];\n named = true;\n } else if (color == \"transparent\") {\n return {\n r: 0,\n g: 0,\n b: 0,\n a: 0,\n format: \"name\"\n };\n }\n\n // Try to match string input using regular expressions.\n // Keep most of the number bounding out of this function - don't worry about [0,1] or [0,100] or [0,360]\n // Just return an object and let the conversion functions handle that.\n // This way the result will be the same whether the tinycolor is initialized with string or object.\n var match;\n if (match = matchers.rgb.exec(color)) {\n return {\n r: match[1],\n g: match[2],\n b: match[3]\n };\n }\n if (match = matchers.rgba.exec(color)) {\n return {\n r: match[1],\n g: match[2],\n b: match[3],\n a: match[4]\n };\n }\n if (match = matchers.hsl.exec(color)) {\n return {\n h: match[1],\n s: match[2],\n l: match[3]\n };\n }\n if (match = matchers.hsla.exec(color)) {\n return {\n h: match[1],\n s: match[2],\n l: match[3],\n a: match[4]\n };\n }\n if (match = matchers.hsv.exec(color)) {\n return {\n h: match[1],\n s: match[2],\n v: match[3]\n };\n }\n if (match = matchers.hsva.exec(color)) {\n return {\n h: match[1],\n s: match[2],\n v: match[3],\n a: match[4]\n };\n }\n if (match = matchers.hex8.exec(color)) {\n return {\n r: parseIntFromHex(match[1]),\n g: parseIntFromHex(match[2]),\n b: parseIntFromHex(match[3]),\n a: convertHexToDecimal(match[4]),\n format: named ? \"name\" : \"hex8\"\n };\n }\n if (match = matchers.hex6.exec(color)) {\n return {\n r: parseIntFromHex(match[1]),\n g: parseIntFromHex(match[2]),\n b: parseIntFromHex(match[3]),\n format: named ? 
\"name\" : \"hex\"\n };\n }\n if (match = matchers.hex4.exec(color)) {\n return {\n r: parseIntFromHex(match[1] + \"\" + match[1]),\n g: parseIntFromHex(match[2] + \"\" + match[2]),\n b: parseIntFromHex(match[3] + \"\" + match[3]),\n a: convertHexToDecimal(match[4] + \"\" + match[4]),\n format: named ? \"name\" : \"hex8\"\n };\n }\n if (match = matchers.hex3.exec(color)) {\n return {\n r: parseIntFromHex(match[1] + \"\" + match[1]),\n g: parseIntFromHex(match[2] + \"\" + match[2]),\n b: parseIntFromHex(match[3] + \"\" + match[3]),\n format: named ? \"name\" : \"hex\"\n };\n }\n return false;\n}\nfunction validateWCAG2Parms(parms) {\n // return valid WCAG2 parms for isReadable.\n // If input parms are invalid, return {\"level\":\"AA\", \"size\":\"small\"}\n var level, size;\n parms = parms || {\n level: \"AA\",\n size: \"small\"\n };\n level = (parms.level || \"AA\").toUpperCase();\n size = (parms.size || \"small\").toLowerCase();\n if (level !== \"AA\" && level !== \"AAA\") {\n level = \"AA\";\n }\n if (size !== \"small\" && size !== \"large\") {\n size = \"small\";\n }\n return {\n level: level,\n size: size\n };\n}\n\nexport { tinycolor as default };\n","import each from 'lodash-es/each';\nimport tinycolor from 'tinycolor2';\n\nexport var simpleCheckForValidColor = function simpleCheckForValidColor(data) {\n var keysToCheck = ['r', 'g', 'b', 'a', 'h', 's', 'l', 'v'];\n var checked = 0;\n var passed = 0;\n each(keysToCheck, function (letter) {\n if (data[letter]) {\n checked += 1;\n if (!isNaN(data[letter])) {\n passed += 1;\n }\n if (letter === 's' || letter === 'l') {\n var percentPatt = /^\\d+%$/;\n if (percentPatt.test(data[letter])) {\n passed += 1;\n }\n }\n }\n });\n return checked === passed ? data : false;\n};\n\nexport var toState = function toState(data, oldHue) {\n var color = data.hex ? tinycolor(data.hex) : tinycolor(data);\n var hsl = color.toHsl();\n var hsv = color.toHsv();\n var rgb = color.toRgb();\n var hex = color.toHex();\n if (hsl.s === 0) {\n hsl.h = oldHue || 0;\n hsv.h = oldHue || 0;\n }\n var transparent = hex === '000000' && rgb.a === 0;\n\n return {\n hsl: hsl,\n hex: transparent ? 'transparent' : '#' + hex,\n rgb: rgb,\n hsv: hsv,\n oldHue: data.h || oldHue || hsl.h,\n source: data.source\n };\n};\n\nexport var isValidHex = function isValidHex(hex) {\n if (hex === 'transparent') {\n return true;\n }\n // disable hex4 and hex8\n var lh = String(hex).charAt(0) === '#' ? 1 : 0;\n return hex.length !== 4 + lh && hex.length < 7 + lh && tinycolor(hex).isValid();\n};\n\nexport var getContrastingColor = function getContrastingColor(data) {\n if (!data) {\n return '#fff';\n }\n var col = toState(data);\n if (col.hex === 'transparent') {\n return 'rgba(0,0,0,0.4)';\n }\n var yiq = (col.rgb.r * 299 + col.rgb.g * 587 + col.rgb.b * 114) / 1000;\n return yiq >= 128 ? 
'#000' : '#fff';\n};\n\nexport var red = {\n hsl: { a: 1, h: 0, l: 0.5, s: 1 },\n hex: '#ff0000',\n rgb: { r: 255, g: 0, b: 0, a: 1 },\n hsv: { h: 0, s: 1, v: 1, a: 1 }\n};\n\nexport var isvalidColorString = function isvalidColorString(string, type) {\n var stringWithoutDegree = string.replace('°', '');\n return tinycolor(type + ' (' + stringWithoutDegree + ')')._ok;\n};","var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }\n\nimport React, { Component, PureComponent } from 'react';\nimport debounce from 'lodash-es/debounce';\nimport * as color from '../../helpers/color';\n\nexport var ColorWrap = function ColorWrap(Picker) {\n var ColorPicker = function (_ref) {\n _inherits(ColorPicker, _ref);\n\n function ColorPicker(props) {\n _classCallCheck(this, ColorPicker);\n\n var _this = _possibleConstructorReturn(this, (ColorPicker.__proto__ || Object.getPrototypeOf(ColorPicker)).call(this));\n\n _this.handleChange = function (data, event) {\n var isValidColor = color.simpleCheckForValidColor(data);\n if (isValidColor) {\n var colors = color.toState(data, data.h || _this.state.oldHue);\n _this.setState(colors);\n _this.props.onChangeComplete && _this.debounce(_this.props.onChangeComplete, colors, event);\n _this.props.onChange && _this.props.onChange(colors, event);\n }\n };\n\n _this.handleSwatchHover = function (data, event) {\n var isValidColor = color.simpleCheckForValidColor(data);\n if (isValidColor) {\n var colors = color.toState(data, data.h || _this.state.oldHue);\n _this.props.onSwatchHover && _this.props.onSwatchHover(colors, event);\n }\n };\n\n _this.state = _extends({}, color.toState(props.color, 0));\n\n _this.debounce = debounce(function (fn, data, event) {\n fn(data, event);\n }, 100);\n return _this;\n }\n\n _createClass(ColorPicker, [{\n key: 'render',\n value: function render() {\n var optionalEvents = {};\n if (this.props.onSwatchHover) {\n optionalEvents.onSwatchHover = this.handleSwatchHover;\n }\n\n return React.createElement(Picker, _extends({}, this.props, this.state, {\n onChange: this.handleChange\n }, optionalEvents));\n }\n }], [{\n key: 'getDerivedStateFromProps',\n value: function getDerivedStateFromProps(nextProps, state) {\n return _extends({}, color.toState(nextProps.color, state.oldHue));\n }\n }]);\n\n return ColorPicker;\n }(PureComponent || Component);\n\n ColorPicker.propTypes = _extends({}, Picker.propTypes);\n\n ColorPicker.defaultProps = _extends({}, Picker.defaultProps, {\n color: {\n h: 250,\n s: 0.50,\n l: 0.20,\n a: 1\n }\n });\n\n return ColorPicker;\n};\n\nexport default ColorWrap;","var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };\n\nvar _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? 
call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }\n\n/* eslint-disable no-invalid-this */\nimport React from 'react';\n\nexport var handleFocus = function handleFocus(Component) {\n var Span = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'span';\n return function (_React$Component) {\n _inherits(Focus, _React$Component);\n\n function Focus() {\n var _ref;\n\n var _temp, _this, _ret;\n\n _classCallCheck(this, Focus);\n\n for (var _len = arguments.length, args = Array(_len), _key = 0; _key < _len; _key++) {\n args[_key] = arguments[_key];\n }\n\n return _ret = (_temp = (_this = _possibleConstructorReturn(this, (_ref = Focus.__proto__ || Object.getPrototypeOf(Focus)).call.apply(_ref, [this].concat(args))), _this), _this.state = { focus: false }, _this.handleFocus = function () {\n return _this.setState({ focus: true });\n }, _this.handleBlur = function () {\n return _this.setState({ focus: false });\n }, _temp), _possibleConstructorReturn(_this, _ret);\n }\n\n _createClass(Focus, [{\n key: 'render',\n value: function render() {\n return React.createElement(\n Span,\n { onFocus: this.handleFocus, onBlur: this.handleBlur },\n React.createElement(Component, _extends({}, this.props, this.state))\n );\n }\n }]);\n\n return Focus;\n }(React.Component);\n};","var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };\n\nimport React from 'react';\nimport reactCSS from 'reactcss';\nimport { handleFocus } from '../../helpers/interaction';\n\nimport Checkboard from './Checkboard';\n\nvar ENTER = 13;\n\nexport var Swatch = function Swatch(_ref) {\n var color = _ref.color,\n style = _ref.style,\n _ref$onClick = _ref.onClick,\n onClick = _ref$onClick === undefined ? function () {} : _ref$onClick,\n onHover = _ref.onHover,\n _ref$title = _ref.title,\n title = _ref$title === undefined ? color : _ref$title,\n children = _ref.children,\n focus = _ref.focus,\n _ref$focusStyle = _ref.focusStyle,\n focusStyle = _ref$focusStyle === undefined ? {} : _ref$focusStyle;\n\n var transparent = color === 'transparent';\n var styles = reactCSS({\n default: {\n swatch: _extends({\n background: color,\n height: '100%',\n width: '100%',\n cursor: 'pointer',\n position: 'relative',\n outline: 'none'\n }, style, focus ? 
focusStyle : {})\n }\n });\n\n var handleClick = function handleClick(e) {\n return onClick(color, e);\n };\n var handleKeyDown = function handleKeyDown(e) {\n return e.keyCode === ENTER && onClick(color, e);\n };\n var handleHover = function handleHover(e) {\n return onHover(color, e);\n };\n\n var optionalEvents = {};\n if (onHover) {\n optionalEvents.onMouseOver = handleHover;\n }\n\n return React.createElement(\n 'div',\n _extends({\n style: styles.swatch,\n onClick: handleClick,\n title: title,\n tabIndex: 0,\n onKeyDown: handleKeyDown\n }, optionalEvents),\n children,\n transparent && React.createElement(Checkboard, {\n borderRadius: styles.swatch.borderRadius,\n boxShadow: 'inset 0 0 0 1px rgba(0,0,0,0.1)'\n })\n );\n};\n\nexport default handleFocus(Swatch);","import React from 'react';\nimport reactCSS from 'reactcss';\n\nexport var AlphaPointer = function AlphaPointer(_ref) {\n var direction = _ref.direction;\n\n var styles = reactCSS({\n 'default': {\n picker: {\n width: '18px',\n height: '18px',\n borderRadius: '50%',\n transform: 'translate(-9px, -1px)',\n backgroundColor: 'rgb(248, 248, 248)',\n boxShadow: '0 1px 4px 0 rgba(0, 0, 0, 0.37)'\n }\n },\n 'vertical': {\n picker: {\n transform: 'translate(-3px, -9px)'\n }\n }\n }, { vertical: direction === 'vertical' });\n\n return React.createElement('div', { style: styles.picker });\n};\n\nexport default AlphaPointer;","var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };\n\nimport React from 'react';\nimport reactCSS from 'reactcss';\n\nimport { ColorWrap, Alpha } from '../common';\nimport AlphaPointer from './AlphaPointer';\n\nexport var AlphaPicker = function AlphaPicker(_ref) {\n var rgb = _ref.rgb,\n hsl = _ref.hsl,\n width = _ref.width,\n height = _ref.height,\n onChange = _ref.onChange,\n direction = _ref.direction,\n style = _ref.style,\n renderers = _ref.renderers,\n pointer = _ref.pointer,\n _ref$className = _ref.className,\n className = _ref$className === undefined ? '' : _ref$className;\n\n var styles = reactCSS({\n 'default': {\n picker: {\n position: 'relative',\n width: width,\n height: height\n },\n alpha: {\n radius: '2px',\n style: style\n }\n }\n });\n\n return React.createElement(\n 'div',\n { style: styles.picker, className: 'alpha-picker ' + className },\n React.createElement(Alpha, _extends({}, styles.alpha, {\n rgb: rgb,\n hsl: hsl,\n pointer: pointer,\n renderers: renderers,\n onChange: onChange,\n direction: direction\n }))\n );\n};\n\nAlphaPicker.defaultProps = {\n width: '316px',\n height: '16px',\n direction: 'horizontal',\n pointer: AlphaPointer\n};\n\nexport default ColorWrap(AlphaPicker);","/**\n * A specialized version of `_.map` for arrays without support for iteratee\n * shorthands.\n *\n * @private\n * @param {Array} [array] The array to iterate over.\n * @param {Function} iteratee The function invoked per iteration.\n * @returns {Array} Returns the new mapped array.\n */\nfunction arrayMap(array, iteratee) {\n var index = -1,\n length = array == null ? 0 : array.length,\n result = Array(length);\n\n while (++index < length) {\n result[index] = iteratee(array[index], index, array);\n }\n return result;\n}\n\nexport default arrayMap;\n","/** Used to stand-in for `undefined` hash values. 
*/\nvar HASH_UNDEFINED = '__lodash_hash_undefined__';\n\n/**\n * Adds `value` to the array cache.\n *\n * @private\n * @name add\n * @memberOf SetCache\n * @alias push\n * @param {*} value The value to cache.\n * @returns {Object} Returns the cache instance.\n */\nfunction setCacheAdd(value) {\n this.__data__.set(value, HASH_UNDEFINED);\n return this;\n}\n\nexport default setCacheAdd;\n","/**\n * Checks if `value` is in the array cache.\n *\n * @private\n * @name has\n * @memberOf SetCache\n * @param {*} value The value to search for.\n * @returns {number} Returns `true` if `value` is found, else `false`.\n */\nfunction setCacheHas(value) {\n return this.__data__.has(value);\n}\n\nexport default setCacheHas;\n","import MapCache from './_MapCache.js';\nimport setCacheAdd from './_setCacheAdd.js';\nimport setCacheHas from './_setCacheHas.js';\n\n/**\n *\n * Creates an array cache object to store unique values.\n *\n * @private\n * @constructor\n * @param {Array} [values] The values to cache.\n */\nfunction SetCache(values) {\n var index = -1,\n length = values == null ? 0 : values.length;\n\n this.__data__ = new MapCache;\n while (++index < length) {\n this.add(values[index]);\n }\n}\n\n// Add methods to `SetCache`.\nSetCache.prototype.add = SetCache.prototype.push = setCacheAdd;\nSetCache.prototype.has = setCacheHas;\n\nexport default SetCache;\n","/**\n * A specialized version of `_.some` for arrays without support for iteratee\n * shorthands.\n *\n * @private\n * @param {Array} [array] The array to iterate over.\n * @param {Function} predicate The function invoked per iteration.\n * @returns {boolean} Returns `true` if any element passes the predicate check,\n * else `false`.\n */\nfunction arraySome(array, predicate) {\n var index = -1,\n length = array == null ? 0 : array.length;\n\n while (++index < length) {\n if (predicate(array[index], index, array)) {\n return true;\n }\n }\n return false;\n}\n\nexport default arraySome;\n","/**\n * Checks if a `cache` value for `key` exists.\n *\n * @private\n * @param {Object} cache The cache to query.\n * @param {string} key The key of the entry to check.\n * @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.\n */\nfunction cacheHas(cache, key) {\n return cache.has(key);\n}\n\nexport default cacheHas;\n","import SetCache from './_SetCache.js';\nimport arraySome from './_arraySome.js';\nimport cacheHas from './_cacheHas.js';\n\n/** Used to compose bitmasks for value comparisons. */\nvar COMPARE_PARTIAL_FLAG = 1,\n COMPARE_UNORDERED_FLAG = 2;\n\n/**\n * A specialized version of `baseIsEqualDeep` for arrays with support for\n * partial deep comparisons.\n *\n * @private\n * @param {Array} array The array to compare.\n * @param {Array} other The other array to compare.\n * @param {number} bitmask The bitmask flags. 
See `baseIsEqual` for more details.\n * @param {Function} customizer The function to customize comparisons.\n * @param {Function} equalFunc The function to determine equivalents of values.\n * @param {Object} stack Tracks traversed `array` and `other` objects.\n * @returns {boolean} Returns `true` if the arrays are equivalent, else `false`.\n */\nfunction equalArrays(array, other, bitmask, customizer, equalFunc, stack) {\n var isPartial = bitmask & COMPARE_PARTIAL_FLAG,\n arrLength = array.length,\n othLength = other.length;\n\n if (arrLength != othLength && !(isPartial && othLength > arrLength)) {\n return false;\n }\n // Check that cyclic values are equal.\n var arrStacked = stack.get(array);\n var othStacked = stack.get(other);\n if (arrStacked && othStacked) {\n return arrStacked == other && othStacked == array;\n }\n var index = -1,\n result = true,\n seen = (bitmask & COMPARE_UNORDERED_FLAG) ? new SetCache : undefined;\n\n stack.set(array, other);\n stack.set(other, array);\n\n // Ignore non-index properties.\n while (++index < arrLength) {\n var arrValue = array[index],\n othValue = other[index];\n\n if (customizer) {\n var compared = isPartial\n ? customizer(othValue, arrValue, index, other, array, stack)\n : customizer(arrValue, othValue, index, array, other, stack);\n }\n if (compared !== undefined) {\n if (compared) {\n continue;\n }\n result = false;\n break;\n }\n // Recursively compare arrays (susceptible to call stack limits).\n if (seen) {\n if (!arraySome(other, function(othValue, othIndex) {\n if (!cacheHas(seen, othIndex) &&\n (arrValue === othValue || equalFunc(arrValue, othValue, bitmask, customizer, stack))) {\n return seen.push(othIndex);\n }\n })) {\n result = false;\n break;\n }\n } else if (!(\n arrValue === othValue ||\n equalFunc(arrValue, othValue, bitmask, customizer, stack)\n )) {\n result = false;\n break;\n }\n }\n stack['delete'](array);\n stack['delete'](other);\n return result;\n}\n\nexport default equalArrays;\n","/**\n * Converts `map` to its key-value pairs.\n *\n * @private\n * @param {Object} map The map to convert.\n * @returns {Array} Returns the key-value pairs.\n */\nfunction mapToArray(map) {\n var index = -1,\n result = Array(map.size);\n\n map.forEach(function(value, key) {\n result[++index] = [key, value];\n });\n return result;\n}\n\nexport default mapToArray;\n","/**\n * Converts `set` to an array of its values.\n *\n * @private\n * @param {Object} set The set to convert.\n * @returns {Array} Returns the values.\n */\nfunction setToArray(set) {\n var index = -1,\n result = Array(set.size);\n\n set.forEach(function(value) {\n result[++index] = value;\n });\n return result;\n}\n\nexport default setToArray;\n","import Symbol from './_Symbol.js';\nimport Uint8Array from './_Uint8Array.js';\nimport eq from './eq.js';\nimport equalArrays from './_equalArrays.js';\nimport mapToArray from './_mapToArray.js';\nimport setToArray from './_setToArray.js';\n\n/** Used to compose bitmasks for value comparisons. */\nvar COMPARE_PARTIAL_FLAG = 1,\n COMPARE_UNORDERED_FLAG = 2;\n\n/** `Object#toString` result references. */\nvar boolTag = '[object Boolean]',\n dateTag = '[object Date]',\n errorTag = '[object Error]',\n mapTag = '[object Map]',\n numberTag = '[object Number]',\n regexpTag = '[object RegExp]',\n setTag = '[object Set]',\n stringTag = '[object String]',\n symbolTag = '[object Symbol]';\n\nvar arrayBufferTag = '[object ArrayBuffer]',\n dataViewTag = '[object DataView]';\n\n/** Used to convert symbols to primitives and strings. 
*/\nvar symbolProto = Symbol ? Symbol.prototype : undefined,\n symbolValueOf = symbolProto ? symbolProto.valueOf : undefined;\n\n/**\n * A specialized version of `baseIsEqualDeep` for comparing objects of\n * the same `toStringTag`.\n *\n * **Note:** This function only supports comparing values with tags of\n * `Boolean`, `Date`, `Error`, `Number`, `RegExp`, or `String`.\n *\n * @private\n * @param {Object} object The object to compare.\n * @param {Object} other The other object to compare.\n * @param {string} tag The `toStringTag` of the objects to compare.\n * @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details.\n * @param {Function} customizer The function to customize comparisons.\n * @param {Function} equalFunc The function to determine equivalents of values.\n * @param {Object} stack Tracks traversed `object` and `other` objects.\n * @returns {boolean} Returns `true` if the objects are equivalent, else `false`.\n */\nfunction equalByTag(object, other, tag, bitmask, customizer, equalFunc, stack) {\n switch (tag) {\n case dataViewTag:\n if ((object.byteLength != other.byteLength) ||\n (object.byteOffset != other.byteOffset)) {\n return false;\n }\n object = object.buffer;\n other = other.buffer;\n\n case arrayBufferTag:\n if ((object.byteLength != other.byteLength) ||\n !equalFunc(new Uint8Array(object), new Uint8Array(other))) {\n return false;\n }\n return true;\n\n case boolTag:\n case dateTag:\n case numberTag:\n // Coerce booleans to `1` or `0` and dates to milliseconds.\n // Invalid dates are coerced to `NaN`.\n return eq(+object, +other);\n\n case errorTag:\n return object.name == other.name && object.message == other.message;\n\n case regexpTag:\n case stringTag:\n // Coerce regexes to strings and treat strings, primitives and objects,\n // as equal. 
See http://www.ecma-international.org/ecma-262/7.0/#sec-regexp.prototype.tostring\n // for more details.\n return object == (other + '');\n\n case mapTag:\n var convert = mapToArray;\n\n case setTag:\n var isPartial = bitmask & COMPARE_PARTIAL_FLAG;\n convert || (convert = setToArray);\n\n if (object.size != other.size && !isPartial) {\n return false;\n }\n // Assume cyclic values are equal.\n var stacked = stack.get(object);\n if (stacked) {\n return stacked == other;\n }\n bitmask |= COMPARE_UNORDERED_FLAG;\n\n // Recursively compare objects (susceptible to call stack limits).\n stack.set(object, other);\n var result = equalArrays(convert(object), convert(other), bitmask, customizer, equalFunc, stack);\n stack['delete'](object);\n return result;\n\n case symbolTag:\n if (symbolValueOf) {\n return symbolValueOf.call(object) == symbolValueOf.call(other);\n }\n }\n return false;\n}\n\nexport default equalByTag;\n","/**\n * Appends the elements of `values` to `array`.\n *\n * @private\n * @param {Array} array The array to modify.\n * @param {Array} values The values to append.\n * @returns {Array} Returns `array`.\n */\nfunction arrayPush(array, values) {\n var index = -1,\n length = values.length,\n offset = array.length;\n\n while (++index < length) {\n array[offset + index] = values[index];\n }\n return array;\n}\n\nexport default arrayPush;\n","import arrayPush from './_arrayPush.js';\nimport isArray from './isArray.js';\n\n/**\n * The base implementation of `getAllKeys` and `getAllKeysIn` which uses\n * `keysFunc` and `symbolsFunc` to get the enumerable property names and\n * symbols of `object`.\n *\n * @private\n * @param {Object} object The object to query.\n * @param {Function} keysFunc The function to get the keys of `object`.\n * @param {Function} symbolsFunc The function to get the symbols of `object`.\n * @returns {Array} Returns the array of property names and symbols.\n */\nfunction baseGetAllKeys(object, keysFunc, symbolsFunc) {\n var result = keysFunc(object);\n return isArray(object) ? result : arrayPush(result, symbolsFunc(object));\n}\n\nexport default baseGetAllKeys;\n","/**\n * A specialized version of `_.filter` for arrays without support for\n * iteratee shorthands.\n *\n * @private\n * @param {Array} [array] The array to iterate over.\n * @param {Function} predicate The function invoked per iteration.\n * @returns {Array} Returns the new filtered array.\n */\nfunction arrayFilter(array, predicate) {\n var index = -1,\n length = array == null ? 0 : array.length,\n resIndex = 0,\n result = [];\n\n while (++index < length) {\n var value = array[index];\n if (predicate(value, index, array)) {\n result[resIndex++] = value;\n }\n }\n return result;\n}\n\nexport default arrayFilter;\n","/**\n * This method returns a new empty array.\n *\n * @static\n * @memberOf _\n * @since 4.13.0\n * @category Util\n * @returns {Array} Returns the new empty array.\n * @example\n *\n * var arrays = _.times(2, _.stubArray);\n *\n * console.log(arrays);\n * // => [[], []]\n *\n * console.log(arrays[0] === arrays[1]);\n * // => false\n */\nfunction stubArray() {\n return [];\n}\n\nexport default stubArray;\n","import arrayFilter from './_arrayFilter.js';\nimport stubArray from './stubArray.js';\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Built-in value references. */\nvar propertyIsEnumerable = objectProto.propertyIsEnumerable;\n\n/* Built-in method references for those with the same name as other `lodash` methods. 
*/\nvar nativeGetSymbols = Object.getOwnPropertySymbols;\n\n/**\n * Creates an array of the own enumerable symbols of `object`.\n *\n * @private\n * @param {Object} object The object to query.\n * @returns {Array} Returns the array of symbols.\n */\nvar getSymbols = !nativeGetSymbols ? stubArray : function(object) {\n if (object == null) {\n return [];\n }\n object = Object(object);\n return arrayFilter(nativeGetSymbols(object), function(symbol) {\n return propertyIsEnumerable.call(object, symbol);\n });\n};\n\nexport default getSymbols;\n","import baseGetAllKeys from './_baseGetAllKeys.js';\nimport getSymbols from './_getSymbols.js';\nimport keys from './keys.js';\n\n/**\n * Creates an array of own enumerable property names and symbols of `object`.\n *\n * @private\n * @param {Object} object The object to query.\n * @returns {Array} Returns the array of property names and symbols.\n */\nfunction getAllKeys(object) {\n return baseGetAllKeys(object, keys, getSymbols);\n}\n\nexport default getAllKeys;\n","import getAllKeys from './_getAllKeys.js';\n\n/** Used to compose bitmasks for value comparisons. */\nvar COMPARE_PARTIAL_FLAG = 1;\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Used to check objects for own properties. */\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/**\n * A specialized version of `baseIsEqualDeep` for objects with support for\n * partial deep comparisons.\n *\n * @private\n * @param {Object} object The object to compare.\n * @param {Object} other The other object to compare.\n * @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details.\n * @param {Function} customizer The function to customize comparisons.\n * @param {Function} equalFunc The function to determine equivalents of values.\n * @param {Object} stack Tracks traversed `object` and `other` objects.\n * @returns {boolean} Returns `true` if the objects are equivalent, else `false`.\n */\nfunction equalObjects(object, other, bitmask, customizer, equalFunc, stack) {\n var isPartial = bitmask & COMPARE_PARTIAL_FLAG,\n objProps = getAllKeys(object),\n objLength = objProps.length,\n othProps = getAllKeys(other),\n othLength = othProps.length;\n\n if (objLength != othLength && !isPartial) {\n return false;\n }\n var index = objLength;\n while (index--) {\n var key = objProps[index];\n if (!(isPartial ? key in other : hasOwnProperty.call(other, key))) {\n return false;\n }\n }\n // Check that cyclic values are equal.\n var objStacked = stack.get(object);\n var othStacked = stack.get(other);\n if (objStacked && othStacked) {\n return objStacked == other && othStacked == object;\n }\n var result = true;\n stack.set(object, other);\n stack.set(other, object);\n\n var skipCtor = isPartial;\n while (++index < objLength) {\n key = objProps[index];\n var objValue = object[key],\n othValue = other[key];\n\n if (customizer) {\n var compared = isPartial\n ? customizer(othValue, objValue, key, other, object, stack)\n : customizer(objValue, othValue, key, object, other, stack);\n }\n // Recursively compare objects (susceptible to call stack limits).\n if (!(compared === undefined\n ? 
(objValue === othValue || equalFunc(objValue, othValue, bitmask, customizer, stack))\n : compared\n )) {\n result = false;\n break;\n }\n skipCtor || (skipCtor = key == 'constructor');\n }\n if (result && !skipCtor) {\n var objCtor = object.constructor,\n othCtor = other.constructor;\n\n // Non `Object` object instances with different constructors are not equal.\n if (objCtor != othCtor &&\n ('constructor' in object && 'constructor' in other) &&\n !(typeof objCtor == 'function' && objCtor instanceof objCtor &&\n typeof othCtor == 'function' && othCtor instanceof othCtor)) {\n result = false;\n }\n }\n stack['delete'](object);\n stack['delete'](other);\n return result;\n}\n\nexport default equalObjects;\n","import getNative from './_getNative.js';\nimport root from './_root.js';\n\n/* Built-in method references that are verified to be native. */\nvar DataView = getNative(root, 'DataView');\n\nexport default DataView;\n","import getNative from './_getNative.js';\nimport root from './_root.js';\n\n/* Built-in method references that are verified to be native. */\nvar Promise = getNative(root, 'Promise');\n\nexport default Promise;\n","import getNative from './_getNative.js';\nimport root from './_root.js';\n\n/* Built-in method references that are verified to be native. */\nvar Set = getNative(root, 'Set');\n\nexport default Set;\n","import getNative from './_getNative.js';\nimport root from './_root.js';\n\n/* Built-in method references that are verified to be native. */\nvar WeakMap = getNative(root, 'WeakMap');\n\nexport default WeakMap;\n","import DataView from './_DataView.js';\nimport Map from './_Map.js';\nimport Promise from './_Promise.js';\nimport Set from './_Set.js';\nimport WeakMap from './_WeakMap.js';\nimport baseGetTag from './_baseGetTag.js';\nimport toSource from './_toSource.js';\n\n/** `Object#toString` result references. */\nvar mapTag = '[object Map]',\n objectTag = '[object Object]',\n promiseTag = '[object Promise]',\n setTag = '[object Set]',\n weakMapTag = '[object WeakMap]';\n\nvar dataViewTag = '[object DataView]';\n\n/** Used to detect maps, sets, and weakmaps. */\nvar dataViewCtorString = toSource(DataView),\n mapCtorString = toSource(Map),\n promiseCtorString = toSource(Promise),\n setCtorString = toSource(Set),\n weakMapCtorString = toSource(WeakMap);\n\n/**\n * Gets the `toStringTag` of `value`.\n *\n * @private\n * @param {*} value The value to query.\n * @returns {string} Returns the `toStringTag`.\n */\nvar getTag = baseGetTag;\n\n// Fallback for data views, maps, sets, and weak maps in IE 11 and promises in Node.js < 6.\nif ((DataView && getTag(new DataView(new ArrayBuffer(1))) != dataViewTag) ||\n (Map && getTag(new Map) != mapTag) ||\n (Promise && getTag(Promise.resolve()) != promiseTag) ||\n (Set && getTag(new Set) != setTag) ||\n (WeakMap && getTag(new WeakMap) != weakMapTag)) {\n getTag = function(value) {\n var result = baseGetTag(value),\n Ctor = result == objectTag ? value.constructor : undefined,\n ctorString = Ctor ? 
toSource(Ctor) : '';\n\n if (ctorString) {\n switch (ctorString) {\n case dataViewCtorString: return dataViewTag;\n case mapCtorString: return mapTag;\n case promiseCtorString: return promiseTag;\n case setCtorString: return setTag;\n case weakMapCtorString: return weakMapTag;\n }\n }\n return result;\n };\n}\n\nexport default getTag;\n","import Stack from './_Stack.js';\nimport equalArrays from './_equalArrays.js';\nimport equalByTag from './_equalByTag.js';\nimport equalObjects from './_equalObjects.js';\nimport getTag from './_getTag.js';\nimport isArray from './isArray.js';\nimport isBuffer from './isBuffer.js';\nimport isTypedArray from './isTypedArray.js';\n\n/** Used to compose bitmasks for value comparisons. */\nvar COMPARE_PARTIAL_FLAG = 1;\n\n/** `Object#toString` result references. */\nvar argsTag = '[object Arguments]',\n arrayTag = '[object Array]',\n objectTag = '[object Object]';\n\n/** Used for built-in method references. */\nvar objectProto = Object.prototype;\n\n/** Used to check objects for own properties. */\nvar hasOwnProperty = objectProto.hasOwnProperty;\n\n/**\n * A specialized version of `baseIsEqual` for arrays and objects which performs\n * deep comparisons and tracks traversed objects enabling objects with circular\n * references to be compared.\n *\n * @private\n * @param {Object} object The object to compare.\n * @param {Object} other The other object to compare.\n * @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details.\n * @param {Function} customizer The function to customize comparisons.\n * @param {Function} equalFunc The function to determine equivalents of values.\n * @param {Object} [stack] Tracks traversed `object` and `other` objects.\n * @returns {boolean} Returns `true` if the objects are equivalent, else `false`.\n */\nfunction baseIsEqualDeep(object, other, bitmask, customizer, equalFunc, stack) {\n var objIsArr = isArray(object),\n othIsArr = isArray(other),\n objTag = objIsArr ? arrayTag : getTag(object),\n othTag = othIsArr ? arrayTag : getTag(other);\n\n objTag = objTag == argsTag ? objectTag : objTag;\n othTag = othTag == argsTag ? objectTag : othTag;\n\n var objIsObj = objTag == objectTag,\n othIsObj = othTag == objectTag,\n isSameTag = objTag == othTag;\n\n if (isSameTag && isBuffer(object)) {\n if (!isBuffer(other)) {\n return false;\n }\n objIsArr = true;\n objIsObj = false;\n }\n if (isSameTag && !objIsObj) {\n stack || (stack = new Stack);\n return (objIsArr || isTypedArray(object))\n ? equalArrays(object, other, bitmask, customizer, equalFunc, stack)\n : equalByTag(object, other, objTag, bitmask, customizer, equalFunc, stack);\n }\n if (!(bitmask & COMPARE_PARTIAL_FLAG)) {\n var objIsWrapped = objIsObj && hasOwnProperty.call(object, '__wrapped__'),\n othIsWrapped = othIsObj && hasOwnProperty.call(other, '__wrapped__');\n\n if (objIsWrapped || othIsWrapped) {\n var objUnwrapped = objIsWrapped ? object.value() : object,\n othUnwrapped = othIsWrapped ? 
other.value() : other;\n\n stack || (stack = new Stack);\n return equalFunc(objUnwrapped, othUnwrapped, bitmask, customizer, stack);\n }\n }\n if (!isSameTag) {\n return false;\n }\n stack || (stack = new Stack);\n return equalObjects(object, other, bitmask, customizer, equalFunc, stack);\n}\n\nexport default baseIsEqualDeep;\n","import baseIsEqualDeep from './_baseIsEqualDeep.js';\nimport isObjectLike from './isObjectLike.js';\n\n/**\n * The base implementation of `_.isEqual` which supports partial comparisons\n * and tracks traversed objects.\n *\n * @private\n * @param {*} value The value to compare.\n * @param {*} other The other value to compare.\n * @param {boolean} bitmask The bitmask flags.\n * 1 - Unordered comparison\n * 2 - Partial comparison\n * @param {Function} [customizer] The function to customize comparisons.\n * @param {Object} [stack] Tracks traversed `value` and `other` objects.\n * @returns {boolean} Returns `true` if the values are equivalent, else `false`.\n */\nfunction baseIsEqual(value, other, bitmask, customizer, stack) {\n if (value === other) {\n return true;\n }\n if (value == null || other == null || (!isObjectLike(value) && !isObjectLike(other))) {\n return value !== value && other !== other;\n }\n return baseIsEqualDeep(value, other, bitmask, customizer, baseIsEqual, stack);\n}\n\nexport default baseIsEqual;\n","import Stack from './_Stack.js';\nimport baseIsEqual from './_baseIsEqual.js';\n\n/** Used to compose bitmasks for value comparisons. */\nvar COMPARE_PARTIAL_FLAG = 1,\n COMPARE_UNORDERED_FLAG = 2;\n\n/**\n * The base implementation of `_.isMatch` without support for iteratee shorthands.\n *\n * @private\n * @param {Object} object The object to inspect.\n * @param {Object} source The object of property values to match.\n * @param {Array} matchData The property names, values, and compare flags to match.\n * @param {Function} [customizer] The function to customize comparisons.\n * @returns {boolean} Returns `true` if `object` is a match, else `false`.\n */\nfunction baseIsMatch(object, source, matchData, customizer) {\n var index = matchData.length,\n length = index,\n noCustomizer = !customizer;\n\n if (object == null) {\n return !length;\n }\n object = Object(object);\n while (index--) {\n var data = matchData[index];\n if ((noCustomizer && data[2])\n ? data[1] !== object[data[0]]\n : !(data[0] in object)\n ) {\n return false;\n }\n }\n while (++index < length) {\n data = matchData[index];\n var key = data[0],\n objValue = object[key],\n srcValue = data[1];\n\n if (noCustomizer && data[2]) {\n if (objValue === undefined && !(key in object)) {\n return false;\n }\n } else {\n var stack = new Stack;\n if (customizer) {\n var result = customizer(objValue, srcValue, key, object, source, stack);\n }\n if (!(result === undefined\n ? baseIsEqual(srcValue, objValue, COMPARE_PARTIAL_FLAG | COMPARE_UNORDERED_FLAG, customizer, stack)\n : result\n )) {\n return false;\n }\n }\n }\n return true;\n}\n\nexport default baseIsMatch;\n","import isObject from './isObject.js';\n\n/**\n * Checks if `value` is suitable for strict equality comparisons, i.e. 
`===`.\n *\n * @private\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` if suitable for strict\n * equality comparisons, else `false`.\n */\nfunction isStrictComparable(value) {\n return value === value && !isObject(value);\n}\n\nexport default isStrictComparable;\n","import isStrictComparable from './_isStrictComparable.js';\nimport keys from './keys.js';\n\n/**\n * Gets the property names, values, and compare flags of `object`.\n *\n * @private\n * @param {Object} object The object to query.\n * @returns {Array} Returns the match data of `object`.\n */\nfunction getMatchData(object) {\n var result = keys(object),\n length = result.length;\n\n while (length--) {\n var key = result[length],\n value = object[key];\n\n result[length] = [key, value, isStrictComparable(value)];\n }\n return result;\n}\n\nexport default getMatchData;\n","/**\n * A specialized version of `matchesProperty` for source values suitable\n * for strict equality comparisons, i.e. `===`.\n *\n * @private\n * @param {string} key The key of the property to get.\n * @param {*} srcValue The value to match.\n * @returns {Function} Returns the new spec function.\n */\nfunction matchesStrictComparable(key, srcValue) {\n return function(object) {\n if (object == null) {\n return false;\n }\n return object[key] === srcValue &&\n (srcValue !== undefined || (key in Object(object)));\n };\n}\n\nexport default matchesStrictComparable;\n","import baseIsMatch from './_baseIsMatch.js';\nimport getMatchData from './_getMatchData.js';\nimport matchesStrictComparable from './_matchesStrictComparable.js';\n\n/**\n * The base implementation of `_.matches` which doesn't clone `source`.\n *\n * @private\n * @param {Object} source The object of property values to match.\n * @returns {Function} Returns the new spec function.\n */\nfunction baseMatches(source) {\n var matchData = getMatchData(source);\n if (matchData.length == 1 && matchData[0][2]) {\n return matchesStrictComparable(matchData[0][0], matchData[0][1]);\n }\n return function(object) {\n return object === source || baseIsMatch(object, source, matchData);\n };\n}\n\nexport default baseMatches;\n","import isArray from './isArray.js';\nimport isSymbol from './isSymbol.js';\n\n/** Used to match property names within property paths. */\nvar reIsDeepProp = /\\.|\\[(?:[^[\\]]*|([\"'])(?:(?!\\1)[^\\\\]|\\\\.)*?\\1)\\]/,\n reIsPlainProp = /^\\w*$/;\n\n/**\n * Checks if `value` is a property name and not a property path.\n *\n * @private\n * @param {*} value The value to check.\n * @param {Object} [object] The object to query keys on.\n * @returns {boolean} Returns `true` if `value` is a property name, else `false`.\n */\nfunction isKey(value, object) {\n if (isArray(value)) {\n return false;\n }\n var type = typeof value;\n if (type == 'number' || type == 'symbol' || type == 'boolean' ||\n value == null || isSymbol(value)) {\n return true;\n }\n return reIsPlainProp.test(value) || !reIsDeepProp.test(value) ||\n (object != null && value in Object(object));\n}\n\nexport default isKey;\n","import MapCache from './_MapCache.js';\n\n/** Error message constants. */\nvar FUNC_ERROR_TEXT = 'Expected a function';\n\n/**\n * Creates a function that memoizes the result of `func`. If `resolver` is\n * provided, it determines the cache key for storing the result based on the\n * arguments provided to the memoized function. By default, the first argument\n * provided to the memoized function is used as the map cache key. 
The `func`\n * is invoked with the `this` binding of the memoized function.\n *\n * **Note:** The cache is exposed as the `cache` property on the memoized\n * function. Its creation may be customized by replacing the `_.memoize.Cache`\n * constructor with one whose instances implement the\n * [`Map`](http://ecma-international.org/ecma-262/7.0/#sec-properties-of-the-map-prototype-object)\n * method interface of `clear`, `delete`, `get`, `has`, and `set`.\n *\n * @static\n * @memberOf _\n * @since 0.1.0\n * @category Function\n * @param {Function} func The function to have its output memoized.\n * @param {Function} [resolver] The function to resolve the cache key.\n * @returns {Function} Returns the new memoized function.\n * @example\n *\n * var object = { 'a': 1, 'b': 2 };\n * var other = { 'c': 3, 'd': 4 };\n *\n * var values = _.memoize(_.values);\n * values(object);\n * // => [1, 2]\n *\n * values(other);\n * // => [3, 4]\n *\n * object.a = 2;\n * values(object);\n * // => [1, 2]\n *\n * // Modify the result cache.\n * values.cache.set(object, ['a', 'b']);\n * values(object);\n * // => ['a', 'b']\n *\n * // Replace `_.memoize.Cache`.\n * _.memoize.Cache = WeakMap;\n */\nfunction memoize(func, resolver) {\n if (typeof func != 'function' || (resolver != null && typeof resolver != 'function')) {\n throw new TypeError(FUNC_ERROR_TEXT);\n }\n var memoized = function() {\n var args = arguments,\n key = resolver ? resolver.apply(this, args) : args[0],\n cache = memoized.cache;\n\n if (cache.has(key)) {\n return cache.get(key);\n }\n var result = func.apply(this, args);\n memoized.cache = cache.set(key, result) || cache;\n return result;\n };\n memoized.cache = new (memoize.Cache || MapCache);\n return memoized;\n}\n\n// Expose `MapCache`.\nmemoize.Cache = MapCache;\n\nexport default memoize;\n","import memoize from './memoize.js';\n\n/** Used as the maximum memoize cache size. */\nvar MAX_MEMOIZE_SIZE = 500;\n\n/**\n * A specialized version of `_.memoize` which clears the memoized function's\n * cache when it exceeds `MAX_MEMOIZE_SIZE`.\n *\n * @private\n * @param {Function} func The function to have its output memoized.\n * @returns {Function} Returns the new memoized function.\n */\nfunction memoizeCapped(func) {\n var result = memoize(func, function(key) {\n if (cache.size === MAX_MEMOIZE_SIZE) {\n cache.clear();\n }\n return key;\n });\n\n var cache = result.cache;\n return result;\n}\n\nexport default memoizeCapped;\n","import memoizeCapped from './_memoizeCapped.js';\n\n/** Used to match property names within property paths. */\nvar rePropName = /[^.[\\]]+|\\[(?:(-?\\d+(?:\\.\\d+)?)|([\"'])((?:(?!\\2)[^\\\\]|\\\\.)*?)\\2)\\]|(?=(?:\\.|\\[\\])(?:\\.|\\[\\]|$))/g;\n\n/** Used to match backslashes in property paths. */\nvar reEscapeChar = /\\\\(\\\\)?/g;\n\n/**\n * Converts `string` to a property path array.\n *\n * @private\n * @param {string} string The string to convert.\n * @returns {Array} Returns the property path array.\n */\nvar stringToPath = memoizeCapped(function(string) {\n var result = [];\n if (string.charCodeAt(0) === 46 /* . */) {\n result.push('');\n }\n string.replace(rePropName, function(match, number, quote, subString) {\n result.push(quote ? subString.replace(reEscapeChar, '$1') : (number || match));\n });\n return result;\n});\n\nexport default stringToPath;\n","import Symbol from './_Symbol.js';\nimport arrayMap from './_arrayMap.js';\nimport isArray from './isArray.js';\nimport isSymbol from './isSymbol.js';\n\n/** Used as references for various `Number` constants. 
*/\nvar INFINITY = 1 / 0;\n\n/** Used to convert symbols to primitives and strings. */\nvar symbolProto = Symbol ? Symbol.prototype : undefined,\n symbolToString = symbolProto ? symbolProto.toString : undefined;\n\n/**\n * The base implementation of `_.toString` which doesn't convert nullish\n * values to empty strings.\n *\n * @private\n * @param {*} value The value to process.\n * @returns {string} Returns the string.\n */\nfunction baseToString(value) {\n // Exit early for strings to avoid a performance hit in some environments.\n if (typeof value == 'string') {\n return value;\n }\n if (isArray(value)) {\n // Recursively convert values (susceptible to call stack limits).\n return arrayMap(value, baseToString) + '';\n }\n if (isSymbol(value)) {\n return symbolToString ? symbolToString.call(value) : '';\n }\n var result = (value + '');\n return (result == '0' && (1 / value) == -INFINITY) ? '-0' : result;\n}\n\nexport default baseToString;\n","import baseToString from './_baseToString.js';\n\n/**\n * Converts `value` to a string. An empty string is returned for `null`\n * and `undefined` values. The sign of `-0` is preserved.\n *\n * @static\n * @memberOf _\n * @since 4.0.0\n * @category Lang\n * @param {*} value The value to convert.\n * @returns {string} Returns the converted string.\n * @example\n *\n * _.toString(null);\n * // => ''\n *\n * _.toString(-0);\n * // => '-0'\n *\n * _.toString([1, 2, 3]);\n * // => '1,2,3'\n */\nfunction toString(value) {\n return value == null ? '' : baseToString(value);\n}\n\nexport default toString;\n","import isArray from './isArray.js';\nimport isKey from './_isKey.js';\nimport stringToPath from './_stringToPath.js';\nimport toString from './toString.js';\n\n/**\n * Casts `value` to a path array if it's not one.\n *\n * @private\n * @param {*} value The value to inspect.\n * @param {Object} [object] The object to query keys on.\n * @returns {Array} Returns the cast property path array.\n */\nfunction castPath(value, object) {\n if (isArray(value)) {\n return value;\n }\n return isKey(value, object) ? [value] : stringToPath(toString(value));\n}\n\nexport default castPath;\n","import isSymbol from './isSymbol.js';\n\n/** Used as references for various `Number` constants. */\nvar INFINITY = 1 / 0;\n\n/**\n * Converts `value` to a string key if it's not a string or symbol.\n *\n * @private\n * @param {*} value The value to inspect.\n * @returns {string|symbol} Returns the key.\n */\nfunction toKey(value) {\n if (typeof value == 'string' || isSymbol(value)) {\n return value;\n }\n var result = (value + '');\n return (result == '0' && (1 / value) == -INFINITY) ? '-0' : result;\n}\n\nexport default toKey;\n","import castPath from './_castPath.js';\nimport toKey from './_toKey.js';\n\n/**\n * The base implementation of `_.get` without support for default values.\n *\n * @private\n * @param {Object} object The object to query.\n * @param {Array|string} path The path of the property to get.\n * @returns {*} Returns the resolved value.\n */\nfunction baseGet(object, path) {\n path = castPath(path, object);\n\n var index = 0,\n length = path.length;\n\n while (object != null && index < length) {\n object = object[toKey(path[index++])];\n }\n return (index && index == length) ? object : undefined;\n}\n\nexport default baseGet;\n","import baseGet from './_baseGet.js';\n\n/**\n * Gets the value at `path` of `object`. 
If the resolved value is\n * `undefined`, the `defaultValue` is returned in its place.\n *\n * @static\n * @memberOf _\n * @since 3.7.0\n * @category Object\n * @param {Object} object The object to query.\n * @param {Array|string} path The path of the property to get.\n * @param {*} [defaultValue] The value returned for `undefined` resolved values.\n * @returns {*} Returns the resolved value.\n * @example\n *\n * var object = { 'a': [{ 'b': { 'c': 3 } }] };\n *\n * _.get(object, 'a[0].b.c');\n * // => 3\n *\n * _.get(object, ['a', '0', 'b', 'c']);\n * // => 3\n *\n * _.get(object, 'a.b.c', 'default');\n * // => 'default'\n */\nfunction get(object, path, defaultValue) {\n var result = object == null ? undefined : baseGet(object, path);\n return result === undefined ? defaultValue : result;\n}\n\nexport default get;\n","/**\n * The base implementation of `_.hasIn` without support for deep paths.\n *\n * @private\n * @param {Object} [object] The object to query.\n * @param {Array|string} key The key to check.\n * @returns {boolean} Returns `true` if `key` exists, else `false`.\n */\nfunction baseHasIn(object, key) {\n return object != null && key in Object(object);\n}\n\nexport default baseHasIn;\n","import castPath from './_castPath.js';\nimport isArguments from './isArguments.js';\nimport isArray from './isArray.js';\nimport isIndex from './_isIndex.js';\nimport isLength from './isLength.js';\nimport toKey from './_toKey.js';\n\n/**\n * Checks if `path` exists on `object`.\n *\n * @private\n * @param {Object} object The object to query.\n * @param {Array|string} path The path to check.\n * @param {Function} hasFunc The function to check properties.\n * @returns {boolean} Returns `true` if `path` exists, else `false`.\n */\nfunction hasPath(object, path, hasFunc) {\n path = castPath(path, object);\n\n var index = -1,\n length = path.length,\n result = false;\n\n while (++index < length) {\n var key = toKey(path[index]);\n if (!(result = object != null && hasFunc(object, key))) {\n break;\n }\n object = object[key];\n }\n if (result || ++index != length) {\n return result;\n }\n length = object == null ? 0 : object.length;\n return !!length && isLength(length) && isIndex(key, length) &&\n (isArray(object) || isArguments(object));\n}\n\nexport default hasPath;\n","import baseHasIn from './_baseHasIn.js';\nimport hasPath from './_hasPath.js';\n\n/**\n * Checks if `path` is a direct or inherited property of `object`.\n *\n * @static\n * @memberOf _\n * @since 4.0.0\n * @category Object\n * @param {Object} object The object to query.\n * @param {Array|string} path The path to check.\n * @returns {boolean} Returns `true` if `path` exists, else `false`.\n * @example\n *\n * var object = _.create({ 'a': _.create({ 'b': 2 }) });\n *\n * _.hasIn(object, 'a');\n * // => true\n *\n * _.hasIn(object, 'a.b');\n * // => true\n *\n * _.hasIn(object, ['a', 'b']);\n * // => true\n *\n * _.hasIn(object, 'b');\n * // => false\n */\nfunction hasIn(object, path) {\n return object != null && hasPath(object, path, baseHasIn);\n}\n\nexport default hasIn;\n","import baseIsEqual from './_baseIsEqual.js';\nimport get from './get.js';\nimport hasIn from './hasIn.js';\nimport isKey from './_isKey.js';\nimport isStrictComparable from './_isStrictComparable.js';\nimport matchesStrictComparable from './_matchesStrictComparable.js';\nimport toKey from './_toKey.js';\n\n/** Used to compose bitmasks for value comparisons. 
*/\nvar COMPARE_PARTIAL_FLAG = 1,\n COMPARE_UNORDERED_FLAG = 2;\n\n/**\n * The base implementation of `_.matchesProperty` which doesn't clone `srcValue`.\n *\n * @private\n * @param {string} path The path of the property to get.\n * @param {*} srcValue The value to match.\n * @returns {Function} Returns the new spec function.\n */\nfunction baseMatchesProperty(path, srcValue) {\n if (isKey(path) && isStrictComparable(srcValue)) {\n return matchesStrictComparable(toKey(path), srcValue);\n }\n return function(object) {\n var objValue = get(object, path);\n return (objValue === undefined && objValue === srcValue)\n ? hasIn(object, path)\n : baseIsEqual(srcValue, objValue, COMPARE_PARTIAL_FLAG | COMPARE_UNORDERED_FLAG);\n };\n}\n\nexport default baseMatchesProperty;\n","/**\n * The base implementation of `_.property` without support for deep paths.\n *\n * @private\n * @param {string} key The key of the property to get.\n * @returns {Function} Returns the new accessor function.\n */\nfunction baseProperty(key) {\n return function(object) {\n return object == null ? undefined : object[key];\n };\n}\n\nexport default baseProperty;\n","import baseGet from './_baseGet.js';\n\n/**\n * A specialized version of `baseProperty` which supports deep paths.\n *\n * @private\n * @param {Array|string} path The path of the property to get.\n * @returns {Function} Returns the new accessor function.\n */\nfunction basePropertyDeep(path) {\n return function(object) {\n return baseGet(object, path);\n };\n}\n\nexport default basePropertyDeep;\n","import baseProperty from './_baseProperty.js';\nimport basePropertyDeep from './_basePropertyDeep.js';\nimport isKey from './_isKey.js';\nimport toKey from './_toKey.js';\n\n/**\n * Creates a function that returns the value at `path` of a given object.\n *\n * @static\n * @memberOf _\n * @since 2.4.0\n * @category Util\n * @param {Array|string} path The path of the property to get.\n * @returns {Function} Returns the new accessor function.\n * @example\n *\n * var objects = [\n * { 'a': { 'b': 2 } },\n * { 'a': { 'b': 1 } }\n * ];\n *\n * _.map(objects, _.property('a.b'));\n * // => [2, 1]\n *\n * _.map(_.sortBy(objects, _.property(['a', 'b'])), 'a.b');\n * // => [1, 2]\n */\nfunction property(path) {\n return isKey(path) ? baseProperty(toKey(path)) : basePropertyDeep(path);\n}\n\nexport default property;\n","import baseMatches from './_baseMatches.js';\nimport baseMatchesProperty from './_baseMatchesProperty.js';\nimport identity from './identity.js';\nimport isArray from './isArray.js';\nimport property from './property.js';\n\n/**\n * The base implementation of `_.iteratee`.\n *\n * @private\n * @param {*} [value=_.identity] The value to convert to an iteratee.\n * @returns {Function} Returns the iteratee.\n */\nfunction baseIteratee(value) {\n // Don't store the `typeof` result in a variable to avoid a JIT bug in Safari 9.\n // See https://bugs.webkit.org/show_bug.cgi?id=156034 for more details.\n if (typeof value == 'function') {\n return value;\n }\n if (value == null) {\n return identity;\n }\n if (typeof value == 'object') {\n return isArray(value)\n ? 
baseMatchesProperty(value[0], value[1])\n : baseMatches(value);\n }\n return property(value);\n}\n\nexport default baseIteratee;\n","import baseEach from './_baseEach.js';\nimport isArrayLike from './isArrayLike.js';\n\n/**\n * The base implementation of `_.map` without support for iteratee shorthands.\n *\n * @private\n * @param {Array|Object} collection The collection to iterate over.\n * @param {Function} iteratee The function invoked per iteration.\n * @returns {Array} Returns the new mapped array.\n */\nfunction baseMap(collection, iteratee) {\n var index = -1,\n result = isArrayLike(collection) ? Array(collection.length) : [];\n\n baseEach(collection, function(value, key, collection) {\n result[++index] = iteratee(value, key, collection);\n });\n return result;\n}\n\nexport default baseMap;\n","import arrayMap from './_arrayMap.js';\nimport baseIteratee from './_baseIteratee.js';\nimport baseMap from './_baseMap.js';\nimport isArray from './isArray.js';\n\n/**\n * Creates an array of values by running each element in `collection` thru\n * `iteratee`. The iteratee is invoked with three arguments:\n * (value, index|key, collection).\n *\n * Many lodash methods are guarded to work as iteratees for methods like\n * `_.every`, `_.filter`, `_.map`, `_.mapValues`, `_.reject`, and `_.some`.\n *\n * The guarded methods are:\n * `ary`, `chunk`, `curry`, `curryRight`, `drop`, `dropRight`, `every`,\n * `fill`, `invert`, `parseInt`, `random`, `range`, `rangeRight`, `repeat`,\n * `sampleSize`, `slice`, `some`, `sortBy`, `split`, `take`, `takeRight`,\n * `template`, `trim`, `trimEnd`, `trimStart`, and `words`\n *\n * @static\n * @memberOf _\n * @since 0.1.0\n * @category Collection\n * @param {Array|Object} collection The collection to iterate over.\n * @param {Function} [iteratee=_.identity] The function invoked per iteration.\n * @returns {Array} Returns the new mapped array.\n * @example\n *\n * function square(n) {\n * return n * n;\n * }\n *\n * _.map([4, 8], square);\n * // => [16, 64]\n *\n * _.map({ 'a': 4, 'b': 8 }, square);\n * // => [16, 64] (iteration order is not guaranteed)\n *\n * var users = [\n * { 'user': 'barney' },\n * { 'user': 'fred' }\n * ];\n *\n * // The `_.property` iteratee shorthand.\n * _.map(users, 'user');\n * // => ['barney', 'fred']\n */\nfunction map(collection, iteratee) {\n var func = isArray(collection) ? 
arrayMap : baseMap;\n return func(collection, baseIteratee(iteratee, 3));\n}\n\nexport default map;\n","import React from 'react';\nimport reactCSS from 'reactcss';\nimport map from 'lodash-es/map';\n\nimport { Swatch } from '../common';\n\nexport var BlockSwatches = function BlockSwatches(_ref) {\n var colors = _ref.colors,\n onClick = _ref.onClick,\n onSwatchHover = _ref.onSwatchHover;\n\n var styles = reactCSS({\n 'default': {\n swatches: {\n marginRight: '-10px'\n },\n swatch: {\n width: '22px',\n height: '22px',\n float: 'left',\n marginRight: '10px',\n marginBottom: '10px',\n borderRadius: '4px'\n },\n clear: {\n clear: 'both'\n }\n }\n });\n\n return React.createElement(\n 'div',\n { style: styles.swatches },\n map(colors, function (c) {\n return React.createElement(Swatch, {\n key: c,\n color: c,\n style: styles.swatch,\n onClick: onClick,\n onHover: onSwatchHover,\n focusStyle: {\n boxShadow: '0 0 4px ' + c\n }\n });\n }),\n React.createElement('div', { style: styles.clear })\n );\n};\n\nexport default BlockSwatches;","import React from 'react';\nimport PropTypes from 'prop-types';\nimport reactCSS from 'reactcss';\nimport merge from 'lodash-es/merge';\nimport * as color from '../../helpers/color';\n\nimport { ColorWrap, EditableInput, Checkboard } from '../common';\nimport BlockSwatches from './BlockSwatches';\n\nexport var Block = function Block(_ref) {\n var onChange = _ref.onChange,\n onSwatchHover = _ref.onSwatchHover,\n hex = _ref.hex,\n colors = _ref.colors,\n width = _ref.width,\n triangle = _ref.triangle,\n _ref$styles = _ref.styles,\n passedStyles = _ref$styles === undefined ? {} : _ref$styles,\n _ref$className = _ref.className,\n className = _ref$className === undefined ? '' : _ref$className;\n\n var transparent = hex === 'transparent';\n var handleChange = function handleChange(hexCode, e) {\n color.isValidHex(hexCode) && onChange({\n hex: hexCode,\n source: 'hex'\n }, e);\n };\n\n var styles = reactCSS(merge({\n 'default': {\n card: {\n width: width,\n background: '#fff',\n boxShadow: '0 1px rgba(0,0,0,.1)',\n borderRadius: '6px',\n position: 'relative'\n },\n head: {\n height: '110px',\n background: hex,\n borderRadius: '6px 6px 0 0',\n display: 'flex',\n alignItems: 'center',\n justifyContent: 'center',\n position: 'relative'\n },\n body: {\n padding: '10px'\n },\n label: {\n fontSize: '18px',\n color: color.getContrastingColor(hex),\n position: 'relative'\n },\n triangle: {\n width: '0px',\n height: '0px',\n borderStyle: 'solid',\n borderWidth: '0 10px 10px 10px',\n borderColor: 'transparent transparent ' + hex + ' transparent',\n position: 'absolute',\n top: '-10px',\n left: '50%',\n marginLeft: '-10px'\n },\n input: {\n width: '100%',\n fontSize: '12px',\n color: '#666',\n border: '0px',\n outline: 'none',\n height: '22px',\n boxShadow: 'inset 0 0 0 1px #ddd',\n borderRadius: '4px',\n padding: '0 7px',\n boxSizing: 'border-box'\n }\n },\n 'hide-triangle': {\n triangle: {\n display: 'none'\n }\n }\n }, passedStyles), { 'hide-triangle': triangle === 'hide' });\n\n return React.createElement(\n 'div',\n { style: styles.card, className: 'block-picker ' + className },\n React.createElement('div', { style: styles.triangle }),\n React.createElement(\n 'div',\n { style: styles.head },\n transparent && React.createElement(Checkboard, { borderRadius: '6px 6px 0 0' }),\n React.createElement(\n 'div',\n { style: styles.label },\n hex\n )\n ),\n React.createElement(\n 'div',\n { style: styles.body },\n React.createElement(BlockSwatches, { colors: colors, onClick: 
handleChange, onSwatchHover: onSwatchHover }),\n React.createElement(EditableInput, {\n style: { input: styles.input },\n value: hex,\n onChange: handleChange\n })\n )\n );\n};\n\nBlock.propTypes = {\n width: PropTypes.oneOfType([PropTypes.string, PropTypes.number]),\n colors: PropTypes.arrayOf(PropTypes.string),\n triangle: PropTypes.oneOf(['top', 'hide']),\n styles: PropTypes.object\n};\n\nBlock.defaultProps = {\n width: 170,\n colors: ['#D9E3F0', '#F47373', '#697689', '#37D67A', '#2CCCE4', '#555555', '#dce775', '#ff8a65', '#ba68c8'],\n triangle: 'top',\n styles: {}\n};\n\nexport default ColorWrap(Block);","export var red = {\"50\":\"#ffebee\",\"100\":\"#ffcdd2\",\"200\":\"#ef9a9a\",\"300\":\"#e57373\",\"400\":\"#ef5350\",\"500\":\"#f44336\",\"600\":\"#e53935\",\"700\":\"#d32f2f\",\"800\":\"#c62828\",\"900\":\"#b71c1c\",\"a100\":\"#ff8a80\",\"a200\":\"#ff5252\",\"a400\":\"#ff1744\",\"a700\":\"#d50000\"};\nexport var pink = {\"50\":\"#fce4ec\",\"100\":\"#f8bbd0\",\"200\":\"#f48fb1\",\"300\":\"#f06292\",\"400\":\"#ec407a\",\"500\":\"#e91e63\",\"600\":\"#d81b60\",\"700\":\"#c2185b\",\"800\":\"#ad1457\",\"900\":\"#880e4f\",\"a100\":\"#ff80ab\",\"a200\":\"#ff4081\",\"a400\":\"#f50057\",\"a700\":\"#c51162\"};\nexport var purple = {\"50\":\"#f3e5f5\",\"100\":\"#e1bee7\",\"200\":\"#ce93d8\",\"300\":\"#ba68c8\",\"400\":\"#ab47bc\",\"500\":\"#9c27b0\",\"600\":\"#8e24aa\",\"700\":\"#7b1fa2\",\"800\":\"#6a1b9a\",\"900\":\"#4a148c\",\"a100\":\"#ea80fc\",\"a200\":\"#e040fb\",\"a400\":\"#d500f9\",\"a700\":\"#aa00ff\"};\nexport var deepPurple = {\"50\":\"#ede7f6\",\"100\":\"#d1c4e9\",\"200\":\"#b39ddb\",\"300\":\"#9575cd\",\"400\":\"#7e57c2\",\"500\":\"#673ab7\",\"600\":\"#5e35b1\",\"700\":\"#512da8\",\"800\":\"#4527a0\",\"900\":\"#311b92\",\"a100\":\"#b388ff\",\"a200\":\"#7c4dff\",\"a400\":\"#651fff\",\"a700\":\"#6200ea\"};\nexport var indigo = {\"50\":\"#e8eaf6\",\"100\":\"#c5cae9\",\"200\":\"#9fa8da\",\"300\":\"#7986cb\",\"400\":\"#5c6bc0\",\"500\":\"#3f51b5\",\"600\":\"#3949ab\",\"700\":\"#303f9f\",\"800\":\"#283593\",\"900\":\"#1a237e\",\"a100\":\"#8c9eff\",\"a200\":\"#536dfe\",\"a400\":\"#3d5afe\",\"a700\":\"#304ffe\"};\nexport var blue = {\"50\":\"#e3f2fd\",\"100\":\"#bbdefb\",\"200\":\"#90caf9\",\"300\":\"#64b5f6\",\"400\":\"#42a5f5\",\"500\":\"#2196f3\",\"600\":\"#1e88e5\",\"700\":\"#1976d2\",\"800\":\"#1565c0\",\"900\":\"#0d47a1\",\"a100\":\"#82b1ff\",\"a200\":\"#448aff\",\"a400\":\"#2979ff\",\"a700\":\"#2962ff\"};\nexport var lightBlue = {\"50\":\"#e1f5fe\",\"100\":\"#b3e5fc\",\"200\":\"#81d4fa\",\"300\":\"#4fc3f7\",\"400\":\"#29b6f6\",\"500\":\"#03a9f4\",\"600\":\"#039be5\",\"700\":\"#0288d1\",\"800\":\"#0277bd\",\"900\":\"#01579b\",\"a100\":\"#80d8ff\",\"a200\":\"#40c4ff\",\"a400\":\"#00b0ff\",\"a700\":\"#0091ea\"};\nexport var cyan = {\"50\":\"#e0f7fa\",\"100\":\"#b2ebf2\",\"200\":\"#80deea\",\"300\":\"#4dd0e1\",\"400\":\"#26c6da\",\"500\":\"#00bcd4\",\"600\":\"#00acc1\",\"700\":\"#0097a7\",\"800\":\"#00838f\",\"900\":\"#006064\",\"a100\":\"#84ffff\",\"a200\":\"#18ffff\",\"a400\":\"#00e5ff\",\"a700\":\"#00b8d4\"};\nexport var teal = {\"50\":\"#e0f2f1\",\"100\":\"#b2dfdb\",\"200\":\"#80cbc4\",\"300\":\"#4db6ac\",\"400\":\"#26a69a\",\"500\":\"#009688\",\"600\":\"#00897b\",\"700\":\"#00796b\",\"800\":\"#00695c\",\"900\":\"#004d40\",\"a100\":\"#a7ffeb\",\"a200\":\"#64ffda\",\"a400\":\"#1de9b6\",\"a700\":\"#00bfa5\"};\nexport var green = 
{\"50\":\"#e8f5e9\",\"100\":\"#c8e6c9\",\"200\":\"#a5d6a7\",\"300\":\"#81c784\",\"400\":\"#66bb6a\",\"500\":\"#4caf50\",\"600\":\"#43a047\",\"700\":\"#388e3c\",\"800\":\"#2e7d32\",\"900\":\"#1b5e20\",\"a100\":\"#b9f6ca\",\"a200\":\"#69f0ae\",\"a400\":\"#00e676\",\"a700\":\"#00c853\"};\nexport var lightGreen = {\"50\":\"#f1f8e9\",\"100\":\"#dcedc8\",\"200\":\"#c5e1a5\",\"300\":\"#aed581\",\"400\":\"#9ccc65\",\"500\":\"#8bc34a\",\"600\":\"#7cb342\",\"700\":\"#689f38\",\"800\":\"#558b2f\",\"900\":\"#33691e\",\"a100\":\"#ccff90\",\"a200\":\"#b2ff59\",\"a400\":\"#76ff03\",\"a700\":\"#64dd17\"};\nexport var lime = {\"50\":\"#f9fbe7\",\"100\":\"#f0f4c3\",\"200\":\"#e6ee9c\",\"300\":\"#dce775\",\"400\":\"#d4e157\",\"500\":\"#cddc39\",\"600\":\"#c0ca33\",\"700\":\"#afb42b\",\"800\":\"#9e9d24\",\"900\":\"#827717\",\"a100\":\"#f4ff81\",\"a200\":\"#eeff41\",\"a400\":\"#c6ff00\",\"a700\":\"#aeea00\"};\nexport var yellow = {\"50\":\"#fffde7\",\"100\":\"#fff9c4\",\"200\":\"#fff59d\",\"300\":\"#fff176\",\"400\":\"#ffee58\",\"500\":\"#ffeb3b\",\"600\":\"#fdd835\",\"700\":\"#fbc02d\",\"800\":\"#f9a825\",\"900\":\"#f57f17\",\"a100\":\"#ffff8d\",\"a200\":\"#ffff00\",\"a400\":\"#ffea00\",\"a700\":\"#ffd600\"};\nexport var amber = {\"50\":\"#fff8e1\",\"100\":\"#ffecb3\",\"200\":\"#ffe082\",\"300\":\"#ffd54f\",\"400\":\"#ffca28\",\"500\":\"#ffc107\",\"600\":\"#ffb300\",\"700\":\"#ffa000\",\"800\":\"#ff8f00\",\"900\":\"#ff6f00\",\"a100\":\"#ffe57f\",\"a200\":\"#ffd740\",\"a400\":\"#ffc400\",\"a700\":\"#ffab00\"};\nexport var orange = {\"50\":\"#fff3e0\",\"100\":\"#ffe0b2\",\"200\":\"#ffcc80\",\"300\":\"#ffb74d\",\"400\":\"#ffa726\",\"500\":\"#ff9800\",\"600\":\"#fb8c00\",\"700\":\"#f57c00\",\"800\":\"#ef6c00\",\"900\":\"#e65100\",\"a100\":\"#ffd180\",\"a200\":\"#ffab40\",\"a400\":\"#ff9100\",\"a700\":\"#ff6d00\"};\nexport var deepOrange = {\"50\":\"#fbe9e7\",\"100\":\"#ffccbc\",\"200\":\"#ffab91\",\"300\":\"#ff8a65\",\"400\":\"#ff7043\",\"500\":\"#ff5722\",\"600\":\"#f4511e\",\"700\":\"#e64a19\",\"800\":\"#d84315\",\"900\":\"#bf360c\",\"a100\":\"#ff9e80\",\"a200\":\"#ff6e40\",\"a400\":\"#ff3d00\",\"a700\":\"#dd2c00\"};\nexport var brown = {\"50\":\"#efebe9\",\"100\":\"#d7ccc8\",\"200\":\"#bcaaa4\",\"300\":\"#a1887f\",\"400\":\"#8d6e63\",\"500\":\"#795548\",\"600\":\"#6d4c41\",\"700\":\"#5d4037\",\"800\":\"#4e342e\",\"900\":\"#3e2723\"};\nexport var grey = {\"50\":\"#fafafa\",\"100\":\"#f5f5f5\",\"200\":\"#eeeeee\",\"300\":\"#e0e0e0\",\"400\":\"#bdbdbd\",\"500\":\"#9e9e9e\",\"600\":\"#757575\",\"700\":\"#616161\",\"800\":\"#424242\",\"900\":\"#212121\"};\nexport var blueGrey = {\"50\":\"#eceff1\",\"100\":\"#cfd8dc\",\"200\":\"#b0bec5\",\"300\":\"#90a4ae\",\"400\":\"#78909c\",\"500\":\"#607d8b\",\"600\":\"#546e7a\",\"700\":\"#455a64\",\"800\":\"#37474f\",\"900\":\"#263238\"};\nexport var darkText = {\"primary\":\"rgba(0, 0, 0, 0.87)\",\"secondary\":\"rgba(0, 0, 0, 0.54)\",\"disabled\":\"rgba(0, 0, 0, 0.38)\",\"dividers\":\"rgba(0, 0, 0, 0.12)\"};\nexport var lightText = {\"primary\":\"rgba(255, 255, 255, 1)\",\"secondary\":\"rgba(255, 255, 255, 0.7)\",\"disabled\":\"rgba(255, 255, 255, 0.5)\",\"dividers\":\"rgba(255, 255, 255, 0.12)\"};\nexport var darkIcons = {\"active\":\"rgba(0, 0, 0, 0.54)\",\"inactive\":\"rgba(0, 0, 0, 0.38)\"};\nexport var lightIcons = {\"active\":\"rgba(255, 255, 255, 1)\",\"inactive\":\"rgba(255, 255, 255, 0.5)\"};\nexport var white = \"#ffffff\";\nexport var black = \"#000000\";\n\nexport default {\n red: red,\n pink: pink,\n purple: purple,\n deepPurple: deepPurple,\n indigo: indigo,\n 
blue: blue,\n lightBlue: lightBlue,\n cyan: cyan,\n teal: teal,\n green: green,\n lightGreen: lightGreen,\n lime: lime,\n yellow: yellow,\n amber: amber,\n orange: orange,\n deepOrange: deepOrange,\n brown: brown,\n grey: grey,\n blueGrey: blueGrey,\n darkText: darkText,\n lightText: lightText,\n darkIcons: darkIcons,\n lightIcons: lightIcons,\n white: white,\n black: black\n};\n","import React from 'react';\nimport reactCSS, { handleHover } from 'reactcss';\n\nimport { Swatch } from '../common';\n\nexport var CircleSwatch = function CircleSwatch(_ref) {\n var color = _ref.color,\n onClick = _ref.onClick,\n onSwatchHover = _ref.onSwatchHover,\n hover = _ref.hover,\n active = _ref.active,\n circleSize = _ref.circleSize,\n circleSpacing = _ref.circleSpacing;\n\n var styles = reactCSS({\n 'default': {\n swatch: {\n width: circleSize,\n height: circleSize,\n marginRight: circleSpacing,\n marginBottom: circleSpacing,\n transform: 'scale(1)',\n transition: '100ms transform ease'\n },\n Swatch: {\n borderRadius: '50%',\n background: 'transparent',\n boxShadow: 'inset 0 0 0 ' + (circleSize / 2 + 1) + 'px ' + color,\n transition: '100ms box-shadow ease'\n }\n },\n 'hover': {\n swatch: {\n transform: 'scale(1.2)'\n }\n },\n 'active': {\n Swatch: {\n boxShadow: 'inset 0 0 0 3px ' + color\n }\n }\n }, { hover: hover, active: active });\n\n return React.createElement(\n 'div',\n { style: styles.swatch },\n React.createElement(Swatch, {\n style: styles.Swatch,\n color: color,\n onClick: onClick,\n onHover: onSwatchHover,\n focusStyle: { boxShadow: styles.Swatch.boxShadow + ', 0 0 5px ' + color }\n })\n );\n};\n\nCircleSwatch.defaultProps = {\n circleSize: 28,\n circleSpacing: 14\n};\n\nexport default handleHover(CircleSwatch);","import React from 'react';\nimport PropTypes from 'prop-types';\nimport reactCSS from 'reactcss';\nimport map from 'lodash-es/map';\nimport merge from 'lodash-es/merge';\nimport * as material from 'material-colors';\n\nimport { ColorWrap } from '../common';\nimport CircleSwatch from './CircleSwatch';\n\nexport var Circle = function Circle(_ref) {\n var width = _ref.width,\n onChange = _ref.onChange,\n onSwatchHover = _ref.onSwatchHover,\n colors = _ref.colors,\n hex = _ref.hex,\n circleSize = _ref.circleSize,\n _ref$styles = _ref.styles,\n passedStyles = _ref$styles === undefined ? {} : _ref$styles,\n circleSpacing = _ref.circleSpacing,\n _ref$className = _ref.className,\n className = _ref$className === undefined ? 
'' : _ref$className;\n\n var styles = reactCSS(merge({\n 'default': {\n card: {\n width: width,\n display: 'flex',\n flexWrap: 'wrap',\n marginRight: -circleSpacing,\n marginBottom: -circleSpacing\n }\n }\n }, passedStyles));\n\n var handleChange = function handleChange(hexCode, e) {\n return onChange({ hex: hexCode, source: 'hex' }, e);\n };\n\n return React.createElement(\n 'div',\n { style: styles.card, className: 'circle-picker ' + className },\n map(colors, function (c) {\n return React.createElement(CircleSwatch, {\n key: c,\n color: c,\n onClick: handleChange,\n onSwatchHover: onSwatchHover,\n active: hex === c.toLowerCase(),\n circleSize: circleSize,\n circleSpacing: circleSpacing\n });\n })\n );\n};\n\nCircle.propTypes = {\n width: PropTypes.oneOfType([PropTypes.string, PropTypes.number]),\n circleSize: PropTypes.number,\n circleSpacing: PropTypes.number,\n styles: PropTypes.object\n};\n\nCircle.defaultProps = {\n width: 252,\n circleSize: 28,\n circleSpacing: 14,\n colors: [material.red['500'], material.pink['500'], material.purple['500'], material.deepPurple['500'], material.indigo['500'], material.blue['500'], material.lightBlue['500'], material.cyan['500'], material.teal['500'], material.green['500'], material.lightGreen['500'], material.lime['500'], material.yellow['500'], material.amber['500'], material.orange['500'], material.deepOrange['500'], material.brown['500'], material.blueGrey['500']],\n styles: {}\n};\n\nexport default ColorWrap(Circle);","/**\n * Checks if `value` is `undefined`.\n *\n * @static\n * @since 0.1.0\n * @memberOf _\n * @category Lang\n * @param {*} value The value to check.\n * @returns {boolean} Returns `true` if `value` is `undefined`, else `false`.\n * @example\n *\n * _.isUndefined(void 0);\n * // => true\n *\n * _.isUndefined(null);\n * // => false\n */\nfunction isUndefined(value) {\n return value === undefined;\n}\n\nexport default isUndefined;\n","var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? 
Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }\n\n/* eslint-disable react/no-did-mount-set-state, no-param-reassign */\n\nimport React from 'react';\nimport reactCSS from 'reactcss';\nimport * as color from '../../helpers/color';\nimport isUndefined from 'lodash-es/isUndefined';\n\nimport { EditableInput } from '../common';\nimport UnfoldMoreHorizontalIcon from '@icons/material/UnfoldMoreHorizontalIcon';\n\nexport var ChromeFields = function (_React$Component) {\n _inherits(ChromeFields, _React$Component);\n\n function ChromeFields(props) {\n _classCallCheck(this, ChromeFields);\n\n var _this = _possibleConstructorReturn(this, (ChromeFields.__proto__ || Object.getPrototypeOf(ChromeFields)).call(this));\n\n _this.toggleViews = function () {\n if (_this.state.view === 'hex') {\n _this.setState({ view: 'rgb' });\n } else if (_this.state.view === 'rgb') {\n _this.setState({ view: 'hsl' });\n } else if (_this.state.view === 'hsl') {\n if (_this.props.hsl.a === 1) {\n _this.setState({ view: 'hex' });\n } else {\n _this.setState({ view: 'rgb' });\n }\n }\n };\n\n _this.handleChange = function (data, e) {\n if (data.hex) {\n color.isValidHex(data.hex) && _this.props.onChange({\n hex: data.hex,\n source: 'hex'\n }, e);\n } else if (data.r || data.g || data.b) {\n _this.props.onChange({\n r: data.r || _this.props.rgb.r,\n g: data.g || _this.props.rgb.g,\n b: data.b || _this.props.rgb.b,\n source: 'rgb'\n }, e);\n } else if (data.a) {\n if (data.a < 0) {\n data.a = 0;\n } else if (data.a > 1) {\n data.a = 1;\n }\n\n _this.props.onChange({\n h: _this.props.hsl.h,\n s: _this.props.hsl.s,\n l: _this.props.hsl.l,\n a: Math.round(data.a * 100) / 100,\n source: 'rgb'\n }, e);\n } else if (data.h || data.s || data.l) {\n // Remove any occurances of '%'.\n if (typeof data.s === 'string' && data.s.includes('%')) {\n data.s = data.s.replace('%', '');\n }\n if (typeof data.l === 'string' && data.l.includes('%')) {\n data.l = data.l.replace('%', '');\n }\n\n // We store HSL as a unit interval so we need to override the 1 input to 0.01\n if (data.s == 1) {\n data.s = 0.01;\n } else if (data.l == 1) {\n data.l = 0.01;\n }\n\n _this.props.onChange({\n h: data.h || _this.props.hsl.h,\n s: Number(!isUndefined(data.s) ? data.s : _this.props.hsl.s),\n l: Number(!isUndefined(data.l) ? 
data.l : _this.props.hsl.l),\n source: 'hsl'\n }, e);\n }\n };\n\n _this.showHighlight = function (e) {\n e.currentTarget.style.background = '#eee';\n };\n\n _this.hideHighlight = function (e) {\n e.currentTarget.style.background = 'transparent';\n };\n\n if (props.hsl.a !== 1 && props.view === \"hex\") {\n _this.state = {\n view: \"rgb\"\n };\n } else {\n _this.state = {\n view: props.view\n };\n }\n return _this;\n }\n\n _createClass(ChromeFields, [{\n key: 'render',\n value: function render() {\n var _this2 = this;\n\n var styles = reactCSS({\n 'default': {\n wrap: {\n paddingTop: '16px',\n display: 'flex'\n },\n fields: {\n flex: '1',\n display: 'flex',\n marginLeft: '-6px'\n },\n field: {\n paddingLeft: '6px',\n width: '100%'\n },\n alpha: {\n paddingLeft: '6px',\n width: '100%'\n },\n toggle: {\n width: '32px',\n textAlign: 'right',\n position: 'relative'\n },\n icon: {\n marginRight: '-4px',\n marginTop: '12px',\n cursor: 'pointer',\n position: 'relative'\n },\n iconHighlight: {\n position: 'absolute',\n width: '24px',\n height: '28px',\n background: '#eee',\n borderRadius: '4px',\n top: '10px',\n left: '12px',\n display: 'none'\n },\n input: {\n fontSize: '11px',\n color: '#333',\n width: '100%',\n borderRadius: '2px',\n border: 'none',\n boxShadow: 'inset 0 0 0 1px #dadada',\n height: '21px',\n textAlign: 'center'\n },\n label: {\n textTransform: 'uppercase',\n fontSize: '11px',\n lineHeight: '11px',\n color: '#969696',\n textAlign: 'center',\n display: 'block',\n marginTop: '12px'\n },\n svg: {\n fill: '#333',\n width: '24px',\n height: '24px',\n border: '1px transparent solid',\n borderRadius: '5px'\n }\n },\n 'disableAlpha': {\n alpha: {\n display: 'none'\n }\n }\n }, this.props, this.state);\n\n var fields = void 0;\n if (this.state.view === 'hex') {\n fields = React.createElement(\n 'div',\n { style: styles.fields, className: 'flexbox-fix' },\n React.createElement(\n 'div',\n { style: styles.field },\n React.createElement(EditableInput, {\n style: { input: styles.input, label: styles.label },\n label: 'hex', value: this.props.hex,\n onChange: this.handleChange\n })\n )\n );\n } else if (this.state.view === 'rgb') {\n fields = React.createElement(\n 'div',\n { style: styles.fields, className: 'flexbox-fix' },\n React.createElement(\n 'div',\n { style: styles.field },\n React.createElement(EditableInput, {\n style: { input: styles.input, label: styles.label },\n label: 'r',\n value: this.props.rgb.r,\n onChange: this.handleChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.field },\n React.createElement(EditableInput, {\n style: { input: styles.input, label: styles.label },\n label: 'g',\n value: this.props.rgb.g,\n onChange: this.handleChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.field },\n React.createElement(EditableInput, {\n style: { input: styles.input, label: styles.label },\n label: 'b',\n value: this.props.rgb.b,\n onChange: this.handleChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.alpha },\n React.createElement(EditableInput, {\n style: { input: styles.input, label: styles.label },\n label: 'a',\n value: this.props.rgb.a,\n arrowOffset: 0.01,\n onChange: this.handleChange\n })\n )\n );\n } else if (this.state.view === 'hsl') {\n fields = React.createElement(\n 'div',\n { style: styles.fields, className: 'flexbox-fix' },\n React.createElement(\n 'div',\n { style: styles.field },\n React.createElement(EditableInput, {\n style: { input: styles.input, label: styles.label },\n label: 'h',\n value: 
Math.round(this.props.hsl.h),\n onChange: this.handleChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.field },\n React.createElement(EditableInput, {\n style: { input: styles.input, label: styles.label },\n label: 's',\n value: Math.round(this.props.hsl.s * 100) + '%',\n onChange: this.handleChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.field },\n React.createElement(EditableInput, {\n style: { input: styles.input, label: styles.label },\n label: 'l',\n value: Math.round(this.props.hsl.l * 100) + '%',\n onChange: this.handleChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.alpha },\n React.createElement(EditableInput, {\n style: { input: styles.input, label: styles.label },\n label: 'a',\n value: this.props.hsl.a,\n arrowOffset: 0.01,\n onChange: this.handleChange\n })\n )\n );\n }\n\n return React.createElement(\n 'div',\n { style: styles.wrap, className: 'flexbox-fix' },\n fields,\n React.createElement(\n 'div',\n { style: styles.toggle },\n React.createElement(\n 'div',\n { style: styles.icon, onClick: this.toggleViews, ref: function ref(icon) {\n return _this2.icon = icon;\n } },\n React.createElement(UnfoldMoreHorizontalIcon, {\n style: styles.svg,\n onMouseOver: this.showHighlight,\n onMouseEnter: this.showHighlight,\n onMouseOut: this.hideHighlight\n })\n )\n )\n );\n }\n }], [{\n key: 'getDerivedStateFromProps',\n value: function getDerivedStateFromProps(nextProps, state) {\n if (nextProps.hsl.a !== 1 && state.view === 'hex') {\n return { view: 'rgb' };\n }\n return null;\n }\n }]);\n\n return ChromeFields;\n}(React.Component);\n\nChromeFields.defaultProps = {\n view: \"hex\"\n};\n\nexport default ChromeFields;","import React from 'react';\nimport reactCSS from 'reactcss';\n\nexport var ChromePointer = function ChromePointer() {\n var styles = reactCSS({\n 'default': {\n picker: {\n width: '12px',\n height: '12px',\n borderRadius: '6px',\n transform: 'translate(-6px, -1px)',\n backgroundColor: 'rgb(248, 248, 248)',\n boxShadow: '0 1px 4px 0 rgba(0, 0, 0, 0.37)'\n }\n }\n });\n\n return React.createElement('div', { style: styles.picker });\n};\n\nexport default ChromePointer;","import React from 'react';\nimport reactCSS from 'reactcss';\n\nexport var ChromePointerCircle = function ChromePointerCircle() {\n var styles = reactCSS({\n 'default': {\n picker: {\n width: '12px',\n height: '12px',\n borderRadius: '6px',\n boxShadow: 'inset 0 0 0 1px #fff',\n transform: 'translate(-6px, -6px)'\n }\n }\n });\n\n return React.createElement('div', { style: styles.picker });\n};\n\nexport default ChromePointerCircle;","import React from 'react';\nimport PropTypes from 'prop-types';\nimport reactCSS from 'reactcss';\nimport merge from 'lodash-es/merge';\n\nimport { ColorWrap, Saturation, Hue, Alpha, Checkboard } from '../common';\nimport ChromeFields from './ChromeFields';\nimport ChromePointer from './ChromePointer';\nimport ChromePointerCircle from './ChromePointerCircle';\n\nexport var Chrome = function Chrome(_ref) {\n var width = _ref.width,\n onChange = _ref.onChange,\n disableAlpha = _ref.disableAlpha,\n rgb = _ref.rgb,\n hsl = _ref.hsl,\n hsv = _ref.hsv,\n hex = _ref.hex,\n renderers = _ref.renderers,\n _ref$styles = _ref.styles,\n passedStyles = _ref$styles === undefined ? {} : _ref$styles,\n _ref$className = _ref.className,\n className = _ref$className === undefined ? 
'' : _ref$className,\n defaultView = _ref.defaultView;\n\n var styles = reactCSS(merge({\n 'default': {\n picker: {\n width: width,\n background: '#fff',\n borderRadius: '2px',\n boxShadow: '0 0 2px rgba(0,0,0,.3), 0 4px 8px rgba(0,0,0,.3)',\n boxSizing: 'initial',\n fontFamily: 'Menlo'\n },\n saturation: {\n width: '100%',\n paddingBottom: '55%',\n position: 'relative',\n borderRadius: '2px 2px 0 0',\n overflow: 'hidden'\n },\n Saturation: {\n radius: '2px 2px 0 0'\n },\n body: {\n padding: '16px 16px 12px'\n },\n controls: {\n display: 'flex'\n },\n color: {\n width: '32px'\n },\n swatch: {\n marginTop: '6px',\n width: '16px',\n height: '16px',\n borderRadius: '8px',\n position: 'relative',\n overflow: 'hidden'\n },\n active: {\n absolute: '0px 0px 0px 0px',\n borderRadius: '8px',\n boxShadow: 'inset 0 0 0 1px rgba(0,0,0,.1)',\n background: 'rgba(' + rgb.r + ', ' + rgb.g + ', ' + rgb.b + ', ' + rgb.a + ')',\n zIndex: '2'\n },\n toggles: {\n flex: '1'\n },\n hue: {\n height: '10px',\n position: 'relative',\n marginBottom: '8px'\n },\n Hue: {\n radius: '2px'\n },\n alpha: {\n height: '10px',\n position: 'relative'\n },\n Alpha: {\n radius: '2px'\n }\n },\n 'disableAlpha': {\n color: {\n width: '22px'\n },\n alpha: {\n display: 'none'\n },\n hue: {\n marginBottom: '0px'\n },\n swatch: {\n width: '10px',\n height: '10px',\n marginTop: '0px'\n }\n }\n }, passedStyles), { disableAlpha: disableAlpha });\n\n return React.createElement(\n 'div',\n { style: styles.picker, className: 'chrome-picker ' + className },\n React.createElement(\n 'div',\n { style: styles.saturation },\n React.createElement(Saturation, {\n style: styles.Saturation,\n hsl: hsl,\n hsv: hsv,\n pointer: ChromePointerCircle,\n onChange: onChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.body },\n React.createElement(\n 'div',\n { style: styles.controls, className: 'flexbox-fix' },\n React.createElement(\n 'div',\n { style: styles.color },\n React.createElement(\n 'div',\n { style: styles.swatch },\n React.createElement('div', { style: styles.active }),\n React.createElement(Checkboard, { renderers: renderers })\n )\n ),\n React.createElement(\n 'div',\n { style: styles.toggles },\n React.createElement(\n 'div',\n { style: styles.hue },\n React.createElement(Hue, {\n style: styles.Hue,\n hsl: hsl,\n pointer: ChromePointer,\n onChange: onChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.alpha },\n React.createElement(Alpha, {\n style: styles.Alpha,\n rgb: rgb,\n hsl: hsl,\n pointer: ChromePointer,\n renderers: renderers,\n onChange: onChange\n })\n )\n )\n ),\n React.createElement(ChromeFields, {\n rgb: rgb,\n hsl: hsl,\n hex: hex,\n view: defaultView,\n onChange: onChange,\n disableAlpha: disableAlpha\n })\n )\n );\n};\n\nChrome.propTypes = {\n width: PropTypes.oneOfType([PropTypes.string, PropTypes.number]),\n disableAlpha: PropTypes.bool,\n styles: PropTypes.object,\n defaultView: PropTypes.oneOf([\"hex\", \"rgb\", \"hsl\"])\n};\n\nChrome.defaultProps = {\n width: 225,\n disableAlpha: false,\n styles: {}\n};\n\nexport default ColorWrap(Chrome);","import React from 'react';\nimport reactCSS from 'reactcss';\nimport * as colorUtils from '../../helpers/color';\n\nimport { Swatch } from '../common';\n\nexport var CompactColor = function CompactColor(_ref) {\n var color = _ref.color,\n _ref$onClick = _ref.onClick,\n onClick = _ref$onClick === undefined ? 
function () {} : _ref$onClick,\n onSwatchHover = _ref.onSwatchHover,\n active = _ref.active;\n\n var styles = reactCSS({\n 'default': {\n color: {\n background: color,\n width: '15px',\n height: '15px',\n float: 'left',\n marginRight: '5px',\n marginBottom: '5px',\n position: 'relative',\n cursor: 'pointer'\n },\n dot: {\n absolute: '5px 5px 5px 5px',\n background: colorUtils.getContrastingColor(color),\n borderRadius: '50%',\n opacity: '0'\n }\n },\n 'active': {\n dot: {\n opacity: '1'\n }\n },\n 'color-#FFFFFF': {\n color: {\n boxShadow: 'inset 0 0 0 1px #ddd'\n },\n dot: {\n background: '#000'\n }\n },\n 'transparent': {\n dot: {\n background: '#000'\n }\n }\n }, { active: active, 'color-#FFFFFF': color === '#FFFFFF', 'transparent': color === 'transparent' });\n\n return React.createElement(\n Swatch,\n {\n style: styles.color,\n color: color,\n onClick: onClick,\n onHover: onSwatchHover,\n focusStyle: { boxShadow: '0 0 4px ' + color }\n },\n React.createElement('div', { style: styles.dot })\n );\n};\n\nexport default CompactColor;","import React from 'react';\nimport reactCSS from 'reactcss';\n\nimport { EditableInput } from '../common';\n\nexport var CompactFields = function CompactFields(_ref) {\n var hex = _ref.hex,\n rgb = _ref.rgb,\n onChange = _ref.onChange;\n\n var styles = reactCSS({\n 'default': {\n fields: {\n display: 'flex',\n paddingBottom: '6px',\n paddingRight: '5px',\n position: 'relative'\n },\n active: {\n position: 'absolute',\n top: '6px',\n left: '5px',\n height: '9px',\n width: '9px',\n background: hex\n },\n HEXwrap: {\n flex: '6',\n position: 'relative'\n },\n HEXinput: {\n width: '80%',\n padding: '0px',\n paddingLeft: '20%',\n border: 'none',\n outline: 'none',\n background: 'none',\n fontSize: '12px',\n color: '#333',\n height: '16px'\n },\n HEXlabel: {\n display: 'none'\n },\n RGBwrap: {\n flex: '3',\n position: 'relative'\n },\n RGBinput: {\n width: '70%',\n padding: '0px',\n paddingLeft: '30%',\n border: 'none',\n outline: 'none',\n background: 'none',\n fontSize: '12px',\n color: '#333',\n height: '16px'\n },\n RGBlabel: {\n position: 'absolute',\n top: '3px',\n left: '0px',\n lineHeight: '16px',\n textTransform: 'uppercase',\n fontSize: '12px',\n color: '#999'\n }\n }\n });\n\n var handleChange = function handleChange(data, e) {\n if (data.r || data.g || data.b) {\n onChange({\n r: data.r || rgb.r,\n g: data.g || rgb.g,\n b: data.b || rgb.b,\n source: 'rgb'\n }, e);\n } else {\n onChange({\n hex: data.hex,\n source: 'hex'\n }, e);\n }\n };\n\n return React.createElement(\n 'div',\n { style: styles.fields, className: 'flexbox-fix' },\n React.createElement('div', { style: styles.active }),\n React.createElement(EditableInput, {\n style: { wrap: styles.HEXwrap, input: styles.HEXinput, label: styles.HEXlabel },\n label: 'hex',\n value: hex,\n onChange: handleChange\n }),\n React.createElement(EditableInput, {\n style: { wrap: styles.RGBwrap, input: styles.RGBinput, label: styles.RGBlabel },\n label: 'r',\n value: rgb.r,\n onChange: handleChange\n }),\n React.createElement(EditableInput, {\n style: { wrap: styles.RGBwrap, input: styles.RGBinput, label: styles.RGBlabel },\n label: 'g',\n value: rgb.g,\n onChange: handleChange\n }),\n React.createElement(EditableInput, {\n style: { wrap: styles.RGBwrap, input: styles.RGBinput, label: styles.RGBlabel },\n label: 'b',\n value: rgb.b,\n onChange: handleChange\n })\n );\n};\n\nexport default CompactFields;","import React from 'react';\nimport PropTypes from 'prop-types';\nimport reactCSS from 'reactcss';\nimport 
map from 'lodash-es/map';\nimport merge from 'lodash-es/merge';\nimport * as color from '../../helpers/color';\n\nimport { ColorWrap, Raised } from '../common';\nimport CompactColor from './CompactColor';\nimport CompactFields from './CompactFields';\n\nexport var Compact = function Compact(_ref) {\n var onChange = _ref.onChange,\n onSwatchHover = _ref.onSwatchHover,\n colors = _ref.colors,\n hex = _ref.hex,\n rgb = _ref.rgb,\n _ref$styles = _ref.styles,\n passedStyles = _ref$styles === undefined ? {} : _ref$styles,\n _ref$className = _ref.className,\n className = _ref$className === undefined ? '' : _ref$className;\n\n var styles = reactCSS(merge({\n 'default': {\n Compact: {\n background: '#f6f6f6',\n radius: '4px'\n },\n compact: {\n paddingTop: '5px',\n paddingLeft: '5px',\n boxSizing: 'initial',\n width: '240px'\n },\n clear: {\n clear: 'both'\n }\n }\n }, passedStyles));\n\n var handleChange = function handleChange(data, e) {\n if (data.hex) {\n color.isValidHex(data.hex) && onChange({\n hex: data.hex,\n source: 'hex'\n }, e);\n } else {\n onChange(data, e);\n }\n };\n\n return React.createElement(\n Raised,\n { style: styles.Compact, styles: passedStyles },\n React.createElement(\n 'div',\n { style: styles.compact, className: 'compact-picker ' + className },\n React.createElement(\n 'div',\n null,\n map(colors, function (c) {\n return React.createElement(CompactColor, {\n key: c,\n color: c,\n active: c.toLowerCase() === hex,\n onClick: handleChange,\n onSwatchHover: onSwatchHover\n });\n }),\n React.createElement('div', { style: styles.clear })\n ),\n React.createElement(CompactFields, { hex: hex, rgb: rgb, onChange: handleChange })\n )\n );\n};\n\nCompact.propTypes = {\n colors: PropTypes.arrayOf(PropTypes.string),\n styles: PropTypes.object\n};\n\nCompact.defaultProps = {\n colors: ['#4D4D4D', '#999999', '#FFFFFF', '#F44E3B', '#FE9200', '#FCDC00', '#DBDF00', '#A4DD00', '#68CCCA', '#73D8FF', '#AEA1FF', '#FDA1FF', '#333333', '#808080', '#cccccc', '#D33115', '#E27300', '#FCC400', '#B0BC00', '#68BC00', '#16A5A5', '#009CE0', '#7B64FF', '#FA28FF', '#000000', '#666666', '#B3B3B3', '#9F0500', '#C45100', '#FB9E00', '#808900', '#194D33', '#0C797D', '#0062B1', '#653294', '#AB149E'],\n styles: {}\n};\n\nexport default ColorWrap(Compact);","import React from 'react';\nimport reactCSS, { handleHover } from 'reactcss';\n\nimport { Swatch } from '../common';\n\nexport var GithubSwatch = function GithubSwatch(_ref) {\n var hover = _ref.hover,\n color = _ref.color,\n onClick = _ref.onClick,\n onSwatchHover = _ref.onSwatchHover;\n\n var hoverSwatch = {\n position: 'relative',\n zIndex: '2',\n outline: '2px solid #fff',\n boxShadow: '0 0 5px 2px rgba(0,0,0,0.25)'\n };\n\n var styles = reactCSS({\n 'default': {\n swatch: {\n width: '25px',\n height: '25px',\n fontSize: '0'\n }\n },\n 'hover': {\n swatch: hoverSwatch\n }\n }, { hover: hover });\n\n return React.createElement(\n 'div',\n { style: styles.swatch },\n React.createElement(Swatch, {\n color: color,\n onClick: onClick,\n onHover: onSwatchHover,\n focusStyle: hoverSwatch\n })\n );\n};\n\nexport default handleHover(GithubSwatch);","import React from 'react';\nimport PropTypes from 'prop-types';\nimport reactCSS from 'reactcss';\nimport map from 'lodash-es/map';\nimport merge from 'lodash-es/merge';\n\nimport { ColorWrap } from '../common';\nimport GithubSwatch from './GithubSwatch';\n\nexport var Github = function Github(_ref) {\n var width = _ref.width,\n colors = _ref.colors,\n onChange = _ref.onChange,\n onSwatchHover = _ref.onSwatchHover,\n 
triangle = _ref.triangle,\n _ref$styles = _ref.styles,\n passedStyles = _ref$styles === undefined ? {} : _ref$styles,\n _ref$className = _ref.className,\n className = _ref$className === undefined ? '' : _ref$className;\n\n var styles = reactCSS(merge({\n 'default': {\n card: {\n width: width,\n background: '#fff',\n border: '1px solid rgba(0,0,0,0.2)',\n boxShadow: '0 3px 12px rgba(0,0,0,0.15)',\n borderRadius: '4px',\n position: 'relative',\n padding: '5px',\n display: 'flex',\n flexWrap: 'wrap'\n },\n triangle: {\n position: 'absolute',\n border: '7px solid transparent',\n borderBottomColor: '#fff'\n },\n triangleShadow: {\n position: 'absolute',\n border: '8px solid transparent',\n borderBottomColor: 'rgba(0,0,0,0.15)'\n }\n },\n 'hide-triangle': {\n triangle: {\n display: 'none'\n },\n triangleShadow: {\n display: 'none'\n }\n },\n 'top-left-triangle': {\n triangle: {\n top: '-14px',\n left: '10px'\n },\n triangleShadow: {\n top: '-16px',\n left: '9px'\n }\n },\n 'top-right-triangle': {\n triangle: {\n top: '-14px',\n right: '10px'\n },\n triangleShadow: {\n top: '-16px',\n right: '9px'\n }\n },\n 'bottom-left-triangle': {\n triangle: {\n top: '35px',\n left: '10px',\n transform: 'rotate(180deg)'\n },\n triangleShadow: {\n top: '37px',\n left: '9px',\n transform: 'rotate(180deg)'\n }\n },\n 'bottom-right-triangle': {\n triangle: {\n top: '35px',\n right: '10px',\n transform: 'rotate(180deg)'\n },\n triangleShadow: {\n top: '37px',\n right: '9px',\n transform: 'rotate(180deg)'\n }\n }\n }, passedStyles), {\n 'hide-triangle': triangle === 'hide',\n 'top-left-triangle': triangle === 'top-left',\n 'top-right-triangle': triangle === 'top-right',\n 'bottom-left-triangle': triangle === 'bottom-left',\n 'bottom-right-triangle': triangle === 'bottom-right'\n });\n\n var handleChange = function handleChange(hex, e) {\n return onChange({ hex: hex, source: 'hex' }, e);\n };\n\n return React.createElement(\n 'div',\n { style: styles.card, className: 'github-picker ' + className },\n React.createElement('div', { style: styles.triangleShadow }),\n React.createElement('div', { style: styles.triangle }),\n map(colors, function (c) {\n return React.createElement(GithubSwatch, {\n color: c,\n key: c,\n onClick: handleChange,\n onSwatchHover: onSwatchHover\n });\n })\n );\n};\n\nGithub.propTypes = {\n width: PropTypes.oneOfType([PropTypes.string, PropTypes.number]),\n colors: PropTypes.arrayOf(PropTypes.string),\n triangle: PropTypes.oneOf(['hide', 'top-left', 'top-right', 'bottom-left', 'bottom-right']),\n styles: PropTypes.object\n};\n\nGithub.defaultProps = {\n width: 200,\n colors: ['#B80000', '#DB3E00', '#FCCB00', '#008B02', '#006B76', '#1273DE', '#004DCF', '#5300EB', '#EB9694', '#FAD0C3', '#FEF3BD', '#C1E1C5', '#BEDADC', '#C4DEF6', '#BED3F3', '#D4C4FB'],\n triangle: 'top-left',\n styles: {}\n};\n\nexport default ColorWrap(Github);","import React from 'react';\nimport reactCSS from 'reactcss';\n\nexport var SliderPointer = function SliderPointer(_ref) {\n var direction = _ref.direction;\n\n var styles = reactCSS({\n 'default': {\n picker: {\n width: '18px',\n height: '18px',\n borderRadius: '50%',\n transform: 'translate(-9px, -1px)',\n backgroundColor: 'rgb(248, 248, 248)',\n boxShadow: '0 1px 4px 0 rgba(0, 0, 0, 0.37)'\n }\n },\n 'vertical': {\n picker: {\n transform: 'translate(-3px, -9px)'\n }\n }\n }, { vertical: direction === 'vertical' });\n\n return React.createElement('div', { style: styles.picker });\n};\n\nexport default SliderPointer;","var _extends = Object.assign || function (target) { 
for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };\n\nimport React from 'react';\nimport PropTypes from 'prop-types';\nimport reactCSS from 'reactcss';\nimport merge from 'lodash-es/merge';\n\nimport { ColorWrap, Hue } from '../common';\nimport HuePointer from './HuePointer';\n\nexport var HuePicker = function HuePicker(_ref) {\n var width = _ref.width,\n height = _ref.height,\n onChange = _ref.onChange,\n hsl = _ref.hsl,\n direction = _ref.direction,\n pointer = _ref.pointer,\n _ref$styles = _ref.styles,\n passedStyles = _ref$styles === undefined ? {} : _ref$styles,\n _ref$className = _ref.className,\n className = _ref$className === undefined ? '' : _ref$className;\n\n var styles = reactCSS(merge({\n 'default': {\n picker: {\n position: 'relative',\n width: width,\n height: height\n },\n hue: {\n radius: '2px'\n }\n }\n }, passedStyles));\n\n // Overwrite to provide pure hue color\n var handleChange = function handleChange(data) {\n return onChange({ a: 1, h: data.h, l: 0.5, s: 1 });\n };\n\n return React.createElement(\n 'div',\n { style: styles.picker, className: 'hue-picker ' + className },\n React.createElement(Hue, _extends({}, styles.hue, {\n hsl: hsl,\n pointer: pointer,\n onChange: handleChange,\n direction: direction\n }))\n );\n};\n\nHuePicker.propTypes = {\n styles: PropTypes.object\n};\nHuePicker.defaultProps = {\n width: '316px',\n height: '16px',\n direction: 'horizontal',\n pointer: HuePointer,\n styles: {}\n};\n\nexport default ColorWrap(HuePicker);","import React from 'react';\nimport reactCSS from 'reactcss';\nimport merge from 'lodash-es/merge';\nimport * as color from '../../helpers/color';\n\nimport { ColorWrap, EditableInput, Raised } from '../common';\n\nexport var Material = function Material(_ref) {\n var onChange = _ref.onChange,\n hex = _ref.hex,\n rgb = _ref.rgb,\n _ref$styles = _ref.styles,\n passedStyles = _ref$styles === undefined ? {} : _ref$styles,\n _ref$className = _ref.className,\n className = _ref$className === undefined ? 
'' : _ref$className;\n\n var styles = reactCSS(merge({\n 'default': {\n material: {\n width: '98px',\n height: '98px',\n padding: '16px',\n fontFamily: 'Roboto'\n },\n HEXwrap: {\n position: 'relative'\n },\n HEXinput: {\n width: '100%',\n marginTop: '12px',\n fontSize: '15px',\n color: '#333',\n padding: '0px',\n border: '0px',\n borderBottom: '2px solid ' + hex,\n outline: 'none',\n height: '30px'\n },\n HEXlabel: {\n position: 'absolute',\n top: '0px',\n left: '0px',\n fontSize: '11px',\n color: '#999999',\n textTransform: 'capitalize'\n },\n Hex: {\n style: {}\n },\n RGBwrap: {\n position: 'relative'\n },\n RGBinput: {\n width: '100%',\n marginTop: '12px',\n fontSize: '15px',\n color: '#333',\n padding: '0px',\n border: '0px',\n borderBottom: '1px solid #eee',\n outline: 'none',\n height: '30px'\n },\n RGBlabel: {\n position: 'absolute',\n top: '0px',\n left: '0px',\n fontSize: '11px',\n color: '#999999',\n textTransform: 'capitalize'\n },\n split: {\n display: 'flex',\n marginRight: '-10px',\n paddingTop: '11px'\n },\n third: {\n flex: '1',\n paddingRight: '10px'\n }\n }\n }, passedStyles));\n\n var handleChange = function handleChange(data, e) {\n if (data.hex) {\n color.isValidHex(data.hex) && onChange({\n hex: data.hex,\n source: 'hex'\n }, e);\n } else if (data.r || data.g || data.b) {\n onChange({\n r: data.r || rgb.r,\n g: data.g || rgb.g,\n b: data.b || rgb.b,\n source: 'rgb'\n }, e);\n }\n };\n\n return React.createElement(\n Raised,\n { styles: passedStyles },\n React.createElement(\n 'div',\n { style: styles.material, className: 'material-picker ' + className },\n React.createElement(EditableInput, {\n style: { wrap: styles.HEXwrap, input: styles.HEXinput, label: styles.HEXlabel },\n label: 'hex',\n value: hex,\n onChange: handleChange\n }),\n React.createElement(\n 'div',\n { style: styles.split, className: 'flexbox-fix' },\n React.createElement(\n 'div',\n { style: styles.third },\n React.createElement(EditableInput, {\n style: { wrap: styles.RGBwrap, input: styles.RGBinput, label: styles.RGBlabel },\n label: 'r', value: rgb.r,\n onChange: handleChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.third },\n React.createElement(EditableInput, {\n style: { wrap: styles.RGBwrap, input: styles.RGBinput, label: styles.RGBlabel },\n label: 'g',\n value: rgb.g,\n onChange: handleChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.third },\n React.createElement(EditableInput, {\n style: { wrap: styles.RGBwrap, input: styles.RGBinput, label: styles.RGBlabel },\n label: 'b',\n value: rgb.b,\n onChange: handleChange\n })\n )\n )\n )\n );\n};\n\nexport default ColorWrap(Material);","import React from 'react';\nimport reactCSS from 'reactcss';\nimport * as color from '../../helpers/color';\n\nimport { EditableInput } from '../common';\n\nexport var PhotoshopPicker = function PhotoshopPicker(_ref) {\n var onChange = _ref.onChange,\n rgb = _ref.rgb,\n hsv = _ref.hsv,\n hex = _ref.hex;\n\n var styles = reactCSS({\n 'default': {\n fields: {\n paddingTop: '5px',\n paddingBottom: '9px',\n width: '80px',\n position: 'relative'\n },\n divider: {\n height: '5px'\n },\n RGBwrap: {\n position: 'relative'\n },\n RGBinput: {\n marginLeft: '40%',\n width: '40%',\n height: '18px',\n border: '1px solid #888888',\n boxShadow: 'inset 0 1px 1px rgba(0,0,0,.1), 0 1px 0 0 #ECECEC',\n marginBottom: '5px',\n fontSize: '13px',\n paddingLeft: '3px',\n marginRight: '10px'\n },\n RGBlabel: {\n left: '0px',\n top: '0px',\n width: '34px',\n textTransform: 'uppercase',\n fontSize: 
'13px',\n height: '18px',\n lineHeight: '22px',\n position: 'absolute'\n },\n HEXwrap: {\n position: 'relative'\n },\n HEXinput: {\n marginLeft: '20%',\n width: '80%',\n height: '18px',\n border: '1px solid #888888',\n boxShadow: 'inset 0 1px 1px rgba(0,0,0,.1), 0 1px 0 0 #ECECEC',\n marginBottom: '6px',\n fontSize: '13px',\n paddingLeft: '3px'\n },\n HEXlabel: {\n position: 'absolute',\n top: '0px',\n left: '0px',\n width: '14px',\n textTransform: 'uppercase',\n fontSize: '13px',\n height: '18px',\n lineHeight: '22px'\n },\n fieldSymbols: {\n position: 'absolute',\n top: '5px',\n right: '-7px',\n fontSize: '13px'\n },\n symbol: {\n height: '20px',\n lineHeight: '22px',\n paddingBottom: '7px'\n }\n }\n });\n\n var handleChange = function handleChange(data, e) {\n if (data['#']) {\n color.isValidHex(data['#']) && onChange({\n hex: data['#'],\n source: 'hex'\n }, e);\n } else if (data.r || data.g || data.b) {\n onChange({\n r: data.r || rgb.r,\n g: data.g || rgb.g,\n b: data.b || rgb.b,\n source: 'rgb'\n }, e);\n } else if (data.h || data.s || data.v) {\n onChange({\n h: data.h || hsv.h,\n s: data.s || hsv.s,\n v: data.v || hsv.v,\n source: 'hsv'\n }, e);\n }\n };\n\n return React.createElement(\n 'div',\n { style: styles.fields },\n React.createElement(EditableInput, {\n style: { wrap: styles.RGBwrap, input: styles.RGBinput, label: styles.RGBlabel },\n label: 'h',\n value: Math.round(hsv.h),\n onChange: handleChange\n }),\n React.createElement(EditableInput, {\n style: { wrap: styles.RGBwrap, input: styles.RGBinput, label: styles.RGBlabel },\n label: 's',\n value: Math.round(hsv.s * 100),\n onChange: handleChange\n }),\n React.createElement(EditableInput, {\n style: { wrap: styles.RGBwrap, input: styles.RGBinput, label: styles.RGBlabel },\n label: 'v',\n value: Math.round(hsv.v * 100),\n onChange: handleChange\n }),\n React.createElement('div', { style: styles.divider }),\n React.createElement(EditableInput, {\n style: { wrap: styles.RGBwrap, input: styles.RGBinput, label: styles.RGBlabel },\n label: 'r',\n value: rgb.r,\n onChange: handleChange\n }),\n React.createElement(EditableInput, {\n style: { wrap: styles.RGBwrap, input: styles.RGBinput, label: styles.RGBlabel },\n label: 'g',\n value: rgb.g,\n onChange: handleChange\n }),\n React.createElement(EditableInput, {\n style: { wrap: styles.RGBwrap, input: styles.RGBinput, label: styles.RGBlabel },\n label: 'b',\n value: rgb.b,\n onChange: handleChange\n }),\n React.createElement('div', { style: styles.divider }),\n React.createElement(EditableInput, {\n style: { wrap: styles.HEXwrap, input: styles.HEXinput, label: styles.HEXlabel },\n label: '#',\n value: hex.replace('#', ''),\n onChange: handleChange\n }),\n React.createElement(\n 'div',\n { style: styles.fieldSymbols },\n React.createElement(\n 'div',\n { style: styles.symbol },\n '\\xB0'\n ),\n React.createElement(\n 'div',\n { style: styles.symbol },\n '%'\n ),\n React.createElement(\n 'div',\n { style: styles.symbol },\n '%'\n )\n )\n );\n};\n\nexport default PhotoshopPicker;","import React from 'react';\nimport reactCSS from 'reactcss';\n\nexport var PhotoshopPointerCircle = function PhotoshopPointerCircle(_ref) {\n var hsl = _ref.hsl;\n\n var styles = reactCSS({\n 'default': {\n picker: {\n width: '12px',\n height: '12px',\n borderRadius: '6px',\n boxShadow: 'inset 0 0 0 1px #fff',\n transform: 'translate(-6px, -6px)'\n }\n },\n 'black-outline': {\n picker: {\n boxShadow: 'inset 0 0 0 1px #000'\n }\n }\n }, { 'black-outline': hsl.l > 0.5 });\n\n return React.createElement('div', { 
style: styles.picker });\n};\n\nexport default PhotoshopPointerCircle;","import React from 'react';\nimport reactCSS from 'reactcss';\n\nexport var PhotoshopPointerCircle = function PhotoshopPointerCircle() {\n var styles = reactCSS({\n 'default': {\n triangle: {\n width: 0,\n height: 0,\n borderStyle: 'solid',\n borderWidth: '4px 0 4px 6px',\n borderColor: 'transparent transparent transparent #fff',\n position: 'absolute',\n top: '1px',\n left: '1px'\n },\n triangleBorder: {\n width: 0,\n height: 0,\n borderStyle: 'solid',\n borderWidth: '5px 0 5px 8px',\n borderColor: 'transparent transparent transparent #555'\n },\n\n left: {\n Extend: 'triangleBorder',\n transform: 'translate(-13px, -4px)'\n },\n leftInside: {\n Extend: 'triangle',\n transform: 'translate(-8px, -5px)'\n },\n\n right: {\n Extend: 'triangleBorder',\n transform: 'translate(20px, -14px) rotate(180deg)'\n },\n rightInside: {\n Extend: 'triangle',\n transform: 'translate(-8px, -5px)'\n }\n }\n });\n\n return React.createElement(\n 'div',\n { style: styles.pointer },\n React.createElement(\n 'div',\n { style: styles.left },\n React.createElement('div', { style: styles.leftInside })\n ),\n React.createElement(\n 'div',\n { style: styles.right },\n React.createElement('div', { style: styles.rightInside })\n )\n );\n};\n\nexport default PhotoshopPointerCircle;","import React from 'react';\nimport reactCSS from 'reactcss';\n\nexport var PhotoshopButton = function PhotoshopButton(_ref) {\n var onClick = _ref.onClick,\n label = _ref.label,\n children = _ref.children,\n active = _ref.active;\n\n var styles = reactCSS({\n 'default': {\n button: {\n backgroundImage: 'linear-gradient(-180deg, #FFFFFF 0%, #E6E6E6 100%)',\n border: '1px solid #878787',\n borderRadius: '2px',\n height: '20px',\n boxShadow: '0 1px 0 0 #EAEAEA',\n fontSize: '14px',\n color: '#000',\n lineHeight: '20px',\n textAlign: 'center',\n marginBottom: '10px',\n cursor: 'pointer'\n }\n },\n 'active': {\n button: {\n boxShadow: '0 0 0 1px #878787'\n }\n }\n }, { active: active });\n\n return React.createElement(\n 'div',\n { style: styles.button, onClick: onClick },\n label || children\n );\n};\n\nexport default PhotoshopButton;","import React from 'react';\nimport reactCSS from 'reactcss';\n\nexport var PhotoshopPreviews = function PhotoshopPreviews(_ref) {\n var rgb = _ref.rgb,\n currentColor = _ref.currentColor;\n\n var styles = reactCSS({\n 'default': {\n swatches: {\n border: '1px solid #B3B3B3',\n borderBottom: '1px solid #F0F0F0',\n marginBottom: '2px',\n marginTop: '1px'\n },\n new: {\n height: '34px',\n background: 'rgb(' + rgb.r + ',' + rgb.g + ', ' + rgb.b + ')',\n boxShadow: 'inset 1px 0 0 #000, inset -1px 0 0 #000, inset 0 1px 0 #000'\n },\n current: {\n height: '34px',\n background: currentColor,\n boxShadow: 'inset 1px 0 0 #000, inset -1px 0 0 #000, inset 0 -1px 0 #000'\n },\n label: {\n fontSize: '14px',\n color: '#000',\n textAlign: 'center'\n }\n }\n });\n\n return React.createElement(\n 'div',\n null,\n React.createElement(\n 'div',\n { style: styles.label },\n 'new'\n ),\n React.createElement(\n 'div',\n { style: styles.swatches },\n React.createElement('div', { style: styles.new }),\n React.createElement('div', { style: styles.current })\n ),\n React.createElement(\n 'div',\n { style: styles.label },\n 'current'\n )\n );\n};\n\nexport default PhotoshopPreviews;","var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = 
descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return call && (typeof call === \"object\" || typeof call === \"function\") ? call : self; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function, not \" + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }\n\nimport React from 'react';\nimport PropTypes from 'prop-types';\nimport reactCSS from 'reactcss';\nimport merge from 'lodash-es/merge';\n\nimport { ColorWrap, Saturation, Hue } from '../common';\nimport PhotoshopFields from './PhotoshopFields';\nimport PhotoshopPointerCircle from './PhotoshopPointerCircle';\nimport PhotoshopPointer from './PhotoshopPointer';\nimport PhotoshopButton from './PhotoshopButton';\nimport PhotoshopPreviews from './PhotoshopPreviews';\n\nexport var Photoshop = function (_React$Component) {\n _inherits(Photoshop, _React$Component);\n\n function Photoshop(props) {\n _classCallCheck(this, Photoshop);\n\n var _this = _possibleConstructorReturn(this, (Photoshop.__proto__ || Object.getPrototypeOf(Photoshop)).call(this));\n\n _this.state = {\n currentColor: props.hex\n };\n return _this;\n }\n\n _createClass(Photoshop, [{\n key: 'render',\n value: function render() {\n var _props = this.props,\n _props$styles = _props.styles,\n passedStyles = _props$styles === undefined ? {} : _props$styles,\n _props$className = _props.className,\n className = _props$className === undefined ? 
'' : _props$className;\n\n var styles = reactCSS(merge({\n 'default': {\n picker: {\n background: '#DCDCDC',\n borderRadius: '4px',\n boxShadow: '0 0 0 1px rgba(0,0,0,.25), 0 8px 16px rgba(0,0,0,.15)',\n boxSizing: 'initial',\n width: '513px'\n },\n head: {\n backgroundImage: 'linear-gradient(-180deg, #F0F0F0 0%, #D4D4D4 100%)',\n borderBottom: '1px solid #B1B1B1',\n boxShadow: 'inset 0 1px 0 0 rgba(255,255,255,.2), inset 0 -1px 0 0 rgba(0,0,0,.02)',\n height: '23px',\n lineHeight: '24px',\n borderRadius: '4px 4px 0 0',\n fontSize: '13px',\n color: '#4D4D4D',\n textAlign: 'center'\n },\n body: {\n padding: '15px 15px 0',\n display: 'flex'\n },\n saturation: {\n width: '256px',\n height: '256px',\n position: 'relative',\n border: '2px solid #B3B3B3',\n borderBottom: '2px solid #F0F0F0',\n overflow: 'hidden'\n },\n hue: {\n position: 'relative',\n height: '256px',\n width: '19px',\n marginLeft: '10px',\n border: '2px solid #B3B3B3',\n borderBottom: '2px solid #F0F0F0'\n },\n controls: {\n width: '180px',\n marginLeft: '10px'\n },\n top: {\n display: 'flex'\n },\n previews: {\n width: '60px'\n },\n actions: {\n flex: '1',\n marginLeft: '20px'\n }\n }\n }, passedStyles));\n\n return React.createElement(\n 'div',\n { style: styles.picker, className: 'photoshop-picker ' + className },\n React.createElement(\n 'div',\n { style: styles.head },\n this.props.header\n ),\n React.createElement(\n 'div',\n { style: styles.body, className: 'flexbox-fix' },\n React.createElement(\n 'div',\n { style: styles.saturation },\n React.createElement(Saturation, {\n hsl: this.props.hsl,\n hsv: this.props.hsv,\n pointer: PhotoshopPointerCircle,\n onChange: this.props.onChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.hue },\n React.createElement(Hue, {\n direction: 'vertical',\n hsl: this.props.hsl,\n pointer: PhotoshopPointer,\n onChange: this.props.onChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.controls },\n React.createElement(\n 'div',\n { style: styles.top, className: 'flexbox-fix' },\n React.createElement(\n 'div',\n { style: styles.previews },\n React.createElement(PhotoshopPreviews, {\n rgb: this.props.rgb,\n currentColor: this.state.currentColor\n })\n ),\n React.createElement(\n 'div',\n { style: styles.actions },\n React.createElement(PhotoshopButton, { label: 'OK', onClick: this.props.onAccept, active: true }),\n React.createElement(PhotoshopButton, { label: 'Cancel', onClick: this.props.onCancel }),\n React.createElement(PhotoshopFields, {\n onChange: this.props.onChange,\n rgb: this.props.rgb,\n hsv: this.props.hsv,\n hex: this.props.hex\n })\n )\n )\n )\n )\n );\n }\n }]);\n\n return Photoshop;\n}(React.Component);\n\nPhotoshop.propTypes = {\n header: PropTypes.string,\n styles: PropTypes.object\n};\n\nPhotoshop.defaultProps = {\n header: 'Color Picker',\n styles: {}\n};\n\nexport default ColorWrap(Photoshop);","/* eslint-disable no-param-reassign */\n\nimport React from 'react';\nimport reactCSS from 'reactcss';\nimport * as color from '../../helpers/color';\n\nimport { EditableInput } from '../common';\n\nexport var SketchFields = function SketchFields(_ref) {\n var onChange = _ref.onChange,\n rgb = _ref.rgb,\n hsl = _ref.hsl,\n hex = _ref.hex,\n disableAlpha = _ref.disableAlpha;\n\n var styles = reactCSS({\n 'default': {\n fields: {\n display: 'flex',\n paddingTop: '4px'\n },\n single: {\n flex: '1',\n paddingLeft: '6px'\n },\n alpha: {\n flex: '1',\n paddingLeft: '6px'\n },\n double: {\n flex: '2'\n },\n input: {\n width: '80%',\n padding: '4px 10% 3px',\n 
border: 'none',\n boxShadow: 'inset 0 0 0 1px #ccc',\n fontSize: '11px'\n },\n label: {\n display: 'block',\n textAlign: 'center',\n fontSize: '11px',\n color: '#222',\n paddingTop: '3px',\n paddingBottom: '4px',\n textTransform: 'capitalize'\n }\n },\n 'disableAlpha': {\n alpha: {\n display: 'none'\n }\n }\n }, { disableAlpha: disableAlpha });\n\n var handleChange = function handleChange(data, e) {\n if (data.hex) {\n color.isValidHex(data.hex) && onChange({\n hex: data.hex,\n source: 'hex'\n }, e);\n } else if (data.r || data.g || data.b) {\n onChange({\n r: data.r || rgb.r,\n g: data.g || rgb.g,\n b: data.b || rgb.b,\n a: rgb.a,\n source: 'rgb'\n }, e);\n } else if (data.a) {\n if (data.a < 0) {\n data.a = 0;\n } else if (data.a > 100) {\n data.a = 100;\n }\n\n data.a /= 100;\n onChange({\n h: hsl.h,\n s: hsl.s,\n l: hsl.l,\n a: data.a,\n source: 'rgb'\n }, e);\n }\n };\n\n return React.createElement(\n 'div',\n { style: styles.fields, className: 'flexbox-fix' },\n React.createElement(\n 'div',\n { style: styles.double },\n React.createElement(EditableInput, {\n style: { input: styles.input, label: styles.label },\n label: 'hex',\n value: hex.replace('#', ''),\n onChange: handleChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.single },\n React.createElement(EditableInput, {\n style: { input: styles.input, label: styles.label },\n label: 'r',\n value: rgb.r,\n onChange: handleChange,\n dragLabel: 'true',\n dragMax: '255'\n })\n ),\n React.createElement(\n 'div',\n { style: styles.single },\n React.createElement(EditableInput, {\n style: { input: styles.input, label: styles.label },\n label: 'g',\n value: rgb.g,\n onChange: handleChange,\n dragLabel: 'true',\n dragMax: '255'\n })\n ),\n React.createElement(\n 'div',\n { style: styles.single },\n React.createElement(EditableInput, {\n style: { input: styles.input, label: styles.label },\n label: 'b',\n value: rgb.b,\n onChange: handleChange,\n dragLabel: 'true',\n dragMax: '255'\n })\n ),\n React.createElement(\n 'div',\n { style: styles.alpha },\n React.createElement(EditableInput, {\n style: { input: styles.input, label: styles.label },\n label: 'a',\n value: Math.round(rgb.a * 100),\n onChange: handleChange,\n dragLabel: 'true',\n dragMax: '100'\n })\n )\n );\n};\n\nexport default SketchFields;","var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };\n\nimport React from 'react';\nimport PropTypes from 'prop-types';\nimport reactCSS from 'reactcss';\n\nimport { Swatch } from '../common';\n\nexport var SketchPresetColors = function SketchPresetColors(_ref) {\n var colors = _ref.colors,\n _ref$onClick = _ref.onClick,\n onClick = _ref$onClick === undefined ? 
function () {} : _ref$onClick,\n onSwatchHover = _ref.onSwatchHover;\n\n var styles = reactCSS({\n 'default': {\n colors: {\n margin: '0 -10px',\n padding: '10px 0 0 10px',\n borderTop: '1px solid #eee',\n display: 'flex',\n flexWrap: 'wrap',\n position: 'relative'\n },\n swatchWrap: {\n width: '16px',\n height: '16px',\n margin: '0 10px 10px 0'\n },\n swatch: {\n borderRadius: '3px',\n boxShadow: 'inset 0 0 0 1px rgba(0,0,0,.15)'\n }\n },\n 'no-presets': {\n colors: {\n display: 'none'\n }\n }\n }, {\n 'no-presets': !colors || !colors.length\n });\n\n var handleClick = function handleClick(hex, e) {\n onClick({\n hex: hex,\n source: 'hex'\n }, e);\n };\n\n return React.createElement(\n 'div',\n { style: styles.colors, className: 'flexbox-fix' },\n colors.map(function (colorObjOrString) {\n var c = typeof colorObjOrString === 'string' ? { color: colorObjOrString } : colorObjOrString;\n var key = '' + c.color + (c.title || '');\n return React.createElement(\n 'div',\n { key: key, style: styles.swatchWrap },\n React.createElement(Swatch, _extends({}, c, {\n style: styles.swatch,\n onClick: handleClick,\n onHover: onSwatchHover,\n focusStyle: {\n boxShadow: 'inset 0 0 0 1px rgba(0,0,0,.15), 0 0 4px ' + c.color\n }\n }))\n );\n })\n );\n};\n\nSketchPresetColors.propTypes = {\n colors: PropTypes.arrayOf(PropTypes.oneOfType([PropTypes.string, PropTypes.shape({\n color: PropTypes.string,\n title: PropTypes.string\n })])).isRequired\n};\n\nexport default SketchPresetColors;","var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };\n\nimport React from 'react';\nimport PropTypes from 'prop-types';\nimport reactCSS from 'reactcss';\nimport merge from 'lodash-es/merge';\n\nimport { ColorWrap, Saturation, Hue, Alpha, Checkboard } from '../common';\nimport SketchFields from './SketchFields';\nimport SketchPresetColors from './SketchPresetColors';\n\nexport var Sketch = function Sketch(_ref) {\n var width = _ref.width,\n rgb = _ref.rgb,\n hex = _ref.hex,\n hsv = _ref.hsv,\n hsl = _ref.hsl,\n onChange = _ref.onChange,\n onSwatchHover = _ref.onSwatchHover,\n disableAlpha = _ref.disableAlpha,\n presetColors = _ref.presetColors,\n renderers = _ref.renderers,\n _ref$styles = _ref.styles,\n passedStyles = _ref$styles === undefined ? {} : _ref$styles,\n _ref$className = _ref.className,\n className = _ref$className === undefined ? 
'' : _ref$className;\n\n var styles = reactCSS(merge({\n 'default': _extends({\n picker: {\n width: width,\n padding: '10px 10px 0',\n boxSizing: 'initial',\n background: '#fff',\n borderRadius: '4px',\n boxShadow: '0 0 0 1px rgba(0,0,0,.15), 0 8px 16px rgba(0,0,0,.15)'\n },\n saturation: {\n width: '100%',\n paddingBottom: '75%',\n position: 'relative',\n overflow: 'hidden'\n },\n Saturation: {\n radius: '3px',\n shadow: 'inset 0 0 0 1px rgba(0,0,0,.15), inset 0 0 4px rgba(0,0,0,.25)'\n },\n controls: {\n display: 'flex'\n },\n sliders: {\n padding: '4px 0',\n flex: '1'\n },\n color: {\n width: '24px',\n height: '24px',\n position: 'relative',\n marginTop: '4px',\n marginLeft: '4px',\n borderRadius: '3px'\n },\n activeColor: {\n absolute: '0px 0px 0px 0px',\n borderRadius: '2px',\n background: 'rgba(' + rgb.r + ',' + rgb.g + ',' + rgb.b + ',' + rgb.a + ')',\n boxShadow: 'inset 0 0 0 1px rgba(0,0,0,.15), inset 0 0 4px rgba(0,0,0,.25)'\n },\n hue: {\n position: 'relative',\n height: '10px',\n overflow: 'hidden'\n },\n Hue: {\n radius: '2px',\n shadow: 'inset 0 0 0 1px rgba(0,0,0,.15), inset 0 0 4px rgba(0,0,0,.25)'\n },\n\n alpha: {\n position: 'relative',\n height: '10px',\n marginTop: '4px',\n overflow: 'hidden'\n },\n Alpha: {\n radius: '2px',\n shadow: 'inset 0 0 0 1px rgba(0,0,0,.15), inset 0 0 4px rgba(0,0,0,.25)'\n }\n }, passedStyles),\n 'disableAlpha': {\n color: {\n height: '10px'\n },\n hue: {\n height: '10px'\n },\n alpha: {\n display: 'none'\n }\n }\n }, passedStyles), { disableAlpha: disableAlpha });\n\n return React.createElement(\n 'div',\n { style: styles.picker, className: 'sketch-picker ' + className },\n React.createElement(\n 'div',\n { style: styles.saturation },\n React.createElement(Saturation, {\n style: styles.Saturation,\n hsl: hsl,\n hsv: hsv,\n onChange: onChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.controls, className: 'flexbox-fix' },\n React.createElement(\n 'div',\n { style: styles.sliders },\n React.createElement(\n 'div',\n { style: styles.hue },\n React.createElement(Hue, {\n style: styles.Hue,\n hsl: hsl,\n onChange: onChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.alpha },\n React.createElement(Alpha, {\n style: styles.Alpha,\n rgb: rgb,\n hsl: hsl,\n renderers: renderers,\n onChange: onChange\n })\n )\n ),\n React.createElement(\n 'div',\n { style: styles.color },\n React.createElement(Checkboard, null),\n React.createElement('div', { style: styles.activeColor })\n )\n ),\n React.createElement(SketchFields, {\n rgb: rgb,\n hsl: hsl,\n hex: hex,\n onChange: onChange,\n disableAlpha: disableAlpha\n }),\n React.createElement(SketchPresetColors, {\n colors: presetColors,\n onClick: onChange,\n onSwatchHover: onSwatchHover\n })\n );\n};\n\nSketch.propTypes = {\n disableAlpha: PropTypes.bool,\n width: PropTypes.oneOfType([PropTypes.string, PropTypes.number]),\n styles: PropTypes.object\n};\n\nSketch.defaultProps = {\n disableAlpha: false,\n width: 200,\n styles: {},\n presetColors: ['#D0021B', '#F5A623', '#F8E71C', '#8B572A', '#7ED321', '#417505', '#BD10E0', '#9013FE', '#4A90E2', '#50E3C2', '#B8E986', '#000000', '#4A4A4A', '#9B9B9B', '#FFFFFF']\n};\n\nexport default ColorWrap(Sketch);","import React from 'react';\nimport reactCSS from 'reactcss';\n\nexport var SliderSwatch = function SliderSwatch(_ref) {\n var hsl = _ref.hsl,\n offset = _ref.offset,\n _ref$onClick = _ref.onClick,\n onClick = _ref$onClick === undefined ? 
function () {} : _ref$onClick,\n active = _ref.active,\n first = _ref.first,\n last = _ref.last;\n\n var styles = reactCSS({\n 'default': {\n swatch: {\n height: '12px',\n background: 'hsl(' + hsl.h + ', 50%, ' + offset * 100 + '%)',\n cursor: 'pointer'\n }\n },\n 'first': {\n swatch: {\n borderRadius: '2px 0 0 2px'\n }\n },\n 'last': {\n swatch: {\n borderRadius: '0 2px 2px 0'\n }\n },\n 'active': {\n swatch: {\n transform: 'scaleY(1.8)',\n borderRadius: '3.6px/2px'\n }\n }\n }, { active: active, first: first, last: last });\n\n var handleClick = function handleClick(e) {\n return onClick({\n h: hsl.h,\n s: 0.5,\n l: offset,\n source: 'hsl'\n }, e);\n };\n\n return React.createElement('div', { style: styles.swatch, onClick: handleClick });\n};\n\nexport default SliderSwatch;","import React from 'react';\nimport reactCSS from 'reactcss';\n\nimport SliderSwatch from './SliderSwatch';\n\nexport var SliderSwatches = function SliderSwatches(_ref) {\n var onClick = _ref.onClick,\n hsl = _ref.hsl;\n\n var styles = reactCSS({\n 'default': {\n swatches: {\n marginTop: '20px'\n },\n swatch: {\n boxSizing: 'border-box',\n width: '20%',\n paddingRight: '1px',\n float: 'left'\n },\n clear: {\n clear: 'both'\n }\n }\n });\n\n // Acceptible difference in floating point equality\n var epsilon = 0.1;\n\n return React.createElement(\n 'div',\n { style: styles.swatches },\n React.createElement(\n 'div',\n { style: styles.swatch },\n React.createElement(SliderSwatch, {\n hsl: hsl,\n offset: '.80',\n active: Math.abs(hsl.l - 0.80) < epsilon && Math.abs(hsl.s - 0.50) < epsilon,\n onClick: onClick,\n first: true\n })\n ),\n React.createElement(\n 'div',\n { style: styles.swatch },\n React.createElement(SliderSwatch, {\n hsl: hsl,\n offset: '.65',\n active: Math.abs(hsl.l - 0.65) < epsilon && Math.abs(hsl.s - 0.50) < epsilon,\n onClick: onClick\n })\n ),\n React.createElement(\n 'div',\n { style: styles.swatch },\n React.createElement(SliderSwatch, {\n hsl: hsl,\n offset: '.50',\n active: Math.abs(hsl.l - 0.50) < epsilon && Math.abs(hsl.s - 0.50) < epsilon,\n onClick: onClick\n })\n ),\n React.createElement(\n 'div',\n { style: styles.swatch },\n React.createElement(SliderSwatch, {\n hsl: hsl,\n offset: '.35',\n active: Math.abs(hsl.l - 0.35) < epsilon && Math.abs(hsl.s - 0.50) < epsilon,\n onClick: onClick\n })\n ),\n React.createElement(\n 'div',\n { style: styles.swatch },\n React.createElement(SliderSwatch, {\n hsl: hsl,\n offset: '.20',\n active: Math.abs(hsl.l - 0.20) < epsilon && Math.abs(hsl.s - 0.50) < epsilon,\n onClick: onClick,\n last: true\n })\n ),\n React.createElement('div', { style: styles.clear })\n );\n};\n\nexport default SliderSwatches;","import React from 'react';\nimport reactCSS from 'reactcss';\n\nexport var SliderPointer = function SliderPointer() {\n var styles = reactCSS({\n 'default': {\n picker: {\n width: '14px',\n height: '14px',\n borderRadius: '6px',\n transform: 'translate(-7px, -1px)',\n backgroundColor: 'rgb(248, 248, 248)',\n boxShadow: '0 1px 4px 0 rgba(0, 0, 0, 0.37)'\n }\n }\n });\n\n return React.createElement('div', { style: styles.picker });\n};\n\nexport default SliderPointer;","import React from 'react';\nimport PropTypes from 'prop-types';\nimport reactCSS from 'reactcss';\nimport merge from 'lodash-es/merge';\n\nimport { ColorWrap, Hue } from '../common';\nimport SliderSwatches from './SliderSwatches';\nimport SliderPointer from './SliderPointer';\n\nexport var Slider = function Slider(_ref) {\n var hsl = _ref.hsl,\n onChange = _ref.onChange,\n pointer = 
_ref.pointer,\n _ref$styles = _ref.styles,\n passedStyles = _ref$styles === undefined ? {} : _ref$styles,\n _ref$className = _ref.className,\n className = _ref$className === undefined ? '' : _ref$className;\n\n var styles = reactCSS(merge({\n 'default': {\n hue: {\n height: '12px',\n position: 'relative'\n },\n Hue: {\n radius: '2px'\n }\n }\n }, passedStyles));\n\n return React.createElement(\n 'div',\n { style: styles.wrap || {}, className: 'slider-picker ' + className },\n React.createElement(\n 'div',\n { style: styles.hue },\n React.createElement(Hue, {\n style: styles.Hue,\n hsl: hsl,\n pointer: pointer,\n onChange: onChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.swatches },\n React.createElement(SliderSwatches, { hsl: hsl, onClick: onChange })\n )\n );\n};\n\nSlider.propTypes = {\n styles: PropTypes.object\n};\nSlider.defaultProps = {\n pointer: SliderPointer,\n styles: {}\n};\n\nexport default ColorWrap(Slider);","import React from 'react';\nimport reactCSS from 'reactcss';\nimport * as colorUtils from '../../helpers/color';\n\nimport { Swatch } from '../common';\nimport CheckIcon from '@icons/material/CheckIcon';\n\nexport var SwatchesColor = function SwatchesColor(_ref) {\n var color = _ref.color,\n _ref$onClick = _ref.onClick,\n onClick = _ref$onClick === undefined ? function () {} : _ref$onClick,\n onSwatchHover = _ref.onSwatchHover,\n first = _ref.first,\n last = _ref.last,\n active = _ref.active;\n\n var styles = reactCSS({\n 'default': {\n color: {\n width: '40px',\n height: '24px',\n cursor: 'pointer',\n background: color,\n marginBottom: '1px'\n },\n check: {\n color: colorUtils.getContrastingColor(color),\n marginLeft: '8px',\n display: 'none'\n }\n },\n 'first': {\n color: {\n overflow: 'hidden',\n borderRadius: '2px 2px 0 0'\n }\n },\n 'last': {\n color: {\n overflow: 'hidden',\n borderRadius: '0 0 2px 2px'\n }\n },\n 'active': {\n check: {\n display: 'block'\n }\n },\n 'color-#FFFFFF': {\n color: {\n boxShadow: 'inset 0 0 0 1px #ddd'\n },\n check: {\n color: '#333'\n }\n },\n 'transparent': {\n check: {\n color: '#333'\n }\n }\n }, {\n first: first,\n last: last,\n active: active,\n 'color-#FFFFFF': color === '#FFFFFF',\n 'transparent': color === 'transparent'\n });\n\n return React.createElement(\n Swatch,\n {\n color: color,\n style: styles.color,\n onClick: onClick,\n onHover: onSwatchHover,\n focusStyle: { boxShadow: '0 0 4px ' + color }\n },\n React.createElement(\n 'div',\n { style: styles.check },\n React.createElement(CheckIcon, null)\n )\n );\n};\n\nexport default SwatchesColor;","import React from 'react';\nimport reactCSS from 'reactcss';\nimport map from 'lodash-es/map';\n\nimport SwatchesColor from './SwatchesColor';\n\nexport var SwatchesGroup = function SwatchesGroup(_ref) {\n var onClick = _ref.onClick,\n onSwatchHover = _ref.onSwatchHover,\n group = _ref.group,\n active = _ref.active;\n\n var styles = reactCSS({\n 'default': {\n group: {\n paddingBottom: '10px',\n width: '40px',\n float: 'left',\n marginRight: '10px'\n }\n }\n });\n\n return React.createElement(\n 'div',\n { style: styles.group },\n map(group, function (color, i) {\n return React.createElement(SwatchesColor, {\n key: color,\n color: color,\n active: color.toLowerCase() === active,\n first: i === 0,\n last: i === group.length - 1,\n onClick: onClick,\n onSwatchHover: onSwatchHover\n });\n })\n );\n};\n\nexport default SwatchesGroup;","import React from 'react';\nimport PropTypes from 'prop-types';\nimport reactCSS from 'reactcss';\nimport map from 
'lodash-es/map';\nimport merge from 'lodash-es/merge';\nimport * as material from 'material-colors';\n\nimport { ColorWrap, Raised } from '../common';\nimport SwatchesGroup from './SwatchesGroup';\n\nexport var Swatches = function Swatches(_ref) {\n var width = _ref.width,\n height = _ref.height,\n onChange = _ref.onChange,\n onSwatchHover = _ref.onSwatchHover,\n colors = _ref.colors,\n hex = _ref.hex,\n _ref$styles = _ref.styles,\n passedStyles = _ref$styles === undefined ? {} : _ref$styles,\n _ref$className = _ref.className,\n className = _ref$className === undefined ? '' : _ref$className;\n\n var styles = reactCSS(merge({\n 'default': {\n picker: {\n width: width,\n height: height\n },\n overflow: {\n height: height,\n overflowY: 'scroll'\n },\n body: {\n padding: '16px 0 6px 16px'\n },\n clear: {\n clear: 'both'\n }\n }\n }, passedStyles));\n\n var handleChange = function handleChange(data, e) {\n return onChange({ hex: data, source: 'hex' }, e);\n };\n\n return React.createElement(\n 'div',\n { style: styles.picker, className: 'swatches-picker ' + className },\n React.createElement(\n Raised,\n null,\n React.createElement(\n 'div',\n { style: styles.overflow },\n React.createElement(\n 'div',\n { style: styles.body },\n map(colors, function (group) {\n return React.createElement(SwatchesGroup, {\n key: group.toString(),\n group: group,\n active: hex,\n onClick: handleChange,\n onSwatchHover: onSwatchHover\n });\n }),\n React.createElement('div', { style: styles.clear })\n )\n )\n )\n );\n};\n\nSwatches.propTypes = {\n width: PropTypes.oneOfType([PropTypes.string, PropTypes.number]),\n height: PropTypes.oneOfType([PropTypes.string, PropTypes.number]),\n colors: PropTypes.arrayOf(PropTypes.arrayOf(PropTypes.string)),\n styles: PropTypes.object\n\n /* eslint-disable max-len */\n};Swatches.defaultProps = {\n width: 320,\n height: 240,\n colors: [[material.red['900'], material.red['700'], material.red['500'], material.red['300'], material.red['100']], [material.pink['900'], material.pink['700'], material.pink['500'], material.pink['300'], material.pink['100']], [material.purple['900'], material.purple['700'], material.purple['500'], material.purple['300'], material.purple['100']], [material.deepPurple['900'], material.deepPurple['700'], material.deepPurple['500'], material.deepPurple['300'], material.deepPurple['100']], [material.indigo['900'], material.indigo['700'], material.indigo['500'], material.indigo['300'], material.indigo['100']], [material.blue['900'], material.blue['700'], material.blue['500'], material.blue['300'], material.blue['100']], [material.lightBlue['900'], material.lightBlue['700'], material.lightBlue['500'], material.lightBlue['300'], material.lightBlue['100']], [material.cyan['900'], material.cyan['700'], material.cyan['500'], material.cyan['300'], material.cyan['100']], [material.teal['900'], material.teal['700'], material.teal['500'], material.teal['300'], material.teal['100']], ['#194D33', material.green['700'], material.green['500'], material.green['300'], material.green['100']], [material.lightGreen['900'], material.lightGreen['700'], material.lightGreen['500'], material.lightGreen['300'], material.lightGreen['100']], [material.lime['900'], material.lime['700'], material.lime['500'], material.lime['300'], material.lime['100']], [material.yellow['900'], material.yellow['700'], material.yellow['500'], material.yellow['300'], material.yellow['100']], [material.amber['900'], material.amber['700'], material.amber['500'], material.amber['300'], material.amber['100']], 
[material.orange['900'], material.orange['700'], material.orange['500'], material.orange['300'], material.orange['100']], [material.deepOrange['900'], material.deepOrange['700'], material.deepOrange['500'], material.deepOrange['300'], material.deepOrange['100']], [material.brown['900'], material.brown['700'], material.brown['500'], material.brown['300'], material.brown['100']], [material.blueGrey['900'], material.blueGrey['700'], material.blueGrey['500'], material.blueGrey['300'], material.blueGrey['100']], ['#000000', '#525252', '#969696', '#D9D9D9', '#FFFFFF']],\n styles: {}\n};\n\nexport default ColorWrap(Swatches);","import React from 'react';\nimport PropTypes from 'prop-types';\nimport reactCSS from 'reactcss';\nimport map from 'lodash-es/map';\nimport merge from 'lodash-es/merge';\nimport * as color from '../../helpers/color';\n\nimport { ColorWrap, EditableInput, Swatch } from '../common';\n\nexport var Twitter = function Twitter(_ref) {\n var onChange = _ref.onChange,\n onSwatchHover = _ref.onSwatchHover,\n hex = _ref.hex,\n colors = _ref.colors,\n width = _ref.width,\n triangle = _ref.triangle,\n _ref$styles = _ref.styles,\n passedStyles = _ref$styles === undefined ? {} : _ref$styles,\n _ref$className = _ref.className,\n className = _ref$className === undefined ? '' : _ref$className;\n\n var styles = reactCSS(merge({\n 'default': {\n card: {\n width: width,\n background: '#fff',\n border: '0 solid rgba(0,0,0,0.25)',\n boxShadow: '0 1px 4px rgba(0,0,0,0.25)',\n borderRadius: '4px',\n position: 'relative'\n },\n body: {\n padding: '15px 9px 9px 15px'\n },\n label: {\n fontSize: '18px',\n color: '#fff'\n },\n triangle: {\n width: '0px',\n height: '0px',\n borderStyle: 'solid',\n borderWidth: '0 9px 10px 9px',\n borderColor: 'transparent transparent #fff transparent',\n position: 'absolute'\n },\n triangleShadow: {\n width: '0px',\n height: '0px',\n borderStyle: 'solid',\n borderWidth: '0 9px 10px 9px',\n borderColor: 'transparent transparent rgba(0,0,0,.1) transparent',\n position: 'absolute'\n },\n hash: {\n background: '#F0F0F0',\n height: '30px',\n width: '30px',\n borderRadius: '4px 0 0 4px',\n float: 'left',\n color: '#98A1A4',\n display: 'flex',\n alignItems: 'center',\n justifyContent: 'center'\n },\n input: {\n width: '100px',\n fontSize: '14px',\n color: '#666',\n border: '0px',\n outline: 'none',\n height: '28px',\n boxShadow: 'inset 0 0 0 1px #F0F0F0',\n boxSizing: 'content-box',\n borderRadius: '0 4px 4px 0',\n float: 'left',\n paddingLeft: '8px'\n },\n swatch: {\n width: '30px',\n height: '30px',\n float: 'left',\n borderRadius: '4px',\n margin: '0 6px 6px 0'\n },\n clear: {\n clear: 'both'\n }\n },\n 'hide-triangle': {\n triangle: {\n display: 'none'\n },\n triangleShadow: {\n display: 'none'\n }\n },\n 'top-left-triangle': {\n triangle: {\n top: '-10px',\n left: '12px'\n },\n triangleShadow: {\n top: '-11px',\n left: '12px'\n }\n },\n 'top-right-triangle': {\n triangle: {\n top: '-10px',\n right: '12px'\n },\n triangleShadow: {\n top: '-11px',\n right: '12px'\n }\n }\n }, passedStyles), {\n 'hide-triangle': triangle === 'hide',\n 'top-left-triangle': triangle === 'top-left',\n 'top-right-triangle': triangle === 'top-right'\n });\n\n var handleChange = function handleChange(hexcode, e) {\n color.isValidHex(hexcode) && onChange({\n hex: hexcode,\n source: 'hex'\n }, e);\n };\n\n return React.createElement(\n 'div',\n { style: styles.card, className: 'twitter-picker ' + className },\n React.createElement('div', { style: styles.triangleShadow }),\n 
React.createElement('div', { style: styles.triangle }),\n React.createElement(\n 'div',\n { style: styles.body },\n map(colors, function (c, i) {\n return React.createElement(Swatch, {\n key: i,\n color: c,\n hex: c,\n style: styles.swatch,\n onClick: handleChange,\n onHover: onSwatchHover,\n focusStyle: {\n boxShadow: '0 0 4px ' + c\n }\n });\n }),\n React.createElement(\n 'div',\n { style: styles.hash },\n '#'\n ),\n React.createElement(EditableInput, {\n label: null,\n style: { input: styles.input },\n value: hex.replace('#', ''),\n onChange: handleChange\n }),\n React.createElement('div', { style: styles.clear })\n )\n );\n};\n\nTwitter.propTypes = {\n width: PropTypes.oneOfType([PropTypes.string, PropTypes.number]),\n triangle: PropTypes.oneOf(['hide', 'top-left', 'top-right']),\n colors: PropTypes.arrayOf(PropTypes.string),\n styles: PropTypes.object\n};\n\nTwitter.defaultProps = {\n width: 276,\n colors: ['#FF6900', '#FCB900', '#7BDCB5', '#00D084', '#8ED1FC', '#0693E3', '#ABB8C3', '#EB144C', '#F78DA7', '#9900EF'],\n triangle: 'top-left',\n styles: {}\n};\n\nexport default ColorWrap(Twitter);","import React from 'react';\nimport reactCSS from 'reactcss';\nimport PropTypes from 'prop-types';\n\nexport var GooglePointerCircle = function GooglePointerCircle(props) {\n var styles = reactCSS({\n 'default': {\n picker: {\n width: '20px',\n height: '20px',\n borderRadius: '22px',\n border: '2px #fff solid',\n transform: 'translate(-12px, -13px)',\n background: 'hsl(' + Math.round(props.hsl.h) + ', ' + Math.round(props.hsl.s * 100) + '%, ' + Math.round(props.hsl.l * 100) + '%)'\n }\n }\n });\n\n return React.createElement('div', { style: styles.picker });\n};\n\nGooglePointerCircle.propTypes = {\n hsl: PropTypes.shape({\n h: PropTypes.number,\n s: PropTypes.number,\n l: PropTypes.number,\n a: PropTypes.number\n })\n};\n\nGooglePointerCircle.defaultProps = {\n hsl: { a: 1, h: 249.94, l: 0.2, s: 0.50 }\n};\n\nexport default GooglePointerCircle;","import React from 'react';\nimport reactCSS from 'reactcss';\nimport PropTypes from 'prop-types';\n\nexport var GooglePointer = function GooglePointer(props) {\n var styles = reactCSS({\n 'default': {\n picker: {\n width: '20px',\n height: '20px',\n borderRadius: '22px',\n transform: 'translate(-10px, -7px)',\n background: 'hsl(' + Math.round(props.hsl.h) + ', 100%, 50%)',\n border: '2px white solid'\n }\n }\n });\n\n return React.createElement('div', { style: styles.picker });\n};\n\nGooglePointer.propTypes = {\n hsl: PropTypes.shape({\n h: PropTypes.number,\n s: PropTypes.number,\n l: PropTypes.number,\n a: PropTypes.number\n })\n};\n\nGooglePointer.defaultProps = {\n hsl: { a: 1, h: 249.94, l: 0.2, s: 0.50 }\n};\n\nexport default GooglePointer;","import React from 'react';\nimport reactCSS from 'reactcss';\nimport * as color from '../../helpers/color';\nimport { EditableInput } from '../common';\n\nexport var GoogleFields = function GoogleFields(_ref) {\n var onChange = _ref.onChange,\n rgb = _ref.rgb,\n hsl = _ref.hsl,\n hex = _ref.hex,\n hsv = _ref.hsv;\n\n\n var handleChange = function handleChange(data, e) {\n if (data.hex) {\n color.isValidHex(data.hex) && onChange({\n hex: data.hex,\n source: 'hex'\n }, e);\n } else if (data.rgb) {\n var values = data.rgb.split(',');\n color.isvalidColorString(data.rgb, 'rgb') && onChange({\n r: values[0],\n g: values[1],\n b: values[2],\n a: 1,\n source: 'rgb'\n }, e);\n } else if (data.hsv) {\n var _values = data.hsv.split(',');\n if (color.isvalidColorString(data.hsv, 'hsv')) {\n _values[2] = 
_values[2].replace('%', '');\n _values[1] = _values[1].replace('%', '');\n _values[0] = _values[0].replace('°', '');\n if (_values[1] == 1) {\n _values[1] = 0.01;\n } else if (_values[2] == 1) {\n _values[2] = 0.01;\n }\n onChange({\n h: Number(_values[0]),\n s: Number(_values[1]),\n v: Number(_values[2]),\n source: 'hsv'\n }, e);\n }\n } else if (data.hsl) {\n var _values2 = data.hsl.split(',');\n if (color.isvalidColorString(data.hsl, 'hsl')) {\n _values2[2] = _values2[2].replace('%', '');\n _values2[1] = _values2[1].replace('%', '');\n _values2[0] = _values2[0].replace('°', '');\n if (hsvValue[1] == 1) {\n hsvValue[1] = 0.01;\n } else if (hsvValue[2] == 1) {\n hsvValue[2] = 0.01;\n }\n onChange({\n h: Number(_values2[0]),\n s: Number(_values2[1]),\n v: Number(_values2[2]),\n source: 'hsl'\n }, e);\n }\n }\n };\n\n var styles = reactCSS({\n 'default': {\n wrap: {\n display: 'flex',\n height: '100px',\n marginTop: '4px'\n },\n fields: {\n width: '100%'\n },\n column: {\n paddingTop: '10px',\n display: 'flex',\n justifyContent: 'space-between'\n },\n double: {\n padding: '0px 4.4px',\n boxSizing: 'border-box'\n },\n input: {\n width: '100%',\n height: '38px',\n boxSizing: 'border-box',\n padding: '4px 10% 3px',\n textAlign: 'center',\n border: '1px solid #dadce0',\n fontSize: '11px',\n textTransform: 'lowercase',\n borderRadius: '5px',\n outline: 'none',\n fontFamily: 'Roboto,Arial,sans-serif'\n },\n input2: {\n height: '38px',\n width: '100%',\n border: '1px solid #dadce0',\n boxSizing: 'border-box',\n fontSize: '11px',\n textTransform: 'lowercase',\n borderRadius: '5px',\n outline: 'none',\n paddingLeft: '10px',\n fontFamily: 'Roboto,Arial,sans-serif'\n },\n label: {\n textAlign: 'center',\n fontSize: '12px',\n background: '#fff',\n position: 'absolute',\n textTransform: 'uppercase',\n color: '#3c4043',\n width: '35px',\n top: '-6px',\n left: '0',\n right: '0',\n marginLeft: 'auto',\n marginRight: 'auto',\n fontFamily: 'Roboto,Arial,sans-serif'\n },\n label2: {\n left: '10px',\n textAlign: 'center',\n fontSize: '12px',\n background: '#fff',\n position: 'absolute',\n textTransform: 'uppercase',\n color: '#3c4043',\n width: '32px',\n top: '-6px',\n fontFamily: 'Roboto,Arial,sans-serif'\n },\n single: {\n flexGrow: '1',\n margin: '0px 4.4px'\n }\n }\n });\n\n var rgbValue = rgb.r + ', ' + rgb.g + ', ' + rgb.b;\n var hslValue = Math.round(hsl.h) + '\\xB0, ' + Math.round(hsl.s * 100) + '%, ' + Math.round(hsl.l * 100) + '%';\n var hsvValue = Math.round(hsv.h) + '\\xB0, ' + Math.round(hsv.s * 100) + '%, ' + Math.round(hsv.v * 100) + '%';\n\n return React.createElement(\n 'div',\n { style: styles.wrap, className: 'flexbox-fix' },\n React.createElement(\n 'div',\n { style: styles.fields },\n React.createElement(\n 'div',\n { style: styles.double },\n React.createElement(EditableInput, {\n style: { input: styles.input, label: styles.label },\n label: 'hex',\n value: hex,\n onChange: handleChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.column },\n React.createElement(\n 'div',\n { style: styles.single },\n React.createElement(EditableInput, {\n style: { input: styles.input2, label: styles.label2 },\n label: 'rgb',\n value: rgbValue,\n onChange: handleChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.single },\n React.createElement(EditableInput, {\n style: { input: styles.input2, label: styles.label2 },\n label: 'hsv',\n value: hsvValue,\n onChange: handleChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.single },\n 
React.createElement(EditableInput, {\n style: { input: styles.input2, label: styles.label2 },\n label: 'hsl',\n value: hslValue,\n onChange: handleChange\n })\n )\n )\n )\n );\n};\n\nexport default GoogleFields;","import React from 'react';\nimport PropTypes from 'prop-types';\nimport reactCSS from 'reactcss';\nimport merge from 'lodash-es/merge';\n\nimport { ColorWrap, Saturation, Hue } from '../common';\nimport GooglePointerCircle from './GooglePointerCircle';\nimport GooglePointer from './GooglePointer';\nimport GoogleFields from './GoogleFields';\n\nexport var Google = function Google(_ref) {\n var width = _ref.width,\n onChange = _ref.onChange,\n rgb = _ref.rgb,\n hsl = _ref.hsl,\n hsv = _ref.hsv,\n hex = _ref.hex,\n header = _ref.header,\n _ref$styles = _ref.styles,\n passedStyles = _ref$styles === undefined ? {} : _ref$styles,\n _ref$className = _ref.className,\n className = _ref$className === undefined ? '' : _ref$className;\n\n var styles = reactCSS(merge({\n 'default': {\n picker: {\n width: width,\n background: '#fff',\n border: '1px solid #dfe1e5',\n boxSizing: 'initial',\n display: 'flex',\n flexWrap: 'wrap',\n borderRadius: '8px 8px 0px 0px'\n },\n head: {\n height: '57px',\n width: '100%',\n paddingTop: '16px',\n paddingBottom: '16px',\n paddingLeft: '16px',\n fontSize: '20px',\n boxSizing: 'border-box',\n fontFamily: 'Roboto-Regular,HelveticaNeue,Arial,sans-serif'\n },\n saturation: {\n width: '70%',\n padding: '0px',\n position: 'relative',\n overflow: 'hidden'\n },\n swatch: {\n width: '30%',\n height: '228px',\n padding: '0px',\n background: 'rgba(' + rgb.r + ', ' + rgb.g + ', ' + rgb.b + ', 1)',\n position: 'relative',\n overflow: 'hidden'\n },\n body: {\n margin: 'auto',\n width: '95%'\n },\n controls: {\n display: 'flex',\n boxSizing: 'border-box',\n height: '52px',\n paddingTop: '22px'\n },\n color: {\n width: '32px'\n },\n hue: {\n height: '8px',\n position: 'relative',\n margin: '0px 16px 0px 16px',\n width: '100%'\n },\n Hue: {\n radius: '2px'\n }\n }\n }, passedStyles));\n return React.createElement(\n 'div',\n { style: styles.picker, className: 'google-picker ' + className },\n React.createElement(\n 'div',\n { style: styles.head },\n header\n ),\n React.createElement('div', { style: styles.swatch }),\n React.createElement(\n 'div',\n { style: styles.saturation },\n React.createElement(Saturation, {\n hsl: hsl,\n hsv: hsv,\n pointer: GooglePointerCircle,\n onChange: onChange\n })\n ),\n React.createElement(\n 'div',\n { style: styles.body },\n React.createElement(\n 'div',\n { style: styles.controls, className: 'flexbox-fix' },\n React.createElement(\n 'div',\n { style: styles.hue },\n React.createElement(Hue, {\n style: styles.Hue,\n hsl: hsl,\n radius: '4px',\n pointer: GooglePointer,\n onChange: onChange\n })\n )\n ),\n React.createElement(GoogleFields, {\n rgb: rgb,\n hsl: hsl,\n hex: hex,\n hsv: hsv,\n onChange: onChange\n })\n )\n );\n};\n\nGoogle.propTypes = {\n width: PropTypes.oneOfType([PropTypes.string, PropTypes.number]),\n styles: PropTypes.object,\n header: PropTypes.string\n\n};\n\nGoogle.defaultProps = {\n width: 652,\n styles: {},\n header: 'Color picker'\n};\n\nexport default ColorWrap(Google);","import _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nimport _extends from \"@babel/runtime/helpers/esm/extends\";\nconst _excluded = [\"className\", \"component\", \"disableGutters\", \"fixed\", \"maxWidth\", \"classes\"];\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport 
clsx from 'clsx';\nimport { unstable_capitalize as capitalize, unstable_composeClasses as composeClasses, unstable_generateUtilityClass as generateUtilityClass } from '@mui/utils';\nimport useThemePropsSystem from '../useThemeProps';\nimport systemStyled from '../styled';\nimport createTheme from '../createTheme';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nconst defaultTheme = createTheme();\nconst defaultCreateStyledComponent = systemStyled('div', {\n name: 'MuiContainer',\n slot: 'Root',\n overridesResolver: (props, styles) => {\n const {\n ownerState\n } = props;\n return [styles.root, styles[`maxWidth${capitalize(String(ownerState.maxWidth))}`], ownerState.fixed && styles.fixed, ownerState.disableGutters && styles.disableGutters];\n }\n});\nconst useThemePropsDefault = inProps => useThemePropsSystem({\n props: inProps,\n name: 'MuiContainer',\n defaultTheme\n});\nconst useUtilityClasses = (ownerState, componentName) => {\n const getContainerUtilityClass = slot => {\n return generateUtilityClass(componentName, slot);\n };\n const {\n classes,\n fixed,\n disableGutters,\n maxWidth\n } = ownerState;\n const slots = {\n root: ['root', maxWidth && `maxWidth${capitalize(String(maxWidth))}`, fixed && 'fixed', disableGutters && 'disableGutters']\n };\n return composeClasses(slots, getContainerUtilityClass, classes);\n};\nexport default function createContainer(options = {}) {\n const {\n // This will allow adding custom styled fn (for example for custom sx style function)\n createStyledComponent = defaultCreateStyledComponent,\n useThemeProps = useThemePropsDefault,\n componentName = 'MuiContainer'\n } = options;\n const ContainerRoot = createStyledComponent(({\n theme,\n ownerState\n }) => _extends({\n width: '100%',\n marginLeft: 'auto',\n boxSizing: 'border-box',\n marginRight: 'auto',\n display: 'block'\n }, !ownerState.disableGutters && {\n paddingLeft: theme.spacing(2),\n paddingRight: theme.spacing(2),\n // @ts-ignore module augmentation fails if custom breakpoints are used\n [theme.breakpoints.up('sm')]: {\n paddingLeft: theme.spacing(3),\n paddingRight: theme.spacing(3)\n }\n }), ({\n theme,\n ownerState\n }) => ownerState.fixed && Object.keys(theme.breakpoints.values).reduce((acc, breakpointValueKey) => {\n const breakpoint = breakpointValueKey;\n const value = theme.breakpoints.values[breakpoint];\n if (value !== 0) {\n // @ts-ignore\n acc[theme.breakpoints.up(breakpoint)] = {\n maxWidth: `${value}${theme.breakpoints.unit}`\n };\n }\n return acc;\n }, {}), ({\n theme,\n ownerState\n }) => _extends({}, ownerState.maxWidth === 'xs' && {\n // @ts-ignore module augmentation fails if custom breakpoints are used\n [theme.breakpoints.up('xs')]: {\n // @ts-ignore module augmentation fails if custom breakpoints are used\n maxWidth: Math.max(theme.breakpoints.values.xs, 444)\n }\n }, ownerState.maxWidth &&\n // @ts-ignore module augmentation fails if custom breakpoints are used\n ownerState.maxWidth !== 'xs' && {\n // @ts-ignore module augmentation fails if custom breakpoints are used\n [theme.breakpoints.up(ownerState.maxWidth)]: {\n // @ts-ignore module augmentation fails if custom breakpoints are used\n maxWidth: `${theme.breakpoints.values[ownerState.maxWidth]}${theme.breakpoints.unit}`\n }\n }));\n const Container = /*#__PURE__*/React.forwardRef(function Container(inProps, ref) {\n const props = useThemeProps(inProps);\n const {\n className,\n component = 'div',\n disableGutters = false,\n fixed = false,\n maxWidth = 'lg'\n } = props,\n other = 
_objectWithoutPropertiesLoose(props, _excluded);\n const ownerState = _extends({}, props, {\n component,\n disableGutters,\n fixed,\n maxWidth\n });\n\n // @ts-ignore module augmentation fails if custom breakpoints are used\n const classes = useUtilityClasses(ownerState, componentName);\n return (\n /*#__PURE__*/\n // @ts-ignore theme is injected by the styled util\n _jsx(ContainerRoot, _extends({\n as: component\n // @ts-ignore module augmentation fails if custom breakpoints are used\n ,\n ownerState: ownerState,\n className: clsx(classes.root, className),\n ref: ref\n }, other))\n );\n });\n process.env.NODE_ENV !== \"production\" ? Container.propTypes /* remove-proptypes */ = {\n children: PropTypes.node,\n classes: PropTypes.object,\n className: PropTypes.string,\n component: PropTypes.elementType,\n disableGutters: PropTypes.bool,\n fixed: PropTypes.bool,\n maxWidth: PropTypes /* @typescript-to-proptypes-ignore */.oneOfType([PropTypes.oneOf(['xs', 'sm', 'md', 'lg', 'xl', false]), PropTypes.string]),\n sx: PropTypes.oneOfType([PropTypes.arrayOf(PropTypes.oneOfType([PropTypes.func, PropTypes.object, PropTypes.bool])), PropTypes.func, PropTypes.object])\n } : void 0;\n return Container;\n}","import PropTypes from 'prop-types';\nimport { createContainer } from '@mui/system';\nimport capitalize from '../utils/capitalize';\nimport styled from '../styles/styled';\nimport useThemeProps from '../styles/useThemeProps';\nconst Container = createContainer({\n createStyledComponent: styled('div', {\n name: 'MuiContainer',\n slot: 'Root',\n overridesResolver: (props, styles) => {\n const {\n ownerState\n } = props;\n return [styles.root, styles[`maxWidth${capitalize(String(ownerState.maxWidth))}`], ownerState.fixed && styles.fixed, ownerState.disableGutters && styles.disableGutters];\n }\n }),\n useThemeProps: inProps => useThemeProps({\n props: inProps,\n name: 'MuiContainer'\n })\n});\nprocess.env.NODE_ENV !== \"production\" ? 
Container.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * @ignore\n */\n children: PropTypes.node,\n /**\n * Override or extend the styles applied to the component.\n */\n classes: PropTypes.object,\n /**\n * The component used for the root node.\n * Either a string to use a HTML element or a component.\n */\n component: PropTypes.elementType,\n /**\n * If `true`, the left and right padding is removed.\n * @default false\n */\n disableGutters: PropTypes.bool,\n /**\n * Set the max-width to match the min-width of the current breakpoint.\n * This is useful if you'd prefer to design for a fixed set of sizes\n * instead of trying to accommodate a fully fluid viewport.\n * It's fluid by default.\n * @default false\n */\n fixed: PropTypes.bool,\n /**\n * Determine the max-width of the container.\n * The container width grows with the size of the screen.\n * Set to `false` to disable `maxWidth`.\n * @default 'lg'\n */\n maxWidth: PropTypes /* @typescript-to-proptypes-ignore */.oneOfType([PropTypes.oneOf(['xs', 'sm', 'md', 'lg', 'xl', false]), PropTypes.string]),\n /**\n * The system prop that allows defining system overrides as well as additional CSS styles.\n */\n sx: PropTypes.oneOfType([PropTypes.arrayOf(PropTypes.oneOfType([PropTypes.func, PropTypes.object, PropTypes.bool])), PropTypes.func, PropTypes.object])\n} : void 0;\nexport default Container;","import createSvgIcon from './utils/createSvgIcon';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nexport default createSvgIcon( /*#__PURE__*/_jsx(\"path\", {\n d: \"M19 6.41 17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12z\"\n}), 'Close');","import createSvgIcon from './utils/createSvgIcon';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nexport default createSvgIcon( /*#__PURE__*/_jsx(\"path\", {\n d: \"M12 2C6.49 2 2 6.49 2 12s4.49 10 10 10c1.38 0 2.5-1.12 2.5-2.5 0-.61-.23-1.2-.64-1.67-.08-.1-.13-.21-.13-.33 0-.28.22-.5.5-.5H16c3.31 0 6-2.69 6-6 0-4.96-4.49-9-10-9zm5.5 11c-.83 0-1.5-.67-1.5-1.5s.67-1.5 1.5-1.5 1.5.67 1.5 1.5-.67 1.5-1.5 1.5zm-3-4c-.83 0-1.5-.67-1.5-1.5S13.67 6 14.5 6s1.5.67 1.5 1.5S15.33 9 14.5 9zM5 11.5c0-.83.67-1.5 1.5-1.5s1.5.67 1.5 1.5S7.33 13 6.5 13 5 12.33 5 11.5zm6-4c0 .83-.67 1.5-1.5 1.5S8 8.33 8 7.5 8.67 6 9.5 6s1.5.67 1.5 1.5z\"\n}), 'Palette');","import createSvgIcon from './utils/createSvgIcon';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nexport default createSvgIcon( /*#__PURE__*/_jsx(\"path\", {\n d: \"M17.63 5.84C17.27 5.33 16.67 5 16 5L5 5.01C3.9 5.01 3 5.9 3 7v10c0 1.1.9 1.99 2 1.99L16 19c.67 0 1.27-.33 1.63-.84L22 12l-4.37-6.16z\"\n}), 'Label');","import * as React from \"react\";\nimport { CirclePicker, ColorResult } from \"react-color\";\n\nimport { Popover, Box } from \"@mui/material\";\nimport { Label } from \"@mui/icons-material\";\nimport ArrowDropDownIcon from \"@mui/icons-material/ArrowDropDown\";\nimport ArrowDropUpIcon from \"@mui/icons-material/ArrowDropUp\";\n\ntype ColorIconButtonProps = {\n color: string;\n onColorChange: (color: any) => void;\n unusedColors?: string[];\n};\n\nexport const ColorIcon = ({\n color,\n onColorChange,\n unusedColors,\n}: ColorIconButtonProps) => {\n const [colorMenuAnchorEl, setColorMenuAnchorEl] =\n 
React.useState(null);\n\n const colorPopupOpen = Boolean(colorMenuAnchorEl);\n\n const onOpenColorPicker = (event: React.MouseEvent) => {\n if (colorPopupOpen) {\n setColorMenuAnchorEl(null);\n } else {\n setColorMenuAnchorEl(event.currentTarget);\n }\n };\n\n const onCloseColorPicker = () => {\n setColorMenuAnchorEl(null);\n };\n\n const onChange = (color: ColorResult) => {\n onColorChange(color);\n onCloseColorPicker();\n };\n //TODO: should be dialog and button\n return (\n \n ({\n p: 1,\n gap: 2,\n borderRadius: 1,\n \" :hover\": {\n backgroundColor: theme.palette.action.hover,\n },\n })}\n >\n \n );\n};\n","import { unstable_generateUtilityClasses as generateUtilityClasses } from '@mui/utils';\nimport generateUtilityClass from '../generateUtilityClass';\nexport function getSwitchUtilityClass(slot) {\n return generateUtilityClass('MuiSwitch', slot);\n}\nconst switchClasses = generateUtilityClasses('MuiSwitch', ['root', 'edgeStart', 'edgeEnd', 'switchBase', 'colorPrimary', 'colorSecondary', 'sizeSmall', 'sizeMedium', 'checked', 'disabled', 'input', 'thumb', 'track']);\nexport default switchClasses;","import _objectWithoutPropertiesLoose from \"@babel/runtime/helpers/esm/objectWithoutPropertiesLoose\";\nimport _extends from \"@babel/runtime/helpers/esm/extends\";\nconst _excluded = [\"className\", \"color\", \"edge\", \"size\", \"sx\"];\n// @inheritedComponent IconButton\nimport * as React from 'react';\nimport PropTypes from 'prop-types';\nimport clsx from 'clsx';\nimport { refType } from '@mui/utils';\nimport { unstable_composeClasses as composeClasses } from '@mui/base';\nimport { alpha, darken, lighten } from '@mui/system';\nimport capitalize from '../utils/capitalize';\nimport SwitchBase from '../internal/SwitchBase';\nimport useThemeProps from '../styles/useThemeProps';\nimport styled from '../styles/styled';\nimport switchClasses, { getSwitchUtilityClass } from './switchClasses';\nimport { jsx as _jsx } from \"react/jsx-runtime\";\nimport { jsxs as _jsxs } from \"react/jsx-runtime\";\nconst useUtilityClasses = ownerState => {\n const {\n classes,\n edge,\n size,\n color,\n checked,\n disabled\n } = ownerState;\n const slots = {\n root: ['root', edge && `edge${capitalize(edge)}`, `size${capitalize(size)}`],\n switchBase: ['switchBase', `color${capitalize(color)}`, checked && 'checked', disabled && 'disabled'],\n thumb: ['thumb'],\n track: ['track'],\n input: ['input']\n };\n const composedClasses = composeClasses(slots, getSwitchUtilityClass, classes);\n return _extends({}, classes, composedClasses);\n};\nconst SwitchRoot = styled('span', {\n name: 'MuiSwitch',\n slot: 'Root',\n overridesResolver: (props, styles) => {\n const {\n ownerState\n } = props;\n return [styles.root, ownerState.edge && styles[`edge${capitalize(ownerState.edge)}`], styles[`size${capitalize(ownerState.size)}`]];\n }\n})(({\n ownerState\n}) => _extends({\n display: 'inline-flex',\n width: 34 + 12 * 2,\n height: 14 + 12 * 2,\n overflow: 'hidden',\n padding: 12,\n boxSizing: 'border-box',\n position: 'relative',\n flexShrink: 0,\n zIndex: 0,\n // Reset the stacking context.\n verticalAlign: 'middle',\n // For correct alignment with the text.\n '@media print': {\n colorAdjust: 'exact'\n }\n}, ownerState.edge === 'start' && {\n marginLeft: -8\n}, ownerState.edge === 'end' && {\n marginRight: -8\n}, ownerState.size === 'small' && {\n width: 40,\n height: 24,\n padding: 7,\n [`& .${switchClasses.thumb}`]: {\n width: 16,\n height: 16\n },\n [`& .${switchClasses.switchBase}`]: {\n padding: 4,\n 
[`&.${switchClasses.checked}`]: {\n transform: 'translateX(16px)'\n }\n }\n}));\nconst SwitchSwitchBase = styled(SwitchBase, {\n name: 'MuiSwitch',\n slot: 'SwitchBase',\n overridesResolver: (props, styles) => {\n const {\n ownerState\n } = props;\n return [styles.switchBase, {\n [`& .${switchClasses.input}`]: styles.input\n }, ownerState.color !== 'default' && styles[`color${capitalize(ownerState.color)}`]];\n }\n})(({\n theme\n}) => ({\n position: 'absolute',\n top: 0,\n left: 0,\n zIndex: 1,\n // Render above the focus ripple.\n color: theme.vars ? theme.vars.palette.Switch.defaultColor : `${theme.palette.mode === 'light' ? theme.palette.common.white : theme.palette.grey[300]}`,\n transition: theme.transitions.create(['left', 'transform'], {\n duration: theme.transitions.duration.shortest\n }),\n [`&.${switchClasses.checked}`]: {\n transform: 'translateX(20px)'\n },\n [`&.${switchClasses.disabled}`]: {\n color: theme.vars ? theme.vars.palette.Switch.defaultDisabledColor : `${theme.palette.mode === 'light' ? theme.palette.grey[100] : theme.palette.grey[600]}`\n },\n [`&.${switchClasses.checked} + .${switchClasses.track}`]: {\n opacity: 0.5\n },\n [`&.${switchClasses.disabled} + .${switchClasses.track}`]: {\n opacity: theme.vars ? theme.vars.opacity.switchTrackDisabled : `${theme.palette.mode === 'light' ? 0.12 : 0.2}`\n },\n [`& .${switchClasses.input}`]: {\n left: '-100%',\n width: '300%'\n }\n}), ({\n theme,\n ownerState\n}) => _extends({\n '&:hover': {\n backgroundColor: theme.vars ? `rgba(${theme.vars.palette.action.activeChannel} / ${theme.vars.palette.action.hoverOpacity})` : alpha(theme.palette.action.active, theme.palette.action.hoverOpacity),\n // Reset on touch devices, it doesn't add specificity\n '@media (hover: none)': {\n backgroundColor: 'transparent'\n }\n }\n}, ownerState.color !== 'default' && {\n [`&.${switchClasses.checked}`]: {\n color: (theme.vars || theme).palette[ownerState.color].main,\n '&:hover': {\n backgroundColor: theme.vars ? `rgba(${theme.vars.palette[ownerState.color].mainChannel} / ${theme.vars.palette.action.hoverOpacity})` : alpha(theme.palette[ownerState.color].main, theme.palette.action.hoverOpacity),\n '@media (hover: none)': {\n backgroundColor: 'transparent'\n }\n },\n [`&.${switchClasses.disabled}`]: {\n color: theme.vars ? theme.vars.palette.Switch[`${ownerState.color}DisabledColor`] : `${theme.palette.mode === 'light' ? lighten(theme.palette[ownerState.color].main, 0.62) : darken(theme.palette[ownerState.color].main, 0.55)}`\n }\n },\n [`&.${switchClasses.checked} + .${switchClasses.track}`]: {\n backgroundColor: (theme.vars || theme).palette[ownerState.color].main\n }\n}));\nconst SwitchTrack = styled('span', {\n name: 'MuiSwitch',\n slot: 'Track',\n overridesResolver: (props, styles) => styles.track\n})(({\n theme\n}) => ({\n height: '100%',\n width: '100%',\n borderRadius: 14 / 2,\n zIndex: -1,\n transition: theme.transitions.create(['opacity', 'background-color'], {\n duration: theme.transitions.duration.shortest\n }),\n backgroundColor: theme.vars ? theme.vars.palette.common.onBackground : `${theme.palette.mode === 'light' ? theme.palette.common.black : theme.palette.common.white}`,\n opacity: theme.vars ? theme.vars.opacity.switchTrack : `${theme.palette.mode === 'light' ? 
0.38 : 0.3}`\n}));\nconst SwitchThumb = styled('span', {\n name: 'MuiSwitch',\n slot: 'Thumb',\n overridesResolver: (props, styles) => styles.thumb\n})(({\n theme\n}) => ({\n boxShadow: (theme.vars || theme).shadows[1],\n backgroundColor: 'currentColor',\n width: 20,\n height: 20,\n borderRadius: '50%'\n}));\nconst Switch = /*#__PURE__*/React.forwardRef(function Switch(inProps, ref) {\n const props = useThemeProps({\n props: inProps,\n name: 'MuiSwitch'\n });\n const {\n className,\n color = 'primary',\n edge = false,\n size = 'medium',\n sx\n } = props,\n other = _objectWithoutPropertiesLoose(props, _excluded);\n const ownerState = _extends({}, props, {\n color,\n edge,\n size\n });\n const classes = useUtilityClasses(ownerState);\n const icon = /*#__PURE__*/_jsx(SwitchThumb, {\n className: classes.thumb,\n ownerState: ownerState\n });\n return /*#__PURE__*/_jsxs(SwitchRoot, {\n className: clsx(classes.root, className),\n sx: sx,\n ownerState: ownerState,\n children: [/*#__PURE__*/_jsx(SwitchSwitchBase, _extends({\n type: \"checkbox\",\n icon: icon,\n checkedIcon: icon,\n ref: ref,\n ownerState: ownerState\n }, other, {\n classes: _extends({}, classes, {\n root: classes.switchBase\n })\n })), /*#__PURE__*/_jsx(SwitchTrack, {\n className: classes.track,\n ownerState: ownerState\n })]\n });\n});\nprocess.env.NODE_ENV !== \"production\" ? Switch.propTypes /* remove-proptypes */ = {\n // ----------------------------- Warning --------------------------------\n // | These PropTypes are generated from the TypeScript type definitions |\n // | To update them edit the d.ts file and run \"yarn proptypes\" |\n // ----------------------------------------------------------------------\n /**\n * If `true`, the component is checked.\n */\n checked: PropTypes.bool,\n /**\n * The icon to display when the component is checked.\n */\n checkedIcon: PropTypes.node,\n /**\n * Override or extend the styles applied to the component.\n */\n classes: PropTypes.object,\n /**\n * @ignore\n */\n className: PropTypes.string,\n /**\n * The color of the component.\n * It supports both default and custom theme colors, which can be added as shown in the\n * [palette customization guide](https://mui.com/material-ui/customization/palette/#adding-new-colors).\n * @default 'primary'\n */\n color: PropTypes /* @typescript-to-proptypes-ignore */.oneOfType([PropTypes.oneOf(['default', 'primary', 'secondary', 'error', 'info', 'success', 'warning']), PropTypes.string]),\n /**\n * The default checked state. 
Use when the component is not controlled.\n */\n defaultChecked: PropTypes.bool,\n /**\n * If `true`, the component is disabled.\n */\n disabled: PropTypes.bool,\n /**\n * If `true`, the ripple effect is disabled.\n * @default false\n */\n disableRipple: PropTypes.bool,\n /**\n * If given, uses a negative margin to counteract the padding on one\n * side (this is often helpful for aligning the left or right\n * side of the icon with content above or below, without ruining the border\n * size and shape).\n * @default false\n */\n edge: PropTypes.oneOf(['end', 'start', false]),\n /**\n * The icon to display when the component is unchecked.\n */\n icon: PropTypes.node,\n /**\n * The id of the `input` element.\n */\n id: PropTypes.string,\n /**\n * [Attributes](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input#Attributes) applied to the `input` element.\n */\n inputProps: PropTypes.object,\n /**\n * Pass a ref to the `input` element.\n */\n inputRef: refType,\n /**\n * Callback fired when the state is changed.\n *\n * @param {React.ChangeEvent} event The event source of the callback.\n * You can pull out the new value by accessing `event.target.value` (string).\n * You can pull out the new checked state by accessing `event.target.checked` (boolean).\n */\n onChange: PropTypes.func,\n /**\n * If `true`, the `input` element is required.\n * @default false\n */\n required: PropTypes.bool,\n /**\n * The size of the component.\n * `small` is equivalent to the dense switch styling.\n * @default 'medium'\n */\n size: PropTypes /* @typescript-to-proptypes-ignore */.oneOfType([PropTypes.oneOf(['medium', 'small']), PropTypes.string]),\n /**\n * The system prop that allows defining system overrides as well as additional CSS styles.\n */\n sx: PropTypes.oneOfType([PropTypes.arrayOf(PropTypes.oneOfType([PropTypes.func, PropTypes.object, PropTypes.bool])), PropTypes.func, PropTypes.object]),\n /**\n * The value of the component. The DOM API casts this to a string.\n * The browser uses \"on\" as the default value.\n */\n value: PropTypes.any\n} : void 0;\nexport default Switch;","import { Switch, styled } from \"@mui/material\";\n\ntype MUISwitchProps = {\n disable_icon: string;\n enable_icon: string;\n};\n\n// source: https://mui.com/components/switches/\nexport const MaterialUISwitch = styled(Switch)(\n ({ theme, disable_icon, enable_icon }) => ({\n width: 62,\n height: 34,\n padding: 7,\n \"& .MuiSwitch-switchBase\": {\n margin: 1,\n padding: 0,\n transform: \"translateX(6px)\",\n \"&.Mui-checked\": {\n color: \"#fff\",\n transform: \"translateX(22px)\",\n \"& .MuiSwitch-thumb:before\": {\n backgroundImage: `url(${disable_icon})`,\n },\n \"& + .MuiSwitch-track\": {\n opacity: 1,\n backgroundColor:\n theme.palette.mode === \"dark\" ? \"#8796A5\" : \"#aab4be\",\n },\n },\n },\n \"& .MuiSwitch-thumb\": {\n backgroundColor: theme.palette.mode === \"dark\" ? \"#003892\" : \"#001e3c\",\n width: 32,\n height: 32,\n \"&:before\": {\n content: \"''\",\n position: \"absolute\",\n width: \"100%\",\n height: \"100%\",\n left: 0,\n top: 0,\n backgroundRepeat: \"no-repeat\",\n backgroundPosition: \"center\",\n backgroundImage: `url(${enable_icon})`,\n },\n },\n \"& .MuiSwitch-track\": {\n opacity: 1,\n backgroundColor: theme.palette.mode === \"dark\" ? \"#8796A5\" : \"#aab4be\",\n borderRadius: 20 / 2,\n },\n })\n);\n","var _path;\nvar _excluded = [\"title\", \"titleId\"];\nfunction _extends() { _extends = Object.assign ? 
Object.assign.bind() : function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); }\nfunction _objectWithoutProperties(source, excluded) { if (source == null) return {}; var target = _objectWithoutPropertiesLoose(source, excluded); var key, i; if (Object.getOwnPropertySymbols) { var sourceSymbolKeys = Object.getOwnPropertySymbols(source); for (i = 0; i < sourceSymbolKeys.length; i++) { key = sourceSymbolKeys[i]; if (excluded.indexOf(key) >= 0) continue; if (!Object.prototype.propertyIsEnumerable.call(source, key)) continue; target[key] = source[key]; } } return target; }\nfunction _objectWithoutPropertiesLoose(source, excluded) { if (source == null) return {}; var target = {}; var sourceKeys = Object.keys(source); var key, i; for (i = 0; i < sourceKeys.length; i++) { key = sourceKeys[i]; if (excluded.indexOf(key) >= 0) continue; target[key] = source[key]; } return target; }\nimport * as React from \"react\";\nfunction SvgSun(_ref, svgRef) {\n var title = _ref.title,\n titleId = _ref.titleId,\n props = _objectWithoutProperties(_ref, _excluded);\n return /*#__PURE__*/React.createElement(\"svg\", _extends({\n xmlns: \"http://www.w3.org/2000/svg\",\n height: 20,\n width: 20,\n viewBox: \"0 0 20 20\",\n ref: svgRef,\n \"aria-labelledby\": titleId\n }, props), title ? /*#__PURE__*/React.createElement(\"title\", {\n id: titleId\n }, title) : null, _path || (_path = /*#__PURE__*/React.createElement(\"path\", {\n fill: \"#fff\",\n d: \"M9.305 1.667V3.75h1.389V1.667h-1.39zm-4.707 1.95l-.982.982L5.09 6.072l.982-.982-1.473-1.473zm10.802 0L13.927 5.09l.982.982 1.473-1.473-.982-.982zM10 5.139a4.872 4.872 0 00-4.862 4.86A4.872 4.872 0 0010 14.862 4.872 4.872 0 0014.86 10 4.872 4.872 0 0010 5.139zm0 1.389A3.462 3.462 0 0113.471 10a3.462 3.462 0 01-3.473 3.472A3.462 3.462 0 016.527 10 3.462 3.462 0 0110 6.528zM1.665 9.305v1.39h2.083v-1.39H1.666zm14.583 0v1.39h2.084v-1.39h-2.084zM5.09 13.928L3.616 15.4l.982.982 1.473-1.473-.982-.982zm9.82 0l-.982.982 1.473 1.473.982-.982-1.473-1.473zM9.305 16.25v2.083h1.389V16.25h-1.39z\"\n })));\n}\nvar ForwardRef = /*#__PURE__*/React.forwardRef(SvgSun);\nexport default __webpack_public_path__ + \"static/media/Sun.81cb4cfc83ddcfb6458af8e10b7c94a6.svg\";\nexport { ForwardRef as ReactComponent };","var _path;\nvar _excluded = [\"title\", \"titleId\"];\nfunction _extends() { _extends = Object.assign ? 
Object.assign.bind() : function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); }\nfunction _objectWithoutProperties(source, excluded) { if (source == null) return {}; var target = _objectWithoutPropertiesLoose(source, excluded); var key, i; if (Object.getOwnPropertySymbols) { var sourceSymbolKeys = Object.getOwnPropertySymbols(source); for (i = 0; i < sourceSymbolKeys.length; i++) { key = sourceSymbolKeys[i]; if (excluded.indexOf(key) >= 0) continue; if (!Object.prototype.propertyIsEnumerable.call(source, key)) continue; target[key] = source[key]; } } return target; }\nfunction _objectWithoutPropertiesLoose(source, excluded) { if (source == null) return {}; var target = {}; var sourceKeys = Object.keys(source); var key, i; for (i = 0; i < sourceKeys.length; i++) { key = sourceKeys[i]; if (excluded.indexOf(key) >= 0) continue; target[key] = source[key]; } return target; }\nimport * as React from \"react\";\nfunction SvgMoon(_ref, svgRef) {\n var title = _ref.title,\n titleId = _ref.titleId,\n props = _objectWithoutProperties(_ref, _excluded);\n return /*#__PURE__*/React.createElement(\"svg\", _extends({\n xmlns: \"http://www.w3.org/2000/svg\",\n height: 20,\n width: 20,\n viewBox: \"0 0 20 20\",\n ref: svgRef,\n \"aria-labelledby\": titleId\n }, props), title ? /*#__PURE__*/React.createElement(\"title\", {\n id: titleId\n }, title) : null, _path || (_path = /*#__PURE__*/React.createElement(\"path\", {\n fill: \"#fff\",\n d: \"M4.2 2.5l-.7 1.8-1.8.7 1.8.7.7 1.8.6-1.8L6.7 5l-1.9-.7-.6-1.8zm15 8.3a6.7 6.7 0 11-6.6-6.6 5.8 5.8 0 006.6 6.6z\"\n })));\n}\nvar ForwardRef = /*#__PURE__*/React.forwardRef(SvgMoon);\nexport default __webpack_public_path__ + \"static/media/Moon.593035e92f1f02f04668bb75454d1fb3.svg\";\nexport { ForwardRef as ReactComponent };","var _path, _path2;\nvar _excluded = [\"title\", \"titleId\"];\nfunction _extends() { _extends = Object.assign ? 
Object.assign.bind() : function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); }\nfunction _objectWithoutProperties(source, excluded) { if (source == null) return {}; var target = _objectWithoutPropertiesLoose(source, excluded); var key, i; if (Object.getOwnPropertySymbols) { var sourceSymbolKeys = Object.getOwnPropertySymbols(source); for (i = 0; i < sourceSymbolKeys.length; i++) { key = sourceSymbolKeys[i]; if (excluded.indexOf(key) >= 0) continue; if (!Object.prototype.propertyIsEnumerable.call(source, key)) continue; target[key] = source[key]; } } return target; }\nfunction _objectWithoutPropertiesLoose(source, excluded) { if (source == null) return {}; var target = {}; var sourceKeys = Object.keys(source); var key, i; for (i = 0; i < sourceKeys.length; i++) { key = sourceKeys[i]; if (excluded.indexOf(key) >= 0) continue; target[key] = source[key]; } return target; }\nimport * as React from \"react\";\nfunction SvgVolumeUp(_ref, svgRef) {\n var title = _ref.title,\n titleId = _ref.titleId,\n props = _objectWithoutProperties(_ref, _excluded);\n return /*#__PURE__*/React.createElement(\"svg\", _extends({\n xmlns: \"http://www.w3.org/2000/svg\",\n height: \"24px\",\n viewBox: \"0 0 24 24\",\n width: \"24px\",\n fill: \"#FFFFFF\",\n ref: svgRef,\n \"aria-labelledby\": titleId\n }, props), title ? /*#__PURE__*/React.createElement(\"title\", {\n id: titleId\n }, title) : null, _path || (_path = /*#__PURE__*/React.createElement(\"path\", {\n d: \"M0 0h24v24H0z\",\n fill: \"none\"\n })), _path2 || (_path2 = /*#__PURE__*/React.createElement(\"path\", {\n d: \"M3 9v6h4l5 5V4L7 9H3zm13.5 3c0-1.77-1.02-3.29-2.5-4.03v8.05c1.48-.73 2.5-2.25 2.5-4.02zM14 3.23v2.06c2.89.86 5 3.54 5 6.71s-2.11 5.85-5 6.71v2.06c4.01-.91 7-4.49 7-8.77s-2.99-7.86-7-8.77z\"\n })));\n}\nvar ForwardRef = /*#__PURE__*/React.forwardRef(SvgVolumeUp);\nexport default __webpack_public_path__ + \"static/media/VolumeUp.30de055e122b96e2aa207a710b1bd74a.svg\";\nexport { ForwardRef as ReactComponent };","var _path, _path2;\nvar _excluded = [\"title\", \"titleId\"];\nfunction _extends() { _extends = Object.assign ? 
Object.assign.bind() : function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); }\nfunction _objectWithoutProperties(source, excluded) { if (source == null) return {}; var target = _objectWithoutPropertiesLoose(source, excluded); var key, i; if (Object.getOwnPropertySymbols) { var sourceSymbolKeys = Object.getOwnPropertySymbols(source); for (i = 0; i < sourceSymbolKeys.length; i++) { key = sourceSymbolKeys[i]; if (excluded.indexOf(key) >= 0) continue; if (!Object.prototype.propertyIsEnumerable.call(source, key)) continue; target[key] = source[key]; } } return target; }\nfunction _objectWithoutPropertiesLoose(source, excluded) { if (source == null) return {}; var target = {}; var sourceKeys = Object.keys(source); var key, i; for (i = 0; i < sourceKeys.length; i++) { key = sourceKeys[i]; if (excluded.indexOf(key) >= 0) continue; target[key] = source[key]; } return target; }\nimport * as React from \"react\";\nfunction SvgVolumeOff(_ref, svgRef) {\n var title = _ref.title,\n titleId = _ref.titleId,\n props = _objectWithoutProperties(_ref, _excluded);\n return /*#__PURE__*/React.createElement(\"svg\", _extends({\n xmlns: \"http://www.w3.org/2000/svg\",\n height: \"24px\",\n viewBox: \"0 0 24 24\",\n width: \"24px\",\n fill: \"#FFFFFF\",\n ref: svgRef,\n \"aria-labelledby\": titleId\n }, props), title ? /*#__PURE__*/React.createElement(\"title\", {\n id: titleId\n }, title) : null, _path || (_path = /*#__PURE__*/React.createElement(\"path\", {\n d: \"M0 0h24v24H0z\",\n fill: \"none\"\n })), _path2 || (_path2 = /*#__PURE__*/React.createElement(\"path\", {\n d: \"M16.5 12c0-1.77-1.02-3.29-2.5-4.03v2.21l2.45 2.45c.03-.2.05-.41.05-.63zm2.5 0c0 .94-.2 1.82-.54 2.64l1.51 1.51C20.63 14.91 21 13.5 21 12c0-4.28-2.99-7.86-7-8.77v2.06c2.89.86 5 3.54 5 6.71zM4.27 3L3 4.27 7.73 9H3v6h4l5 5v-6.73l4.25 4.25c-.67.52-1.42.93-2.25 1.18v2.06c1.38-.31 2.63-.95 3.69-1.81L19.73 21 21 19.73l-9-9L4.27 3zM12 4L9.91 6.09 12 8.18V4z\"\n })));\n}\nvar ForwardRef = /*#__PURE__*/React.forwardRef(SvgVolumeOff);\nexport default __webpack_public_path__ + \"static/media/VolumeOff.1149786552b52a25580655f2ce3391c1.svg\";\nexport { ForwardRef as ReactComponent };","import { createSelector } from \"@reduxjs/toolkit\";\nimport { difference } from \"lodash\";\nimport { ProjectState } from \"store/types\";\n\nimport { ThingSortKey } from \"utils/common/enums\";\nimport { Partition } from \"utils/models/enums\";\n\nexport const selectProject = ({\n project,\n}: {\n project: ProjectState;\n}): ProjectState => {\n return project;\n};\n\n/*\nNAME\n*/\n\nexport const selectProjectName = ({ project }: { project: ProjectState }) => {\n return project.name;\n};\n\n/*\nSELECTED THINGS\n*/\n\nexport const selectSelectedThingIds = ({\n project,\n}: {\n project: ProjectState;\n}): Array => {\n return project.selectedThingIds;\n};\n\nexport const selectSelectedThingIdsLength = ({\n project,\n}: {\n project: ProjectState;\n}) => {\n return project.selectedThingIds.length;\n};\n\n/*\nSORT TYPE\n*/\n\nexport const selectSortType = ({\n project,\n}: {\n project: ProjectState;\n}): ThingSortKey => {\n return project.sortType;\n};\n\n/*\nACTIVE KIND\n*/\n\nexport const selectActiveKindId = ({ project }: { project: ProjectState }) => {\n return project.activeKind;\n};\n\n/*\nHIGHLIGHTED CATEGORY\n*/\n\nexport const selectHighlightedCategory = ({\n 
project,\n}: {\n project: ProjectState;\n}) => {\n return project.highlightedCategory;\n};\n\n/*\nTHING FILTERS\n*/\n\nexport const selectThingFilters = ({ project }: { project: ProjectState }) => {\n return project.thingFilters;\n};\n\nexport const selectActiveThingFilters = ({\n project,\n}: {\n project: ProjectState;\n}) => {\n const activeKind = project.activeKind;\n return project.thingFilters[activeKind] ?? {};\n};\nexport const selectActiveFilteredStateHasFilters = ({\n project,\n}: {\n project: ProjectState;\n}) => {\n const activeKind = project.activeKind;\n const thingFilters = project.thingFilters[activeKind];\n if (!thingFilters) return false;\n const hasFilters = Object.values(thingFilters).some((filters) => {\n return filters.length > 0;\n });\n\n return hasFilters;\n};\n\nexport const selectUnfilteredActivePartitions = createSelector(\n selectActiveThingFilters,\n (thingFilters) => {\n const filteredPartitions = thingFilters.partition;\n const allPartitions = Object.values(Partition);\n const unfilteredPartitions = difference(allPartitions, filteredPartitions);\n return unfilteredPartitions;\n }\n);\n\nexport const selectKindTabFilters = ({\n project,\n}: {\n project: ProjectState;\n}) => {\n return project.kindTabFilters;\n};\n\n/*\nLOAD PERCENT\n*/\n\nexport const selectLoadPercent = ({ project }: { project: ProjectState }) => {\n return project.loadPercent;\n};\n\n/*\nLOAD MESSAGE\n*/\n\nexport const selectLoadMessage = ({ project }: { project: ProjectState }) => {\n return project.loadMessage;\n};\n","import {\n selectAllKindIds,\n selectCategoriesDictionary,\n selectKindDictionary,\n selectThingsDictionary,\n} from \"store/data/selectors\";\nimport {\n selectActiveKindId,\n selectActiveThingFilters,\n selectKindTabFilters,\n selectSelectedThingIds,\n} from \"./selectors\";\nimport { createSelector } from \"@reduxjs/toolkit\";\nimport { difference, intersection } from \"lodash\";\nimport { isUnknownCategory, updateRecord } from \"utils/common/helpers\";\nimport { CATEGORY_COLORS } from \"utils/common/constants\";\nimport { AnnotationObject, ImageObject, Thing } from \"store/data/types\";\nimport { Partition } from \"utils/models/enums\";\n\nexport const selectVisibleKinds = createSelector(\n selectKindTabFilters,\n selectAllKindIds,\n (filteredKinds, allKinds) => {\n return difference(allKinds, filteredKinds);\n }\n);\n\nexport const selectActiveKindObject = createSelector(\n selectActiveKindId,\n selectKindDictionary,\n (activeKind, kindDict) => {\n return kindDict[activeKind];\n }\n);\n\nexport const selectActiveUnknownCategoryId = createSelector(\n selectActiveKindObject,\n (activeKind) => {\n return activeKind.unknownCategoryId;\n }\n);\n\nexport const selectActiveCategories = createSelector(\n [selectKindDictionary, selectCategoriesDictionary, selectActiveKindId],\n (kindDict, categoriesDict, kind) => {\n const categoriesOfKind = kindDict[kind].categories;\n\n return categoriesOfKind.map((catId) => categoriesDict[catId]);\n }\n);\n\nexport const selectActiveKnownCategories = createSelector(\n selectActiveCategories,\n (activeCategories) => {\n return activeCategories.filter((cat) => !isUnknownCategory(cat.id));\n }\n);\n\nexport const selectActiveUnknownCategory = createSelector(\n selectActiveUnknownCategoryId,\n selectCategoriesDictionary,\n (unknownCatId, catDict) => {\n return catDict[unknownCatId];\n }\n);\n\nexport const selectActiveCategoryCount = createSelector(\n selectActiveCategories,\n (activeCategories) => {\n return activeCategories.length;\n 
}\n);\n\nexport const selectActiveKnownCategoryCount = createSelector(\n selectActiveKnownCategories,\n (activeKnownCategories) => {\n return activeKnownCategories.length;\n }\n);\n\nexport const selectActiveCategoryNames = createSelector(\n selectActiveCategories,\n (activeCategories) => {\n return activeCategories.map((cat) => cat.name);\n }\n);\n\nexport const selectActiveCategoryColors = createSelector(\n selectActiveCategories,\n (activeCategories) => {\n const activeColors = activeCategories.map((cat) => cat.color.toUpperCase());\n const allCategoryColors = Object.values(CATEGORY_COLORS).map((color) =>\n color.toUpperCase()\n );\n const availableColors = difference(allCategoryColors, activeColors);\n return availableColors;\n }\n);\n\nexport const selectUnfilteredActiveCategoryIds = createSelector(\n selectActiveThingFilters,\n selectActiveCategories,\n (thingFilters, activeCategories) => {\n const filteredCategories = thingFilters.categoryId;\n const unfilteredCategories = difference(\n activeCategories.map((cat) => cat.id),\n filteredCategories\n );\n return unfilteredCategories;\n }\n);\n\nexport const selectActiveThingIds = createSelector(\n selectActiveKindObject,\n (kind) => {\n return kind.containing;\n }\n);\n\nexport const selectActiveThings = createSelector(\n [selectActiveThingIds, selectThingsDictionary],\n (activeThingIds, thingDict) => {\n return activeThingIds.map((thingId) => thingDict[thingId]);\n }\n);\n\nexport const selectActiveLabeledThingsIds = createSelector(\n selectActiveKindObject,\n selectCategoriesDictionary,\n (activeKind, catDict) => {\n if (!activeKind) return [];\n const thingsInKind = activeKind.containing;\n const unknownCategoryId = activeKind.unknownCategoryId;\n const unknownThings = catDict[unknownCategoryId].containing;\n return difference(thingsInKind, unknownThings);\n }\n);\n\nexport const selectActiveLabeledThingsCount = createSelector(\n selectActiveLabeledThingsIds,\n (activeLabeledThings) => {\n return activeLabeledThings.length;\n }\n);\n\nexport const selectActiveLabeledThings = createSelector(\n selectActiveLabeledThingsIds,\n selectThingsDictionary,\n (activeLabeledThingIds, thingDict) => {\n const activeLabeledThings: Array = [];\n for (const thingId of activeLabeledThingIds) {\n const thing = thingDict[thingId];\n thing && activeLabeledThings.push(thing);\n }\n\n return activeLabeledThings;\n }\n);\n\nexport const selectActiveUnlabeledThingsIds = createSelector(\n selectActiveKindObject,\n selectCategoriesDictionary,\n (activeKind, catDict) => {\n if (!activeKind) return [];\n const thingsInKind = activeKind.containing;\n const unknownCategoryId = activeKind.unknownCategoryId;\n const unknownThings = catDict[unknownCategoryId].containing;\n return intersection(thingsInKind, unknownThings);\n }\n);\n\nexport const selectActiveUnlabeledThings = createSelector(\n selectActiveUnlabeledThingsIds,\n selectThingsDictionary,\n (activeUnlabeledThingIds, thingDict) => {\n const activeLabeledThings: Array = [];\n for (const thingId of activeUnlabeledThingIds) {\n const thing = thingDict[thingId];\n thing && activeLabeledThings.push(thing);\n }\n\n return activeLabeledThings;\n }\n);\n\nexport const selectActiveSelectedThingIds = createSelector(\n selectSelectedThingIds,\n selectActiveThingIds,\n (selectedIds, activeIds) => {\n return intersection(activeIds, selectedIds);\n }\n);\n\nexport const selectActiveSelectedThings = createSelector(\n selectActiveSelectedThingIds,\n selectThingsDictionary,\n (activeSelectedThingIds, thingDict) => {\n 
const activeSelectedThings = activeSelectedThingIds.reduce(\n (things: Thing[], thingId) => {\n const thing = thingDict[thingId];\n if (thing) {\n things.push(thing);\n }\n return things;\n },\n []\n );\n\n return activeSelectedThings;\n }\n);\n\nexport const selectActiveThingsByPartition = createSelector(\n selectActiveThings,\n (activeThings) => {\n const thingsByPartition = activeThings.reduce(\n (\n byPartition: Record>,\n thing\n ) => {\n switch (thing.partition) {\n case Partition.Inference:\n updateRecord(byPartition, Partition.Inference, thing);\n break;\n case Partition.Training:\n updateRecord(byPartition, Partition.Training, thing);\n break;\n case Partition.Unassigned:\n updateRecord(byPartition, Partition.Unassigned, thing);\n break;\n case Partition.Validation:\n updateRecord(byPartition, Partition.Validation, thing);\n break;\n }\n return byPartition;\n },\n {}\n );\n return thingsByPartition;\n }\n);\n","import React, { ChangeEvent, ReactNode, useState } from \"react\";\nimport { batch, useDispatch, useSelector } from \"react-redux\";\nimport { BlockPicker, ColorResult } from \"react-color\";\n\nimport {\n Container,\n Dialog,\n DialogContent,\n IconButton,\n Popover,\n TextField,\n Toolbar,\n Typography,\n Stack,\n Box,\n} from \"@mui/material\";\n\nimport {\n Close as CloseIcon,\n Palette as PaletteIcon,\n} from \"@mui/icons-material\";\n\nimport { MaterialUISwitch } from \"components/controls\";\nimport { applicationSettingsSlice } from \"store/applicationSettings\";\n\nimport Sun from \"icons/Sun.svg\";\nimport Moon from \"icons/Moon.svg\";\nimport VolumeUp from \"icons/VolumeUp.svg\";\nimport VolumeOff from \"icons/VolumeOff.svg\";\nimport { ThemeMode } from \"themes/enums\";\nimport { selectActiveCategoryColors } from \"store/project/reselectors\";\nimport {\n selectImageSelectionColor,\n selectSelectedImageBorderWidth,\n selectSoundEnabled,\n selectThemeMode,\n} from \"store/applicationSettings/selectors\";\n\nconst SettingsItem = ({\n title,\n children,\n}: {\n title: string;\n children: ReactNode;\n}) => {\n return (\n \n {title}\n {children}\n \n );\n};\n\ntype SettingsDialogProps = {\n onClose: () => void;\n open: boolean;\n};\n\nexport const SettingsDialog = ({ onClose, open }: SettingsDialogProps) => {\n const dispatch = useDispatch();\n\n const themeMode = useSelector(selectThemeMode);\n\n const initialSelectedImageBorderWidth = useSelector(\n selectSelectedImageBorderWidth\n );\n const [selectionSize, setSelectionSize] = useState(\n initialSelectedImageBorderWidth\n );\n\n const initialSelectionColor = useSelector(selectImageSelectionColor);\n const [selectionColor, setSelectionColor] = useState(\n initialSelectionColor\n );\n const availableColors = useSelector(selectActiveCategoryColors);\n const [colorMenuAnchorEl, setColorMenuAnchorEl] =\n React.useState(null);\n const colorPopupOpen = Boolean(colorMenuAnchorEl);\n\n const soundEnabled = useSelector(selectSoundEnabled);\n\n const preClose = () => {\n batch(() => {\n dispatch(\n applicationSettingsSlice.actions.setSelectedImageBorderWidth({\n selectionSize,\n })\n );\n dispatch(\n applicationSettingsSlice.actions.setImageSelectionColor({\n selectionColor,\n })\n );\n });\n\n onClose();\n };\n\n const onToggleTheme = (mode: ThemeMode) => {\n dispatch(applicationSettingsSlice.actions.setThemeMode({ mode }));\n };\n\n const onChangeImageSelectionWidth = (\n event: ChangeEvent\n ) => {\n let size = parseInt(event.target.value);\n if (!size) return;\n size = size < 0 ? 
0 : size;\n\n setSelectionSize(size);\n };\n\n const onOpenColorPicker = (event: React.MouseEvent) => {\n setColorMenuAnchorEl(event.currentTarget);\n };\n const onCloseColorPicker = () => {\n setColorMenuAnchorEl(null);\n };\n\n const toggleSoundEnabled = () => {\n dispatch(\n applicationSettingsSlice.actions.setSoundEnabled({\n soundEnabled: !soundEnabled,\n })\n );\n };\n\n return (\n \n \n \n Settings\n \n\n \n \n \n \n\n theme.spacing(2) }}>\n \n \n \n \n onToggleTheme(\n themeMode === ThemeMode.Dark\n ? ThemeMode.Light\n : ThemeMode.Dark\n )\n }\n />\n \n \n \n \n \n \n \n \n\n \n \n setSelectionColor(color.hex)\n }\n colors={availableColors}\n />\n \n \n \n \n \n {/* */}\n \n \n \n \n );\n};\n\n// const LanguageSettings = () => {\n// const dispatch = useDispatch();\n\n// const language = useSelector(languageSelector);\n\n// const onLanguageChange = (event: SelectChangeEvent) => {\n// dispatch(\n// imageViewerSlice.actions.setLanguage({\n// language: event.target.value as LanguageType,\n// })\n// );\n// };\n\n// return (\n// \n// \n// \n// \n// \n// Language\n// \n// {_.map(LanguageType, (v, k) => {\n// return (\n// \n// {v}\n// \n// );\n// })}\n// \n// \n// \n// \n// \n// \n// );\n// };\n","import { batch, useDispatch, useSelector } from \"react-redux\";\nimport { DialogWithAction } from \"../DialogWithAction\";\nimport { imageViewerSlice } from \"store/imageViewer\";\nimport { dataSlice } from \"store/data/dataSlice\";\nimport { selectActiveImageId } from \"store/imageViewer/selectors\";\n\ntype ExitAnnotatorDialogProps = {\n onReturnToProject: () => void;\n onClose: () => void;\n open: boolean;\n};\n\nexport const ExitAnnotatorDialog = ({\n onReturnToProject,\n onClose,\n open,\n}: ExitAnnotatorDialogProps) => {\n const dispatch = useDispatch();\n\n const activeImageId = useSelector(selectActiveImageId);\n\n const onSaveChanges = () => {\n batch(() => {\n dispatch(\n imageViewerSlice.actions.setActiveImageId({\n imageId: undefined,\n prevImageId: activeImageId,\n })\n );\n dispatch(dataSlice.actions.reconcile({ keepChanges: true }));\n dispatch(imageViewerSlice.actions.setImageStack({ imageIds: [] }));\n });\n\n onReturnToProject();\n };\n\n const onDiscardChanges = () => {\n batch(() => {\n dispatch(\n imageViewerSlice.actions.setActiveImageId({\n imageId: undefined,\n prevImageId: activeImageId,\n })\n );\n dispatch(dataSlice.actions.reconcile({ keepChanges: false }));\n });\n\n onReturnToProject();\n };\n\n return (\n \n );\n};\n","import { ChangeEvent, useState } from \"react\";\nimport { useHotkeys } from \"hooks\";\n\nimport { Grid, TextField } from \"@mui/material\";\n\nimport { HotkeyView } from \"utils/common/enums\";\nimport { Model } from \"utils/models/Model/Model\";\nimport { DialogWithAction } from \"../DialogWithAction\";\nimport { ModelStatus } from \"utils/models/enums\";\n\ntype SaveFittedModelDialogProps = {\n model: Model;\n modelStatus: ModelStatus;\n onClose: () => void;\n open: boolean;\n};\n\nexport const SaveFittedModelDialog = ({\n model,\n modelStatus,\n onClose,\n open,\n}: SaveFittedModelDialogProps) => {\n const [name, setName] = useState(model.name);\n\n const onSaveClassifierClick = async () => {\n await model.saveModel();\n\n onClose();\n };\n\n const onNameChange = (event: ChangeEvent) => {\n setName(event.target.value);\n };\n\n useHotkeys(\n \"enter\",\n () => {\n onSaveClassifierClick();\n },\n HotkeyView.SaveFittedModelDialog,\n { enableOnTags: [\"INPUT\"] },\n [onSaveClassifierClick]\n );\n\n return (\n \n \n \n \n \n }\n 
onConfirm={onSaveClassifierClick}\n confirmText=\"Save\"\n confirmDisabled={modelStatus !== ModelStatus.Trained}\n />\n );\n};\n","import arrayWithHoles from \"./arrayWithHoles.js\";\nimport iterableToArray from \"./iterableToArray.js\";\nimport unsupportedIterableToArray from \"./unsupportedIterableToArray.js\";\nimport nonIterableRest from \"./nonIterableRest.js\";\nexport default function _toArray(arr) {\n return arrayWithHoles(arr) || iterableToArray(arr) || unsupportedIterableToArray(arr) || nonIterableRest();\n}","import { TooManyIndicesError, BoundsCheckError, NegativeStepError } from '../errors';\nimport { ZarrArray } from './index';\nimport { Slice, ArraySelection, ChunkDimProjection, Indexer, DimIndexer, ChunkProjection, NormalizedArraySelection, SliceIndices, DimensionArraySelection } from './types';\nimport { sliceIndices, slice } from \"./slice\";\n\nfunction ensureArray(selection: ArraySelection): DimensionArraySelection[] {\n if (!Array.isArray(selection)) {\n return [selection];\n }\n return selection;\n}\n\nfunction checkSelectionLength(selection: DimensionArraySelection[], shape: number[]) {\n if (selection.length > shape.length) {\n throw new TooManyIndicesError(selection, shape);\n }\n}\n\n/**\n * Returns both the sliceIndices per dimension and the output shape after slicing.\n */\nexport function selectionToSliceIndices(selection: NormalizedArraySelection, shape: number[]): [(number | SliceIndices)[], number[]] {\n const sliceIndicesResult = [];\n const outShape = [];\n\n for (let i = 0; i < selection.length; i++) {\n const s = selection[i];\n if (typeof s === \"number\") {\n sliceIndicesResult.push(s);\n } else {\n const x = sliceIndices(s, shape[i]);\n const dimLength = x[3];\n\n outShape.push(dimLength);\n sliceIndicesResult.push(x);\n }\n }\n\n return [sliceIndicesResult, outShape];\n}\n\n/**\n * This translates \"...\", \":\", null into a list of slices or non-negative integer selections of length shape\n */\nexport function normalizeArraySelection(selection: ArraySelection | number, shape: number[], convertIntegerSelectionToSlices = false): NormalizedArraySelection {\n selection = replaceEllipsis(selection, shape);\n\n for (let i = 0; i < selection.length; i++) {\n const dimSelection = selection[i];\n\n if (typeof dimSelection === \"number\") {\n if (convertIntegerSelectionToSlices) {\n selection[i] = slice(dimSelection, dimSelection + 1, 1);\n } else {\n selection[i] = normalizeIntegerSelection(dimSelection, shape[i]);\n }\n } else if (isIntegerArray(dimSelection)) {\n throw new TypeError(\"Integer array selections are not supported (yet)\");\n } else if (dimSelection === \":\" || dimSelection === null) {\n selection[i] = slice(null, null, 1);\n }\n }\n\n return selection as NormalizedArraySelection;\n}\n\nexport function replaceEllipsis(selection: ArraySelection | number, shape: number[]) {\n selection = ensureArray(selection);\n\n let ellipsisIndex = -1;\n let numEllipsis = 0;\n for (let i = 0; i < selection.length; i++) {\n if (selection[i] === \"...\") {\n ellipsisIndex = i;\n numEllipsis += 1;\n }\n }\n\n if (numEllipsis > 1) {\n throw new RangeError(\"an index can only have a single ellipsis ('...')\");\n }\n if (numEllipsis === 1) {\n // count how many items to left and right of ellipsis\n const numItemsLeft = ellipsisIndex;\n const numItemsRight = selection.length - (numItemsLeft + 1);\n const numItems = selection.length - 1; // All non-ellipsis items\n if (numItems >= shape.length) {\n // Ellipsis does nothing, just remove it\n selection = 
selection.filter((x) => x !== \"...\");\n } else {\n // Replace ellipsis with as many slices are needed for number of dims\n const numNewItems = shape.length - numItems;\n let newItem = selection.slice(0, numItemsLeft).concat(new Array(numNewItems).fill(null));\n if (numItemsRight > 0) {\n newItem = newItem.concat(selection.slice(selection.length - numItemsRight));\n }\n selection = newItem;\n }\n }\n // Fill out selection if not completely specified\n if (selection.length < shape.length) {\n const numMissing = shape.length - selection.length;\n selection = selection.concat(new Array(numMissing).fill(null));\n }\n\n checkSelectionLength(selection, shape);\n return selection;\n}\n\nexport function normalizeIntegerSelection(dimSelection: number, dimLength: number): number {\n // Note: Maybe we should convert to integer or warn if dimSelection is not an integer\n\n // handle wraparound\n if (dimSelection < 0) {\n dimSelection = dimLength + dimSelection;\n }\n\n // handle out of bounds\n if (dimSelection >= dimLength || dimSelection < 0) {\n throw new BoundsCheckError(`index out of bounds for dimension with length ${dimLength}`);\n }\n\n return dimSelection;\n}\n\nfunction isInteger(s: any) {\n return typeof s === \"number\";\n}\n\nexport function isIntegerArray(s: any) {\n if (!Array.isArray(s)) {\n return false;\n }\n for (const e of s) {\n if (typeof e !== \"number\") {\n return false;\n }\n }\n return true;\n}\n\nexport function isSlice(s: (Slice | number | number[] | \"...\" | \":\" | null)): boolean {\n if (s !== null && (s as any)[\"_slice\"] === true) {\n return true;\n }\n return false;\n}\n\nfunction isContiguousSlice(s: (Slice | number | number[] | \"...\" | \":\" | null)): boolean {\n return isSlice(s) && ((s as Slice).step === null || (s as Slice).step === 1);\n}\n\nfunction isPositiveSlice(s: (Slice | number | number[] | \"...\" | \":\" | null)): boolean {\n return isSlice(s) && ((s as Slice).step === null || ((s as Slice).step as number) >= 1);\n}\n\nexport function isContiguousSelection(selection: ArraySelection) {\n selection = ensureArray(selection);\n\n for (let i = 0; i < selection.length; i++) {\n const s = selection[i];\n if (!(isIntegerArray(s) || isContiguousSlice(s) || s === \"...\")) {\n return false;\n }\n }\n return true;\n}\n\n// eslint-disable-next-line @typescript-eslint/no-unused-vars\nfunction isBasicSelection(selection: ArraySelection): boolean {\n selection = ensureArray(selection);\n\n for (let i = 0; i < selection.length; i++) {\n const s = selection[i];\n if (!(isInteger(s) || isPositiveSlice(s))) {\n return false;\n }\n }\n return true;\n}\nfunction* product(...iterables: (() => IterableIterator)[]): IterableIterator {\n if (iterables.length === 0) { return; }\n // make a list of iterators from the iterables\n const iterators = iterables.map(it => it());\n const results = iterators.map(it => it.next());\n\n // Disabled to allow empty inputs\n // if (results.some(r => r.done)) {\n // throw new Error(\"Input contains an empty iterator.\");\n // }\n\n for (let i = 0; ;) {\n if (results[i].done) {\n // reset the current iterator\n iterators[i] = iterables[i]();\n results[i] = iterators[i].next();\n // advance, and exit if we've reached the end\n if (++i >= iterators.length) { return; }\n } else {\n yield results.map(({ value }) => value);\n i = 0;\n }\n results[i] = iterators[i].next();\n }\n}\n\nexport class BasicIndexer implements Indexer {\n dimIndexers: DimIndexer[];\n shape: number[];\n dropAxes: null;\n\n constructor(selection: ArraySelection, array: 
ZarrArray) {\n selection = normalizeArraySelection(selection, array.shape);\n\n // Setup per-dimension indexers\n this.dimIndexers = [];\n const arrayShape = array.shape;\n for (let i = 0; i < arrayShape.length; i++) {\n let dimSelection = selection[i];\n const dimLength = arrayShape[i];\n const dimChunkLength = array.chunks[i];\n\n if (dimSelection === null) {\n dimSelection = slice(null);\n }\n\n\n if (isInteger(dimSelection)) {\n this.dimIndexers.push(new IntDimIndexer(dimSelection as number, dimLength, dimChunkLength));\n } else if (isSlice(dimSelection)) {\n this.dimIndexers.push(new SliceDimIndexer(dimSelection as Slice, dimLength, dimChunkLength));\n } else {\n throw new RangeError(`Unspported selection item for basic indexing; expected integer or slice, got ${dimSelection}`);\n }\n }\n\n this.shape = [];\n for (const d of this.dimIndexers) {\n if (d instanceof SliceDimIndexer) {\n this.shape.push(d.numItems);\n }\n }\n this.dropAxes = null;\n }\n\n * iter() {\n const dimIndexerIterables = this.dimIndexers.map(x => (() => x.iter()));\n const dimIndexerProduct = product(...dimIndexerIterables);\n\n for (const dimProjections of dimIndexerProduct) {\n // TODO fix this, I think the product outputs too many combinations\n const chunkCoords = [];\n const chunkSelection = [];\n const outSelection = [];\n\n for (const p of dimProjections) {\n chunkCoords.push((p).dimChunkIndex);\n chunkSelection.push((p).dimChunkSelection);\n if ((p).dimOutSelection !== null) {\n outSelection.push((p).dimOutSelection);\n }\n }\n\n yield ({\n chunkCoords,\n chunkSelection,\n outSelection,\n } as ChunkProjection);\n }\n\n }\n}\n\nclass IntDimIndexer implements DimIndexer {\n dimSelection: number;\n dimLength: number;\n dimChunkLength: number;\n numItems: number;\n\n constructor(dimSelection: number, dimLength: number, dimChunkLength: number) {\n dimSelection = normalizeIntegerSelection(dimSelection, dimLength);\n this.dimSelection = dimSelection;\n this.dimLength = dimLength;\n this.dimChunkLength = dimChunkLength;\n this.numItems = 1;\n }\n\n * iter() {\n const dimChunkIndex = Math.floor(this.dimSelection / this.dimChunkLength);\n const dimOffset = dimChunkIndex * this.dimChunkLength;\n const dimChunkSelection = this.dimSelection - dimOffset;\n const dimOutSelection = null;\n yield {\n dimChunkIndex,\n dimChunkSelection,\n dimOutSelection,\n } as ChunkDimProjection;\n }\n}\n\nclass SliceDimIndexer implements DimIndexer {\n dimLength: number;\n dimChunkLength: number;\n numItems: number;\n numChunks: number;\n\n start: number;\n stop: number;\n step: number;\n\n constructor(dimSelection: Slice, dimLength: number, dimChunkLength: number) {\n // Normalize\n const [start, stop, step] = sliceIndices(dimSelection, dimLength);\n this.start = start;\n this.stop = stop;\n this.step = step;\n if (this.step < 1) {\n throw new NegativeStepError();\n }\n\n this.dimLength = dimLength;\n this.dimChunkLength = dimChunkLength;\n this.numItems = Math.max(0, Math.ceil((this.stop - this.start) / this.step));\n this.numChunks = Math.ceil(this.dimLength / this.dimChunkLength);\n }\n\n *iter() {\n const dimChunkIndexFrom = Math.floor(this.start / this.dimChunkLength);\n const dimChunkIndexTo = Math.ceil(this.stop / this.dimChunkLength);\n\n // Iterate over chunks in range\n for (let dimChunkIndex = dimChunkIndexFrom; dimChunkIndex < dimChunkIndexTo; dimChunkIndex++) {\n\n // Compute offsets for chunk within overall array\n const dimOffset = dimChunkIndex * this.dimChunkLength;\n const dimLimit = Math.min(this.dimLength, 
(dimChunkIndex + 1) * this.dimChunkLength);\n\n // Determine chunk length, accounting for trailing chunk\n const dimChunkLength = dimLimit - dimOffset;\n\n let dimChunkSelStart: number;\n let dimChunkSelStop: number;\n let dimOutOffset: number;\n\n if (this.start < dimOffset) {\n // Selection starts before current chunk\n\n dimChunkSelStart = 0;\n const remainder = (dimOffset - this.start) % this.step;\n if (remainder > 0) {\n dimChunkSelStart += this.step - remainder;\n }\n // Compute number of previous items, provides offset into output array\n dimOutOffset = Math.ceil((dimOffset - this.start) / this.step);\n } else {\n // Selection starts within current chunk\n dimChunkSelStart = this.start - dimOffset;\n dimOutOffset = 0;\n }\n\n if (this.stop > dimLimit) {\n // Selection ends after current chunk\n dimChunkSelStop = dimChunkLength;\n } else {\n // Selection ends within current chunk\n dimChunkSelStop = this.stop - dimOffset;\n }\n\n const dimChunkSelection = slice(dimChunkSelStart, dimChunkSelStop, this.step);\n const dimChunkNumItems = Math.ceil((dimChunkSelStop - dimChunkSelStart) / this.step);\n const dimOutSelection = slice(dimOutOffset, dimOutOffset + dimChunkNumItems);\n yield {\n dimChunkIndex,\n dimChunkSelection,\n dimOutSelection,\n } as ChunkDimProjection;\n }\n\n }\n\n}\n","import type { Codec, CodecConstructor } from 'numcodecs';\n\ntype Config = Record;\ntype CodecImporter = () => CodecConstructor | Promise>;\n\nconst registry: Map = new Map();\n\nexport function addCodec(id: string, importFn: CodecImporter) {\n registry.set(id, importFn);\n}\n\nexport async function getCodec(config: Config & { id: string }): Promise {\n if (!registry.has(config.id)) {\n throw new Error(`Compression codec ${config.id} is not supported by Zarr.js yet.`);\n }\n /* eslint-disable @typescript-eslint/no-non-null-assertion */\n const codec = await registry.get(config.id)!();\n return codec.fromConfig(config);\n}\n","/**\n * Closely resembles the functions on the MutableMapping type in Python.\n */\nexport interface MutableMapping {\n getItem(item: string, opts?: O): T;\n setItem(item: string, value: T): boolean;\n deleteItem(item: string): boolean;\n containsItem(item: string): boolean;\n\n proxy(): MutableMappingProxy;\n\n // length(): number;\n}\n\n/**\n * Closely resembles the functions on the MutableMapping type in Python.\n */\nexport interface AsyncMutableMapping {\n getItem(item: string, opts?: O): Promise;\n setItem(item: string, value: T): Promise;\n deleteItem(item: string): Promise;\n containsItem(item: string): Promise;\n // length(): number;\n}\n\nexport interface MutableMappingProxy {\n [key: string]: T;\n}\n\nexport interface AsyncMutableMappingProxy {\n [key: string]: T | Promise;\n}\n\n\n/**\n * A proxy allows for accessing, setting and deleting the keys in the mutable mapping using\n * m[\"a\"] or even m.a notation.\n */\nexport function createProxy(mapping: S & MutableMapping): (S & MutableMappingProxy);\nexport function createProxy(mapping: S & AsyncMutableMapping): (S & AsyncMutableMappingProxy);\nexport function createProxy(mapping: (S & MutableMapping) | (S & AsyncMutableMapping)): (S & MutableMappingProxy) | (S & AsyncMutableMappingProxy) {\n return new Proxy(mapping as any, {\n set(target, key, value, _receiver) {\n return target.setItem(key as string, value);\n },\n get(target, key, _receiver) {\n return target.getItem(key as string);\n },\n deleteProperty(target, key) {\n return target.deleteItem(key as string);\n },\n has(target, key) {\n return 
target.containsItem(key as string);\n }\n });\n}","export interface ZarrError {\n __zarr__: string;\n}\n\nfunction isZarrError(err: unknown): err is ZarrError {\n return typeof err === 'object' && err !== null && '__zarr__' in err;\n}\n\nexport function isKeyError(o: unknown) {\n return isZarrError(o) && o.__zarr__ === 'KeyError';\n}\n\n// Custom error messages, note we have to patch the prototype of the\n// errors to fix `instanceof` calls, see:\n// https://github.com/Microsoft/TypeScript/wiki/Breaking-Changes#extending-built-ins-like-error-array-and-map-may-no-longer-work\nexport class ContainsArrayError extends Error implements ZarrError {\n __zarr__ = 'ContainsArrayError';\n constructor(path: string) {\n super(`path ${path} contains an array`);\n Object.setPrototypeOf(this, ContainsArrayError.prototype);\n }\n}\n\nexport class ContainsGroupError extends Error implements ZarrError {\n __zarr__ = 'ContainsGroupError';\n constructor(path: string) {\n super(`path ${path} contains a group`);\n Object.setPrototypeOf(this, ContainsGroupError.prototype);\n }\n}\n\nexport class ArrayNotFoundError extends Error implements ZarrError {\n __zarr__ = 'ArrayNotFoundError';\n constructor(path: string) {\n super(`array not found at path ${path}`);\n Object.setPrototypeOf(this, ArrayNotFoundError.prototype);\n }\n}\n\nexport class GroupNotFoundError extends Error implements ZarrError {\n __zarr__ = 'GroupNotFoundError';\n constructor(path: string) {\n super(`ground not found at path ${path}`);\n Object.setPrototypeOf(this, GroupNotFoundError.prototype);\n }\n}\n\nexport class PathNotFoundError extends Error implements ZarrError {\n __zarr__ = 'PathNotFoundError';\n constructor(path: string) {\n super(`nothing not found at path ${path}`);\n Object.setPrototypeOf(this, PathNotFoundError.prototype);\n }\n}\n\nexport class PermissionError extends Error implements ZarrError {\n __zarr__ = 'PermissionError';\n constructor(message: string) {\n super(message);\n Object.setPrototypeOf(this, PermissionError.prototype);\n }\n}\n\nexport class KeyError extends Error implements ZarrError {\n __zarr__ = 'KeyError';\n constructor(key: string) {\n super(`key ${key} not present`);\n Object.setPrototypeOf(this, KeyError.prototype);\n }\n}\n\nexport class TooManyIndicesError extends RangeError implements ZarrError {\n __zarr__ = 'TooManyIndicesError';\n constructor(selection: any[], shape: number[]) {\n super(`too many indices for array; expected ${shape.length}, got ${selection.length}`);\n Object.setPrototypeOf(this, TooManyIndicesError.prototype);\n }\n}\n\nexport class BoundsCheckError extends RangeError implements ZarrError {\n __zarr__ = 'BoundsCheckError';\n constructor(message: string) {\n super(message);\n Object.setPrototypeOf(this, BoundsCheckError.prototype);\n }\n}\n\nexport class InvalidSliceError extends RangeError implements ZarrError {\n __zarr__ = 'InvalidSliceError';\n constructor(from: any, to: any, stepSize: any, reason: any) {\n super(`slice arguments slice(${from}, ${to}, ${stepSize}) invalid: ${reason}`);\n Object.setPrototypeOf(this, InvalidSliceError.prototype);\n }\n}\n\nexport class NegativeStepError extends Error implements ZarrError {\n __zarr__ = 'NegativeStepError';\n constructor() {\n super(`Negative step size is not supported when indexing.`);\n Object.setPrototypeOf(this, NegativeStepError.prototype);\n }\n}\n\nexport class ValueError extends Error implements ZarrError {\n __zarr__ = 'ValueError';\n constructor(message: string) {\n super(message);\n Object.setPrototypeOf(this, 
ValueError.prototype);\n }\n}\n\nexport class HTTPError extends Error implements ZarrError {\n __zarr__ = 'HTTPError';\n constructor(code: string) {\n super(code);\n Object.setPrototypeOf(this, HTTPError.prototype);\n }\n}\n","\nimport { InvalidSliceError } from '../errors';\nimport { Slice, SliceArgument, SliceIndices } from \"./types\";\n\nexport function slice(start: SliceArgument, stop: SliceArgument | undefined = undefined, step: number | null = null): Slice {\n // tslint:disable-next-line: strict-type-predicates\n if (start === undefined) { // Not possible in typescript\n throw new InvalidSliceError(start, stop, step, \"The first argument must not be undefined\");\n }\n\n if ((typeof start === \"string\" && start !== \":\") || (typeof stop === \"string\" && stop !== \":\")) { // Note in typescript this will never happen with type checking.\n throw new InvalidSliceError(start, stop, step, \"Arguments can only be integers, \\\":\\\" or null\");\n }\n\n // slice(5) === slice(null, 5)\n if (stop === undefined) {\n stop = start;\n start = null;\n }\n\n // if (start !== null && stop !== null && start > stop) {\n // throw new InvalidSliceError(start, stop, step, \"to is higher than from\");\n // }\n\n return {\n start: start === \":\" ? null : start,\n stop: stop === \":\" ? null : stop,\n step,\n _slice: true,\n };\n}\n\n\n/**\n * Port of adjustIndices\n * https://github.com/python/cpython/blob/master/Objects/sliceobject.c#L243\n */\nfunction adjustIndices(start: number, stop: number, step: number, length: number) {\n if (start < 0) {\n start += length;\n if (start < 0) {\n start = (step < 0) ? -1 : 0;\n }\n } else if (start >= length) {\n start = (step < 0) ? length - 1 : length;\n }\n\n if (stop < 0) {\n stop += length;\n if (stop < 0) {\n stop = (step < 0) ? -1 : 0;\n }\n } else if (stop >= length) {\n stop = (step < 0) ? length - 1 : length;\n }\n\n if (step < 0) {\n if (stop < start) {\n const length = Math.floor((start - stop - 1) / (-step) + 1);\n return [start, stop, step, length];\n }\n } else {\n if (start < stop) {\n const length = Math.floor((stop - start - 1) / step + 1);\n return [start, stop, step, length];\n }\n }\n return [start, stop, step, 0];\n}\n\n/**\n * Port of slice.indices(n) and PySlice_Unpack\n * https://github.com/python/cpython/blob/master/Objects/sliceobject.c#L166\n * https://github.com/python/cpython/blob/master/Objects/sliceobject.c#L198 \n * \n * Behaviour might be slightly different as it's a weird hybrid implementation.\n */\nexport function sliceIndices(slice: Slice, length: number): SliceIndices {\n let start: number;\n let stop: number;\n let step: number;\n\n if (slice.step === null) {\n step = 1;\n } else {\n step = slice.step;\n }\n\n if (slice.start === null) {\n start = step < 0 ? Number.MAX_SAFE_INTEGER : 0;\n } else {\n start = slice.start;\n if (start < 0) {\n start += length;\n }\n }\n\n if (slice.stop === null) {\n stop = step < 0 ? 
-Number.MAX_SAFE_INTEGER : Number.MAX_SAFE_INTEGER;\n } else {\n stop = slice.stop;\n if (stop < 0) {\n stop += length;\n }\n }\n\n // This clips out of bounds slices\n const s = adjustIndices(start, stop, step, length);\n start = s[0];\n stop = s[1];\n step = s[2];\n // The output length\n length = s[3];\n\n\n // With out of bounds slicing these two assertions are not useful.\n // if (stop > length) throw new Error(\"Stop greater than length\");\n // if (start >= length) throw new Error(\"Start greater than or equal to length\");\n\n if (step === 0) throw new Error(\"Step size 0 is invalid\");\n\n return [start, stop, step, length];\n}","import { Order, FillType, ChunksArgument, DtypeString } from \"./types\";\n\nimport { DimensionSelection, Slice } from \"./core/types\";\nimport { isSlice } from \"./core/indexing\";\nimport { TypedArray } from \"./nestedArray/types\";\n\n/**\n * This should be true only if this javascript is getting executed in Node.\n */\nexport const IS_NODE = typeof process !== \"undefined\" && process.versions && process.versions.node;\n\n// eslint-disable-next-line @typescript-eslint/no-empty-function\nexport function noop(): void {}\n\nexport function humanReadableSize(size: number) {\n if (size < 2 ** 10) {\n return `${size}`;\n }\n else if (size < 2 ** 20) {\n return `${(size / (2 ** 10)).toFixed(1)}K`;\n }\n else if (size < 2 ** 30) {\n return `${(size / (2 ** 20)).toFixed(1)}M`;\n }\n else if (size < 2 ** 40) {\n return `${(size / (2 ** 30)).toFixed(1)}G`;\n }\n else if (size < 2 ** 50) {\n return `${(size / (2 ** 40)).toFixed(1)}T`;\n }\n return `${(size / (2 ** 50)).toFixed(1)}P`;\n}\n\n// eslint-disable-next-line @typescript-eslint/ban-types\nexport function normalizeStoragePath(path: string | String | null): string {\n if (path === null) {\n return \"\";\n }\n\n if (path instanceof String) {\n path = path.valueOf();\n }\n\n // convert backslash to forward slash\n path = path.replace(/\\\\/g, \"/\");\n\n // ensure no leading slash\n while (path.length > 0 && path[0] === '/') {\n path = path.slice(1);\n }\n\n // ensure no trailing slash\n while (path.length > 0 && path[path.length - 1] === '/') {\n path = path.slice(0, path.length - 1);\n }\n\n\n // collapse any repeated slashes\n path = path.replace(/\\/\\/+/g, \"/\");\n\n // don't allow path segments with just '.' or '..'\n const segments = path.split('/');\n\n for (const s of segments) {\n if (s === \".\" || s === \"..\") {\n throw Error(\"path containing '.' or '..' 
segment not allowed\");\n }\n }\n return path as string;\n}\n\nexport function normalizeShape(shape: number | number[]): number[] {\n if (typeof shape === \"number\") {\n shape = [shape];\n }\n return shape.map(x => Math.floor(x));\n}\n\nexport function normalizeChunks(chunks: ChunksArgument, shape: number[]): number[] {\n // Assume shape is already normalized\n\n if (chunks === null || chunks === true) {\n throw new Error(\"Chunk guessing is not supported yet\");\n }\n\n if (chunks === false) {\n return shape;\n }\n\n if (typeof chunks === \"number\") {\n chunks = [chunks];\n }\n\n // handle underspecified chunks\n if (chunks.length < shape.length) {\n // assume chunks across remaining dimensions\n chunks = chunks.concat(shape.slice(chunks.length));\n }\n\n return chunks.map((x, idx) => {\n // handle null or -1 in chunks\n if (x === -1 || x === null) {\n return shape[idx];\n } else {\n return Math.floor(x);\n }\n });\n}\n\nexport function normalizeOrder(order: string): Order {\n order = order.toUpperCase();\n return order as Order;\n}\n\nexport function normalizeDtype(dtype: DtypeString): DtypeString {\n return dtype;\n}\n\nexport function normalizeFillValue(fillValue: FillType): FillType {\n return fillValue;\n}\n\n/**\n * Determine whether `item` specifies a complete slice of array with the\n * given `shape`. Used to optimize __setitem__ operations on chunks\n * @param item\n * @param shape\n */\nexport function isTotalSlice(item: DimensionSelection | DimensionSelection[], shape: number[]): boolean {\n if (item === null) {\n return true;\n }\n if (!Array.isArray(item)) {\n item = [item];\n }\n\n for (let i = 0; i < Math.min(item.length, shape.length); i++) {\n const it = item[i];\n if (it === null) continue;\n\n if (isSlice(it)) {\n const s = it as Slice;\n const isStepOne = s.step === 1 || s.step === null;\n\n if (s.start === null && s.stop === null && isStepOne) {\n continue;\n }\n if (((s.stop as number) - (s.start as number)) === shape[i] && isStepOne) {\n continue;\n }\n return false;\n }\n return false;\n\n\n // } else {\n // console.error(`isTotalSlice unexpected non-slice, got ${it}`);\n // return false;\n // }\n }\n return true;\n}\n\n/**\n * Checks for === equality of all elements.\n */\nexport function arrayEquals1D(a: ArrayLike, b: ArrayLike) {\n if (a.length !== b.length) {\n return false;\n }\n\n for (let i = 0; i < a.length; i++) {\n if (a[i] !== b[i]) {\n return false;\n }\n }\n return true;\n}\n\n/*\n * Determines \"C\" order strides for a given shape array.\n * Strides provide integer steps in each dimention to traverse an ndarray.\n *\n * NOTE: - These strides here are distinct from numpy.ndarray.strides, which describe actual byte steps.\n * - Strides are assumed to be contiguous, so initial step is 1. Thus, output will always be [XX, XX, 1].\n */\nexport function getStrides(shape: number[]): number[] {\n // adapted from https://github.com/scijs/ndarray/blob/master/ndarray.js#L326-L330\n const ndim = shape.length;\n const strides = Array(ndim);\n let step = 1; // init step\n for (let i = ndim - 1; i >= 0; i--) {\n strides[i] = step;\n step *= shape[i];\n }\n return strides;\n}\n\nexport function resolveUrl(root: string | URL, path: string): string {\n const base = typeof root === 'string' ? 
new URL(root) : root;\n if (!base.pathname.endsWith('/')) {\n // ensure trailing slash so that base is resolved as _directory_\n base.pathname += '/';\n }\n const resolved = new URL(path, base);\n // copy search params to new URL\n resolved.search = base.search;\n return resolved.href;\n}\n\n/**\n * Swaps byte order in-place for a given TypedArray.\n * Used to flip endian-ness when getting/setting chunks from/to zarr store.\n * @param src TypedArray\n */\nexport function byteSwapInplace(src: TypedArray): void {\n const b = src.BYTES_PER_ELEMENT;\n if (b === 1) return; // no swapping needed\n if (IS_NODE) {\n // Use builtin methods for swapping if in Node environment\n const bytes = Buffer.from(src.buffer, src.byteOffset, src.length * b);\n if (b === 2) bytes.swap16();\n if (b === 4) bytes.swap32();\n if (b === 8) bytes.swap64();\n return;\n }\n // In browser, need to flip manually\n // Adapted from https://github.com/zbjornson/node-bswap/blob/master/bswap.js\n const flipper = new Uint8Array(src.buffer, src.byteOffset, src.length * b);\n const numFlips = b / 2;\n const endByteIndex = b - 1;\n let t: number;\n for (let i = 0; i < flipper.length; i += b) {\n for (let j = 0; j < numFlips; j++) {\n t = flipper[i + j];\n flipper[i + j] = flipper[i + endByteIndex - j];\n flipper[i + endByteIndex - j] = t;\n }\n }\n}\n\n/**\n * Creates a copy of a TypedArray and swaps bytes.\n * Used to flip endian-ness when getting/setting chunks from/to zarr store.\n * @param src TypedArray\n */\nexport function byteSwap(src: TypedArray): TypedArray {\n const copy = src.slice();\n byteSwapInplace(copy);\n return copy;\n}\n\nfunction convertColMajorToRowMajor2D(src: TypedArray, out: TypedArray, shape: number[]): void {\n let idx = 0;\n const shape0 = shape[0];\n const shape1 = shape[1];\n const stride0 = shape1;\n for (let i1 = 0; i1 < shape1; i1++) {\n for (let i0 = 0; i0 < shape0; i0++) {\n out[i0 * stride0 + i1] = src[idx++];\n }\n }\n}\n\nfunction convertColMajorToRowMajor3D(src: TypedArray, out: TypedArray, shape: number[]): void {\n let idx = 0;\n const shape0 = shape[0];\n const shape1 = shape[1];\n const shape2 = shape[2];\n const stride0 = shape2 * shape1;\n const stride1 = shape2;\n for (let i2 = 0; i2 < shape2; i2++) {\n for (let i1 = 0; i1 < shape1; i1++) {\n for (let i0 = 0; i0 < shape0; i0++) {\n out[i0 * stride0 + i1 * stride1 + i2] = src[idx++];\n }\n }\n }\n}\n\nfunction convertColMajorToRowMajor4D(src: TypedArray, out: TypedArray, shape: number[]): void {\n let idx = 0;\n const shape0 = shape[0];\n const shape1 = shape[1];\n const shape2 = shape[2];\n const shape3 = shape[3];\n const stride0 = shape3 * shape2 * shape1;\n const stride1 = shape3 * shape2;\n const stride2 = shape3;\n for (let i3 = 0; i3 < shape3; i3++) {\n for (let i2 = 0; i2 < shape2; i2++) {\n for (let i1 = 0; i1 < shape1; i1++) {\n for (let i0 = 0; i0 < shape0; i0++) {\n out[i0 * stride0 + i1 * stride1 + i2 * stride2 + i3] = src[idx++];\n }\n }\n }\n }\n}\n\nfunction convertColMajorToRowMajorGeneric(src: TypedArray, out: TypedArray, shape: number[]): void {\n const nDims = shape.length;\n const size = shape.reduce((r, a) => r * a);\n\n const rowMajorStrides = shape.map((_, i) =>\n i + 1 === nDims ? 
1 : shape.slice(i + 1).reduce((r, a) => r * a, 1)\n );\n\n const index = Array(nDims).fill(0);\n\n for (let colMajorIdx = 0; colMajorIdx < size; colMajorIdx++) {\n let rowMajorIdx = 0;\n for (let dim = 0; dim < nDims; dim++) {\n rowMajorIdx += index[dim] * rowMajorStrides[dim];\n }\n out[rowMajorIdx] = src[colMajorIdx];\n\n index[0] += 1;\n // Handle carry-over\n for (let dim = 0; dim < nDims; dim++) {\n if (index[dim] === shape[dim]) {\n if (dim + 1 === nDims) {\n return;\n }\n index[dim] = 0;\n index[dim + 1] += 1;\n }\n }\n }\n}\n\nconst colMajorToRowMajorConverters: {\n [dim: number]: (src: TypedArray, out: TypedArray, shape: number[]) => void;\n} = {\n [0]: noop,\n [1]: noop,\n [2]: convertColMajorToRowMajor2D,\n [3]: convertColMajorToRowMajor3D,\n [4]: convertColMajorToRowMajor4D,\n};\n\n/**\n * Rewrites a copy of a TypedArray while converting it from column-major (F-order) to row-major (C-order).\n * @param src TypedArray\n * @param out TypedArray\n * @param shape number[]\n */\nexport function convertColMajorToRowMajor(src: TypedArray, out: TypedArray, shape: number[]): void {\n return (colMajorToRowMajorConverters[shape.length] || convertColMajorToRowMajorGeneric)(\n src,\n out,\n shape\n );\n}\n\nexport function isArrayBufferLike(obj: unknown | null): obj is ArrayBufferLike {\n if (obj === null) {\n return false;\n }\n if (obj instanceof ArrayBuffer) {\n return true;\n }\n if (typeof SharedArrayBuffer === \"function\" && obj instanceof SharedArrayBuffer) {\n return true;\n }\n if (IS_NODE) { // Necessary for Node.js for some reason..\n return (obj as Record).toString().startsWith(\"[object ArrayBuffer]\")\n || (obj as Record).toString().startsWith(\"[object SharedArrayBuffer]\");\n }\n return false;\n}\n","export const ARRAY_META_KEY = \".zarray\";\nexport const GROUP_META_KEY = \".zgroup\";\nexport const ATTRS_META_KEY = \".zattrs\";\n","import { normalizeStoragePath, normalizeChunks, normalizeDtype, normalizeShape, normalizeOrder, normalizeFillValue } from '../util';\nimport { Store } from './types';\nimport { ARRAY_META_KEY, GROUP_META_KEY } from '../names';\nimport { FillType, Order, Filter, CompressorConfig, ZarrGroupMetadata, ChunksArgument, DtypeString, ZarrArrayMetadata, FillTypeSerialized } from '../types';\nimport { ContainsArrayError, ContainsGroupError } from '../errors';\n\n\n/**\n * Return true if the store contains an array at the given logical path.\n */\nexport async function containsArray(store: Store, path: string | null = null) {\n path = normalizeStoragePath(path);\n const prefix = pathToPrefix(path);\n const key = prefix + ARRAY_META_KEY;\n return store.containsItem(key);\n}\n\n/**\n * Return true if the store contains a group at the given logical path.\n */\nexport async function containsGroup(store: Store, path: string | null = null) {\n path = normalizeStoragePath(path);\n const prefix = pathToPrefix(path);\n const key = prefix + GROUP_META_KEY;\n return store.containsItem(key);\n}\n\n\nexport function pathToPrefix(path: string): string {\n // assume path already normalized\n if (path.length > 0) {\n return path + '/';\n }\n return '';\n}\n\nasync function listDirFromKeys(store: Store, path: string) {\n // assume path already normalized\n const prefix = pathToPrefix(path);\n const children = new Set();\n\n for (const key in await store.keys()) {\n if (key.startsWith(prefix) && key.length > prefix.length) {\n const suffix = key.slice(prefix.length);\n const child = suffix.split('/')[0];\n children.add(child);\n }\n }\n return 
Array.from(children).sort();\n}\n\nasync function requireParentGroup(store: Store, path: string, chunkStore: Store | null, overwrite: boolean) {\n // Assume path is normalized\n if (path.length === 0) {\n return;\n }\n\n const segments = path.split(\"/\");\n let p = \"\";\n for (const s of segments.slice(0, segments.length - 1)) {\n p += s;\n if (await containsArray(store, p)) {\n await initGroupMetadata(store, p, overwrite);\n } else if (!await containsGroup(store, p)) {\n await initGroupMetadata(store, p);\n }\n p += \"/\";\n }\n}\n\n/**\n * Obtain a directory listing for the given path. If `store` provides a `listDir`\n * method, this will be called, otherwise will fall back to implementation via the\n * `MutableMapping` interface.\n * @param store \n */\nexport async function listDir(store: Store, path: string | null = null) {\n path = normalizeStoragePath(path);\n if (store.listDir) {\n return store.listDir(path);\n } else {\n return listDirFromKeys(store, path);\n }\n}\n\nasync function initGroupMetadata(store: Store, path: string | null = null, overwrite = false) {\n path = normalizeStoragePath(path);\n\n // Guard conditions\n if (overwrite) {\n throw Error(\"Group overwriting not implemented yet :(\");\n } else if (await containsArray(store, path)) {\n throw new ContainsArrayError(path);\n } else if (await containsGroup(store, path)) {\n throw new ContainsGroupError(path);\n }\n\n const metadata: ZarrGroupMetadata = { zarr_format: 2 };\n const key = pathToPrefix(path) + GROUP_META_KEY;\n await store.setItem(key, JSON.stringify(metadata));\n}\n/**\n * Initialize a group store. Note that this is a low-level function and there should be no\n * need to call this directly from user code.\n */\nexport async function initGroup(store: Store, path: string | null = null, chunkStore: null | Store = null, overwrite = false) {\n path = normalizeStoragePath(path);\n await requireParentGroup(store, path, chunkStore, overwrite);\n await initGroupMetadata(store, path, overwrite);\n}\n\nasync function initArrayMetadata(\n store: Store,\n shape: number | number[],\n chunks: ChunksArgument,\n dtype: DtypeString,\n path: string,\n compressor: null | CompressorConfig,\n fillValue: FillType,\n order: Order,\n overwrite: boolean,\n chunkStore: null | Store,\n filters: null | Filter[],\n dimensionSeparator?: '.' 
| '/',\n) {\n // Guard conditions\n if (overwrite) {\n throw Error(\"Array overwriting not implemented yet :(\");\n } else if (await containsArray(store, path)) {\n throw new ContainsArrayError(path);\n } else if (await containsGroup(store, path)) {\n throw new ContainsGroupError(path);\n }\n\n // Normalize metadata, does type checking too.\n dtype = normalizeDtype(dtype);\n shape = normalizeShape(shape);\n chunks = normalizeChunks(chunks, shape);\n order = normalizeOrder(order);\n fillValue = normalizeFillValue(fillValue);\n\n if (filters !== null && filters.length > 0) {\n throw Error(\"Filters are not supported yet\");\n }\n\n let serializedFillValue: FillTypeSerialized = fillValue;\n\n if (typeof fillValue === \"number\") {\n if (Number.isNaN(fillValue)) serializedFillValue = \"NaN\";\n if (Number.POSITIVE_INFINITY === fillValue) serializedFillValue = \"Infinity\";\n if (Number.NEGATIVE_INFINITY === fillValue) serializedFillValue = \"-Infinity\";\n }\n\n filters = null;\n\n const metadata: ZarrArrayMetadata = {\n zarr_format: 2,\n shape: shape,\n chunks: chunks as number[],\n dtype: dtype,\n fill_value: serializedFillValue,\n order: order,\n compressor: compressor,\n filters: filters,\n };\n if (dimensionSeparator) {\n metadata.dimension_separator = dimensionSeparator;\n }\n const metaKey = pathToPrefix(path) + ARRAY_META_KEY;\n await store.setItem(metaKey, JSON.stringify(metadata));\n}\n\n/**\n * \n * Initialize an array store with the given configuration. Note that this is a low-level\n * function and there should be no need to call this directly from user code\n */\nexport async function initArray(\n store: Store,\n shape: number | number[],\n chunks: ChunksArgument,\n dtype: DtypeString,\n path: string | null = null,\n compressor: null | CompressorConfig = null,\n fillValue: FillType = null,\n order: Order = \"C\",\n overwrite = false,\n chunkStore: null | Store = null,\n filters: null | Filter[] = null,\n dimensionSeparator?: '.' | '/',\n) {\n\n path = normalizeStoragePath(path);\n await requireParentGroup(store, path, chunkStore, overwrite);\n await initArrayMetadata(store, shape, chunks, dtype, path, compressor, fillValue, order, overwrite, chunkStore, filters, dimensionSeparator);\n}\n","import { ZarrMetadataType, UserAttributes } from './types';\nimport { ValidStoreType } from './storage/types';\nimport { isArrayBufferLike, IS_NODE } from './util';\n\nexport function parseMetadata(\n s: ValidStoreType | ZarrMetadataType\n): ZarrMetadataType | UserAttributes {\n // Here we allow that a store may return an already-parsed metadata object,\n // or a string of JSON that we will parse here. 
We allow for an already-parsed\n // object to accommodate a consolidated metadata store, where all the metadata for\n // all groups and arrays will already have been parsed from JSON.\n if (typeof s !== 'string') {\n // tslint:disable-next-line: strict-type-predicates\n if (IS_NODE && Buffer.isBuffer(s)) {\n return JSON.parse(s.toString());\n } else if (isArrayBufferLike(s)) {\n const utf8Decoder = new TextDecoder();\n const bytes = new Uint8Array(s);\n return JSON.parse(utf8Decoder.decode(bytes));\n } else {\n return s;\n }\n }\n return JSON.parse(s);\n}\n","import { createProxy, AsyncMutableMapping, AsyncMutableMappingProxy } from './mutableMapping';\nimport { Store } from './storage/types';\nimport { parseMetadata } from './metadata';\nimport { UserAttributes } from './types';\nimport { PermissionError } from './errors';\n\n/**\n * Class providing access to user attributes on an array or group. Should not be\n * instantiated directly, will be available via the `.attrs` property of an array or\n * group.\n */\nexport class Attributes implements AsyncMutableMapping {\n store: Store;\n key: string;\n readOnly: boolean;\n cache: boolean;\n private cachedValue: M | null;\n\n constructor(store: Store, key: string, readOnly: boolean, cache = true) {\n this.store = store;\n this.key = key;\n this.readOnly = readOnly;\n this.cache = cache;\n this.cachedValue = null;\n }\n\n /**\n * Retrieve all attributes as a JSON object.\n */\n public async asObject() {\n if (this.cache && this.cachedValue !== null) {\n return this.cachedValue;\n }\n const o = await this.getNoSync();\n if (this.cache) {\n this.cachedValue = o;\n }\n return o;\n }\n\n private async getNoSync(): Promise {\n try {\n const data = await this.store.getItem(this.key);\n // TODO fix typing?\n return parseMetadata(data) as M;\n } catch (error) {\n return {} as M;\n }\n }\n\n private async setNoSync(key: string, value: any) {\n const d = await this.getNoSync();\n (d as any)[key] = value;\n await this.putNoSync(d);\n return true;\n }\n\n private async putNoSync(m: M) {\n await this.store.setItem(this.key, JSON.stringify(m));\n if (this.cache) {\n this.cachedValue = m;\n }\n }\n\n private async delNoSync(key: string): Promise {\n const d = await this.getNoSync();\n delete (d as any)[key];\n await this.putNoSync(d);\n return true;\n }\n\n /**\n * Overwrite all attributes with the provided object in a single operation\n */\n async put(d: M) {\n if (this.readOnly) {\n throw new PermissionError(\"attributes are read-only\");\n }\n return this.putNoSync(d);\n }\n\n async setItem(key: string, value: any): Promise {\n if (this.readOnly) {\n throw new PermissionError(\"attributes are read-only\");\n }\n return this.setNoSync(key, value);\n }\n\n async getItem(key: string) {\n return ((await this.asObject()) as any)[key];\n }\n\n async deleteItem(key: string) {\n if (this.readOnly) {\n throw new PermissionError(\"attributes are read-only\");\n }\n return this.delNoSync(key);\n }\n\n async containsItem(key: string) {\n return ((await this.asObject()) as any)[key] !== undefined;\n }\n\n proxy(): AsyncMutableMappingProxy {\n return createProxy(this);\n }\n}","import { DtypeString } from '../types';\nimport { ValueError } from '../errors';\n\n// Conditionally get the type for `Float16Array` based on end user TS settings. If not\n// present, then the type if `never` (and thus excluded from unions).\ntype Float16ArrayConstructor = typeof globalThis extends { Float16Array: infer T } ? 
T : never;\n// eslint-disable-next-line @typescript-eslint/naming-convention\nconst Float16Array = (globalThis as any).Float16Array as Float16ArrayConstructor;\n\nexport type NestedArrayData = TypedArray | NDNestedArrayData;\nexport type NDNestedArrayData =\n | TypedArray[]\n | TypedArray[][]\n | TypedArray[][][]\n | TypedArray[][][][]\n | TypedArray[][][][][]\n | TypedArray[][][][][][];\n\nexport type TypedArray =\n | Uint8Array\n | Int8Array\n | Uint16Array\n | Int16Array\n | Uint32Array\n | Int32Array\n | Float32Array\n | Float64Array\n | InstanceType;\n\nexport type TypedArrayConstructor = {\n new(): T;\n // tslint:disable-next-line: unified-signatures\n new(size: number): T;\n // tslint:disable-next-line: unified-signatures\n new(buffer: ArrayBuffer): T;\n BYTES_PER_ELEMENT: number;\n};\n\nconst DTYPE_TYPEDARRAY_MAPPING: { [A in DtypeString]: TypedArrayConstructor } = {\n '|b': Int8Array,\n '|b1': Uint8Array,\n '|B': Uint8Array,\n '|u1': Uint8Array,\n '|i1': Int8Array,\n 'b': Int8Array,\n '>B': Uint8Array,\n '>u1': Uint8Array,\n '>i1': Int8Array,\n '>u2': Uint16Array,\n '>i2': Int16Array,\n '>u4': Uint32Array,\n '>i4': Int32Array,\n '>f4': Float32Array,\n '>f2': Float16Array,\n '>f8': Float64Array\n};\n\nexport function getTypedArrayCtr(dtype: DtypeString) {\n const ctr = DTYPE_TYPEDARRAY_MAPPING[dtype];\n if (!ctr) {\n if (dtype.slice(1) === 'f2') {\n throw Error(\n `'${dtype}' is not supported natively in zarr.js. ` +\n `In order to access this dataset you must make Float16Array available as a global. ` +\n `See https://github.com/gzuidhof/zarr.js/issues/127`\n );\n }\n throw Error(`Dtype not recognized or not supported in zarr.js, got ${dtype}.`);\n }\n return ctr;\n}\n\n/*\n * Called by NestedArray and RawArray constructors only.\n * We byte-swap the buffer of a store after decoding\n * since TypedArray views are little endian only.\n *\n * This means NestedArrays and RawArrays will always be little endian,\n * unless a numpy-like library comes around and can handle endianess\n * for buffer views.\n */\nexport function getTypedArrayDtypeString(t: TypedArray): DtypeString {\n // Favour the types below instead of small and big B\n if (t instanceof Uint8Array) return '|u1';\n if (t instanceof Int8Array) return '|i1';\n if (t instanceof Uint16Array) return '(arr: any): TypedArrayConstructor {\n // TODO fix typing\n // tslint:disable-next-line: strict-type-predicates\n if ((arr as TypedArray).byteLength !== undefined) {\n return (arr).constructor;\n }\n return getNestedArrayConstructor(arr[0]);\n}\n\n/**\n * Returns both the slice result and new output shape\n * @param arr NestedArray to slice\n * @param shape The shape of the NestedArray\n * @param selection\n */\nexport function sliceNestedArray(arr: NestedArrayData, shape: number[], selection: number | ArraySelection): [NestedArrayData | number, number[]] {\n // This translates \"...\", \":\", null into a list of slices or integer selections\n const normalizedSelection = normalizeArraySelection(selection, shape);\n const [sliceIndices, outShape] = selectionToSliceIndices(normalizedSelection, shape);\n const outArray = _sliceNestedArray(arr, shape, sliceIndices);\n return [outArray, outShape];\n}\n\nfunction _sliceNestedArray(arr: NestedArrayData, shape: number[], selection: (SliceIndices | number)[]): NestedArrayData | number {\n const currentSlice = selection[0];\n\n // Is this necessary?\n // // This is possible when a slice list is passed shorter than the amount of dimensions\n // // tslint:disable-next-line: 
strict-type-predicates\n // if (currentSlice === undefined) {\n // return arr.slice();\n // }\n\n // When a number is passed that dimension is squeezed\n if (typeof currentSlice === \"number\") {\n // Assume already normalized integer selection here.\n if (shape.length === 1) {\n return arr[currentSlice];\n } else {\n return _sliceNestedArray(arr[currentSlice] as NestedArrayData, shape.slice(1), selection.slice(1));\n }\n }\n const [from, to, step, outputSize] = currentSlice;\n\n if (outputSize === 0) {\n return new (getNestedArrayConstructor(arr))(0);\n }\n\n if (shape.length === 1) {\n if (step === 1) {\n return (arr as TypedArray).slice(from, to);\n }\n\n const newArrData = new (arr.constructor as TypedArrayConstructor)(outputSize);\n for (let i = 0; i < outputSize; i++) {\n newArrData[i] = (arr as TypedArray)[from + i * step];\n }\n return newArrData;\n }\n\n let newArr = new Array(outputSize);\n\n for (let i = 0; i < outputSize; i++) {\n newArr[i] = _sliceNestedArray(arr[from + i * step] as NestedArrayData, shape.slice(1), selection.slice(1));\n }\n\n // This is necessary to ensure that the return value is a NestedArray if the last dimension is squeezed\n // e.g. shape [2,1] with slice [:, 0] would otherwise result in a list of numbers instead of a valid NestedArray\n if (outputSize > 0 && typeof newArr[0] === \"number\") {\n const typedArrayConstructor = (arr[0] as TypedArray).constructor;\n newArr = (typedArrayConstructor as any).from(newArr);\n }\n\n return newArr;\n}\n\n\n\n\nexport function setNestedArrayToScalar(dstArr: NestedArrayData, value: number, destShape: number[], selection: number | ArraySelection) {\n // This translates \"...\", \":\", null, etc into a list of slices.\n const normalizedSelection = normalizeArraySelection(selection, destShape, true);\n\n // Above we force the results to be SliceIndicesIndices only, without integer selections making this cast is safe.\n const [sliceIndices, _outShape] = selectionToSliceIndices(normalizedSelection, destShape) as [SliceIndices[], number[]];\n _setNestedArrayToScalar(dstArr, value, destShape, sliceIndices);\n}\n\nexport function setNestedArray(dstArr: NestedArrayData, sourceArr: NestedArrayData, destShape: number[], sourceShape: number[], selection: number | ArraySelection) {\n // This translates \"...\", \":\", null, etc into a list of slices.\n const normalizedSelection = normalizeArraySelection(selection, destShape, false);\n const [sliceIndices, outShape] = selectionToSliceIndices(normalizedSelection, destShape);\n\n // TODO: replace with non stringify equality check\n if (JSON.stringify(outShape) !== JSON.stringify(sourceShape)) {\n throw new ValueError(`Shape mismatch in target and source NestedArray: ${outShape} and ${sourceShape}`);\n }\n\n _setNestedArray(dstArr, sourceArr, destShape, sliceIndices);\n}\n\n\nfunction _setNestedArray(dstArr: NestedArrayData, sourceArr: NestedArrayData | number, shape: number[], selection: (SliceIndices | number)[]) {\n\n const currentSlice = selection[0];\n\n if (typeof sourceArr === \"number\") {\n _setNestedArrayToScalar(dstArr, sourceArr, shape, selection.map(x => typeof x === \"number\" ? 
[x, x + 1, 1, 1] : x));\n return;\n }\n\n // This dimension is squeezed.\n if (typeof currentSlice === \"number\") {\n _setNestedArray((dstArr as NDNestedArrayData)[currentSlice], sourceArr, shape.slice(1), selection.slice(1));\n return;\n }\n\n const [from, _to, step, outputSize] = currentSlice;\n\n if (shape.length === 1) {\n if (step === 1) {\n (dstArr as TypedArray).set(sourceArr as TypedArray, from);\n } else {\n for (let i = 0; i < outputSize; i++) {\n dstArr[from + i * step] = (sourceArr)[i];\n }\n }\n return;\n }\n\n for (let i = 0; i < outputSize; i++) {\n _setNestedArray((dstArr as NDNestedArrayData)[from + i * step], (sourceArr as NDNestedArrayData)[i], shape.slice(1), selection.slice(1));\n }\n}\n\nfunction _setNestedArrayToScalar(dstArr: NestedArrayData, value: number, shape: number[], selection: SliceIndices[]) {\n const currentSlice = selection[0];\n\n const [from, to, step, outputSize] = currentSlice;\n\n if (shape.length === 1) {\n if (step === 1) {\n (dstArr as TypedArray).fill(value, from, to);\n } else {\n for (let i = 0; i < outputSize; i++) {\n dstArr[from + i * step] = value;\n }\n }\n return;\n }\n\n for (let i = 0; i < outputSize; i++) {\n _setNestedArrayToScalar((dstArr as NDNestedArrayData)[from + i * step], value, shape.slice(1), selection.slice(1));\n }\n}\n\nexport function flattenNestedArray(arr: NestedArrayData, shape: number[], constr?: TypedArrayConstructor): TypedArray {\n if (constr === undefined) {\n constr = getNestedArrayConstructor(arr);\n }\n const size = shape.reduce((x, y) => x * y, 1);\n const outArr = new constr(size);\n\n _flattenNestedArray(arr, shape, outArr, 0);\n\n return outArr;\n}\n\nfunction _flattenNestedArray(arr: NestedArrayData, shape: number[], outArr: TypedArray, offset: number) {\n if (shape.length === 1) {\n // This is only ever reached if called with rank 1 shape, never reached through recursion.\n // We just slice set the array directly from one level above to save some function calls.\n outArr.set((arr as TypedArray), offset);\n return;\n }\n\n if (shape.length === 2) {\n for (let i = 0; i < shape[0]; i++) {\n outArr.set((arr as TypedArray[])[i], offset + shape[1] * i);\n }\n return arr;\n }\n\n const nextShape = shape.slice(1);\n // Small optimization possible here: this can be precomputed for different levels of depth and passed on.\n const mult = nextShape.reduce((x, y) => x * y, 1);\n\n for (let i = 0; i < shape[0]; i++) {\n _flattenNestedArray((arr as NDNestedArrayData)[i], nextShape, outArr, offset + mult * i);\n }\n return arr;\n}\n","import { DtypeString } from '../types';\nimport { NestedArrayData, TypedArray, TypedArrayConstructor, getTypedArrayCtr, getTypedArrayDtypeString } from './types';\nimport { ArraySelection, Slice } from '../core/types';\nimport { slice } from '../core/slice';\nimport { ValueError } from '../errors';\nimport { normalizeShape, IS_NODE, isArrayBufferLike } from '../util';\nimport { setNestedArray, setNestedArrayToScalar, flattenNestedArray, sliceNestedArray } from './ops';\n\nexport class NestedArray {\n dtype: DtypeString;\n shape: number[];\n data: NestedArrayData;\n\n constructor(data: TypedArray, shape?: number | number[], dtype?: DtypeString)\n constructor(data: Buffer | ArrayBufferLike | NestedArrayData | null, shape: number | number[], dtype: DtypeString)\n constructor(data: Buffer | ArrayBufferLike | NestedArrayData | TypedArray | null, shape?: number | number[], dtype?: DtypeString) {\n const dataIsTypedArray = data !== null && !!(data as TypedArray).BYTES_PER_ELEMENT;\n\n if (shape 
=== undefined) {\n if (!dataIsTypedArray) {\n throw new ValueError(\"Shape argument is required unless you pass in a TypedArray\");\n }\n shape = [(data as TypedArray).length];\n }\n\n if (dtype === undefined) {\n if (!dataIsTypedArray) {\n throw new ValueError(\"Dtype argument is required unless you pass in a TypedArray\");\n }\n dtype = getTypedArrayDtypeString(data as TypedArray);\n }\n\n shape = normalizeShape(shape);\n this.shape = shape;\n this.dtype = dtype;\n\n if (dataIsTypedArray && shape.length !== 1) {\n data = (data as TypedArray).buffer;\n }\n\n // Zero dimension array.. they are a bit weirdly represented now, they will only ever occur internally\n if (this.shape.length === 0) {\n this.data = new (getTypedArrayCtr(dtype))(1);\n }\n else if (\n // tslint:disable-next-line: strict-type-predicates\n (IS_NODE && Buffer.isBuffer(data))\n || isArrayBufferLike(data)\n || data === null\n ) {\n // Create from ArrayBuffer or Buffer\n const numShapeElements = shape.reduce((x, y) => x * y, 1);\n\n if (data === null) {\n data = new ArrayBuffer(numShapeElements * parseInt(dtype[dtype.length - 1], 10));\n }\n\n const numDataElements = (data as ArrayBuffer).byteLength / parseInt(dtype[dtype.length - 1], 10);\n if (numShapeElements !== numDataElements) {\n throw new Error(`Buffer has ${numDataElements} of dtype ${dtype}, shape is too large or small ${shape} (flat=${numShapeElements})`);\n }\n const typeConstructor: TypedArrayConstructor = getTypedArrayCtr(dtype);\n this.data = createNestedArray((data as ArrayBuffer), typeConstructor, shape);\n } else {\n this.data = data;\n }\n }\n\n public get(selection: Slice | \":\" | \"...\" | null | (Slice | null | \":\" | \"...\")[]): NestedArray;\n public get(selection: ArraySelection): NestedArray | number;\n public get(selection: ArraySelection): NestedArray | number {\n const [sliceResult, outShape] = sliceNestedArray(this.data, this.shape, selection);\n if (outShape.length === 0) {\n return sliceResult as number;\n } else {\n return new NestedArray(sliceResult as NestedArrayData, outShape, this.dtype);\n }\n }\n\n public set(selection: ArraySelection = null, value: NestedArray | number) {\n if (selection === null) {\n selection = [slice(null)];\n }\n if (typeof value === \"number\") {\n if (this.shape.length === 0) {\n // Zero dimension array..\n this.data[0] = value;\n } else {\n setNestedArrayToScalar(this.data, value, this.shape, selection);\n }\n } else {\n setNestedArray(this.data, value.data, this.shape, value.shape, selection);\n }\n }\n\n public flatten(): T {\n if (this.shape.length === 1) {\n return this.data as T;\n }\n return flattenNestedArray(this.data, this.shape, getTypedArrayCtr(this.dtype)) as T;\n }\n\n /**\n * Currently only supports a single integer as the size, TODO: support start, stop, step.\n */\n public static arange(size: number, dtype: DtypeString = \" {\n const constr = getTypedArrayCtr(dtype);\n const data = rangeTypedArray([size], constr);\n return new NestedArray(data, [size], dtype);\n }\n}\n\n\n\n/**\n * Creates a TypedArray with values 0 through N where N is the product of the shape.\n */\nexport function rangeTypedArray(shape: number[], tContructor: TypedArrayConstructor) {\n const size = shape.reduce((x, y) => x * y, 1);\n const data = new tContructor(size);\n data.set([...Array(size).keys()]); // Sets range 0,1,2,3,4,5\n return data;\n}\n\n/**\n * Creates multi-dimensional (rank > 1) array given input data and shape recursively.\n * What it does is create a Array>> or some other typed array.\n * This is for 
internal use, there should be no need to call this from user code.\n * @param data a buffer containing the data for this array.\n * @param t constructor for the datatype of choice\n * @param shape list of numbers describing the size in each dimension\n * @param offset in bytes for this dimension\n */\nexport function createNestedArray(data: Buffer | ArrayBuffer, t: TypedArrayConstructor, shape: number[], offset = 0): NestedArrayData {\n if (shape.length === 1) {\n // This is only ever reached if called with rank 1 shape, never reached through recursion.\n // We just slice set the array directly from one level above to save some function calls.\n return new t(data.slice(offset, offset + shape[0] * t.BYTES_PER_ELEMENT));\n }\n\n const arr = new Array(shape[0]);\n if (shape.length === 2) {\n for (let i = 0; i < shape[0]; i++) {\n arr[i] = new t(data.slice(offset + shape[1] * i * t.BYTES_PER_ELEMENT, offset + shape[1] * (i + 1) * t.BYTES_PER_ELEMENT));\n }\n return arr;\n }\n\n const nextShape = shape.slice(1);\n // Small optimization possible here: this can be precomputed for different levels of depth and passed on.\n const mult = nextShape.reduce((x, y) => x * y, 1);\n\n for (let i = 0; i < shape[0]; i++) {\n arr[i] = createNestedArray(data, t, nextShape, offset + mult * i * t.BYTES_PER_ELEMENT);\n }\n return arr;\n}\n","import { ArraySelection, SliceIndices } from '../core/types';\nimport { normalizeArraySelection, selectionToSliceIndices } from '../core/indexing';\nimport { ValueError } from '../errors';\nimport { TypedArray } from '../nestedArray/types';\n\nexport function setRawArrayToScalar(dstArr: TypedArray, dstStrides: number[], dstShape: number[], dstSelection: number | ArraySelection, value: number) {\n // This translates \"...\", \":\", null, etc into a list of slices.\n const normalizedSelection = normalizeArraySelection(dstSelection, dstShape, true);\n const [sliceIndices] = selectionToSliceIndices(normalizedSelection, dstShape);\n // Above we force the results to be SliceIndicesIndices only, without integer selections making this cast is safe.\n _setRawArrayToScalar(value, dstArr, dstStrides, sliceIndices as SliceIndices[]);\n}\n\nexport function setRawArray(dstArr: TypedArray, dstStrides: number[], dstShape: number[], dstSelection: number | ArraySelection, sourceArr: TypedArray, sourceStrides: number[], sourceShape: number[]): void {\n // This translates \"...\", \":\", null, etc into a list of slices.\n const normalizedDstSelection = normalizeArraySelection(dstSelection, dstShape, false);\n const [dstSliceIndices, outShape] = selectionToSliceIndices(normalizedDstSelection, dstShape);\n\n // TODO: replace with non stringify equality check\n if (JSON.stringify(outShape) !== JSON.stringify(sourceShape)) {\n throw new ValueError(`Shape mismatch in target and source RawArray: ${outShape} and ${sourceShape}`);\n }\n\n _setRawArray(dstArr, dstStrides, dstSliceIndices, sourceArr, sourceStrides);\n}\n\nexport function setRawArrayFromChunkItem(dstArr: TypedArray, dstStrides: number[], dstShape: number[], dstSelection: number | ArraySelection, sourceArr: TypedArray, sourceStrides: number[], sourceShape: number[], sourceSelection: number | ArraySelection) {\n // This translates \"...\", \":\", null, etc into a list of slices.\n const normalizedDstSelection = normalizeArraySelection(dstSelection, dstShape, true);\n // Above we force the results to be dstSliceIndices only, without integer selections making this cast is safe.\n const [dstSliceIndices] = 
selectionToSliceIndices(normalizedDstSelection, dstShape);\n\n const normalizedSourceSelection = normalizeArraySelection(sourceSelection, sourceShape, false);\n const [sourceSliceIndicies] = selectionToSliceIndices(normalizedSourceSelection, sourceShape);\n\n // TODO check to ensure chunk and dest selection are same shape?\n // As is, this only gets called in ZarrArray.getRaw where this condition should be ensured, and check might hinder performance.\n\n _setRawArrayFromChunkItem(dstArr, dstStrides, dstSliceIndices as SliceIndices[], sourceArr, sourceStrides, sourceSliceIndicies);\n}\n\nfunction _setRawArrayToScalar(value: number, dstArr: TypedArray, dstStrides: number[], dstSliceIndices: SliceIndices[]) {\n const [currentDstSlice, ...nextDstSliceIndices] = dstSliceIndices;\n const [currentDstStride, ...nextDstStrides] = dstStrides;\n\n const [from, _to, step, outputSize] = currentDstSlice;\n\n if (dstStrides.length === 1) {\n if (step === 1 && currentDstStride === 1) {\n dstArr.fill(value, from, from + outputSize);\n } else {\n for (let i = 0; i < outputSize; i++) {\n dstArr[currentDstStride * (from + (step * i))] = value;\n }\n }\n return;\n }\n\n for (let i = 0; i < outputSize; i++) {\n _setRawArrayToScalar(\n value,\n dstArr.subarray(currentDstStride * (from + (step * i))),\n nextDstStrides,\n nextDstSliceIndices,\n );\n }\n}\n\nfunction _setRawArray(dstArr: TypedArray, dstStrides: number[], dstSliceIndices: (number | SliceIndices)[], sourceArr: TypedArray, sourceStrides: number[]) {\n if (dstSliceIndices.length === 0) {\n dstArr.set(sourceArr);\n return;\n }\n\n const [currentDstSlice, ...nextDstSliceIndices] = dstSliceIndices;\n const [currentDstStride, ...nextDstStrides] = dstStrides;\n\n // This dimension is squeezed.\n if (typeof currentDstSlice === \"number\") {\n _setRawArray(\n dstArr.subarray(currentDstSlice * currentDstStride),\n nextDstStrides,\n nextDstSliceIndices,\n sourceArr,\n sourceStrides\n );\n return;\n }\n\n const [currentSourceStride, ...nextSourceStrides] = sourceStrides;\n const [from, _to, step, outputSize] = currentDstSlice;\n\n if (dstStrides.length === 1) {\n if (step === 1 && currentDstStride === 1 && currentSourceStride === 1) {\n dstArr.set(sourceArr.subarray(0, outputSize), from);\n } else {\n for (let i = 0; i < outputSize; i++) {\n dstArr[currentDstStride * (from + (step * i))] = sourceArr[currentSourceStride * i];\n }\n }\n return;\n }\n\n for (let i = 0; i < outputSize; i++) {\n // Apply strides as above, using both destination and source-specific strides.\n _setRawArray(\n dstArr.subarray(currentDstStride * (from + (i * step))),\n nextDstStrides,\n nextDstSliceIndices,\n sourceArr.subarray(currentSourceStride * i),\n nextSourceStrides\n );\n }\n}\n\nfunction _setRawArrayFromChunkItem(dstArr: TypedArray, dstStrides: number[], dstSliceIndices: SliceIndices[], sourceArr: TypedArray, sourceStrides: number[], sourceSliceIndices: (SliceIndices | number)[]) {\n if (sourceSliceIndices.length === 0) {\n // Case when last source dimension is squeezed\n dstArr.set(sourceArr.subarray(0, dstArr.length));\n return;\n }\n\n // Get current indicies and strides for both destination and source arrays\n const [currentDstSlice, ...nextDstSliceIndices] = dstSliceIndices;\n const [currentSourceSlice, ...nextSourceSliceIndices] = sourceSliceIndices;\n\n const [currentDstStride, ...nextDstStrides] = dstStrides;\n const [currentSourceStride, ...nextSourceStrides] = sourceStrides;\n\n // This source dimension is squeezed\n if (typeof currentSourceSlice === \"number\") {\n 
/*\n Sets dimension offset for squeezed dimension.\n\n Ex. if 0th dimension is squeezed to 2nd index (numpy : arr[2,i])\n\n sourceArr[stride[0]* 2 + i] --> sourceArr.subarray(stride[0] * 2)[i] (sourceArr[i] in next call)\n\n Thus, subsequent squeezed dims are appended to the source offset.\n */\n _setRawArrayFromChunkItem(\n // Don't update destination offset/slices, just source\n dstArr, dstStrides, dstSliceIndices,\n sourceArr.subarray(currentSourceStride * currentSourceSlice),\n nextSourceStrides,\n nextSourceSliceIndices,\n );\n return;\n }\n\n const [from, _to, step, outputSize] = currentDstSlice; // just need start and size\n const [sfrom, _sto, sstep, _soutputSize] = currentSourceSlice; // Will always be subset of dst, so don't need output size just start\n\n if (dstStrides.length === 1 && sourceStrides.length === 1) {\n if (step === 1 && currentDstStride === 1 && sstep === 1 && currentSourceStride === 1) {\n dstArr.set(sourceArr.subarray(sfrom, sfrom + outputSize), from);\n } else {\n for (let i = 0; i < outputSize; i++) {\n dstArr[currentDstStride * (from + (step * i))] = sourceArr[currentSourceStride * (sfrom + (sstep * i))];\n }\n }\n return;\n }\n\n for (let i = 0; i < outputSize; i++) {\n // Apply strides as above, using both destination and source-specific strides.\n _setRawArrayFromChunkItem(\n dstArr.subarray(currentDstStride * (from + (i * step))),\n nextDstStrides,\n nextDstSliceIndices,\n sourceArr.subarray(currentSourceStride * (sfrom + (i * sstep))),\n nextSourceStrides,\n nextSourceSliceIndices,\n );\n }\n}\n","import { DtypeString } from '../types';\nimport { ArraySelection } from '../core/types';\nimport { slice } from '../core/slice';\nimport { ValueError } from '../errors';\nimport { normalizeShape, IS_NODE, getStrides, isArrayBufferLike } from '../util';\nimport { TypedArray, getTypedArrayCtr, getTypedArrayDtypeString, TypedArrayConstructor } from '../nestedArray/types';\nimport { setRawArrayFromChunkItem, setRawArrayToScalar, setRawArray } from './ops';\n\nexport class RawArray {\n dtype: DtypeString;\n shape: number[];\n strides: number[];\n data: TypedArray;\n\n constructor(data: TypedArray, shape?: number | number[], dtype?: DtypeString, strides?: number[])\n constructor(data: Buffer | ArrayBufferLike | null, shape?: number | number[], dtype?: DtypeString, strides?: number[])\n constructor(data: Buffer | ArrayBufferLike | TypedArray | null, shape?: number | number[], dtype?: DtypeString, strides?: number[]) {\n const dataIsTypedArray = data !== null && !!(data as TypedArray).BYTES_PER_ELEMENT;\n\n if (shape === undefined) {\n if (!dataIsTypedArray) {\n throw new ValueError(\"Shape argument is required unless you pass in a TypedArray\");\n }\n shape = [(data as TypedArray).length];\n }\n shape = normalizeShape(shape);\n\n if (dtype === undefined) {\n if (!dataIsTypedArray) {\n throw new ValueError(\"Dtype argument is required unless you pass in a TypedArray\");\n }\n dtype = getTypedArrayDtypeString(data as TypedArray);\n }\n\n if (strides === undefined) {\n strides = getStrides(shape);\n }\n\n this.shape = shape;\n this.dtype = dtype;\n this.strides = strides;\n\n if (dataIsTypedArray && shape.length !== 1) {\n data = (data as TypedArray).buffer;\n }\n\n // Zero dimension array.. 
they are a bit weirdly represented now, they will only ever occur internally\n if (this.shape.length === 0) {\n this.data = new (getTypedArrayCtr(dtype))(1);\n } else if (\n // tslint:disable-next-line: strict-type-predicates\n (IS_NODE && Buffer.isBuffer(data))\n || isArrayBufferLike(data)\n || data === null\n ) {\n // Create from ArrayBuffer or Buffer\n const numShapeElements = shape.reduce((x, y) => x * y, 1);\n\n if (data === null) {\n data = new ArrayBuffer(numShapeElements * parseInt(dtype[dtype.length - 1], 10));\n }\n\n const numDataElements = (data as ArrayBuffer).byteLength / parseInt(dtype[dtype.length - 1], 10);\n if (numShapeElements !== numDataElements) {\n throw new Error(`Buffer has ${numDataElements} of dtype ${dtype}, shape is too large or small ${shape} (flat=${numShapeElements})`);\n }\n const typeConstructor: TypedArrayConstructor = getTypedArrayCtr(dtype);\n this.data = new typeConstructor(data as ArrayBuffer);\n } else {\n this.data = data;\n }\n }\n\n public set(selection: ArraySelection, value: RawArray | number): void;\n public set(selection: ArraySelection, chunk: RawArray, chunkSelection: ArraySelection): void;\n public set(selection: ArraySelection = null, value: RawArray | number, chunkSelection?: ArraySelection) {\n if (selection === null) {\n selection = [slice(null)];\n }\n if (typeof value === \"number\") {\n if (this.shape.length === 0) {\n // Zero dimension array..\n this.data[0] = value;\n } else {\n setRawArrayToScalar(this.data, this.strides, this.shape, selection, value);\n }\n } else if (value instanceof RawArray && chunkSelection) {\n // Copy directly from decoded chunk to destination array\n setRawArrayFromChunkItem(this.data, this.strides, this.shape, selection, value.data, value.strides, value.shape, chunkSelection);\n } else {\n setRawArray(this.data, this.strides, this.shape, selection, value.data, value.strides, value.shape);\n }\n }\n}\n\n\n","'use strict';\n\nvar has = Object.prototype.hasOwnProperty\n , prefix = '~';\n\n/**\n * Constructor to create a storage for our `EE` objects.\n * An `Events` instance is a plain object whose properties are event names.\n *\n * @constructor\n * @private\n */\nfunction Events() {}\n\n//\n// We try to not inherit from `Object.prototype`. 
In some engines creating an\n// instance in this way is faster than calling `Object.create(null)` directly.\n// If `Object.create(null)` is not supported we prefix the event names with a\n// character to make sure that the built-in object properties are not\n// overridden or used as an attack vector.\n//\nif (Object.create) {\n Events.prototype = Object.create(null);\n\n //\n // This hack is needed because the `__proto__` property is still inherited in\n // some old browsers like Android 4, iPhone 5.1, Opera 11 and Safari 5.\n //\n if (!new Events().__proto__) prefix = false;\n}\n\n/**\n * Representation of a single event listener.\n *\n * @param {Function} fn The listener function.\n * @param {*} context The context to invoke the listener with.\n * @param {Boolean} [once=false] Specify if the listener is a one-time listener.\n * @constructor\n * @private\n */\nfunction EE(fn, context, once) {\n this.fn = fn;\n this.context = context;\n this.once = once || false;\n}\n\n/**\n * Add a listener for a given event.\n *\n * @param {EventEmitter} emitter Reference to the `EventEmitter` instance.\n * @param {(String|Symbol)} event The event name.\n * @param {Function} fn The listener function.\n * @param {*} context The context to invoke the listener with.\n * @param {Boolean} once Specify if the listener is a one-time listener.\n * @returns {EventEmitter}\n * @private\n */\nfunction addListener(emitter, event, fn, context, once) {\n if (typeof fn !== 'function') {\n throw new TypeError('The listener must be a function');\n }\n\n var listener = new EE(fn, context || emitter, once)\n , evt = prefix ? prefix + event : event;\n\n if (!emitter._events[evt]) emitter._events[evt] = listener, emitter._eventsCount++;\n else if (!emitter._events[evt].fn) emitter._events[evt].push(listener);\n else emitter._events[evt] = [emitter._events[evt], listener];\n\n return emitter;\n}\n\n/**\n * Clear event by name.\n *\n * @param {EventEmitter} emitter Reference to the `EventEmitter` instance.\n * @param {(String|Symbol)} evt The Event name.\n * @private\n */\nfunction clearEvent(emitter, evt) {\n if (--emitter._eventsCount === 0) emitter._events = new Events();\n else delete emitter._events[evt];\n}\n\n/**\n * Minimal `EventEmitter` interface that is molded against the Node.js\n * `EventEmitter` interface.\n *\n * @constructor\n * @public\n */\nfunction EventEmitter() {\n this._events = new Events();\n this._eventsCount = 0;\n}\n\n/**\n * Return an array listing the events for which the emitter has registered\n * listeners.\n *\n * @returns {Array}\n * @public\n */\nEventEmitter.prototype.eventNames = function eventNames() {\n var names = []\n , events\n , name;\n\n if (this._eventsCount === 0) return names;\n\n for (name in (events = this._events)) {\n if (has.call(events, name)) names.push(prefix ? name.slice(1) : name);\n }\n\n if (Object.getOwnPropertySymbols) {\n return names.concat(Object.getOwnPropertySymbols(events));\n }\n\n return names;\n};\n\n/**\n * Return the listeners registered for a given event.\n *\n * @param {(String|Symbol)} event The event name.\n * @returns {Array} The registered listeners.\n * @public\n */\nEventEmitter.prototype.listeners = function listeners(event) {\n var evt = prefix ? 
prefix + event : event\n , handlers = this._events[evt];\n\n if (!handlers) return [];\n if (handlers.fn) return [handlers.fn];\n\n for (var i = 0, l = handlers.length, ee = new Array(l); i < l; i++) {\n ee[i] = handlers[i].fn;\n }\n\n return ee;\n};\n\n/**\n * Return the number of listeners listening to a given event.\n *\n * @param {(String|Symbol)} event The event name.\n * @returns {Number} The number of listeners.\n * @public\n */\nEventEmitter.prototype.listenerCount = function listenerCount(event) {\n var evt = prefix ? prefix + event : event\n , listeners = this._events[evt];\n\n if (!listeners) return 0;\n if (listeners.fn) return 1;\n return listeners.length;\n};\n\n/**\n * Calls each of the listeners registered for a given event.\n *\n * @param {(String|Symbol)} event The event name.\n * @returns {Boolean} `true` if the event had listeners, else `false`.\n * @public\n */\nEventEmitter.prototype.emit = function emit(event, a1, a2, a3, a4, a5) {\n var evt = prefix ? prefix + event : event;\n\n if (!this._events[evt]) return false;\n\n var listeners = this._events[evt]\n , len = arguments.length\n , args\n , i;\n\n if (listeners.fn) {\n if (listeners.once) this.removeListener(event, listeners.fn, undefined, true);\n\n switch (len) {\n case 1: return listeners.fn.call(listeners.context), true;\n case 2: return listeners.fn.call(listeners.context, a1), true;\n case 3: return listeners.fn.call(listeners.context, a1, a2), true;\n case 4: return listeners.fn.call(listeners.context, a1, a2, a3), true;\n case 5: return listeners.fn.call(listeners.context, a1, a2, a3, a4), true;\n case 6: return listeners.fn.call(listeners.context, a1, a2, a3, a4, a5), true;\n }\n\n for (i = 1, args = new Array(len -1); i < len; i++) {\n args[i - 1] = arguments[i];\n }\n\n listeners.fn.apply(listeners.context, args);\n } else {\n var length = listeners.length\n , j;\n\n for (i = 0; i < length; i++) {\n if (listeners[i].once) this.removeListener(event, listeners[i].fn, undefined, true);\n\n switch (len) {\n case 1: listeners[i].fn.call(listeners[i].context); break;\n case 2: listeners[i].fn.call(listeners[i].context, a1); break;\n case 3: listeners[i].fn.call(listeners[i].context, a1, a2); break;\n case 4: listeners[i].fn.call(listeners[i].context, a1, a2, a3); break;\n default:\n if (!args) for (j = 1, args = new Array(len -1); j < len; j++) {\n args[j - 1] = arguments[j];\n }\n\n listeners[i].fn.apply(listeners[i].context, args);\n }\n }\n }\n\n return true;\n};\n\n/**\n * Add a listener for a given event.\n *\n * @param {(String|Symbol)} event The event name.\n * @param {Function} fn The listener function.\n * @param {*} [context=this] The context to invoke the listener with.\n * @returns {EventEmitter} `this`.\n * @public\n */\nEventEmitter.prototype.on = function on(event, fn, context) {\n return addListener(this, event, fn, context, false);\n};\n\n/**\n * Add a one-time listener for a given event.\n *\n * @param {(String|Symbol)} event The event name.\n * @param {Function} fn The listener function.\n * @param {*} [context=this] The context to invoke the listener with.\n * @returns {EventEmitter} `this`.\n * @public\n */\nEventEmitter.prototype.once = function once(event, fn, context) {\n return addListener(this, event, fn, context, true);\n};\n\n/**\n * Remove the listeners of a given event.\n *\n * @param {(String|Symbol)} event The event name.\n * @param {Function} fn Only remove the listeners that match this function.\n * @param {*} context Only remove the listeners that have this context.\n * 
@param {Boolean} once Only remove one-time listeners.\n * @returns {EventEmitter} `this`.\n * @public\n */\nEventEmitter.prototype.removeListener = function removeListener(event, fn, context, once) {\n var evt = prefix ? prefix + event : event;\n\n if (!this._events[evt]) return this;\n if (!fn) {\n clearEvent(this, evt);\n return this;\n }\n\n var listeners = this._events[evt];\n\n if (listeners.fn) {\n if (\n listeners.fn === fn &&\n (!once || listeners.once) &&\n (!context || listeners.context === context)\n ) {\n clearEvent(this, evt);\n }\n } else {\n for (var i = 0, events = [], length = listeners.length; i < length; i++) {\n if (\n listeners[i].fn !== fn ||\n (once && !listeners[i].once) ||\n (context && listeners[i].context !== context)\n ) {\n events.push(listeners[i]);\n }\n }\n\n //\n // Reset the array, or remove it completely if we have no more listeners.\n //\n if (events.length) this._events[evt] = events.length === 1 ? events[0] : events;\n else clearEvent(this, evt);\n }\n\n return this;\n};\n\n/**\n * Remove all listeners, or those of the specified event.\n *\n * @param {(String|Symbol)} [event] The event name.\n * @returns {EventEmitter} `this`.\n * @public\n */\nEventEmitter.prototype.removeAllListeners = function removeAllListeners(event) {\n var evt;\n\n if (event) {\n evt = prefix ? prefix + event : event;\n if (this._events[evt]) clearEvent(this, evt);\n } else {\n this._events = new Events();\n this._eventsCount = 0;\n }\n\n return this;\n};\n\n//\n// Alias methods names because people roll like that.\n//\nEventEmitter.prototype.off = EventEmitter.prototype.removeListener;\nEventEmitter.prototype.addListener = EventEmitter.prototype.on;\n\n//\n// Expose the prefix.\n//\nEventEmitter.prefixed = prefix;\n\n//\n// Allow `EventEmitter` to be imported as module namespace.\n//\nEventEmitter.EventEmitter = EventEmitter;\n\n//\n// Expose the module.\n//\nif ('undefined' !== typeof module) {\n module.exports = EventEmitter;\n}\n","export class TimeoutError extends Error {\n\tconstructor(message) {\n\t\tsuper(message);\n\t\tthis.name = 'TimeoutError';\n\t}\n}\n\nexport default function pTimeout(promise, milliseconds, fallback, options) {\n\tlet timer;\n\tconst cancelablePromise = new Promise((resolve, reject) => {\n\t\tif (typeof milliseconds !== 'number' || milliseconds < 0) {\n\t\t\tthrow new TypeError('Expected `milliseconds` to be a positive number');\n\t\t}\n\n\t\tif (milliseconds === Number.POSITIVE_INFINITY) {\n\t\t\tresolve(promise);\n\t\t\treturn;\n\t\t}\n\n\t\toptions = {\n\t\t\tcustomTimers: {setTimeout, clearTimeout},\n\t\t\t...options\n\t\t};\n\n\t\ttimer = options.customTimers.setTimeout.call(undefined, () => {\n\t\t\tif (typeof fallback === 'function') {\n\t\t\t\ttry {\n\t\t\t\t\tresolve(fallback());\n\t\t\t\t} catch (error) {\n\t\t\t\t\treject(error);\n\t\t\t\t}\n\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\tconst message = typeof fallback === 'string' ? fallback : `Promise timed out after ${milliseconds} milliseconds`;\n\t\t\tconst timeoutError = fallback instanceof Error ? 
fallback : new TimeoutError(message);\n\n\t\t\tif (typeof promise.cancel === 'function') {\n\t\t\t\tpromise.cancel();\n\t\t\t}\n\n\t\t\treject(timeoutError);\n\t\t}, milliseconds);\n\n\t\t(async () => {\n\t\t\ttry {\n\t\t\t\tresolve(await promise);\n\t\t\t} catch (error) {\n\t\t\t\treject(error);\n\t\t\t} finally {\n\t\t\t\toptions.customTimers.clearTimeout.call(undefined, timer);\n\t\t\t}\n\t\t})();\n\t});\n\n\tcancelablePromise.clear = () => {\n\t\tclearTimeout(timer);\n\t\ttimer = undefined;\n\t};\n\n\treturn cancelablePromise;\n}\n","// Port of lower_bound from https://en.cppreference.com/w/cpp/algorithm/lower_bound\n// Used to compute insertion index to keep queue sorted after insertion\nexport default function lowerBound(array, value, comparator) {\n let first = 0;\n let count = array.length;\n while (count > 0) {\n const step = Math.trunc(count / 2);\n let it = first + step;\n if (comparator(array[it], value) <= 0) {\n first = ++it;\n count -= step + 1;\n }\n else {\n count = step;\n }\n }\n return first;\n}\n","import { ValidStoreType, AsyncStore } from './types';\nimport { IS_NODE, resolveUrl } from '../util';\nimport { KeyError, HTTPError } from '../errors';\n\nenum HTTPMethod {\n HEAD = 'HEAD',\n GET = 'GET',\n PUT = 'PUT',\n}\n\nconst DEFAULT_METHODS = [HTTPMethod.HEAD, HTTPMethod.GET, HTTPMethod.PUT];\n\ninterface HTTPStoreOptions {\n fetchOptions?: RequestInit;\n supportedMethods?: HTTPMethod[];\n}\n\nexport class HTTPStore implements AsyncStore {\n listDir?: undefined;\n rmDir?: undefined;\n getSize?: undefined;\n rename?: undefined;\n\n public url: UrlRoot;\n public fetchOptions: RequestInit;\n private supportedMethods: Set;\n\n constructor(url: UrlRoot, options: HTTPStoreOptions = {}) {\n this.url = url;\n const { fetchOptions = {}, supportedMethods = DEFAULT_METHODS } = options;\n this.fetchOptions = fetchOptions;\n this.supportedMethods = new Set(supportedMethods);\n }\n\n keys(): Promise {\n throw new Error('Method not implemented.');\n }\n\n async getItem(item: string, opts?: RequestInit) {\n const url = resolveUrl(this.url, item);\n const value = await fetch(url, { ...this.fetchOptions, ...opts });\n\n if (value.status === 404) {\n // Item is not found\n throw new KeyError(item);\n } else if (value.status !== 200) {\n throw new HTTPError(String(value.status));\n }\n\n // only decode if 200\n if (IS_NODE) {\n return Buffer.from(await value.arrayBuffer());\n } else {\n return value.arrayBuffer(); // Browser\n }\n }\n\n async setItem(item: string, value: ValidStoreType): Promise {\n if (!this.supportedMethods.has(HTTPMethod.PUT)) {\n throw new Error('HTTP PUT no a supported method for store.');\n }\n const url = resolveUrl(this.url, item);\n if (typeof value === 'string') {\n value = new TextEncoder().encode(value).buffer;\n }\n const set = await fetch(url, { ...this.fetchOptions, method: HTTPMethod.PUT, body: value });\n return set.status.toString()[0] === '2';\n }\n\n deleteItem(_item: string): Promise {\n throw new Error('Method not implemented.');\n }\n\n async containsItem(item: string): Promise {\n const url = resolveUrl(this.url, item);\n // Just check headers if HEAD method supported\n const method = this.supportedMethods.has(HTTPMethod.HEAD) ? 
HTTPMethod.HEAD : HTTPMethod.GET;\n const value = await fetch(url, { ...this.fetchOptions, method });\n return value.status === 200;\n }\n}\n","import lowerBound from './lower-bound.js';\nexport default class PriorityQueue {\n constructor() {\n Object.defineProperty(this, \"_queue\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: []\n });\n }\n enqueue(run, options) {\n var _a;\n options = {\n priority: 0,\n ...options\n };\n const element = {\n priority: options.priority,\n run\n };\n if (this.size && ((_a = this._queue[this.size - 1]) === null || _a === void 0 ? void 0 : _a.priority) >= options.priority) {\n this._queue.push(element);\n return;\n }\n const index = lowerBound(this._queue, element, (a, b) => b.priority - a.priority);\n this._queue.splice(index, 0, element);\n }\n dequeue() {\n const item = this._queue.shift();\n return item === null || item === void 0 ? void 0 : item.run;\n }\n filter(options) {\n return this._queue.filter((element) => element.priority === options.priority).map((element) => element.run);\n }\n get size() {\n return this._queue.length;\n }\n}\n","import EventEmitter from 'eventemitter3';\nimport pTimeout, { TimeoutError } from 'p-timeout';\nimport PriorityQueue from './priority-queue.js';\n// eslint-disable-next-line @typescript-eslint/no-empty-function\nconst empty = () => { };\nconst timeoutError = new TimeoutError();\n/**\nPromise queue with concurrency control.\n*/\nexport default class PQueue extends EventEmitter {\n constructor(options) {\n var _a, _b, _c, _d;\n super();\n Object.defineProperty(this, \"_carryoverConcurrencyCount\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: void 0\n });\n Object.defineProperty(this, \"_isIntervalIgnored\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: void 0\n });\n Object.defineProperty(this, \"_intervalCount\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: 0\n });\n Object.defineProperty(this, \"_intervalCap\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: void 0\n });\n Object.defineProperty(this, \"_interval\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: void 0\n });\n Object.defineProperty(this, \"_intervalEnd\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: 0\n });\n Object.defineProperty(this, \"_intervalId\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: void 0\n });\n Object.defineProperty(this, \"_timeoutId\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: void 0\n });\n Object.defineProperty(this, \"_queue\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: void 0\n });\n Object.defineProperty(this, \"_queueClass\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: void 0\n });\n Object.defineProperty(this, \"_pendingCount\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: 0\n });\n // The `!` is needed because of https://github.com/microsoft/TypeScript/issues/32194\n Object.defineProperty(this, \"_concurrency\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: void 0\n });\n Object.defineProperty(this, \"_isPaused\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: void 0\n });\n Object.defineProperty(this, \"_resolveEmpty\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: empty\n });\n Object.defineProperty(this, \"_resolveIdle\", {\n enumerable: 
true,\n configurable: true,\n writable: true,\n value: empty\n });\n Object.defineProperty(this, \"_timeout\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: void 0\n });\n Object.defineProperty(this, \"_throwOnTimeout\", {\n enumerable: true,\n configurable: true,\n writable: true,\n value: void 0\n });\n // eslint-disable-next-line @typescript-eslint/consistent-type-assertions\n options = {\n carryoverConcurrencyCount: false,\n intervalCap: Number.POSITIVE_INFINITY,\n interval: 0,\n concurrency: Number.POSITIVE_INFINITY,\n autoStart: true,\n queueClass: PriorityQueue,\n ...options\n };\n if (!(typeof options.intervalCap === 'number' && options.intervalCap >= 1)) {\n throw new TypeError(`Expected \\`intervalCap\\` to be a number from 1 and up, got \\`${(_b = (_a = options.intervalCap) === null || _a === void 0 ? void 0 : _a.toString()) !== null && _b !== void 0 ? _b : ''}\\` (${typeof options.intervalCap})`);\n }\n if (options.interval === undefined || !(Number.isFinite(options.interval) && options.interval >= 0)) {\n throw new TypeError(`Expected \\`interval\\` to be a finite number >= 0, got \\`${(_d = (_c = options.interval) === null || _c === void 0 ? void 0 : _c.toString()) !== null && _d !== void 0 ? _d : ''}\\` (${typeof options.interval})`);\n }\n this._carryoverConcurrencyCount = options.carryoverConcurrencyCount;\n this._isIntervalIgnored = options.intervalCap === Number.POSITIVE_INFINITY || options.interval === 0;\n this._intervalCap = options.intervalCap;\n this._interval = options.interval;\n this._queue = new options.queueClass();\n this._queueClass = options.queueClass;\n this.concurrency = options.concurrency;\n this._timeout = options.timeout;\n this._throwOnTimeout = options.throwOnTimeout === true;\n this._isPaused = options.autoStart === false;\n }\n get _doesIntervalAllowAnother() {\n return this._isIntervalIgnored || this._intervalCount < this._intervalCap;\n }\n get _doesConcurrentAllowAnother() {\n return this._pendingCount < this._concurrency;\n }\n _next() {\n this._pendingCount--;\n this._tryToStartAnother();\n this.emit('next');\n }\n _resolvePromises() {\n this._resolveEmpty();\n this._resolveEmpty = empty;\n if (this._pendingCount === 0) {\n this._resolveIdle();\n this._resolveIdle = empty;\n this.emit('idle');\n }\n }\n _onResumeInterval() {\n this._onInterval();\n this._initializeIntervalIfNeeded();\n this._timeoutId = undefined;\n }\n _isIntervalPaused() {\n const now = Date.now();\n if (this._intervalId === undefined) {\n const delay = this._intervalEnd - now;\n if (delay < 0) {\n // Act as the interval was done\n // We don't need to resume it here because it will be resumed on line 160\n this._intervalCount = (this._carryoverConcurrencyCount) ? 
this._pendingCount : 0;\n }\n else {\n // Act as the interval is pending\n if (this._timeoutId === undefined) {\n this._timeoutId = setTimeout(() => {\n this._onResumeInterval();\n }, delay);\n }\n return true;\n }\n }\n return false;\n }\n _tryToStartAnother() {\n if (this._queue.size === 0) {\n // We can clear the interval (\"pause\")\n // Because we can redo it later (\"resume\")\n if (this._intervalId) {\n clearInterval(this._intervalId);\n }\n this._intervalId = undefined;\n this._resolvePromises();\n return false;\n }\n if (!this._isPaused) {\n const canInitializeInterval = !this._isIntervalPaused();\n if (this._doesIntervalAllowAnother && this._doesConcurrentAllowAnother) {\n const job = this._queue.dequeue();\n if (!job) {\n return false;\n }\n this.emit('active');\n job();\n if (canInitializeInterval) {\n this._initializeIntervalIfNeeded();\n }\n return true;\n }\n }\n return false;\n }\n _initializeIntervalIfNeeded() {\n if (this._isIntervalIgnored || this._intervalId !== undefined) {\n return;\n }\n this._intervalId = setInterval(() => {\n this._onInterval();\n }, this._interval);\n this._intervalEnd = Date.now() + this._interval;\n }\n _onInterval() {\n if (this._intervalCount === 0 && this._pendingCount === 0 && this._intervalId) {\n clearInterval(this._intervalId);\n this._intervalId = undefined;\n }\n this._intervalCount = this._carryoverConcurrencyCount ? this._pendingCount : 0;\n this._processQueue();\n }\n /**\n Executes all queued functions until it reaches the limit.\n */\n _processQueue() {\n // eslint-disable-next-line no-empty\n while (this._tryToStartAnother()) { }\n }\n get concurrency() {\n return this._concurrency;\n }\n set concurrency(newConcurrency) {\n if (!(typeof newConcurrency === 'number' && newConcurrency >= 1)) {\n throw new TypeError(`Expected \\`concurrency\\` to be a number from 1 and up, got \\`${newConcurrency}\\` (${typeof newConcurrency})`);\n }\n this._concurrency = newConcurrency;\n this._processQueue();\n }\n /**\n Adds a sync or async task to the queue. Always returns a promise.\n */\n async add(fn, options = {}) {\n return new Promise((resolve, reject) => {\n const run = async () => {\n this._pendingCount++;\n this._intervalCount++;\n try {\n const operation = (this._timeout === undefined && options.timeout === undefined) ? fn() : pTimeout(Promise.resolve(fn()), (options.timeout === undefined ? this._timeout : options.timeout), () => {\n if (options.throwOnTimeout === undefined ? this._throwOnTimeout : options.throwOnTimeout) {\n reject(timeoutError);\n }\n return undefined;\n });\n const result = await operation;\n resolve(result);\n this.emit('completed', result);\n }\n catch (error) {\n reject(error);\n this.emit('error', error);\n }\n this._next();\n };\n this._queue.enqueue(run, options);\n this._tryToStartAnother();\n this.emit('add');\n });\n }\n /**\n Same as `.add()`, but accepts an array of sync or async functions.\n\n @returns A promise that resolves when all functions are resolved.\n */\n async addAll(functions, options) {\n return Promise.all(functions.map(async (function_) => this.add(function_, options)));\n }\n /**\n Start (or resume) executing enqueued tasks within concurrency limit. 
No need to call this if queue is not paused (via `options.autoStart = false` or by `.pause()` method.)\n */\n start() {\n if (!this._isPaused) {\n return this;\n }\n this._isPaused = false;\n this._processQueue();\n return this;\n }\n /**\n Put queue execution on hold.\n */\n pause() {\n this._isPaused = true;\n }\n /**\n Clear the queue.\n */\n clear() {\n this._queue = new this._queueClass();\n }\n /**\n Can be called multiple times. Useful if you for example add additional items at a later time.\n\n @returns A promise that settles when the queue becomes empty.\n */\n async onEmpty() {\n // Instantly resolve if the queue is empty\n if (this._queue.size === 0) {\n return;\n }\n return new Promise(resolve => {\n const existingResolve = this._resolveEmpty;\n this._resolveEmpty = () => {\n existingResolve();\n resolve();\n };\n });\n }\n /**\n @returns A promise that settles when the queue size is less than the given limit: `queue.size < limit`.\n\n If you want to avoid having the queue grow beyond a certain size you can `await queue.onSizeLessThan()` before adding a new item.\n\n Note that this only limits the number of items waiting to start. There could still be up to `concurrency` jobs already running that this call does not include in its calculation.\n */\n async onSizeLessThan(limit) {\n // Instantly resolve if the queue is empty.\n if (this._queue.size < limit) {\n return;\n }\n return new Promise(resolve => {\n const listener = () => {\n if (this._queue.size < limit) {\n this.removeListener('next', listener);\n resolve();\n }\n };\n this.on('next', listener);\n });\n }\n /**\n The difference with `.onEmpty` is that `.onIdle` guarantees that all work from the queue has finished. `.onEmpty` merely signals that the queue is empty, but it could mean that some promises haven't completed yet.\n\n @returns A promise that settles when the queue becomes empty, and all promises have completed; `queue.size === 0 && queue.pending === 0`.\n */\n async onIdle() {\n // Instantly resolve if none pending and if nothing else is queued\n if (this._pendingCount === 0 && this._queue.size === 0) {\n return;\n }\n return new Promise(resolve => {\n const existingResolve = this._resolveIdle;\n this._resolveIdle = () => {\n existingResolve();\n resolve();\n };\n });\n }\n /**\n Size of the queue, the number of queued items waiting to run.\n */\n get size() {\n return this._queue.size;\n }\n /**\n Size of the queue, filtered by the given options.\n\n For example, this can be used to find the number of items remaining in the queue with a specific priority level.\n */\n sizeBy(options) {\n // eslint-disable-next-line unicorn/no-array-callback-reference\n return this._queue.filter(options).length;\n }\n /**\n Number of running items (no longer in the queue).\n */\n get pending() {\n return this._pendingCount;\n }\n /**\n Whether the queue is currently paused.\n */\n get isPaused() {\n return this._isPaused;\n }\n get timeout() {\n return this._timeout;\n }\n /**\n Set the timeout for future operations.\n */\n set timeout(milliseconds) {\n this._timeout = milliseconds;\n }\n}\n","import { Store, ValidStoreType } from \"../storage/types\";\n\nimport { containsGroup, pathToPrefix } from '../storage/index';\nimport { normalizeStoragePath, isTotalSlice, arrayEquals1D, byteSwap, byteSwapInplace, convertColMajorToRowMajor } from '../util';\nimport { ZarrArrayMetadata, UserAttributes, FillType } from '../types';\nimport { ARRAY_META_KEY, ATTRS_META_KEY } from '../names';\nimport { Attributes } from 
\"../attributes\";\nimport { parseMetadata } from \"../metadata\";\nimport { ArraySelection, DimensionSelection, Indexer, Slice, ChunkProjection } from \"./types\";\nimport { BasicIndexer, isContiguousSelection, normalizeIntegerSelection } from './indexing';\nimport { NestedArray } from \"../nestedArray\";\nimport { RawArray } from \"../rawArray\";\nimport { TypedArray, getTypedArrayCtr } from '../nestedArray/types';\nimport { ValueError, PermissionError, BoundsCheckError, ContainsGroupError, isKeyError } from '../errors';\nimport { getCodec } from \"../compression/registry\";\n\n\nimport type { Codec } from 'numcodecs';\nimport PQueue from 'p-queue';\n\nexport interface GetOptions {\n concurrencyLimit?: number;\n progressCallback?: (progressUpdate: {\n progress: number;\n queueSize: number;\n }) => void;\n}\n\nexport interface SetOptions {\n concurrencyLimit?: number;\n progressCallback?: (progressUpdate: {\n progress: number;\n queueSize: number;\n }) => void;\n}\n\nexport interface GetRawChunkOptions {\n storeOptions: O;\n}\n\nexport class ZarrArray {\n\n public store: Store;\n private compressor: Promise | null;\n\n private _chunkStore: Store | null;\n /**\n * A `Store` providing the underlying storage for array chunks.\n */\n public get chunkStore(): Store {\n if (this._chunkStore) {\n return this._chunkStore;\n }\n return this.store;\n }\n public path: string;\n public keyPrefix: string;\n public readOnly: boolean;\n public cacheMetadata: boolean;\n public cacheAttrs: boolean;\n public meta: ZarrArrayMetadata;\n public attrs: Attributes;\n\n /**\n * Array name following h5py convention.\n */\n public get name(): string | null {\n if (this.path.length > 0) {\n if (this.path[0] !== \"/\") {\n return \"/\" + this.path;\n }\n return this.path;\n }\n return null;\n }\n\n /**\n * Final component of name.\n */\n public get basename(): string | null {\n const name = this.name;\n if (name === null) {\n return null;\n }\n const parts = name.split(\"/\");\n return parts[parts.length - 1];\n }\n\n /**\n * \"A list of integers describing the length of each dimension of the array.\n */\n public get shape(): number[] {\n // this.refreshMetadata();\n return this.meta.shape;\n }\n\n /**\n * A list of integers describing the length of each dimension of a chunk of the array.\n */\n public get chunks(): number[] {\n return this.meta.chunks;\n }\n\n /**\n * Integer describing how many element a chunk contains\n */\n private get chunkSize(): number {\n return this.chunks.reduce((x, y) => x * y, 1);\n }\n\n /**\n * The NumPy data type.\n */\n public get dtype() {\n return this.meta.dtype;\n }\n\n /**\n * A value used for uninitialized portions of the array.\n */\n public get fillValue(): FillType {\n\n const fillTypeValue = this.meta.fill_value;\n\n // TODO extract into function\n if (fillTypeValue === \"NaN\") {\n return NaN;\n } else if (fillTypeValue === \"Infinity\") {\n return Infinity;\n } else if (fillTypeValue === \"-Infinity\") {\n return -Infinity;\n }\n\n return this.meta.fill_value as FillType;\n }\n\n /**\n * Number of dimensions.\n */\n public get nDims() {\n return this.meta.shape.length;\n }\n\n /**\n * The total number of elements in the array.\n */\n public get size() {\n // this.refreshMetadata()\n return this.meta.shape.reduce((x, y) => x * y, 1);\n }\n\n public get length() {\n return this.shape[0];\n }\n\n\n private get _chunkDataShape() {\n if (this.shape.length === 0) {\n return [1];\n } else {\n const s = [];\n for (let i = 0; i < this.shape.length; i++) {\n s[i] = 
Math.ceil(this.shape[i] / this.chunks[i]);\n }\n return s;\n }\n }\n /**\n * A tuple of integers describing the number of chunks along each\n * dimension of the array.\n */\n public get chunkDataShape() {\n // this.refreshMetadata();\n return this._chunkDataShape;\n }\n\n /**\n * Total number of chunks.\n */\n public get numChunks() {\n // this.refreshMetadata();\n return this.chunkDataShape.reduce((x, y) => x * y, 1);\n }\n\n /**\n * Instantiate an array from an initialized store.\n * @param store Array store, already initialized.\n * @param path Storage path.\n * @param readOnly True if array should be protected against modification.\n * @param chunkStore Separate storage for chunks. If not provided, `store` will be used for storage of both chunks and metadata.\n * @param cacheMetadata If true (default), array configuration metadata will be cached for the lifetime of the object.\n * If false, array metadata will be reloaded prior to all data access and modification operations (may incur overhead depending on storage and data access pattern).\n * @param cacheAttrs If true (default), user attributes will be cached for attribute read operations.\n * If false, user attributes are reloaded from the store prior to all attribute read operations.\n */\n public static async create(store: Store, path: null | string = null, readOnly = false, chunkStore: Store | null = null, cacheMetadata = true, cacheAttrs = true) {\n const metadata = await this.loadMetadataForConstructor(store, path);\n return new ZarrArray(store, path, metadata as ZarrArrayMetadata, readOnly, chunkStore, cacheMetadata, cacheAttrs);\n }\n\n private static async loadMetadataForConstructor(store: Store, path: null | string) {\n try {\n path = normalizeStoragePath(path);\n const keyPrefix = pathToPrefix(path);\n const metaStoreValue = await store.getItem(keyPrefix + ARRAY_META_KEY);\n return parseMetadata(metaStoreValue);\n } catch (error) {\n if (await containsGroup(store, path)) {\n throw new ContainsGroupError(path ?? '');\n }\n throw new Error(\"Failed to load metadata for ZarrArray:\" + (error as any).toString());\n }\n }\n\n /**\n * Instantiate an array from an initialized store.\n * @param store Array store, already initialized.\n * @param path Storage path.\n * @param metadata The initial value for the metadata\n * @param readOnly True if array should be protected against modification.\n * @param chunkStore Separate storage for chunks. 
If not provided, `store` will be used for storage of both chunks and metadata.\n * @param cacheMetadata If true (default), array configuration metadata will be cached for the lifetime of the object.\n * If false, array metadata will be reloaded prior to all data access and modification operations (may incur overhead depending on storage and data access pattern).\n * @param cacheAttrs If true (default), user attributes will be cached for attribute read operations.\n * If false, user attributes are reloaded from the store prior to all attribute read operations.\n */\n private constructor(store: Store, path: null | string = null, metadata: ZarrArrayMetadata, readOnly = false, chunkStore: Store | null = null, cacheMetadata = true, cacheAttrs = true) {\n // N.B., expect at this point store is fully initialized with all\n // configuration metadata fully specified and normalized\n\n this.store = store;\n this._chunkStore = chunkStore;\n this.path = normalizeStoragePath(path);\n this.keyPrefix = pathToPrefix(this.path);\n this.readOnly = readOnly;\n this.cacheMetadata = cacheMetadata;\n this.cacheAttrs = cacheAttrs;\n this.meta = metadata;\n if (this.meta.compressor !== null) {\n this.compressor = getCodec(this.meta.compressor);\n } else {\n this.compressor = null;\n }\n\n\n const attrKey = this.keyPrefix + ATTRS_META_KEY;\n this.attrs = new Attributes(this.store, attrKey, this.readOnly, cacheAttrs);\n }\n\n /**\n * (Re)load metadata from store\n */\n public async reloadMetadata() {\n const metaKey = this.keyPrefix + ARRAY_META_KEY;\n const metaStoreValue = this.store.getItem(metaKey);\n this.meta = parseMetadata(await metaStoreValue) as ZarrArrayMetadata;\n return this.meta;\n }\n\n private async refreshMetadata() {\n if (!this.cacheMetadata) {\n await this.reloadMetadata();\n }\n }\n\n public get(selection?: undefined | Slice | \":\" | \"...\" | null | (Slice | null | \":\" | \"...\")[], opts?: GetOptions): Promise | number>;\n public get(selection?: ArraySelection, opts?: GetOptions): Promise | number>;\n public get(selection: ArraySelection = null, opts: GetOptions = {}): Promise | number> {\n return this.getBasicSelection(selection, false, opts);\n }\n\n public getRaw(selection?: undefined | Slice | \":\" | \"...\" | null | (Slice | null | \":\" | \"...\")[], opts?: GetOptions): Promise;\n public getRaw(selection?: ArraySelection, opts?: GetOptions): Promise;\n public getRaw(selection: ArraySelection = null, opts: GetOptions = {}): Promise {\n return this.getBasicSelection(selection, true, opts);\n }\n\n // asRaw = false\n public async getBasicSelection(selection: Slice | \":\" | \"...\" | null | (Slice | null | \":\" | \"...\")[], asRaw?: false, opts?: GetOptions): Promise | number>;\n public async getBasicSelection(selection: ArraySelection, asRaw?: false, opts?: GetOptions): Promise | number>;\n // asRaw = true\n public async getBasicSelection(selection: Slice | \":\" | \"...\" | null | (Slice | null | \":\" | \"...\")[], asRaw?: true, opts?: GetOptions): Promise;\n public async getBasicSelection(selection: ArraySelection, asRaw?: true, opts?: GetOptions): Promise;\n\n public async getBasicSelection(selection: ArraySelection, asRaw = false, { concurrencyLimit = 10, progressCallback }: GetOptions = {}): Promise | RawArray | number> {\n // Refresh metadata\n if (!this.cacheMetadata) {\n await this.reloadMetadata();\n }\n\n // Check fields (TODO?)\n if (this.shape.length === 0) {\n throw new Error(\"Shape [] indexing is not supported yet\");\n } else {\n return 
this.getBasicSelectionND(selection, asRaw, concurrencyLimit, progressCallback);\n }\n }\n\n private getBasicSelectionND(selection: ArraySelection, asRaw: boolean, concurrencyLimit: number, progressCallback?: (progressUpdate: { progress: number; queueSize: number }) => void): Promise | RawArray> {\n const indexer = new BasicIndexer(selection, this);\n return this.getSelection(indexer, asRaw, concurrencyLimit, progressCallback);\n }\n\n private async getSelection(indexer: BasicIndexer, asRaw: boolean, concurrencyLimit: number, progressCallback?: (progressUpdate: { progress: number; queueSize: number }) => void): Promise | RawArray> {\n // We iterate over all chunks which overlap the selection and thus contain data\n // that needs to be extracted. Each chunk is processed in turn, extracting the\n // necessary data and storing into the correct location in the output array.\n\n // N.B., it is an important optimisation that we only visit chunks which overlap\n // the selection. This minimises the number of iterations in the main for loop.\n\n // check fields are sensible (TODO?)\n\n const outDtype = this.dtype;\n const outShape = indexer.shape;\n const outSize = indexer.shape.reduce((x, y) => x * y, 1);\n\n if (asRaw && (outSize === this.chunkSize)) {\n // Optimization: if output strided array _is_ chunk exactly,\n // decode directly as new TypedArray and return\n const itr = indexer.iter();\n const proj = itr.next(); // ensure there is only one projection\n if (proj.done === false && itr.next().done === true) {\n const chunkProjection = proj.value as ChunkProjection;\n const out = await this.decodeDirectToRawArray(chunkProjection, outShape, outSize);\n return out;\n }\n }\n\n const out = asRaw\n ? new RawArray(null, outShape, outDtype)\n : new NestedArray(null, outShape, outDtype);\n\n if (outSize === 0) {\n return out;\n }\n\n // create promise queue with concurrency control\n const queue = new PQueue({ concurrency: concurrencyLimit });\n\n if (progressCallback) {\n\n let progress = 0;\n let queueSize = 0;\n for (const _ of indexer.iter()) queueSize += 1;\n progressCallback({ progress: 0, queueSize: queueSize });\n for (const proj of indexer.iter()) {\n (async () => {\n await queue.add(() => this.chunkGetItem(proj.chunkCoords, proj.chunkSelection, out, proj.outSelection, indexer.dropAxes));\n progress += 1;\n progressCallback({ progress: progress, queueSize: queueSize });\n })();\n }\n\n } else {\n for (const proj of indexer.iter()) {\n queue.add(() => this.chunkGetItem(proj.chunkCoords, proj.chunkSelection, out, proj.outSelection, indexer.dropAxes));\n }\n }\n\n // guarantees that all work on queue has finished\n await queue.onIdle();\n\n // Return scalar instead of zero-dimensional array.\n if (out.shape.length === 0) {\n return out.data[0] as number;\n }\n\n return out;\n }\n\n /**\n * Obtain part or whole of a chunk.\n * @param chunkCoords Indices of the chunk.\n * @param chunkSelection Location of region within the chunk to extract.\n * @param out Array to store result in.\n * @param outSelection Location of region within output array to store results in.\n * @param dropAxes Axes to squeeze out of the chunk.\n */\n private async chunkGetItem(chunkCoords: number[], chunkSelection: DimensionSelection[], out: NestedArray | RawArray, outSelection: DimensionSelection[], dropAxes: null | number[]) {\n if (chunkCoords.length !== this._chunkDataShape.length) {\n throw new ValueError(`Inconsistent shapes: chunkCoordsLength: ${chunkCoords.length}, cDataShapeLength: 
${this.chunkDataShape.length}`);\n }\n\n const cKey = this.chunkKey(chunkCoords);\n try {\n const cdata = await this.chunkStore.getItem(cKey);\n const decodedChunk = await this.decodeChunk(cdata);\n\n if (out instanceof NestedArray) {\n\n if (isContiguousSelection(outSelection) && isTotalSlice(chunkSelection, this.chunks) && !this.meta.filters) {\n // Optimization: we want the whole chunk, and the destination is\n // contiguous, so we can decompress directly from the chunk\n // into the destination array\n\n // TODO check order\n // TODO filters..\n out.set(outSelection, this.toNestedArray(decodedChunk));\n return;\n }\n\n // Decode chunk\n const chunk = this.toNestedArray(decodedChunk);\n const tmp = chunk.get(chunkSelection);\n\n if (dropAxes !== null) {\n throw new Error(\"Drop axes is not supported yet\");\n }\n\n out.set(outSelection, tmp as NestedArray);\n\n } else {\n /* RawArray\n Copies chunk by index directly into output. Doesn't matter if selection is contiguous\n since store/output are different shapes/strides.\n */\n out.set(outSelection, this.chunkBufferToRawArray(decodedChunk), chunkSelection);\n }\n\n } catch (error) {\n if (isKeyError(error)) {\n // fill with scalar if cKey doesn't exist in store\n if (this.fillValue !== null) {\n out.set(outSelection, this.fillValue);\n }\n } else {\n // Different type of error - rethrow\n throw error;\n }\n }\n }\n\n public async getRawChunk(chunkCoords: number[], opts?: GetRawChunkOptions): Promise {\n if (chunkCoords.length !== this.shape.length) {\n throw new Error(`Chunk coordinates ${chunkCoords.join(\".\")} do not correspond to shape ${this.shape}.`);\n }\n try {\n for (let i = 0; i < chunkCoords.length; i++) {\n const dimLength = Math.ceil(this.shape[i] / this.chunks[i]);\n chunkCoords[i] = normalizeIntegerSelection(chunkCoords[i], dimLength);\n }\n } catch (error) {\n if (error instanceof BoundsCheckError) {\n throw new BoundsCheckError(`index ${chunkCoords.join(\".\")} is out of bounds for shape: ${this.shape} and chunks ${this.chunks}`);\n } else {\n throw error;\n }\n }\n const cKey = this.chunkKey(chunkCoords);\n const cdata = this.chunkStore.getItem(cKey, opts?.storeOptions);\n const buffer = await this.decodeChunk(await cdata);\n const outShape = this.chunks.filter(d => d !== 1); // squeeze chunk dim if 1\n return new RawArray(buffer, outShape, this.dtype);\n }\n\n private chunkKey(chunkCoords: number[]) {\n const sep = this.meta.dimension_separator ?? 
\".\";\n return this.keyPrefix + chunkCoords.join(sep);\n }\n\n private ensureByteArray(chunkData: ValidStoreType): Uint8Array {\n if (typeof chunkData === \"string\") {\n return new Uint8Array(Buffer.from(chunkData).buffer);\n }\n return new Uint8Array(chunkData);\n }\n\n private toTypedArray(buffer: Buffer | ArrayBuffer) {\n return new (getTypedArrayCtr(this.dtype))(buffer);\n }\n\n private toNestedArray(data: ValidStoreType) {\n const buffer = this.ensureByteArray(data).buffer;\n\n return new NestedArray(buffer, this.chunks, this.dtype);\n }\n\n private async decodeChunk(chunkData: ValidStoreType) {\n let bytes = this.ensureByteArray(chunkData);\n\n if (this.compressor !== null) {\n bytes = await (await this.compressor).decode(bytes);\n }\n\n if (this.dtype.includes('>')) {\n // Need to flip bytes for Javascript TypedArrays\n // We flip bytes in-place to avoid creating an extra copy of the decoded buffer.\n byteSwapInplace(this.toTypedArray(bytes.buffer));\n }\n\n if (this.meta.order === \"F\" && this.nDims > 1) {\n // We need to transpose the array, because this library only support C-order.\n const src = this.toTypedArray(bytes.buffer);\n const out = new (getTypedArrayCtr(this.dtype))(src.length);\n convertColMajorToRowMajor(src, out, this.chunks);\n return out.buffer;\n }\n\n // TODO filtering etc\n return bytes.buffer;\n }\n\n private chunkBufferToRawArray(buffer: Buffer | ArrayBuffer) {\n return new RawArray(buffer, this.chunks, this.dtype);\n }\n\n private async decodeDirectToRawArray({ chunkCoords }: ChunkProjection, outShape: number[], outSize: number): Promise {\n const cKey = this.chunkKey(chunkCoords);\n try {\n const cdata = await this.chunkStore.getItem(cKey);\n return new RawArray(await this.decodeChunk(cdata), outShape, this.dtype);\n } catch (error) {\n if (isKeyError(error)) {\n // fill with scalar if item doesn't exist\n const data = new (getTypedArrayCtr(this.dtype))(outSize);\n return new RawArray(data.fill(this.fillValue as number), outShape);\n } else {\n // Different type of error - rethrow\n throw error;\n }\n }\n }\n\n public async set(selection: ArraySelection = null, value: any, opts: SetOptions = {}) {\n await this.setBasicSelection(selection, value, opts);\n }\n\n public async setBasicSelection(selection: ArraySelection, value: any, { concurrencyLimit = 10, progressCallback }: SetOptions = {}) {\n if (this.readOnly) {\n throw new PermissionError(\"Object is read only\");\n }\n\n if (!this.cacheMetadata) {\n await this.reloadMetadata();\n }\n\n if (this.shape.length === 0) {\n throw new Error(\"Shape [] indexing is not supported yet\");\n } else {\n await this.setBasicSelectionND(selection, value, concurrencyLimit, progressCallback);\n }\n }\n\n private async setBasicSelectionND(selection: ArraySelection, value: any, concurrencyLimit: number, progressCallback?: (progressUpdate: { progress: number; queueSize: number }) => void) {\n const indexer = new BasicIndexer(selection, this);\n await this.setSelection(indexer, value, concurrencyLimit, progressCallback);\n }\n\n private getChunkValue(proj: ChunkProjection, indexer: Indexer, value: number | NestedArray, selectionShape: number[]): number | NestedArray {\n let chunkValue: number | NestedArray;\n if (selectionShape.length === 0) {\n chunkValue = value;\n } else if (typeof value === \"number\") {\n chunkValue = value;\n } else {\n chunkValue = value.get(proj.outSelection);\n // tslint:disable-next-line: strict-type-predicates\n if (indexer.dropAxes !== null) {\n throw new Error(\"Handling drop axes not 
supported yet\");\n }\n }\n return chunkValue;\n }\n\n private async setSelection(indexer: Indexer, value: number | NestedArray, concurrencyLimit: number, progressCallback?: (progressUpdate: { progress: number; queueSize: number }) => void) {\n // We iterate over all chunks which overlap the selection and thus contain data\n // that needs to be replaced. Each chunk is processed in turn, extracting the\n // necessary data from the value array and storing into the chunk array.\n\n // N.B., it is an important optimisation that we only visit chunks which overlap\n // the selection. This minimises the number of iterations in the main for loop.\n\n // TODO? check fields are sensible\n\n // Determine indices of chunks overlapping the selection\n const selectionShape = indexer.shape;\n\n // Check value shape\n if (selectionShape.length === 0) {\n // Setting a single value\n } else if (typeof value === \"number\") {\n // Setting a scalar value\n } else if (value instanceof NestedArray) {\n // TODO: non stringify equality check\n if (!arrayEquals1D(value.shape, selectionShape)) {\n throw new ValueError(`Shape mismatch in source NestedArray and set selection: ${value.shape} and ${selectionShape}`);\n }\n } else {\n // TODO support TypedArrays, buffers, etc\n throw new Error(\"Unknown data type for setting :(\");\n }\n\n const queue = new PQueue({ concurrency: concurrencyLimit });\n\n if (progressCallback) {\n\n let queueSize = 0;\n for (const _ of indexer.iter()) queueSize += 1;\n\n let progress = 0;\n progressCallback({ progress: 0, queueSize: queueSize });\n for (const proj of indexer.iter()) {\n const chunkValue = this.getChunkValue(proj, indexer, value, selectionShape);\n (async () => {\n await queue.add(() => this.chunkSetItem(proj.chunkCoords, proj.chunkSelection, chunkValue));\n progress += 1;\n progressCallback({ progress: progress, queueSize: queueSize });\n })();\n }\n\n } else {\n\n for (const proj of indexer.iter()) {\n const chunkValue = this.getChunkValue(proj, indexer, value, selectionShape);\n queue.add(() => this.chunkSetItem(proj.chunkCoords, proj.chunkSelection, chunkValue));\n }\n\n }\n\n // guarantees that all work on queue has finished\n await queue.onIdle();\n }\n\n private async chunkSetItem(chunkCoords: number[], chunkSelection: DimensionSelection[], value: number | NestedArray) {\n if (this.meta.order === \"F\" && this.nDims > 1) {\n throw new Error(\"Setting content for arrays in F-order is not supported.\");\n }\n\n // Obtain key for chunk storage\n const chunkKey = this.chunkKey(chunkCoords);\n\n let chunk: null | TypedArray = null;\n\n const dtypeConstr = getTypedArrayCtr(this.dtype);\n const chunkSize = this.chunkSize;\n\n if (isTotalSlice(chunkSelection, this.chunks)) {\n // Totally replace chunk\n\n // Optimization: we are completely replacing the chunk, so no need\n // to access the existing chunk data\n\n if (typeof value === \"number\") {\n // TODO get the right type here\n chunk = new dtypeConstr(chunkSize);\n chunk.fill(value);\n } else {\n chunk = value.flatten();\n }\n } else {\n\n // partially replace the contents of this chunk\n\n // Existing chunk data\n let chunkData: TypedArray;\n\n try {\n // Chunk is initialized if this does not error\n const chunkStoreData = await this.chunkStore.getItem(chunkKey);\n const dBytes = await this.decodeChunk(chunkStoreData);\n chunkData = this.toTypedArray(dBytes);\n } catch (error) {\n if (isKeyError(error)) {\n // Chunk is not initialized\n chunkData = new dtypeConstr(chunkSize);\n if (this.fillValue !== null) {\n 
chunkData.fill(this.fillValue);\n }\n } else {\n // Different type of error - rethrow\n throw error;\n }\n }\n\n const chunkNestedArray = new NestedArray(\n chunkData,\n this.chunks,\n this.dtype,\n );\n chunkNestedArray.set(chunkSelection, value);\n chunk = chunkNestedArray.flatten();\n }\n const chunkData = await this.encodeChunk(chunk);\n this.chunkStore.setItem(chunkKey, chunkData);\n }\n\n private async encodeChunk(chunk: TypedArray) {\n if (this.dtype.includes('>')) {\n /*\n * If big endian, flip bytes before applying compression and setting store.\n *\n * Here we create a copy (not in-place byteswapping) to avoid flipping the\n * bytes in the buffers of user-created Raw- and NestedArrays.\n */\n chunk = byteSwap(chunk);\n }\n\n if (this.compressor !== null) {\n const bytes = new Uint8Array(chunk.buffer);\n const cbytes = await (await this.compressor).encode(bytes);\n return cbytes.buffer;\n }\n\n // TODO: filters, etc\n return chunk.buffer;\n }\n}\n","import { SyncStore, ValidStoreType } from \"./types\";\nimport { createProxy, MutableMappingProxy } from \"../mutableMapping\";\nimport { KeyError } from \"../errors\";\n\nexport class MemoryStore implements SyncStore {\n listDir?: undefined;\n rmDir?: undefined;\n getSize?: undefined;\n rename?: undefined;\n\n root: { [key: string]: any };\n\n constructor(root = {}) {\n this.root = root;\n }\n\n public proxy(): MutableMappingProxy {\n return createProxy(this);\n }\n\n private getParent(item: string): [any, string] {\n let parent = this.root;\n const segments = item.split('/');\n // find the parent container\n for (const k of segments.slice(0, segments.length - 1)) {\n parent = parent[k];\n if (!parent) {\n throw Error(item);\n }\n // if not isinstance(parent, self.cls):\n // raise KeyError(item)\n }\n return [parent, segments[segments.length - 1]];\n }\n\n private requireParent(item: string): [any, string] {\n let parent = this.root;\n const segments = item.split('/');\n\n // require the parent container\n for (const k of segments.slice(0, segments.length - 1)) {\n // TODO: verify correct implementation\n if (parent[k] === undefined) {\n parent[k] = {};\n }\n parent = parent[k];\n }\n\n return [parent, segments[segments.length - 1]];\n }\n\n getItem(item: string) {\n const [parent, key] = this.getParent(item);\n const value = parent[key];\n if (value === undefined) {\n throw new KeyError(item);\n }\n return value;\n }\n\n setItem(item: string, value: any): boolean {\n const [parent, key] = this.requireParent(item);\n parent[key] = value;\n return true;\n }\n\n deleteItem(item: string): boolean {\n const [parent, key] = this.getParent(item);\n return delete parent[key];\n }\n\n containsItem(item: string): boolean {\n // TODO: more sane implementation\n try {\n return this.getItem(item) !== undefined;\n } catch (e) {\n return false;\n }\n }\n\n keys(): string[] {\n throw new Error(\"Method not implemented.\");\n }\n\n\n}","import { ChunksArgument, DtypeString, CompressorConfig, Order, Filter, FillType, PersistenceMode } from './types';\nimport { Store } from './storage/types';\nimport { ZarrArray } from './core/index';\nimport { MemoryStore } from './storage/memoryStore';\nimport { initArray, containsArray, containsGroup } from './storage/index';\nimport { TypedArray } from './nestedArray/types';\nimport { NestedArray } from './nestedArray/index';\nimport { normalizeStoragePath } from './util';\nimport { ContainsArrayError, ValueError, ArrayNotFoundError, ContainsGroupError } from './errors';\nimport { HTTPStore } from 
'./storage/httpStore';\n\nexport type CreateArrayOptions = {\n shape: number | number[];\n chunks?: ChunksArgument;\n dtype?: DtypeString;\n compressor?: CompressorConfig | null;\n fillValue?: FillType;\n order?: Order;\n store?: Store;\n overwrite?: boolean;\n path?: string | null;\n chunkStore?: Store;\n filters?: Filter[];\n cacheMetadata?: boolean;\n cacheAttrs?: boolean;\n readOnly?: boolean;\n dimensionSeparator?: '.' | '/';\n};\n\n/**\n * \n * @param shape Array shape.\n * @param chunks Chunk shape. If `true`, will be guessed from `shape` and `dtype`. If\n * `false`, will be set to `shape`, i.e., single chunk for the whole array.\n * If an int, the chunk size in each dimension will be given by the value\n * of `chunks`. Default is `true`.\n * @param dtype NumPy dtype.\n * @param compressor Primary compressor.\n * @param fillValue Default value to use for uninitialized portions of the array.\n * @param order Memory layout to be used within each chunk.\n * @param store Store or path to directory in file system or name of zip file.\n * @param overwrite If True, delete all pre-existing data in `store` at `path` before creating the array.\n * @param path Path under which array is stored.\n * @param chunkStore Separate storage for chunks. If not provided, `store` will be used for storage of both chunks and metadata.\n * @param filters Sequence of filters to use to encode chunk data prior to compression.\n * @param cacheMetadata If `true` (default), array configuration metadata will be cached for the\n * lifetime of the object. If `false`, array metadata will be reloaded\n * prior to all data access and modification operations (may incur\n * overhead depending on storage and data access pattern).\n * @param cacheAttrs If `true` (default), user attributes will be cached for attribute read\n * operations. 
If `false`, user attributes are reloaded from the store prior\n * to all attribute read operations.\n * @param readOnly `true` if array should be protected against modification, defaults to `false`.\n * @param dimensionSeparator if specified, defines an alternate string separator placed between the dimension chunks.\n */\nexport async function create(\n { shape, chunks = true, dtype = \" {\n\n store = normalizeStoreArgument(store);\n\n await initArray(store, shape, chunks, dtype, path, compressor, fillValue, order, overwrite, chunkStore, filters, dimensionSeparator);\n const z = await ZarrArray.create(store, path, readOnly, chunkStore, cacheMetadata, cacheAttrs);\n\n return z;\n}\n\n\n/**\n * Create an empty array.\n */\nexport async function empty(shape: number | number[], opts: Omit = {}) {\n opts.fillValue = null;\n return create({ shape, ...opts });\n}\n\n/**\n * Create an array, with zero being used as the default value for\n * uninitialized portions of the array.\n */\nexport async function zeros(shape: number | number[], opts: Omit = {}) {\n opts.fillValue = 0;\n return create({ shape, ...opts });\n}\n\n/**\n * Create an array, with one being used as the default value for\n * uninitialized portions of the array.\n */\nexport async function ones(shape: number | number[], opts: Omit = {}) {\n opts.fillValue = 1;\n return create({ shape, ...opts });\n}\n\n/**\n * Create an array, with `fill_value` being used as the default value for\n * uninitialized portions of the array\n */\nexport async function full(shape: number | number[], fillValue: FillType, opts: Omit = {}) {\n opts.fillValue = fillValue;\n return create({ shape, ...opts });\n}\n\nexport async function array(data: Buffer | ArrayBuffer | NestedArray, opts: Omit = {}) {\n // TODO: infer chunks?\n\n let shape = null;\n if (data instanceof NestedArray) {\n shape = data.shape;\n opts.dtype = opts.dtype === undefined ? data.dtype : opts.dtype;\n } else {\n shape = data.byteLength;\n // TODO: infer datatype\n }\n // TODO: support TypedArray\n\n const wasReadOnly = opts.readOnly === undefined ? false : opts.readOnly;\n opts.readOnly = false;\n\n const z = await create({ shape, ...opts });\n await z.set(null, data);\n z.readOnly = wasReadOnly;\n\n return z;\n}\n\ntype OpenArrayOptions = Partial;\n\nexport async function openArray(\n { shape, mode = \"a\", chunks = true, dtype = \" {\n /**\n * A `Store` providing the underlying storage for the group.\n */\n public store: Store;\n\n /**\n * Storage path.\n */\n public path: string;\n\n /**\n * Group name following h5py convention.\n */\n public get name(): string {\n if (this.path.length > 0) {\n if (this.path[0] !== \"/\") {\n return \"/\" + this.path;\n }\n return this.path;\n }\n return \"/\";\n }\n\n /**\n * Final component of name.\n */\n public get basename(): string {\n const parts = this.name.split(\"/\");\n return parts[parts.length - 1];\n }\n\n /**\n * An object containing user-defined attributes. 
Note that\n * attribute values are stored as a JSON string in a store.\n */\n public attrs: Attributes;\n\n\n private _chunkStore: Store | null;\n /**\n * A `Store` providing the underlying storage for array chunks.\n */\n public get chunkStore(): Store {\n if (this._chunkStore) {\n return this._chunkStore;\n }\n return this.store;\n }\n\n private keyPrefix: string;\n public readOnly: boolean;\n private meta: ZarrGroupMetadata;\n\n public static async create(store: Store, path: string | null = null, readOnly = false, chunkStore: Store | null = null, cacheAttrs = true) {\n const metadata = await this.loadMetadataForConstructor(store, path);\n return new Group(store, path, metadata as ZarrGroupMetadata, readOnly, chunkStore, cacheAttrs);\n }\n\n private static async loadMetadataForConstructor(store: Store, path: null | string) {\n path = normalizeStoragePath(path);\n const keyPrefix = pathToPrefix(path);\n try {\n const metaStoreValue = await store.getItem(keyPrefix + GROUP_META_KEY);\n return parseMetadata(metaStoreValue);\n } catch (error) {\n if (await containsArray(store, path)) {\n throw new ContainsArrayError(path);\n }\n throw new GroupNotFoundError(path);\n }\n }\n\n private constructor(store: Store, path: string | null = null, metadata: ZarrGroupMetadata, readOnly = false, chunkStore: Store | null = null, cacheAttrs = true) {\n this.store = store;\n this._chunkStore = chunkStore;\n this.path = normalizeStoragePath(path);\n this.keyPrefix = pathToPrefix(this.path);\n this.readOnly = readOnly;\n this.meta = metadata;\n\n // Initialize attributes\n const attrKey = this.keyPrefix + ATTRS_META_KEY;\n this.attrs = new Attributes(this.store, attrKey, this.readOnly, cacheAttrs);\n }\n\n private itemPath(item: string | null) {\n const absolute = typeof item === \"string\" && item.length > 0 && item[0] === '/';\n const path = normalizeStoragePath(item);\n // Absolute path\n if (!absolute && this.path.length > 0) {\n return this.keyPrefix + path;\n }\n return path;\n }\n\n /**\n * Create a sub-group.\n */\n public async createGroup(name: string, overwrite = false) {\n if (this.readOnly) {\n throw new PermissionError(\"group is read only\");\n }\n const path = this.itemPath(name);\n await initGroup(this.store, path, this._chunkStore, overwrite);\n return Group.create(this.store, path, this.readOnly, this._chunkStore, this.attrs.cache);\n }\n\n /**\n * Obtain a sub-group, creating one if it doesn't exist.\n */\n public async requireGroup(name: string, overwrite = false) {\n if (this.readOnly) {\n throw new PermissionError(\"group is read only\");\n }\n const path = this.itemPath(name);\n if (!await containsGroup(this.store, path)) {\n await initGroup(this.store, path, this._chunkStore, overwrite);\n }\n return Group.create(this.store, path, this.readOnly, this._chunkStore, this.attrs.cache);\n }\n\n private getOptsForArrayCreation(name: string, opts: Omit = {}) {\n const path = this.itemPath(name);\n opts.path = path;\n\n if (opts.cacheAttrs === undefined) {\n opts.cacheAttrs = this.attrs.cache;\n }\n opts.store = this.store;\n opts.chunkStore = this.chunkStore;\n return opts;\n }\n\n /**\n * Creates an array\n */\n public array(name: string, data: Buffer | ArrayBuffer | NestedArray, opts?: Omit, overwrite?: boolean) {\n if (this.readOnly) {\n throw new PermissionError(\"group is read only\");\n }\n opts = this.getOptsForArrayCreation(name, opts);\n opts.overwrite = overwrite === undefined ? 
opts.overwrite : overwrite;\n\n return array(data, opts);\n }\n\n public empty(name: string, shape: number | number[], opts: Omit= {}) {\n if (this.readOnly) {\n throw new PermissionError(\"group is read only\");\n }\n opts = this.getOptsForArrayCreation(name, opts);\n\n return empty(shape, opts);\n }\n\n public zeros(name: string, shape: number | number[], opts: Omit= {}) {\n if (this.readOnly) {\n throw new PermissionError(\"group is read only\");\n }\n opts = this.getOptsForArrayCreation(name, opts);\n\n return zeros(shape, opts);\n }\n\n public ones(name: string, shape: number | number[], opts: Omit= {}) {\n if (this.readOnly) {\n throw new PermissionError(\"group is read only\");\n }\n opts = this.getOptsForArrayCreation(name, opts);\n\n return ones(shape, opts);\n }\n\n public full(name: string, shape: number | number[], fillValue: number | null, opts: Omit = {}) {\n if (this.readOnly) {\n throw new PermissionError(\"group is read only\");\n }\n opts = this.getOptsForArrayCreation(name, opts);\n\n return full(shape, fillValue, opts);\n }\n\n public createDataset(name: string, shape?: number | number[], data?: Buffer | ArrayBuffer | NestedArray, opts?: Omit) {\n if (this.readOnly) {\n throw new PermissionError(\"group is read only\");\n }\n opts = this.getOptsForArrayCreation(name, opts);\n\n let z: Promise;\n if (data === undefined) {\n if (shape === undefined) {\n throw new ValueError(\"Shape must be set if no data is passed to CreateDataset\");\n }\n z = create({ shape, ...opts });\n } else {\n z = array(data, opts);\n }\n return z;\n }\n\n async getItem(item: string) {\n const path = this.itemPath(item);\n if (await containsArray(this.store, path)) {\n return ZarrArray.create(this.store, path, this.readOnly, this.chunkStore, undefined, this.attrs.cache);\n } else if (await containsGroup(this.store, path)) {\n return Group.create(this.store, path, this.readOnly, this._chunkStore, this.attrs.cache);\n }\n throw new KeyError(item);\n }\n\n async setItem(item: string, value: any) {\n await this.array(item, value, {}, true);\n return true;\n }\n\n // eslint-disable-next-line @typescript-eslint/no-unused-vars\n async deleteItem(_item: string): Promise {\n if (this.readOnly) {\n throw new PermissionError(\"group is read only\");\n }\n throw new Error(\"Method not implemented.\");\n }\n\n async containsItem(item: string) {\n const path = this.itemPath(item);\n return await containsArray(this.store, path) || containsGroup(this.store, path);\n }\n\n proxy(): AsyncMutableMappingProxy {\n return createProxy(this);\n }\n}\n\n/**\n * Create a group.\n * @param store Store or path to directory in file system.\n * @param path Group path within store.\n * @param chunkStore Separate storage for chunks. 
If not provided, `store` will be used for storage of both chunks and metadata.\n * @param overwrite If `true`, delete any pre-existing data in `store` at `path` before creating the group.\n * @param cacheAttrs If `true` (default), user attributes will be cached for attribute read operations.\n * If `false`, user attributes are reloaded from the store prior to all attribute read operations.\n */\nexport async function group(store?: Store | string, path: string | null = null, chunkStore?: Store, overwrite = false, cacheAttrs = true) {\n store = normalizeStoreArgument(store);\n path = normalizeStoragePath(path);\n\n if (overwrite || await containsGroup(store)) {\n await initGroup(store, path, chunkStore, overwrite);\n }\n\n return Group.create(store, path, false, chunkStore, cacheAttrs);\n}\n\n/**\n * Open a group using file-mode-like semantics.\n * @param store Store or path to directory in file system or name of zip file.\n * @param path Group path within store.\n * @param mode Persistence mode, see `PersistenceMode` type.\n * @param chunkStore Store or path to directory in file system or name of zip file.\n * @param cacheAttrs If `true` (default), user attributes will be cached for attribute read operations\n * If False, user attributes are reloaded from the store prior to all attribute read operations.\n *\n */\nexport async function openGroup(store?: Store | string, path: string | null = null, mode: PersistenceMode = \"a\", chunkStore?: Store, cacheAttrs = true) {\n store = normalizeStoreArgument(store);\n if (chunkStore !== undefined) {\n chunkStore = normalizeStoreArgument(store);\n }\n path = normalizeStoragePath(path);\n\n if (mode === \"r\" || mode === \"r+\") {\n if (!await containsGroup(store, path)) {\n if (await containsArray(store, path)) {\n throw new ContainsArrayError(path);\n }\n throw new GroupNotFoundError(path);\n }\n } else if (mode === \"w\") {\n await initGroup(store, path, chunkStore, true);\n } else if (mode === \"a\") {\n if (!await containsGroup(store, path)) {\n if (await containsArray(store, path)) {\n throw new ContainsArrayError(path);\n }\n await initGroup(store, path, chunkStore);\n }\n } else if (mode === \"w-\" || (mode as any) === \"x\") {\n if (await containsArray(store, path)) {\n throw new ContainsArrayError(path);\n } else if (await containsGroup(store, path)) {\n throw new ContainsGroupError(path);\n } else {\n await initGroup(store, path, chunkStore);\n }\n } else {\n throw new ValueError(`Invalid mode argument: ${mode}`);\n }\n\n const readOnly = mode === \"r\";\n return Group.create(store, path, readOnly, chunkStore, cacheAttrs);\n}\n","/*! pako 2.0.3 https://github.com/nodeca/pako @license (MIT AND Zlib) */\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. 
Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\n/* eslint-disable space-unary-ops */\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\n\n//const Z_FILTERED = 1;\n//const Z_HUFFMAN_ONLY = 2;\n//const Z_RLE = 3;\nconst Z_FIXED = 4;\n//const Z_DEFAULT_STRATEGY = 0;\n\n/* Possible values of the data_type field (though see inflate()) */\nconst Z_BINARY = 0;\nconst Z_TEXT = 1;\n//const Z_ASCII = 1; // = Z_TEXT\nconst Z_UNKNOWN = 2;\n\n/*============================================================================*/\n\n\nfunction zero(buf) { let len = buf.length; while (--len >= 0) { buf[len] = 0; } }\n\n// From zutil.h\n\nconst STORED_BLOCK = 0;\nconst STATIC_TREES = 1;\nconst DYN_TREES = 2;\n/* The three kinds of block type */\n\nconst MIN_MATCH = 3;\nconst MAX_MATCH = 258;\n/* The minimum and maximum match lengths */\n\n// From deflate.h\n/* ===========================================================================\n * Internal compression state.\n */\n\nconst LENGTH_CODES = 29;\n/* number of length codes, not counting the special END_BLOCK code */\n\nconst LITERALS = 256;\n/* number of literal bytes 0..255 */\n\nconst L_CODES = LITERALS + 1 + LENGTH_CODES;\n/* number of Literal or Length codes, including the END_BLOCK code */\n\nconst D_CODES = 30;\n/* number of distance codes */\n\nconst BL_CODES = 19;\n/* number of codes used to transfer the bit lengths */\n\nconst HEAP_SIZE = 2 * L_CODES + 1;\n/* maximum heap size */\n\nconst MAX_BITS = 15;\n/* All codes must not exceed MAX_BITS bits */\n\nconst Buf_size = 16;\n/* size of bit buffer in bi_buf */\n\n\n/* ===========================================================================\n * Constants\n */\n\nconst MAX_BL_BITS = 7;\n/* Bit length codes must not exceed MAX_BL_BITS bits */\n\nconst END_BLOCK = 256;\n/* end of block literal code */\n\nconst REP_3_6 = 16;\n/* repeat previous bit length 3-6 times (2 bits of repeat count) */\n\nconst REPZ_3_10 = 17;\n/* repeat a zero length 3-10 times (3 bits of repeat count) */\n\nconst REPZ_11_138 = 18;\n/* repeat a zero length 11-138 times (7 bits of repeat count) */\n\n/* eslint-disable comma-spacing,array-bracket-spacing */\nconst extra_lbits = /* extra bits for each length code */\n new Uint8Array([0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0]);\n\nconst extra_dbits = /* extra bits for each distance code */\n new Uint8Array([0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13]);\n\nconst extra_blbits = /* extra bits for each bit length code */\n new Uint8Array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7]);\n\nconst bl_order =\n new Uint8Array([16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15]);\n/* eslint-enable comma-spacing,array-bracket-spacing */\n\n/* The lengths of the bit length codes are sent in order of decreasing\n * probability, to avoid transmitting the lengths for unused bit length codes.\n */\n\n/* ===========================================================================\n * Local data. These are initialized only once.\n */\n\n// We pre-fill arrays with 0 to avoid uninitialized gaps\n\nconst DIST_CODE_LEN = 512; /* see definition of array dist_code below */\n\n// !!!! 
Use flat array instead of structure, Freq = i*2, Len = i*2+1\nconst static_ltree = new Array((L_CODES + 2) * 2);\nzero(static_ltree);\n/* The static literal tree. Since the bit lengths are imposed, there is no\n * need for the L_CODES extra codes used during heap construction. However\n * The codes 286 and 287 are needed to build a canonical tree (see _tr_init\n * below).\n */\n\nconst static_dtree = new Array(D_CODES * 2);\nzero(static_dtree);\n/* The static distance tree. (Actually a trivial tree since all codes use\n * 5 bits.)\n */\n\nconst _dist_code = new Array(DIST_CODE_LEN);\nzero(_dist_code);\n/* Distance codes. The first 256 values correspond to the distances\n * 3 .. 258, the last 256 values correspond to the top 8 bits of\n * the 15 bit distances.\n */\n\nconst _length_code = new Array(MAX_MATCH - MIN_MATCH + 1);\nzero(_length_code);\n/* length code for each normalized match length (0 == MIN_MATCH) */\n\nconst base_length = new Array(LENGTH_CODES);\nzero(base_length);\n/* First normalized length for each code (0 = MIN_MATCH) */\n\nconst base_dist = new Array(D_CODES);\nzero(base_dist);\n/* First normalized distance for each code (0 = distance of 1) */\n\n\nfunction StaticTreeDesc(static_tree, extra_bits, extra_base, elems, max_length) {\n\n this.static_tree = static_tree; /* static tree or NULL */\n this.extra_bits = extra_bits; /* extra bits for each code or NULL */\n this.extra_base = extra_base; /* base index for extra_bits */\n this.elems = elems; /* max number of elements in the tree */\n this.max_length = max_length; /* max bit length for the codes */\n\n // show if `static_tree` has data or dummy - needed for monomorphic objects\n this.has_stree = static_tree && static_tree.length;\n}\n\n\nlet static_l_desc;\nlet static_d_desc;\nlet static_bl_desc;\n\n\nfunction TreeDesc(dyn_tree, stat_desc) {\n this.dyn_tree = dyn_tree; /* the dynamic tree */\n this.max_code = 0; /* largest code with non zero frequency */\n this.stat_desc = stat_desc; /* the corresponding static tree */\n}\n\n\n\nconst d_code = (dist) => {\n\n return dist < 256 ? 
_dist_code[dist] : _dist_code[256 + (dist >>> 7)];\n};\n\n\n/* ===========================================================================\n * Output a short LSB first on the stream.\n * IN assertion: there is enough room in pendingBuf.\n */\nconst put_short = (s, w) => {\n// put_byte(s, (uch)((w) & 0xff));\n// put_byte(s, (uch)((ush)(w) >> 8));\n s.pending_buf[s.pending++] = (w) & 0xff;\n s.pending_buf[s.pending++] = (w >>> 8) & 0xff;\n};\n\n\n/* ===========================================================================\n * Send a value on a given number of bits.\n * IN assertion: length <= 16 and value fits in length bits.\n */\nconst send_bits = (s, value, length) => {\n\n if (s.bi_valid > (Buf_size - length)) {\n s.bi_buf |= (value << s.bi_valid) & 0xffff;\n put_short(s, s.bi_buf);\n s.bi_buf = value >> (Buf_size - s.bi_valid);\n s.bi_valid += length - Buf_size;\n } else {\n s.bi_buf |= (value << s.bi_valid) & 0xffff;\n s.bi_valid += length;\n }\n};\n\n\nconst send_code = (s, c, tree) => {\n\n send_bits(s, tree[c * 2]/*.Code*/, tree[c * 2 + 1]/*.Len*/);\n};\n\n\n/* ===========================================================================\n * Reverse the first len bits of a code, using straightforward code (a faster\n * method would use a table)\n * IN assertion: 1 <= len <= 15\n */\nconst bi_reverse = (code, len) => {\n\n let res = 0;\n do {\n res |= code & 1;\n code >>>= 1;\n res <<= 1;\n } while (--len > 0);\n return res >>> 1;\n};\n\n\n/* ===========================================================================\n * Flush the bit buffer, keeping at most 7 bits in it.\n */\nconst bi_flush = (s) => {\n\n if (s.bi_valid === 16) {\n put_short(s, s.bi_buf);\n s.bi_buf = 0;\n s.bi_valid = 0;\n\n } else if (s.bi_valid >= 8) {\n s.pending_buf[s.pending++] = s.bi_buf & 0xff;\n s.bi_buf >>= 8;\n s.bi_valid -= 8;\n }\n};\n\n\n/* ===========================================================================\n * Compute the optimal bit lengths for a tree and update the total bit length\n * for the current block.\n * IN assertion: the fields freq and dad are set, heap[heap_max] and\n * above are the tree nodes sorted by increasing frequency.\n * OUT assertions: the field len is set to the optimal bit length, the\n * array bl_count contains the frequencies for each bit length.\n * The length opt_len is updated; static_len is also updated if stree is\n * not null.\n */\nconst gen_bitlen = (s, desc) =>\n// deflate_state *s;\n// tree_desc *desc; /* the tree descriptor */\n{\n const tree = desc.dyn_tree;\n const max_code = desc.max_code;\n const stree = desc.stat_desc.static_tree;\n const has_stree = desc.stat_desc.has_stree;\n const extra = desc.stat_desc.extra_bits;\n const base = desc.stat_desc.extra_base;\n const max_length = desc.stat_desc.max_length;\n let h; /* heap index */\n let n, m; /* iterate over the tree elements */\n let bits; /* bit length */\n let xbits; /* extra bits */\n let f; /* frequency */\n let overflow = 0; /* number of elements with bit length too large */\n\n for (bits = 0; bits <= MAX_BITS; bits++) {\n s.bl_count[bits] = 0;\n }\n\n /* In a first pass, compute the optimal bit lengths (which may\n * overflow in the case of the bit length tree).\n */\n tree[s.heap[s.heap_max] * 2 + 1]/*.Len*/ = 0; /* root of the heap */\n\n for (h = s.heap_max + 1; h < HEAP_SIZE; h++) {\n n = s.heap[h];\n bits = tree[tree[n * 2 + 1]/*.Dad*/ * 2 + 1]/*.Len*/ + 1;\n if (bits > max_length) {\n bits = max_length;\n overflow++;\n }\n tree[n * 2 + 1]/*.Len*/ = bits;\n /* We overwrite tree[n].Dad 
which is no longer needed */\n\n if (n > max_code) { continue; } /* not a leaf node */\n\n s.bl_count[bits]++;\n xbits = 0;\n if (n >= base) {\n xbits = extra[n - base];\n }\n f = tree[n * 2]/*.Freq*/;\n s.opt_len += f * (bits + xbits);\n if (has_stree) {\n s.static_len += f * (stree[n * 2 + 1]/*.Len*/ + xbits);\n }\n }\n if (overflow === 0) { return; }\n\n // Trace((stderr,\"\\nbit length overflow\\n\"));\n /* This happens for example on obj2 and pic of the Calgary corpus */\n\n /* Find the first bit length which could increase: */\n do {\n bits = max_length - 1;\n while (s.bl_count[bits] === 0) { bits--; }\n s.bl_count[bits]--; /* move one leaf down the tree */\n s.bl_count[bits + 1] += 2; /* move one overflow item as its brother */\n s.bl_count[max_length]--;\n /* The brother of the overflow item also moves one step up,\n * but this does not affect bl_count[max_length]\n */\n overflow -= 2;\n } while (overflow > 0);\n\n /* Now recompute all bit lengths, scanning in increasing frequency.\n * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all\n * lengths instead of fixing only the wrong ones. This idea is taken\n * from 'ar' written by Haruhiko Okumura.)\n */\n for (bits = max_length; bits !== 0; bits--) {\n n = s.bl_count[bits];\n while (n !== 0) {\n m = s.heap[--h];\n if (m > max_code) { continue; }\n if (tree[m * 2 + 1]/*.Len*/ !== bits) {\n // Trace((stderr,\"code %d bits %d->%d\\n\", m, tree[m].Len, bits));\n s.opt_len += (bits - tree[m * 2 + 1]/*.Len*/) * tree[m * 2]/*.Freq*/;\n tree[m * 2 + 1]/*.Len*/ = bits;\n }\n n--;\n }\n }\n};\n\n\n/* ===========================================================================\n * Generate the codes for a given tree and bit counts (which need not be\n * optimal).\n * IN assertion: the array bl_count contains the bit length statistics for\n * the given tree and the field len is set for all tree elements.\n * OUT assertion: the field code is set for all tree elements of non\n * zero code length.\n */\nconst gen_codes = (tree, max_code, bl_count) =>\n// ct_data *tree; /* the tree to decorate */\n// int max_code; /* largest code with non zero frequency */\n// ushf *bl_count; /* number of codes at each bit length */\n{\n const next_code = new Array(MAX_BITS + 1); /* next code value for each bit length */\n let code = 0; /* running code value */\n let bits; /* bit index */\n let n; /* code index */\n\n /* The distribution counts are first used to generate the code values\n * without bit reversal.\n */\n for (bits = 1; bits <= MAX_BITS; bits++) {\n next_code[bits] = code = (code + bl_count[bits - 1]) << 1;\n }\n /* Check that the bit counts in bl_count are consistent. 
The last code\n * must be all ones.\n */\n //Assert (code + bl_count[MAX_BITS]-1 == (1< {\n\n let n; /* iterates over tree elements */\n let bits; /* bit counter */\n let length; /* length value */\n let code; /* code value */\n let dist; /* distance index */\n const bl_count = new Array(MAX_BITS + 1);\n /* number of codes at each bit length for an optimal tree */\n\n // do check in _tr_init()\n //if (static_init_done) return;\n\n /* For some embedded targets, global variables are not initialized: */\n/*#ifdef NO_INIT_GLOBAL_POINTERS\n static_l_desc.static_tree = static_ltree;\n static_l_desc.extra_bits = extra_lbits;\n static_d_desc.static_tree = static_dtree;\n static_d_desc.extra_bits = extra_dbits;\n static_bl_desc.extra_bits = extra_blbits;\n#endif*/\n\n /* Initialize the mapping length (0..255) -> length code (0..28) */\n length = 0;\n for (code = 0; code < LENGTH_CODES - 1; code++) {\n base_length[code] = length;\n for (n = 0; n < (1 << extra_lbits[code]); n++) {\n _length_code[length++] = code;\n }\n }\n //Assert (length == 256, \"tr_static_init: length != 256\");\n /* Note that the length 255 (match length 258) can be represented\n * in two different ways: code 284 + 5 bits or code 285, so we\n * overwrite length_code[255] to use the best encoding:\n */\n _length_code[length - 1] = code;\n\n /* Initialize the mapping dist (0..32K) -> dist code (0..29) */\n dist = 0;\n for (code = 0; code < 16; code++) {\n base_dist[code] = dist;\n for (n = 0; n < (1 << extra_dbits[code]); n++) {\n _dist_code[dist++] = code;\n }\n }\n //Assert (dist == 256, \"tr_static_init: dist != 256\");\n dist >>= 7; /* from now on, all distances are divided by 128 */\n for (; code < D_CODES; code++) {\n base_dist[code] = dist << 7;\n for (n = 0; n < (1 << (extra_dbits[code] - 7)); n++) {\n _dist_code[256 + dist++] = code;\n }\n }\n //Assert (dist == 256, \"tr_static_init: 256+dist != 512\");\n\n /* Construct the codes of the static literal tree */\n for (bits = 0; bits <= MAX_BITS; bits++) {\n bl_count[bits] = 0;\n }\n\n n = 0;\n while (n <= 143) {\n static_ltree[n * 2 + 1]/*.Len*/ = 8;\n n++;\n bl_count[8]++;\n }\n while (n <= 255) {\n static_ltree[n * 2 + 1]/*.Len*/ = 9;\n n++;\n bl_count[9]++;\n }\n while (n <= 279) {\n static_ltree[n * 2 + 1]/*.Len*/ = 7;\n n++;\n bl_count[7]++;\n }\n while (n <= 287) {\n static_ltree[n * 2 + 1]/*.Len*/ = 8;\n n++;\n bl_count[8]++;\n }\n /* Codes 286 and 287 do not exist, but we must include them in the\n * tree construction to get a canonical Huffman tree (longest code\n * all ones)\n */\n gen_codes(static_ltree, L_CODES + 1, bl_count);\n\n /* The static distance tree is trivial: */\n for (n = 0; n < D_CODES; n++) {\n static_dtree[n * 2 + 1]/*.Len*/ = 5;\n static_dtree[n * 2]/*.Code*/ = bi_reverse(n, 5);\n }\n\n // Now data ready and we can init static trees\n static_l_desc = new StaticTreeDesc(static_ltree, extra_lbits, LITERALS + 1, L_CODES, MAX_BITS);\n static_d_desc = new StaticTreeDesc(static_dtree, extra_dbits, 0, D_CODES, MAX_BITS);\n static_bl_desc = new StaticTreeDesc(new Array(0), extra_blbits, 0, BL_CODES, MAX_BL_BITS);\n\n //static_init_done = true;\n};\n\n\n/* ===========================================================================\n * Initialize a new block.\n */\nconst init_block = (s) => {\n\n let n; /* iterates over tree elements */\n\n /* Initialize the trees. 
*/\n for (n = 0; n < L_CODES; n++) { s.dyn_ltree[n * 2]/*.Freq*/ = 0; }\n for (n = 0; n < D_CODES; n++) { s.dyn_dtree[n * 2]/*.Freq*/ = 0; }\n for (n = 0; n < BL_CODES; n++) { s.bl_tree[n * 2]/*.Freq*/ = 0; }\n\n s.dyn_ltree[END_BLOCK * 2]/*.Freq*/ = 1;\n s.opt_len = s.static_len = 0;\n s.last_lit = s.matches = 0;\n};\n\n\n/* ===========================================================================\n * Flush the bit buffer and align the output on a byte boundary\n */\nconst bi_windup = (s) =>\n{\n if (s.bi_valid > 8) {\n put_short(s, s.bi_buf);\n } else if (s.bi_valid > 0) {\n //put_byte(s, (Byte)s->bi_buf);\n s.pending_buf[s.pending++] = s.bi_buf;\n }\n s.bi_buf = 0;\n s.bi_valid = 0;\n};\n\n/* ===========================================================================\n * Copy a stored block, storing first the length and its\n * one's complement if requested.\n */\nconst copy_block = (s, buf, len, header) =>\n//DeflateState *s;\n//charf *buf; /* the input data */\n//unsigned len; /* its length */\n//int header; /* true if block header must be written */\n{\n bi_windup(s); /* align on byte boundary */\n\n if (header) {\n put_short(s, len);\n put_short(s, ~len);\n }\n// while (len--) {\n// put_byte(s, *buf++);\n// }\n s.pending_buf.set(s.window.subarray(buf, buf + len), s.pending);\n s.pending += len;\n};\n\n/* ===========================================================================\n * Compares to subtrees, using the tree depth as tie breaker when\n * the subtrees have equal frequency. This minimizes the worst case length.\n */\nconst smaller = (tree, n, m, depth) => {\n\n const _n2 = n * 2;\n const _m2 = m * 2;\n return (tree[_n2]/*.Freq*/ < tree[_m2]/*.Freq*/ ||\n (tree[_n2]/*.Freq*/ === tree[_m2]/*.Freq*/ && depth[n] <= depth[m]));\n};\n\n/* ===========================================================================\n * Restore the heap property by moving down the tree starting at node k,\n * exchanging a node with the smallest of its two sons if necessary, stopping\n * when the heap property is re-established (each father smaller than its\n * two sons).\n */\nconst pqdownheap = (s, tree, k) =>\n// deflate_state *s;\n// ct_data *tree; /* the tree to restore */\n// int k; /* node to move down */\n{\n const v = s.heap[k];\n let j = k << 1; /* left son of k */\n while (j <= s.heap_len) {\n /* Set j to the smallest of the two sons: */\n if (j < s.heap_len &&\n smaller(tree, s.heap[j + 1], s.heap[j], s.depth)) {\n j++;\n }\n /* Exit if v is smaller than both sons */\n if (smaller(tree, v, s.heap[j], s.depth)) { break; }\n\n /* Exchange v with the smallest son */\n s.heap[k] = s.heap[j];\n k = j;\n\n /* And continue down the tree, setting j to the left son of k */\n j <<= 1;\n }\n s.heap[k] = v;\n};\n\n\n// inlined manually\n// const SMALLEST = 1;\n\n/* ===========================================================================\n * Send the block data compressed using the given Huffman trees\n */\nconst compress_block = (s, ltree, dtree) =>\n// deflate_state *s;\n// const ct_data *ltree; /* literal tree */\n// const ct_data *dtree; /* distance tree */\n{\n let dist; /* distance of matched string */\n let lc; /* match length or unmatched char (if dist == 0) */\n let lx = 0; /* running index in l_buf */\n let code; /* the code to send */\n let extra; /* number of extra bits to send */\n\n if (s.last_lit !== 0) {\n do {\n dist = (s.pending_buf[s.d_buf + lx * 2] << 8) | (s.pending_buf[s.d_buf + lx * 2 + 1]);\n lc = s.pending_buf[s.l_buf + lx];\n lx++;\n\n if (dist === 0) {\n send_code(s, 
lc, ltree); /* send a literal byte */\n //Tracecv(isgraph(lc), (stderr,\" '%c' \", lc));\n } else {\n /* Here, lc is the match length - MIN_MATCH */\n code = _length_code[lc];\n send_code(s, code + LITERALS + 1, ltree); /* send the length code */\n extra = extra_lbits[code];\n if (extra !== 0) {\n lc -= base_length[code];\n send_bits(s, lc, extra); /* send the extra length bits */\n }\n dist--; /* dist is now the match distance - 1 */\n code = d_code(dist);\n //Assert (code < D_CODES, \"bad d_code\");\n\n send_code(s, code, dtree); /* send the distance code */\n extra = extra_dbits[code];\n if (extra !== 0) {\n dist -= base_dist[code];\n send_bits(s, dist, extra); /* send the extra distance bits */\n }\n } /* literal or match pair ? */\n\n /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */\n //Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx,\n // \"pendingBuf overflow\");\n\n } while (lx < s.last_lit);\n }\n\n send_code(s, END_BLOCK, ltree);\n};\n\n\n/* ===========================================================================\n * Construct one Huffman tree and assigns the code bit strings and lengths.\n * Update the total bit length for the current block.\n * IN assertion: the field freq is set for all tree elements.\n * OUT assertions: the fields len and code are set to the optimal bit length\n * and corresponding code. The length opt_len is updated; static_len is\n * also updated if stree is not null. The field max_code is set.\n */\nconst build_tree = (s, desc) =>\n// deflate_state *s;\n// tree_desc *desc; /* the tree descriptor */\n{\n const tree = desc.dyn_tree;\n const stree = desc.stat_desc.static_tree;\n const has_stree = desc.stat_desc.has_stree;\n const elems = desc.stat_desc.elems;\n let n, m; /* iterate over heap elements */\n let max_code = -1; /* largest code with non zero frequency */\n let node; /* new node being created */\n\n /* Construct the initial heap, with least frequent element in\n * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].\n * heap[0] is not used.\n */\n s.heap_len = 0;\n s.heap_max = HEAP_SIZE;\n\n for (n = 0; n < elems; n++) {\n if (tree[n * 2]/*.Freq*/ !== 0) {\n s.heap[++s.heap_len] = max_code = n;\n s.depth[n] = 0;\n\n } else {\n tree[n * 2 + 1]/*.Len*/ = 0;\n }\n }\n\n /* The pkzip format requires that at least one distance code exists,\n * and that at least one bit should be sent even if there is only one\n * possible code. So to avoid special checks later on we force at least\n * two codes of non zero frequency.\n */\n while (s.heap_len < 2) {\n node = s.heap[++s.heap_len] = (max_code < 2 ? ++max_code : 0);\n tree[node * 2]/*.Freq*/ = 1;\n s.depth[node] = 0;\n s.opt_len--;\n\n if (has_stree) {\n s.static_len -= stree[node * 2 + 1]/*.Len*/;\n }\n /* node is 0 or 1 so it does not have extra bits */\n }\n desc.max_code = max_code;\n\n /* The elements heap[heap_len/2+1 .. 
heap_len] are leaves of the tree,\n * establish sub-heaps of increasing lengths:\n */\n for (n = (s.heap_len >> 1/*int /2*/); n >= 1; n--) { pqdownheap(s, tree, n); }\n\n /* Construct the Huffman tree by repeatedly combining the least two\n * frequent nodes.\n */\n node = elems; /* next internal node of the tree */\n do {\n //pqremove(s, tree, n); /* n = node of least frequency */\n /*** pqremove ***/\n n = s.heap[1/*SMALLEST*/];\n s.heap[1/*SMALLEST*/] = s.heap[s.heap_len--];\n pqdownheap(s, tree, 1/*SMALLEST*/);\n /***/\n\n m = s.heap[1/*SMALLEST*/]; /* m = node of next least frequency */\n\n s.heap[--s.heap_max] = n; /* keep the nodes sorted by frequency */\n s.heap[--s.heap_max] = m;\n\n /* Create a new node father of n and m */\n tree[node * 2]/*.Freq*/ = tree[n * 2]/*.Freq*/ + tree[m * 2]/*.Freq*/;\n s.depth[node] = (s.depth[n] >= s.depth[m] ? s.depth[n] : s.depth[m]) + 1;\n tree[n * 2 + 1]/*.Dad*/ = tree[m * 2 + 1]/*.Dad*/ = node;\n\n /* and insert the new node in the heap */\n s.heap[1/*SMALLEST*/] = node++;\n pqdownheap(s, tree, 1/*SMALLEST*/);\n\n } while (s.heap_len >= 2);\n\n s.heap[--s.heap_max] = s.heap[1/*SMALLEST*/];\n\n /* At this point, the fields freq and dad are set. We can now\n * generate the bit lengths.\n */\n gen_bitlen(s, desc);\n\n /* The field len is now set, we can generate the bit codes */\n gen_codes(tree, max_code, s.bl_count);\n};\n\n\n/* ===========================================================================\n * Scan a literal or distance tree to determine the frequencies of the codes\n * in the bit length tree.\n */\nconst scan_tree = (s, tree, max_code) =>\n// deflate_state *s;\n// ct_data *tree; /* the tree to be scanned */\n// int max_code; /* and its largest code of non zero frequency */\n{\n let n; /* iterates over all tree elements */\n let prevlen = -1; /* last emitted length */\n let curlen; /* length of current code */\n\n let nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */\n\n let count = 0; /* repeat count of the current code */\n let max_count = 7; /* max repeat count */\n let min_count = 4; /* min repeat count */\n\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n }\n tree[(max_code + 1) * 2 + 1]/*.Len*/ = 0xffff; /* guard */\n\n for (n = 0; n <= max_code; n++) {\n curlen = nextlen;\n nextlen = tree[(n + 1) * 2 + 1]/*.Len*/;\n\n if (++count < max_count && curlen === nextlen) {\n continue;\n\n } else if (count < min_count) {\n s.bl_tree[curlen * 2]/*.Freq*/ += count;\n\n } else if (curlen !== 0) {\n\n if (curlen !== prevlen) { s.bl_tree[curlen * 2]/*.Freq*/++; }\n s.bl_tree[REP_3_6 * 2]/*.Freq*/++;\n\n } else if (count <= 10) {\n s.bl_tree[REPZ_3_10 * 2]/*.Freq*/++;\n\n } else {\n s.bl_tree[REPZ_11_138 * 2]/*.Freq*/++;\n }\n\n count = 0;\n prevlen = curlen;\n\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n\n } else if (curlen === nextlen) {\n max_count = 6;\n min_count = 3;\n\n } else {\n max_count = 7;\n min_count = 4;\n }\n }\n};\n\n\n/* ===========================================================================\n * Send a literal or distance tree in compressed form, using the codes in\n * bl_tree.\n */\nconst send_tree = (s, tree, max_code) =>\n// deflate_state *s;\n// ct_data *tree; /* the tree to be scanned */\n// int max_code; /* and its largest code of non zero frequency */\n{\n let n; /* iterates over all tree elements */\n let prevlen = -1; /* last emitted length */\n let curlen; /* length of current code */\n\n let nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */\n\n let count = 0; 
/* repeat count of the current code */\n let max_count = 7; /* max repeat count */\n let min_count = 4; /* min repeat count */\n\n /* tree[max_code+1].Len = -1; */ /* guard already set */\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n }\n\n for (n = 0; n <= max_code; n++) {\n curlen = nextlen;\n nextlen = tree[(n + 1) * 2 + 1]/*.Len*/;\n\n if (++count < max_count && curlen === nextlen) {\n continue;\n\n } else if (count < min_count) {\n do { send_code(s, curlen, s.bl_tree); } while (--count !== 0);\n\n } else if (curlen !== 0) {\n if (curlen !== prevlen) {\n send_code(s, curlen, s.bl_tree);\n count--;\n }\n //Assert(count >= 3 && count <= 6, \" 3_6?\");\n send_code(s, REP_3_6, s.bl_tree);\n send_bits(s, count - 3, 2);\n\n } else if (count <= 10) {\n send_code(s, REPZ_3_10, s.bl_tree);\n send_bits(s, count - 3, 3);\n\n } else {\n send_code(s, REPZ_11_138, s.bl_tree);\n send_bits(s, count - 11, 7);\n }\n\n count = 0;\n prevlen = curlen;\n if (nextlen === 0) {\n max_count = 138;\n min_count = 3;\n\n } else if (curlen === nextlen) {\n max_count = 6;\n min_count = 3;\n\n } else {\n max_count = 7;\n min_count = 4;\n }\n }\n};\n\n\n/* ===========================================================================\n * Construct the Huffman tree for the bit lengths and return the index in\n * bl_order of the last bit length code to send.\n */\nconst build_bl_tree = (s) => {\n\n let max_blindex; /* index of last bit length code of non zero freq */\n\n /* Determine the bit length frequencies for literal and distance trees */\n scan_tree(s, s.dyn_ltree, s.l_desc.max_code);\n scan_tree(s, s.dyn_dtree, s.d_desc.max_code);\n\n /* Build the bit length tree: */\n build_tree(s, s.bl_desc);\n /* opt_len now includes the length of the tree representations, except\n * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.\n */\n\n /* Determine the number of bit length codes to send. The pkzip format\n * requires that at least 4 bit length codes be sent. 
(appnote.txt says\n * 3 but the actual value used is 4.)\n */\n for (max_blindex = BL_CODES - 1; max_blindex >= 3; max_blindex--) {\n if (s.bl_tree[bl_order[max_blindex] * 2 + 1]/*.Len*/ !== 0) {\n break;\n }\n }\n /* Update opt_len to include the bit length tree and counts */\n s.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;\n //Tracev((stderr, \"\\ndyn trees: dyn %ld, stat %ld\",\n // s->opt_len, s->static_len));\n\n return max_blindex;\n};\n\n\n/* ===========================================================================\n * Send the header for a block using dynamic Huffman trees: the counts, the\n * lengths of the bit length codes, the literal tree and the distance tree.\n * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.\n */\nconst send_all_trees = (s, lcodes, dcodes, blcodes) =>\n// deflate_state *s;\n// int lcodes, dcodes, blcodes; /* number of codes for each tree */\n{\n let rank; /* index in bl_order */\n\n //Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, \"not enough codes\");\n //Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,\n // \"too many codes\");\n //Tracev((stderr, \"\\nbl counts: \"));\n send_bits(s, lcodes - 257, 5); /* not +255 as stated in appnote.txt */\n send_bits(s, dcodes - 1, 5);\n send_bits(s, blcodes - 4, 4); /* not -3 as stated in appnote.txt */\n for (rank = 0; rank < blcodes; rank++) {\n //Tracev((stderr, \"\\nbl code %2d \", bl_order[rank]));\n send_bits(s, s.bl_tree[bl_order[rank] * 2 + 1]/*.Len*/, 3);\n }\n //Tracev((stderr, \"\\nbl tree: sent %ld\", s->bits_sent));\n\n send_tree(s, s.dyn_ltree, lcodes - 1); /* literal tree */\n //Tracev((stderr, \"\\nlit tree: sent %ld\", s->bits_sent));\n\n send_tree(s, s.dyn_dtree, dcodes - 1); /* distance tree */\n //Tracev((stderr, \"\\ndist tree: sent %ld\", s->bits_sent));\n};\n\n\n/* ===========================================================================\n * Check if the data type is TEXT or BINARY, using the following algorithm:\n * - TEXT if the two conditions below are satisfied:\n * a) There are no non-portable control characters belonging to the\n * \"black list\" (0..6, 14..25, 28..31).\n * b) There is at least one printable character belonging to the\n * \"white list\" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255).\n * - BINARY otherwise.\n * - The following partially-portable control characters form a\n * \"gray list\" that is ignored in this detection algorithm:\n * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}).\n * IN assertion: the fields Freq of dyn_ltree are set.\n */\nconst detect_data_type = (s) => {\n /* black_mask is the bit mask of black-listed bytes\n * set bits 0..6, 14..25, and 28..31\n * 0xf3ffc07f = binary 11110011111111111100000001111111\n */\n let black_mask = 0xf3ffc07f;\n let n;\n\n /* Check for non-textual (\"black-listed\") bytes. */\n for (n = 0; n <= 31; n++, black_mask >>>= 1) {\n if ((black_mask & 1) && (s.dyn_ltree[n * 2]/*.Freq*/ !== 0)) {\n return Z_BINARY;\n }\n }\n\n /* Check for textual (\"white-listed\") bytes. 
*/\n if (s.dyn_ltree[9 * 2]/*.Freq*/ !== 0 || s.dyn_ltree[10 * 2]/*.Freq*/ !== 0 ||\n s.dyn_ltree[13 * 2]/*.Freq*/ !== 0) {\n return Z_TEXT;\n }\n for (n = 32; n < LITERALS; n++) {\n if (s.dyn_ltree[n * 2]/*.Freq*/ !== 0) {\n return Z_TEXT;\n }\n }\n\n /* There are no \"black-listed\" or \"white-listed\" bytes:\n * this stream either is empty or has tolerated (\"gray-listed\") bytes only.\n */\n return Z_BINARY;\n};\n\n\nlet static_init_done = false;\n\n/* ===========================================================================\n * Initialize the tree data structures for a new zlib stream.\n */\nconst _tr_init = (s) =>\n{\n\n if (!static_init_done) {\n tr_static_init();\n static_init_done = true;\n }\n\n s.l_desc = new TreeDesc(s.dyn_ltree, static_l_desc);\n s.d_desc = new TreeDesc(s.dyn_dtree, static_d_desc);\n s.bl_desc = new TreeDesc(s.bl_tree, static_bl_desc);\n\n s.bi_buf = 0;\n s.bi_valid = 0;\n\n /* Initialize the first block of the first file: */\n init_block(s);\n};\n\n\n/* ===========================================================================\n * Send a stored block\n */\nconst _tr_stored_block = (s, buf, stored_len, last) =>\n//DeflateState *s;\n//charf *buf; /* input block */\n//ulg stored_len; /* length of input block */\n//int last; /* one if this is the last block for a file */\n{\n send_bits(s, (STORED_BLOCK << 1) + (last ? 1 : 0), 3); /* send block type */\n copy_block(s, buf, stored_len, true); /* with header */\n};\n\n\n/* ===========================================================================\n * Send one empty static block to give enough lookahead for inflate.\n * This takes 10 bits, of which 7 may remain in the bit buffer.\n */\nconst _tr_align = (s) => {\n send_bits(s, STATIC_TREES << 1, 3);\n send_code(s, END_BLOCK, static_ltree);\n bi_flush(s);\n};\n\n\n/* ===========================================================================\n * Determine the best encoding for the current block: dynamic trees, static\n * trees or store, and output the encoded block to the zip file.\n */\nconst _tr_flush_block = (s, buf, stored_len, last) =>\n//DeflateState *s;\n//charf *buf; /* input block, or NULL if too old */\n//ulg stored_len; /* length of input block */\n//int last; /* one if this is the last block for a file */\n{\n let opt_lenb, static_lenb; /* opt_len and static_len in bytes */\n let max_blindex = 0; /* index of last bit length code of non zero freq */\n\n /* Build the Huffman trees unless a stored block is forced */\n if (s.level > 0) {\n\n /* Check if the file is binary or text */\n if (s.strm.data_type === Z_UNKNOWN) {\n s.strm.data_type = detect_data_type(s);\n }\n\n /* Construct the literal and distance trees */\n build_tree(s, s.l_desc);\n // Tracev((stderr, \"\\nlit data: dyn %ld, stat %ld\", s->opt_len,\n // s->static_len));\n\n build_tree(s, s.d_desc);\n // Tracev((stderr, \"\\ndist data: dyn %ld, stat %ld\", s->opt_len,\n // s->static_len));\n /* At this point, opt_len and static_len are the total bit lengths of\n * the compressed block data, excluding the tree representations.\n */\n\n /* Build the bit length tree for the above two trees, and get the index\n * in bl_order of the last bit length code to send.\n */\n max_blindex = build_bl_tree(s);\n\n /* Determine the best encoding. Compute the block lengths in bytes. 
*/\n opt_lenb = (s.opt_len + 3 + 7) >>> 3;\n static_lenb = (s.static_len + 3 + 7) >>> 3;\n\n // Tracev((stderr, \"\\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u \",\n // opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,\n // s->last_lit));\n\n if (static_lenb <= opt_lenb) { opt_lenb = static_lenb; }\n\n } else {\n // Assert(buf != (char*)0, \"lost buf\");\n opt_lenb = static_lenb = stored_len + 5; /* force a stored block */\n }\n\n if ((stored_len + 4 <= opt_lenb) && (buf !== -1)) {\n /* 4: two words for the lengths */\n\n /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.\n * Otherwise we can't have processed more than WSIZE input bytes since\n * the last block flush, because compression would have been\n * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to\n * transform a block into a stored block.\n */\n _tr_stored_block(s, buf, stored_len, last);\n\n } else if (s.strategy === Z_FIXED || static_lenb === opt_lenb) {\n\n send_bits(s, (STATIC_TREES << 1) + (last ? 1 : 0), 3);\n compress_block(s, static_ltree, static_dtree);\n\n } else {\n send_bits(s, (DYN_TREES << 1) + (last ? 1 : 0), 3);\n send_all_trees(s, s.l_desc.max_code + 1, s.d_desc.max_code + 1, max_blindex + 1);\n compress_block(s, s.dyn_ltree, s.dyn_dtree);\n }\n // Assert (s->compressed_len == s->bits_sent, \"bad compressed size\");\n /* The above check is made mod 2^32, for files larger than 512 MB\n * and uLong implemented on 32 bits.\n */\n init_block(s);\n\n if (last) {\n bi_windup(s);\n }\n // Tracev((stderr,\"\\ncomprlen %lu(%lu) \", s->compressed_len>>3,\n // s->compressed_len-7*last));\n};\n\n/* ===========================================================================\n * Save the match info and tally the frequency counts. Return true if\n * the current block must be flushed.\n */\nconst _tr_tally = (s, dist, lc) =>\n// deflate_state *s;\n// unsigned dist; /* distance of matched string */\n// unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */\n{\n //let out_length, in_length, dcode;\n\n s.pending_buf[s.d_buf + s.last_lit * 2] = (dist >>> 8) & 0xff;\n s.pending_buf[s.d_buf + s.last_lit * 2 + 1] = dist & 0xff;\n\n s.pending_buf[s.l_buf + s.last_lit] = lc & 0xff;\n s.last_lit++;\n\n if (dist === 0) {\n /* lc is the unmatched char */\n s.dyn_ltree[lc * 2]/*.Freq*/++;\n } else {\n s.matches++;\n /* Here, lc is the match length - MIN_MATCH */\n dist--; /* dist = match distance - 1 */\n //Assert((ush)dist < (ush)MAX_DIST(s) &&\n // (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&\n // (ush)d_code(dist) < (ush)D_CODES, \"_tr_tally: bad match\");\n\n s.dyn_ltree[(_length_code[lc] + LITERALS + 1) * 2]/*.Freq*/++;\n s.dyn_dtree[d_code(dist) * 2]/*.Freq*/++;\n }\n\n// (!) 
This block is disabled in zlib defaults,\n// don't enable it for binary compatibility\n\n//#ifdef TRUNCATE_BLOCK\n// /* Try to guess if it is profitable to stop the current block here */\n// if ((s.last_lit & 0x1fff) === 0 && s.level > 2) {\n// /* Compute an upper bound for the compressed length */\n// out_length = s.last_lit*8;\n// in_length = s.strstart - s.block_start;\n//\n// for (dcode = 0; dcode < D_CODES; dcode++) {\n// out_length += s.dyn_dtree[dcode*2]/*.Freq*/ * (5 + extra_dbits[dcode]);\n// }\n// out_length >>>= 3;\n// //Tracev((stderr,\"\\nlast_lit %u, in %ld, out ~%ld(%ld%%) \",\n// // s->last_lit, in_length, out_length,\n// // 100L - out_length*100L/in_length));\n// if (s.matches < (s.last_lit>>1)/*int /2*/ && out_length < (in_length>>1)/*int /2*/) {\n// return true;\n// }\n// }\n//#endif\n\n return (s.last_lit === s.lit_bufsize - 1);\n /* We avoid equality with lit_bufsize because of wraparound at 64K\n * on 16 bit machines and because stored blocks are restricted to\n * 64K-1 bytes.\n */\n};\n\nvar _tr_init_1 = _tr_init;\nvar _tr_stored_block_1 = _tr_stored_block;\nvar _tr_flush_block_1 = _tr_flush_block;\nvar _tr_tally_1 = _tr_tally;\nvar _tr_align_1 = _tr_align;\n\nvar trees = {\n\t_tr_init: _tr_init_1,\n\t_tr_stored_block: _tr_stored_block_1,\n\t_tr_flush_block: _tr_flush_block_1,\n\t_tr_tally: _tr_tally_1,\n\t_tr_align: _tr_align_1\n};\n\n// Note: adler32 takes 12% for level 0 and 2% for level 6.\n// It isn't worth it to make additional optimizations as in original.\n// Small size is preferable.\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nconst adler32 = (adler, buf, len, pos) => {\n let s1 = (adler & 0xffff) |0,\n s2 = ((adler >>> 16) & 0xffff) |0,\n n = 0;\n\n while (len !== 0) {\n // Set limit ~ twice less than 5552, to keep\n // s2 in 31-bits, because we force signed ints.\n // in other case %= will fail.\n n = len > 2000 ? 2000 : len;\n len -= n;\n\n do {\n s1 = (s1 + buf[pos++]) |0;\n s2 = (s2 + s1) |0;\n } while (--n);\n\n s1 %= 65521;\n s2 %= 65521;\n }\n\n return (s1 | (s2 << 16)) |0;\n};\n\n\nvar adler32_1 = adler32;\n\n// Note: we can't get significant speed boost here.\n// So write code to minimize size - no pregenerated tables\n// and array tools dependencies.\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. 
In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\n// Use ordinary array, since untyped makes no boost here\nconst makeTable = () => {\n let c, table = [];\n\n for (var n = 0; n < 256; n++) {\n c = n;\n for (var k = 0; k < 8; k++) {\n c = ((c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1));\n }\n table[n] = c;\n }\n\n return table;\n};\n\n// Create table on load. Just 255 signed longs. Not a problem.\nconst crcTable = new Uint32Array(makeTable());\n\n\nconst crc32 = (crc, buf, len, pos) => {\n const t = crcTable;\n const end = pos + len;\n\n crc ^= -1;\n\n for (let i = pos; i < end; i++) {\n crc = (crc >>> 8) ^ t[(crc ^ buf[i]) & 0xFF];\n }\n\n return (crc ^ (-1)); // >>> 0;\n};\n\n\nvar crc32_1 = crc32;\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nvar messages = {\n 2: 'need dictionary', /* Z_NEED_DICT 2 */\n 1: 'stream end', /* Z_STREAM_END 1 */\n 0: '', /* Z_OK 0 */\n '-1': 'file error', /* Z_ERRNO (-1) */\n '-2': 'stream error', /* Z_STREAM_ERROR (-2) */\n '-3': 'data error', /* Z_DATA_ERROR (-3) */\n '-4': 'insufficient memory', /* Z_MEM_ERROR (-4) */\n '-5': 'buffer error', /* Z_BUF_ERROR (-5) */\n '-6': 'incompatible version' /* Z_VERSION_ERROR (-6) */\n};\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. 
If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nvar constants = {\n\n /* Allowed flush values; see deflate() and inflate() below for details */\n Z_NO_FLUSH: 0,\n Z_PARTIAL_FLUSH: 1,\n Z_SYNC_FLUSH: 2,\n Z_FULL_FLUSH: 3,\n Z_FINISH: 4,\n Z_BLOCK: 5,\n Z_TREES: 6,\n\n /* Return codes for the compression/decompression functions. Negative values\n * are errors, positive values are used for special but normal events.\n */\n Z_OK: 0,\n Z_STREAM_END: 1,\n Z_NEED_DICT: 2,\n Z_ERRNO: -1,\n Z_STREAM_ERROR: -2,\n Z_DATA_ERROR: -3,\n Z_MEM_ERROR: -4,\n Z_BUF_ERROR: -5,\n //Z_VERSION_ERROR: -6,\n\n /* compression levels */\n Z_NO_COMPRESSION: 0,\n Z_BEST_SPEED: 1,\n Z_BEST_COMPRESSION: 9,\n Z_DEFAULT_COMPRESSION: -1,\n\n\n Z_FILTERED: 1,\n Z_HUFFMAN_ONLY: 2,\n Z_RLE: 3,\n Z_FIXED: 4,\n Z_DEFAULT_STRATEGY: 0,\n\n /* Possible values of the data_type field (though see inflate()) */\n Z_BINARY: 0,\n Z_TEXT: 1,\n //Z_ASCII: 1, // = Z_TEXT (deprecated)\n Z_UNKNOWN: 2,\n\n /* The deflate compression method */\n Z_DEFLATED: 8\n //Z_NULL: null // Use -1 or null inline, depending on var type\n};\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. 
This notice may not be removed or altered from any source distribution.\n\nconst { _tr_init: _tr_init$1, _tr_stored_block: _tr_stored_block$1, _tr_flush_block: _tr_flush_block$1, _tr_tally: _tr_tally$1, _tr_align: _tr_align$1 } = trees;\n\n\n\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\nconst {\n Z_NO_FLUSH, Z_PARTIAL_FLUSH, Z_FULL_FLUSH, Z_FINISH, Z_BLOCK,\n Z_OK, Z_STREAM_END, Z_STREAM_ERROR, Z_DATA_ERROR, Z_BUF_ERROR,\n Z_DEFAULT_COMPRESSION,\n Z_FILTERED, Z_HUFFMAN_ONLY, Z_RLE, Z_FIXED: Z_FIXED$1, Z_DEFAULT_STRATEGY,\n Z_UNKNOWN: Z_UNKNOWN$1,\n Z_DEFLATED\n} = constants;\n\n/*============================================================================*/\n\n\nconst MAX_MEM_LEVEL = 9;\n/* Maximum value for memLevel in deflateInit2 */\nconst MAX_WBITS = 15;\n/* 32K LZ77 window */\nconst DEF_MEM_LEVEL = 8;\n\n\nconst LENGTH_CODES$1 = 29;\n/* number of length codes, not counting the special END_BLOCK code */\nconst LITERALS$1 = 256;\n/* number of literal bytes 0..255 */\nconst L_CODES$1 = LITERALS$1 + 1 + LENGTH_CODES$1;\n/* number of Literal or Length codes, including the END_BLOCK code */\nconst D_CODES$1 = 30;\n/* number of distance codes */\nconst BL_CODES$1 = 19;\n/* number of codes used to transfer the bit lengths */\nconst HEAP_SIZE$1 = 2 * L_CODES$1 + 1;\n/* maximum heap size */\nconst MAX_BITS$1 = 15;\n/* All codes must not exceed MAX_BITS bits */\n\nconst MIN_MATCH$1 = 3;\nconst MAX_MATCH$1 = 258;\nconst MIN_LOOKAHEAD = (MAX_MATCH$1 + MIN_MATCH$1 + 1);\n\nconst PRESET_DICT = 0x20;\n\nconst INIT_STATE = 42;\nconst EXTRA_STATE = 69;\nconst NAME_STATE = 73;\nconst COMMENT_STATE = 91;\nconst HCRC_STATE = 103;\nconst BUSY_STATE = 113;\nconst FINISH_STATE = 666;\n\nconst BS_NEED_MORE = 1; /* block not completed, need more input or more output */\nconst BS_BLOCK_DONE = 2; /* block flush performed */\nconst BS_FINISH_STARTED = 3; /* finish started, need only more output at next deflate */\nconst BS_FINISH_DONE = 4; /* finish done, accept no more input or output */\n\nconst OS_CODE = 0x03; // Unix :) . Don't detect, use this default.\n\nconst err = (strm, errorCode) => {\n strm.msg = messages[errorCode];\n return errorCode;\n};\n\nconst rank = (f) => {\n return ((f) << 1) - ((f) > 4 ? 9 : 0);\n};\n\nconst zero$1 = (buf) => {\n let len = buf.length; while (--len >= 0) { buf[len] = 0; }\n};\n\n\n/* eslint-disable new-cap */\nlet HASH_ZLIB = (s, prev, data) => ((prev << s.hash_shift) ^ data) & s.hash_mask;\n// This hash causes less collisions, https://github.com/nodeca/pako/issues/135\n// But breaks binary compatibility\n//let HASH_FAST = (s, prev, data) => ((prev << 8) + (prev >> 8) + (data << 4)) & s.hash_mask;\nlet HASH = HASH_ZLIB;\n\n/* =========================================================================\n * Flush as much pending output as possible. 
All deflate() output goes\n * through this function so some applications may wish to modify it\n * to avoid allocating a large strm->output buffer and copying into it.\n * (See also read_buf()).\n */\nconst flush_pending = (strm) => {\n const s = strm.state;\n\n //_tr_flush_bits(s);\n let len = s.pending;\n if (len > strm.avail_out) {\n len = strm.avail_out;\n }\n if (len === 0) { return; }\n\n strm.output.set(s.pending_buf.subarray(s.pending_out, s.pending_out + len), strm.next_out);\n strm.next_out += len;\n s.pending_out += len;\n strm.total_out += len;\n strm.avail_out -= len;\n s.pending -= len;\n if (s.pending === 0) {\n s.pending_out = 0;\n }\n};\n\n\nconst flush_block_only = (s, last) => {\n _tr_flush_block$1(s, (s.block_start >= 0 ? s.block_start : -1), s.strstart - s.block_start, last);\n s.block_start = s.strstart;\n flush_pending(s.strm);\n};\n\n\nconst put_byte = (s, b) => {\n s.pending_buf[s.pending++] = b;\n};\n\n\n/* =========================================================================\n * Put a short in the pending buffer. The 16-bit value is put in MSB order.\n * IN assertion: the stream state is correct and there is enough room in\n * pending_buf.\n */\nconst putShortMSB = (s, b) => {\n\n // put_byte(s, (Byte)(b >> 8));\n// put_byte(s, (Byte)(b & 0xff));\n s.pending_buf[s.pending++] = (b >>> 8) & 0xff;\n s.pending_buf[s.pending++] = b & 0xff;\n};\n\n\n/* ===========================================================================\n * Read a new buffer from the current input stream, update the adler32\n * and total number of bytes read. All deflate() input goes through\n * this function so some applications may wish to modify it to avoid\n * allocating a large strm->input buffer and copying from it.\n * (See also flush_pending()).\n */\nconst read_buf = (strm, buf, start, size) => {\n\n let len = strm.avail_in;\n\n if (len > size) { len = size; }\n if (len === 0) { return 0; }\n\n strm.avail_in -= len;\n\n // zmemcpy(buf, strm->next_in, len);\n buf.set(strm.input.subarray(strm.next_in, strm.next_in + len), start);\n if (strm.state.wrap === 1) {\n strm.adler = adler32_1(strm.adler, buf, len, start);\n }\n\n else if (strm.state.wrap === 2) {\n strm.adler = crc32_1(strm.adler, buf, len, start);\n }\n\n strm.next_in += len;\n strm.total_in += len;\n\n return len;\n};\n\n\n/* ===========================================================================\n * Set match_start to the longest match starting at the given string and\n * return its length. Matches shorter or equal to prev_length are discarded,\n * in which case the result is equal to prev_length and match_start is\n * garbage.\n * IN assertions: cur_match is the head of the hash chain for the current\n * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1\n * OUT assertion: the match length is not greater than s->lookahead.\n */\nconst longest_match = (s, cur_match) => {\n\n let chain_length = s.max_chain_length; /* max hash chain length */\n let scan = s.strstart; /* current string */\n let match; /* matched string */\n let len; /* length of current match */\n let best_len = s.prev_length; /* best match length so far */\n let nice_match = s.nice_match; /* stop if match long enough */\n const limit = (s.strstart > (s.w_size - MIN_LOOKAHEAD)) ?\n s.strstart - (s.w_size - MIN_LOOKAHEAD) : 0/*NIL*/;\n\n const _win = s.window; // shortcut\n\n const wmask = s.w_mask;\n const prev = s.prev;\n\n /* Stop when cur_match becomes <= limit. 
To simplify the code,\n * we prevent matches with the string of window index 0.\n */\n\n const strend = s.strstart + MAX_MATCH$1;\n let scan_end1 = _win[scan + best_len - 1];\n let scan_end = _win[scan + best_len];\n\n /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.\n * It is easy to get rid of this optimization if necessary.\n */\n // Assert(s->hash_bits >= 8 && MAX_MATCH == 258, \"Code too clever\");\n\n /* Do not waste too much time if we already have a good match: */\n if (s.prev_length >= s.good_match) {\n chain_length >>= 2;\n }\n /* Do not look for matches beyond the end of the input. This is necessary\n * to make deflate deterministic.\n */\n if (nice_match > s.lookahead) { nice_match = s.lookahead; }\n\n // Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, \"need lookahead\");\n\n do {\n // Assert(cur_match < s->strstart, \"no future\");\n match = cur_match;\n\n /* Skip to next match if the match length cannot increase\n * or if the match length is less than 2. Note that the checks below\n * for insufficient lookahead only occur occasionally for performance\n * reasons. Therefore uninitialized memory will be accessed, and\n * conditional jumps will be made that depend on those values.\n * However the length of the match is limited to the lookahead, so\n * the output of deflate is not affected by the uninitialized values.\n */\n\n if (_win[match + best_len] !== scan_end ||\n _win[match + best_len - 1] !== scan_end1 ||\n _win[match] !== _win[scan] ||\n _win[++match] !== _win[scan + 1]) {\n continue;\n }\n\n /* The check at best_len-1 can be removed because it will be made\n * again later. (This heuristic is not always a win.)\n * It is not necessary to compare scan[2] and match[2] since they\n * are always equal when the other bytes match, given that\n * the hash keys are equal and that HASH_BITS >= 8.\n */\n scan += 2;\n match++;\n // Assert(*scan == *match, \"match[2]?\");\n\n /* We check for insufficient lookahead only every 8th comparison;\n * the 256th check will be made at strstart+258.\n */\n do {\n /*jshint noempty:false*/\n } while (_win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&\n scan < strend);\n\n // Assert(scan <= s->window+(unsigned)(s->window_size-1), \"wild scan\");\n\n len = MAX_MATCH$1 - (strend - scan);\n scan = strend - MAX_MATCH$1;\n\n if (len > best_len) {\n s.match_start = cur_match;\n best_len = len;\n if (len >= nice_match) {\n break;\n }\n scan_end1 = _win[scan + best_len - 1];\n scan_end = _win[scan + best_len];\n }\n } while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length !== 0);\n\n if (best_len <= s.lookahead) {\n return best_len;\n }\n return s.lookahead;\n};\n\n\n/* ===========================================================================\n * Fill the window when the lookahead becomes insufficient.\n * Updates strstart and lookahead.\n *\n * IN assertion: lookahead < MIN_LOOKAHEAD\n * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD\n * At least one byte has been read, or avail_in == 0; reads are\n * performed for at least two bytes (required for the zip translate_eol\n * option -- not supported here).\n */\nconst fill_window = (s) => {\n\n const _w_size = s.w_size;\n let p, n, m, more, str;\n\n //Assert(s->lookahead < MIN_LOOKAHEAD, \"already enough 
lookahead\");\n\n do {\n more = s.window_size - s.lookahead - s.strstart;\n\n // JS ints have 32 bit, block below not needed\n /* Deal with !@#$% 64K limit: */\n //if (sizeof(int) <= 2) {\n // if (more == 0 && s->strstart == 0 && s->lookahead == 0) {\n // more = wsize;\n //\n // } else if (more == (unsigned)(-1)) {\n // /* Very unlikely, but possible on 16 bit machine if\n // * strstart == 0 && lookahead == 1 (input done a byte at time)\n // */\n // more--;\n // }\n //}\n\n\n /* If the window is almost full and there is insufficient lookahead,\n * move the upper half to the lower one to make room in the upper half.\n */\n if (s.strstart >= _w_size + (_w_size - MIN_LOOKAHEAD)) {\n\n s.window.set(s.window.subarray(_w_size, _w_size + _w_size), 0);\n s.match_start -= _w_size;\n s.strstart -= _w_size;\n /* we now have strstart >= MAX_DIST */\n s.block_start -= _w_size;\n\n /* Slide the hash table (could be avoided with 32 bit values\n at the expense of memory usage). We slide even when level == 0\n to keep the hash table consistent if we switch back to level > 0\n later. (Using level 0 permanently is not an optimal usage of\n zlib, so we don't care about this pathological case.)\n */\n\n n = s.hash_size;\n p = n;\n\n do {\n m = s.head[--p];\n s.head[p] = (m >= _w_size ? m - _w_size : 0);\n } while (--n);\n\n n = _w_size;\n p = n;\n\n do {\n m = s.prev[--p];\n s.prev[p] = (m >= _w_size ? m - _w_size : 0);\n /* If n is not on any hash chain, prev[n] is garbage but\n * its value will never be used.\n */\n } while (--n);\n\n more += _w_size;\n }\n if (s.strm.avail_in === 0) {\n break;\n }\n\n /* If there was no sliding:\n * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&\n * more == window_size - lookahead - strstart\n * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)\n * => more >= window_size - 2*WSIZE + 2\n * In the BIG_MEM or MMAP case (not yet supported),\n * window_size == input_size + MIN_LOOKAHEAD &&\n * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.\n * Otherwise, window_size == 2*WSIZE so more >= 2.\n * If there was sliding, more >= WSIZE. So in all cases, more >= 2.\n */\n //Assert(more >= 2, \"more < 2\");\n n = read_buf(s.strm, s.window, s.strstart + s.lookahead, more);\n s.lookahead += n;\n\n /* Initialize the hash value now that we have some input: */\n if (s.lookahead + s.insert >= MIN_MATCH$1) {\n str = s.strstart - s.insert;\n s.ins_h = s.window[str];\n\n /* UPDATE_HASH(s, s->ins_h, s->window[str + 1]); */\n s.ins_h = HASH(s, s.ins_h, s.window[str + 1]);\n//#if MIN_MATCH != 3\n// Call update_hash() MIN_MATCH-3 more times\n//#endif\n while (s.insert) {\n /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */\n s.ins_h = HASH(s, s.ins_h, s.window[str + MIN_MATCH$1 - 1]);\n\n s.prev[str & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = str;\n str++;\n s.insert--;\n if (s.lookahead + s.insert < MIN_MATCH$1) {\n break;\n }\n }\n }\n /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,\n * but this is not important since only literal bytes will be emitted.\n */\n\n } while (s.lookahead < MIN_LOOKAHEAD && s.strm.avail_in !== 0);\n\n /* If the WIN_INIT bytes after the end of the current data have never been\n * written, then zero those bytes in order to avoid memory check reports of\n * the use of uninitialized (or uninitialised as Julian writes) bytes by\n * the longest match routines. Update the high water mark for the next\n * time through here. 
WIN_INIT is set to MAX_MATCH since the longest match\n * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.\n */\n// if (s.high_water < s.window_size) {\n// const curr = s.strstart + s.lookahead;\n// let init = 0;\n//\n// if (s.high_water < curr) {\n// /* Previous high water mark below current data -- zero WIN_INIT\n// * bytes or up to end of window, whichever is less.\n// */\n// init = s.window_size - curr;\n// if (init > WIN_INIT)\n// init = WIN_INIT;\n// zmemzero(s->window + curr, (unsigned)init);\n// s->high_water = curr + init;\n// }\n// else if (s->high_water < (ulg)curr + WIN_INIT) {\n// /* High water mark at or above current data, but below current data\n// * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up\n// * to end of window, whichever is less.\n// */\n// init = (ulg)curr + WIN_INIT - s->high_water;\n// if (init > s->window_size - s->high_water)\n// init = s->window_size - s->high_water;\n// zmemzero(s->window + s->high_water, (unsigned)init);\n// s->high_water += init;\n// }\n// }\n//\n// Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,\n// \"not enough room for search\");\n};\n\n/* ===========================================================================\n * Copy without compression as much as possible from the input stream, return\n * the current block state.\n * This function does not insert new strings in the dictionary since\n * uncompressible data is probably not useful. This function is used\n * only for the level=0 compression option.\n * NOTE: this function should be optimized to avoid extra copying from\n * window to pending_buf.\n */\nconst deflate_stored = (s, flush) => {\n\n /* Stored blocks are limited to 0xffff bytes, pending_buf is limited\n * to pending_buf_size, and each stored block has a 5 byte header:\n */\n let max_block_size = 0xffff;\n\n if (max_block_size > s.pending_buf_size - 5) {\n max_block_size = s.pending_buf_size - 5;\n }\n\n /* Copy as much as possible from input to output: */\n for (;;) {\n /* Fill the window as much as possible: */\n if (s.lookahead <= 1) {\n\n //Assert(s->strstart < s->w_size+MAX_DIST(s) ||\n // s->block_start >= (long)s->w_size, \"slide too late\");\n// if (!(s.strstart < s.w_size + (s.w_size - MIN_LOOKAHEAD) ||\n// s.block_start >= s.w_size)) {\n// throw new Error(\"slide too late\");\n// }\n\n fill_window(s);\n if (s.lookahead === 0 && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n\n if (s.lookahead === 0) {\n break;\n }\n /* flush the current block */\n }\n //Assert(s->block_start >= 0L, \"block gone\");\n// if (s.block_start < 0) throw new Error(\"block gone\");\n\n s.strstart += s.lookahead;\n s.lookahead = 0;\n\n /* Emit a stored block if pending_buf will be full: */\n const max_start = s.block_start + max_block_size;\n\n if (s.strstart === 0 || s.strstart >= max_start) {\n /* strstart == 0 is possible when wraparound on 16-bit machine */\n s.lookahead = s.strstart - max_start;\n s.strstart = max_start;\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n\n\n }\n /* Flush if we may have to slide, otherwise block_start may become\n * negative and the data will be gone:\n */\n if (s.strstart - s.block_start >= (s.w_size - MIN_LOOKAHEAD)) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n\n s.insert = 0;\n\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if 
(s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n\n if (s.strstart > s.block_start) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n\n return BS_NEED_MORE;\n};\n\n/* ===========================================================================\n * Compress as much as possible from the input stream, return the current\n * block state.\n * This function does not perform lazy evaluation of matches and inserts\n * new strings in the dictionary only for unmatched strings or for short\n * matches. It is used only for the fast compression options.\n */\nconst deflate_fast = (s, flush) => {\n\n let hash_head; /* head of the hash chain */\n let bflush; /* set if current block must be flushed */\n\n for (;;) {\n /* Make sure that we always have enough lookahead, except\n * at the end of the input file. We need MAX_MATCH bytes\n * for the next match, plus MIN_MATCH bytes to insert the\n * string following the next match.\n */\n if (s.lookahead < MIN_LOOKAHEAD) {\n fill_window(s);\n if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n if (s.lookahead === 0) {\n break; /* flush the current block */\n }\n }\n\n /* Insert the string window[strstart .. strstart+2] in the\n * dictionary, and set hash_head to the head of the hash chain:\n */\n hash_head = 0/*NIL*/;\n if (s.lookahead >= MIN_MATCH$1) {\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH$1 - 1]);\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n }\n\n /* Find the longest match, discarding those <= prev_length.\n * At this point we have always match_length < MIN_MATCH\n */\n if (hash_head !== 0/*NIL*/ && ((s.strstart - hash_head) <= (s.w_size - MIN_LOOKAHEAD))) {\n /* To simplify the code, we prevent matches with the string\n * of window index 0 (in particular we have to avoid a match\n * of the string with itself at the start of the input file).\n */\n s.match_length = longest_match(s, hash_head);\n /* longest_match() sets match_start */\n }\n if (s.match_length >= MIN_MATCH$1) {\n // check_match(s, s.strstart, s.match_start, s.match_length); // for debug only\n\n /*** _tr_tally_dist(s, s.strstart - s.match_start,\n s.match_length - MIN_MATCH, bflush); ***/\n bflush = _tr_tally$1(s, s.strstart - s.match_start, s.match_length - MIN_MATCH$1);\n\n s.lookahead -= s.match_length;\n\n /* Insert new strings in the hash table only if the match length\n * is not too large. 
This saves time but degrades compression.\n */\n if (s.match_length <= s.max_lazy_match/*max_insert_length*/ && s.lookahead >= MIN_MATCH$1) {\n s.match_length--; /* string at strstart already in table */\n do {\n s.strstart++;\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH$1 - 1]);\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n /* strstart never exceeds WSIZE-MAX_MATCH, so there are\n * always MIN_MATCH bytes ahead.\n */\n } while (--s.match_length !== 0);\n s.strstart++;\n } else\n {\n s.strstart += s.match_length;\n s.match_length = 0;\n s.ins_h = s.window[s.strstart];\n /* UPDATE_HASH(s, s.ins_h, s.window[s.strstart+1]); */\n s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + 1]);\n\n//#if MIN_MATCH != 3\n// Call UPDATE_HASH() MIN_MATCH-3 more times\n//#endif\n /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not\n * matter since it will be recomputed at next deflate call.\n */\n }\n } else {\n /* No match, output a literal byte */\n //Tracevv((stderr,\"%c\", s.window[s.strstart]));\n /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/\n bflush = _tr_tally$1(s, 0, s.window[s.strstart]);\n\n s.lookahead--;\n s.strstart++;\n }\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n s.insert = ((s.strstart < (MIN_MATCH$1 - 1)) ? s.strstart : MIN_MATCH$1 - 1);\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n return BS_BLOCK_DONE;\n};\n\n/* ===========================================================================\n * Same as above, but achieves better compression. We use a lazy\n * evaluation for matches: a match is finally adopted only if there is\n * no better match at the next window position.\n */\nconst deflate_slow = (s, flush) => {\n\n let hash_head; /* head of hash chain */\n let bflush; /* set if current block must be flushed */\n\n let max_insert;\n\n /* Process the input block. */\n for (;;) {\n /* Make sure that we always have enough lookahead, except\n * at the end of the input file. We need MAX_MATCH bytes\n * for the next match, plus MIN_MATCH bytes to insert the\n * string following the next match.\n */\n if (s.lookahead < MIN_LOOKAHEAD) {\n fill_window(s);\n if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n if (s.lookahead === 0) { break; } /* flush the current block */\n }\n\n /* Insert the string window[strstart .. 
strstart+2] in the\n * dictionary, and set hash_head to the head of the hash chain:\n */\n hash_head = 0/*NIL*/;\n if (s.lookahead >= MIN_MATCH$1) {\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH$1 - 1]);\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n }\n\n /* Find the longest match, discarding those <= prev_length.\n */\n s.prev_length = s.match_length;\n s.prev_match = s.match_start;\n s.match_length = MIN_MATCH$1 - 1;\n\n if (hash_head !== 0/*NIL*/ && s.prev_length < s.max_lazy_match &&\n s.strstart - hash_head <= (s.w_size - MIN_LOOKAHEAD)/*MAX_DIST(s)*/) {\n /* To simplify the code, we prevent matches with the string\n * of window index 0 (in particular we have to avoid a match\n * of the string with itself at the start of the input file).\n */\n s.match_length = longest_match(s, hash_head);\n /* longest_match() sets match_start */\n\n if (s.match_length <= 5 &&\n (s.strategy === Z_FILTERED || (s.match_length === MIN_MATCH$1 && s.strstart - s.match_start > 4096/*TOO_FAR*/))) {\n\n /* If prev_match is also MIN_MATCH, match_start is garbage\n * but we will ignore the current match anyway.\n */\n s.match_length = MIN_MATCH$1 - 1;\n }\n }\n /* If there was a match at the previous step and the current\n * match is not better, output the previous match:\n */\n if (s.prev_length >= MIN_MATCH$1 && s.match_length <= s.prev_length) {\n max_insert = s.strstart + s.lookahead - MIN_MATCH$1;\n /* Do not insert strings in hash table beyond this. */\n\n //check_match(s, s.strstart-1, s.prev_match, s.prev_length);\n\n /***_tr_tally_dist(s, s.strstart - 1 - s.prev_match,\n s.prev_length - MIN_MATCH, bflush);***/\n bflush = _tr_tally$1(s, s.strstart - 1 - s.prev_match, s.prev_length - MIN_MATCH$1);\n /* Insert in hash table all strings up to the end of the match.\n * strstart-1 and strstart are already inserted. If there is not\n * enough lookahead, the last two strings are not inserted in\n * the hash table.\n */\n s.lookahead -= s.prev_length - 1;\n s.prev_length -= 2;\n do {\n if (++s.strstart <= max_insert) {\n /*** INSERT_STRING(s, s.strstart, hash_head); ***/\n s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH$1 - 1]);\n hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];\n s.head[s.ins_h] = s.strstart;\n /***/\n }\n } while (--s.prev_length !== 0);\n s.match_available = 0;\n s.match_length = MIN_MATCH$1 - 1;\n s.strstart++;\n\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n\n } else if (s.match_available) {\n /* If there was no match at the previous position, output a\n * single literal. 
If there was a match but the current match\n * is longer, truncate the previous match to a single literal.\n */\n //Tracevv((stderr,\"%c\", s->window[s->strstart-1]));\n /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/\n bflush = _tr_tally$1(s, 0, s.window[s.strstart - 1]);\n\n if (bflush) {\n /*** FLUSH_BLOCK_ONLY(s, 0) ***/\n flush_block_only(s, false);\n /***/\n }\n s.strstart++;\n s.lookahead--;\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n } else {\n /* There is no previous match to compare with, wait for\n * the next step to decide.\n */\n s.match_available = 1;\n s.strstart++;\n s.lookahead--;\n }\n }\n //Assert (flush != Z_NO_FLUSH, \"no flush?\");\n if (s.match_available) {\n //Tracevv((stderr,\"%c\", s->window[s->strstart-1]));\n /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/\n bflush = _tr_tally$1(s, 0, s.window[s.strstart - 1]);\n\n s.match_available = 0;\n }\n s.insert = s.strstart < MIN_MATCH$1 - 1 ? s.strstart : MIN_MATCH$1 - 1;\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n\n return BS_BLOCK_DONE;\n};\n\n\n/* ===========================================================================\n * For Z_RLE, simply look for runs of bytes, generate matches only of distance\n * one. Do not maintain a hash table. (It will be regenerated if this run of\n * deflate switches away from Z_RLE.)\n */\nconst deflate_rle = (s, flush) => {\n\n let bflush; /* set if current block must be flushed */\n let prev; /* byte at distance one to match */\n let scan, strend; /* scan goes up to strend for length of run */\n\n const _win = s.window;\n\n for (;;) {\n /* Make sure that we always have enough lookahead, except\n * at the end of the input file. 
We need MAX_MATCH bytes\n * for the longest run, plus one for the unrolled loop.\n */\n if (s.lookahead <= MAX_MATCH$1) {\n fill_window(s);\n if (s.lookahead <= MAX_MATCH$1 && flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n if (s.lookahead === 0) { break; } /* flush the current block */\n }\n\n /* See how many times the previous byte repeats */\n s.match_length = 0;\n if (s.lookahead >= MIN_MATCH$1 && s.strstart > 0) {\n scan = s.strstart - 1;\n prev = _win[scan];\n if (prev === _win[++scan] && prev === _win[++scan] && prev === _win[++scan]) {\n strend = s.strstart + MAX_MATCH$1;\n do {\n /*jshint noempty:false*/\n } while (prev === _win[++scan] && prev === _win[++scan] &&\n prev === _win[++scan] && prev === _win[++scan] &&\n prev === _win[++scan] && prev === _win[++scan] &&\n prev === _win[++scan] && prev === _win[++scan] &&\n scan < strend);\n s.match_length = MAX_MATCH$1 - (strend - scan);\n if (s.match_length > s.lookahead) {\n s.match_length = s.lookahead;\n }\n }\n //Assert(scan <= s->window+(uInt)(s->window_size-1), \"wild scan\");\n }\n\n /* Emit match if have run of MIN_MATCH or longer, else emit literal */\n if (s.match_length >= MIN_MATCH$1) {\n //check_match(s, s.strstart, s.strstart - 1, s.match_length);\n\n /*** _tr_tally_dist(s, 1, s.match_length - MIN_MATCH, bflush); ***/\n bflush = _tr_tally$1(s, 1, s.match_length - MIN_MATCH$1);\n\n s.lookahead -= s.match_length;\n s.strstart += s.match_length;\n s.match_length = 0;\n } else {\n /* No match, output a literal byte */\n //Tracevv((stderr,\"%c\", s->window[s->strstart]));\n /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/\n bflush = _tr_tally$1(s, 0, s.window[s.strstart]);\n\n s.lookahead--;\n s.strstart++;\n }\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n s.insert = 0;\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n return BS_BLOCK_DONE;\n};\n\n/* ===========================================================================\n * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table.\n * (It will be regenerated if this run of deflate switches away from Huffman.)\n */\nconst deflate_huff = (s, flush) => {\n\n let bflush; /* set if current block must be flushed */\n\n for (;;) {\n /* Make sure that we have a literal to write. 
*/\n if (s.lookahead === 0) {\n fill_window(s);\n if (s.lookahead === 0) {\n if (flush === Z_NO_FLUSH) {\n return BS_NEED_MORE;\n }\n break; /* flush the current block */\n }\n }\n\n /* Output a literal byte */\n s.match_length = 0;\n //Tracevv((stderr,\"%c\", s->window[s->strstart]));\n /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/\n bflush = _tr_tally$1(s, 0, s.window[s.strstart]);\n s.lookahead--;\n s.strstart++;\n if (bflush) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n }\n s.insert = 0;\n if (flush === Z_FINISH) {\n /*** FLUSH_BLOCK(s, 1); ***/\n flush_block_only(s, true);\n if (s.strm.avail_out === 0) {\n return BS_FINISH_STARTED;\n }\n /***/\n return BS_FINISH_DONE;\n }\n if (s.last_lit) {\n /*** FLUSH_BLOCK(s, 0); ***/\n flush_block_only(s, false);\n if (s.strm.avail_out === 0) {\n return BS_NEED_MORE;\n }\n /***/\n }\n return BS_BLOCK_DONE;\n};\n\n/* Values for max_lazy_match, good_match and max_chain_length, depending on\n * the desired pack level (0..9). The values given below have been tuned to\n * exclude worst case performance for pathological files. Better values may be\n * found for specific files.\n */\nfunction Config(good_length, max_lazy, nice_length, max_chain, func) {\n\n this.good_length = good_length;\n this.max_lazy = max_lazy;\n this.nice_length = nice_length;\n this.max_chain = max_chain;\n this.func = func;\n}\n\nconst configuration_table = [\n /* good lazy nice chain */\n new Config(0, 0, 0, 0, deflate_stored), /* 0 store only */\n new Config(4, 4, 8, 4, deflate_fast), /* 1 max speed, no lazy matches */\n new Config(4, 5, 16, 8, deflate_fast), /* 2 */\n new Config(4, 6, 32, 32, deflate_fast), /* 3 */\n\n new Config(4, 4, 16, 16, deflate_slow), /* 4 lazy matches */\n new Config(8, 16, 32, 32, deflate_slow), /* 5 */\n new Config(8, 16, 128, 128, deflate_slow), /* 6 */\n new Config(8, 32, 128, 256, deflate_slow), /* 7 */\n new Config(32, 128, 258, 1024, deflate_slow), /* 8 */\n new Config(32, 258, 258, 4096, deflate_slow) /* 9 max compression */\n];\n\n\n/* ===========================================================================\n * Initialize the \"longest match\" routines for a new zlib stream\n */\nconst lm_init = (s) => {\n\n s.window_size = 2 * s.w_size;\n\n /*** CLEAR_HASH(s); ***/\n zero$1(s.head); // Fill with NIL (= 0);\n\n /* Set the default configuration parameters:\n */\n s.max_lazy_match = configuration_table[s.level].max_lazy;\n s.good_match = configuration_table[s.level].good_length;\n s.nice_match = configuration_table[s.level].nice_length;\n s.max_chain_length = configuration_table[s.level].max_chain;\n\n s.strstart = 0;\n s.block_start = 0;\n s.lookahead = 0;\n s.insert = 0;\n s.match_length = s.prev_length = MIN_MATCH$1 - 1;\n s.match_available = 0;\n s.ins_h = 0;\n};\n\n\nfunction DeflateState() {\n this.strm = null; /* pointer back to this zlib stream */\n this.status = 0; /* as the name implies */\n this.pending_buf = null; /* output still pending */\n this.pending_buf_size = 0; /* size of pending_buf */\n this.pending_out = 0; /* next pending byte to output to the stream */\n this.pending = 0; /* nb of bytes in the pending buffer */\n this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */\n this.gzhead = null; /* gzip header information to write */\n this.gzindex = 0; /* where in extra, name, or comment */\n this.method = Z_DEFLATED; /* can only be DEFLATED */\n this.last_flush = -1; /* value of flush param for previous deflate 
call */\n\n this.w_size = 0; /* LZ77 window size (32K by default) */\n this.w_bits = 0; /* log2(w_size) (8..16) */\n this.w_mask = 0; /* w_size - 1 */\n\n this.window = null;\n /* Sliding window. Input bytes are read into the second half of the window,\n * and move to the first half later to keep a dictionary of at least wSize\n * bytes. With this organization, matches are limited to a distance of\n * wSize-MAX_MATCH bytes, but this ensures that IO is always\n * performed with a length multiple of the block size.\n */\n\n this.window_size = 0;\n /* Actual size of window: 2*wSize, except when the user input buffer\n * is directly used as sliding window.\n */\n\n this.prev = null;\n /* Link to older string with same hash index. To limit the size of this\n * array to 64K, this link is maintained only for the last 32K strings.\n * An index in this array is thus a window index modulo 32K.\n */\n\n this.head = null; /* Heads of the hash chains or NIL. */\n\n this.ins_h = 0; /* hash index of string to be inserted */\n this.hash_size = 0; /* number of elements in hash table */\n this.hash_bits = 0; /* log2(hash_size) */\n this.hash_mask = 0; /* hash_size-1 */\n\n this.hash_shift = 0;\n /* Number of bits by which ins_h must be shifted at each input\n * step. It must be such that after MIN_MATCH steps, the oldest\n * byte no longer takes part in the hash key, that is:\n * hash_shift * MIN_MATCH >= hash_bits\n */\n\n this.block_start = 0;\n /* Window position at the beginning of the current output block. Gets\n * negative when the window is moved backwards.\n */\n\n this.match_length = 0; /* length of best match */\n this.prev_match = 0; /* previous match */\n this.match_available = 0; /* set if previous match exists */\n this.strstart = 0; /* start of string to insert */\n this.match_start = 0; /* start of matching string */\n this.lookahead = 0; /* number of valid bytes ahead in window */\n\n this.prev_length = 0;\n /* Length of the best match at previous step. Matches not greater than this\n * are discarded. This is used in the lazy match evaluation.\n */\n\n this.max_chain_length = 0;\n /* To speed up deflation, hash chains are never searched beyond this\n * length. A higher limit improves compression ratio but degrades the\n * speed.\n */\n\n this.max_lazy_match = 0;\n /* Attempt to find a better match only when the current match is strictly\n * smaller than this value. This mechanism is used only for compression\n * levels >= 4.\n */\n // That's alias to max_lazy_match, don't use directly\n //this.max_insert_length = 0;\n /* Insert new strings in the hash table only if the match length is not\n * greater than this length. 
This saves time but degrades compression.\n * max_insert_length is used only for compression levels <= 3.\n */\n\n this.level = 0; /* compression level (1..9) */\n this.strategy = 0; /* favor or force Huffman coding*/\n\n this.good_match = 0;\n /* Use a faster search when the previous match is longer than this */\n\n this.nice_match = 0; /* Stop searching when current match exceeds this */\n\n /* used by trees.c: */\n\n /* Didn't use ct_data typedef below to suppress compiler warning */\n\n // struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */\n // struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */\n // struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */\n\n // Use flat array of DOUBLE size, with interleaved fata,\n // because JS does not support effective\n this.dyn_ltree = new Uint16Array(HEAP_SIZE$1 * 2);\n this.dyn_dtree = new Uint16Array((2 * D_CODES$1 + 1) * 2);\n this.bl_tree = new Uint16Array((2 * BL_CODES$1 + 1) * 2);\n zero$1(this.dyn_ltree);\n zero$1(this.dyn_dtree);\n zero$1(this.bl_tree);\n\n this.l_desc = null; /* desc. for literal tree */\n this.d_desc = null; /* desc. for distance tree */\n this.bl_desc = null; /* desc. for bit length tree */\n\n //ush bl_count[MAX_BITS+1];\n this.bl_count = new Uint16Array(MAX_BITS$1 + 1);\n /* number of codes at each bit length for an optimal tree */\n\n //int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */\n this.heap = new Uint16Array(2 * L_CODES$1 + 1); /* heap used to build the Huffman trees */\n zero$1(this.heap);\n\n this.heap_len = 0; /* number of elements in the heap */\n this.heap_max = 0; /* element of largest frequency */\n /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.\n * The same heap array is used to build all trees.\n */\n\n this.depth = new Uint16Array(2 * L_CODES$1 + 1); //uch depth[2*L_CODES+1];\n zero$1(this.depth);\n /* Depth of each subtree used as tie breaker for trees of equal frequency\n */\n\n this.l_buf = 0; /* buffer index for literals or lengths */\n\n this.lit_bufsize = 0;\n /* Size of match buffer for literals/lengths. There are 4 reasons for\n * limiting lit_bufsize to 64K:\n * - frequencies can be kept in 16 bit counters\n * - if compression is not successful for the first block, all input\n * data is still in the window so we can still emit a stored block even\n * when input comes from standard input. (This can also be done for\n * all blocks if lit_bufsize is not greater than 32K.)\n * - if compression is not successful for a file smaller than 64K, we can\n * even emit a stored file instead of a stored block (saving 5 bytes).\n * This is applicable only for zip (not gzip or zlib).\n * - creating new Huffman trees less frequently may not provide fast\n * adaptation to changes in the input data statistics. (Take for\n * example a binary file with poorly compressible code followed by\n * a highly compressible string table.) Smaller buffer sizes give\n * fast adaptation but have of course the overhead of transmitting\n * trees more frequently.\n * - I can't count above 4\n */\n\n this.last_lit = 0; /* running index in l_buf */\n\n this.d_buf = 0;\n /* Buffer index for distances. To simplify the code, d_buf and l_buf have\n * the same number of elements. 
To use different lengths, an extra flag\n * array would be necessary.\n */\n\n this.opt_len = 0; /* bit length of current block with optimal trees */\n this.static_len = 0; /* bit length of current block with static trees */\n this.matches = 0; /* number of string matches in current block */\n this.insert = 0; /* bytes at end of window left to insert */\n\n\n this.bi_buf = 0;\n /* Output buffer. bits are inserted starting at the bottom (least\n * significant bits).\n */\n this.bi_valid = 0;\n /* Number of valid bits in bi_buf. All bits above the last valid bit\n * are always zero.\n */\n\n // Used for window memory init. We safely ignore it for JS. That makes\n // sense only for pointers and memory check tools.\n //this.high_water = 0;\n /* High water mark offset in window for initialized bytes -- bytes above\n * this are set to zero in order to avoid memory check warnings when\n * longest match routines access bytes past the input. This is then\n * updated to the new high water mark.\n */\n}\n\n\nconst deflateResetKeep = (strm) => {\n\n if (!strm || !strm.state) {\n return err(strm, Z_STREAM_ERROR);\n }\n\n strm.total_in = strm.total_out = 0;\n strm.data_type = Z_UNKNOWN$1;\n\n const s = strm.state;\n s.pending = 0;\n s.pending_out = 0;\n\n if (s.wrap < 0) {\n s.wrap = -s.wrap;\n /* was made negative by deflate(..., Z_FINISH); */\n }\n s.status = (s.wrap ? INIT_STATE : BUSY_STATE);\n strm.adler = (s.wrap === 2) ?\n 0 // crc32(0, Z_NULL, 0)\n :\n 1; // adler32(0, Z_NULL, 0)\n s.last_flush = Z_NO_FLUSH;\n _tr_init$1(s);\n return Z_OK;\n};\n\n\nconst deflateReset = (strm) => {\n\n const ret = deflateResetKeep(strm);\n if (ret === Z_OK) {\n lm_init(strm.state);\n }\n return ret;\n};\n\n\nconst deflateSetHeader = (strm, head) => {\n\n if (!strm || !strm.state) { return Z_STREAM_ERROR; }\n if (strm.state.wrap !== 2) { return Z_STREAM_ERROR; }\n strm.state.gzhead = head;\n return Z_OK;\n};\n\n\nconst deflateInit2 = (strm, level, method, windowBits, memLevel, strategy) => {\n\n if (!strm) { // === Z_NULL\n return Z_STREAM_ERROR;\n }\n let wrap = 1;\n\n if (level === Z_DEFAULT_COMPRESSION) {\n level = 6;\n }\n\n if (windowBits < 0) { /* suppress zlib wrapper */\n wrap = 0;\n windowBits = -windowBits;\n }\n\n else if (windowBits > 15) {\n wrap = 2; /* write gzip wrapper instead */\n windowBits -= 16;\n }\n\n\n if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method !== Z_DEFLATED ||\n windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||\n strategy < 0 || strategy > Z_FIXED$1) {\n return err(strm, Z_STREAM_ERROR);\n }\n\n\n if (windowBits === 8) {\n windowBits = 9;\n }\n /* until 256-byte window bug fixed */\n\n const s = new DeflateState();\n\n strm.state = s;\n s.strm = strm;\n\n s.wrap = wrap;\n s.gzhead = null;\n s.w_bits = windowBits;\n s.w_size = 1 << s.w_bits;\n s.w_mask = s.w_size - 1;\n\n s.hash_bits = memLevel + 7;\n s.hash_size = 1 << s.hash_bits;\n s.hash_mask = s.hash_size - 1;\n s.hash_shift = ~~((s.hash_bits + MIN_MATCH$1 - 1) / MIN_MATCH$1);\n\n s.window = new Uint8Array(s.w_size * 2);\n s.head = new Uint16Array(s.hash_size);\n s.prev = new Uint16Array(s.w_size);\n\n // Don't need mem init magic for JS.\n //s.high_water = 0; /* nothing written to s->window yet */\n\n s.lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */\n\n s.pending_buf_size = s.lit_bufsize * 4;\n\n //overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);\n //s->pending_buf = (uchf *) overlay;\n s.pending_buf = new Uint8Array(s.pending_buf_size);\n\n // It is offset from 
`s.pending_buf` (size is `s.lit_bufsize * 2`)\n //s->d_buf = overlay + s->lit_bufsize/sizeof(ush);\n s.d_buf = 1 * s.lit_bufsize;\n\n //s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;\n s.l_buf = (1 + 2) * s.lit_bufsize;\n\n s.level = level;\n s.strategy = strategy;\n s.method = method;\n\n return deflateReset(strm);\n};\n\nconst deflateInit = (strm, level) => {\n\n return deflateInit2(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);\n};\n\n\nconst deflate = (strm, flush) => {\n\n let beg, val; // for gzip header write only\n\n if (!strm || !strm.state ||\n flush > Z_BLOCK || flush < 0) {\n return strm ? err(strm, Z_STREAM_ERROR) : Z_STREAM_ERROR;\n }\n\n const s = strm.state;\n\n if (!strm.output ||\n (!strm.input && strm.avail_in !== 0) ||\n (s.status === FINISH_STATE && flush !== Z_FINISH)) {\n return err(strm, (strm.avail_out === 0) ? Z_BUF_ERROR : Z_STREAM_ERROR);\n }\n\n s.strm = strm; /* just in case */\n const old_flush = s.last_flush;\n s.last_flush = flush;\n\n /* Write the header */\n if (s.status === INIT_STATE) {\n\n if (s.wrap === 2) { // GZIP header\n strm.adler = 0; //crc32(0L, Z_NULL, 0);\n put_byte(s, 31);\n put_byte(s, 139);\n put_byte(s, 8);\n if (!s.gzhead) { // s->gzhead == Z_NULL\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, 0);\n put_byte(s, s.level === 9 ? 2 :\n (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ?\n 4 : 0));\n put_byte(s, OS_CODE);\n s.status = BUSY_STATE;\n }\n else {\n put_byte(s, (s.gzhead.text ? 1 : 0) +\n (s.gzhead.hcrc ? 2 : 0) +\n (!s.gzhead.extra ? 0 : 4) +\n (!s.gzhead.name ? 0 : 8) +\n (!s.gzhead.comment ? 0 : 16)\n );\n put_byte(s, s.gzhead.time & 0xff);\n put_byte(s, (s.gzhead.time >> 8) & 0xff);\n put_byte(s, (s.gzhead.time >> 16) & 0xff);\n put_byte(s, (s.gzhead.time >> 24) & 0xff);\n put_byte(s, s.level === 9 ? 
2 :\n (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ?\n 4 : 0));\n put_byte(s, s.gzhead.os & 0xff);\n if (s.gzhead.extra && s.gzhead.extra.length) {\n put_byte(s, s.gzhead.extra.length & 0xff);\n put_byte(s, (s.gzhead.extra.length >> 8) & 0xff);\n }\n if (s.gzhead.hcrc) {\n strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending, 0);\n }\n s.gzindex = 0;\n s.status = EXTRA_STATE;\n }\n }\n else // DEFLATE header\n {\n let header = (Z_DEFLATED + ((s.w_bits - 8) << 4)) << 8;\n let level_flags = -1;\n\n if (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2) {\n level_flags = 0;\n } else if (s.level < 6) {\n level_flags = 1;\n } else if (s.level === 6) {\n level_flags = 2;\n } else {\n level_flags = 3;\n }\n header |= (level_flags << 6);\n if (s.strstart !== 0) { header |= PRESET_DICT; }\n header += 31 - (header % 31);\n\n s.status = BUSY_STATE;\n putShortMSB(s, header);\n\n /* Save the adler32 of the preset dictionary: */\n if (s.strstart !== 0) {\n putShortMSB(s, strm.adler >>> 16);\n putShortMSB(s, strm.adler & 0xffff);\n }\n strm.adler = 1; // adler32(0L, Z_NULL, 0);\n }\n }\n\n//#ifdef GZIP\n if (s.status === EXTRA_STATE) {\n if (s.gzhead.extra/* != Z_NULL*/) {\n beg = s.pending; /* start of bytes to update crc */\n\n while (s.gzindex < (s.gzhead.extra.length & 0xffff)) {\n if (s.pending === s.pending_buf_size) {\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n flush_pending(strm);\n beg = s.pending;\n if (s.pending === s.pending_buf_size) {\n break;\n }\n }\n put_byte(s, s.gzhead.extra[s.gzindex] & 0xff);\n s.gzindex++;\n }\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n if (s.gzindex === s.gzhead.extra.length) {\n s.gzindex = 0;\n s.status = NAME_STATE;\n }\n }\n else {\n s.status = NAME_STATE;\n }\n }\n if (s.status === NAME_STATE) {\n if (s.gzhead.name/* != Z_NULL*/) {\n beg = s.pending; /* start of bytes to update crc */\n //int val;\n\n do {\n if (s.pending === s.pending_buf_size) {\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n flush_pending(strm);\n beg = s.pending;\n if (s.pending === s.pending_buf_size) {\n val = 1;\n break;\n }\n }\n // JS specific: little magic to add zero terminator to end of string\n if (s.gzindex < s.gzhead.name.length) {\n val = s.gzhead.name.charCodeAt(s.gzindex++) & 0xff;\n } else {\n val = 0;\n }\n put_byte(s, val);\n } while (val !== 0);\n\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n if (val === 0) {\n s.gzindex = 0;\n s.status = COMMENT_STATE;\n }\n }\n else {\n s.status = COMMENT_STATE;\n }\n }\n if (s.status === COMMENT_STATE) {\n if (s.gzhead.comment/* != Z_NULL*/) {\n beg = s.pending; /* start of bytes to update crc */\n //int val;\n\n do {\n if (s.pending === s.pending_buf_size) {\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg);\n }\n flush_pending(strm);\n beg = s.pending;\n if (s.pending === s.pending_buf_size) {\n val = 1;\n break;\n }\n }\n // JS specific: little magic to add zero terminator to end of string\n if (s.gzindex < s.gzhead.comment.length) {\n val = s.gzhead.comment.charCodeAt(s.gzindex++) & 0xff;\n } else {\n val = 0;\n }\n put_byte(s, val);\n } while (val !== 0);\n\n if (s.gzhead.hcrc && s.pending > beg) {\n strm.adler = crc32_1(strm.adler, s.pending_buf, s.pending - beg, beg);\n 
}\n if (val === 0) {\n s.status = HCRC_STATE;\n }\n }\n else {\n s.status = HCRC_STATE;\n }\n }\n if (s.status === HCRC_STATE) {\n if (s.gzhead.hcrc) {\n if (s.pending + 2 > s.pending_buf_size) {\n flush_pending(strm);\n }\n if (s.pending + 2 <= s.pending_buf_size) {\n put_byte(s, strm.adler & 0xff);\n put_byte(s, (strm.adler >> 8) & 0xff);\n strm.adler = 0; //crc32(0L, Z_NULL, 0);\n s.status = BUSY_STATE;\n }\n }\n else {\n s.status = BUSY_STATE;\n }\n }\n//#endif\n\n /* Flush as much pending output as possible */\n if (s.pending !== 0) {\n flush_pending(strm);\n if (strm.avail_out === 0) {\n /* Since avail_out is 0, deflate will be called again with\n * more output space, but possibly with both pending and\n * avail_in equal to zero. There won't be anything to do,\n * but this is not an error situation so make sure we\n * return OK instead of BUF_ERROR at next call of deflate:\n */\n s.last_flush = -1;\n return Z_OK;\n }\n\n /* Make sure there is something to do and avoid duplicate consecutive\n * flushes. For repeated and useless calls with Z_FINISH, we keep\n * returning Z_STREAM_END instead of Z_BUF_ERROR.\n */\n } else if (strm.avail_in === 0 && rank(flush) <= rank(old_flush) &&\n flush !== Z_FINISH) {\n return err(strm, Z_BUF_ERROR);\n }\n\n /* User must not provide more input after the first FINISH: */\n if (s.status === FINISH_STATE && strm.avail_in !== 0) {\n return err(strm, Z_BUF_ERROR);\n }\n\n /* Start a new block or continue the current one.\n */\n if (strm.avail_in !== 0 || s.lookahead !== 0 ||\n (flush !== Z_NO_FLUSH && s.status !== FINISH_STATE)) {\n let bstate = (s.strategy === Z_HUFFMAN_ONLY) ? deflate_huff(s, flush) :\n (s.strategy === Z_RLE ? deflate_rle(s, flush) :\n configuration_table[s.level].func(s, flush));\n\n if (bstate === BS_FINISH_STARTED || bstate === BS_FINISH_DONE) {\n s.status = FINISH_STATE;\n }\n if (bstate === BS_NEED_MORE || bstate === BS_FINISH_STARTED) {\n if (strm.avail_out === 0) {\n s.last_flush = -1;\n /* avoid BUF_ERROR next call, see above */\n }\n return Z_OK;\n /* If flush != Z_NO_FLUSH && avail_out == 0, the next call\n * of deflate should use the same flush parameter to make sure\n * that the flush is complete. So we don't have to output an\n * empty block here, this will be done at next call. 
This also\n * ensures that for a very small output buffer, we emit at most\n * one empty block.\n */\n }\n if (bstate === BS_BLOCK_DONE) {\n if (flush === Z_PARTIAL_FLUSH) {\n _tr_align$1(s);\n }\n else if (flush !== Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */\n\n _tr_stored_block$1(s, 0, 0, false);\n /* For a full flush, this empty block will be recognized\n * as a special marker by inflate_sync().\n */\n if (flush === Z_FULL_FLUSH) {\n /*** CLEAR_HASH(s); ***/ /* forget history */\n zero$1(s.head); // Fill with NIL (= 0);\n\n if (s.lookahead === 0) {\n s.strstart = 0;\n s.block_start = 0;\n s.insert = 0;\n }\n }\n }\n flush_pending(strm);\n if (strm.avail_out === 0) {\n s.last_flush = -1; /* avoid BUF_ERROR at next call, see above */\n return Z_OK;\n }\n }\n }\n //Assert(strm->avail_out > 0, \"bug2\");\n //if (strm.avail_out <= 0) { throw new Error(\"bug2\");}\n\n if (flush !== Z_FINISH) { return Z_OK; }\n if (s.wrap <= 0) { return Z_STREAM_END; }\n\n /* Write the trailer */\n if (s.wrap === 2) {\n put_byte(s, strm.adler & 0xff);\n put_byte(s, (strm.adler >> 8) & 0xff);\n put_byte(s, (strm.adler >> 16) & 0xff);\n put_byte(s, (strm.adler >> 24) & 0xff);\n put_byte(s, strm.total_in & 0xff);\n put_byte(s, (strm.total_in >> 8) & 0xff);\n put_byte(s, (strm.total_in >> 16) & 0xff);\n put_byte(s, (strm.total_in >> 24) & 0xff);\n }\n else\n {\n putShortMSB(s, strm.adler >>> 16);\n putShortMSB(s, strm.adler & 0xffff);\n }\n\n flush_pending(strm);\n /* If avail_out is zero, the application will call deflate again\n * to flush the rest.\n */\n if (s.wrap > 0) { s.wrap = -s.wrap; }\n /* write the trailer only once! */\n return s.pending !== 0 ? Z_OK : Z_STREAM_END;\n};\n\n\nconst deflateEnd = (strm) => {\n\n if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) {\n return Z_STREAM_ERROR;\n }\n\n const status = strm.state.status;\n if (status !== INIT_STATE &&\n status !== EXTRA_STATE &&\n status !== NAME_STATE &&\n status !== COMMENT_STATE &&\n status !== HCRC_STATE &&\n status !== BUSY_STATE &&\n status !== FINISH_STATE\n ) {\n return err(strm, Z_STREAM_ERROR);\n }\n\n strm.state = null;\n\n return status === BUSY_STATE ? 
err(strm, Z_DATA_ERROR) : Z_OK;\n};\n\n\n/* =========================================================================\n * Initializes the compression dictionary from the given byte\n * sequence without producing any compressed output.\n */\nconst deflateSetDictionary = (strm, dictionary) => {\n\n let dictLength = dictionary.length;\n\n if (!strm/*== Z_NULL*/ || !strm.state/*== Z_NULL*/) {\n return Z_STREAM_ERROR;\n }\n\n const s = strm.state;\n const wrap = s.wrap;\n\n if (wrap === 2 || (wrap === 1 && s.status !== INIT_STATE) || s.lookahead) {\n return Z_STREAM_ERROR;\n }\n\n /* when using zlib wrappers, compute Adler-32 for provided dictionary */\n if (wrap === 1) {\n /* adler32(strm->adler, dictionary, dictLength); */\n strm.adler = adler32_1(strm.adler, dictionary, dictLength, 0);\n }\n\n s.wrap = 0; /* avoid computing Adler-32 in read_buf */\n\n /* if dictionary would fill window, just replace the history */\n if (dictLength >= s.w_size) {\n if (wrap === 0) { /* already empty otherwise */\n /*** CLEAR_HASH(s); ***/\n zero$1(s.head); // Fill with NIL (= 0);\n s.strstart = 0;\n s.block_start = 0;\n s.insert = 0;\n }\n /* use the tail */\n // dictionary = dictionary.slice(dictLength - s.w_size);\n let tmpDict = new Uint8Array(s.w_size);\n tmpDict.set(dictionary.subarray(dictLength - s.w_size, dictLength), 0);\n dictionary = tmpDict;\n dictLength = s.w_size;\n }\n /* insert dictionary into window and hash */\n const avail = strm.avail_in;\n const next = strm.next_in;\n const input = strm.input;\n strm.avail_in = dictLength;\n strm.next_in = 0;\n strm.input = dictionary;\n fill_window(s);\n while (s.lookahead >= MIN_MATCH$1) {\n let str = s.strstart;\n let n = s.lookahead - (MIN_MATCH$1 - 1);\n do {\n /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */\n s.ins_h = HASH(s, s.ins_h, s.window[str + MIN_MATCH$1 - 1]);\n\n s.prev[str & s.w_mask] = s.head[s.ins_h];\n\n s.head[s.ins_h] = str;\n str++;\n } while (--n);\n s.strstart = str;\n s.lookahead = MIN_MATCH$1 - 1;\n fill_window(s);\n }\n s.strstart += s.lookahead;\n s.block_start = s.strstart;\n s.insert = s.lookahead;\n s.lookahead = 0;\n s.match_length = s.prev_length = MIN_MATCH$1 - 1;\n s.match_available = 0;\n strm.next_in = next;\n strm.input = input;\n strm.avail_in = avail;\n s.wrap = wrap;\n return Z_OK;\n};\n\n\nvar deflateInit_1 = deflateInit;\nvar deflateInit2_1 = deflateInit2;\nvar deflateReset_1 = deflateReset;\nvar deflateResetKeep_1 = deflateResetKeep;\nvar deflateSetHeader_1 = deflateSetHeader;\nvar deflate_2 = deflate;\nvar deflateEnd_1 = deflateEnd;\nvar deflateSetDictionary_1 = deflateSetDictionary;\nvar deflateInfo = 'pako deflate (from Nodeca project)';\n\n/* Not implemented\nmodule.exports.deflateBound = deflateBound;\nmodule.exports.deflateCopy = deflateCopy;\nmodule.exports.deflateParams = deflateParams;\nmodule.exports.deflatePending = deflatePending;\nmodule.exports.deflatePrime = deflatePrime;\nmodule.exports.deflateTune = deflateTune;\n*/\n\nvar deflate_1 = {\n\tdeflateInit: deflateInit_1,\n\tdeflateInit2: deflateInit2_1,\n\tdeflateReset: deflateReset_1,\n\tdeflateResetKeep: deflateResetKeep_1,\n\tdeflateSetHeader: deflateSetHeader_1,\n\tdeflate: deflate_2,\n\tdeflateEnd: deflateEnd_1,\n\tdeflateSetDictionary: deflateSetDictionary_1,\n\tdeflateInfo: deflateInfo\n};\n\nconst _has = (obj, key) => {\n return Object.prototype.hasOwnProperty.call(obj, key);\n};\n\nvar assign = function (obj /*from1, from2, from3, ...*/) {\n const sources = Array.prototype.slice.call(arguments, 1);\n while (sources.length) 
{\n const source = sources.shift();\n if (!source) { continue; }\n\n if (typeof source !== 'object') {\n throw new TypeError(source + 'must be non-object');\n }\n\n for (const p in source) {\n if (_has(source, p)) {\n obj[p] = source[p];\n }\n }\n }\n\n return obj;\n};\n\n\n// Join array of chunks to single array.\nvar flattenChunks = (chunks) => {\n // calculate data length\n let len = 0;\n\n for (let i = 0, l = chunks.length; i < l; i++) {\n len += chunks[i].length;\n }\n\n // join chunks\n const result = new Uint8Array(len);\n\n for (let i = 0, pos = 0, l = chunks.length; i < l; i++) {\n let chunk = chunks[i];\n result.set(chunk, pos);\n pos += chunk.length;\n }\n\n return result;\n};\n\nvar common = {\n\tassign: assign,\n\tflattenChunks: flattenChunks\n};\n\n// String encode/decode helpers\n\n\n// Quick check if we can use fast array to bin string conversion\n//\n// - apply(Array) can fail on Android 2.2\n// - apply(Uint8Array) can fail on iOS 5.1 Safari\n//\nlet STR_APPLY_UIA_OK = true;\n\ntry { String.fromCharCode.apply(null, new Uint8Array(1)); } catch (__) { STR_APPLY_UIA_OK = false; }\n\n\n// Table with utf8 lengths (calculated by first byte of sequence)\n// Note, that 5 & 6-byte values and some 4-byte values can not be represented in JS,\n// because max possible codepoint is 0x10ffff\nconst _utf8len = new Uint8Array(256);\nfor (let q = 0; q < 256; q++) {\n _utf8len[q] = (q >= 252 ? 6 : q >= 248 ? 5 : q >= 240 ? 4 : q >= 224 ? 3 : q >= 192 ? 2 : 1);\n}\n_utf8len[254] = _utf8len[254] = 1; // Invalid sequence start\n\n\n// convert string to array (typed, when possible)\nvar string2buf = (str) => {\n let buf, c, c2, m_pos, i, str_len = str.length, buf_len = 0;\n\n // count binary size\n for (m_pos = 0; m_pos < str_len; m_pos++) {\n c = str.charCodeAt(m_pos);\n if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {\n c2 = str.charCodeAt(m_pos + 1);\n if ((c2 & 0xfc00) === 0xdc00) {\n c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);\n m_pos++;\n }\n }\n buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 3 : 4;\n }\n\n // allocate buffer\n buf = new Uint8Array(buf_len);\n\n // convert\n for (i = 0, m_pos = 0; i < buf_len; m_pos++) {\n c = str.charCodeAt(m_pos);\n if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {\n c2 = str.charCodeAt(m_pos + 1);\n if ((c2 & 0xfc00) === 0xdc00) {\n c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);\n m_pos++;\n }\n }\n if (c < 0x80) {\n /* one byte */\n buf[i++] = c;\n } else if (c < 0x800) {\n /* two bytes */\n buf[i++] = 0xC0 | (c >>> 6);\n buf[i++] = 0x80 | (c & 0x3f);\n } else if (c < 0x10000) {\n /* three bytes */\n buf[i++] = 0xE0 | (c >>> 12);\n buf[i++] = 0x80 | (c >>> 6 & 0x3f);\n buf[i++] = 0x80 | (c & 0x3f);\n } else {\n /* four bytes */\n buf[i++] = 0xf0 | (c >>> 18);\n buf[i++] = 0x80 | (c >>> 12 & 0x3f);\n buf[i++] = 0x80 | (c >>> 6 & 0x3f);\n buf[i++] = 0x80 | (c & 0x3f);\n }\n }\n\n return buf;\n};\n\n// Helper\nconst buf2binstring = (buf, len) => {\n // On Chrome, the arguments in a function call that are allowed is `65534`.\n // If the length of the buffer is smaller than that, we can use this optimization,\n // otherwise we will take a slower path.\n if (len < 65534) {\n if (buf.subarray && STR_APPLY_UIA_OK) {\n return String.fromCharCode.apply(null, buf.length === len ? 
buf : buf.subarray(0, len));\n }\n }\n\n let result = '';\n for (let i = 0; i < len; i++) {\n result += String.fromCharCode(buf[i]);\n }\n return result;\n};\n\n\n// convert array to string\nvar buf2string = (buf, max) => {\n let i, out;\n const len = max || buf.length;\n\n // Reserve max possible length (2 words per char)\n // NB: by unknown reasons, Array is significantly faster for\n // String.fromCharCode.apply than Uint16Array.\n const utf16buf = new Array(len * 2);\n\n for (out = 0, i = 0; i < len;) {\n let c = buf[i++];\n // quick process ascii\n if (c < 0x80) { utf16buf[out++] = c; continue; }\n\n let c_len = _utf8len[c];\n // skip 5 & 6 byte codes\n if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len - 1; continue; }\n\n // apply mask on first byte\n c &= c_len === 2 ? 0x1f : c_len === 3 ? 0x0f : 0x07;\n // join the rest\n while (c_len > 1 && i < len) {\n c = (c << 6) | (buf[i++] & 0x3f);\n c_len--;\n }\n\n // terminated by end of string?\n if (c_len > 1) { utf16buf[out++] = 0xfffd; continue; }\n\n if (c < 0x10000) {\n utf16buf[out++] = c;\n } else {\n c -= 0x10000;\n utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff);\n utf16buf[out++] = 0xdc00 | (c & 0x3ff);\n }\n }\n\n return buf2binstring(utf16buf, out);\n};\n\n\n// Calculate max possible position in utf8 buffer,\n// that will not break sequence. If that's not possible\n// - (very small limits) return max size as is.\n//\n// buf[] - utf8 bytes array\n// max - length limit (mandatory);\nvar utf8border = (buf, max) => {\n\n max = max || buf.length;\n if (max > buf.length) { max = buf.length; }\n\n // go back from last position, until start of sequence found\n let pos = max - 1;\n while (pos >= 0 && (buf[pos] & 0xC0) === 0x80) { pos--; }\n\n // Very small and broken sequence,\n // return max, because we should return something anyway.\n if (pos < 0) { return max; }\n\n // If we came to start of buffer - that means buffer is too small,\n // return max too.\n if (pos === 0) { return max; }\n\n return (pos + _utf8len[buf[pos]] > max) ? pos : max;\n};\n\nvar strings = {\n\tstring2buf: string2buf,\n\tbuf2string: buf2string,\n\tutf8border: utf8border\n};\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. 
This notice may not be removed or altered from any source distribution.\n\nfunction ZStream() {\n /* next input byte */\n this.input = null; // JS specific, because we have no pointers\n this.next_in = 0;\n /* number of bytes available at input */\n this.avail_in = 0;\n /* total number of input bytes read so far */\n this.total_in = 0;\n /* next output byte should be put there */\n this.output = null; // JS specific, because we have no pointers\n this.next_out = 0;\n /* remaining free space at output */\n this.avail_out = 0;\n /* total number of bytes output so far */\n this.total_out = 0;\n /* last error message, NULL if no error */\n this.msg = ''/*Z_NULL*/;\n /* not visible by applications */\n this.state = null;\n /* best guess about the data type: binary or text */\n this.data_type = 2/*Z_UNKNOWN*/;\n /* adler32 value of the uncompressed data */\n this.adler = 0;\n}\n\nvar zstream = ZStream;\n\nconst toString = Object.prototype.toString;\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\nconst {\n Z_NO_FLUSH: Z_NO_FLUSH$1, Z_SYNC_FLUSH, Z_FULL_FLUSH: Z_FULL_FLUSH$1, Z_FINISH: Z_FINISH$1,\n Z_OK: Z_OK$1, Z_STREAM_END: Z_STREAM_END$1,\n Z_DEFAULT_COMPRESSION: Z_DEFAULT_COMPRESSION$1,\n Z_DEFAULT_STRATEGY: Z_DEFAULT_STRATEGY$1,\n Z_DEFLATED: Z_DEFLATED$1\n} = constants;\n\n/* ===========================================================================*/\n\n\n/**\n * class Deflate\n *\n * Generic JS-style wrapper for zlib calls. If you don't need\n * streaming behaviour - use more simple functions: [[deflate]],\n * [[deflateRaw]] and [[gzip]].\n **/\n\n/* internal\n * Deflate.chunks -> Array\n *\n * Chunks of output data, if [[Deflate#onData]] not overridden.\n **/\n\n/**\n * Deflate.result -> Uint8Array\n *\n * Compressed result, generated by default [[Deflate#onData]]\n * and [[Deflate#onEnd]] handlers. Filled after you push last chunk\n * (call [[Deflate#push]] with `Z_FINISH` / `true` param).\n **/\n\n/**\n * Deflate.err -> Number\n *\n * Error code after deflate finished. 0 (Z_OK) on success.\n * You will not need it in real life, because deflate errors\n * are possible only on wrong options or bad `onData` / `onEnd`\n * custom handlers.\n **/\n\n/**\n * Deflate.msg -> String\n *\n * Error message, if [[Deflate.err]] != 0\n **/\n\n\n/**\n * new Deflate(options)\n * - options (Object): zlib deflate options.\n *\n * Creates new deflator instance with specified params. Throws exception\n * on bad params. 
Supported options:\n *\n * - `level`\n * - `windowBits`\n * - `memLevel`\n * - `strategy`\n * - `dictionary`\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information on these.\n *\n * Additional options, for internal needs:\n *\n * - `chunkSize` - size of generated data chunks (16K by default)\n * - `raw` (Boolean) - do raw deflate\n * - `gzip` (Boolean) - create gzip wrapper\n * - `header` (Object) - custom header for gzip\n * - `text` (Boolean) - true if compressed data believed to be text\n * - `time` (Number) - modification time, unix timestamp\n * - `os` (Number) - operation system code\n * - `extra` (Array) - array of bytes with extra data (max 65536)\n * - `name` (String) - file name (binary string)\n * - `comment` (String) - comment (binary string)\n * - `hcrc` (Boolean) - true if header crc should be added\n *\n * ##### Example:\n *\n * ```javascript\n * const pako = require('pako')\n * , chunk1 = new Uint8Array([1,2,3,4,5,6,7,8,9])\n * , chunk2 = new Uint8Array([10,11,12,13,14,15,16,17,18,19]);\n *\n * const deflate = new pako.Deflate({ level: 3});\n *\n * deflate.push(chunk1, false);\n * deflate.push(chunk2, true); // true -> last chunk\n *\n * if (deflate.err) { throw new Error(deflate.err); }\n *\n * console.log(deflate.result);\n * ```\n **/\nfunction Deflate(options) {\n this.options = common.assign({\n level: Z_DEFAULT_COMPRESSION$1,\n method: Z_DEFLATED$1,\n chunkSize: 16384,\n windowBits: 15,\n memLevel: 8,\n strategy: Z_DEFAULT_STRATEGY$1\n }, options || {});\n\n let opt = this.options;\n\n if (opt.raw && (opt.windowBits > 0)) {\n opt.windowBits = -opt.windowBits;\n }\n\n else if (opt.gzip && (opt.windowBits > 0) && (opt.windowBits < 16)) {\n opt.windowBits += 16;\n }\n\n this.err = 0; // error code, if happens (0 = Z_OK)\n this.msg = ''; // error message\n this.ended = false; // used to avoid multiple onEnd() calls\n this.chunks = []; // chunks of compressed data\n\n this.strm = new zstream();\n this.strm.avail_out = 0;\n\n let status = deflate_1.deflateInit2(\n this.strm,\n opt.level,\n opt.method,\n opt.windowBits,\n opt.memLevel,\n opt.strategy\n );\n\n if (status !== Z_OK$1) {\n throw new Error(messages[status]);\n }\n\n if (opt.header) {\n deflate_1.deflateSetHeader(this.strm, opt.header);\n }\n\n if (opt.dictionary) {\n let dict;\n // Convert data if needed\n if (typeof opt.dictionary === 'string') {\n // If we need to compress text, change encoding to utf8.\n dict = strings.string2buf(opt.dictionary);\n } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') {\n dict = new Uint8Array(opt.dictionary);\n } else {\n dict = opt.dictionary;\n }\n\n status = deflate_1.deflateSetDictionary(this.strm, dict);\n\n if (status !== Z_OK$1) {\n throw new Error(messages[status]);\n }\n\n this._dict_set = true;\n }\n}\n\n/**\n * Deflate#push(data[, flush_mode]) -> Boolean\n * - data (Uint8Array|ArrayBuffer|String): input data. Strings will be\n * converted to utf8 byte sequence.\n * - flush_mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes.\n * See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH.\n *\n * Sends input data to deflate pipe, generating [[Deflate#onData]] calls with\n * new compressed chunks. Returns `true` on success. The last data block must\n * have `flush_mode` Z_FINISH (or `true`). 
That will flush internal pending\n * buffers and call [[Deflate#onEnd]].\n *\n * On fail call [[Deflate#onEnd]] with error code and return false.\n *\n * ##### Example\n *\n * ```javascript\n * push(chunk, false); // push one of data chunks\n * ...\n * push(chunk, true); // push last chunk\n * ```\n **/\nDeflate.prototype.push = function (data, flush_mode) {\n const strm = this.strm;\n const chunkSize = this.options.chunkSize;\n let status, _flush_mode;\n\n if (this.ended) { return false; }\n\n if (flush_mode === ~~flush_mode) _flush_mode = flush_mode;\n else _flush_mode = flush_mode === true ? Z_FINISH$1 : Z_NO_FLUSH$1;\n\n // Convert data if needed\n if (typeof data === 'string') {\n // If we need to compress text, change encoding to utf8.\n strm.input = strings.string2buf(data);\n } else if (toString.call(data) === '[object ArrayBuffer]') {\n strm.input = new Uint8Array(data);\n } else {\n strm.input = data;\n }\n\n strm.next_in = 0;\n strm.avail_in = strm.input.length;\n\n for (;;) {\n if (strm.avail_out === 0) {\n strm.output = new Uint8Array(chunkSize);\n strm.next_out = 0;\n strm.avail_out = chunkSize;\n }\n\n // Make sure avail_out > 6 to avoid repeating markers\n if ((_flush_mode === Z_SYNC_FLUSH || _flush_mode === Z_FULL_FLUSH$1) && strm.avail_out <= 6) {\n this.onData(strm.output.subarray(0, strm.next_out));\n strm.avail_out = 0;\n continue;\n }\n\n status = deflate_1.deflate(strm, _flush_mode);\n\n // Ended => flush and finish\n if (status === Z_STREAM_END$1) {\n if (strm.next_out > 0) {\n this.onData(strm.output.subarray(0, strm.next_out));\n }\n status = deflate_1.deflateEnd(this.strm);\n this.onEnd(status);\n this.ended = true;\n return status === Z_OK$1;\n }\n\n // Flush if out buffer full\n if (strm.avail_out === 0) {\n this.onData(strm.output);\n continue;\n }\n\n // Flush if requested and has data\n if (_flush_mode > 0 && strm.next_out > 0) {\n this.onData(strm.output.subarray(0, strm.next_out));\n strm.avail_out = 0;\n continue;\n }\n\n if (strm.avail_in === 0) break;\n }\n\n return true;\n};\n\n\n/**\n * Deflate#onData(chunk) -> Void\n * - chunk (Uint8Array): output data.\n *\n * By default, stores data blocks in `chunks[]` property and glue\n * those in `onEnd`. Override this handler, if you need another behaviour.\n **/\nDeflate.prototype.onData = function (chunk) {\n this.chunks.push(chunk);\n};\n\n\n/**\n * Deflate#onEnd(status) -> Void\n * - status (Number): deflate status. 0 (Z_OK) on success,\n * other if not.\n *\n * Called once after you tell deflate that the input stream is\n * complete (Z_FINISH). 
By default - join collected chunks,\n * free memory and fill `results` / `err` properties.\n **/\nDeflate.prototype.onEnd = function (status) {\n // On success - join\n if (status === Z_OK$1) {\n this.result = common.flattenChunks(this.chunks);\n }\n this.chunks = [];\n this.err = status;\n this.msg = this.strm.msg;\n};\n\n\n/**\n * deflate(data[, options]) -> Uint8Array\n * - data (Uint8Array|String): input data to compress.\n * - options (Object): zlib deflate options.\n *\n * Compress `data` with deflate algorithm and `options`.\n *\n * Supported options are:\n *\n * - level\n * - windowBits\n * - memLevel\n * - strategy\n * - dictionary\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information on these.\n *\n * Sugar (options):\n *\n * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify\n * negative windowBits implicitly.\n *\n * ##### Example:\n *\n * ```javascript\n * const pako = require('pako')\n * const data = new Uint8Array([1,2,3,4,5,6,7,8,9]);\n *\n * console.log(pako.deflate(data));\n * ```\n **/\nfunction deflate$1(input, options) {\n const deflator = new Deflate(options);\n\n deflator.push(input, true);\n\n // That will never happens, if you don't cheat with options :)\n if (deflator.err) { throw deflator.msg || messages[deflator.err]; }\n\n return deflator.result;\n}\n\n\n/**\n * deflateRaw(data[, options]) -> Uint8Array\n * - data (Uint8Array|String): input data to compress.\n * - options (Object): zlib deflate options.\n *\n * The same as [[deflate]], but creates raw data, without wrapper\n * (header and adler32 crc).\n **/\nfunction deflateRaw(input, options) {\n options = options || {};\n options.raw = true;\n return deflate$1(input, options);\n}\n\n\n/**\n * gzip(data[, options]) -> Uint8Array\n * - data (Uint8Array|String): input data to compress.\n * - options (Object): zlib deflate options.\n *\n * The same as [[deflate]], but create gzip wrapper instead of\n * deflate one.\n **/\nfunction gzip(input, options) {\n options = options || {};\n options.gzip = true;\n return deflate$1(input, options);\n}\n\n\nvar Deflate_1 = Deflate;\nvar deflate_2$1 = deflate$1;\nvar deflateRaw_1 = deflateRaw;\nvar gzip_1 = gzip;\nvar constants$1 = constants;\n\nvar deflate_1$1 = {\n\tDeflate: Deflate_1,\n\tdeflate: deflate_2$1,\n\tdeflateRaw: deflateRaw_1,\n\tgzip: gzip_1,\n\tconstants: constants$1\n};\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. 
This notice may not be removed or altered from any source distribution.\n\n// See state defs from inflate.js\nconst BAD = 30; /* got a data error -- remain here until reset */\nconst TYPE = 12; /* i: waiting for type bits, including last-flag bit */\n\n/*\n Decode literal, length, and distance codes and write out the resulting\n literal and match bytes until either not enough input or output is\n available, an end-of-block is encountered, or a data error is encountered.\n When large enough input and output buffers are supplied to inflate(), for\n example, a 16K input buffer and a 64K output buffer, more than 95% of the\n inflate execution time is spent in this routine.\n\n Entry assumptions:\n\n state.mode === LEN\n strm.avail_in >= 6\n strm.avail_out >= 258\n start >= strm.avail_out\n state.bits < 8\n\n On return, state.mode is one of:\n\n LEN -- ran out of enough output space or enough available input\n TYPE -- reached end of block code, inflate() to interpret next block\n BAD -- error in block data\n\n Notes:\n\n - The maximum input bits used by a length/distance pair is 15 bits for the\n length code, 5 bits for the length extra, 15 bits for the distance code,\n and 13 bits for the distance extra. This totals 48 bits, or six bytes.\n Therefore if strm.avail_in >= 6, then there is enough input to avoid\n checking for available input while decoding.\n\n - The maximum bytes that a single length/distance pair can output is 258\n bytes, which is the maximum length that can be coded. inflate_fast()\n requires strm.avail_out >= 258 for each loop to avoid checking for\n output space.\n */\nvar inffast = function inflate_fast(strm, start) {\n let _in; /* local strm.input */\n let last; /* have enough input while in < last */\n let _out; /* local strm.output */\n let beg; /* inflate()'s initial strm.output */\n let end; /* while out < end, enough space available */\n//#ifdef INFLATE_STRICT\n let dmax; /* maximum distance from zlib header */\n//#endif\n let wsize; /* window size or zero if not using window */\n let whave; /* valid bytes in the window */\n let wnext; /* window write index */\n // Use `s_window` instead `window`, avoid conflict with instrumentation tools\n let s_window; /* allocated sliding window, if wsize != 0 */\n let hold; /* local strm.hold */\n let bits; /* local strm.bits */\n let lcode; /* local strm.lencode */\n let dcode; /* local strm.distcode */\n let lmask; /* mask for first level of length codes */\n let dmask; /* mask for first level of distance codes */\n let here; /* retrieved table entry */\n let op; /* code bits, operation, extra bits, or */\n /* window position, window bytes to copy */\n let len; /* match length, unused bytes */\n let dist; /* match distance */\n let from; /* where to copy match from */\n let from_source;\n\n\n let input, output; // JS specific, because we have no pointers\n\n /* copy state to local variables */\n const state = strm.state;\n //here = state.here;\n _in = strm.next_in;\n input = strm.input;\n last = _in + (strm.avail_in - 5);\n _out = strm.next_out;\n output = strm.output;\n beg = _out - (start - strm.avail_out);\n end = _out + (strm.avail_out - 257);\n//#ifdef INFLATE_STRICT\n dmax = state.dmax;\n//#endif\n wsize = state.wsize;\n whave = state.whave;\n wnext = state.wnext;\n s_window = state.window;\n hold = state.hold;\n bits = state.bits;\n lcode = state.lencode;\n dcode = state.distcode;\n lmask = (1 << state.lenbits) - 1;\n dmask = (1 << state.distbits) - 1;\n\n\n /* decode literals and length/distances until end-of-block or 
not enough\n input data or output space */\n\n top:\n do {\n if (bits < 15) {\n hold += input[_in++] << bits;\n bits += 8;\n hold += input[_in++] << bits;\n bits += 8;\n }\n\n here = lcode[hold & lmask];\n\n dolen:\n for (;;) { // Goto emulation\n op = here >>> 24/*here.bits*/;\n hold >>>= op;\n bits -= op;\n op = (here >>> 16) & 0xff/*here.op*/;\n if (op === 0) { /* literal */\n //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?\n // \"inflate: literal '%c'\\n\" :\n // \"inflate: literal 0x%02x\\n\", here.val));\n output[_out++] = here & 0xffff/*here.val*/;\n }\n else if (op & 16) { /* length base */\n len = here & 0xffff/*here.val*/;\n op &= 15; /* number of extra bits */\n if (op) {\n if (bits < op) {\n hold += input[_in++] << bits;\n bits += 8;\n }\n len += hold & ((1 << op) - 1);\n hold >>>= op;\n bits -= op;\n }\n //Tracevv((stderr, \"inflate: length %u\\n\", len));\n if (bits < 15) {\n hold += input[_in++] << bits;\n bits += 8;\n hold += input[_in++] << bits;\n bits += 8;\n }\n here = dcode[hold & dmask];\n\n dodist:\n for (;;) { // goto emulation\n op = here >>> 24/*here.bits*/;\n hold >>>= op;\n bits -= op;\n op = (here >>> 16) & 0xff/*here.op*/;\n\n if (op & 16) { /* distance base */\n dist = here & 0xffff/*here.val*/;\n op &= 15; /* number of extra bits */\n if (bits < op) {\n hold += input[_in++] << bits;\n bits += 8;\n if (bits < op) {\n hold += input[_in++] << bits;\n bits += 8;\n }\n }\n dist += hold & ((1 << op) - 1);\n//#ifdef INFLATE_STRICT\n if (dist > dmax) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD;\n break top;\n }\n//#endif\n hold >>>= op;\n bits -= op;\n //Tracevv((stderr, \"inflate: distance %u\\n\", dist));\n op = _out - beg; /* max distance in output */\n if (dist > op) { /* see if copy from window */\n op = dist - op; /* distance back in window */\n if (op > whave) {\n if (state.sane) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD;\n break top;\n }\n\n// (!) 
This block is disabled in zlib defaults,\n// don't enable it for binary compatibility\n//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR\n// if (len <= op - whave) {\n// do {\n// output[_out++] = 0;\n// } while (--len);\n// continue top;\n// }\n// len -= op - whave;\n// do {\n// output[_out++] = 0;\n// } while (--op > whave);\n// if (op === 0) {\n// from = _out - dist;\n// do {\n// output[_out++] = output[from++];\n// } while (--len);\n// continue top;\n// }\n//#endif\n }\n from = 0; // window index\n from_source = s_window;\n if (wnext === 0) { /* very common case */\n from += wsize - op;\n if (op < len) { /* some from window */\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = _out - dist; /* rest from output */\n from_source = output;\n }\n }\n else if (wnext < op) { /* wrap around window */\n from += wsize + wnext - op;\n op -= wnext;\n if (op < len) { /* some from end of window */\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = 0;\n if (wnext < len) { /* some from start of window */\n op = wnext;\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = _out - dist; /* rest from output */\n from_source = output;\n }\n }\n }\n else { /* contiguous in window */\n from += wnext - op;\n if (op < len) { /* some from window */\n len -= op;\n do {\n output[_out++] = s_window[from++];\n } while (--op);\n from = _out - dist; /* rest from output */\n from_source = output;\n }\n }\n while (len > 2) {\n output[_out++] = from_source[from++];\n output[_out++] = from_source[from++];\n output[_out++] = from_source[from++];\n len -= 3;\n }\n if (len) {\n output[_out++] = from_source[from++];\n if (len > 1) {\n output[_out++] = from_source[from++];\n }\n }\n }\n else {\n from = _out - dist; /* copy direct from output */\n do { /* minimum length is three */\n output[_out++] = output[from++];\n output[_out++] = output[from++];\n output[_out++] = output[from++];\n len -= 3;\n } while (len > 2);\n if (len) {\n output[_out++] = output[from++];\n if (len > 1) {\n output[_out++] = output[from++];\n }\n }\n }\n }\n else if ((op & 64) === 0) { /* 2nd level distance code */\n here = dcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))];\n continue dodist;\n }\n else {\n strm.msg = 'invalid distance code';\n state.mode = BAD;\n break top;\n }\n\n break; // need to emulate goto via \"continue\"\n }\n }\n else if ((op & 64) === 0) { /* 2nd level length code */\n here = lcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))];\n continue dolen;\n }\n else if (op & 32) { /* end-of-block */\n //Tracevv((stderr, \"inflate: end of block\\n\"));\n state.mode = TYPE;\n break top;\n }\n else {\n strm.msg = 'invalid literal/length code';\n state.mode = BAD;\n break top;\n }\n\n break; // need to emulate goto via \"continue\"\n }\n } while (_in < last && _out < end);\n\n /* return unused bytes (on entry, bits < 8, so in won't go too far back) */\n len = bits >> 3;\n _in -= len;\n bits -= len << 3;\n hold &= (1 << bits) - 1;\n\n /* update state and return */\n strm.next_in = _in;\n strm.next_out = _out;\n strm.avail_in = (_in < last ? 5 + (last - _in) : 5 - (_in - last));\n strm.avail_out = (_out < end ? 257 + (end - _out) : 257 - (_out - end));\n state.hold = hold;\n state.bits = bits;\n return;\n};\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. 
In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nconst MAXBITS = 15;\nconst ENOUGH_LENS = 852;\nconst ENOUGH_DISTS = 592;\n//const ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS);\n\nconst CODES = 0;\nconst LENS = 1;\nconst DISTS = 2;\n\nconst lbase = new Uint16Array([ /* Length codes 257..285 base */\n 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,\n 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0\n]);\n\nconst lext = new Uint8Array([ /* Length codes 257..285 extra */\n 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,\n 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 72, 78\n]);\n\nconst dbase = new Uint16Array([ /* Distance codes 0..29 base */\n 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,\n 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,\n 8193, 12289, 16385, 24577, 0, 0\n]);\n\nconst dext = new Uint8Array([ /* Distance codes 0..29 extra */\n 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22,\n 23, 23, 24, 24, 25, 25, 26, 26, 27, 27,\n 28, 28, 29, 29, 64, 64\n]);\n\nconst inflate_table = (type, lens, lens_index, codes, table, table_index, work, opts) =>\n{\n const bits = opts.bits;\n //here = opts.here; /* table entry for duplication */\n\n let len = 0; /* a code's length in bits */\n let sym = 0; /* index of code symbols */\n let min = 0, max = 0; /* minimum and maximum code lengths */\n let root = 0; /* number of index bits for root table */\n let curr = 0; /* number of index bits for current table */\n let drop = 0; /* code bits to drop for sub-table */\n let left = 0; /* number of prefix codes available */\n let used = 0; /* code entries in table used */\n let huff = 0; /* Huffman code */\n let incr; /* for incrementing code, index */\n let fill; /* index for replicating entries */\n let low; /* low bits for current root entry */\n let mask; /* mask for low root bits */\n let next; /* next available space in table */\n let base = null; /* base value table to use */\n let base_index = 0;\n// let shoextra; /* extra bits table to use */\n let end; /* use base and extra for symbol > end */\n const count = new Uint16Array(MAXBITS + 1); //[MAXBITS+1]; /* number of codes of each length */\n const offs = new Uint16Array(MAXBITS + 1); //[MAXBITS+1]; /* offsets in table for each length */\n let extra = null;\n let extra_index = 0;\n\n let here_bits, here_op, here_val;\n\n /*\n Process a set of code lengths to create a canonical Huffman code. The\n code lengths are lens[0..codes-1]. Each length corresponds to the\n symbols 0..codes-1. The Huffman code is generated by first sorting the\n symbols by length from short to long, and retaining the symbol order\n for codes with equal lengths. 
Then the code starts with all zero bits\n for the first code of the shortest length, and the codes are integer\n increments for the same length, and zeros are appended as the length\n increases. For the deflate format, these bits are stored backwards\n from their more natural integer increment ordering, and so when the\n decoding tables are built in the large loop below, the integer codes\n are incremented backwards.\n\n This routine assumes, but does not check, that all of the entries in\n lens[] are in the range 0..MAXBITS. The caller must assure this.\n 1..MAXBITS is interpreted as that code length. zero means that that\n symbol does not occur in this code.\n\n The codes are sorted by computing a count of codes for each length,\n creating from that a table of starting indices for each length in the\n sorted table, and then entering the symbols in order in the sorted\n table. The sorted table is work[], with that space being provided by\n the caller.\n\n The length counts are used for other purposes as well, i.e. finding\n the minimum and maximum length codes, determining if there are any\n codes at all, checking for a valid set of lengths, and looking ahead\n at length counts to determine sub-table sizes when building the\n decoding tables.\n */\n\n /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */\n for (len = 0; len <= MAXBITS; len++) {\n count[len] = 0;\n }\n for (sym = 0; sym < codes; sym++) {\n count[lens[lens_index + sym]]++;\n }\n\n /* bound code lengths, force root to be within code lengths */\n root = bits;\n for (max = MAXBITS; max >= 1; max--) {\n if (count[max] !== 0) { break; }\n }\n if (root > max) {\n root = max;\n }\n if (max === 0) { /* no symbols to code at all */\n //table.op[opts.table_index] = 64; //here.op = (var char)64; /* invalid code marker */\n //table.bits[opts.table_index] = 1; //here.bits = (var char)1;\n //table.val[opts.table_index++] = 0; //here.val = (var short)0;\n table[table_index++] = (1 << 24) | (64 << 16) | 0;\n\n\n //table.op[opts.table_index] = 64;\n //table.bits[opts.table_index] = 1;\n //table.val[opts.table_index++] = 0;\n table[table_index++] = (1 << 24) | (64 << 16) | 0;\n\n opts.bits = 1;\n return 0; /* no symbols, but wait for decoding to report error */\n }\n for (min = 1; min < max; min++) {\n if (count[min] !== 0) { break; }\n }\n if (root < min) {\n root = min;\n }\n\n /* check for an over-subscribed or incomplete set of lengths */\n left = 1;\n for (len = 1; len <= MAXBITS; len++) {\n left <<= 1;\n left -= count[len];\n if (left < 0) {\n return -1;\n } /* over-subscribed */\n }\n if (left > 0 && (type === CODES || max !== 1)) {\n return -1; /* incomplete set */\n }\n\n /* generate offsets into symbol table for each length for sorting */\n offs[1] = 0;\n for (len = 1; len < MAXBITS; len++) {\n offs[len + 1] = offs[len] + count[len];\n }\n\n /* sort symbols by length, by symbol order within each length */\n for (sym = 0; sym < codes; sym++) {\n if (lens[lens_index + sym] !== 0) {\n work[offs[lens[lens_index + sym]]++] = sym;\n }\n }\n\n /*\n Create and fill in decoding tables. In this loop, the table being\n filled is at next and has curr index bits. The code being used is huff\n with length len. That code is converted to an index by dropping drop\n bits off of the bottom. For codes where len is less than drop + curr,\n those top drop + curr - len bits are incremented through all values to\n fill the table with replicated entries.\n\n root is the number of index bits for the root table. 
When len exceeds\n root, sub-tables are created pointed to by the root entry with an index\n of the low root bits of huff. This is saved in low to check for when a\n new sub-table should be started. drop is zero when the root table is\n being filled, and drop is root when sub-tables are being filled.\n\n When a new sub-table is needed, it is necessary to look ahead in the\n code lengths to determine what size sub-table is needed. The length\n counts are used for this, and so count[] is decremented as codes are\n entered in the tables.\n\n used keeps track of how many table entries have been allocated from the\n provided *table space. It is checked for LENS and DIST tables against\n the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in\n the initial root table size constants. See the comments in inftrees.h\n for more information.\n\n sym increments through all symbols, and the loop terminates when\n all codes of length max, i.e. all codes, have been processed. This\n routine permits incomplete codes, so another loop after this one fills\n in the rest of the decoding tables with invalid code markers.\n */\n\n /* set up for code type */\n // poor man optimization - use if-else instead of switch,\n // to avoid deopts in old v8\n if (type === CODES) {\n base = extra = work; /* dummy value--not used */\n end = 19;\n\n } else if (type === LENS) {\n base = lbase;\n base_index -= 257;\n extra = lext;\n extra_index -= 257;\n end = 256;\n\n } else { /* DISTS */\n base = dbase;\n extra = dext;\n end = -1;\n }\n\n /* initialize opts for loop */\n huff = 0; /* starting code */\n sym = 0; /* starting code symbol */\n len = min; /* starting code length */\n next = table_index; /* current table to fill in */\n curr = root; /* current table index bits */\n drop = 0; /* current bits to drop from code for index */\n low = -1; /* trigger new sub-table when len > root */\n used = 1 << root; /* use root table entries */\n mask = used - 1; /* mask for comparing low */\n\n /* check available table space */\n if ((type === LENS && used > ENOUGH_LENS) ||\n (type === DISTS && used > ENOUGH_DISTS)) {\n return 1;\n }\n\n /* process all codes and make table entries */\n for (;;) {\n /* create table entry */\n here_bits = len - drop;\n if (work[sym] < end) {\n here_op = 0;\n here_val = work[sym];\n }\n else if (work[sym] > end) {\n here_op = extra[extra_index + work[sym]];\n here_val = base[base_index + work[sym]];\n }\n else {\n here_op = 32 + 64; /* end of block */\n here_val = 0;\n }\n\n /* replicate for those indices with low len bits equal to huff */\n incr = 1 << (len - drop);\n fill = 1 << curr;\n min = fill; /* save offset to next table */\n do {\n fill -= incr;\n table[next + (huff >> drop) + fill] = (here_bits << 24) | (here_op << 16) | here_val |0;\n } while (fill !== 0);\n\n /* backwards increment the len-bit code huff */\n incr = 1 << (len - 1);\n while (huff & incr) {\n incr >>= 1;\n }\n if (incr !== 0) {\n huff &= incr - 1;\n huff += incr;\n } else {\n huff = 0;\n }\n\n /* go to next symbol, update count, len */\n sym++;\n if (--count[len] === 0) {\n if (len === max) { break; }\n len = lens[lens_index + work[sym]];\n }\n\n /* create new sub-table if needed */\n if (len > root && (huff & mask) !== low) {\n /* if first time, transition to sub-tables */\n if (drop === 0) {\n drop = root;\n }\n\n /* increment past last table */\n next += min; /* here min is 1 << curr */\n\n /* determine length of next table */\n curr = len - drop;\n left = 1 << curr;\n while (curr + drop < max) {\n left -= 
count[curr + drop];\n if (left <= 0) { break; }\n curr++;\n left <<= 1;\n }\n\n /* check for enough space */\n used += 1 << curr;\n if ((type === LENS && used > ENOUGH_LENS) ||\n (type === DISTS && used > ENOUGH_DISTS)) {\n return 1;\n }\n\n /* point entry in root table to sub-table */\n low = huff & mask;\n /*table.op[low] = curr;\n table.bits[low] = root;\n table.val[low] = next - opts.table_index;*/\n table[low] = (root << 24) | (curr << 16) | (next - table_index) |0;\n }\n }\n\n /* fill in remaining table entry if code is incomplete (guaranteed to have\n at most one remaining entry, since if the code is incomplete, the\n maximum code length that was allowed to get this far is one bit) */\n if (huff !== 0) {\n //table.op[next + huff] = 64; /* invalid code marker */\n //table.bits[next + huff] = len - drop;\n //table.val[next + huff] = 0;\n table[next + huff] = ((len - drop) << 24) | (64 << 16) |0;\n }\n\n /* set return parameters */\n //opts.table_index += used;\n opts.bits = root;\n return 0;\n};\n\n\nvar inftrees = inflate_table;\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. 
This notice may not be removed or altered from any source distribution.\n\n\n\n\n\n\nconst CODES$1 = 0;\nconst LENS$1 = 1;\nconst DISTS$1 = 2;\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\nconst {\n Z_FINISH: Z_FINISH$2, Z_BLOCK: Z_BLOCK$1, Z_TREES,\n Z_OK: Z_OK$2, Z_STREAM_END: Z_STREAM_END$2, Z_NEED_DICT, Z_STREAM_ERROR: Z_STREAM_ERROR$1, Z_DATA_ERROR: Z_DATA_ERROR$1, Z_MEM_ERROR, Z_BUF_ERROR: Z_BUF_ERROR$1,\n Z_DEFLATED: Z_DEFLATED$2\n} = constants;\n\n\n/* STATES ====================================================================*/\n/* ===========================================================================*/\n\n\nconst HEAD = 1; /* i: waiting for magic header */\nconst FLAGS = 2; /* i: waiting for method and flags (gzip) */\nconst TIME = 3; /* i: waiting for modification time (gzip) */\nconst OS = 4; /* i: waiting for extra flags and operating system (gzip) */\nconst EXLEN = 5; /* i: waiting for extra length (gzip) */\nconst EXTRA = 6; /* i: waiting for extra bytes (gzip) */\nconst NAME = 7; /* i: waiting for end of file name (gzip) */\nconst COMMENT = 8; /* i: waiting for end of comment (gzip) */\nconst HCRC = 9; /* i: waiting for header crc (gzip) */\nconst DICTID = 10; /* i: waiting for dictionary check value */\nconst DICT = 11; /* waiting for inflateSetDictionary() call */\nconst TYPE$1 = 12; /* i: waiting for type bits, including last-flag bit */\nconst TYPEDO = 13; /* i: same, but skip check to exit inflate on new block */\nconst STORED = 14; /* i: waiting for stored size (length and complement) */\nconst COPY_ = 15; /* i/o: same as COPY below, but only first time in */\nconst COPY = 16; /* i/o: waiting for input or output to copy stored block */\nconst TABLE = 17; /* i: waiting for dynamic block table lengths */\nconst LENLENS = 18; /* i: waiting for code length code lengths */\nconst CODELENS = 19; /* i: waiting for length/lit and distance code lengths */\nconst LEN_ = 20; /* i: same as LEN below, but only first time in */\nconst LEN = 21; /* i: waiting for length/lit/eob code */\nconst LENEXT = 22; /* i: waiting for length extra bits */\nconst DIST = 23; /* i: waiting for distance code */\nconst DISTEXT = 24; /* i: waiting for distance extra bits */\nconst MATCH = 25; /* o: waiting for output space to copy string */\nconst LIT = 26; /* o: waiting for output space to write literal */\nconst CHECK = 27; /* i: waiting for 32-bit check value */\nconst LENGTH = 28; /* i: waiting for 32-bit length (gzip) */\nconst DONE = 29; /* finished check, done -- remain here until reset */\nconst BAD$1 = 30; /* got a data error -- remain here until reset */\nconst MEM = 31; /* got an inflate() memory error -- remain here until reset */\nconst SYNC = 32; /* looking for synchronization bytes to restart inflate() */\n\n/* ===========================================================================*/\n\n\n\nconst ENOUGH_LENS$1 = 852;\nconst ENOUGH_DISTS$1 = 592;\n//const ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS);\n\nconst MAX_WBITS$1 = 15;\n/* 32K LZ77 window */\nconst DEF_WBITS = MAX_WBITS$1;\n\n\nconst zswap32 = (q) => {\n\n return (((q >>> 24) & 0xff) +\n ((q >>> 8) & 0xff00) +\n ((q & 0xff00) << 8) +\n ((q & 0xff) << 24));\n};\n\n\nfunction InflateState() {\n this.mode = 0; /* current inflate mode */\n this.last = false; /* true if processing last block */\n this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */\n this.havedict = false; /* true if dictionary provided 
*/\n this.flags = 0; /* gzip header method and flags (0 if zlib) */\n this.dmax = 0; /* zlib header max distance (INFLATE_STRICT) */\n this.check = 0; /* protected copy of check value */\n this.total = 0; /* protected copy of output count */\n // TODO: may be {}\n this.head = null; /* where to save gzip header information */\n\n /* sliding window */\n this.wbits = 0; /* log base 2 of requested window size */\n this.wsize = 0; /* window size or zero if not using window */\n this.whave = 0; /* valid bytes in the window */\n this.wnext = 0; /* window write index */\n this.window = null; /* allocated sliding window, if needed */\n\n /* bit accumulator */\n this.hold = 0; /* input bit accumulator */\n this.bits = 0; /* number of bits in \"in\" */\n\n /* for string and stored block copying */\n this.length = 0; /* literal or length of data to copy */\n this.offset = 0; /* distance back to copy string from */\n\n /* for table and code decoding */\n this.extra = 0; /* extra bits needed */\n\n /* fixed and dynamic code tables */\n this.lencode = null; /* starting table for length/literal codes */\n this.distcode = null; /* starting table for distance codes */\n this.lenbits = 0; /* index bits for lencode */\n this.distbits = 0; /* index bits for distcode */\n\n /* dynamic table building */\n this.ncode = 0; /* number of code length code lengths */\n this.nlen = 0; /* number of length code lengths */\n this.ndist = 0; /* number of distance code lengths */\n this.have = 0; /* number of code lengths in lens[] */\n this.next = null; /* next available space in codes[] */\n\n this.lens = new Uint16Array(320); /* temporary storage for code lengths */\n this.work = new Uint16Array(288); /* work area for code table building */\n\n /*\n because we don't have pointers in js, we use lencode and distcode directly\n as buffers so we don't need codes\n */\n //this.codes = new Int32Array(ENOUGH); /* space for code tables */\n this.lendyn = null; /* dynamic table for length/literal codes (JS specific) */\n this.distdyn = null; /* dynamic table for distance codes (JS specific) */\n this.sane = 0; /* if false, allow invalid distance too far */\n this.back = 0; /* bits back of last unprocessed length/lit */\n this.was = 0; /* initial length of match */\n}\n\n\nconst inflateResetKeep = (strm) => {\n\n if (!strm || !strm.state) { return Z_STREAM_ERROR$1; }\n const state = strm.state;\n strm.total_in = strm.total_out = state.total = 0;\n strm.msg = ''; /*Z_NULL*/\n if (state.wrap) { /* to support ill-conceived Java test suite */\n strm.adler = state.wrap & 1;\n }\n state.mode = HEAD;\n state.last = 0;\n state.havedict = 0;\n state.dmax = 32768;\n state.head = null/*Z_NULL*/;\n state.hold = 0;\n state.bits = 0;\n //state.lencode = state.distcode = state.next = state.codes;\n state.lencode = state.lendyn = new Int32Array(ENOUGH_LENS$1);\n state.distcode = state.distdyn = new Int32Array(ENOUGH_DISTS$1);\n\n state.sane = 1;\n state.back = -1;\n //Tracev((stderr, \"inflate: reset\\n\"));\n return Z_OK$2;\n};\n\n\nconst inflateReset = (strm) => {\n\n if (!strm || !strm.state) { return Z_STREAM_ERROR$1; }\n const state = strm.state;\n state.wsize = 0;\n state.whave = 0;\n state.wnext = 0;\n return inflateResetKeep(strm);\n\n};\n\n\nconst inflateReset2 = (strm, windowBits) => {\n let wrap;\n\n /* get the state */\n if (!strm || !strm.state) { return Z_STREAM_ERROR$1; }\n const state = strm.state;\n\n /* extract wrap request from windowBits parameter */\n if (windowBits < 0) {\n wrap = 0;\n windowBits = -windowBits;\n }\n else 
{\n wrap = (windowBits >> 4) + 1;\n if (windowBits < 48) {\n windowBits &= 15;\n }\n }\n\n /* set number of window bits, free window if different */\n if (windowBits && (windowBits < 8 || windowBits > 15)) {\n return Z_STREAM_ERROR$1;\n }\n if (state.window !== null && state.wbits !== windowBits) {\n state.window = null;\n }\n\n /* update state and reset the rest of it */\n state.wrap = wrap;\n state.wbits = windowBits;\n return inflateReset(strm);\n};\n\n\nconst inflateInit2 = (strm, windowBits) => {\n\n if (!strm) { return Z_STREAM_ERROR$1; }\n //strm.msg = Z_NULL; /* in case we return an error */\n\n const state = new InflateState();\n\n //if (state === Z_NULL) return Z_MEM_ERROR;\n //Tracev((stderr, \"inflate: allocated\\n\"));\n strm.state = state;\n state.window = null/*Z_NULL*/;\n const ret = inflateReset2(strm, windowBits);\n if (ret !== Z_OK$2) {\n strm.state = null/*Z_NULL*/;\n }\n return ret;\n};\n\n\nconst inflateInit = (strm) => {\n\n return inflateInit2(strm, DEF_WBITS);\n};\n\n\n/*\n Return state with length and distance decoding tables and index sizes set to\n fixed code decoding. Normally this returns fixed tables from inffixed.h.\n If BUILDFIXED is defined, then instead this routine builds the tables the\n first time it's called, and returns those tables the first time and\n thereafter. This reduces the size of the code by about 2K bytes, in\n exchange for a little execution time. However, BUILDFIXED should not be\n used for threaded applications, since the rewriting of the tables and virgin\n may not be thread-safe.\n */\nlet virgin = true;\n\nlet lenfix, distfix; // We have no pointers in JS, so keep tables separate\n\n\nconst fixedtables = (state) => {\n\n /* build fixed huffman tables if first call (may not be thread safe) */\n if (virgin) {\n lenfix = new Int32Array(512);\n distfix = new Int32Array(32);\n\n /* literal/length table */\n let sym = 0;\n while (sym < 144) { state.lens[sym++] = 8; }\n while (sym < 256) { state.lens[sym++] = 9; }\n while (sym < 280) { state.lens[sym++] = 7; }\n while (sym < 288) { state.lens[sym++] = 8; }\n\n inftrees(LENS$1, state.lens, 0, 288, lenfix, 0, state.work, { bits: 9 });\n\n /* distance table */\n sym = 0;\n while (sym < 32) { state.lens[sym++] = 5; }\n\n inftrees(DISTS$1, state.lens, 0, 32, distfix, 0, state.work, { bits: 5 });\n\n /* do this just once */\n virgin = false;\n }\n\n state.lencode = lenfix;\n state.lenbits = 9;\n state.distcode = distfix;\n state.distbits = 5;\n};\n\n\n/*\n Update the window with the last wsize (normally 32K) bytes written before\n returning. If window does not exist yet, create it. 
This is only called\n when a window is already in use, or when output has been written during this\n inflate call, but the end of the deflate stream has not been reached yet.\n It is also called to create a window for dictionary data when a dictionary\n is loaded.\n\n Providing output buffers larger than 32K to inflate() should provide a speed\n advantage, since only the last 32K of output is copied to the sliding window\n upon return from inflate(), and since all distances after the first 32K of\n output will fall in the output data, making match copies simpler and faster.\n The advantage may be dependent on the size of the processor's data caches.\n */\nconst updatewindow = (strm, src, end, copy) => {\n\n let dist;\n const state = strm.state;\n\n /* if it hasn't been done already, allocate space for the window */\n if (state.window === null) {\n state.wsize = 1 << state.wbits;\n state.wnext = 0;\n state.whave = 0;\n\n state.window = new Uint8Array(state.wsize);\n }\n\n /* copy state->wsize or less output bytes into the circular window */\n if (copy >= state.wsize) {\n state.window.set(src.subarray(end - state.wsize, end), 0);\n state.wnext = 0;\n state.whave = state.wsize;\n }\n else {\n dist = state.wsize - state.wnext;\n if (dist > copy) {\n dist = copy;\n }\n //zmemcpy(state->window + state->wnext, end - copy, dist);\n state.window.set(src.subarray(end - copy, end - copy + dist), state.wnext);\n copy -= dist;\n if (copy) {\n //zmemcpy(state->window, end - copy, copy);\n state.window.set(src.subarray(end - copy, end), 0);\n state.wnext = copy;\n state.whave = state.wsize;\n }\n else {\n state.wnext += dist;\n if (state.wnext === state.wsize) { state.wnext = 0; }\n if (state.whave < state.wsize) { state.whave += dist; }\n }\n }\n return 0;\n};\n\n\nconst inflate = (strm, flush) => {\n\n let state;\n let input, output; // input/output buffers\n let next; /* next input INDEX */\n let put; /* next output INDEX */\n let have, left; /* available input and output */\n let hold; /* bit buffer */\n let bits; /* bits in bit buffer */\n let _in, _out; /* save starting available input and output */\n let copy; /* number of stored or match bytes to copy */\n let from; /* where to copy match bytes from */\n let from_source;\n let here = 0; /* current decoding table entry */\n let here_bits, here_op, here_val; // paked \"here\" denormalized (JS specific)\n //let last; /* parent table entry */\n let last_bits, last_op, last_val; // paked \"last\" denormalized (JS specific)\n let len; /* length to copy for repeats, bits to drop */\n let ret; /* return code */\n const hbuf = new Uint8Array(4); /* buffer for gzip header crc calculation */\n let opts;\n\n let n; // temporary variable for NEED_BITS\n\n const order = /* permutation of code lengths */\n new Uint8Array([ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ]);\n\n\n if (!strm || !strm.state || !strm.output ||\n (!strm.input && strm.avail_in !== 0)) {\n return Z_STREAM_ERROR$1;\n }\n\n state = strm.state;\n if (state.mode === TYPE$1) { state.mode = TYPEDO; } /* skip check */\n\n\n //--- LOAD() ---\n put = strm.next_out;\n output = strm.output;\n left = strm.avail_out;\n next = strm.next_in;\n input = strm.input;\n have = strm.avail_in;\n hold = state.hold;\n bits = state.bits;\n //---\n\n _in = have;\n _out = left;\n ret = Z_OK$2;\n\n inf_leave: // goto emulation\n for (;;) {\n switch (state.mode) {\n case HEAD:\n if (state.wrap === 0) {\n state.mode = TYPEDO;\n break;\n }\n //=== NEEDBITS(16);\n while (bits < 16) {\n if (have === 
0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if ((state.wrap & 2) && hold === 0x8b1f) { /* gzip header */\n state.check = 0/*crc32(0L, Z_NULL, 0)*/;\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n state.check = crc32_1(state.check, hbuf, 2, 0);\n //===//\n\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = FLAGS;\n break;\n }\n state.flags = 0; /* expect zlib header */\n if (state.head) {\n state.head.done = false;\n }\n if (!(state.wrap & 1) || /* check if zlib header allowed */\n (((hold & 0xff)/*BITS(8)*/ << 8) + (hold >> 8)) % 31) {\n strm.msg = 'incorrect header check';\n state.mode = BAD$1;\n break;\n }\n if ((hold & 0x0f)/*BITS(4)*/ !== Z_DEFLATED$2) {\n strm.msg = 'unknown compression method';\n state.mode = BAD$1;\n break;\n }\n //--- DROPBITS(4) ---//\n hold >>>= 4;\n bits -= 4;\n //---//\n len = (hold & 0x0f)/*BITS(4)*/ + 8;\n if (state.wbits === 0) {\n state.wbits = len;\n }\n else if (len > state.wbits) {\n strm.msg = 'invalid window size';\n state.mode = BAD$1;\n break;\n }\n\n // !!! pako patch. Force use `options.windowBits` if passed.\n // Required to always use max window size by default.\n state.dmax = 1 << state.wbits;\n //state.dmax = 1 << len;\n\n //Tracev((stderr, \"inflate: zlib header ok\\n\"));\n strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/;\n state.mode = hold & 0x200 ? DICTID : TYPE$1;\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n break;\n case FLAGS:\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.flags = hold;\n if ((state.flags & 0xff) !== Z_DEFLATED$2) {\n strm.msg = 'unknown compression method';\n state.mode = BAD$1;\n break;\n }\n if (state.flags & 0xe000) {\n strm.msg = 'unknown header flags set';\n state.mode = BAD$1;\n break;\n }\n if (state.head) {\n state.head.text = ((hold >> 8) & 1);\n }\n if (state.flags & 0x0200) {\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n state.check = crc32_1(state.check, hbuf, 2, 0);\n //===//\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = TIME;\n /* falls through */\n case TIME:\n //=== NEEDBITS(32); */\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (state.head) {\n state.head.time = hold;\n }\n if (state.flags & 0x0200) {\n //=== CRC4(state.check, hold)\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n hbuf[2] = (hold >>> 16) & 0xff;\n hbuf[3] = (hold >>> 24) & 0xff;\n state.check = crc32_1(state.check, hbuf, 4, 0);\n //===\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = OS;\n /* falls through */\n case OS:\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (state.head) {\n state.head.xflags = (hold & 0xff);\n state.head.os = (hold >> 8);\n }\n if (state.flags & 0x0200) {\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n state.check = crc32_1(state.check, hbuf, 2, 0);\n //===//\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = EXLEN;\n /* falls through */\n case EXLEN:\n if (state.flags & 0x0400) {\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += 
input[next++] << bits;\n bits += 8;\n }\n //===//\n state.length = hold;\n if (state.head) {\n state.head.extra_len = hold;\n }\n if (state.flags & 0x0200) {\n //=== CRC2(state.check, hold);\n hbuf[0] = hold & 0xff;\n hbuf[1] = (hold >>> 8) & 0xff;\n state.check = crc32_1(state.check, hbuf, 2, 0);\n //===//\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n }\n else if (state.head) {\n state.head.extra = null/*Z_NULL*/;\n }\n state.mode = EXTRA;\n /* falls through */\n case EXTRA:\n if (state.flags & 0x0400) {\n copy = state.length;\n if (copy > have) { copy = have; }\n if (copy) {\n if (state.head) {\n len = state.head.extra_len - state.length;\n if (!state.head.extra) {\n // Use untyped array for more convenient processing later\n state.head.extra = new Uint8Array(state.head.extra_len);\n }\n state.head.extra.set(\n input.subarray(\n next,\n // extra field is limited to 65536 bytes\n // - no need for additional size check\n next + copy\n ),\n /*len + copy > state.head.extra_max - len ? state.head.extra_max : copy,*/\n len\n );\n //zmemcpy(state.head.extra + len, next,\n // len + copy > state.head.extra_max ?\n // state.head.extra_max - len : copy);\n }\n if (state.flags & 0x0200) {\n state.check = crc32_1(state.check, input, copy, next);\n }\n have -= copy;\n next += copy;\n state.length -= copy;\n }\n if (state.length) { break inf_leave; }\n }\n state.length = 0;\n state.mode = NAME;\n /* falls through */\n case NAME:\n if (state.flags & 0x0800) {\n if (have === 0) { break inf_leave; }\n copy = 0;\n do {\n // TODO: 2 or 1 bytes?\n len = input[next + copy++];\n /* use constant limit because in js we should not preallocate memory */\n if (state.head && len &&\n (state.length < 65536 /*state.head.name_max*/)) {\n state.head.name += String.fromCharCode(len);\n }\n } while (len && copy < have);\n\n if (state.flags & 0x0200) {\n state.check = crc32_1(state.check, input, copy, next);\n }\n have -= copy;\n next += copy;\n if (len) { break inf_leave; }\n }\n else if (state.head) {\n state.head.name = null;\n }\n state.length = 0;\n state.mode = COMMENT;\n /* falls through */\n case COMMENT:\n if (state.flags & 0x1000) {\n if (have === 0) { break inf_leave; }\n copy = 0;\n do {\n len = input[next + copy++];\n /* use constant limit because in js we should not preallocate memory */\n if (state.head && len &&\n (state.length < 65536 /*state.head.comm_max*/)) {\n state.head.comment += String.fromCharCode(len);\n }\n } while (len && copy < have);\n if (state.flags & 0x0200) {\n state.check = crc32_1(state.check, input, copy, next);\n }\n have -= copy;\n next += copy;\n if (len) { break inf_leave; }\n }\n else if (state.head) {\n state.head.comment = null;\n }\n state.mode = HCRC;\n /* falls through */\n case HCRC:\n if (state.flags & 0x0200) {\n //=== NEEDBITS(16); */\n while (bits < 16) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (hold !== (state.check & 0xffff)) {\n strm.msg = 'header crc mismatch';\n state.mode = BAD$1;\n break;\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n }\n if (state.head) {\n state.head.hcrc = ((state.flags >> 9) & 1);\n state.head.done = true;\n }\n strm.adler = state.check = 0;\n state.mode = TYPE$1;\n break;\n case DICTID:\n //=== NEEDBITS(32); */\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n strm.adler = state.check = zswap32(hold);\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n 
state.mode = DICT;\n /* falls through */\n case DICT:\n if (state.havedict === 0) {\n //--- RESTORE() ---\n strm.next_out = put;\n strm.avail_out = left;\n strm.next_in = next;\n strm.avail_in = have;\n state.hold = hold;\n state.bits = bits;\n //---\n return Z_NEED_DICT;\n }\n strm.adler = state.check = 1/*adler32(0L, Z_NULL, 0)*/;\n state.mode = TYPE$1;\n /* falls through */\n case TYPE$1:\n if (flush === Z_BLOCK$1 || flush === Z_TREES) { break inf_leave; }\n /* falls through */\n case TYPEDO:\n if (state.last) {\n //--- BYTEBITS() ---//\n hold >>>= bits & 7;\n bits -= bits & 7;\n //---//\n state.mode = CHECK;\n break;\n }\n //=== NEEDBITS(3); */\n while (bits < 3) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.last = (hold & 0x01)/*BITS(1)*/;\n //--- DROPBITS(1) ---//\n hold >>>= 1;\n bits -= 1;\n //---//\n\n switch ((hold & 0x03)/*BITS(2)*/) {\n case 0: /* stored block */\n //Tracev((stderr, \"inflate: stored block%s\\n\",\n // state.last ? \" (last)\" : \"\"));\n state.mode = STORED;\n break;\n case 1: /* fixed block */\n fixedtables(state);\n //Tracev((stderr, \"inflate: fixed codes block%s\\n\",\n // state.last ? \" (last)\" : \"\"));\n state.mode = LEN_; /* decode codes */\n if (flush === Z_TREES) {\n //--- DROPBITS(2) ---//\n hold >>>= 2;\n bits -= 2;\n //---//\n break inf_leave;\n }\n break;\n case 2: /* dynamic block */\n //Tracev((stderr, \"inflate: dynamic codes block%s\\n\",\n // state.last ? \" (last)\" : \"\"));\n state.mode = TABLE;\n break;\n case 3:\n strm.msg = 'invalid block type';\n state.mode = BAD$1;\n }\n //--- DROPBITS(2) ---//\n hold >>>= 2;\n bits -= 2;\n //---//\n break;\n case STORED:\n //--- BYTEBITS() ---// /* go to byte boundary */\n hold >>>= bits & 7;\n bits -= bits & 7;\n //---//\n //=== NEEDBITS(32); */\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if ((hold & 0xffff) !== ((hold >>> 16) ^ 0xffff)) {\n strm.msg = 'invalid stored block lengths';\n state.mode = BAD$1;\n break;\n }\n state.length = hold & 0xffff;\n //Tracev((stderr, \"inflate: stored length %u\\n\",\n // state.length));\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n state.mode = COPY_;\n if (flush === Z_TREES) { break inf_leave; }\n /* falls through */\n case COPY_:\n state.mode = COPY;\n /* falls through */\n case COPY:\n copy = state.length;\n if (copy) {\n if (copy > have) { copy = have; }\n if (copy > left) { copy = left; }\n if (copy === 0) { break inf_leave; }\n //--- zmemcpy(put, next, copy); ---\n output.set(input.subarray(next, next + copy), put);\n //---//\n have -= copy;\n next += copy;\n left -= copy;\n put += copy;\n state.length -= copy;\n break;\n }\n //Tracev((stderr, \"inflate: stored end\\n\"));\n state.mode = TYPE$1;\n break;\n case TABLE:\n //=== NEEDBITS(14); */\n while (bits < 14) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.nlen = (hold & 0x1f)/*BITS(5)*/ + 257;\n //--- DROPBITS(5) ---//\n hold >>>= 5;\n bits -= 5;\n //---//\n state.ndist = (hold & 0x1f)/*BITS(5)*/ + 1;\n //--- DROPBITS(5) ---//\n hold >>>= 5;\n bits -= 5;\n //---//\n state.ncode = (hold & 0x0f)/*BITS(4)*/ + 4;\n //--- DROPBITS(4) ---//\n hold >>>= 4;\n bits -= 4;\n //---//\n//#ifndef PKZIP_BUG_WORKAROUND\n if (state.nlen > 286 || state.ndist > 30) {\n strm.msg = 'too many length or distance symbols';\n state.mode = BAD$1;\n break;\n }\n//#endif\n 
//Tracev((stderr, \"inflate: table sizes ok\\n\"));\n state.have = 0;\n state.mode = LENLENS;\n /* falls through */\n case LENLENS:\n while (state.have < state.ncode) {\n //=== NEEDBITS(3);\n while (bits < 3) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.lens[order[state.have++]] = (hold & 0x07);//BITS(3);\n //--- DROPBITS(3) ---//\n hold >>>= 3;\n bits -= 3;\n //---//\n }\n while (state.have < 19) {\n state.lens[order[state.have++]] = 0;\n }\n // We have separate tables & no pointers. 2 commented lines below not needed.\n //state.next = state.codes;\n //state.lencode = state.next;\n // Switch to use dynamic table\n state.lencode = state.lendyn;\n state.lenbits = 7;\n\n opts = { bits: state.lenbits };\n ret = inftrees(CODES$1, state.lens, 0, 19, state.lencode, 0, state.work, opts);\n state.lenbits = opts.bits;\n\n if (ret) {\n strm.msg = 'invalid code lengths set';\n state.mode = BAD$1;\n break;\n }\n //Tracev((stderr, \"inflate: code lengths ok\\n\"));\n state.have = 0;\n state.mode = CODELENS;\n /* falls through */\n case CODELENS:\n while (state.have < state.nlen + state.ndist) {\n for (;;) {\n here = state.lencode[hold & ((1 << state.lenbits) - 1)];/*BITS(state.lenbits)*/\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n if (here_val < 16) {\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n state.lens[state.have++] = here_val;\n }\n else {\n if (here_val === 16) {\n //=== NEEDBITS(here.bits + 2);\n n = here_bits + 2;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n if (state.have === 0) {\n strm.msg = 'invalid bit length repeat';\n state.mode = BAD$1;\n break;\n }\n len = state.lens[state.have - 1];\n copy = 3 + (hold & 0x03);//BITS(2);\n //--- DROPBITS(2) ---//\n hold >>>= 2;\n bits -= 2;\n //---//\n }\n else if (here_val === 17) {\n //=== NEEDBITS(here.bits + 3);\n n = here_bits + 3;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n len = 0;\n copy = 3 + (hold & 0x07);//BITS(3);\n //--- DROPBITS(3) ---//\n hold >>>= 3;\n bits -= 3;\n //---//\n }\n else {\n //=== NEEDBITS(here.bits + 7);\n n = here_bits + 7;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n len = 0;\n copy = 11 + (hold & 0x7f);//BITS(7);\n //--- DROPBITS(7) ---//\n hold >>>= 7;\n bits -= 7;\n //---//\n }\n if (state.have + copy > state.nlen + state.ndist) {\n strm.msg = 'invalid bit length repeat';\n state.mode = BAD$1;\n break;\n }\n while (copy--) {\n state.lens[state.have++] = len;\n }\n }\n }\n\n /* handle error breaks in while */\n if (state.mode === BAD$1) { break; }\n\n /* check for end-of-block code (better have one) */\n if (state.lens[256] === 0) {\n strm.msg = 'invalid code -- missing end-of-block';\n state.mode = BAD$1;\n break;\n }\n\n /* build code tables -- note: do not change the 
lenbits or distbits\n values here (9 and 6) without reading the comments in inftrees.h\n concerning the ENOUGH constants, which depend on those values */\n state.lenbits = 9;\n\n opts = { bits: state.lenbits };\n ret = inftrees(LENS$1, state.lens, 0, state.nlen, state.lencode, 0, state.work, opts);\n // We have separate tables & no pointers. 2 commented lines below not needed.\n // state.next_index = opts.table_index;\n state.lenbits = opts.bits;\n // state.lencode = state.next;\n\n if (ret) {\n strm.msg = 'invalid literal/lengths set';\n state.mode = BAD$1;\n break;\n }\n\n state.distbits = 6;\n //state.distcode.copy(state.codes);\n // Switch to use dynamic table\n state.distcode = state.distdyn;\n opts = { bits: state.distbits };\n ret = inftrees(DISTS$1, state.lens, state.nlen, state.ndist, state.distcode, 0, state.work, opts);\n // We have separate tables & no pointers. 2 commented lines below not needed.\n // state.next_index = opts.table_index;\n state.distbits = opts.bits;\n // state.distcode = state.next;\n\n if (ret) {\n strm.msg = 'invalid distances set';\n state.mode = BAD$1;\n break;\n }\n //Tracev((stderr, 'inflate: codes ok\\n'));\n state.mode = LEN_;\n if (flush === Z_TREES) { break inf_leave; }\n /* falls through */\n case LEN_:\n state.mode = LEN;\n /* falls through */\n case LEN:\n if (have >= 6 && left >= 258) {\n //--- RESTORE() ---\n strm.next_out = put;\n strm.avail_out = left;\n strm.next_in = next;\n strm.avail_in = have;\n state.hold = hold;\n state.bits = bits;\n //---\n inffast(strm, _out);\n //--- LOAD() ---\n put = strm.next_out;\n output = strm.output;\n left = strm.avail_out;\n next = strm.next_in;\n input = strm.input;\n have = strm.avail_in;\n hold = state.hold;\n bits = state.bits;\n //---\n\n if (state.mode === TYPE$1) {\n state.back = -1;\n }\n break;\n }\n state.back = 0;\n for (;;) {\n here = state.lencode[hold & ((1 << state.lenbits) - 1)]; /*BITS(state.lenbits)*/\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if (here_bits <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n if (here_op && (here_op & 0xf0) === 0) {\n last_bits = here_bits;\n last_op = here_op;\n last_val = here_val;\n for (;;) {\n here = state.lencode[last_val +\n ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)];\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((last_bits + here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n //--- DROPBITS(last.bits) ---//\n hold >>>= last_bits;\n bits -= last_bits;\n //---//\n state.back += last_bits;\n }\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n state.back += here_bits;\n state.length = here_val;\n if (here_op === 0) {\n //Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?\n // \"inflate: literal '%c'\\n\" :\n // \"inflate: literal 0x%02x\\n\", here.val));\n state.mode = LIT;\n break;\n }\n if (here_op & 32) {\n //Tracevv((stderr, \"inflate: end of block\\n\"));\n state.back = -1;\n state.mode = TYPE$1;\n break;\n }\n if (here_op & 64) {\n strm.msg = 'invalid literal/length code';\n state.mode = BAD$1;\n break;\n }\n state.extra = here_op & 15;\n state.mode = LENEXT;\n /* falls through */\n case LENEXT:\n if (state.extra) {\n //=== 
NEEDBITS(state.extra);\n n = state.extra;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.length += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/;\n //--- DROPBITS(state.extra) ---//\n hold >>>= state.extra;\n bits -= state.extra;\n //---//\n state.back += state.extra;\n }\n //Tracevv((stderr, \"inflate: length %u\\n\", state.length));\n state.was = state.length;\n state.mode = DIST;\n /* falls through */\n case DIST:\n for (;;) {\n here = state.distcode[hold & ((1 << state.distbits) - 1)];/*BITS(state.distbits)*/\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n if ((here_op & 0xf0) === 0) {\n last_bits = here_bits;\n last_op = here_op;\n last_val = here_val;\n for (;;) {\n here = state.distcode[last_val +\n ((hold & ((1 << (last_bits + last_op)) - 1))/*BITS(last.bits + last.op)*/ >> last_bits)];\n here_bits = here >>> 24;\n here_op = (here >>> 16) & 0xff;\n here_val = here & 0xffff;\n\n if ((last_bits + here_bits) <= bits) { break; }\n //--- PULLBYTE() ---//\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n //---//\n }\n //--- DROPBITS(last.bits) ---//\n hold >>>= last_bits;\n bits -= last_bits;\n //---//\n state.back += last_bits;\n }\n //--- DROPBITS(here.bits) ---//\n hold >>>= here_bits;\n bits -= here_bits;\n //---//\n state.back += here_bits;\n if (here_op & 64) {\n strm.msg = 'invalid distance code';\n state.mode = BAD$1;\n break;\n }\n state.offset = here_val;\n state.extra = (here_op) & 15;\n state.mode = DISTEXT;\n /* falls through */\n case DISTEXT:\n if (state.extra) {\n //=== NEEDBITS(state.extra);\n n = state.extra;\n while (bits < n) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n state.offset += hold & ((1 << state.extra) - 1)/*BITS(state.extra)*/;\n //--- DROPBITS(state.extra) ---//\n hold >>>= state.extra;\n bits -= state.extra;\n //---//\n state.back += state.extra;\n }\n//#ifdef INFLATE_STRICT\n if (state.offset > state.dmax) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD$1;\n break;\n }\n//#endif\n //Tracevv((stderr, \"inflate: distance %u\\n\", state.offset));\n state.mode = MATCH;\n /* falls through */\n case MATCH:\n if (left === 0) { break inf_leave; }\n copy = _out - left;\n if (state.offset > copy) { /* copy from window */\n copy = state.offset - copy;\n if (copy > state.whave) {\n if (state.sane) {\n strm.msg = 'invalid distance too far back';\n state.mode = BAD$1;\n break;\n }\n// (!) 
This block is disabled in zlib defaults,\n// don't enable it for binary compatibility\n//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR\n// Trace((stderr, \"inflate.c too far\\n\"));\n// copy -= state.whave;\n// if (copy > state.length) { copy = state.length; }\n// if (copy > left) { copy = left; }\n// left -= copy;\n// state.length -= copy;\n// do {\n// output[put++] = 0;\n// } while (--copy);\n// if (state.length === 0) { state.mode = LEN; }\n// break;\n//#endif\n }\n if (copy > state.wnext) {\n copy -= state.wnext;\n from = state.wsize - copy;\n }\n else {\n from = state.wnext - copy;\n }\n if (copy > state.length) { copy = state.length; }\n from_source = state.window;\n }\n else { /* copy from output */\n from_source = output;\n from = put - state.offset;\n copy = state.length;\n }\n if (copy > left) { copy = left; }\n left -= copy;\n state.length -= copy;\n do {\n output[put++] = from_source[from++];\n } while (--copy);\n if (state.length === 0) { state.mode = LEN; }\n break;\n case LIT:\n if (left === 0) { break inf_leave; }\n output[put++] = state.length;\n left--;\n state.mode = LEN;\n break;\n case CHECK:\n if (state.wrap) {\n //=== NEEDBITS(32);\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n // Use '|' instead of '+' to make sure that result is signed\n hold |= input[next++] << bits;\n bits += 8;\n }\n //===//\n _out -= left;\n strm.total_out += _out;\n state.total += _out;\n if (_out) {\n strm.adler = state.check =\n /*UPDATE(state.check, put - _out, _out);*/\n (state.flags ? crc32_1(state.check, output, _out, put - _out) : adler32_1(state.check, output, _out, put - _out));\n\n }\n _out = left;\n // NB: crc32 stored as signed 32-bit int, zswap32 returns signed too\n if ((state.flags ? hold : zswap32(hold)) !== state.check) {\n strm.msg = 'incorrect data check';\n state.mode = BAD$1;\n break;\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n //Tracev((stderr, \"inflate: check matches trailer\\n\"));\n }\n state.mode = LENGTH;\n /* falls through */\n case LENGTH:\n if (state.wrap && state.flags) {\n //=== NEEDBITS(32);\n while (bits < 32) {\n if (have === 0) { break inf_leave; }\n have--;\n hold += input[next++] << bits;\n bits += 8;\n }\n //===//\n if (hold !== (state.total & 0xffffffff)) {\n strm.msg = 'incorrect length check';\n state.mode = BAD$1;\n break;\n }\n //=== INITBITS();\n hold = 0;\n bits = 0;\n //===//\n //Tracev((stderr, \"inflate: length matches trailer\\n\"));\n }\n state.mode = DONE;\n /* falls through */\n case DONE:\n ret = Z_STREAM_END$2;\n break inf_leave;\n case BAD$1:\n ret = Z_DATA_ERROR$1;\n break inf_leave;\n case MEM:\n return Z_MEM_ERROR;\n case SYNC:\n /* falls through */\n default:\n return Z_STREAM_ERROR$1;\n }\n }\n\n // inf_leave <- here is real place for \"goto inf_leave\", emulated via \"break inf_leave\"\n\n /*\n Return from inflate(), updating the total counts and the check value.\n If there was no progress during the inflate() call, return a buffer\n error. 
Call updatewindow() to create and/or update the window state.\n Note: a memory error from inflate() is non-recoverable.\n */\n\n //--- RESTORE() ---\n strm.next_out = put;\n strm.avail_out = left;\n strm.next_in = next;\n strm.avail_in = have;\n state.hold = hold;\n state.bits = bits;\n //---\n\n if (state.wsize || (_out !== strm.avail_out && state.mode < BAD$1 &&\n (state.mode < CHECK || flush !== Z_FINISH$2))) {\n if (updatewindow(strm, strm.output, strm.next_out, _out - strm.avail_out)) ;\n }\n _in -= strm.avail_in;\n _out -= strm.avail_out;\n strm.total_in += _in;\n strm.total_out += _out;\n state.total += _out;\n if (state.wrap && _out) {\n strm.adler = state.check = /*UPDATE(state.check, strm.next_out - _out, _out);*/\n (state.flags ? crc32_1(state.check, output, _out, strm.next_out - _out) : adler32_1(state.check, output, _out, strm.next_out - _out));\n }\n strm.data_type = state.bits + (state.last ? 64 : 0) +\n (state.mode === TYPE$1 ? 128 : 0) +\n (state.mode === LEN_ || state.mode === COPY_ ? 256 : 0);\n if (((_in === 0 && _out === 0) || flush === Z_FINISH$2) && ret === Z_OK$2) {\n ret = Z_BUF_ERROR$1;\n }\n return ret;\n};\n\n\nconst inflateEnd = (strm) => {\n\n if (!strm || !strm.state /*|| strm->zfree == (free_func)0*/) {\n return Z_STREAM_ERROR$1;\n }\n\n let state = strm.state;\n if (state.window) {\n state.window = null;\n }\n strm.state = null;\n return Z_OK$2;\n};\n\n\nconst inflateGetHeader = (strm, head) => {\n\n /* check state */\n if (!strm || !strm.state) { return Z_STREAM_ERROR$1; }\n const state = strm.state;\n if ((state.wrap & 2) === 0) { return Z_STREAM_ERROR$1; }\n\n /* save header structure */\n state.head = head;\n head.done = false;\n return Z_OK$2;\n};\n\n\nconst inflateSetDictionary = (strm, dictionary) => {\n const dictLength = dictionary.length;\n\n let state;\n let dictid;\n let ret;\n\n /* check state */\n if (!strm /* == Z_NULL */ || !strm.state /* == Z_NULL */) { return Z_STREAM_ERROR$1; }\n state = strm.state;\n\n if (state.wrap !== 0 && state.mode !== DICT) {\n return Z_STREAM_ERROR$1;\n }\n\n /* check for correct dictionary identifier */\n if (state.mode === DICT) {\n dictid = 1; /* adler32(0, null, 0)*/\n /* dictid = adler32(dictid, dictionary, dictLength); */\n dictid = adler32_1(dictid, dictionary, dictLength, 0);\n if (dictid !== state.check) {\n return Z_DATA_ERROR$1;\n }\n }\n /* copy dictionary to window using updatewindow(), which will amend the\n existing dictionary if appropriate */\n ret = updatewindow(strm, dictionary, dictLength, dictLength);\n if (ret) {\n state.mode = MEM;\n return Z_MEM_ERROR;\n }\n state.havedict = 1;\n // Tracev((stderr, \"inflate: dictionary set\\n\"));\n return Z_OK$2;\n};\n\n\nvar inflateReset_1 = inflateReset;\nvar inflateReset2_1 = inflateReset2;\nvar inflateResetKeep_1 = inflateResetKeep;\nvar inflateInit_1 = inflateInit;\nvar inflateInit2_1 = inflateInit2;\nvar inflate_2 = inflate;\nvar inflateEnd_1 = inflateEnd;\nvar inflateGetHeader_1 = inflateGetHeader;\nvar inflateSetDictionary_1 = inflateSetDictionary;\nvar inflateInfo = 'pako inflate (from Nodeca project)';\n\n/* Not implemented\nmodule.exports.inflateCopy = inflateCopy;\nmodule.exports.inflateGetDictionary = inflateGetDictionary;\nmodule.exports.inflateMark = inflateMark;\nmodule.exports.inflatePrime = inflatePrime;\nmodule.exports.inflateSync = inflateSync;\nmodule.exports.inflateSyncPoint = inflateSyncPoint;\nmodule.exports.inflateUndermine = inflateUndermine;\n*/\n\nvar inflate_1 = {\n\tinflateReset: inflateReset_1,\n\tinflateReset2: 
inflateReset2_1,\n\tinflateResetKeep: inflateResetKeep_1,\n\tinflateInit: inflateInit_1,\n\tinflateInit2: inflateInit2_1,\n\tinflate: inflate_2,\n\tinflateEnd: inflateEnd_1,\n\tinflateGetHeader: inflateGetHeader_1,\n\tinflateSetDictionary: inflateSetDictionary_1,\n\tinflateInfo: inflateInfo\n};\n\n// (C) 1995-2013 Jean-loup Gailly and Mark Adler\n// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin\n//\n// This software is provided 'as-is', without any express or implied\n// warranty. In no event will the authors be held liable for any damages\n// arising from the use of this software.\n//\n// Permission is granted to anyone to use this software for any purpose,\n// including commercial applications, and to alter it and redistribute it\n// freely, subject to the following restrictions:\n//\n// 1. The origin of this software must not be misrepresented; you must not\n// claim that you wrote the original software. If you use this software\n// in a product, an acknowledgment in the product documentation would be\n// appreciated but is not required.\n// 2. Altered source versions must be plainly marked as such, and must not be\n// misrepresented as being the original software.\n// 3. This notice may not be removed or altered from any source distribution.\n\nfunction GZheader() {\n /* true if compressed data believed to be text */\n this.text = 0;\n /* modification time */\n this.time = 0;\n /* extra flags (not used when writing a gzip file) */\n this.xflags = 0;\n /* operating system */\n this.os = 0;\n /* pointer to extra field or Z_NULL if none */\n this.extra = null;\n /* extra field length (valid if extra != Z_NULL) */\n this.extra_len = 0; // Actually, we don't need it in JS,\n // but leave for few code modifications\n\n //\n // Setup limits is not necessary because in js we should not preallocate memory\n // for inflate use constant limit in 65536 bytes\n //\n\n /* space at extra (only when reading header) */\n // this.extra_max = 0;\n /* pointer to zero-terminated file name or Z_NULL */\n this.name = '';\n /* space at name (only when reading header) */\n // this.name_max = 0;\n /* pointer to zero-terminated comment or Z_NULL */\n this.comment = '';\n /* space at comment (only when reading header) */\n // this.comm_max = 0;\n /* true if there was or will be a header crc */\n this.hcrc = 0;\n /* true when done reading gzip header (not used when writing a gzip file) */\n this.done = false;\n}\n\nvar gzheader = GZheader;\n\nconst toString$1 = Object.prototype.toString;\n\n/* Public constants ==========================================================*/\n/* ===========================================================================*/\n\nconst {\n Z_NO_FLUSH: Z_NO_FLUSH$2, Z_FINISH: Z_FINISH$3,\n Z_OK: Z_OK$3, Z_STREAM_END: Z_STREAM_END$3, Z_NEED_DICT: Z_NEED_DICT$1, Z_STREAM_ERROR: Z_STREAM_ERROR$2, Z_DATA_ERROR: Z_DATA_ERROR$2, Z_MEM_ERROR: Z_MEM_ERROR$1\n} = constants;\n\n/* ===========================================================================*/\n\n\n/**\n * class Inflate\n *\n * Generic JS-style wrapper for zlib calls. If you don't need\n * streaming behaviour - use more simple functions: [[inflate]]\n * and [[inflateRaw]].\n **/\n\n/* internal\n * inflate.chunks -> Array\n *\n * Chunks of output data, if [[Inflate#onData]] not overridden.\n **/\n\n/**\n * Inflate.result -> Uint8Array|String\n *\n * Uncompressed result, generated by default [[Inflate#onData]]\n * and [[Inflate#onEnd]] handlers. 
Filled after you push last chunk\n * (call [[Inflate#push]] with `Z_FINISH` / `true` param).\n **/\n\n/**\n * Inflate.err -> Number\n *\n * Error code after inflate finished. 0 (Z_OK) on success.\n * Should be checked if broken data possible.\n **/\n\n/**\n * Inflate.msg -> String\n *\n * Error message, if [[Inflate.err]] != 0\n **/\n\n\n/**\n * new Inflate(options)\n * - options (Object): zlib inflate options.\n *\n * Creates new inflator instance with specified params. Throws exception\n * on bad params. Supported options:\n *\n * - `windowBits`\n * - `dictionary`\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information on these.\n *\n * Additional options, for internal needs:\n *\n * - `chunkSize` - size of generated data chunks (16K by default)\n * - `raw` (Boolean) - do raw inflate\n * - `to` (String) - if equal to 'string', then result will be converted\n * from utf8 to utf16 (javascript) string. When string output requested,\n * chunk length can differ from `chunkSize`, depending on content.\n *\n * By default, when no options set, autodetect deflate/gzip data format via\n * wrapper header.\n *\n * ##### Example:\n *\n * ```javascript\n * const pako = require('pako')\n * const chunk1 = new Uint8Array([1,2,3,4,5,6,7,8,9])\n * const chunk2 = new Uint8Array([10,11,12,13,14,15,16,17,18,19]);\n *\n * const inflate = new pako.Inflate({ level: 3});\n *\n * inflate.push(chunk1, false);\n * inflate.push(chunk2, true); // true -> last chunk\n *\n * if (inflate.err) { throw new Error(inflate.err); }\n *\n * console.log(inflate.result);\n * ```\n **/\nfunction Inflate(options) {\n this.options = common.assign({\n chunkSize: 1024 * 64,\n windowBits: 15,\n to: ''\n }, options || {});\n\n const opt = this.options;\n\n // Force window size for `raw` data, if not set directly,\n // because we have no header for autodetect.\n if (opt.raw && (opt.windowBits >= 0) && (opt.windowBits < 16)) {\n opt.windowBits = -opt.windowBits;\n if (opt.windowBits === 0) { opt.windowBits = -15; }\n }\n\n // If `windowBits` not defined (and mode not raw) - set autodetect flag for gzip/deflate\n if ((opt.windowBits >= 0) && (opt.windowBits < 16) &&\n !(options && options.windowBits)) {\n opt.windowBits += 32;\n }\n\n // Gzip header has no info about windows size, we can do autodetect only\n // for deflate. 
So, if window size not set, force it to max when gzip possible\n if ((opt.windowBits > 15) && (opt.windowBits < 48)) {\n // bit 3 (16) -> gzipped data\n // bit 4 (32) -> autodetect gzip/deflate\n if ((opt.windowBits & 15) === 0) {\n opt.windowBits |= 15;\n }\n }\n\n this.err = 0; // error code, if happens (0 = Z_OK)\n this.msg = ''; // error message\n this.ended = false; // used to avoid multiple onEnd() calls\n this.chunks = []; // chunks of compressed data\n\n this.strm = new zstream();\n this.strm.avail_out = 0;\n\n let status = inflate_1.inflateInit2(\n this.strm,\n opt.windowBits\n );\n\n if (status !== Z_OK$3) {\n throw new Error(messages[status]);\n }\n\n this.header = new gzheader();\n\n inflate_1.inflateGetHeader(this.strm, this.header);\n\n // Setup dictionary\n if (opt.dictionary) {\n // Convert data if needed\n if (typeof opt.dictionary === 'string') {\n opt.dictionary = strings.string2buf(opt.dictionary);\n } else if (toString$1.call(opt.dictionary) === '[object ArrayBuffer]') {\n opt.dictionary = new Uint8Array(opt.dictionary);\n }\n if (opt.raw) { //In raw mode we need to set the dictionary early\n status = inflate_1.inflateSetDictionary(this.strm, opt.dictionary);\n if (status !== Z_OK$3) {\n throw new Error(messages[status]);\n }\n }\n }\n}\n\n/**\n * Inflate#push(data[, flush_mode]) -> Boolean\n * - data (Uint8Array|ArrayBuffer): input data\n * - flush_mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE\n * flush modes. See constants. Skipped or `false` means Z_NO_FLUSH,\n * `true` means Z_FINISH.\n *\n * Sends input data to inflate pipe, generating [[Inflate#onData]] calls with\n * new output chunks. Returns `true` on success. If end of stream detected,\n * [[Inflate#onEnd]] will be called.\n *\n * `flush_mode` is not needed for normal operation, because end of stream\n * detected automatically. You may try to use it for advanced things, but\n * this functionality was not tested.\n *\n * On fail call [[Inflate#onEnd]] with error code and return false.\n *\n * ##### Example\n *\n * ```javascript\n * push(chunk, false); // push one of data chunks\n * ...\n * push(chunk, true); // push last chunk\n * ```\n **/\nInflate.prototype.push = function (data, flush_mode) {\n const strm = this.strm;\n const chunkSize = this.options.chunkSize;\n const dictionary = this.options.dictionary;\n let status, _flush_mode, last_avail_out;\n\n if (this.ended) return false;\n\n if (flush_mode === ~~flush_mode) _flush_mode = flush_mode;\n else _flush_mode = flush_mode === true ? 
Z_FINISH$3 : Z_NO_FLUSH$2;\n\n // Convert data if needed\n if (toString$1.call(data) === '[object ArrayBuffer]') {\n strm.input = new Uint8Array(data);\n } else {\n strm.input = data;\n }\n\n strm.next_in = 0;\n strm.avail_in = strm.input.length;\n\n for (;;) {\n if (strm.avail_out === 0) {\n strm.output = new Uint8Array(chunkSize);\n strm.next_out = 0;\n strm.avail_out = chunkSize;\n }\n\n status = inflate_1.inflate(strm, _flush_mode);\n\n if (status === Z_NEED_DICT$1 && dictionary) {\n status = inflate_1.inflateSetDictionary(strm, dictionary);\n\n if (status === Z_OK$3) {\n status = inflate_1.inflate(strm, _flush_mode);\n } else if (status === Z_DATA_ERROR$2) {\n // Replace code with more verbose\n status = Z_NEED_DICT$1;\n }\n }\n\n // Skip snyc markers if more data follows and not raw mode\n while (strm.avail_in > 0 &&\n status === Z_STREAM_END$3 &&\n strm.state.wrap > 0 &&\n data[strm.next_in] !== 0)\n {\n inflate_1.inflateReset(strm);\n status = inflate_1.inflate(strm, _flush_mode);\n }\n\n switch (status) {\n case Z_STREAM_ERROR$2:\n case Z_DATA_ERROR$2:\n case Z_NEED_DICT$1:\n case Z_MEM_ERROR$1:\n this.onEnd(status);\n this.ended = true;\n return false;\n }\n\n // Remember real `avail_out` value, because we may patch out buffer content\n // to align utf8 strings boundaries.\n last_avail_out = strm.avail_out;\n\n if (strm.next_out) {\n if (strm.avail_out === 0 || status === Z_STREAM_END$3) {\n\n if (this.options.to === 'string') {\n\n let next_out_utf8 = strings.utf8border(strm.output, strm.next_out);\n\n let tail = strm.next_out - next_out_utf8;\n let utf8str = strings.buf2string(strm.output, next_out_utf8);\n\n // move tail & realign counters\n strm.next_out = tail;\n strm.avail_out = chunkSize - tail;\n if (tail) strm.output.set(strm.output.subarray(next_out_utf8, next_out_utf8 + tail), 0);\n\n this.onData(utf8str);\n\n } else {\n this.onData(strm.output.length === strm.next_out ? strm.output : strm.output.subarray(0, strm.next_out));\n }\n }\n }\n\n // Must repeat iteration if out buffer is full\n if (status === Z_OK$3 && last_avail_out === 0) continue;\n\n // Finalize if end of stream reached.\n if (status === Z_STREAM_END$3) {\n status = inflate_1.inflateEnd(this.strm);\n this.onEnd(status);\n this.ended = true;\n return true;\n }\n\n if (strm.avail_in === 0) break;\n }\n\n return true;\n};\n\n\n/**\n * Inflate#onData(chunk) -> Void\n * - chunk (Uint8Array|String): output data. When string output requested,\n * each chunk will be string.\n *\n * By default, stores data blocks in `chunks[]` property and glue\n * those in `onEnd`. Override this handler, if you need another behaviour.\n **/\nInflate.prototype.onData = function (chunk) {\n this.chunks.push(chunk);\n};\n\n\n/**\n * Inflate#onEnd(status) -> Void\n * - status (Number): inflate status. 0 (Z_OK) on success,\n * other if not.\n *\n * Called either after you tell inflate that the input stream is\n * complete (Z_FINISH). 
By default - join collected chunks,\n * free memory and fill `results` / `err` properties.\n **/\nInflate.prototype.onEnd = function (status) {\n // On success - join\n if (status === Z_OK$3) {\n if (this.options.to === 'string') {\n this.result = this.chunks.join('');\n } else {\n this.result = common.flattenChunks(this.chunks);\n }\n }\n this.chunks = [];\n this.err = status;\n this.msg = this.strm.msg;\n};\n\n\n/**\n * inflate(data[, options]) -> Uint8Array|String\n * - data (Uint8Array): input data to decompress.\n * - options (Object): zlib inflate options.\n *\n * Decompress `data` with inflate/ungzip and `options`. Autodetect\n * format via wrapper header by default. That's why we don't provide\n * separate `ungzip` method.\n *\n * Supported options are:\n *\n * - windowBits\n *\n * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)\n * for more information.\n *\n * Sugar (options):\n *\n * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify\n * negative windowBits implicitly.\n * - `to` (String) - if equal to 'string', then result will be converted\n * from utf8 to utf16 (javascript) string. When string output requested,\n * chunk length can differ from `chunkSize`, depending on content.\n *\n *\n * ##### Example:\n *\n * ```javascript\n * const pako = require('pako');\n * const input = pako.deflate(new Uint8Array([1,2,3,4,5,6,7,8,9]));\n * let output;\n *\n * try {\n * output = pako.inflate(input);\n * } catch (err)\n * console.log(err);\n * }\n * ```\n **/\nfunction inflate$1(input, options) {\n const inflator = new Inflate(options);\n\n inflator.push(input);\n\n // That will never happens, if you don't cheat with options :)\n if (inflator.err) throw inflator.msg || messages[inflator.err];\n\n return inflator.result;\n}\n\n\n/**\n * inflateRaw(data[, options]) -> Uint8Array|String\n * - data (Uint8Array): input data to decompress.\n * - options (Object): zlib inflate options.\n *\n * The same as [[inflate]], but creates raw data, without wrapper\n * (header and adler32 crc).\n **/\nfunction inflateRaw(input, options) {\n options = options || {};\n options.raw = true;\n return inflate$1(input, options);\n}\n\n\n/**\n * ungzip(data[, options]) -> Uint8Array|String\n * - data (Uint8Array): input data to decompress.\n * - options (Object): zlib inflate options.\n *\n * Just shortcut to [[inflate]], because it autodetects format\n * by header.content. 
Done for convenience.\n **/\n\n\nvar Inflate_1 = Inflate;\nvar inflate_2$1 = inflate$1;\nvar inflateRaw_1 = inflateRaw;\nvar ungzip = inflate$1;\nvar constants$2 = constants;\n\nvar inflate_1$1 = {\n\tInflate: Inflate_1,\n\tinflate: inflate_2$1,\n\tinflateRaw: inflateRaw_1,\n\tungzip: ungzip,\n\tconstants: constants$2\n};\n\nconst { Deflate: Deflate$1, deflate: deflate$2, deflateRaw: deflateRaw$1, gzip: gzip$1 } = deflate_1$1;\n\nconst { Inflate: Inflate$1, inflate: inflate$2, inflateRaw: inflateRaw$1, ungzip: ungzip$1 } = inflate_1$1;\n\n\n\nvar Deflate_1$1 = Deflate$1;\nvar deflate_1$2 = deflate$2;\nvar deflateRaw_1$1 = deflateRaw$1;\nvar gzip_1$1 = gzip$1;\nvar Inflate_1$1 = Inflate$1;\nvar inflate_1$2 = inflate$2;\nvar inflateRaw_1$1 = inflateRaw$1;\nvar ungzip_1 = ungzip$1;\nvar constants_1 = constants;\n\nvar pako = {\n\tDeflate: Deflate_1$1,\n\tdeflate: deflate_1$2,\n\tdeflateRaw: deflateRaw_1$1,\n\tgzip: gzip_1$1,\n\tInflate: Inflate_1$1,\n\tinflate: inflate_1$2,\n\tinflateRaw: inflateRaw_1$1,\n\tungzip: ungzip_1,\n\tconstants: constants_1\n};\n\nexport { pako as p };\n","import { p as pako } from './pako.esm-856454b6.js';\n\nvar _a;\nconst GZip = (_a = class {\n constructor(level = 1) {\n if (level < 0 || level > 9) {\n throw new Error(\"Invalid gzip compression level, it should be between 0 and 9\");\n }\n this.level = level;\n }\n static fromConfig({ level }) {\n return new _a(level);\n }\n encode(data) {\n const gzipped = pako.gzip(data, { level: this.level });\n return gzipped;\n }\n decode(data, out) {\n const uncompressed = pako.ungzip(data);\n if (out !== void 0) {\n out.set(uncompressed);\n return out;\n }\n return uncompressed;\n }\n}, _a.codecId = \"gzip\", _a);\n\nexport default GZip;\n","import { p as pako } from './pako.esm-856454b6.js';\n\nvar _a;\nconst Zlib = (_a = class {\n constructor(level = 1) {\n if (level < -1 || level > 9) {\n throw new Error(\"Invalid zlib compression level, it should be between -1 and 9\");\n }\n this.level = level;\n }\n static fromConfig({ level }) {\n return new _a(level);\n }\n encode(data) {\n const gzipped = pako.deflate(data, { level: this.level });\n return gzipped;\n }\n decode(data, out) {\n const uncompressed = pako.inflate(data);\n if (out !== void 0) {\n out.set(uncompressed);\n return out;\n }\n return uncompressed;\n }\n}, _a.codecId = \"zlib\", _a);\n\nexport default Zlib;\n","var blosc_codec = function() {\n typeof document !== \"undefined\" && document.currentScript ? document.currentScript.src : void 0;\n return function(blosc_codec2) {\n blosc_codec2 = blosc_codec2 || {};\n var f;\n f || (f = typeof blosc_codec2 !== \"undefined\" ? blosc_codec2 : {});\n var aa, ba;\n f.ready = new Promise(function(a, b) {\n aa = a;\n ba = b;\n });\n var r = {}, t;\n for (t in f)\n f.hasOwnProperty(t) && (r[t] = f[t]);\n var ca = \"./this.program\", da = f.print || console.log.bind(console), u = f.printErr || console.warn.bind(console);\n for (t in r)\n r.hasOwnProperty(t) && (f[t] = r[t]);\n r = null;\n f.thisProgram && (ca = f.thisProgram);\n var v;\n f.wasmBinary && (v = f.wasmBinary);\n f.noExitRuntime && (f.noExitRuntime);\n typeof WebAssembly !== \"object\" && w(\"no native wasm support detected\");\n var y, ea = false, fa = typeof TextDecoder !== \"undefined\" ? 
new TextDecoder(\"utf8\") : void 0;\n function ha(a, b, c) {\n var d = b + c;\n for (c = b; a[c] && !(c >= d); )\n ++c;\n if (16 < c - b && a.subarray && fa)\n return fa.decode(a.subarray(b, c));\n for (d = \"\"; b < c; ) {\n var e = a[b++];\n if (e & 128) {\n var g = a[b++] & 63;\n if ((e & 224) == 192)\n d += String.fromCharCode((e & 31) << 6 | g);\n else {\n var k = a[b++] & 63;\n e = (e & 240) == 224 ? (e & 15) << 12 | g << 6 | k : (e & 7) << 18 | g << 12 | k << 6 | a[b++] & 63;\n 65536 > e ? d += String.fromCharCode(e) : (e -= 65536, d += String.fromCharCode(55296 | e >> 10, 56320 | e & 1023));\n }\n } else\n d += String.fromCharCode(e);\n }\n return d;\n }\n function ia(a, b, c) {\n var d = z;\n if (0 < c) {\n c = b + c - 1;\n for (var e = 0; e < a.length; ++e) {\n var g = a.charCodeAt(e);\n if (55296 <= g && 57343 >= g) {\n var k = a.charCodeAt(++e);\n g = 65536 + ((g & 1023) << 10) | k & 1023;\n }\n if (127 >= g) {\n if (b >= c)\n break;\n d[b++] = g;\n } else {\n if (2047 >= g) {\n if (b + 1 >= c)\n break;\n d[b++] = 192 | g >> 6;\n } else {\n if (65535 >= g) {\n if (b + 2 >= c)\n break;\n d[b++] = 224 | g >> 12;\n } else {\n if (b + 3 >= c)\n break;\n d[b++] = 240 | g >> 18;\n d[b++] = 128 | g >> 12 & 63;\n }\n d[b++] = 128 | g >> 6 & 63;\n }\n d[b++] = 128 | g & 63;\n }\n }\n d[b] = 0;\n }\n }\n var ja = typeof TextDecoder !== \"undefined\" ? new TextDecoder(\"utf-16le\") : void 0;\n function ka(a, b) {\n var c = a >> 1;\n for (var d = c + b / 2; !(c >= d) && A[c]; )\n ++c;\n c <<= 1;\n if (32 < c - a && ja)\n return ja.decode(z.subarray(a, c));\n c = 0;\n for (d = \"\"; ; ) {\n var e = C[a + 2 * c >> 1];\n if (e == 0 || c == b / 2)\n return d;\n ++c;\n d += String.fromCharCode(e);\n }\n }\n function la(a, b, c) {\n c === void 0 && (c = 2147483647);\n if (2 > c)\n return 0;\n c -= 2;\n var d = b;\n c = c < 2 * a.length ? c / 2 : a.length;\n for (var e = 0; e < c; ++e)\n C[b >> 1] = a.charCodeAt(e), b += 2;\n C[b >> 1] = 0;\n return b - d;\n }\n function ma(a) {\n return 2 * a.length;\n }\n function na(a, b) {\n for (var c = 0, d = \"\"; !(c >= b / 4); ) {\n var e = D[a + 4 * c >> 2];\n if (e == 0)\n break;\n ++c;\n 65536 <= e ? (e -= 65536, d += String.fromCharCode(55296 | e >> 10, 56320 | e & 1023)) : d += String.fromCharCode(e);\n }\n return d;\n }\n function oa(a, b, c) {\n c === void 0 && (c = 2147483647);\n if (4 > c)\n return 0;\n var d = b;\n c = d + c - 4;\n for (var e = 0; e < a.length; ++e) {\n var g = a.charCodeAt(e);\n if (55296 <= g && 57343 >= g) {\n var k = a.charCodeAt(++e);\n g = 65536 + ((g & 1023) << 10) | k & 1023;\n }\n D[b >> 2] = g;\n b += 4;\n if (b + 4 > c)\n break;\n }\n D[b >> 2] = 0;\n return b - d;\n }\n function pa(a) {\n for (var b = 0, c = 0; c < a.length; ++c) {\n var d = a.charCodeAt(c);\n 55296 <= d && 57343 >= d && ++c;\n b += 4;\n }\n return b;\n }\n var E, F, z, C, A, D, G, qa, ra;\n function sa(a) {\n E = a;\n f.HEAP8 = F = new Int8Array(a);\n f.HEAP16 = C = new Int16Array(a);\n f.HEAP32 = D = new Int32Array(a);\n f.HEAPU8 = z = new Uint8Array(a);\n f.HEAPU16 = A = new Uint16Array(a);\n f.HEAPU32 = G = new Uint32Array(a);\n f.HEAPF32 = qa = new Float32Array(a);\n f.HEAPF64 = ra = new Float64Array(a);\n }\n var ta = f.INITIAL_MEMORY || 16777216;\n f.wasmMemory ? 
y = f.wasmMemory : y = new WebAssembly.Memory({ initial: ta / 65536, maximum: 32768 });\n y && (E = y.buffer);\n ta = E.byteLength;\n sa(E);\n var I, ua = [], va = [], wa = [], xa = [];\n function ya() {\n var a = f.preRun.shift();\n ua.unshift(a);\n }\n var J = 0, K = null;\n f.preloadedImages = {};\n f.preloadedAudios = {};\n function w(a) {\n if (f.onAbort)\n f.onAbort(a);\n u(a);\n ea = true;\n a = new WebAssembly.RuntimeError(\"abort(\" + a + \"). Build with -s ASSERTIONS=1 for more info.\");\n ba(a);\n throw a;\n }\n function Aa(a) {\n var b = L;\n return String.prototype.startsWith ? b.startsWith(a) : b.indexOf(a) === 0;\n }\n function Ba() {\n return Aa(\"data:application/octet-stream;base64,\");\n }\n var L = \"blosc_codec.wasm\";\n if (!Ba()) {\n var Ca = L;\n L = f.locateFile ? f.locateFile(Ca, \"\") : \"\" + Ca;\n }\n function Da() {\n try {\n if (v)\n return new Uint8Array(v);\n throw \"both async and sync fetching of the wasm failed\";\n } catch (a) {\n w(a);\n }\n }\n function N(a) {\n for (; 0 < a.length; ) {\n var b = a.shift();\n if (typeof b == \"function\")\n b(f);\n else {\n var c = b.T;\n typeof c === \"number\" ? b.O === void 0 ? I.get(c)() : I.get(c)(b.O) : c(b.O === void 0 ? null : b.O);\n }\n }\n }\n function Ea(a) {\n this.N = a - 16;\n this.$ = function(b) {\n D[this.N + 8 >> 2] = b;\n };\n this.X = function(b) {\n D[this.N + 0 >> 2] = b;\n };\n this.Y = function() {\n D[this.N + 4 >> 2] = 0;\n };\n this.W = function() {\n F[this.N + 12 >> 0] = 0;\n };\n this.Z = function() {\n F[this.N + 13 >> 0] = 0;\n };\n this.V = function(b, c) {\n this.$(b);\n this.X(c);\n this.Y();\n this.W();\n this.Z();\n };\n }\n function Fa(a) {\n switch (a) {\n case 1:\n return 0;\n case 2:\n return 1;\n case 4:\n return 2;\n case 8:\n return 3;\n default:\n throw new TypeError(\"Unknown type size: \" + a);\n }\n }\n var Ga = void 0;\n function P(a) {\n for (var b = \"\"; z[a]; )\n b += Ga[z[a++]];\n return b;\n }\n var Q = {}, R = {}, S = {};\n function Ha(a) {\n if (a === void 0)\n return \"_unknown\";\n a = a.replace(/[^a-zA-Z0-9_]/g, \"$\");\n var b = a.charCodeAt(0);\n return 48 <= b && 57 >= b ? \"_\" + a : a;\n }\n function Ia(a, b) {\n a = Ha(a);\n return new Function(\"body\", \"return function \" + a + '() {\\n \"use strict\"; return body.apply(this, arguments);\\n};\\n')(b);\n }\n function Ja(a) {\n var b = Error, c = Ia(a, function(d) {\n this.name = a;\n this.message = d;\n d = Error(d).stack;\n d !== void 0 && (this.stack = this.toString() + \"\\n\" + d.replace(/^Error(:[^\\n]*)?\\n/, \"\"));\n });\n c.prototype = Object.create(b.prototype);\n c.prototype.constructor = c;\n c.prototype.toString = function() {\n return this.message === void 0 ? this.name : this.name + \": \" + this.message;\n };\n return c;\n }\n var Ka = void 0;\n function T(a) {\n throw new Ka(a);\n }\n var La = void 0;\n function Ma(a, b) {\n function c(h) {\n h = b(h);\n if (h.length !== d.length)\n throw new La(\"Mismatched type converter count\");\n for (var l = 0; l < d.length; ++l)\n U(d[l], h[l]);\n }\n var d = [];\n d.forEach(function(h) {\n S[h] = a;\n });\n var e = Array(a.length), g = [], k = 0;\n a.forEach(function(h, l) {\n R.hasOwnProperty(h) ? 
e[l] = R[h] : (g.push(h), Q.hasOwnProperty(h) || (Q[h] = []), Q[h].push(function() {\n e[l] = R[h];\n ++k;\n k === g.length && c(e);\n }));\n });\n g.length === 0 && c(e);\n }\n function U(a, b, c) {\n c = c || {};\n if (!(\"argPackAdvance\" in b))\n throw new TypeError(\"registerType registeredInstance requires argPackAdvance\");\n var d = b.name;\n a || T('type \"' + d + '\" must have a positive integer typeid pointer');\n if (R.hasOwnProperty(a)) {\n if (c.U)\n return;\n T(\"Cannot register type '\" + d + \"' twice\");\n }\n R[a] = b;\n delete S[a];\n Q.hasOwnProperty(a) && (b = Q[a], delete Q[a], b.forEach(function(e) {\n e();\n }));\n }\n var Na = [], V = [{}, { value: void 0 }, { value: null }, { value: true }, { value: false }];\n function Qa(a) {\n 4 < a && --V[a].P === 0 && (V[a] = void 0, Na.push(a));\n }\n function Ra(a) {\n switch (a) {\n case void 0:\n return 1;\n case null:\n return 2;\n case true:\n return 3;\n case false:\n return 4;\n default:\n var b = Na.length ? Na.pop() : V.length;\n V[b] = { P: 1, value: a };\n return b;\n }\n }\n function Sa(a) {\n return this.fromWireType(G[a >> 2]);\n }\n function Ta(a) {\n if (a === null)\n return \"null\";\n var b = typeof a;\n return b === \"object\" || b === \"array\" || b === \"function\" ? a.toString() : \"\" + a;\n }\n function Ua(a, b) {\n switch (b) {\n case 2:\n return function(c) {\n return this.fromWireType(qa[c >> 2]);\n };\n case 3:\n return function(c) {\n return this.fromWireType(ra[c >> 3]);\n };\n default:\n throw new TypeError(\"Unknown float type: \" + a);\n }\n }\n function Va(a) {\n var b = Function;\n if (!(b instanceof Function))\n throw new TypeError(\"new_ called with constructor type \" + typeof b + \" which is not a function\");\n var c = Ia(b.name || \"unknownFunctionName\", function() {\n });\n c.prototype = b.prototype;\n c = new c();\n a = b.apply(c, a);\n return a instanceof Object ? a : c;\n }\n function Wa(a) {\n for (; a.length; ) {\n var b = a.pop();\n a.pop()(b);\n }\n }\n function Xa(a, b) {\n var c = f;\n if (c[a].L === void 0) {\n var d = c[a];\n c[a] = function() {\n c[a].L.hasOwnProperty(arguments.length) || T(\"Function '\" + b + \"' called with an invalid number of arguments (\" + arguments.length + \") - expects one of (\" + c[a].L + \")!\");\n return c[a].L[arguments.length].apply(this, arguments);\n };\n c[a].L = [];\n c[a].L[d.S] = d;\n }\n }\n function Ya(a, b, c) {\n f.hasOwnProperty(a) ? ((c === void 0 || f[a].L !== void 0 && f[a].L[c] !== void 0) && T(\"Cannot register public name '\" + a + \"' twice\"), Xa(a, a), f.hasOwnProperty(c) && T(\"Cannot register multiple overloads of a function with the same number of arguments (\" + c + \")!\"), f[a].L[c] = b) : (f[a] = b, c !== void 0 && (f[a].ba = c));\n }\n function Za(a, b) {\n for (var c = [], d = 0; d < a; d++)\n c.push(D[(b >> 2) + d]);\n return c;\n }\n function $a(a, b) {\n 0 <= a.indexOf(\"j\") || w(\"Assertion failed: getDynCaller should only be called with i64 sigs\");\n var c = [];\n return function() {\n c.length = arguments.length;\n for (var d = 0; d < arguments.length; d++)\n c[d] = arguments[d];\n var e;\n a.indexOf(\"j\") != -1 ? e = c && c.length ? f[\"dynCall_\" + a].apply(null, [b].concat(c)) : f[\"dynCall_\" + a].call(null, b) : e = I.get(b).apply(null, c);\n return e;\n };\n }\n function ab(a, b) {\n a = P(a);\n var c = a.indexOf(\"j\") != -1 ? 
$a(a, b) : I.get(b);\n typeof c !== \"function\" && T(\"unknown function pointer with signature \" + a + \": \" + b);\n return c;\n }\n var bb = void 0;\n function cb(a) {\n a = db(a);\n var b = P(a);\n W(a);\n return b;\n }\n function eb(a, b) {\n function c(g) {\n e[g] || R[g] || (S[g] ? S[g].forEach(c) : (d.push(g), e[g] = true));\n }\n var d = [], e = {};\n b.forEach(c);\n throw new bb(a + \": \" + d.map(cb).join([\", \"]));\n }\n function fb(a, b, c) {\n switch (b) {\n case 0:\n return c ? function(d) {\n return F[d];\n } : function(d) {\n return z[d];\n };\n case 1:\n return c ? function(d) {\n return C[d >> 1];\n } : function(d) {\n return A[d >> 1];\n };\n case 2:\n return c ? function(d) {\n return D[d >> 2];\n } : function(d) {\n return G[d >> 2];\n };\n default:\n throw new TypeError(\"Unknown integer type: \" + a);\n }\n }\n var gb = {};\n function hb() {\n if (!ib) {\n var a = { USER: \"web_user\", LOGNAME: \"web_user\", PATH: \"/\", PWD: \"/\", HOME: \"/home/web_user\", LANG: (typeof navigator === \"object\" && navigator.languages && navigator.languages[0] || \"C\").replace(\"-\", \"_\") + \".UTF-8\", _: ca || \"./this.program\" }, b;\n for (b in gb)\n a[b] = gb[b];\n var c = [];\n for (b in a)\n c.push(b + \"=\" + a[b]);\n ib = c;\n }\n return ib;\n }\n for (var ib, jb = [null, [], []], kb = Array(256), X = 0; 256 > X; ++X)\n kb[X] = String.fromCharCode(X);\n Ga = kb;\n Ka = f.BindingError = Ja(\"BindingError\");\n La = f.InternalError = Ja(\"InternalError\");\n f.count_emval_handles = function() {\n for (var a = 0, b = 5; b < V.length; ++b)\n V[b] !== void 0 && ++a;\n return a;\n };\n f.get_first_emval = function() {\n for (var a = 5; a < V.length; ++a)\n if (V[a] !== void 0)\n return V[a];\n return null;\n };\n bb = f.UnboundTypeError = Ja(\"UnboundTypeError\");\n va.push({ T: function() {\n lb();\n } });\n var mb = { p: function(a) {\n return Y(a + 16) + 16;\n }, o: function(a, b, c) {\n new Ea(a).V(b, c);\n throw a;\n }, z: function(a, b, c, d, e) {\n var g = Fa(c);\n b = P(b);\n U(a, { name: b, fromWireType: function(k) {\n return !!k;\n }, toWireType: function(k, h) {\n return h ? d : e;\n }, argPackAdvance: 8, readValueFromPointer: function(k) {\n if (c === 1)\n var h = F;\n else if (c === 2)\n h = C;\n else if (c === 4)\n h = D;\n else\n throw new TypeError(\"Unknown boolean type size: \" + b);\n return this.fromWireType(h[k >> g]);\n }, M: null });\n }, y: function(a, b) {\n b = P(b);\n U(a, { name: b, fromWireType: function(c) {\n var d = V[c].value;\n Qa(c);\n return d;\n }, toWireType: function(c, d) {\n return Ra(d);\n }, argPackAdvance: 8, readValueFromPointer: Sa, M: null });\n }, h: function(a, b, c) {\n c = Fa(c);\n b = P(b);\n U(a, { name: b, fromWireType: function(d) {\n return d;\n }, toWireType: function(d, e) {\n if (typeof e !== \"number\" && typeof e !== \"boolean\")\n throw new TypeError('Cannot convert \"' + Ta(e) + '\" to ' + this.name);\n return e;\n }, argPackAdvance: 8, readValueFromPointer: Ua(b, c), M: null });\n }, e: function(a, b, c, d, e, g) {\n var k = Za(b, c);\n a = P(a);\n e = ab(d, e);\n Ya(a, function() {\n eb(\"Cannot call \" + a + \" due to unbound types\", k);\n }, b - 1);\n Ma(k, function(h) {\n var l = a, n = a;\n h = [h[0], null].concat(h.slice(1));\n var p = e, q = h.length;\n 2 > q && T(\"argTypes array size mismatch! 
Must at least get return value and 'this' types!\");\n for (var x = h[1] !== null && false, B = false, m = 1; m < h.length; ++m)\n if (h[m] !== null && h[m].M === void 0) {\n B = true;\n break;\n }\n var Oa = h[0].name !== \"void\", H = \"\", M = \"\";\n for (m = 0; m < q - 2; ++m)\n H += (m !== 0 ? \", \" : \"\") + \"arg\" + m, M += (m !== 0 ? \", \" : \"\") + \"arg\" + m + \"Wired\";\n n = \"return function \" + Ha(n) + \"(\" + H + \") {\\nif (arguments.length !== \" + (q - 2) + \") {\\nthrowBindingError('function \" + n + \" called with ' + arguments.length + ' arguments, expected \" + (q - 2) + \" args!');\\n}\\n\";\n B && (n += \"var destructors = [];\\n\");\n var Pa = B ? \"destructors\" : \"null\";\n H = \"throwBindingError invoker fn runDestructors retType classParam\".split(\" \");\n p = [T, p, g, Wa, h[0], h[1]];\n x && (n += \"var thisWired = classParam.toWireType(\" + Pa + \", this);\\n\");\n for (m = 0; m < q - 2; ++m)\n n += \"var arg\" + m + \"Wired = argType\" + m + \".toWireType(\" + Pa + \", arg\" + m + \"); // \" + h[m + 2].name + \"\\n\", H.push(\"argType\" + m), p.push(h[m + 2]);\n x && (M = \"thisWired\" + (0 < M.length ? \", \" : \"\") + M);\n n += (Oa ? \"var rv = \" : \"\") + \"invoker(fn\" + (0 < M.length ? \", \" : \"\") + M + \");\\n\";\n if (B)\n n += \"runDestructors(destructors);\\n\";\n else\n for (m = x ? 1 : 2; m < h.length; ++m)\n q = m === 1 ? \"thisWired\" : \"arg\" + (m - 2) + \"Wired\", h[m].M !== null && (n += q + \"_dtor(\" + q + \"); // \" + h[m].name + \"\\n\", H.push(q + \"_dtor\"), p.push(h[m].M));\n Oa && (n += \"var ret = retType.fromWireType(rv);\\nreturn ret;\\n\");\n H.push(n + \"}\\n\");\n h = Va(H).apply(null, p);\n m = b - 1;\n if (!f.hasOwnProperty(l))\n throw new La(\"Replacing nonexistant public symbol\");\n f[l].L !== void 0 && m !== void 0 ? f[l].L[m] = h : (f[l] = h, f[l].S = m);\n return [];\n });\n }, c: function(a, b, c, d, e) {\n function g(n) {\n return n;\n }\n b = P(b);\n e === -1 && (e = 4294967295);\n var k = Fa(c);\n if (d === 0) {\n var h = 32 - 8 * c;\n g = function(n) {\n return n << h >>> h;\n };\n }\n var l = b.indexOf(\"unsigned\") != -1;\n U(a, { name: b, fromWireType: g, toWireType: function(n, p) {\n if (typeof p !== \"number\" && typeof p !== \"boolean\")\n throw new TypeError('Cannot convert \"' + Ta(p) + '\" to ' + this.name);\n if (p < d || p > e)\n throw new TypeError('Passing a number \"' + Ta(p) + '\" from JS side to C/C++ side to an argument of type \"' + b + '\", which is outside the valid range [' + d + \", \" + e + \"]!\");\n return l ? p >>> 0 : p | 0;\n }, argPackAdvance: 8, readValueFromPointer: fb(b, k, d !== 0), M: null });\n }, b: function(a, b, c) {\n function d(g) {\n g >>= 2;\n var k = G;\n return new e(E, k[g + 1], k[g]);\n }\n var e = [Int8Array, Uint8Array, Int16Array, Uint16Array, Int32Array, Uint32Array, Float32Array, Float64Array][b];\n c = P(c);\n U(a, { name: c, fromWireType: d, argPackAdvance: 8, readValueFromPointer: d }, { U: true });\n }, i: function(a, b) {\n b = P(b);\n var c = b === \"std::string\";\n U(a, { name: b, fromWireType: function(d) {\n var e = G[d >> 2];\n if (c)\n for (var g = d + 4, k = 0; k <= e; ++k) {\n var h = d + 4 + k;\n if (k == e || z[h] == 0) {\n g = g ? 
ha(z, g, h - g) : \"\";\n if (l === void 0)\n var l = g;\n else\n l += String.fromCharCode(0), l += g;\n g = h + 1;\n }\n }\n else {\n l = Array(e);\n for (k = 0; k < e; ++k)\n l[k] = String.fromCharCode(z[d + 4 + k]);\n l = l.join(\"\");\n }\n W(d);\n return l;\n }, toWireType: function(d, e) {\n e instanceof ArrayBuffer && (e = new Uint8Array(e));\n var g = typeof e === \"string\";\n g || e instanceof Uint8Array || e instanceof Uint8ClampedArray || e instanceof Int8Array || T(\"Cannot pass non-string to std::string\");\n var k = (c && g ? function() {\n for (var n = 0, p = 0; p < e.length; ++p) {\n var q = e.charCodeAt(p);\n 55296 <= q && 57343 >= q && (q = 65536 + ((q & 1023) << 10) | e.charCodeAt(++p) & 1023);\n 127 >= q ? ++n : n = 2047 >= q ? n + 2 : 65535 >= q ? n + 3 : n + 4;\n }\n return n;\n } : function() {\n return e.length;\n })(), h = Y(4 + k + 1);\n G[h >> 2] = k;\n if (c && g)\n ia(e, h + 4, k + 1);\n else if (g)\n for (g = 0; g < k; ++g) {\n var l = e.charCodeAt(g);\n 255 < l && (W(h), T(\"String has UTF-16 code units that do not fit in 8 bits\"));\n z[h + 4 + g] = l;\n }\n else\n for (g = 0; g < k; ++g)\n z[h + 4 + g] = e[g];\n d !== null && d.push(W, h);\n return h;\n }, argPackAdvance: 8, readValueFromPointer: Sa, M: function(d) {\n W(d);\n } });\n }, d: function(a, b, c) {\n c = P(c);\n if (b === 2) {\n var d = ka;\n var e = la;\n var g = ma;\n var k = function() {\n return A;\n };\n var h = 1;\n } else\n b === 4 && (d = na, e = oa, g = pa, k = function() {\n return G;\n }, h = 2);\n U(a, { name: c, fromWireType: function(l) {\n for (var n = G[l >> 2], p = k(), q, x = l + 4, B = 0; B <= n; ++B) {\n var m = l + 4 + B * b;\n if (B == n || p[m >> h] == 0)\n x = d(x, m - x), q === void 0 ? q = x : (q += String.fromCharCode(0), q += x), x = m + b;\n }\n W(l);\n return q;\n }, toWireType: function(l, n) {\n typeof n !== \"string\" && T(\"Cannot pass non-string to C++ string type \" + c);\n var p = g(n), q = Y(4 + p + b);\n G[q >> 2] = p >> h;\n e(n, q + 4, p + b);\n l !== null && l.push(W, q);\n return q;\n }, argPackAdvance: 8, readValueFromPointer: Sa, M: function(l) {\n W(l);\n } });\n }, A: function(a, b) {\n b = P(b);\n U(a, {\n aa: true,\n name: b,\n argPackAdvance: 0,\n fromWireType: function() {\n },\n toWireType: function() {\n }\n });\n }, n: Qa, x: function(a) {\n 4 < a && (V[a].P += 1);\n }, C: function(a, b) {\n var c = R[a];\n c === void 0 && T(\"_emval_take_value has unknown type \" + cb(a));\n a = c.readValueFromPointer(b);\n return Ra(a);\n }, t: function() {\n w();\n }, r: function(a, b, c) {\n z.copyWithin(a, b, b + c);\n }, s: function(a) {\n a >>>= 0;\n var b = z.length;\n if (2147483648 < a)\n return false;\n for (var c = 1; 4 >= c; c *= 2) {\n var d = b * (1 + 0.2 / c);\n d = Math.min(d, a + 100663296);\n d = Math.max(16777216, a, d);\n 0 < d % 65536 && (d += 65536 - d % 65536);\n a: {\n try {\n y.grow(Math.min(2147483648, d) - E.byteLength + 65535 >>> 16);\n sa(y.buffer);\n var e = 1;\n break a;\n } catch (g) {\n }\n e = void 0;\n }\n if (e)\n return true;\n }\n return false;\n }, u: function(a, b) {\n var c = 0;\n hb().forEach(function(d, e) {\n var g = b + c;\n e = D[a + 4 * e >> 2] = g;\n for (g = 0; g < d.length; ++g)\n F[e++ >> 0] = d.charCodeAt(g);\n F[e >> 0] = 0;\n c += d.length + 1;\n });\n return 0;\n }, v: function(a, b) {\n var c = hb();\n D[a >> 2] = c.length;\n var d = 0;\n c.forEach(function(e) {\n d += e.length + 1;\n });\n D[b >> 2] = d;\n return 0;\n }, w: function() {\n return 0;\n }, q: function() {\n }, g: function(a, b, c, d) {\n 
for (var e = 0, g = 0; g < c; g++) {\n for (var k = D[b + 8 * g >> 2], h = D[b + (8 * g + 4) >> 2], l = 0; l < h; l++) {\n var n = z[k + l], p = jb[a];\n n === 0 || n === 10 ? ((a === 1 ? da : u)(ha(p, 0)), p.length = 0) : p.push(n);\n }\n e += h;\n }\n D[d >> 2] = e;\n return 0;\n }, a: y, l: function() {\n return 0;\n }, k: function() {\n return 0;\n }, j: function() {\n }, B: function() {\n return 6;\n }, m: function() {\n }, f: function() {\n } };\n (function() {\n function a(e) {\n f.asm = e.exports;\n I = f.asm.D;\n J--;\n f.monitorRunDependencies && f.monitorRunDependencies(J);\n J == 0 && (K && (e = K, K = null, e()));\n }\n function b(e) {\n a(e.instance);\n }\n function c(e) {\n return Promise.resolve().then(Da).then(function(g) {\n return WebAssembly.instantiate(g, d);\n }).then(e, function(g) {\n u(\"failed to asynchronously prepare wasm: \" + g);\n w(g);\n });\n }\n var d = { a: mb };\n J++;\n f.monitorRunDependencies && f.monitorRunDependencies(J);\n if (f.instantiateWasm)\n try {\n return f.instantiateWasm(d, a);\n } catch (e) {\n return u(\"Module.instantiateWasm callback failed with error: \" + e), false;\n }\n (function() {\n return v || typeof WebAssembly.instantiateStreaming !== \"function\" || Ba() || Aa(\"file://\") || typeof fetch !== \"function\" ? c(b) : fetch(L, { credentials: \"same-origin\" }).then(function(e) {\n return WebAssembly.instantiateStreaming(e, d).then(b, function(g) {\n u(\"wasm streaming compile failed: \" + g);\n u(\"falling back to ArrayBuffer instantiation\");\n return c(b);\n });\n });\n })().catch(ba);\n return {};\n })();\n var lb = f.___wasm_call_ctors = function() {\n return (lb = f.___wasm_call_ctors = f.asm.E).apply(null, arguments);\n }, Y = f._malloc = function() {\n return (Y = f._malloc = f.asm.F).apply(null, arguments);\n }, W = f._free = function() {\n return (W = f._free = f.asm.G).apply(null, arguments);\n }, db = f.___getTypeName = function() {\n return (db = f.___getTypeName = f.asm.H).apply(null, arguments);\n };\n f.___embind_register_native_and_builtin_types = function() {\n return (f.___embind_register_native_and_builtin_types = f.asm.I).apply(null, arguments);\n };\n f.dynCall_jiiiii = function() {\n return (f.dynCall_jiiiii = f.asm.J).apply(null, arguments);\n };\n f.dynCall_jiji = function() {\n return (f.dynCall_jiji = f.asm.K).apply(null, arguments);\n };\n var Z;\n K = function nb() {\n Z || ob();\n Z || (K = nb);\n };\n function ob() {\n function a() {\n if (!Z && (Z = true, f.calledRun = true, !ea)) {\n N(va);\n N(wa);\n aa(f);\n if (f.onRuntimeInitialized)\n f.onRuntimeInitialized();\n if (f.postRun)\n for (typeof f.postRun == \"function\" && (f.postRun = [f.postRun]); f.postRun.length; ) {\n var b = f.postRun.shift();\n xa.unshift(b);\n }\n N(xa);\n }\n }\n if (!(0 < J)) {\n if (f.preRun)\n for (typeof f.preRun == \"function\" && (f.preRun = [f.preRun]); f.preRun.length; )\n ya();\n N(ua);\n 0 < J || (f.setStatus ? (f.setStatus(\"Running...\"), setTimeout(function() {\n setTimeout(function() {\n f.setStatus(\"\");\n }, 1);\n a();\n }, 1)) : a());\n }\n }\n f.run = ob;\n if (f.preInit)\n for (typeof f.preInit == \"function\" && (f.preInit = [f.preInit]); 0 < f.preInit.length; )\n f.preInit.pop()();\n ob();\n return blosc_codec2.ready;\n };\n}();\n\nvar __isNode = typeof process !== 'undefined' && process.versions != null && process.versions.node != null;\nvar __toBinary = __isNode\n ? 
base64 => new Uint8Array(Buffer.from(base64, 'base64'))\n : /* @__PURE__ */ (() => {\n var table = new Uint8Array(128);\n for (var i = 0; i < 64; i++) table[i < 26 ? i + 65 : i < 52 ? i + 71 : i < 62 ? i - 4 : i * 4 - 205] = i;\n return base64 => {\n var n = base64.length, bytes = new Uint8Array((n - (base64[n - 1] == '=') - (base64[n - 2] == '=')) * 3 / 4 | 0);\n for (var i = 0, j = 0; i < n;) {\n var c0 = table[base64.charCodeAt(i++)], c1 = table[base64.charCodeAt(i++)];\n var c2 = table[base64.charCodeAt(i++)], c3 = table[base64.charCodeAt(i++)];\n bytes[j++] = (c0 << 2) | (c1 >> 4);\n bytes[j++] = (c1 << 4) | (c2 >> 2);\n bytes[j++] = (c2 << 6) | c3;\n }\n return bytes\n }\n })();\nvar wasmBinary = __toBinary(\"AGFzbQEAAAABwAImYAF/AX9gA39/fwF/YAV/f39/fwF/YAJ/fwF/YAJ/fwBgAX8AYAN/f38AYAR/f39/AX9gBH9/f38AYAAAYAZ/f39/f38Bf2AFf39/f38AYAZ/f39/f38AYAd/f39/f39/AX9gBH9/f38BfmAFf39/f38BfmAIf39/f39/f38Bf2AJf39/f39/f39/AX9gAn5/AX9gC39/f39/f39/f39/AX9gA39+fwF+YAN/f34AYAN/f34Bf2ADfn9/AX9gAn5+AX5gCH9/f39/f39/AGAJf39/f39/f39/AGAFf35/f38AYAABf2ANf39/f39/f39/f39/fwF/YA9/f39/f39/f39/f39/f38Bf2AFf39/fn8Bf2AGf3x/f39/AX9gAX8BfmACf38BfmAHf35/f39/fwF+YAF+AX5gBH5/f34BfgK0AR0BYQFiAAYBYQFjAAsBYQFkAAYBYQFlAAwBYQFmAAUBYQFnAAcBYQFoAAYBYQFpAAQBYQFqAAMBYQFrAAABYQFsAAABYQFtAAMBYQFuAAUBYQFvAAYBYQFwAAABYQFxAAIBYQFyAAEBYQFzAAABYQF0AAkBYQF1AAMBYQF2AAMBYQF3AAABYQF4AAUBYQF5AAQBYQF6AAsBYQFBAAQBYQFCAAcBYQFDAAMBYQFhAgGAAoCAAgOtBKsEBAEBAwIACAAAAAQHAQEBAAIBAAQDAQMBBAEFAwUFAAYAAwAIAgIDAQgBAwYBCwEBAAQYBAEEBwoGAwMLBwgBBggDCwUDAwMGCAEGBAYABwIGAAABAAIEBAYEBQMDAAsABgwDAAANBgIYAwkAAQwGBggAAgAAAAUQHQAEAQMbBwcHBwMDBh4TBAgBAgECCgcGCgYEAAQAARARAwAIAAYDBgAFBQUFBQUJCwUGAQAFBQICAgcHAwQEAAcSARIXJQQGAwMDAAUEAQABBQUDAAMGCgAFBQMBHwUDAwUFAREDBwoEAAUBAwcKCiEGBQEABgYGBQUIAxMNAAADAAkBBwcHBwcHBwcAAQgGBwMRAgICAgYCCAoCAgcCCAAFBAUFAAMAAAIKBBQACQwMCwMLCAgICwwAAQEFAAUABQkDAAMSEhcGAQAUAAAJCQkJBgAJCQkJCQkJCQkJCQkJDQ0ABgcBAQcHAgEBAgEEAwoABAcFBRwKCgoFAgoCAgMaGQUEAgICAgkFCwICAQoQAggMIiMCBgYBDAICAgICAgICAgMCAg0MAgoCAgIECgICAgQTAQEHAQcBCAUGCgUFBAYkBwUAAAgWFgYRAA0CAgsDEAUBAgYHCwIBAgIABRUVAwUABgIBCQEGAgIHBwcFAAoEAgIHAQAAAAAABAMGCAgIAAAFBgQAAAEDAwEDBQUABAEDAQQABAMNDQQECgoFAg4PDg8ODg4ICAgBCAEBAQEHBAUBcAFWVgYJAX8BQaD9wQILByYIAUQBAAFFAJYCAUYATAFHADgBSADhAgFJAMMBAUoAvgIBSwC9AgmlAQEAQQELVX9f5wK6ArYCf1+rAqECuAPVA6MDrgOPA50DjQG0Ap8CngKdApwCmwK3BLkEvgTBBKcEpgSiBKAEnwTBA8YDtwO5A7oDvQOlA6EDoAO/A8QDsgOxA7ADrwOaA5kDwAPFA7MDtAO1A7YDnAObA9cC3QLfAn9f0wLSAtEC0AJ/X/UB9QHOAswCywLKAl/PAl/DAsUCyQJfxALHAsgCwQLAAgqyrRGrBBYAIAAgASkAADcAACAAIAEpAAg3AAgLrgEBA38CQCACQX1qIgQgAE0EQCAAIQMMAQsgASgAACAAKAAAcyIDRQRAIAAhAwNAIAFBBGohASADQQRqIgMgBE8NAiABKAAAIAMoAABzIgVFDQALIAUQJSADaiAAaw8LIAMQJQ8LAkAgAyACQX9qTw0AIAEvAAAgAy8AAEcNACABQQJqIQEgA0ECaiEDCyADIAJJBH8gA0EBaiADIAEtAAAgAy0AAEYbBSADCyAAawtoAAJAAkACQAJAAkAgAkF7ag4EAQIDBAALIAAgARDeAQ8LIAAgARDdAQ8LIAAgARDcAQ8LIAApAABCgMaV/cub741PfkHAACABa62Ipw8LIAApAABC48iVvcub741PfkHAACABa62IpwsUACAAKAAAIgBBCHQgACABQQNGGws4AQF/IAMgASAAIAEgACADIAFraiIFIAIgBSACSRsQHSIFakYEfyAAIAVqIAQgAhAdIAVqBSAFCwsIACAAQYh/SwuTAQECfyABIANNBEAgACABEBwgAEEQaiABQRBqEBwgACADIAFrIgRqIQUgBEEhTgRAIABBIGohAANAIAAgAUEgaiIEEBwgAEEQaiABQTBqEBwgBCEBIABBIGoiACAFSQ0ACwsgAyEBIAUhAAsgASACSQRAA0AgACABLQAAOgAAIABBAWohACABQQFqIgEgAkcNAAsLC5gBAQR/QQMhAQJAIAAoAgQiAkEgTQRAIAACfyAAKAIIIgEgACgCEE8EQCAAIAEgAkEDdmsiAzYCCEEAIQEgAkEHcQwBCyABIAAoAgwiA0YNAiAAIAEgASADayACQQN2IgQgASAEayADSSIBGyIEayIDNgIIIAIgBEEDdGsLNgIEIAAgAygAADYCAAsgAQ8LQQFBAiACQSBJGwsIACAAZ0EfcwsIACAAaEEDdgsPACAAIAAoAgQgAWo2AgQLHAAgACACQQEgA3QiA2sgACACIABrIANLGyABGwvzAgICfwF+AkAgAkUNACAAIAJqIgNBf2ogAToAACAAIAE6AAAgAkEDSQ0AIANBfmogAToAACAAIAE6AA
EgA0F9aiABOgAAIAAgAToAAiACQQdJDQAgA0F8aiABOgAAIAAgAToAAyACQQlJDQAgAEEAIABrQQNxIgRqIgMgAUH/AXFBgYKECGwiATYCACADIAIgBGtBfHEiBGoiAkF8aiABNgIAIARBCUkNACADIAE2AgggAyABNgIEIAJBeGogATYCACACQXRqIAE2AgAgBEEZSQ0AIAMgATYCGCADIAE2AhQgAyABNgIQIAMgATYCDCACQXBqIAE2AgAgAkFsaiABNgIAIAJBaGogATYCACACQWRqIAE2AgAgBCADQQRxQRhyIgRrIgJBIEkNACABrSIFQiCGIAWEIQUgAyAEaiEBA0AgASAFNwMYIAEgBTcDECABIAU3AwggASAFNwMAIAFBIGohASACQWBqIgJBH0sNAAsLIAALDQAgACABdEEAIAJrdguCBAEDfyACQYAETwRAIAAgASACEBAaIAAPCyAAIAJqIQMCQCAAIAFzQQNxRQRAAkAgAkEBSARAIAAhAgwBCyAAQQNxRQRAIAAhAgwBCyAAIQIDQCACIAEtAAA6AAAgAUEBaiEBIAJBAWoiAiADTw0BIAJBA3ENAAsLAkAgA0F8cSIEQcAASQ0AIAIgBEFAaiIFSw0AA0AgAiABKAIANgIAIAIgASgCBDYCBCACIAEoAgg2AgggAiABKAIMNgIMIAIgASgCEDYCECACIAEoAhQ2AhQgAiABKAIYNgIYIAIgASgCHDYCHCACIAEoAiA2AiAgAiABKAIkNgIkIAIgASgCKDYCKCACIAEoAiw2AiwgAiABKAIwNgIwIAIgASgCNDYCNCACIAEoAjg2AjggAiABKAI8NgI8IAFBQGshASACQUBrIgIgBU0NAAsLIAIgBE8NAQNAIAIgASgCADYCACABQQRqIQEgAkEEaiICIARJDQALDAELIANBBEkEQCAAIQIMAQsgA0F8aiIEIABJBEAgACECDAELIAAhAgNAIAIgAS0AADoAACACIAEtAAE6AAEgAiABLQACOgACIAIgAS0AAzoAAyABQQRqIQEgAkEEaiICIARNDQALCyACIANJBEADQCACIAEtAAA6AAAgAUEBaiEBIAJBAWoiAiADRw0ACwsgAAsbAQF/IABBAWoiABAkIgFBCHQgAEEIdCABdmoLhQEBBn8gACgCICEGIAAoAhgiBSADIAAoAgQiCGsiB0kEQEF/IAF0QX9zIQEgACgCKCEJA0AgCSABIAVxQQJ0aiAGIAUgCGogAiAEEFpBAnRqIgooAgA2AgAgCiAFNgIAIAVBAWoiBSAHSQ0ACwsgACAHNgIYIAYgAyACIAQQWkECdGooAgALXAEBfyABKAI4QQFGBEAgAgRAIAAQKw8LIAAQLg8LIAAQgAFBAnQiA0GwpwFqKAIAQQh0IAEoAixqIQAgASgCBCADaigCACEBIAIEQCAAIAEQK2sPCyAAIAEQLmsLDAAgAEEBahAkQQh0CwkAIAAgATsAAAsWACAAQbHz3fF5bEETQRQgAUEDRht2C5sBAQV/IwBBEGsiBSQAIAUgAjYCDCACQRh2IQYgAUEEaiEHIAAhBANAIAQiAyAHTwRAIAIgA0F8aiIEKAAARg0BCwsCQCADIAFNDQAgA0F/aiIELQAAIAZHDQAgBUEMakEDciECA0AgBCIDIAFNBEAgASEDDAILIANBf2oiBC0AACACQX9qIgItAABGDQALCyAFQRBqJAAgACADawsNACABQX9zIABqQQJLC3gBA38CQAJAIAFBfWoiBCAAIgNNDQADQCACIAMoAABzIgVFBEAgA0EEaiIDIARJDQEMAgsLIAUQJSADaiEDDAELIAMgAU8NAANAIAMtAAAgAkH/AXFHDQEgAkEIdiECIANBAWoiAyABRw0ACyABIABrDwsgAyAAawsJACAAIAE2AAALFAAgAUUEQEEADwsgACABIAIQqQQLigEBA38gACgCHCIBEJkEAkAgACgCECICIAEoAhQiAyADIAJLGyICRQ0AIAAoAgwgASgCECACECoaIAAgACgCDCACajYCDCABIAEoAhAgAmo2AhAgACAAKAIUIAJqNgIUIAAgACgCECACazYCECABIAEoAhQgAmsiADYCFCAADQAgASABKAIINgIQCwsRACAAIAEpAAA3AAAgAEEIagvXAgEFfyAABEAgAEF8aiIBKAIAIgQhAyABIQIgAEF4aigCACIFQX9MBEAgASAFaiIAKAIFIgIgACgCCTYCCCAAKAIJIAI2AgQgBCAFQX9zaiEDIABBAWohAgsgASAEaiIAKAIAIgEgACABakF8aigCAEcEQCAAKAIEIgQgACgCCDYCCCAAKAIIIAQ2AgQgASADaiEDCyACIAM2AgAgA0F8cSACakF8aiADQX9zNgIAIAICfyACKAIAQXhqIgBB/wBNBEAgAEEDdkF/agwBCyAAZyEBIABBHSABa3ZBBHMgAUECdGtB7gBqIABB/x9NDQAaIABBHiABa3ZBAnMgAUEBdGtBxwBqIgBBPyAAQT9JGwsiA0EEdCIAQYDtAWo2AgQgAiAAQYjtAWoiACgCADYCCCAAIAI2AgAgAigCCCACNgIEQYj1AUGI9QEpAwBCASADrYaENwMACwtUAQJ/IAAoAgQhASAAKAIMIAAoAgAQ/AEgACAAKAIEQQdxNgIEIAAgACgCACABQXhxdjYCACAAIAAoAhAiAiAAKAIMIAFBA3ZqIgAgACACSxs2AgwLEQAgACgAAEGx893xeWxBEXYLIgADQCAAIAEpAAA3AAAgAUEIaiEBIABBCGoiACACSQ0ACwsdACAAQYABTwRAIAAQJEEkag8LIABBsKYBai0AAAsKACABIABBA3R3Cw0AIAAoAgggACgCDGoLpQEBAX8gAkEDTwRAIAAgASgCBDYCCCABKAIAIQEgACACQX5qNgIAIAAgATYCBA8LAkACfwJAAkAgAiADaiICDgQDAQEAAQsgASgCACIDQX9qDAELIAEoAgAhAyABIAJBAnRqKAIACyEEIAFBBEEIIAJBAUsbaigCACEBIAAgAzYCBCAAIAE2AgggACAENgIADwsgACABKQIANwIAIAAgASgCCDYCCAtVAQJ/IAQgARDQASEGIAMoAgAiBSAEIABrIgRJBEADQCACIAAgBWogARDQAUECdGogBTYCACAFQQFqIgUgBEkNAAsLIAMgBDYCACACIAZBAnRqKAIAC7QEARV/IwBBEGsiDiQAIAAoAiAgASAAKAJ8IAMQHkECdGoiBSgCACEDIAAoAnghBiAAKAIIIQ8gACgCDCEMIAAoAighEiAAKAKAASEIIAAoAhAhEyAFIAEgACgCBCINayIJNgIAIBIgCUF/IAZBf2p0QX9zIhRxQQN0aiEHIAlBCWohCgJ/IAMgE0kEQCAHQgA3AgBBAAwBC0EAIAkgFGsiACAAIAlLGyEVIAdBBGohBiAMIA1qIRYgDCAPaiEXQX8gCHRBf3MhEUEIIQtBACEIA0ACfyAEQQAgECAIIBAgCEkbIgAgA2ogDEkbRQRAIAAgAWogAyANa
iAAaiACEB0gAGoiACADaiEFIA0MAQsgDyANIAAgAWogAyAPaiAAaiACIBcgFhAgIABqIgAgA2oiBSAMSRsLIRggBSAKIAAgCiADa0sbIAogACALSyIFGyEKIAAgCyAFGyELAkAgACABaiIZIAJGDQAgEiADIBRxQQN0aiEFAkACQCADIBhqIABqLQAAIBktAABJBEAgByADNgIAIAMgFUsNASAOQQxqIQcMAwsgBiADNgIAIAMgFUsEQCAAIQggBSEGDAILIA5BDGohBgwCCyAAIRAgBUEEaiIHIQULIBFFDQAgEUF/aiERIAUoAgAiAyATTw0BCwsgBkEANgIAIAdBADYCACALQYB9aiIAQcABIABBwAFJG0EAIAtBgANLGwshAyAOQRBqJAAgAyAKIAlrQXhqIgAgAyAASxsLHAEBfyAAKAIAIAAoAgQgARApIQIgACABECYgAgssACACRQRAIAAoAgQgASgCBEYPCyAAIAFGBEBBAQ8LIAAQkAEgARCQARBdRQukBAEDf0EBIQYCQCABRSACQQRqAn8gACgChAFBAU4EQCAAKAIAIgQoAixBAkYEQCAEIAAQmAQ2AiwLIAAgAEGYFmoQrgEgACAAQaQWahCuASAAEJcEQQFqIQYgACgCqC1BCmpBA3YiBSAAKAKsLUEKakEDdiIEIAQgBUsbDAELIAJBBWoiBAsiBUtyRQRAIAAgASACIAMQjgIMAQsgACgCvC0hAQJAIAQgBUcEQCAAKAKIAUEERw0BCyAAIAAvAbgtIANBAmpB//8DcSICIAF0ciIEOwG4LSAAAn8gAUEOTgRAIAAgACgCFCIBQQFqNgIUIAEgACgCCGogBDoAACAAIAAoAhQiAUEBajYCFCABIAAoAghqIABBuS1qLQAAOgAAIAAgAkEQIAAoArwtIgFrdjsBuC0gAUFzagwBCyABQQNqCzYCvC0gAEGA2wBBgNkAEIsCDAELIAAgAC8BuC0gA0EEakH//wNxIgIgAXRyIgQ7AbgtIAACfyABQQ5OBEAgACAAKAIUIgFBAWo2AhQgASAAKAIIaiAEOgAAIAAgACgCFCIBQQFqNgIUIAEgACgCCGogAEG5LWotAAA6AAAgACACQRAgACgCvC0iAWt2OwG4LSABQXNqDAELIAFBA2oLNgK8LSAAIABBnBZqKAIAQQFqIABBqBZqKAIAQQFqIAYQlgQgACAAQZQBaiAAQYgTahCLAgsgABCNAiADBEAgABCMAgsL9QEBAX8gAkUEQCAAQgA3AgAgAEEANgIQIABCADcCCEG4fw8LIAAgATYCDCAAIAFBBGo2AhAgAkEETwRAIAAgASACaiIBQXxqIgM2AgggACADKAAANgIAIAFBf2otAAAiAUUEQCAAQQA2AgRBfw8LIABBCCABECRrNgIEIAIPCyAAIAE2AgggACABLQAAIgM2AgACQAJAAkAgAkF+ag4CAQACCyAAIAEtAAJBEHQgA3IiAzYCAAsgACABLQABQQh0IANqNgIACyABIAJqQX9qLQAAIgFFBEAgAEEANgIEQWwPCyAAQSggARAkIAJBA3RqazYCBCACCy0BAX8gAUECdEGwwwFqKAIAIAAoAgBBICABIAAoAgRqa3ZxIQIgACABECYgAgsxAQF/IAAgACgCBCIDIAJqNgIEIAAgACgCACACQQJ0QbDDAWooAgAgAXEgA3RyNgIACyEAIAJBAkYEQCABIABBAnRqKAIADwsgASAAQQF0ai8BAAtIAAJAAkACQAJAIANBf2oOAwABAgMLIAIgAUECdGogADYCAA8LIAIgAUECdGogACAEazYCAA8LIAIgAUEBdGogACAEazsBAAsL6QIBAX8CQCAAIAFGDQAgASAAayACa0EAIAJBAXRrTQRAIAAgASACECoPCyAAIAFzQQNxIQMCQAJAIAAgAUkEQCADBEAgACEDDAMLIABBA3FFBEAgACEDDAILIAAhAwNAIAJFDQQgAyABLQAAOgAAIAFBAWohASACQX9qIQIgA0EBaiIDQQNxDQALDAELAkAgAw0AIAAgAmpBA3EEQANAIAJFDQUgACACQX9qIgJqIgMgASACai0AADoAACADQQNxDQALCyACQQNNDQADQCAAIAJBfGoiAmogASACaigCADYCACACQQNLDQALCyACRQ0CA0AgACACQX9qIgJqIAEgAmotAAA6AAAgAg0ACwwCCyACQQNNDQADQCADIAEoAgA2AgAgAUEEaiEBIANBBGohAyACQXxqIgJBA0sNAAsLIAJFDQADQCADIAEtAAA6AAAgA0EBaiEDIAFBAWohASACQX9qIgINAAsLIAALDQAgASACRiAAQSBGcQsJAEEIIAAQtQELCAAgACABEDQLIQAgAULP1tO+0ser2UJ+IAB8Qh+JQoeVr6+Ytt6bnn9+CyYBAX8jAEEQayICJAAgAiABNgIMQdjpASAAIAEQuQEgAkEQaiQAC2AAAkACQAJAAkAgAkF4ag4ZAgMDAwMDAwMBAwMDAwMDAwMDAwMDAwMDAAMLIAAgARCUAg8LIAAgARBWDwsgACABEDcPCyACQQdNBEAgACABIAIQwwQPCyAAIAEgAhDCBAt/AQF/IABBQGsoAgAQcARAIAAoAhghAiAAAn8gAQRAIAIQKwwBCyACEC4LNgIoCyAAKAIcIQIgAAJ/IAEEQCACECshASAAKAIgECshAiAAKAIkECsMAQsgAhAuIQEgACgCIBAuIQIgACgCJBAuCzYCNCAAIAI2AjAgACABNgIsC4MBAQN/IAFFBEBBAA8LIAJBQGsoAgAQcEUEQCABQQt0DwsgAigCOEEBRgRAIAFBgAxsDwsgAigCKCABbCEEIAIoAgAhBkEAIQIDQCAGIAAgAmotAABBAnRqKAIAIQUgBAJ/IAMEQCAFECsMAQsgBRAuC2shBCACQQFqIgIgAUcNAAsgBAuwBgEXfyMAQRBrIhQkAEEBIAAoAoABdCEKIAAoAighDgJAIAAoAiAgASAAKAJ8IAQQWkECdGoiDCgCACIJQQAgASAAKAIEIg9rIghBfyAAKAJ4QX9qdEF/cyIQayIGIAYgCEsbIhUgACgCECAAKAIUIAggACgCdBAnIhYgFSAWSxsiDU0NACAKIQcCQANAIA4gCSIGIBBxQQN0aiIJKAIEIhdBAUcgB0ECSXJFBEAgCSALNgIEIAdBf2ohByAGIQsgCSgCACIJIA1LDQEMAgsLIBdBAUYEQCAJQgA3AgALIAsiBkUNAQsgDkEEaiEJA0AgCSAGIBBxQQN0aigCACELIAAgBiACIAcgDSAFELwDIAdBAWohByALIgYNAAsLIAAoAgghGCAAKAIMIREgDCgCACEHIAwgCDYCACAKQX9qIQogCEEJaiESIA4gCCAQcUEDdGoiE0EEaiEMAkAgByAWTQRAIAohBkEAIQgMAQsgDyARaiEZIBEgGGohGiAIQQJqIRsgCEEBaiEcQQAhCEEAIQtBACENA0ACfyAFQQFGQQAgDSALIA0gC0kbIgYgB2ogEUkbRQRAIAEgBmogByAP
aiAGaiACEB0gBmohBiAPDAELIBggDyABIAZqIAcgGGogBmogAiAaIBkQICAGaiIGIAdqIBFJGwshFwJAIAYgCE0NACAGIAhrQQJ0IBwgB2sQJCADKAIAQQFqECRrSgRAIAMgGyAHazYCACAGIQgLIAYgB2ogEiAGIBIgB2tLGyESIAEgBmogAkcNAEEAIAogBUECRhshBgwCCyAOIAcgEHFBA3RqIQkCQAJAIAcgF2ogBmotAAAgASAGai0AAEkEQCATIAc2AgAgByAVSw0BIBRBDGohEyAKIQYMBAsgDCAHNgIAIAcgFUsEQCAGIQsgCSEMDAILIBRBDGohDCAKIQYMAwsgBiENIAlBBGoiEyEJCyAKQX9qIgYgCk8NASAGIQogCSgCACIHIBZLDQALCyAMQQA2AgAgE0EANgIAIAZFIAVBAkdyRQRAIAAgASACIAMgCCAGIAQQuwMhCAsgACASQXhqNgIYIBRBEGokACAIC44BAQh/IAAoAhgiAyABIAAoAgQiBWsiAUkEQEF/IAAoAnhBf2p0QX9zIQYgACgCfCEHIAAoAighCCAAKAIgIQkDQCAJIAMgBWogByACEFpBAnRqIgQoAgAhCiAEIAM2AgAgCCADIAZxQQN0aiIEQQE2AgQgBCAKNgIAIANBAWoiAyABSQ0ACwsgACABNgIYCw4AIAAgARDjAUECEOIBC6cBACAAIAEtAAA6AAAgACABLQABOgABIAAgAS0AAjoAAiAAIAEtAAM6AAMgACABLQAEOgAEIAAgAS0ABToABSAAIAEtAAY6AAYgACABLQAHOgAHIAAgAS0ACDoACCAAIAEtAAk6AAkgACABLQAKOgAKIAAgAS0ACzoACyAAIAEtAAw6AAwgACABLQANOgANIAAgAS0ADjoADiAAIAEtAA86AA8gAEEQagvTAQEDfyAAQUBrKAIAEHAEQCABBEAgACgCACEGA0AgBiACIAVqLQAAQQJ0aiIHIAcoAgBBAmo2AgAgBUEBaiIFIAFHDQALCyAAIAAoAhggAUEBdGo2AhgLIAAoAgQgARCAAUECdGoiASABKAIAQQFqNgIAIAAgACgCHEEBajYCHCAAKAIMIANBAWoQJEECdGoiASABKAIAQQFqNgIAIAAgACgCJEEBajYCJCAAKAIIIARBfWoQPEECdGoiASABKAIAQQFqNgIAIAAgACgCIEEBajYCIAsWACAAIAEgAiADEFIgASACIAMQogNqC7cIAQR/IwBBEGsiBiQAIABBQGsoAgAQcCEFIABBADYCOAJAIAAoAhxFBEAgAkGACE0EQCAAQQE2AjgLIAAoAjwiBCgCgAhBAkYEQEEAIQIgAEEANgI4IAUEQCAAQQA2AhggACgCACIFQQFBCyAEQQAQ+QEiAWt0QQEgARsiATYCACAAIAAoAhggAWo2AhhBASEBA0AgBSABQQJ0akEBQQsgBCABEPkBIgdrdEEBIAcbIgc2AgAgACAAKAIYIAdqNgIYIAFBAWoiAUGAAkcNAAsLIAYgBEG0GWoQcyAAQQA2AhwgACgCBCEBIAYoAgghBQNAIAEgAkECdGpBAUEKIAUgAhCYASIEa3RBASAEGyIENgIAIAAgACgCHCAEajYCHCACQQFqIgJBJEcNAAsgBiAAKAI8QYgOahBzQQAhAiAAQQA2AiAgACgCCCEBIAYoAgghBQNAIAEgAkECdGpBAUEKIAUgAhCYASIEa3RBASAEGyIENgIAIAAgACgCICAEajYCICACQQFqIgJBNUcNAAsgBiAAKAI8QYQIahBzQQAhAiAAQQA2AiQgACgCDCEBIAYoAgghBQNAIAEgAkECdGpBAUEKIAUgAhCYASIEa3RBASAEGyIENgIAIAAgACgCJCAEajYCJCACQQFqIgJBIEcNAAsMAgsgBQRAIAZB/wE2AgAgACgCACAGIAEgAhCqARogACAAKAIAQf8BQQEQbzYCGAsgACgCBCIBQoGAgIAQNwKIASABQoGAgIAQNwKAASABQoGAgIAQNwJ4IAFCgYCAgBA3AnAgAUKBgICAEDcCaCABQoGAgIAQNwJgIAFCgYCAgBA3AlggAUKBgICAEDcCUCABQoGAgIAQNwJIIAFCgYCAgBA3AkAgAUKBgICAEDcCOCABQoGAgIAQNwIwIAFCgYCAgBA3AiggAUKBgICAEDcCICABQoGAgIAQNwIYIAFCgYCAgBA3AhAgAUKBgICAEDcCCCABQoGAgIAQNwIAIABBJDYCHCAAKAIIIQFBACECA0AgASACQQJ0akEBNgIAIAJBAWoiAkE1Rw0ACyAAQTU2AiAgACgCDCIBQoGAgIAQNwJ4IAFCgYCAgBA3AnAgAUKBgICAEDcCaCABQoGAgIAQNwJgIAFCgYCAgBA3AlggAUKBgICAEDcCUCABQoGAgIAQNwJIIAFCgYCAgBA3AkAgAUKBgICAEDcCOCABQoGAgIAQNwIwIAFCgYCAgBA3AiggAUKBgICAEDcCICABQoGAgIAQNwIYIAFCgYCAgBA3AhAgAUKBgICAEDcCCCABQoGAgIAQNwIAIABBIDYCJAwBCyAFBEAgACAAKAIAQf8BQQEQbzYCGAsgACAAKAIEQSNBABBvNgIcIAAgACgCCEE0QQAQbzYCICAAIAAoAgxBH0EAEG82AiQLIAAgAxBRIAZBEGokAAssAAJAAkACQCACQXtqDgIBAgALIAAgARDeAQ8LIAAgARDdAQ8LIAAgARDcAQshACAAIAIgACgCBCICajYCBCAAIAAoAgAgASACdHI2AgALMAACQAJAAkAgA0F+ag4CAAECCyACIAFBAnRqIAA2AgAPCyACIAFBAXRqIAA7AQALC0oBAn8CQCAALQAAIgJFIAIgAS0AACIDR3INAANAIAEtAAEhAyAALQABIgJFDQEgAUEBaiEBIABBAWohACACIANGDQALCyACIANrC20BAX8jAEGAAmsiBSQAIARBgMAEcSACIANMckUEQCAFIAFB/wFxIAIgA2siAkGAAiACQYACSSIBGxAoGiABRQRAA0AgACAFQYACEGYgAkGAfmoiAkH/AUsNAAsLIAAgBSACEGYLIAVBgAJqJAALBgAgABA4CwsAIAAgAUEBEOIBCy8BAn8gACgCBCAAKAIAQQJ0aiICLQACIQMgACACLwEAIAEgAi0AAxBGajYCACADCy8BAn8gACgCBCAAKAIAQQJ0aiICLQACIQMgACACLwEAIAEgAi0AAxBCajYCACADC0YAIAAgARBzIAAgACgCBCAAKAIIIAJBA3RqIgAoAgQiAUGAgAJqIgJBgIB8cSABayACQRB2dSAAKAIAakEBdGovAQA2AgALGgAgAARAIAIEQCADIAAgAhEEAA8LIAAQOAsL0AUBA38gAEH//wNxIQMgAEEQdiEEQQEhACACQQFGBEAgAyABLQAAaiIAQY+AfGogACAAQfD/A0sbIgAgBGoiAUEQdCICQYCAPGogAiABQfD/A0sbIAByDwsgAQR/IAJBEE8EQAJAAkACQCACQa8rSwRAA0BB2wIhBSABIQA
DQCADIAAtAABqIgMgBGogAyAALQABaiIDaiADIAAtAAJqIgNqIAMgAC0AA2oiA2ogAyAALQAEaiIDaiADIAAtAAVqIgNqIAMgAC0ABmoiA2ogAyAALQAHaiIDaiADIAAtAAhqIgNqIAMgAC0ACWoiA2ogAyAALQAKaiIDaiADIAAtAAtqIgNqIAMgAC0ADGoiA2ogAyAALQANaiIDaiADIAAtAA5qIgNqIAMgAC0AD2oiA2ohBCAAQRBqIQAgBUF/aiIFDQALIARB8f8DcCEEIANB8f8DcCEDIAFBsCtqIQEgAkHQVGoiAkGvK0sNAAsgAkUNAyACQRBJDQELA0AgAyABLQAAaiIAIARqIAAgAS0AAWoiAGogACABLQACaiIAaiAAIAEtAANqIgBqIAAgAS0ABGoiAGogACABLQAFaiIAaiAAIAEtAAZqIgBqIAAgAS0AB2oiAGogACABLQAIaiIAaiAAIAEtAAlqIgBqIAAgAS0ACmoiAGogACABLQALaiIAaiAAIAEtAAxqIgBqIAAgAS0ADWoiAGogACABLQAOaiIAaiAAIAEtAA9qIgNqIQQgAUEQaiEBIAJBcGoiAkEPSw0ACyACRQ0BCwNAIAMgAS0AAGoiAyAEaiEEIAFBAWohASACQX9qIgINAAsLIARB8f8DcCEEIANB8f8DcCEDCyAEQRB0IANyDwsgAgRAA0AgAyABLQAAaiIDIARqIQQgAUEBaiEBIAJBf2oiAg0ACwsgBEHx/wNwQRB0IANBj4B8aiADIANB8P8DSxtyBSAACwsYACAALQAAQSBxRQRAIAEgAiAAEKUBGgsLDAAgACABKQAANwAACx8AIAAgASACKAIEEEY2AgAgARAjGiAAIAJBCGo2AgQLCQBBAUEFIAAbC88MAQ1/AkACQAJAAkACQCAAKAKEAUF7ag4DAQICAAsgACgCBCELIAAoAnQhByAAKAIQIQUgACgCFCEKIAAoAighCCAAKAIMIQ9BASAAKAKAAXQhDEEDIQYCQCAAIAAoAngiDSAAKAJ8IAFBBBAsIgQgBSABIAtrIglBASAHdCIHayAFIAkgBWsgB0sbIAobIgdNDQBBACAJQQEgDXQiBmsiBSAFIAlLGyEKIAZBf2ohDSAJQQJqIQ5BAyEGA0ACQCAEIAtqIgUgBmotAAAgASAGai0AAEcNACABIAUgAhAdIgUgBk0NACADIA4gBGs2AgAgBSIGIAFqIAJHDQAMAgsgBCAKTQ0BIAxBf2oiDEUNASAIIAQgDXFBAnRqKAIAIgQgB0sNAAsLIAAoAnAiACgCBCEFIAAoAgAhByAAKAJ4IQggACgCDCEKIAAoAighDSAAKAIgIQQgASAAKAJ8QQQQWiEAIAxFDQMgBCAAQQJ0aigCACIEIApNDQMgCyAPaiELQQAgByAFayIAQQEgCHQiCGsiDiAOIABLGyEOIAhBf2ohCCABQQRqIRAgCSAPayAAakECaiEJA0ACQCAEIAVqIgAoAAAgASgAAEcNACAQIABBBGogAiAHIAsQIEEEaiIAIAZNDQAgAyAJIARrNgIAIAAhBiAAIAFqIAJGDQQLIAQgDk0NBCAMQX9qIgxFDQQgBiEAIA0gBCAIcUECdGooAgAiBCAKSw0ACwwCCyAAKAIEIQsgACgCdCEHIAAoAhAhBSAAKAIUIQogACgCKCEIIAAoAgwhD0EBIAAoAoABdCEMQQMhBgJAIAAgACgCeCINIAAoAnwgAUEFECwiBCAFIAEgC2siCUEBIAd0IgdrIAUgCSAFayAHSxsgChsiB00NAEEAIAlBASANdCIGayIFIAUgCUsbIQogBkF/aiENIAlBAmohDkEDIQYDQAJAIAQgC2oiBSAGai0AACABIAZqLQAARw0AIAEgBSACEB0iBSAGTQ0AIAMgDiAEazYCACAFIgYgAWogAkcNAAwCCyAEIApNDQEgDEF/aiIMRQ0BIAggBCANcUECdGooAgAiBCAHSw0ACwsgACgCcCIAKAIEIQUgACgCACEHIAAoAnghCCAAKAIMIQogACgCKCENIAAoAiAhBCABIAAoAnxBBRBaIQAgDEUNAiAEIABBAnRqKAIAIgQgCk0NAiALIA9qIQtBACAHIAVrIgBBASAIdCIIayIOIA4gAEsbIQ4gCEF/aiEIIAFBBGohECAJIA9rIABqQQJqIQkDQAJAIAQgBWoiACgAACABKAAARw0AIBAgAEEEaiACIAcgCxAgQQRqIgAgBk0NACADIAkgBGs2AgAgACEGIAAgAWogAkYNAwsgBCAOTQ0DIAxBf2oiDEUNAyAGIQAgDSAEIAhxQQJ0aigCACIEIApLDQALDAELIAAoAgQhCyAAKAJ0IQcgACgCECEFIAAoAhQhCiAAKAIoIQggACgCDCEPQQEgACgCgAF0IQxBAyEGAkAgACAAKAJ4Ig0gACgCfCABQQYQLCIEIAUgASALayIJQQEgB3QiB2sgBSAJIAVrIAdLGyAKGyIHTQ0AQQAgCUEBIA10IgZrIgUgBSAJSxshCiAGQX9qIQ0gCUECaiEOQQMhBgNAAkAgBCALaiIFIAZqLQAAIAEgBmotAABHDQAgASAFIAIQHSIFIAZNDQAgAyAOIARrNgIAIAUiBiABaiACRw0ADAILIAQgCk0NASAMQX9qIgxFDQEgCCAEIA1xQQJ0aigCACIEIAdLDQALCyAAKAJwIgAoAgQhBSAAKAIAIQcgACgCeCEIIAAoAgwhCiAAKAIoIQ0gACgCICEEIAEgACgCfEEGEFohACAMRQ0BIAQgAEECdGooAgAiBCAKTQ0BIAsgD2ohC0EAIAcgBWsiAEEBIAh0IghrIg4gDiAASxshDiAIQX9qIQggAUEEaiEQIAkgD2sgAGpBAmohCQNAAkAgBCAFaiIAKAAAIAEoAABHDQAgECAAQQRqIAIgByALECBBBGoiACAGTQ0AIAMgCSAEazYCACAAIQYgACABaiACRg0CCyAEIA5NDQIgDEF/aiIMRQ0CIAYhACANIAQgCHFBAnRqKAIAIgQgCksNAAsLIAAPCyAGC9wFAQx/IwBBEGsiCiQAAn8gBEEDTQRAIApBADYCDCAKQQxqIAMgBBAqGiAAIAEgAiAKQQxqQQQQayIAQWwgABAhGyAAIAAgBEsbDAELIABBACABKAIAQQF0QQJqECghDkFUIAMoAAAiBUEPcSIAQQpLDQAaIAIgAEEFajYCACADIARqIgJBfGohCyACQXlqIQ8gAkF7aiEQQQQhAiAFQQR2IQQgAEEGaiEMQSAgAHQiCEEBciEJIAEoAgAhDSADIQZBACEAQQAhBQNAAkACQCAARQRAIAUhBwwBCyAFIQAgBEH//wNxQf//A0YEQANAIABBGGohAAJ/IAYgEEkEQCAGQQJqIgYoAAAgAnYMAQsgAkEQaiECIARBEHYLIgRB//8DcUH//wNGDQALCyAEQQNxIgdBA0YEQANAIAJBAmohAiAAQQNqIQAgBEECdiIEQQNxIgdBA0YNAAsLQVAgACAHaiIHIA1LDQMaIAJBAmohAgJAIA
cgBU0EQCAFIQcMAQsgDiAFQQF0akEAIAcgBWtBAXQQKBoLIAYgD0tBACAGIAJBA3VqIgAgC0sbRQRAIAAoAAAgAkEHcSICdiEEDAILIARBAnYhBAsgBiEACwJ/IAxBf2ogBCAIQX9qcSIGIAhBAXRBf2oiBSAJayINSQ0AGiAEIAVxIgRBACANIAQgCEgbayEGIAwLIQUgDiAHQQF0aiAGQX9qIgQ7AQAgBEEBIAZrIAZBAUgbIAlqIgkgCEgEQANAIAxBf2ohDCAJIAhBAXUiCEgNAAsLIAIgBWoiAiAAIAtrQQN0aiACQQdxIAAgD0sgACACQQN1aiIAIAtLcSIFGyECIAsgACAFGyIGKAAAIQUgCUECTgRAIARFIQAgBSACdiEEIAdBAWoiBSABKAIAIg1NDQELC0FsIAlBAUcgAkEgSnINABogASAHNgIAIAYgAkEHakEDdWogA2sLIQAgCkEQaiQAIAALTgECfyABKAIIIAJBA3RqIgIoAgAhAyABKAIEIQQgACABKAIAIgAgACACKAIEakEQdiIAEEcgASAEIAMgASgCACAAdWpBAXRqLwEANgIACxsAIABBASAAGyEAAkAgABBMIgANABASAAsgAAsKACAAQVBqQQpJC0cBA38gAkEEaiEFQQAhAgNAIAAgAkECdGoiAyADKAIAIAV2QQFqIgM2AgAgAyAEaiEEIAEgAkchAyACQQFqIQIgAw0ACyAECwcAIABBAkcL9AIBAn8jAEEgayIFJAACf0EAIAFBCEkNABogBUEIaiAAIAEQ+QNBAEEAECENABogA0F8cSEGAkACQAJAAkAgA0EDcUEBaw4DAgEAAwsgBUEIaiAEIAIgBkECcmotAABBAnRqIgAvAQAgAC0AAhBbIAVBCGoQOQsgBUEIaiAEIAIgBkEBcmotAABBAnRqIgAvAQAgAC0AAhBbCyAFQQhqIAQgAiAGai0AAEECdGoiAC8BACAALQACEFsgBUEIahA5CyAGBEADQCAFQQhqIAQgAiAGaiIAQX9qLQAAQQJ0aiIBLwEAIAEtAAIQWyAFQQhqIAQgAEF+ai0AAEECdGoiAS8BACABLQACEFsgBUEIahA5IAVBCGogBCAAQX1qLQAAQQJ0aiIALwEAIAAtAAIQWyAFQQhqIAQgAiAGQXxqIgZqLQAAQQJ0aiIALwEAIAAtAAIQWyAFQQhqEDkgBg0ACwsgBUEIahD4AwshBiAFQSBqJAAgBgs/AQF/IAEhAiACAn9BpOoBKAIAQX9MBEAgACACQdjpARClAQwBCyAAIAJB2OkBEKUBCyIARgRADwsgACABbhoLPgEBfyAAIAEvAAAiAjYCDCAAIAFBBGoiATYCBCAAQQEgAnQ2AgAgACABQQEgAkF/anRBASACG0ECdGo2AggLDgAgACABIAIQRyAAEDkLPwEBfyAAIAAoAhQiAkEBajYCFCACIAAoAghqIAFBCHY6AAAgACAAKAIUIgJBAWo2AhQgAiAAKAIIaiABOgAAC44FAQp/IAAoAiwiAkH6fWohCCAAKAJ0IQUgAiEBA0AgACgCPCAFayAAKAJsIgVrIQQgBSABIAhqTwRAIAAoAjgiASABIAJqIAIQKhogACAAKAJwIAJrNgJwIAAgACgCbCACayIFNgJsIAAgACgCXCACazYCXCAAKAJEIAAoAkwiA0EBdGohAQNAIAFBfmoiAUEAIAEvAQAiByACayIGIAYgB0sbOwEAIANBf2oiAw0ACyAAKAJAIAJBAXRqIQEgAiEDA0AgAUF+aiIBQQAgAS8BACIHIAJrIgYgBiAHSxs7AQAgA0F/aiIDDQALIAIgBGohBAsCQCAAKAIAIgEoAgRFDQAgACABIAAoAnQgACgCOCAFamogBBCeBCAAKAJ0aiIFNgJ0AkAgACgCtC0iAyAFakEDSQ0AIAAgACgCOCIHIAAoAmwgA2siAWoiBC0AACIGNgJIIAAgACgCVCIJIAQtAAEgBiAAKAJYIgZ0c3EiBDYCSANAIANFDQEgACABIAdqLQACIAQgBnRzIAlxIgQ2AkggACgCQCAAKAI0IAFxQQF0aiAAKAJEIARBAXRqIgovAQA7AQAgCiABOwEAIAAgA0F/aiIDNgK0LSABQQFqIQEgAyAFakECSw0ACwsgBUGFAksNACAAKAIAKAIERQ0AIAAoAiwhAQwBCwsCQCAAKAI8IgMgACgCwC0iAk0NACACIAAoAnQgACgCbGoiAUkEQCAAKAI4IAFqQQAgAyABayICQYICIAJBggJJGyICECgaIAAgASACajYCwC0PCyABQYICaiIBIAJNDQAgACgCOCACakEAIAMgAmsiAyABIAJrIgIgAiADSxsiAhAoGiAAIAAoAsAtIAJqNgLALQsLEQAgACABKAAANgAAIABBBGoLEQAgACABLwAAOwAAIABBAmoLTAEBfyMAQRBrIgEkACABQQA2AgwCQAJ/IAFBICAAELUBIgA2AgxBAEEMIAAbRQsEQCABKAIMIgANAQsQ/ANBACEACyABQRBqJAAgAAtJAQJ/IAAoAgQiBUEIdSEGIAAoAgAiACABIAVBAXEEfyACKAIAIAZqKAIABSAGCyACaiADQQIgBUECcRsgBCAAKAIAKAIYEQsACxYAAn8gABCRAQRAIAAoAgAMAQsgAAsLsAEBAX8gAQJ/IAJBB00EQCAAKAIAIAEoAgAtAAA6AAAgACgCACABKAIALQABOgABIAAoAgAgASgCAC0AAjoAAiAAKAIAIAEoAgAtAAM6AAMgASABKAIAIAJBAnQiAkGQwwFqKAIAaiIDNgIAIAAoAgAgAygAADYABCABKAIAIAJB8MIBaigCAGsMAQsgACgCACABKAIAEGcgASgCAAtBCGo2AgAgACAAKAIAQQhqNgIAC9EDAQp/IwBB8ABrIgskACAAQQhqIQxBASAFdCEKAkAgAkF/RgRAIAAgBTYCBCAAQQE2AgAMAQtBgIAEIAVBf2p0QRB1IQ0gCkF/aiIOIQhBASEGA0ACQCABIAdBAXQiD2ovAQAiCUH//wNGBEAgDCAIQQN0aiAHNgIEIAhBf2ohCEEBIQkMAQsgBkEAIA0gCUEQdEEQdUobIQYLIAsgD2ogCTsBACACIAdHIQkgB0EBaiEHIAkNAAsgACAFNgIEIAAgBjYCACAKQQN2IApBAXZqQQNqIQlBACEHQQAhBgNAIAEgBkEBdGouAQAiAEEBTgRAIABB//8DcSIAQQEgAEEBSxshDUEAIQADQCAMIAdBA3RqIAY2AgQDQCAHIAlqIA5xIgcgCEsNAAsgAEEBaiIAIA1HDQALCyACIAZGIQAgBkEBaiEGIABFDQALCyAKQQEgCkEBSxshAkEAIQgDQCALIAwgCEEDdGoiACgCBCIGQQF0aiIBIAEvAQAiAUEBajsBACAAIAUgARAkayIHOgADIAAgASAHdCAKazsBACAAIAQgBkECdCIBaigCADoAAiAAIAEgA2ooAgA2AgQgCEEBaiIIIAJHDQALIAtB8ABqJAALPAEDfwNAIAAgA0ECdGoiA
iACKAIAQQR0QX9qIgI2AgAgAiAEaiEEIAEgA0chAiADQQFqIQMgAg0ACyAECwQAIAALHQAgAEHAAE8EQCAAECRBE2oPCyAAQfClAWotAAALUQAgAiABayECAn8gBUUEQCABIAIgAyAEIAYQcQwBCyABIAIgAyAEIAYQ+gMLIgUQISAFRXJFBEAgASAFaiAAayIAQQAgACAEQX9qSRsPCyAFCx8AIAAgASACLwEAEEY2AgAgARAjGiAAIAJBBGo2AgQLNwEBfyADQdsLTQRAIAAgASACIAMQqgEPC0F/IQUgBEEDcQR/IAUFIAAgASACIANBACAEEIMCCwsjAEIAIAEQTiAAhUKHla+vmLbem55/fkLj3MqV/M7y9YV/fAsNACABIABBAnRqKAIAC0ABAX8jAEEgayIAJAAgAEEIahC0BEGg7AEgACgCGDYCAEGY7AEgACkDEDcCAEGQ7AEgACkDCDcCACAAQSBqJAALPAACQCAAKAJEQQFHBEAgACgCFCAAKAIkbUEBSg0BCyAAELkCDwsgABC4AiAAQoGAgIBwNwLAESAAKAIsC6sDAQN/IAEgAEEEaiIEakF/akEAIAFrcSIFIAJqIAAgACgCACIBakF8ak0EfyAAKAIEIgMgACgCCDYCCCAAKAIIIAM2AgQgBCAFRwRAIAAgAEF8aigCACIDQR91IANzayIDIAUgBGsiBCADKAIAaiIFNgIAIAVBfHEgA2pBfGogBTYCACAAIARqIgAgASAEayIBNgIACwJAIAJBGGogAU0EQCAAIAJqQQhqIgMgASACayIBQXhqIgQ2AgAgBEF8cSADakF8akEHIAFrNgIAIAMCfyADKAIAQXhqIgFB/wBNBEAgAUEDdkF/agwBCyABZyEEIAFBHSAEa3ZBBHMgBEECdGtB7gBqIAFB/x9NDQAaIAFBHiAEa3ZBAnMgBEEBdGtBxwBqIgFBPyABQT9JGwsiAUEEdCIEQYDtAWo2AgQgAyAEQYjtAWoiBCgCADYCCCAEIAM2AgAgAygCCCADNgIEQYj1AUGI9QEpAwBCASABrYaENwMAIAAgAkEIaiIBNgIAIAFBfHEgAGpBfGogATYCAAwBCyAAIAFqQXxqIAE2AgALIABBBGoFIAMLC0sBAn8gACgCBCIGQQh1IQcgACgCACIAIAEgAiAGQQFxBH8gAygCACAHaigCAAUgBwsgA2ogBEECIAZBAnEbIAUgACgCACgCFBEMAAtdAQF/IAAoAhAiA0UEQCAAQQE2AiQgACACNgIYIAAgATYCEA8LAkAgASADRgRAIAAoAhhBAkcNASAAIAI2AhgPCyAAQQE6ADYgAEECNgIYIAAgACgCJEEBajYCJAsLIAACQCAAKAIEIAFHDQAgACgCHEEBRg0AIAAgAjYCHAsLogEAIABBAToANQJAIAAoAgQgAkcNACAAQQE6ADQgACgCECICRQRAIABBATYCJCAAIAM2AhggACABNgIQIANBAUcNASAAKAIwQQFHDQEgAEEBOgA2DwsgASACRgRAIAAoAhgiAkECRgRAIAAgAzYCGCADIQILIAAoAjBBAUcgAkEBR3INASAAQQE6ADYPCyAAQQE6ADYgACAAKAIkQQFqNgIkCws3AQJ/IABB/OMBNgIAAn8gACgCBEF0aiICIgEgASgCCEF/aiIBNgIIIAFBf0wLBEAgAhA4CyAAC4oRAg9/AX4jAEHQAGsiBSQAIAUgATYCTCAFQTdqIRMgBUE4aiERQQAhAQJAA0ACQCANQQBIDQAgAUH/////ByANa0oEQEGw7AFBPTYCAEF/IQ0MAQsgASANaiENCyAFKAJMIgkhAQJAAkACQCAJLQAAIgYEQANAAkACQCAGQf8BcSIGRQRAIAEhBgwBCyAGQSVHDQEgASEGA0AgAS0AAUElRw0BIAUgAUECaiIHNgJMIAZBAWohBiABLQACIQogByEBIApBJUYNAAsLIAYgCWshASAABEAgACAJIAEQZgsgAQ0GIAUoAkwiB0EBaiEBQX8hDwJAIAcsAAEiBhBuRQ0AIActAAJBJEcNACAHQQNqIQEgBkFQaiEPQQEhEgsgBSABNgJMQQAhDgJAIAEsAAAiCkFgaiIHQR9LBEAgASEGDAELIAEhBkEBIAd0IgdBidEEcUUNAANAIAUgAUEBaiIGNgJMIAcgDnIhDiABLAABIgpBYGoiB0EgTw0BIAYhAUEBIAd0IgdBidEEcQ0ACwsCQCAKQSpGBEACfwJAIAYsAAEiARBuRQ0AIAYtAAJBJEcNACABQQJ0IARqQcB+akEKNgIAIAZBA2ohASAGLAABQQN0IANqQYB9aigCACELQQEMAQsgEg0GIAZBAWohASAARQRAIAUgATYCTEEAIRJBACELDAMLIAIgAigCACIGQQRqNgIAIAYoAgAhC0EACyESIAUgATYCTCALQX9KDQFBACALayELIA5BgMAAciEODAELIAVBzABqELwBIgtBAEgNBCAFKAJMIQELQX8hCAJAIAEtAABBLkcNACABLQABQSpGBEACQAJAIAEsAAIiBhBuRQ0AIAEtAANBJEcNACAGQQJ0IARqQcB+akEKNgIAIAEsAAJBA3QgA2pBgH1qKAIAIQggAUEEaiEBDAELIBINBiABQQJqIQEgAEUEQEEAIQgMAQsgAiACKAIAIgZBBGo2AgAgBigCACEICyAFIAE2AkwMAQsgBSABQQFqNgJMIAVBzABqELwBIQggBSgCTCEBC0EAIQcDQCAHIRBBfyEMIAEiCiwAAEG/f2pBOUsNCCAFIApBAWoiATYCTCAKLAAAIBBBOmxqQf/PAWotAAAiB0F/akEISQ0ACwJAAkAgB0ETRwRAIAdFDQogD0EATgRAIAQgD0ECdGogBzYCACAFIAMgD0EDdGopAwA3A0AMAgsgAEUNCCAFQUBrIAcgAhC7AQwCCyAPQX9KDQkLQQAhASAARQ0HCyAOQf//e3EiBiAOIA5BgMAAcRshB0EAIQxBkNQBIQ8gESEOAkACQAJAAn8CQAJAAkACQAJ/AkACQAJAAkACQAJAAkAgCiwAACIBQV9xIAEgAUEPcUEDRhsgASAQGyIBQah/ag4hBBQUFBQUFBQUDhQPBg4ODhQGFBQUFAIFAxQUCRQBFBQEAAsCQCABQb9/ag4HDhQLFA4ODgALIAFB0wBGDQkMEwsgBSkDQCEUQZDUAQwFC0EAIQECQAJAAkACQAJAAkACQCAQQf8BcQ4IAAECAwQaBQYaCyAFKAJAIA02AgAMGQsgBSgCQCANNgIADBgLIAUoAkAgDaw3AwAMFwsgBSgCQCANOwEADBYLIAUoAkAgDToAAAwVCyAFKAJAIA02AgAMFAsgBSgCQCANrDcDAAwTCyAIQQggCEEISxshCCAHQQhyIQdB+AAhAQsgBSkDQCARIAFBIHEQ2wIhCSAHQQhxRQ0DIAUpA0BQDQMgAUEEdkGQ1AFqIQ9BAiEMDAMLIAUpA0AgERDaAiEJIAdBCHFFDQIgCCARIAlrIgFBAWogCCABShshCAwCCyAFKQNAIhRC
f1cEQCAFQgAgFH0iFDcDQEEBIQxBkNQBDAELIAdBgBBxBEBBASEMQZHUAQwBC0GS1AFBkNQBIAdBAXEiDBsLIQ8gFCARENkCIQkLIAdB//97cSAHIAhBf0obIQcgCCAFKQNAIhRQRXJFBEBBACEIIBEhCQwMCyAIIBRQIBEgCWtqIgEgCCABShshCAwLCyAFKAJAIgFBmtQBIAEbIgkgCBDYAiIBIAggCWogARshDiAGIQcgASAJayAIIAEbIQgMCgsgCARAIAUoAkAMAgtBACEBIABBICALQQAgBxBeDAILIAVBADYCDCAFIAUpA0A+AgggBSAFQQhqNgJAQX8hCCAFQQhqCyEGQQAhAQJAA0AgBigCACIJRQ0BIAVBBGogCRC6ASIKQQBIIgkgCiAIIAFrS3JFBEAgBkEEaiEGIAggASAKaiIBSw0BDAILC0F/IQwgCQ0LCyAAQSAgCyABIAcQXiABRQRAQQAhAQwBC0EAIQogBSgCQCEGA0AgBigCACIJRQ0BIAVBBGogCRC6ASIJIApqIgogAUoNASAAIAVBBGogCRBmIAZBBGohBiAKIAFJDQALCyAAQSAgCyABIAdBgMAAcxBeIAsgASALIAFKGyEBDAgLIAAgBSsDQCALIAggByABQQARIAAhAQwHCyAFIAUpA0A8ADdBASEIIBMhCSAGIQcMBAsgBSABQQFqIgc2AkwgAS0AASEGIAchAQwACwALIA0hDCAADQQgEkUNAkEBIQEDQCAEIAFBAnRqKAIAIgAEQCADIAFBA3RqIAAgAhC7AUEBIQwgAUEBaiIBQQpHDQEMBgsLQQEhDCABQQpPDQRBACEGA0AgBg0BIAFBAWoiAUEKRg0FIAQgAUECdGooAgAhBgwACwALQX8hDAwDCyAAQSAgDCAOIAlrIgogCCAIIApIGyIGaiIQIAsgCyAQSBsiASAQIAcQXiAAIA8gDBBmIABBMCABIBAgB0GAgARzEF4gAEEwIAYgCkEAEF4gACAJIAoQZiAAQSAgASAQIAdBgMAAcxBeDAELC0EAIQwLIAVB0ABqJAAgDAsWACAARQRAQQAPC0Gw7AEgADYCAEF/CyIBAX8jAEEQayIBIAA2AgggASABKAIIKAIENgIMIAEoAgwLCgAgAC0AC0EHdgsRACAAEJEBBEAgACgCABA4CwvYAQEIf0G6fyEJAkAgACACKAIEIgggAigCACIKaiINaiABSw0AQWwhCSADKAIAIg4gCmoiDyAESw0AIAAgCmoiBCACKAIIIgtrIQwgACABQWBqIgEgDiAKQQAQxAEgAyAPNgIAAkACQCALIAQgBWtNBEAgDCEFDAELIAsgBCAGa0sNAiAHIAwgBWsiA2oiACAIaiAHTQRAIAQgACAIEEoaDAILIAQgAEEAIANrEEohACACIAMgCGoiCDYCBCAAIANrIQQLIAQgASAFIAhBARDEAQsgDSEJCyAJC4wCAQJ/IwBBgAFrIg4kACAOIAM2AnxBfyENAkACQAJAAkACQCACDgQBAAMCBAsgBkUEQEG4fyENDAQLQWwhDSAFLQAAIgIgA0sNAyAAIAcgAkECdCICaigCACACIAhqKAIAEPgCIAEgADYCAEEBIQ0MAwsgASAJNgIAQQAhDQwCCyAKRQRAQWwhDQwCC0EAIQ0gC0UgDEEZSHINAUEIIAR0QQhqIQBBACEDA0AgA0FAayIDIABJDQALDAELQWwhDSAOIA5B/ABqIA5B+ABqIAUgBhBrIgIQIQ0AIA4oAngiAyAESw0AIAAgDiAOKAJ8IAcgCCADEH0gASAANgIAIAIhDQsgDkGAAWokACANCxAAIAAvAAAgAC0AAkEQdHILEQAgACABQQRqIAEoAgAQ5gILXgEBf0G4fyEDIAIQaSICIAFNBH8gACACakF/ai0AACIAQQNxQQJ0QcCrAWooAgAgAmogAEEGdiIBQQJ0QdCrAWooAgBqIABBIHFBBXYiAEEBc2ogACABRXFqBSADCwsVACAAIAFBA3RqKAIEQf//A2pBEHYLdgECfyMAQSBrIgUkACABIAIgBCgCECIGENgBQX8gBnRBf3NGBEAgACgCGCEGIAAoAhQhACAFIAQpAhA3AxggBSAEKQIINwMQIAUgBCkCADcDCCAAIAYgASACENcBIAMgASACENUBIAVBCGoQpwMLIAVBIGokAAuaAQACfwJAAkACQCAAKAKEAUF7ag4DAQICAAtBACAAKAIEIAAoAhhqIAFLDQIaIAAgAUEEEFQgACABIAIgA0EEQQEQUw8LQQAgACgCBCAAKAIYaiABSw0BGiAAIAFBBRBUIAAgASACIANBBUEBEFMPC0EAIAAoAgQgACgCGGogAUsNABogACABQQYQVCAAIAEgAiADQQZBARBTCwuaAQACfwJAAkACQCAAKAKEAUF7ag4DAQICAAtBACAAKAIEIAAoAhhqIAFLDQIaIAAgAUEEEFQgACABIAIgA0EEQQIQUw8LQQAgACgCBCAAKAIYaiABSw0BGiAAIAFBBRBUIAAgASACIANBBUECEFMPC0EAIAAoAgQgACgCGGogAUsNABogACABQQYQVCAAIAEgAiADQQZBAhBTCwuaAQACfwJAAkACQCAAKAKEAUF7ag4DAQICAAtBACAAKAIEIAAoAhhqIAFLDQIaIAAgAUEEEFQgACABIAIgA0EEQQAQUw8LQQAgACgCBCAAKAIYaiABSw0BGiAAIAFBBRBUIAAgASACIANBBUEAEFMPC0EAIAAoAgQgACgCGGogAUsNABogACABQQYQVCAAIAEgAiADQQZBABBTCwt6AQN/Qbp/IQUgA0H/H0tBAkEBIANBH0sbaiIEIANqIgYgAU0EfwJAAkACQAJAIARBf2oOAwABAgMLIAAgA0EDdDoAAAwCCyAAIANBBHRBBHJB9P8DcRAvDAELIAAgA0EEdEEMchBNCyAAIARqIAIgAxAqGiAGBSAFCws5AQJ/IAAoAhQhAyAAKAIMIQIgAEECEOEBIAEgAmoiASADSwRAIABBATYCGEEADwsgACABNgIMIAILTAEBfyABEOMBIQECQCAAKAIgRQRAIAAoAggiAiABaiIBIAAoAgRNDQELIABBATYCGEEADwsgACABNgIQIAAgATYCDCAAIAE2AgggAgvjAwEGfyABQRBtIQggAUEQTgRAA0AgACAGQQJ0IgVqIgFBACABKAIAIgEgAmsiAyADIAFLGzYCACAAIAVBBHJqIgFBACABKAIAIgMgAmsiBCAEIANLGzYCACABQQAgASgCBCIBIAJrIgMgAyABSxs2AgQgACAFQQxyaiIBQQAgASgCACIDIAJrIgQgBCADSxs2AgAgAUEAIAEoAgQiAyACayIEIAQgA0sbNgIEIAFBACABKAIIIgMgAmsiBCAEIANLGzYCCCABQQAgASgCDCIBIAJrIgMgAyABSxs2AgwgACAFQRxyaiIBQQAgASgCACIDIAJrIgQgBCADSxs2AgAgAUEAIAEoAgQiAyACayIEIAQgA0sbNgIEIAFBACABKAIIIgMgAms
iBCAEIANLGzYCCCABQQAgASgCDCIDIAJrIgQgBCADSxs2AgwgAUEAIAEoAhAiAyACayIEIAQgA0sbNgIQIAFBACABKAIUIgMgAmsiBCAEIANLGzYCFCABQQAgASgCGCIDIAJrIgQgBCADSxs2AhggAUEAIAEoAhwiASACayIDIAMgAUsbNgIcIAAgBUE8cmoiAUEAIAEoAgAiASACayIFIAUgAUsbNgIAIAZBEGohBiAHQQFqIgcgCEcNAAsLC5ICAQJ/IwBB8ABrIhAkAEF/IQ8CQAJAAkACQAJAIAQOBAIAAwEECyACIAZB/wFxEIcEQQAhD0EAECENAyABRQRAQbp/IQ8MBAsgACAHLQAAOgAAQQEhDwwDCyACIAwgDRAqGkEAIQ8MAgsgAiAJIAsgCiAOQYAwEKkBIgAQISEBIBBB8ABqJAAgAEEAIAEbDwsgECADIAggBhCnASIEIAUgBSAHIAhBf2oiA2otAABBAnRqIgcoAgAiCUECTwR/IAcgCUF/ajYCACADBSAICyAGEKYBIg8QIQ0AIAAgASAQIAYgBBCoASIPECENACACIBAgBiAEIA5BgDAQqQEiACAPIAAQIRshDwsgEEHwAGokACAPC+ABAAJAIAMgBEcEQAJAAkAgCkEDTQRAIAlFDQEgBEHnB00EQEEDIQkgACgCAEECRg0DC0EKIAprIAh0QQN2IARLDQQgBCAIQX9qdiADTQ0BDAQLQX8hCkF/IQMgCQRAIAcgCCABIAIQzQMhAwtBAyEJAn8gACgCAARAIAYgASACEMwDIQoLIAMgCk0LQQAgAyABIAIgBCAFEMsDQQN0IAEgAiAEEMoDaiIBTRsNAyAKIAFNDQELIABBATYCAEECIQkLIAkPCyAAQQA2AgAgCUUgA0ECS3IPCyAAQQA2AgBBAAsXACAAIAFB//8DcRAvIAAgAUEQdjoAAgs4AQF/IABCADcCCCAAQgA3AhAgAEIANwIYIABBADYCICAAKAIAIQQgAEIANwIAIAQgASACIAMQZAvBAQEDfwJAIAIoAhAiAwR/IAMFIAIQhQQNASACKAIQCyACKAIUIgVrIAFJBEAgAiAAIAEgAigCJBEBAA8LAkAgAiwAS0EASARAQQAhAwwBCyABIQQDQCAEIgNFBEBBACEDDAILIAAgA0F/aiIEai0AAEEKRw0ACyACIAAgAyACKAIkEQEAIgQgA0kNASAAIANqIQAgASADayEBIAIoAhQhBQsgBSAAIAEQKhogAiACKAIUIAFqNgIUIAEgA2ohBAsgBAv9AgIIfwV+AkACf0F/IAFBCyABGyIGQQVJDQAaQVQgBkEMSw0AGkF/IAYgAyAEEIACSQ0AGiADIAZ2IQxBASAGdCEHQoCAgICAgICAwAAgA62AIQ5BPiAGa60iDUJsfCEPQQAhAQJAA0AgAiABQQJ0aigCACIFIANGDQECQCAFRQRAIAAgAUEBdGpBADsBAAwBCyAFIAxNBEAgACABQQF0akH//wM7AQAgB0F/aiEHDAELIA4gBa1+IhAgDYgiEaciBUH//wNxIgpBB00EQCAQIBFC//8DgyANhn0gCkECdEHghAFqNQIAIA+GViAFaiEFCyAAIAFBAXRqIAU7AQAgBSAIIAVBEHRBEHUiBSAIQRB0QRB1SiIKGyEIIAEgCSAKGyEJIAcgBWshBwsgAUEBaiIBIARNDQALIAAgCUEBdGoiAS4BACIFQQF1QQAgB2tKDQIgBiIFIAAgBSACIAMgBBCIBCILECFFDQEaCyALCw8LIAEgBSAHajsBACAGCw0AIAAgASACQQIQgQILUgACf0FUIARBDEsNABpBfyAEQQVJDQAaIANBAWogBGxBA3ZBA2pBgAQgAxsgAUsEQCAAIAEgAiADIARBABCCAg8LIAAgASACIAMgBEEBEIICCwvIBAEKfyMAQZAIayIJJABBASEGQVQhB0EBIAN0IgggBU0EQCAIQQF2IgxBASADG0ECdCEKIAAgAzsBACAAQQRqIg5BfmogAjsBAEEAIQAgCUEANgIAIAhBf2oiBSEHIAJBAWoiCyACTwRAIAUhBwNAIAkgBkECdGoCfyABIAZBf2oiDUEBdGouAQAiD0F/RgRAIAQgB2ogDToAACAHQX9qIQcgAEEBagwBCyAAIA9qCyIANgIAIAZBAWoiBiALTQ0ACwsgCiAOaiEKIAkgC0ECdGogCEEBajYCACAIQQN2IAxqQQNqIQxBACEAQQAhBgNAIAEgAEEBdGouAQAiDUEBTgRAQQAhCwNAIAQgBmogADoAAANAIAYgDGogBXEiBiAHSw0ACyALQQFqIgsgDUcNAAsLIABBAWoiACACTQ0ACyAIQQEgCEEBSxshAEEAIQYDQCAJIAQgBmotAABBAnRqIgUgBSgCACIFQQFqNgIAIA4gBUEBdGogBiAIajsBACAGQQFqIgYgAEcNAAsgA0EQdCAIayIEQYCABGohBUEAIQZBACEHA0ACQAJAAkACQCABIAZBAXRqLgEAIgBBAWoOAwEAAQILIAogBkEDdGogBTYCBAwCCyAKIAZBA3RqIgAgB0F/ajYCACAAIAQ2AgQgB0EBaiEHDAELIAogBkEDdGoiCCAHIABrNgIAIAggAyAAQX9qECRrIghBEHQgACAIdGs2AgQgACAHaiEHCyAGQQFqIgYgAk0NAAtBACEHCyAJQZAIaiQAIAcLrwEBAn8gAEEAIAEoAgAiAEECdEEEahAoIQQgAwRAIANBAEoEQCACIANqIQMDQCAEIAItAABBAnRqIgUgBSgCAEEBajYCACACQQFqIgIgA0kNAAsLA0AgACICQX9qIQAgBCACQQJ0aigCAEUNAAsgASACNgIAQQAhA0EAIQADQCAEIANBAnRqKAIAIgEgACABIABLGyEAIANBAWoiAyACTQ0ACyAADwsgAUEANgIAQQALCwAgACABIAIQKhoLmg0BF38jAEFAaiIHQgA3AzAgB0IANwM4IAdCADcDICAHQgA3AygCQAJAAn8CQAJAIAIEQANAIAdBIGogASAIQQF0ai8BAEEBdGoiBiAGLwEAQQFqOwEAIAhBAWoiCCACRw0ACyAEKAIAIQhBDyEKIAcvAT4iDA0CIAcvATxFDQFBDiEKQQAhDAwCCyAEKAIAIQgLQQ0hCkEAIQwgBy8BOg0AQQwhCiAHLwE4DQBBCyEKIAcvATYNAEEKIQogBy8BNA0AQQkhCiAHLwEyDQBBCCEKIAcvATANAEEHIQogBy8BLg0AQQYhCiAHLwEsDQBBBSEKIAcvASoNAEEEIQogBy8BKA0AQQMhCiAHLwEmDQBBAiEKIAcvASQNACAHLwEiIgtFBEAgAyADKAIAIgBBBGo2AgAgAEHAAjYBACADIAMoAgAiAEEEajYCACAAQcACNgEAIARBATYCAAwDCyAIQQBHIQ5BASEKQQEhCEEADAELIAogCCAIIApLGyEOQQEhCAJAA0AgB0EgaiAIQQF0ai8BAA0BIAhBAWoiCCAKRw0ACyAKIQgLIAcvASIhC0EBCyEQQX8hCSALQf
//A3EiBkECSw0BQQQgBy8BJCIRIAZBAXRqayIGQQBIDQEgBkEBdCAHLwEmIhJrIgZBAEgNASAGQQF0IAcvASgiE2siBkEASA0BIAZBAXQgBy8BKiIUayIGQQBIDQEgBkEBdCAHLwEsIhVrIgZBAEgNASAGQQF0IAcvAS4iGGsiBkEASA0BIAZBAXQgBy8BMCIbayIGQQBIDQEgBkEBdCAHLwEyIhxrIgZBAEgNASAGQQF0IAcvATQiDWsiBkEASA0BIAZBAXQgBy8BNiIWayIGQQBIDQEgBkEBdCAHLwE4IhdrIgZBAEgNASAGQQF0IAcvAToiGWsiBkEASA0BIAZBAXQgBy8BPCIaayIGQQBIDQEgBkEBdCAMayIGQQBIIAZBACAARSAQchtyDQFBACEJIAdBADsBAiAHIAs7AQQgByALIBFqIgY7AQYgByAGIBJqIgY7AQggByAGIBNqIgY7AQogByAGIBRqIgY7AQwgByAGIBVqIgY7AQ4gByAGIBhqIgY7ARAgByAGIBtqIgY7ARIgByAGIBxqIgY7ARQgByAGIA1qIgY7ARYgByAGIBZqIgY7ARggByAGIBdqIgY7ARogByAGIBlqIgY7ARwgByAGIBpqOwEeIAIEQANAIAEgCUEBdGovAQAiBgRAIAcgBkEBdGoiBiAGLwEAIgZBAWo7AQAgBSAGQQF0aiAJOwEACyAJQQFqIgkgAkcNAAsLIAggDiAOIAhJGyENQRMhDkEAIRQgBSEWIAUhF0EAIRACQAJAAkAgAA4CAgABC0EBIQkgDUEJSw0DQYACIQ5B3uoAIRdB3ukAIRZBASEQDAELIABBAkYhFEF/IQ5BoO4AIRdBoO0AIRYgAEECRwRADAELQQEhCSANQQlLDQILQQEgDXQiEUF/aiEbIAMoAgAhEkEAIRMgDSEGQQAhC0F/IRoDQEEBIAZ0IRkCQANAIAggD2shFQJ/QQAgDiAFIBNBAXRqLwEAIgZKDQAaIA4gBk4EQEEAIQZB4AAMAQsgFiAGQQF0IgBqLwEAIQYgACAXai0AAAshACALIA92IRxBfyAVdCEJIBkhAgNAIBIgAiAJaiICIBxqQQJ0aiIYIAY7AQIgGCAVOgABIBggADoAACACDQALQQEgCEF/anQhCQNAIAkiAEEBdiEJIAAgC3ENAAsgB0EgaiAIQQF0aiICIAIvAQBBf2oiAjsBACAAQX9qIAtxIABqQQAgABshCyATQQFqIRMgAkH//wNxRQRAIAggCkYNAiABIAUgE0EBdGovAQBBAXRqLwEAIQgLIAggDU0NACALIBtxIgAgGkYNAAtBASAIIA8gDSAPGyIPayIGdCEMIAggCkkEQCAKIA9rIQIgCCEJAkADQCAMIAdBIGogCUEBdGovAQBrIglBAUgNASAJQQF0IQwgBkEBaiIGIA9qIgkgCkkNAAsgAiEGC0EBIAZ0IQwLQQEhCSAQIAwgEWoiEUHUBktxIBQgEUHQBEtxcg0DIAMoAgAiAiAAQQJ0aiIJIA06AAEgCSAGOgAAIAkgEiAZQQJ0aiISIAJrQQJ2OwECIAAhGgwBCwsgCwRAIBIgC0ECdGoiAEEAOwECIAAgFToAASAAQcAAOgAACyADIAMoAgAgEUECdGo2AgAgBCANNgIAC0EAIQkLIAkLygIBC38gACACQQJ0akHcFmooAgAhBgJAIAJBAXQiAyAAKALQKCIFSgRAIAIhBAwBCyAAIAZqQdgoaiEKIAEgBkECdGohCyAAQdwWaiEIIABB2ChqIQkDQAJ/IAMgAyAFTg0AGiABIAggA0EBciIFQQJ0aigCACIHQQJ0ai8BACIEIAEgCCADQQJ0aigCACIMQQJ0ai8BACINTwRAIAMgBCANRw0BGiADIAcgCWotAAAgCSAMai0AAEsNARoLIAULIQQgCy8BACIFIAEgACAEQQJ0akHcFmooAgAiA0ECdGovAQAiB0kEQCACIQQMAgsCQCAFIAdHDQAgCi0AACAAIANqQdgoai0AAEsNACACIQQMAgsgACACQQJ0akHcFmogAzYCACAEIgJBAXQiAyAAKALQKCIFTA0ACwsgACAEQQJ0akHcFmogBjYCAAuyBQEKfyABKAIIIgMoAgAhByADKAIMIQUgASgCACEGIABCgICAgNDHADcC0ChBfyEDAkAgBUEASgRAA0ACQCAGIAJBAnRqIgQvAQAEQCAAIAAoAtAoQQFqIgM2AtAoIAAgA0ECdGpB3BZqIAI2AgAgACACakHYKGpBADoAACACIQMMAQsgBEEAOwECCyACQQFqIgIgBUcNAAsgACgC0CgiAkEBSg0BCwNAIAAgAkEBaiICNgLQKCAAIAJBAnRqQdwWaiADQQFqIglBACADQQJIIgQbIgg2AgAgBiAIQQJ0IgJqQQE7AQAgACAIakHYKGpBADoAACAAIAAoAqgtQX9qNgKoLSAHBEAgACAAKAKsLSACIAdqLwECazYCrC0LIAkgAyAEGyEDIAAoAtAoIgJBAkgNAAsLIAEgAzYCBCACQQF2IQIDQCAAIAYgAhCtASACQQFKIQQgAkF/aiECIAQNAAsgACgC0CghAiAAQdwWaiEKIABB2ChqIQsDQCAAIAJBf2o2AtAoIAAoAuAWIQcgACAKIAJBAnRqKAIANgLgFiAAIAZBARCtASAAIAAoAtQoQX9qIgI2AtQoIAAoAuAWIQQgCiACQQJ0aiAHNgIAIAAgACgC1ChBf2oiAjYC1CggCiACQQJ0aiAENgIAIAYgBUECdGogBiAEQQJ0aiIILwEAIAYgB0ECdGoiCS8BAGo7AQAgBSALaiAEIAtqLQAAIgQgByALai0AACICIAIgBEkbQQFqOgAAIAggBTsBAiAJIAU7AQIgACAFNgLgFiAAIAZBARCtASAFQQFqIQUgACgC0CgiAkEBSg0ACyAAIAAoAtQoQX9qIgI2AtQoIAAgAkECdGpB3BZqIAAoAuAWNgIAIAAgASgCACABKAIEIAEoAggQlAQgBiADIABBvBZqEJMEC5gCAQN/QX4hAgJAIABFDQAgACgCHCIBRQ0AAkACQCABKAIEIgNBu39qDi0BAgICAQICAgICAgICAgICAgICAgICAQICAgICAgICAgICAQICAgICAgICAgEACyADQZoFRg0AIANBKkcNAQsCfwJ/An8gASgCCCICBEAgACgCKCACIAAoAiQRBAAgACgCHCEBCyABKAJEIgILBEAgACgCKCACIAAoAiQRBAAgACgCHCEBCyABKAJAIgILBEAgACgCKCACIAAoAiQRBAAgACgCHCEBCyABKAI4IgILBEAgACgCKCACIAAoAiQRBAAgACgCHCEBCyAAKAIoIAEgACgCJBEEACAAQQA2AhxBfUEAIANB8QBGGyECCyACCx0AIABBCSABIAFBAUgbIgBBDCAAQQxIGzsBmIAQC6IDAQZ/IwBBEGsiAyQAAn8gACgCBCIBIAAoAggiAkYEQCAAKAIAIgIgACgCDCACKAIAKAIQEQQAIAAoAgAiAiADQQxqIAIoAgAoAgwRAwAhA
SAAIAMoAgwiAjYCDCACRQRAIABBAToAEEEADAILIAAgASACaiICNgIICwJAIAIgAWsiAiABLQAAQQF0QcAJai8BAEELdkEBaiIESQRAIABBEWogASACEEohBiAAKAIAIgEgACgCDCABKAIAKAIQEQQAIABBADYCDANAIAAoAgAiASADQQhqIAEoAgAoAgwRAwAhBUEAIAMoAggiAUUNAxogACACakERaiAFIAEgBCACayIFIAEgBUkbIgEQKhogACgCACIFIAEgBSgCACgCEBEEACABIAJqIgIgBEkNAAsgACAGNgIEIAAgACAEakERajYCCAwBCyACQQRNBEAgAEERaiABIAIQSiEBIAAoAgAiBCAAKAIMIAQoAgAoAhARBAAgACABIAJqNgIIIAAgATYCBCAAQQA2AgwMAQsgACABNgIEC0EBCyECIANBEGokACACCx4BAX8gAEEFRiABQRBKcgR/IAMFIAIgAW1B/wBKCwvCAgEKfyAAKAIMLQAAIghBAnYgACgCKCIJIAFMcSENIAggCUEBSnEhDiAAKAIYIQsgASEKQQEhDAJAAkACQCAIQRBxIAlBEEpyDQAgAiABIAltIghBgAFIcg0AIAghCiAJIQwgCUEBTg0ADAELIAYgBiAFIA0bIA4bIQIgCiAMbCEPIAtBfGohEANAQX8hCCAEQQBIIAQgEEtyDQIgAyAEaigAACILQQBIDQIgCyAAKAIYIARBBGoiBGtKDQIgAyAEaiEIAkAgCiALRgRAIAIgCCAKEFAaDAELIAggCyACIAogACgCQBEHACAKRg0AQX4PCyACIApqIQIgBCALaiEEIBFBAWoiESAMRw0ACwsCQCAOBEAgCSABIAYgBRCtAgwBCyANRQ0AIAkgASAGIAUgBxCsAiIIQQBIDQELIA8hCAsgCAufBQEKfyMAQRBrIgokAAJAAkAgACgCDC0AACIJQQFxRSAAKAIoIgtBAkhyRQRAIAsgASAFIAcQswIMAQsgCyABSgRAIAUhBwwBCyAJQQRxRQRAIAUhBwwBCyALIAEgBSAHIAgQsgIiCEEASA0BCyABQQEgCyAJQRBxIAJyGyINbSEFIAAiASgCOEEBRgR/QQogASgCPGsFQQELIQ4gDUEBSARAQQAhCAwBC0EAIQJBACEIA0AgA0EEaiEMIAUhAyAAKAI4QQNGBEAgBRCVAiEDCwJAIAMgDGogBEwNACAEIAxrIgNBAU4NAEEAIQgMAgsgBkEEaiEJAn8CQAJAAkACQAJAAkACQCAAKAI4IgEOBgYAAQIDBAULIAcgAiAFbGogCSAFIAMgDhCqAgwGCyAHIAIgBWxqIQ8gCSEBIAMhECAAKAI8IREgBSISQYCAgIB4TQR/IA8gASASIBAgERCpAgVBfwsMBQsgByACIAVsaiAFIAkgAxCxAgwECyAHIAIgBWxqIAUgCSADIAAoAjwQsAIMAwsgByACIAVsaiAFIAkgAyAAKAI8EK8CDAILIAogAUEFTQR/IAFBAnRBgBBqKAIABUEACzYCDCAKIAooAgwiAEGa1AEgABs2AgBB6BEgChBPQY8SQS8QckF7IQgMAwsgACgCPCAHIAIgBWxqIAUgCSADIAAoAgwtAAAgC0EBSnEQrgILIgEgA0oEQEF/IQgMAgsgAUEASARAQX4hCAwCCwJAIAFFIAEgBUZyRQRAIAEgDGohAwwBCyAFIAxqIgMgBEoEQEEAIQgMAwsgCSAHIAIgBWxqIAUQUBogBSEBCyAGIAEQNCAIQQRqIAFqIQggASAJaiEGIAJBAWoiAiANRw0ACwsgCkEQaiQAIAgL9AMCBX8CfgJAAkADQCAAIABBf2pxDQEgAEEIIABBCEsbIQBBiPUBKQMAIggCfyABQQNqQXxxQQggAUEISxsiAUH/AE0EQCABQQN2QX9qDAELIAFnIQIgAUEdIAJrdkEEcyACQQJ0a0HuAGogAUH/H00NABogAUEeIAJrdkECcyACQQF0a0HHAGoiAkE/IAJBP0kbCyIErYgiB1BFBEADQCAHIAd6IgiIIQcCfiAEIAinaiIEQQR0IgNBiO0BaigCACICIANBgO0BaiIGRwRAIAIgACABEIgBIgUNBiACKAIEIgUgAigCCDYCCCACKAIIIAU2AgQgAiAGNgIIIAIgA0GE7QFqIgMoAgA2AgQgAyACNgIAIAIoAgQgAjYCCCAEQQFqIQQgB0IBiAwBC0GI9QFBiPUBKQMAQn4gBK2JgzcDACAHQgGFCyIHQgBSDQALQYj1ASkDACEIC0E/IAh5p2tBBHQiAkGA7QFqIQMgAkGI7QFqKAIAIQICQCAIQoCAgIAEVA0AQeMAIQQgAiADRg0AA0AgBEUNASACIAAgARCIASIFDQQgBEF/aiEEIAIoAggiAiADRw0ACyADIQILIAFBMGoQtgENAAsgAiADRg0AA0AgAiAAIAEQiAEiBQ0CIAIoAggiAiADRw0ACwtBACEFCyAFC/0DAQZ/QejqASgCACICIABBA2pBfHEiA2ohAQJAIANBAU5BACABIAJNG0UEQCABPwBBEHRNDQEgARARDQELQbDsAUEwNgIAQQAPC0EAIQNB6OoBIAE2AgAgAkEBTgR/QRAhAyAAIAJqIgRBcGoiAEEQNgIMIABBEDYCAAJAAkACQEGA9QEoAgAiAUUNACACIAEoAghHDQAgAiACQXxqKAIAIgNBH3UgA3NrIgZBfGooAgAhBSABIAQ2AghBcCEDIAYgBSAFQR91c2siASABKAIAakF8aigCAEF/Sg0BIAEoAgQiAiABKAIINgIIIAEoAgggAjYCBCABIAAgAWsiADYCAAwCCyACQRA2AgwgAkEQNgIAIAIgBDYCCCACIAE2AgRBgPUBIAI2AgALIAIgA2oiASAAIAFrIgA2AgALIABBfHEgAWpBfGogAEF/czYCACABAn8gASgCAEF4aiIAQf8ATQRAIABBA3ZBf2oMAQsgAGchAiAAQR0gAmt2QQRzIAJBAnRrQe4AaiAAQf8fTQ0AGiAAQR4gAmt2QQJzIAJBAXRrQccAaiIAQT8gAEE/SRsLIgJBBHQiAEGA7QFqNgIEIAEgAEGI7QFqIgAoAgA2AgggACABNgIAIAEoAgggATYCBEGI9QFBiPUBKQMAQgEgAq2GhDcDAEEBBSADCwtSAQF/IAAoAgQhBCAAKAIAIgAgAQJ/QQAgAkUNABogBEEIdSIBIARBAXFFDQAaIAIoAgAgAWooAgALIAJqIANBAiAEQQJxGyAAKAIAKAIcEQgAC3UBA38CQAJAA0AgACABQcDUAWotAABHBEBB1wAhAiABQQFqIgFB1wBHDQEMAgsLIAEhAiABDQBBoNUBIQAMAQtBoNUBIQEDQCABLQAAIQMgAUEBaiIAIQEgAw0AIAAhASACQX9qIgINAAsLQfDsASgCABogAAsLACAAIAEgAhDcAgsSACAARQRAQQAPCyAAIAEQ1gILuwIAAkAgAUEUSw0AAkACQAJAAkACQAJAAkACQAJAAkAgAUF3ag4KAAECAwQFBgcICQoLIAIg
AigCACIBQQRqNgIAIAAgASgCADYCAA8LIAIgAigCACIBQQRqNgIAIAAgATQCADcDAA8LIAIgAigCACIBQQRqNgIAIAAgATUCADcDAA8LIAIgAigCAEEHakF4cSIBQQhqNgIAIAAgASkDADcDAA8LIAIgAigCACIBQQRqNgIAIAAgATIBADcDAA8LIAIgAigCACIBQQRqNgIAIAAgATMBADcDAA8LIAIgAigCACIBQQRqNgIAIAAgATAAADcDAA8LIAIgAigCACIBQQRqNgIAIAAgATEAADcDAA8LIAIgAigCAEEHakF4cSIBQQhqNgIAIAAgASsDADkDAA8LIAAgAkEAEQQACwtEAQR/IAAoAgAiAiwAACIDEG4EQANAIAAgAkEBaiIENgIAIAFBCmwgA2pBUGohASACLAABIQMgBCECIAMQbg0ACwsgAQsoAQF/IwBBEGsiASQAIAEgADYCDEHoywFBBSABKAIMEAAgAUEQaiQACygBAX8jAEEQayIBJAAgASAANgIMQZDMAUEEIAEoAgwQACABQRBqJAALKAEBfyMAQRBrIgEkACABIAA2AgxBuMwBQQMgASgCDBAAIAFBEGokAAsoAQF/IwBBEGsiASQAIAEgADYCDEHgzAFBAiABKAIMEAAgAUEQaiQACycBAX8jAEEQayIBJAAgASAANgIMQcwPQQEgASgCDBAAIAFBEGokAAsoAQF/IwBBEGsiASQAIAEgADYCDEGIzQFBACABKAIMEAAgAUEQaiQAC+ABAEH45gFBsMQBEBlBhOcBQbXEAUEBQQFBABAYEPMCEPICEPECEPACEO8CEO4CEO0CEOwCEOsCEOoCEOkCQbAOQZ/FARAHQejPAUGrxQEQB0GQzwFBBEHMxQEQAkG0zgFBAkHZxQEQAkHYzQFBBEHoxQEQAkGoDkH3xQEQFxDoAkGlxgEQwgFBysYBEMEBQfHGARDAAUGQxwEQvwFBuMcBEL4BQdXHARC9ARDlAhDkAkHAyAEQwgFB4MgBEMEBQYHJARDAAUGiyQEQvwFBxMkBEL4BQeXJARC9ARDjAhDiAguNBAEDfyMAQRBrIgUkACAFIAI2AgggBSAANgIMIAAgA2ohBwJAIANBB0wEQCADQQFIDQEDQCAAIAItAAA6AAAgAkEBaiECIABBAWoiACAHRw0ACyAFIAc2AgwgBSACNgIIDAELIARBAUYEQCAFQQxqIAVBCGogACACaxB8IAUoAgwhAAsgByABTQRAIAAgA2ohBiAEQQFHIAAgBSgCCCICa0EPSnJFBEADQCAAIAIQZyACQQhqIQIgAEEIaiIAIAZJDQAMAwsACyAAIAIQHCAAQRBqIAJBEGoQHCADQSFIDQEgAEEgaiEAA0AgACACQSBqIgEQHCAAQRBqIAJBMGoQHCABIQIgAEEgaiIAIAZJDQALDAELAkAgACABSwRAIAAhAQwBCwJAIARBAUcgACAFKAIIIgZrQQ9KckUEQCAAIQIgBiEDA0AgAiADEGcgA0EIaiEDIAJBCGoiAiABSQ0ACyABIABrIQQMAQsgACAGEBwgAEEQaiAGQRBqEBwgASAAayIEQSFIDQAgAEEgaiEAIAYhAgNAIAAgAkEgaiIDEBwgAEEQaiACQTBqEBwgAyECIABBIGoiACABSQ0ACwsgBSAEIAZqNgIICyABIAdPDQAgBSgCCCEAA0AgASAALQAAOgAAIABBAWohACABQQFqIgEgB0cNAAsgBSAHNgIMIAUgADYCCAsgBUEQaiQACwkAIAAoAgAQDAtBAQJ/IAAgACgCuOABIgM2AsTgASAAKAK84AEhBCAAIAE2ArzgASAAIAEgAmo2ArjgASAAIAEgBCADa2o2AsDgAQtbAQF/Qbh/IQMCQCABQQNJDQAgAiAAEJUBIgFBA3YiADYCCEEBIQMgAiABQQFxNgIEIAIgAUEBdkEDcSIBNgIAAkACQCABQX9qDgMCAQABC0FsDwsgACEDCyADCw4AIAAoAgAQFiAAKAIAC6wBAQF/IAAoAuzhASEBIABBADYChOEBIAAgARBpNgLI4AEgAEIANwP44AEgAEIANwO44AEgAEHA4AFqQgA3AwAgAEGo0ABqIgFBjICA4AA2AgAgAEEANgKY4gEgAEIANwOI4QEgAEGs0AFqQdCwASkCADcCACAAQbTQAWpB2LABKAIANgIAIAAgATYCDCAAIABBmCBqNgIIIAAgAEGgMGo2AgQgACAAQRBqNgIACx4AIAAoApDiARCXAyAAQQA2AqDiASAAQgA3A5DiAQu3EAEMfyMAQfAAayIFJABBbCEGAkAgA0EKSQ0AIAIvAAAhCyACLwACIQcgAi8ABCEMIAVBCGogBCgCABA0IAMgDCAHIAtqakEGaiIISQ0AIAUtAAohCSAFQdgAaiACQQZqIgIgCxBFIgYQIQ0AIAVBQGsgAiALaiICIAcQRSIGECENACAFQShqIAIgB2oiAiAMEEUiBhAhDQAgBUEQaiACIAxqIAMgCGsQRSIGECENACAEQQRqIQggACABQQNqQQJ2IgJqIgcgAmoiDCACaiILIAAgAWoiDkF9aiIPSSEKIAVB2ABqECMhAiAFQUBrECMhAyAFQShqECMhBAJAIAVBEGoQIyACIANyIARyciALIA9PckUEQCAHIQQgDCEDIAshAgNAIAggBSgCWCAFKAJcIAkQKUEBdGoiBi0AACEKIAVB2ABqIAYtAAEQJiAAIAo6AAAgCCAFKAJAIAUoAkQgCRApQQF0aiIGLQAAIQogBUFAayAGLQABECYgBCAKOgAAIAggBSgCKCAFKAIsIAkQKUEBdGoiBi0AACEKIAVBKGogBi0AARAmIAMgCjoAACAIIAUoAhAgBSgCFCAJEClBAXRqIgYtAAAhCiAFQRBqIAYtAAEQJiACIAo6AAAgCCAFKAJYIAUoAlwgCRApQQF0aiIGLQAAIQogBUHYAGogBi0AARAmIAAgCjoAASAIIAUoAkAgBSgCRCAJEClBAXRqIgYtAAAhCiAFQUBrIAYtAAEQJiAEIAo6AAEgCCAFKAIoIAUoAiwgCRApQQF0aiIGLQAAIQogBUEoaiAGLQABECYgAyAKOgABIAggBSgCECAFKAIUIAkQKUEBdGoiBi0AACEKIAVBEGogBi0AARAmIAIgCjoAASADQQJqIQMgBEECaiEEIABBAmohACAFQdgAahAjGiAFQUBrECMaIAVBKGoQIxogBUEQahAjGiACQQJqIgIgD0kNAAtBACEKDAELIAshAiAMIQMgByEECyADIAtLBEBBbCEGDAELIAQgDEsEQEFsIQYMAQtBbCEGIAAgB0sNAAJAIAVB2ABqECMgB0F9aiIGIABNcg0AA0AgCCAFKAJYIAUoAlwgCRApQQF0aiINLQAAIRAgBUHYAGogDS0AARAmIAAgEDoAACAIIAUoAlggBSgCXCAJEClBAXRqIg0tAAAhECAFQdgAaiANLQABECYgACAQOgABIAVB2ABqECMhDSAAQQJqIgAgBk8NASANRQ0ACwsCQCAFQdgAahAjIAA
gB09yDQADQCAIIAUoAlggBSgCXCAJEClBAXRqIgYtAAAhDSAFQdgAaiAGLQABECYgACANOgAAIAVB2ABqECMhBiAAQQFqIgAgB08NASAGRQ0ACwsgACAHSQRAA0AgCCAFKAJYIAUoAlwgCRApQQF0aiIGLQAAIQ0gBUHYAGogBi0AARAmIAAgDToAACAAQQFqIgAgB0cNAAsLAkAgBUFAaxAjIAxBfWoiACAETXINAANAIAggBSgCQCAFKAJEIAkQKUEBdGoiBy0AACEGIAVBQGsgBy0AARAmIAQgBjoAACAIIAUoAkAgBSgCRCAJEClBAXRqIgctAAAhBiAFQUBrIActAAEQJiAEIAY6AAEgBUFAaxAjIQcgBEECaiIEIABPDQEgB0UNAAsLAkAgBUFAaxAjIAQgDE9yDQADQCAIIAUoAkAgBSgCRCAJEClBAXRqIgAtAAAhByAFQUBrIAAtAAEQJiAEIAc6AAAgBUFAaxAjIQAgBEEBaiIEIAxPDQEgAEUNAAsLIAQgDEkEQANAIAggBSgCQCAFKAJEIAkQKUEBdGoiAC0AACEHIAVBQGsgAC0AARAmIAQgBzoAACAEQQFqIgQgDEcNAAsLAkAgBUEoahAjIAtBfWoiACADTXINAANAIAggBSgCKCAFKAIsIAkQKUEBdGoiBC0AACEHIAVBKGogBC0AARAmIAMgBzoAACAIIAUoAiggBSgCLCAJEClBAXRqIgQtAAAhByAFQShqIAQtAAEQJiADIAc6AAEgBUEoahAjIQQgA0ECaiIDIABPDQEgBEUNAAsLAkAgBUEoahAjIAMgC09yDQADQCAIIAUoAiggBSgCLCAJEClBAXRqIgAtAAAhBCAFQShqIAAtAAEQJiADIAQ6AAAgBUEoahAjIQAgA0EBaiIDIAtPDQEgAEUNAAsLIAMgC0kEQANAIAggBSgCKCAFKAIsIAkQKUEBdGoiAC0AACEEIAVBKGogAC0AARAmIAMgBDoAACADQQFqIgMgC0cNAAsLAkAgBUEQahAjIApBAXNyDQADQCAIIAUoAhAgBSgCFCAJEClBAXRqIgAtAAAhAyAFQRBqIAAtAAEQJiACIAM6AAAgCCAFKAIQIAUoAhQgCRApQQF0aiIALQAAIQMgBUEQaiAALQABECYgAiADOgABIAVBEGoQIyEAIAJBAmoiAiAPTw0BIABFDQALCwJAIAVBEGoQIyACIA5Pcg0AA0AgCCAFKAIQIAUoAhQgCRApQQF0aiIALQAAIQMgBUEQaiAALQABECYgAiADOgAAIAVBEGoQIyEAIAJBAWoiAiAOTw0BIABFDQALCyACIA5JBEADQCAIIAUoAhAgBSgCFCAJEClBAXRqIgAtAAAhAyAFQRBqIAAtAAEQJiACIAM6AAAgAkEBaiICIA5HDQALCyABQWwgBSgCXCAFKAJgIAUoAmQQSyAFKAJEIAUoAkggBSgCTBBLcSAFKAIsIAUoAjAgBSgCNBBLcSAFKAIUIAUoAhggBSgCHBBLcRshBgsgBUHwAGokACAGC7YUAQ1/IwBB8ABrIgUkAEFsIQYCQCADQQpJDQAgAi8AACELIAIvAAIhCSACLwAEIQwgBUEIaiAEKAIAEDQgAyAMIAkgC2pqQQZqIgdJDQAgBS0ACiEIIAVB2ABqIAJBBmoiAiALEEUiBhAhDQAgBUFAayACIAtqIgIgCRBFIgYQIQ0AIAVBKGogAiAJaiICIAwQRSIGECENACAFQRBqIAIgDGogAyAHaxBFIgYQIQ0AIARBBGohByAAIAFBA2pBAnYiAmoiCSACaiIMIAJqIgsgACABaiIRQX1qIg9JIQ0gBUHYAGoQIyECIAVBQGsQIyEDIAVBKGoQIyEEAkAgBUEQahAjIAIgA3IgBHJyIAsgD09yRQRAIAkhAiAMIQQgCyEDA0AgACAHIAUoAlggBSgCXCAIEClBAnRqIgYvAQA7AAAgBUHYAGogBi0AAhAmIAYtAAMhDSACIAcgBSgCQCAFKAJEIAgQKUECdGoiBi8BADsAACAFQUBrIAYtAAIQJiAGLQADIQogBCAHIAUoAiggBSgCLCAIEClBAnRqIgYvAQA7AAAgBUEoaiAGLQACECYgBi0AAyEOIAMgByAFKAIQIAUoAhQgCBApQQJ0aiIGLwEAOwAAIAVBEGogBi0AAhAmIAYtAAMhBiAAIA1qIg0gByAFKAJYIAUoAlwgCBApQQJ0aiIALwEAOwAAIAVB2ABqIAAtAAIQJiAALQADIRAgAiAKaiICIAcgBSgCQCAFKAJEIAgQKUECdGoiAC8BADsAACAFQUBrIAAtAAIQJiAALQADIQogBCAOaiIEIAcgBSgCKCAFKAIsIAgQKUECdGoiAC8BADsAACAFQShqIAAtAAIQJiAALQADIQ4gAyAGaiIGIAcgBSgCECAFKAIUIAgQKUECdGoiAy8BADsAACAFQRBqIAMtAAIQJiANIBBqIQAgAiAKaiECIAQgDmohBCAGIAMtAANqIgMgD0khDSAFQdgAahAjIQYgBUFAaxAjIQogBUEoahAjIQ4gBUEQahAjIRAgAyAPTw0CIAYgCnIgDnIgEHJFDQALDAELIAshAyAMIQQgCSECCyAEIAtLBEBBbCEGDAELIAIgDEsEQEFsIQYMAQtBbCEGIAAgCUsNAAJAIAVB2ABqECMgCUF9aiIKIABNcg0AA0AgACAHIAUoAlggBSgCXCAIEClBAnRqIgYvAQA7AAAgBUHYAGogBi0AAhAmIAAgBi0AA2oiBiAHIAUoAlggBSgCXCAIEClBAnRqIgAvAQA7AAAgBUHYAGogAC0AAhAmIAYgAC0AA2ohACAFQdgAahAjDQEgACAKSQ0ACwsCQCAFQdgAahAjIAAgCUF+aiIGS3INAANAIAAgByAFKAJYIAUoAlwgCBApQQJ0aiIKLwEAOwAAIAVB2ABqIAotAAIQJiAAIAotAANqIQAgBUHYAGoQIw0BIAAgBk0NAAsLIAAgBk0EQANAIAAgByAFKAJYIAUoAlwgCBApQQJ0aiIKLwEAOwAAIAVB2ABqIAotAAIQJiAAIAotAANqIgAgBk0NAAsLAkAgACAJTw0AIAAgByAFKAJYIAUoAlwgCBApIglBAnRqIgAtAAA6AAAgAC0AA0EBRgRAIAVB2ABqIAAtAAIQJgwBCyAFKAJcQR9LDQAgBUHYAGogByAJQQJ0ai0AAhAmIAUoAlxBIUkNACAFQSA2AlwLAkAgBUFAaxAjIAxBfWoiCSACTXINAANAIAIgByAFKAJAIAUoAkQgCBApQQJ0aiIALwEAOwAAIAVBQGsgAC0AAhAmIAIgAC0AA2oiAiAHIAUoAkAgBSgCRCAIEClBAnRqIgAvAQA7AAAgBUFAayAALQACECYgAiAALQADaiECIAVBQGsQIw0BIAIgCUkNAAsLAkAgBUFAaxAjIAIgDEF+aiIAS3INAANAIAIgByAFKAJAIAUoAkQgCBApQQJ0aiIJLwEAOwAAIAVBQGsgCS0AAhAmIAIgCS0AA2ohAiAFQUBrECMNASACIA
BNDQALCyACIABNBEADQCACIAcgBSgCQCAFKAJEIAgQKUECdGoiCS8BADsAACAFQUBrIAktAAIQJiACIAktAANqIgIgAE0NAAsLAkAgAiAMTw0AIAIgByAFKAJAIAUoAkQgCBApIgJBAnRqIgAtAAA6AAAgAC0AA0EBRgRAIAVBQGsgAC0AAhAmDAELIAUoAkRBH0sNACAFQUBrIAcgAkECdGotAAIQJiAFKAJEQSFJDQAgBUEgNgJECwJAIAVBKGoQIyALQX1qIgIgBE1yDQADQCAEIAcgBSgCKCAFKAIsIAgQKUECdGoiAC8BADsAACAFQShqIAAtAAIQJiAEIAAtAANqIgQgByAFKAIoIAUoAiwgCBApQQJ0aiIALwEAOwAAIAVBKGogAC0AAhAmIAQgAC0AA2ohBCAFQShqECMNASAEIAJJDQALCwJAIAVBKGoQIyAEIAtBfmoiAEtyDQADQCAEIAcgBSgCKCAFKAIsIAgQKUECdGoiAi8BADsAACAFQShqIAItAAIQJiAEIAItAANqIQQgBUEoahAjDQEgBCAATQ0ACwsgBCAATQRAA0AgBCAHIAUoAiggBSgCLCAIEClBAnRqIgIvAQA7AAAgBUEoaiACLQACECYgBCACLQADaiIEIABNDQALCwJAIAQgC08NACAEIAcgBSgCKCAFKAIsIAgQKSICQQJ0aiIALQAAOgAAIAAtAANBAUYEQCAFQShqIAAtAAIQJgwBCyAFKAIsQR9LDQAgBUEoaiAHIAJBAnRqLQACECYgBSgCLEEhSQ0AIAVBIDYCLAsCQCAFQRBqECMgDUEBc3INAANAIAMgByAFKAIQIAUoAhQgCBApQQJ0aiIALwEAOwAAIAVBEGogAC0AAhAmIAMgAC0AA2oiAiAHIAUoAhAgBSgCFCAIEClBAnRqIgAvAQA7AAAgBUEQaiAALQACECYgAiAALQADaiEDIAVBEGoQIw0BIAMgD0kNAAsLAkAgBUEQahAjIAMgEUF+aiIAS3INAANAIAMgByAFKAIQIAUoAhQgCBApQQJ0aiICLwEAOwAAIAVBEGogAi0AAhAmIAMgAi0AA2ohAyAFQRBqECMNASADIABNDQALCyADIABNBEADQCADIAcgBSgCECAFKAIUIAgQKUECdGoiAi8BADsAACAFQRBqIAItAAIQJiADIAItAANqIgMgAE0NAAsLAkAgAyARTw0AIAMgByAFKAIQIAUoAhQgCBApIgJBAnRqIgAtAAA6AAAgAC0AA0EBRgRAIAVBEGogAC0AAhAmDAELIAUoAhRBH0sNACAFQRBqIAcgAkECdGotAAIQJiAFKAIUQSFJDQAgBUEgNgIUCyABQWwgBSgCXCAFKAJgIAUoAmQQSyAFKAJEIAUoAkggBSgCTBBLcSAFKAIsIAUoAjAgBSgCNBBLcSAFKAIUIAUoAhggBSgCHBBLcRshBgsgBUHwAGokACAGC48DAQR/IwBBIGsiBSQAIAUgBCgCABA0IAUtAAIhByAFQQhqIAIgAxBFIgIQIUUEQCAEQQRqIQICQCAFQQhqECMgACABaiIDQX1qIgQgAE1yDQADQCACIAUoAgggBSgCDCAHEClBAXRqIgYtAAAhCCAFQQhqIAYtAAEQJiAAIAg6AAAgAiAFKAIIIAUoAgwgBxApQQF0aiIGLQAAIQggBUEIaiAGLQABECYgACAIOgABIAVBCGoQIyEGIABBAmoiACAETw0BIAZFDQALCwJAIAVBCGoQIyAAIANPcg0AA0AgAiAFKAIIIAUoAgwgBxApQQF0aiIELQAAIQYgBUEIaiAELQABECYgACAGOgAAIAVBCGoQIyEEIABBAWoiACADTw0BIARFDQALCyAAIANJBEADQCACIAUoAgggBSgCDCAHEClBAXRqIgQtAAAhBiAFQQhqIAQtAAEQJiAAIAY6AAAgAEEBaiIAIANHDQALCyABQWwgBSgCDCAFKAIQIAUoAhQQSxshAgsgBUEgaiQAIAILwgQBDX8jAEEQayIFJAAgBUEEaiAAKAIAEDQgBS0ABCEHIANB8ARqQQBB7AAQKCEIQVQhBAJAIAdBDEsNACADQdwJaiIMIAggBUEIaiAFQQxqIAEgAhD7ASIQECFFBEAgBSgCDCINIAdLDQEgA0GoBWohBiANIQQDQCAEIgJBf2ohBCAIIAJBAnRqKAIARQ0AC0EBIQFBACEEIAJBAWoiCkECTwRAA0AgCCABQQJ0IgtqKAIAIQ4gBiALaiAJNgIAIAkgDmohCSABIAJHIQsgAUEBaiEBIAsNAAsLIANB3AVqIQsgBiAJNgIAIAUoAggiAQRAA0AgBiAEIAxqLQAAIg5BAnRqIg8gDygCACIPQQFqNgIAIAsgD0EBdGoiDyAOOgABIA8gBDoAACAEQQFqIgQgAUcNAAsLQQAhASADQQA2AqgFIApBAk8EQCANQX9zIAdqIQZBASEEA0AgCCAEQQJ0IgxqKAIAIQ4gAyAMaiABNgIAIA4gBCAGanQgAWohASACIARHIQwgBEEBaiEEIAwNAAsLIA1BAWoiDSACayIBIAcgAWtBAWoiCEkEQCAKQQJJIQYDQEEBIQQgBkUEQANAIARBAnQiCiADIAFBNGxqaiADIApqKAIAIAF2NgIAIAIgBEchCiAEQQFqIQQgCg0ACwsgAUEBaiIBIAhJDQALCyAAQQRqIAcgCyAJIANBpAVqIAMgAiANEJYDIAVBAToABSAFIAc6AAYgACAFKAIENgIACyAQIQQLIAVBEGokACAEC+ACAQl/IwBBEGsiBCQAIARBADYCDCAEQQA2AggCQCADQUBrIgkgAyAEQQhqIARBDGogASACEPsBIggQIQ0AIARBBGogACgCABA0QQEhASAEKAIMIgUgBC0ABEEBak0EQEEAIQIgBEEAOgAFIAQgBToABiAAIAQoAgQ2AgAgBUEBakEBSwRAA0AgAyABQQJ0aiIGKAIAIQcgBiACNgIAIAcgAUF/anQgAmohAiABIAVGIQYgAUEBaiEBIAZFDQALCyAEKAIIIgdFDQEgAEEEaiEKIAVBAWohC0EAIQADQCADIAAgCWotAAAiBUECdGoiBigCACIBIAFBASAFdEEBdSIMaiICSQRAIAsgBWshBQNAIAogAUEBdGoiAiAFOgABIAIgADoAACABQQFqIgEgBigCACAMaiICSQ0ACwsgBiACNgIAIABBAWoiACAHRw0ACwwBC0FUIQgLIARBEGokACAICxQAIAAoAABBgPqerQNsQSAgAWt2CygAAkACQAJAIAAoAowBQX9qDgIAAQILIAAgARDCAw8LIAAgARDHAwsLOgEBfyABIAAoAgRrIgEgACgCGCICQYAIaksEQCAAIAEgASACa0GAeGoiAEGABCAAQYAESRtrNgIYCwsVACAAEJEBBEAgACgCBA8LIAAtAAsLRQEBfwJAIAIgA00gACABTXINAANAIABBf2oiAC0AACACQX9qIgItAABHDQEgBEEBaiEEIAIgA00NASAAIAFLDQALCyAECwwAIABBICABa62Ip
wsQACAAIAEgAigCCHRBA3RqCxIAIABBwAAgAWutiKdBACABGwsvAEEgIAFrIgEgAkkEQCAAp0F/IAJ0QX9zcQ8LIAAgASACa62Ip0F/IAJ0QX9zcQsgACACrSAAIAGtQgp8IAN+fULjyJW9y5vvjU9+fEIKfAsoAQF/IwBBEGsiAiQAIABBzA8gAkEIaiABEMYCEBs2AgAgAkEQaiQACxAAIAAgAjYCBCAAIAE2AgALGwAgACkAAEKAgOz8y5vvjU9+QcAAIAFrrYinCxsAIAApAABCgICA2Mub741PfkHAACABa62IpwsUACAAKAAAQbHz3fF5bEEgIAFrdgsNACAAKAIIQQh2QQFxCxAAIABCADcCACAAQgA3AggLUgEBfyAAKAIgIgIgAUkEQCACRQRAIAAgACgCCDYCEAsCQCABQQJJDQAgACAAKAIUQXxxIgI2AhQgAiAAKAIQTw0AIAAgAjYCEAsgACABNgIgCwtHAQF/IAAoAgwhAyAAIAIQ4QEgACgCFCABayIBIANJBEAgAEEBNgIYQQAPCyABIAAoAhBJBEAgACABNgIQCyAAIAE2AhQgAQsKACAAQQNqQXxxCw8AIAAgARDnASACQQNsTwsdAQF/IAAgACgCACAAKAIEayIBNgIQIAAgATYCDAsvACAAQQA2AhggACAAKAIINgIMIAAgACgCBDYCFCAAKAIgQQJPBEAgAEEBNgIgCwsHACABIABrCw0AIAAoAhAgACgCDEkLFQAgACABQX9qQQYgAUEHSxt2QQJqC8oBAQd/AkAgAUUNACAAKAIEIgMgACgCCCIGIAMgBksbIQgDQCADIAhGDQEgACgCACIJIANBDGxqIgUhBCABIAUoAgQiB00EQCAEIAcgAWs2AgQPCyAEQQA2AgQgASAHayIBIAUoAggiBEkEQCAFIAQgAWsiATYCCCABIAJPDQIgA0EBaiICIAZJBEAgCUEMaiADQQxsaiIDIAMoAgQgAWo2AgQLIAAgAjYCBA8LIAVBADYCCCAAIANBAWoiAzYCBCABIARrIgENAAsLC5gEAgx/AX4jAEEQayIIJAAgBCAFaiEJIAEoAoQBIQ8gASgCjAEgARDsARDzASELAkACQCAFQQFIDQAgACgCBCAAKAIITw0AIAlBYGohDANAIAggACAJIARrIgUgDxCmAyAIKAIAIg1FDQIgASAEENIBIAEgBBDRASABIAIgAyAEIAgoAgQiBSALEQIAIQYgAykCACESIAMgDTYCACADIBI3AgQgBCAFaiIKIAZrIQcgCCgCCCIQQX1qIQ4gAigCDCEEAkACQCAKIAxNBEAgBCAHEBwgAigCDCEEIAZBEE0EQCACIAQgBmo2AgwMAwsgBEEQaiAHQRBqIgUQHCAEQSBqIAdBIGoQHCAGQTFIDQEgBCAGaiERIARBMGohBANAIAQgBUEgaiIHEBwgBEEQaiAFQTBqEBwgByEFIARBIGoiBCARSQ0ACwwBCyAEIAcgCiAMECILIAIgAigCDCAGajYCDCAGQYCABEkNACACQQE2AiQgAiACKAIEIAIoAgBrQQN1NgIoCyACKAIEIgQgDUEDajYCACAEIAY7AQQgDkGAgARPBEAgAkECNgIkIAIgBCACKAIAa0EDdTYCKAsgBCAOOwEGIAIgBEEIajYCBCAKIBBqIgQgCU8NASAAKAIEIAAoAghJDQALCyAJIARrIQULIAEgBBDSASABIAQQ0QEgASACIAMgBCAFIAsRAgAhACAIQRBqJAAgAAtRAQJ/IwBBIGsiASQAIAEgACgCEDYCGCABIAApAgg3AxAgASAAKQIANwMIQQEhAiABQQhqEOgBRQRAIAAoAnBBAEdBAXQhAgsgAUEgaiQAIAILGwEBfyAAKAIQIAAoAgwiAUkEQCAAIAE2AhALCwwAIAAgACgCCDYCEAsRACABIAAoAgRrQYCAgIB6SwupAQEEfwJAIAEgACgCACIDRgRAIAAoAgwhAyAAKAIQIQUgACgCCCEEQQEhBgwBCyAAIAAoAgwiBTYCECAAIAAoAgQiBDYCCCAAIAMgBGsiAzYCDCAAIAEgA2s2AgQgAyAFa0EHSw0AIAAgAzYCECADIQULIAAgASACaiICNgIAIAIgBCAFak0gAyAEaiABTXJFBEAgACADIAIgBGsiACAAIANKGzYCEAsgBguRAwEGfyACKAIoIQYgAigCBCEJIAIoAiQhByACKAIgIgoEQCADQv8BViADQv+BBFZqIANC/v///w9WaiEIC0G6fyEFAkAgAUESSQ0AQQAgBEEARyAEQf8BS2ogBEH//wNLaiAGGyIGIAdBAEpBAnRqQSBBACAKQQBHQQEgCXStIANacSIBG3IgCEEGdHIhB0EAIQUgAigCAEUEQCAAQajqvmkQTUEEIQULIAAgBWogBzoAACAFQQFyIQUgAUUEQCAAIAVqIAlBA3RBsH9qOgAAIAVBAWohBQsCQAJAAkACQCAGQX9qDgMAAQIDCyAAIAVqIAQ6AAAgBUEBaiEFDAILIAAgBWogBEH//wNxEC8gBUECaiEFDAELIAAgBWogBBBNIAVBBGohBQsCQAJAAkACQCAIQX9qDgMBAgMACyABRQ0DIAAgBWogAzwAACAFQQFqDwsgACAFaiADp0GAfmpB//8DcRAvIAVBAmoPCyAAIAVqIAOnEE0gBUEEag8LIAAgBWogAzcAACAFQQhqIQULIAULHQAgAEEANgIkIAAgACgCCDYCDCAAIAAoAgA2AgQLFQAgAUEobCAAQQJ0akGQmQFqKAIACwoAIAAgAUEFS2sLAwABC00AIAAoAvAFIAAoApgDIAAoApwDIAAoAqADEGQgACgCgAYQ9wMgAEEANgKQBiAAQgA3A4gGIABCADcDgAYgAEIANwP4BSAAQgA3A/AFC0QBA38gAkEATgR/A0AgBCABIANBAnQiBGooAgAgACAEai0AAmxqIQQgAiADRyEFIANBAWohAyAFDQALIARBA3YFIAMLC6AEAQV/IwBBEGsiCyQAIAtB/wE2AgxBfyEJAkAgBUEDcQ0AIAFFBEBBACEJDAELQbh/IQkgA0GAgAhLDQAgACABaiEMAkAgB0EARyAIQQBHcSIIRQ0AIAcoAgBBAkcNACAAIAAgDCACIAMgBCAGEIEBIQkMAQsgBSALQQxqIAIgAyAFEIkEIgkQIQ0AIAMgCUYEQCAAIAItAAA6AABBASEJDAELIAkgA0EHdkEEak0hCkEAIQkgCg0AAkAgB0UNAAJAAkAgBygCACIJQQFGBEAgBiAFIAsoAgwQ+wMNASAHQQA2AgAMAwsgCUUNAiAIQQFzRQ0BDAILIAhFDQELIAAgACAMIAIgAyAEIAYQgQEhCQwBCyAFQYAIaiIIIAUgCygCDCIKQQsgAyAKQQEQgQIgBUGAEGoQ/wMiCRAhDQAgCkECdCINIAhqQQRqQQBB/AcgDWsQKBogACABIAggCiAJEIAEIgEQIQRAIAEhCQwBCwJAAkAgBwRAIAcoAgBFBEAgAUEMaiEFDAILIAYgBSAKEPcBIQkg
CCAFIAoQ9wEhCiABQQxqIgUgA0lBACAJIAEgCmpLGw0BIAAgACAMIAIgAyAEIAYQgQEhCQwDC0EAIQkgAUEMaiADTw0CDAELQQAhCSAFIANPDQEgB0EANgIACyAGBEAgBiAIQYAIECoaCyAAIAAgAWogDCACIAMgBCAIEIEBIQkLIAtBEGokACAJCw0AIAAgAUECdGotAAILgAIBBn8jAEGQA2siBCQAIARBDDYCjAMCQCADQQJJDQAgBEEgaiAEQYwDaiACIAMQqgEiBSADRiEGIAVBAUYgAyAFRnINACAEQQYgAyAEKAKMAyIHEKcBIgggBEEgaiADIAcQpgEiBhAhDQAgACABIAQgByAIEKgBIgUQISIJBEAgBSEGDAELIARBoAFqIAQgByAIIARB4ABqQcAAEKkBIgYQIQ0AIAAgACAFaiAJGyIFIAAgAWogBWsiASACIAMgBEGgAWogAyADQQd2akEIaiABTRCGBCIBECEEQCABIQYMAQtBACEGIAFFDQAgASAFaiAAayEGCyAEQZADaiQAIAYLggQBBn8jAEGQAmsiCyQAQbh/IQgCQCAFRQ0AIAQsAAAiCUH/AXEhBgJAAkAgCUF/TARAIAZBgn9qQQF2IgkgBU8NA0FsIQggBkGBf2oiB0H/AUsNAyAHRQ0CIARBAWohBEEAIQUDQCAAIAVqIAQgBUEBdmoiBi0AAEEEdjoAACAAIAVBAXJqIAYtAABBD3E6AAAgBUECaiIFIAdJDQALIAkhBgwBCyAGIAVPDQIgACAEQQFqIAYgCxCBBCIHIQggBxAhDQILIAFCADcCAEEAIQQgAUEANgIwIAFCADcCKCABQgA3AiAgAUIANwIYIAFCADcCECABQgA3AghBbCEIIAdFDQFBACEFA0AgACAFaiIJLQAAIgpBC0sNAiABIApBAnRqIgogCigCAEEBajYCAEEBIAktAAB0QQF1IARqIQQgBUEBaiIFIAdHDQALIARFDQEgBBAkQQFqIgVBDEsNASADIAU2AgBBAUEBIAV0IARrIgMQJCIEdCADRw0BIAAgB2ogBEEBaiIAOgAAIAEgAEECdGoiACAAKAIAQQFqNgIAIAEoAgQiAEECSSAAQQFxcg0BIAIgB0EBajYCACAGQQFqIQgMAQsgAUIANwIAIAFBADYCMCABQgA3AiggAUIANwIgIAFCADcCGCABQgA3AhAgAUIANwIICyALQZACaiQAIAgLCAAgACABEE0LMQECfyAAEIQEIAAQOSAAKAIMIgIgACgCEEkEfyACIAAoAghrIAAoAgRBAEdqBSABCwtFAQF/IAAoAgQhASAAKAIMIAAoAgAQ/AEgACAAKAIMIAFBA3ZqNgIMIAAgACgCBEEHcTYCBCAAIAAoAgAgAUF4cXY2AgALLwAgACABNgIMIAAgATYCCCAAQgA3AgAgACABIAJqQXxqNgIQQbp/QQAgAkEFSRsLGgAgABAkQQFqIgAgARAkQQJqIgEgACABSRsLQQEBfyABQX9qECQhBCABIAIQgAIiASAEIANrIgIgACACIABJGyIAIAEgAEsbIgBBBSAAQQVLGyIAQQwgAEEMSRsL5AQBC38Cf0F/IANBAWoiDiADSQ0AGiAEQQFqIQ8gBEF7aiEHQQEgBHQiDEEBaiEKIAAgAWpBfmohDUEEIQEgACEIA0ACQAJAIAtFBEAgBiEEDAELAkAgBiIEIA5PDQADQCACIARBAXRqLwEADQEgAyAERiEJIARBAWohBCAJRQ0ACyAKIQkMAgsgBCAORgRAIAohCQwCCyAEIAZBGGoiCU8EQEH//wMgAXQhCwNAIAUgCCANTXJFBEBBun8PCyAIIAcgC2oiBjsAACAGQRB2IQcgCEECaiEIIAkiBkEYaiIQIQkgBCAQTw0ACwsgBCAGQQNqIglPBEADQEEDIAF0IAdqIQcgAUECaiEBIAQgCSIGQQNqIglPDQALCyAEIAZrIAF0IAdqIQcgAUEPSARAIAFBAmohAQwBCyAFIAggDU1yRQRAQbp/DwsgCCAHOwAAIAFBcmohASAHQRB2IQcgCEECaiEIC0F/IAIgBEEBdGouAQAiBkEAIAZrIAZBAEgbIApqIglBAUgNAhogASAPakEAIApBf3MgDEEBdGoiCyAGQQFqIgYgDEgbIAZqIgogC0hrIQYgCSAMSARAA0AgD0F/aiEPIAkgDEEBdSIMSA0ACwsgCiABdCAHaiEHIAZBEUgEfyAGBSAFIAggDU1yRQRAQbp/DwsgCCAHOwAAIAdBEHYhByAIQQJqIQggBkFwagshASAJQQJIDQAgCkEBRiELIAkhCiAEQQFqIgYgDkkNAQsLQX8gCUEBRw0AGiAFRQRAQbp/IAggDUsNARoLIAggBzsAACAIIAFBB2pBCG1qIABrCwvgBgEJfyABKAIAIQwgBUEAQYAgECghByADRQRAIABBACAMQQFqECgaIAFBADYCAEEADwsgB0GAGGohCCAHQYAQaiEJIAdBgAhqIQogAiADaiENAkAgA0EUSARAIAIhAwwBCyANQXFqIQ4gAkEEaiEFIAIoAAAhBgNAIAUoAAAhAyAHIAZB/wFxQQJ0aiIFIAUoAgBBAWo2AgAgCiAGQQZ2QfwHcWoiBSAFKAIAQQFqNgIAIAkgBkEOdkH8B3FqIgUgBSgCAEEBajYCACAIIAZBFnZB/AdxaiIFIAUoAgBBAWo2AgAgAigACCEFIAcgA0H/AXFBAnRqIgYgBigCAEEBajYCACAKIANBBnZB/AdxaiIGIAYoAgBBAWo2AgAgCSADQQ52QfwHcWoiBiAGKAIAQQFqNgIAIAggA0EWdkH8B3FqIgMgAygCAEEBajYCACACKAAMIQsgByAFQf8BcUECdGoiAyADKAIAQQFqNgIAIAogBUEGdkH8B3FqIgMgAygCAEEBajYCACAJIAVBDnZB/AdxaiIDIAMoAgBBAWo2AgAgCCAFQRZ2QfwHcWoiAyADKAIAQQFqNgIAIAJBEGoiAygAACEGIAcgC0H/AXFBAnRqIgUgBSgCAEEBajYCACAKIAtBBnZB/AdxaiIFIAUoAgBBAWo2AgAgCSALQQ52QfwHcWoiBSAFKAIAQQFqNgIAIAggC0EWdkH8B3FqIgUgBSgCAEEBajYCACACQRRqIQUgAyECIAUgDkkNAAsLIAMgDUkEQANAIAcgAy0AAEECdGoiAiACKAIAQQFqNgIAIANBAWoiAyANRw0ACwsCQCAERSAMQf8BIAwbIgJB/wFPcg0AQf8BIQMDQAJAIAcgA0ECdCIEaiIFIAUoAgAgBCAIaigCACAEIAlqKAIAIAQgCmooAgBqamoiBDYCACAEDQAgA0F/aiIDIAJLDQEMAgsLQVAPCyACQf8BIAJB/wFJGyEFQQAhA0EAIQYDQCAAIANBAnQiAmogAiAIaigCACACIAlqKAIAIAIgCmooAgAgAiAHaigCAGpqaiICNgIAIAIgBiACIAZLGyEGIAMgBUchAiADQQFqIQMgAg0ACwNAIAUiAkF/aiEFIAAgAkECdGooAgBFDQA
LIAEgAjYCACAGC4gDAgV/BX4gAEEoaiIBIAAoAkgiBWohAgJ+IAApAwAiBkIgWgRAIAApAxAiB0IHiSAAKQMIIghCAYl8IAApAxgiCUIMiXwgACkDICIKQhKJfCAIEIQBIAcQhAEgCRCEASAKEIQBDAELIAApAxhCxc/ZsvHluuonfAsgBnwhBgJAIAIgAEEwaiIESQRAIAEhAwwBCwNAQgAgASkAABBOIAaFQhuJQoeVr6+Ytt6bnn9+QuPcypX8zvL1hX98IQYgBCIDIgFBCGoiBCACTQ0ACwsCQCADQQRqIgEgAksEQCADIQEMAQsgAygAAK1Ch5Wvr5i23puef34gBoVCF4lCz9bTvtLHq9lCfkL5893xmfaZqxZ8IQYLIAEgAkkEQCAAIAVqQShqIQADQCABMQAAQsXP2bLx5brqJ34gBoVCC4lCh5Wvr5i23puef34hBiABQQFqIgEgAEcNAAsLIAZCIYggBoVCz9bTvtLHq9lCfiIGQh2IIAaFQvnz3fGZ9pmrFn4iBkIgiCAGhQv4AgICfwR+IAAgACkDACACrXw3AwACQAJAIAAoAkgiAyACakEfTQRAIAAgA2pBKGogASACEKsBIAAoAkggAmohAQwBCyABIAJqIQQCQAJ/IAMEQCAAQShqIgIgA2ogAUEgIANrEKsBIAAgACkDCCACKQAAEE43AwggACAAKQMQIAApADAQTjcDECAAIAApAxggACkAOBBONwMYIAAgACkDICAAQUBrKQAAEE43AyAgACgCSCECIABBADYCSCABIAJrQSBqIQELIAFBIGogBEsLBEAgASECDAELIARBYGohAyAAKQMgIQUgACkDGCEGIAApAxAhByAAKQMIIQgDQCAIIAEpAAAQTiEIIAcgASkACBBOIQcgBiABKQAQEE4hBiAFIAEpABgQTiEFIAFBIGoiAiEBIAIgA00NAAsgACAFNwMgIAAgBjcDGCAAIAc3AxAgACAINwMICyACIARPDQEgAEEoaiACIAQgAmsiARCrAQsgACABNgJICwtlACAAQgA3AyggAEL56tDQ58mh5OEANwMgIABCADcDGCAAQs/W077Sx6vZQjcDECAAQtbrgu7q/Yn14AA3AwggAEIANwMAIABCADcDMCAAQgA3AzggAEFAa0IANwMAIABCADcDSAsVACABBEAgAiAAIAERAwAPCyAAEEwLYQEDf0F+IQECQCAARQ0AIAAoAhwiAkUNACAAKAIkIgNFDQAgAigCNCIBBEAgACgCKCABIAMRBAAgACgCJCEDIAAoAhwhAgsgACgCKCACIAMRBABBACEBIABBADYCHAsgAQudCwEMfyACQQBOBEBBBEEDIAEvAQIiCxshB0EHQYoBIAsbIQQgAEG5LWohCEF/IQYDQCALIQkCQCAJIAEgDCINQQFqIgxBAnRqLwECIgtHIAVBAWoiAyAETnJFBEAgAyEFDAELAkAgAyAHSARAIAAgCUECdGoiBUH8FGohByAFQf4UaiEKIAAvAbgtIQQgACgCvC0hBQNAIAovAQAhBiAAIAQgBy8BACIOIAV0ciIEOwG4LSAAAn8gBUEQIAZrSgRAIAAgACgCFCIFQQFqNgIUIAUgACgCCGogBDoAACAAIAAoAhQiBUEBajYCFCAFIAAoAghqIAgtAAA6AAAgACAOQRAgACgCvC0iBWt2IgQ7AbgtIAUgBmpBcGoMAQsgBSAGagsiBTYCvC0gA0F/aiIDDQALDAELIAACfyAJBEACQCAGIAlGBEAgAC8BuC0hByAAKAK8LSEEIAMhBQwBCyAAIAlBAnRqIgZB/hRqLwEAIQMgACAALwG4LSAGQfwUai8BACIKIAAoArwtIgZ0ciIHOwG4LQJAIAZBECADa0oEQCAAIAAoAhQiBkEBajYCFCAGIAAoAghqIAc6AAAgACAAKAIUIgZBAWo2AhQgBiAAKAIIaiAILQAAOgAAIAMgACgCvC0iBmpBcGohBCAKQRAgBmt2IQcMAQsgAyAGaiEECyAAIAQ2ArwtCyAHIAAvAbwVIgYgBHRyIQcCQCAEQRAgAC8BvhUiA2tKBEAgACAHOwG4LSAAIAAoAhQiBEEBajYCFCAEIAAoAghqIAc6AAAgACAAKAIUIgRBAWo2AhQgBCAAKAIIaiAILQAAOgAAIAMgACgCvC0iB2pBcGohBCAGQRAgB2t2IQcMAQsgAyAEaiEECyAAIAQ2ArwtIAAgByAFQf3/A2pB//8DcSIFIAR0ciIDOwG4LSAEQQ9OBEAgACAAKAIUIgZBAWo2AhQgBiAAKAIIaiADOgAAIAAgACgCFCIDQQFqNgIUIAMgACgCCGogCC0AADoAACAAIAVBECAAKAK8LSIFa3Y7AbgtIAVBcmoMAgsgBEECagwBCyAFQQlMBEAgAC8BuC0gAC8BwBUiCiAAKAK8LSIDdHIhBwJAIANBECAALwHCFSIGa0oEQCAAIAc7AbgtIAAgACgCFCIDQQFqNgIUIAMgACgCCGogBzoAACAAIAAoAhQiA0EBajYCFCADIAAoAghqIAgtAAA6AAAgBiAAKAK8LSIDakFwaiEEIApBECADa3YhBwwBCyADIAZqIQQLIAAgBDYCvC0gACAHIAVB/v8DakH//wNxIgUgBHRyIgM7AbgtIARBDk4EQCAAIAAoAhQiBkEBajYCFCAGIAAoAghqIAM6AAAgACAAKAIUIgNBAWo2AhQgAyAAKAIIaiAILQAAOgAAIAAgBUEQIAAoArwtIgVrdjsBuC0gBUFzagwCCyAEQQNqDAELIAAvAbgtIAAvAcQVIgogACgCvC0iA3RyIQcCQCADQRAgAC8BxhUiBmtKBEAgACAHOwG4LSAAIAAoAhQiA0EBajYCFCADIAAoAghqIAc6AAAgACAAKAIUIgNBAWo2AhQgAyAAKAIIaiAILQAAOgAAIAYgACgCvC0iA2pBcGohBCAKQRAgA2t2IQcMAQsgAyAGaiEECyAAIAQ2ArwtIAAgByAFQfb/A2pB//8DcSIFIAR0ciIDOwG4LSAEQQpOBEAgACAAKAIUIgZBAWo2AhQgBiAAKAIIaiADOgAAIAAgACgCFCIDQQFqNgIUIAMgACgCCGogCC0AADoAACAAIAVBECAAKAK8LSIFa3Y7AbgtIAVBd2oMAQsgBEEHags2ArwtC0EAIQUCfyALRQRAQYoBIQRBAwwBC0EGQQcgCSALRiIDGyEEQQNBBCADGwshByAJIQYLIAIgDUcNAAsLC7kCAQx/IAEvAQIhBiACQQJ0IAFqQf//AzsBBiACQQBOBEBBB0GKASAGGyEIQQRBAyAGGyEHIABBwBVqIQsgAEHEFWohDCAAQbwVaiENQX8hCQNAIAYhBAJAIAQgASAKIg5BAWoiCkECdGovAQIiBkcgA0EBaiIFIAhOckUEQCAFIQMMAQsCfyAFIAdIBEAgACAEQQJ0akH8FGoiAy8BACAFagwBCyAEBEAgBCAJRwRAIAAgBEECdGpB/BRqIgMgAy8BAEEBajsBAAsgDSIDLwEAQQFqDAELIANBCUwEQC
ALIgMvAQBBAWoMAQsgDCIDLwEAQQFqCyEFIAMgBTsBAEEAIQMCfyAGRQRAQQMhB0GKAQwBC0EDQQQgBCAGRiIFGyEHQQZBByAFGwshCCAEIQkLIAIgDkcNAAsLC+EIAQp/AkAgACgCoC1FBEAgAC8BuC0hBSAAKAK8LSEEDAELIABBuS1qIQgDQCADQQFqIQogACgCmC0gA2otAAAhBQJAIAACfyAAKAKkLSADQQF0ai8BACIJRQRAIAEgBUECdGoiBC8BAiEDIAAgAC8BuC0gBC8BACIHIAAoArwtIgR0ciIFOwG4LSAEQRAgA2tKBEAgACAAKAIUIgRBAWo2AhQgBCAAKAIIaiAFOgAAIAAgACgCFCIEQQFqNgIUIAQgACgCCGogCC0AADoAACAAIAdBECAAKAK8LSIEa3YiBTsBuC0gAyAEakFwagwCCyADIARqDAELIAVBoOUAai0AACILQQJ0IgdBgAhyIAFqIgQvAQYhAyAAIAAvAbgtIAQvAQQiDCAAKAK8LSIGdHIiBDsBuC0gAAJ/IAZBECADa0oEQCAAIAAoAhQiBkEBajYCFCAGIAAoAghqIAQ6AAAgACAAKAIUIgRBAWo2AhQgBCAAKAIIaiAILQAAOgAAIAAgDEEQIAAoArwtIgZrdiIEOwG4LSADIAZqQXBqDAELIAMgBmoLIgM2ArwtIAtBeGpBE00EQCAAIAQgBSAHQaDnAGooAgBrQf//A3EiBiADdHIiBDsBuC0gAAJ/IANBECAHQYDkAGooAgAiBWtKBEAgACAAKAIUIgNBAWo2AhQgAyAAKAIIaiAEOgAAIAAgACgCFCIDQQFqNgIUIAMgACgCCGogCC0AADoAACAAIAZBECAAKAK8LSIDa3YiBDsBuC0gAyAFakFwagwBCyADIAVqCyIDNgK8LQsgAiAJQX9qIgcgB0EHdkGAAmogB0GAAkkbQaDoAGotAAAiC0ECdCIJaiIFLwECIQYgACAEIAUvAQAiDCADdHIiBTsBuC0gAAJ/IANBECAGa0oEQCAAIAAoAhQiA0EBajYCFCADIAAoAghqIAU6AAAgACAAKAIUIgNBAWo2AhQgAyAAKAIIaiAILQAAOgAAIAAgDEEQIAAoArwtIgNrdiIFOwG4LSADIAZqQXBqDAELIAMgBmoLIgQ2ArwtIAtBBEkNASAAIAUgByAJQaDsAGooAgBrQf//A3EiByAEdHIiBTsBuC0gBEEQIAlBgNoAaigCACIDa0oEQCAAIAAoAhQiBEEBajYCFCAEIAAoAghqIAU6AAAgACAAKAIUIgRBAWo2AhQgBCAAKAIIaiAILQAAOgAAIAAgB0EQIAAoArwtIgRrdiIFOwG4LSADIARqQXBqDAELIAMgBGoLIgQ2ArwtCyAKIgMgACgCoC1JDQALCyABQYIIai8BACECIAAgBSABLwGACCIBIAR0ciIDOwG4LSAEQRAgAmtKBEAgACAAKAIUIgpBAWo2AhQgCiAAKAIIaiADOgAAIAAgACgCFCIDQQFqNgIUIAMgACgCCGogAEG5LWotAAA6AAAgACABQRAgACgCvC0iAWt2OwG4LSAAIAEgAmpBcGo2ArwtDwsgACACIARqNgK8LQuXAQECfwJAAn8gACgCvC0iAUEJTgRAIAAgACgCFCIBQQFqNgIUIAEgACgCCGogAC0AuC06AAAgACAAKAIUIgFBAWo2AhQgAEG5LWotAAAhAiABIAAoAghqDAELIAFBAUgNASAAIAAoAhQiAUEBajYCFCAALQC4LSECIAEgACgCCGoLIAI6AAALIABBADYCvC0gAEEAOwG4LQvaBAEBfwNAIAAgAUECdGpBADsBlAEgAUEBaiIBQZ4CRw0ACyAAQQA7AfwUIABBADsBiBMgAEHEFWpBADsBACAAQcAVakEAOwEAIABBvBVqQQA7AQAgAEG4FWpBADsBACAAQbQVakEAOwEAIABBsBVqQQA7AQAgAEGsFWpBADsBACAAQagVakEAOwEAIABBpBVqQQA7AQAgAEGgFWpBADsBACAAQZwVakEAOwEAIABBmBVqQQA7AQAgAEGUFWpBADsBACAAQZAVakEAOwEAIABBjBVqQQA7AQAgAEGIFWpBADsBACAAQYQVakEAOwEAIABBgBVqQQA7AQAgAEH8E2pBADsBACAAQfgTakEAOwEAIABB9BNqQQA7AQAgAEHwE2pBADsBACAAQewTakEAOwEAIABB6BNqQQA7AQAgAEHkE2pBADsBACAAQeATakEAOwEAIABB3BNqQQA7AQAgAEHYE2pBADsBACAAQdQTakEAOwEAIABB0BNqQQA7AQAgAEHME2pBADsBACAAQcgTakEAOwEAIABBxBNqQQA7AQAgAEHAE2pBADsBACAAQbwTakEAOwEAIABBuBNqQQA7AQAgAEG0E2pBADsBACAAQbATakEAOwEAIABBrBNqQQA7AQAgAEGoE2pBADsBACAAQaQTakEAOwEAIABBoBNqQQA7AQAgAEGcE2pBADsBACAAQZgTakEAOwEAIABBlBNqQQA7AQAgAEGQE2pBADsBACAAQYwTakEAOwEAIABCADcCrC0gAEGUCWpBATsBACAAQQA2AqgtIABBADYCoC0LngEBAn8gACAALwG4LSADQf//A3EiBCAAKAK8LSIDdHIiBTsBuC0gAAJ/IANBDk4EQCAAIAAoAhQiA0EBajYCFCADIAAoAghqIAU6AAAgACAAKAIUIgNBAWo2AhQgAyAAKAIIaiAAQbktai0AADoAACAAIARBECAAKAK8LSIDa3Y7AbgtIANBc2oMAQsgA0EDags2ArwtIAAgASACEJoEC5cEARB/IAAoAnwiBCAEQQJ2IAAoAngiBCAAKAKMAUkbIQlBACAAKAJsIgIgACgCLGtBhgJqIgMgAyACSxshDCAAKAJ0IgcgACgCkAEiAyADIAdLGyENIAAoAjgiDiACaiIFQYICaiEPIAQgBWoiAi0AACEKIAJBf2otAAAhCyAAKAI0IRAgACgCQCERA0ACQAJAIAEgDmoiAyAEaiICLQAAIApHDQAgAkF/ai0AACALRw0AIAMtAAAgBS0AAEcNAEECIQYgAy0AASAFLQABRw0AA0ACQCAFIAZqIgItAAEgAy0AA0cEQCACQQFqIQIMAQsgAi0AAiADLQAERwRAIAJBAmohAgwBCyACLQADIAMtAAVHBEAgAkEDaiECDAELIAItAAQgAy0ABkcEQCACQQRqIQIMAQsgAi0ABSADLQAHRwRAIAJBBWohAgwBCyACLQAGIAMtAAhHBEAgAkEGaiECDAELIAItAAcgAy0ACUcEQCACQQdqIQIMAQsgBkH5AUshCCAFIAZBCGoiBmohAiAIDQAgAy0ACiEIIANBCGohAyACLQAAIAhGDQELCyACIA9rIgNBggJqIgIgBEwNACAAIAE2AnAgAiANTgRAIAIhBAwCCyACIAVqLQAAIQogAyAFai0AgQIhCyACIQQLIAwgESABIBBxQQF0ai8BACIBTw0AIAlBf2oiCQ0BCwsgByAEIAQgB
0sbC+BGATF/IwBBsIAEayIZJAAgAygCACELIANBADYCACACIARqIjdBe2ogNyAHQQJGIjsbITIgAiEdAn8CQCALIAEiJ2oiOEF0aiI5ICdJDQAgBkH/HyAGQf8fSRshOiA4QXtqIhpBf2ohLyAaQX1qISYgASEeA0AgACgCkIAQIg1BgIAEaiAeIAAoAoSAECIfayIOSyEMIB8gACgCjIAQIhtqIRwgACgCiIAQISogACgCnIAQISsgHigAACEiIAAoApSAECIGIA5JBEADQCAAIAZB//8DcUEBdGpBgIAIaiAGIAAgBiAfahA6QQJ0aiILKAIAayIEQf//AyAEQf//A0kbOwEAIAsgBjYCACAGQQFqIgYgDkkNAAsLIA0gDkGBgHxqIAwbISwgHiAnayEXIAAgDjYClIAQICJB//8DcSAiQRB2RiAiQf8BcSAiQRh2RnEhJSAbICpqITAgHEEEaiESIB5BCGohLiAeQQRqIRMgHkF/aiEWIAAgHhA6QQJ0IiBqKAIAIRRBAyEMQQAhD0EAIS1BACENQQAhESAFISQDQAJAICRFIBQgLElyDQBBACEQAkAgCkEAIA4gFGtBCEkbDQACQAJ/AkACQCAbIBRNBEAgDCAWai8AACAUIB9qIhggDGpBf2ovAABHDQUgIiAYKAAARw0FIBhBBGohBiAmIBNNBH8gEwUgBigAACATKAAAcyIEDQIgBkEEaiEGIC4LIgQgJkkEQANAIAYoAAAgBCgAAHMiCwRAIAsQJSAEaiATayEGDAcLIAZBBGohBiAEQQRqIgQgJkkNAAsLAkAgBCAvTw0AIAYvAAAgBC8AAEcNACAGQQJqIQYgBEECaiEECyAEIBpJBH8gBEEBaiAEIAYtAAAgBC0AAEYbBSAECyATayEGDAQLICIgFCAqaiIEKAAARw0EIARBBGohBgJ/IBMgGiAeIBsgFGtqIhUgFSAaSxsiC0F9aiIYIBNNDQAaIAYoAAAgEygAAHMiBA0CIAZBBGohBiAuCyIEIBhJBEADQCAGKAAAIAQoAABzIhAEQCAQECUgBGogE2sMBQsgBkEEaiEGIARBBGoiBCAYSQ0ACwsCQCAEIAtBf2pPDQAgBi8AACAELwAARw0AIAZBAmohBiAEQQJqIQQLIAQgC0kEfyAEQQFqIAQgBi0AACAELQAARhsFIAQLIBNrDAILIAQQJSEGDAILIAQQJQshBCAUIB9qIA8CfyAEQQRqIhAgHmogC0cgFSAaT3JFBEAgHCEEAn8CQCAmIAsiBksEQCAcKAAAIAsoAABzIgQNASALQQRqIQYgEiEECyAGICZJBEADQCAEKAAAIAYoAABzIg8EQCAPECUgBmogC2sMBAsgBEEEaiEEIAZBBGoiBiAmSQ0ACwsCQCAGIC9PDQAgBC8AACAGLwAARw0AIARBAmohBCAGQQJqIQYLIAYgGkkEfyAGQQFqIAYgBC0AACAGLQAARhsFIAYLIAtrDAELIAQQJQsgEGohEAsgECAMSiIECxshDyAQIAwgBBshDAwBCyAGQQRqIhAgDCAQIAxKIgQbIQwgGCAPIAQbIQ8LICRBf2ohJAJAAkAgDCAQRyAMIBRqIA5LciAQQQRIcg0AIBBBfWohFUEAIQZBECELQQEhBANAIAAgBiAUakH//wNxQQF0akGAgAhqLwEAIhggBCAEIBhJIjEbIQQgBiARIDEbIREgC0EEdSEYQRAgC0EBaiAxGyELIAYgGGoiBiAVSA0ACyAUQQAgBCAUIARJIgYbQQAgBEEBSyIEG2shFCAERQ0AQQNBAiAGGyEGIBAhDAwBCwJAIBENACAAIBRB//8DcUEBdGpBgIAIai8BAEEBRw0AIA1FBEBBASENICVFDQEgEyAaICIQM0EEaiEtQQIhDQsgDUECRyAUQX9qIhggLElyDQBBAiENIBsgGBAyRQ0AICIgKiAfIBggG0kiBBsgGGoiECgAAEcNACAQQQRqIDAgGiAEGyIGICIQM0EEaiELICogACgCkIAQIgRqIRQCQCAYIBtJBEAgBiALIBBqRgRAIBwgGiALICIQPRAzIAtqIQsLIBAgFCAiEDEhDQwBCyAQIBAgHCAiEDEiDWsgHEcgBCAbT3INACAwIBRBACANayAiED0QMSANaiENCyAYIBggDWsiBCAsIAQgLEsbIhRrIAtqIgQgLUkgCyAtS3JFBEAgCyAYIC1raiIEIBsgGyAEEDIbIRRBACERQQIhBkECIQ0MAgtBACERQQIhBiAbIBQQMkUEQEECIQ0gGyEUDAILAkAgDCAEIC0gBCAtSRsiC08EQCAPIQ0gDCELDAELIB4gFCAfaiINa0H//wNKDQMLIBQgACAUQf//A3FBAXRqQYCACGovAQAiBEkEQCANIQ8gCyEMDAMLIBQgBGshFCANIQ9BAiENIAshDAwBCyAUIAAgESAUakH//wNxQQF0akGAgAhqLwEAayEUQQAhBgsgBkEDRw0BCwsCQCAkRSAJQQFHIA4gLGtB/v8DS3JyDQAgDiAgICtqKAIAIhEgLGogKygCgIAQICsoAoSAECISayINayIUa0H//wNLDQADQCAkRQ0BICIgESASaiIEKAAARgRAIARBBGohBgJ/AkACfyATIBogHiANIBFraiIEIAQgGksbIhxBfWoiECATTQ0AGiAGKAAAIBMoAABzIgQNASAGQQRqIQYgLgsiBCAQSQRAA0AgBigAACAEKAAAcyILBEAgCxAlIARqIBNrDAQLIAZBBGohBiAEQQRqIgQgEEkNAAsLAkAgBCAcQX9qTw0AIAYvAAAgBC8AAEcNACAGQQJqIQYgBEECaiEECyAEIBxJBH8gBEEBaiAEIAYtAAAgBC0AAEYbBSAECyATawwBCyAEECULQQRqIgQgDCAEIAxKIgQbIQwgFCAfaiAPIAQbIQ8LICRBf2ohJCARICsgEUH//wNxQQF0akGAgAhqLwEAIgRrIREgDiAUIARrIhRrQYCABEkNAAsLAkACQAJ/AkACQCAMQQROBEAgHiAPayEPQRIgDCAMQW1qQRJJGyAMIAobIhwgOksNASAXQQ5KIgsNAiAXQQFqIQYgFwwDCyAeQQFqIR4MAwsgBwRAIB0gF0H/AW5qIBdqQQlqIDJLDQQLIB1BAWohBgJAIBdBD08EQCAdQfABOgAAIBdBcWoiBEH/AU8EQCAGQf8BIB4gJ2tB8n1qIgRB/wFuIgZBAWoQKBogBkGBfmwgBGohBCAGIB1qQQJqIQYLIAYgBDoAACAGQQFqIQYMAQsgHSAXQQR0OgAACyAGICcgBiAXaiIEEDsgBCAPQf//A3EQLyAcQXxqIQwgBEECaiEEIAcEQCAEIAxB/wFuakEGaiAySw0ECyAdLQAAIQsgDEEPTwRAIB0gC0EPajoAACAcQW1qIgtB/gNPBEAgBEH/ASAcQe97aiIMQf4DbiILQQF0IgRBAmoQKBogC0GCfGwgDGohCyAGIAQgHmogJ2tqQQRqIQQLIAtB/wFPBEAgBEH/AToAACALQYF+aiELIARBAWohBAsgBCAL
OgAAIARBAWohHSAcIB5qIh4hJwwDCyAdIAsgDGo6AAAgHCAeaiIeIScgBCEdDAILIBdBAWoiBiAXQXFqQf8BbWoLIQQgGSAXNgIMIBlCgICAgBA3AgQgGSAENgIAIAYiBEEOSgRAIAYgBkFxakH/AW1qQQFqIQQLIBkgBjYCHCAZQoCAgIAQNwIUIBkgBDYCECAXQQJqIQQCfwJAIBdBDU4EQCAZIAQ2AiwgGUKAgICAEDcCJCAZIBdBA2oiDSAXQXNqQf8BbWo2AiAMAQsgGSAENgIsIBlCgICAgBA3AiQgGSAENgIgIBdBA2oiDSAXQQxHDQEaCyAXIBdBdGpB/wFtakEEagshBCAZIA02AjwgGUKAgICAEDcCNCAZIAQ2AjAgBiAXQXFqQf8BbWogFyALG0EDaiEEQQQhBgNAIAQhCyAGQRNPBEAgBkFtakH/AW0gBGpBAWohCwsgGSAGQQR0aiIMIBc2AgwgDCAPNgIEIAwgBjYCCCAMIAs2AgAgBiAcRyELIAZBAWohBiALDQALQQEhFCAZIBxBBHRqIgZBATYCHCAGQoCAgIAQNwIUIAZCgICAgBA3AiQgBkECNgIsIAZBAzYCPCAGQoCAgIAQNwI0IAYgBigCACIEQQFqNgIQIAYgBEECajYCICAGIARBA2o2AjACQANAIB4gFCIYaiIhIDlNBEAgGSAYQQR0IgRqIjQoAgAhMyAZIBhBAWoiFEEEdGoiNSgCACE2AkACQAJAIAgEQCA2IDNMBEAgBCAZakFAaygCACAzQQNqSA0ECyAAKAKQgBAiDEGAgARqICEgH2siIEshCyAfIAAoAoyAECITaiEbICEoAAAhIyAOICBJBEADQCAAIA5B//8DcUEBdGpBgIAIaiAOIAAgDiAfahA6QQJ0aiIGKAIAayIEQf//AyAEQf//A0kbOwEAIAYgDjYCACAOQQFqIg4gIEkNAAsLIAwgIEGBgHxqIAsbIRcgACAgNgKUgBAgI0H//wNxICNBEHZGICNB/wFxICNBGHZGcSEuIBMgKmohLCAbQQRqIQ8gIUEIaiEiICFBBGohFSAhQX9qITAgACAhEDpBAnQiMWooAgAhDkEDIQxBACESQQAhKUEAIQ1BACERIAUhJANAAkAgJEUgDiAXSXINAEEAIRACQCAKQQAgICAOa0EISRsNAAJAAn8CQAJAIBMgDk0EQCAMIDBqLwAAIA4gH2oiFiAMakF/ai8AAEcNBSAjIBYoAABHDQUgFkEEaiEGICYgFU0EfyAVBSAGKAAAIBUoAABzIgQNAiAGQQRqIQYgIgsiBCAmSQRAA0AgBigAACAEKAAAcyILBEAgCxAlIARqIBVrIQYMBwsgBkEEaiEGIARBBGoiBCAmSQ0ACwsCQCAEIC9PDQAgBi8AACAELwAARw0AIAZBAmohBiAEQQJqIQQLIAQgGkkEfyAEQQFqIAQgBi0AACAELQAARhsFIAQLIBVrIQYMBAsgIyAOICpqIgQoAABHDQQgBEEEaiEGAn8gFSAaICEgEyAOa2oiJSAlIBpLGyILQX1qIhYgFU0NABogBigAACAVKAAAcyIEDQIgBkEEaiEGICILIgQgFkkEQANAIAYoAAAgBCgAAHMiEARAIBAQJSAEaiAVawwFCyAGQQRqIQYgBEEEaiIEIBZJDQALCwJAIAQgC0F/ak8NACAGLwAAIAQvAABHDQAgBkECaiEGIARBAmohBAsgBCALSQR/IARBAWogBCAGLQAAIAQtAABGGwUgBAsgFWsMAgsgBBAlIQYMAgsgBBAlCyEEIA4gH2ogEgJ/ICEgBEEEaiIQaiALRyAlIBpPckUEQCAbIQQCfwJAICYgCyIGSwRAIBsoAAAgCygAAHMiBA0BIAtBBGohBiAPIQQLIAYgJkkEQANAIAQoAAAgBigAAHMiEgRAIBIQJSAGaiALawwECyAEQQRqIQQgBkEEaiIGICZJDQALCwJAIAYgL08NACAELwAAIAYvAABHDQAgBEECaiEEIAZBAmohBgsgBiAaSQR/IAZBAWogBiAELQAAIAYtAABGGwUgBgsgC2sMAQsgBBAlCyAQaiEQCyAQIAxKIgQLGyESIBAgDCAEGyEMDAELIAZBBGoiECAMIBAgDEoiBBshDCAWIBIgBBshEgsgJEF/aiEkAkACQCAMIBBHIAwgDmogIEtyIBBBBEhyDQAgEEF9aiElQQAhBkEQIQtBASEEA0AgACAGIA5qQf//A3FBAXRqQYCACGovAQAiFiAEIAQgFkkiLRshBCAGIBEgLRshESALQQR1IRZBECALQQFqIC0bIQsgBiAWaiIGICVIDQALIA5BACAEIA4gBEkiBhtBACAEQQFLIgQbayEOIARFDQBBA0ECIAYbIQYgECEMDAELAkAgEQ0AIAAgDkH//wNxQQF0akGAgAhqLwEAQQFHDQAgDUUEQEEBIQ0gLkUNASAVIBogIxAzQQRqISlBAiENCyANQQJHIA5Bf2oiFiAXSXINAEECIQ0gEyAWEDJFDQAgIyAqIB8gFiATSSIEGyAWaiIQKAAARw0AIBBBBGogLCAaIAQbIgYgIxAzQQRqIQsgKiAAKAKQgBAiBGohDgJAIBYgE0kEQCAGIAsgEGpGBEAgGyAaIAsgIxA9EDMgC2ohCwsgECAOICMQMSENDAELIBAgECAbICMQMSINayAbRyAEIBNPcg0AICwgDkEAIA1rICMQPRAxIA1qIQ0LIBYgFiANayIEIBcgBCAXSxsiDmsgC2oiBCApSSALIClLckUEQCALIBYgKWtqIgQgEyATIAQQMhshDkEAIRFBAiEGQQIhDQwCC0EAIRFBAiEGIBMgDhAyRQRAQQIhDSATIQ4MAgsCQCAMIAQgKSAEIClJGyILTwRAIBIhDSAMIQsMAQsgISAOIB9qIg1rQf//A0oNAwsgDiAAIA5B//8DcUEBdGpBgIAIai8BACIESQRAIA0hEiALIQwMAwsgDiAEayEOIA0hEkECIQ0gCyEMDAELIA4gACAOIBFqQf//A3FBAXRqQYCACGovAQBrIQ5BACEGCyAGQQNHDQELCwJAICRFIAlBAUcgICAXa0H+/wNLcnINACAgICsgMWooAgAiESAXaiArKAKAgBAgKygChIAQIg9rIg1rIg5rQf//A0sNAANAICRFDQEgIyAPIBFqIgQoAABGBEAgBEEEaiEGAn8CQAJ/IBUgGiAhIA0gEWtqIgQgBCAaSxsiG0F9aiIQIBVNDQAaIAYoAAAgFSgAAHMiBA0BIAZBBGohBiAiCyIEIBBJBEADQCAGKAAAIAQoAABzIgsEQCALECUgBGogFWsMBAsgBkEEaiEGIARBBGoiBCAQSQ0ACwsCQCAEIBtBf2pPDQAgBi8AACAELwAARw0AIAZBAmohBiAEQQJqIQQLIAQgG0kEfyAEQQFqIAQgBi0AACAELQAARhsFIAQLIBVrDAELIAQQJQtBBGoiBCAMIAQgDEoiBBshDCAOIB9qIBIgBBshEgsgJEF/aiEkIBEgKyARQf//A3FBAXRqQYCACGovAQA
iBGshESAgIA4gBGsiDmtBgIAESQ0ACwsgDEEESA0CQRIgDCAMQW1qQRJJGyAMIAobIQ8gISASayEODAELIDYgM0wNAiAAKAKQgBAiDEGAgARqICEgH2siIEshCyAfIAAoAoyAECITaiEbICEoAAAhKCAOICBJBEADQCAAIA5B//8DcUEBdGpBgIAIaiAOIAAgDiAfahA6QQJ0aiIGKAIAayIEQf//AyAEQf//A0kbOwEAIAYgDjYCACAOQQFqIg4gIEkNAAsLIAwgIEGBgHxqIAsbISMgACAgNgKUgBAgKEH//wNxIChBEHZGIChB/wFxIChBGHZGcSEtIBMgKmohIiAbQQRqISQgIUEIaiEXICFBBGohFSAhQX9qIS4gACAhEDpBAnQiMGooAgAhDkEAIRJBACEpQQAhDUEAIREgBSEQIBwgGGsiMSEPA0ACQCAQRSAOICNJcg0AQQAhDAJAIApBACAgIA5rQQhJGw0AAkACfwJAAkAgEyAOTQRAIA8gLmovAAAgDiAfaiIWIA9qQX9qLwAARw0FICggFigAAEcNBSAWQQRqIQYgJiAVTQR/IBUFIAYoAAAgFSgAAHMiBA0CIAZBBGohBiAXCyIEICZJBEADQCAGKAAAIAQoAABzIgsEQCALECUgBGogFWshBgwHCyAGQQRqIQYgBEEEaiIEICZJDQALCwJAIAQgL08NACAGLwAAIAQvAABHDQAgBkECaiEGIARBAmohBAsgBCAaSQR/IARBAWogBCAGLQAAIAQtAABGGwUgBAsgFWshBgwECyAoIA4gKmoiBCgAAEcNBCAEQQRqIQYCfyAVIBogISATIA5raiIlICUgGksbIgtBfWoiFiAVTQ0AGiAGKAAAIBUoAABzIgQNAiAGQQRqIQYgFwsiBCAWSQRAA0AgBigAACAEKAAAcyIMBEAgDBAlIARqIBVrDAULIAZBBGohBiAEQQRqIgQgFkkNAAsLAkAgBCALQX9qTw0AIAYvAAAgBC8AAEcNACAGQQJqIQYgBEECaiEECyAEIAtJBH8gBEEBaiAEIAYtAAAgBC0AAEYbBSAECyAVawwCCyAEECUhBgwCCyAEECULIQQgDiAfaiASAn8gISAEQQRqIgxqIAtHICUgGk9yRQRAIBshBAJ/AkAgJiALIgZLBEAgGygAACALKAAAcyIEDQEgC0EEaiEGICQhBAsgBiAmSQRAA0AgBCgAACAGKAAAcyISBEAgEhAlIAZqIAtrDAQLIARBBGohBCAGQQRqIgYgJkkNAAsLAkAgBiAvTw0AIAQvAAAgBi8AAEcNACAEQQJqIQQgBkECaiEGCyAGIBpJBH8gBkEBaiAGIAQtAAAgBi0AAEYbBSAGCyALawwBCyAEECULIAxqIQwLIAwgD0oiBAsbIRIgDCAPIAQbIQ8MAQsgBkEEaiIMIA8gDCAPSiIEGyEPIBYgEiAEGyESCyAQQX9qIRACQAJAIAwgD0cgDiAPaiAgS3IgDEEESHINACAMQX1qISVBACEGQRAhC0EBIQQDQCAAIAYgDmpB//8DcUEBdGpBgIAIai8BACIWIAQgBCAWSSIsGyEEIAYgESAsGyERIAtBBHUhFkEQIAtBAWogLBshCyAGIBZqIgYgJUgNAAsgDkEAIAQgDiAESSIGG0EAIARBAUsiBBtrIQ4gBEUNAEEDQQIgBhshBiAMIQ8MAQsCQCARDQAgACAOQf//A3FBAXRqQYCACGovAQBBAUcNACANRQRAQQEhDSAtRQ0BIBUgGiAoEDNBBGohKUECIQ0LIA1BAkcgDkF/aiIlICNJcg0AQQIhDSATICUQMkUNACAoICogHyAlIBNJIgQbICVqIhYoAABHDQAgFkEEaiAiIBogBBsiBiAoEDNBBGohCyAqIAAoApCAECIEaiEMAkAgJSATSQRAIAYgCyAWakYEQCAbIBogCyAoED0QMyALaiELCyAWIAwgKBAxIQ0MAQsgFiAWIBsgKBAxIg1rIBtHIAQgE09yDQAgIiAMQQAgDWsgKBA9EDEgDWohDQsgJSAlIA1rIgQgIyAEICNLGyIMayALaiIEIClJIAsgKUtyRQRAIAsgJSApa2oiBCATIBMgBBAyGyEOQQAhEUECIQZBAiENDAILQQAhEUECIQYgEyAMEDJFBEBBAiENIBMhDgwCCwJAIA8gBCApIAQgKUkbIgtPBEAgEiENIA8hCwwBCyAhIAwgH2oiDWtB//8DSg0DCyAMIAAgDEH//wNxQQF0akGAgAhqLwEAIgRJBEAgDSESIAshDwwDCyAMIARrIQ4gDSESQQIhDSALIQ8MAQsgDiAAIA4gEWpB//8DcUEBdGpBgIAIai8BAGshDkEAIQYLIAZBA0cNAQsLAkAgEEUgCUEBRyAgICNrQf7/A0tycg0AICAgKyAwaigCACIRICNqICsoAoCAECArKAKEgBAiDWsiDGsiDmtB//8DSw0AA0AgEEUNASAoIA0gEWoiBCgAAEYEQCAEQQRqIQYCfwJAAn8gFSAaICEgDCARa2oiBCAEIBpLGyIbQX1qIiQgFU0NABogBigAACAVKAAAcyIEDQEgBkEEaiEGIBcLIgQgJEkEQANAIAYoAAAgBCgAAHMiCwRAIAsQJSAEaiAVawwECyAGQQRqIQYgBEEEaiIEICRJDQALCwJAIAQgG0F/ak8NACAGLwAAIAQvAABHDQAgBkECaiEGIARBAmohBAsgBCAbSQR/IARBAWogBCAGLQAAIAQtAABGGwUgBAsgFWsMAQsgBBAlC0EEaiIEIA8gBCAPSiIEGyEPIA4gH2ogEiAEGyESCyAQQX9qIRAgESArIBFB//8DcUEBdGpBgIAIai8BACIEayERICAgDiAEayIOa0GAgARJDQALCyAPIDFMDQEgISASayEOIApFIA9BbWpBEk9yRQRAQRIhDwwBCyAPRQ0BCyAPIDpLBEAgFCEcDAULIA8gGGpB/x9KBEAgFCEcDAULIDMgNCgCDCINQQFqIgYgDUFxakH/AW1qIA0gDUEOShtrIQwgBiIEQQ5KBH8gDSANQXJqQf8BbWpBAmoFIAQLIAxqIgQgNkgEQCA1IAY2AgwgNUKAgICAEDcCBCA1IAQ2AgALIA1BAmoiBiEEIA1BDEoEfyANIA1Bc2pB/wFtakEDagUgBAsgDGoiBCAZIBhBAmpBBHRqIgsoAgBIBEAgCyAGNgIMIAtCgICAgBA3AgQgCyAENgIACyANQQNqIgYhBCANQQxOBH8gDSANQXRqQf8BbWpBBGoFIAQLIAxqIgQgGSAYQQNqQQR0aiILKAIASARAIAsgBjYCDCALQoCAgIAQNwIEIAsgBDYCAAsgD0EETgRAIDRBDHIhDUEEIQYgGSAYQQR0akEIciEMA0AgBiAYaiESAn8gDCgCAEEBRgRAQQAhESAYIA0oAgAiC0oEQCAZIBggC2tBBHRqKAIAIRELIAsiBEEPTgR/IAsgC0FxakH/AW1qQQFqBSAEC0EDaiEEIAZBE08EfyAGQW1qQf8BbSAEakEBagUgBAsgEWoMAQsgNCgCAC
EEQQAhCyAGQRNPBH8gBkFtakH/AW1BBGoFQQMLIARqCyERAkAgEiAcQQNqTARAIBEgGSASQQR0aigCACAKa0oNAQsgGSASQQR0aiIEIAs2AgwgBCAONgIEIAQgBjYCCCAEIBE2AgAgEiAcIBwgEkgbIBwgBiAPRhshHAsgBiAPRiEEIAZBAWohBiAERQ0ACwsgGSAcQQR0aiIGQQE2AhwgBkKAgICAEDcCFCAGQoCAgIAQNwIkIAZBAjYCLCAGQQM2AjwgBkKAgICAEDcCNCAGIAYoAgAiBEEBajYCECAGIARBAmo2AiAgBiAEQQNqNgIwCyAgIQ4LIBwgFEoNAQsLIBwgGSAcQQR0aiIEKAIIIg9rIRggBCgCBCEOCwNAIBkgGEEEdGoiCygCCCEGIAsgDzYCCCALKAIEIQQgCyAONgIEIBggBk4hCyAYIAZrIRggBiEPIAQhDiALDQALQQAhBiAcQQFIDQADQAJ/IBkgBkEEdGoiBCgCCCIPQQFGBEAgHkEBaiEeIAZBAWoMAQsgHiAnayESIAQoAgQhCyAHBEAgHSASQf8BbmogEmpBCWogMksNBAsgHUEBaiENAkAgEkEPTwRAIB1B8AE6AAAgEkFxaiIOQf8BTwRAIA1B/wEgEkHyfWoiBEH/AW4iDEEBahAoGiAMQYF+bCAEaiEOIAwgHWpBAmohDQsgDSAOOgAAIA1BAWohDQwBCyAdIBJBBHQ6AAALIA0gJyANIBJqIgQQOyAEIAtB//8DcRAvIA9BfGohDCAEQQJqIQsgBwRAIAsgDEH/AW5qQQZqIDJLDQQLIB0tAAAhBAJ/IAxBD08EQCAdIARBD2o6AAAgD0FtaiIRQf4DTwRAIAtB/wEgD0Hve2oiDEH+A24iC0EBdCIEQQJqECgaIAtBgnxsIAxqIREgDSAEIB5qICdrakEEaiELCyARQf8BTwRAIAtB/wE6AAAgEUGBfmohESALQQFqIQsLIAsgEToAACALQQFqDAELIB0gBCAMajoAACALCyEdIA8gHmoiHiEnIAYgD2oLIgYgHEgNAAsLIB4gOU0NAQwCCwtBACAHQQJHDQEaCyA4ICdrIgZB8AFqQf8BbiEAAkAgB0UNACAAIAZqIB1qQQFqIDJBBWogNyA7GyIATQ0AQQAgB0EBRg0BGiAdQX9zIABqIgAgAEHwAWpB/wFuayEGCyAGICdqIQUCQCAGQQ9PBEAgHUHwAToAACAdQQFqIQAgBkFxaiIEQf8BSQRAIAAiHSAEOgAADAILIABB/wEgBkHyfWoiAEH/AW4iBEEBahAoGiAEIB1qQQJqIh0gBEGBfmwgAGo6AAAMAQsgHSAGQQR0OgAACyAdQQFqICcgBhAqIQAgAyAFIAFrNgIAIAAgBmogAmsLIQAgGUGwgARqJAAgAAuuPQE0fwJAIARBAExBACAGQQJGGw0AIAMoAgAiCkGAgIDwB0sNACAAIAAoAoCAECAKajYCgIAQQQkgBSAFQQFIGyIFQQwgBUEMSBsiB0EMbCIJQZQWaigCACEuAkACfwJAAn8CfwJAIAdBCU0EQCADQQA2AgAgAiAEaiI3QXtqIDcgBkECRiI4GyEmIAEgCmohMSABISUgAiEJIApBDUgNBCAxQXRqIi8gAUkNBEGANCAHdkEBcSEyIDFBe2oiGEF/aiErIBhBfWohHgNAIAAoApSAECEHIAAoAoiAECEdIAAoAoSAECERICUhDAJAAkADQCAAKAKQgBAiBCAMIBFrIg5BgYB8aiAEQYCABGogDksbISAgACgCjIAQIRAgDCgAACENIAcgDkkEQANAIAAgB0H//wNxQQF0akGAgAhqIAcgACAHIBFqEDpBAnRqIgQoAgBrIgVB//8DIAVB//8DSRs7AQAgBCAHNgIAIAdBAWoiByAOSQ0ACwsgACAONgKUgBACQAJAIAAgDBA6QQJ0aigCACIFICBJDQAgDUH//wNxIA1BEHZGIA1B/wFxIA1BGHZGcSEfIBAgHWohEyAQIBFqIhdBBGohKSAMQQhqIRwgDEEEaiEZIAxBf2ohI0EAIRtBAyEKIC4hCEEAIRoDQAJAAkACfwJAAkAgECAFTQRAIAogI2ovAAAgBSARaiILIApqQX9qLwAARw0FIA0gCygAAEcNBSALQQRqIQcgHiAZTQR/IBkFIAcoAAAgGSgAAHMiBA0CIAdBBGohByAcCyIEIB5JBEADQCAHKAAAIAQoAABzIhYEQCAWECUgBGogGWshBwwHCyAHQQRqIQcgBEEEaiIEIB5JDQALCwJAIAQgK08NACAHLwAAIAQvAABHDQAgB0ECaiEHIARBAmohBAsgBCAYSQR/IARBAWogBCAHLQAAIAQtAABGGwUgBAsgGWshBwwECyANIAUgHWoiBCgAAEcNBCAEQQRqIQcCfyAZIBggDCAQIAVraiIhICEgGEsbIhZBfWoiCyAZTQ0AGiAHKAAAIBkoAABzIgQNAiAHQQRqIQcgHAsiBCALSQRAA0AgBygAACAEKAAAcyIkBEAgJBAlIARqIBlrDAULIAdBBGohByAEQQRqIgQgC0kNAAsLAkAgBCAWQX9qTw0AIAcvAAAgBC8AAEcNACAHQQJqIQcgBEECaiEECyAEIBZJBH8gBEEBaiAEIActAAAgBC0AAEYbBSAECyAZawwCCyAEECUhBwwCCyAEECULIQQgBSARaiAUAn8gBEEEaiILIAxqIBZHICEgGE9yRQRAIBchBAJ/AkAgHiAWIgdLBEAgFygAACAWKAAAcyIEDQEgFkEEaiEHICkhBAsgByAeSQRAA0AgBCgAACAHKAAAcyIUBEAgFBAlIAdqIBZrDAQLIARBBGohBCAHQQRqIgcgHkkNAAsLAkAgByArTw0AIAQvAAAgBy8AAEcNACAEQQJqIQQgB0ECaiEHCyAHIBhJBH8gB0EBaiAHIAQtAAAgBy0AAEYbBSAHCyAWawwBCyAEECULIAtqIQsLIAsgCkoiBAsbIRQgCyAKIAQbIQoMAQsgB0EEaiIEIAogBCAKSiIEGyEKIAsgFCAEGyEUCwJAAkACQCAyRSAAIAVB//8DcUEBdGpBgIAIai8BACIHQQFHcg0AIBtFBEBBASEbIB9FDQEgGSAYIA0QM0EEaiEaQQIhGwsgG0ECRyAFQX9qIgQgIElyDQBBAiEbIBAgBBAyRQ0AIA0gHSARIAQgEEkiFhsgBGoiCygAAEcNACALQQRqIBMgGCAWGyIFIA0QM0EEaiEHIB0gACgCkIAQIhtqIRYCQCAEIBBJBEAgBSAHIAtqRgRAIBcgGCAHIA0QPRAzIAdqIQcLIAsgFiANEDEhBQwBCyALIAsgFyANEDEiBWsgF0cgGyAQT3INACATIBZBACAFayANED0QMSAFaiEFCyAEIAQgBWsiBSAgIAUgIEsbIgVrIAdqIgsgGkkgByAaS3JFBEAgByAEIBpraiIEIBAgECAEEDIbIQVBAiEbDAILQQIhGyAQIAUQMkUEQCAQIQUMAgsCQCAKIAsgGiALIBpJGyIHTwRAIBQhBCAKIQcMAQsgD
CAFIBFqIgRrQf//A0oNAwsgBSAAIAVB//8DcUEBdGpBgIAIai8BACIKSQRAIAQhFCAHIQoMAwsgBSAKayEFIAQhFCAHIQoMAQsgBSAHayEFCyAIQX9qIghFDQAgBSAgTw0BCwsgCkEDTA0AICUhFiAJIQ0gDCEZIBQiCSEbIAohEANAIAkhFAJAAkAgDCAKIhdqIiUgL0sNACAAKAKQgBAiBSAlQX5qIhEgACgChIAQIh9rIgRBgYB8aiAFQYCABGogBEsbISMgACgCjIAQIRwgACgCiIAQISQgESgAACETIAAoApSAECIHIARJBEADQCAAIAdB//8DcUEBdGpBgIAIaiAHIAAgByAfahA6QQJ0aiIFKAIAayIJQf//AyAJQf//A0kbOwEAIAUgBzYCACAHQQFqIgcgBEkNAAsLIAAgBDYClIAQIAAgERA6QQJ0aigCACIFICNJDQAgE0H//wNxIBNBEHZGIBNB/wFxIBNBGHZGcSEwIBwgJGohLCAcIB9qIiBBBGohHSARQQhqIS0gEUEEaiEaIAwgEWshKEEAISFBACARIAxrIiprITMgDEF/aiE0IBchCiAuISlBACEOIA8hCQNAAkACQAJ/AkACQCAcIAVNBEAgCiA0ai8AACAFIB9qIgggM2ogCmpBf2ovAABHDQUgEyAIKAAARw0FAkAgKkUEQEEAIQsMAQsgKCAgIAhrIgQgKCAEShsiD0EfdSAPcSEEQQAhBwNAIAciCyAPTARAIAQhCwwCCyARIAtBf2oiB2otAAAgByAIai0AAEYNAAsLIAhBBGohByAeIBpNBH8gGgUgBygAACAaKAAAcyIEDQIgB0EEaiEHIC0LIgQgHkkEQANAIAcoAAAgBCgAAHMiDwRAIA8QJSAEaiAaayEHDAcLIAdBBGohByAEQQRqIgQgHkkNAAsLAkAgBCArTw0AIAcvAAAgBC8AAEcNACAHQQJqIQcgBEECaiEECyAEIBhJBH8gBEEBaiAEIActAAAgBC0AAEYbBSAECyAaayEHDAQLIBMgBSAkaiIPKAAARw0EIA9BBGohByAAKAKQgBAhNQJ/IBogGCARIBwgBWtqIicgJyAYSxsiCEF9aiILIBpNDQAaIAcoAAAgGigAAHMiBA0CIAdBBGohByAtCyIEIAtJBEADQCAHKAAAIAQoAABzIjYEQCA2ECUgBGogGmsMBQsgB0EEaiEHIARBBGoiBCALSQ0ACwsCQCAEIAhBf2pPDQAgBy8AACAELwAARw0AIAdBAmohByAEQQJqIQQLIAQgCEkEfyAEQQFqIAQgBy0AACAELQAARhsFIAQLIBprDAILIAQQJSEHDAILIAQQJQshBCARIARBBGoiC2ogCEcgJyAYT3JFBEAgICEEAn8CQCAeIAgiB0sEQCAgKAAAIAgoAABzIgQNASAIQQRqIQcgHSEECyAHIB5JBEADQCAEKAAAIAcoAABzIicEQCAnECUgB2ogCGsMBAsgBEEEaiEEIAdBBGoiByAeSQ0ACwsCQCAHICtPDQAgBC8AACAHLwAARw0AIARBAmohBCAHQQJqIQcLIAcgGEkEfyAHQQFqIAcgBC0AACAHLQAARhsFIAcLIAhrDAELIAQQJQsgC2ohCwsCQCAqRQRAQQAhBAwBCyAoICQgNWogD2siBCAoIARKGyInQR91ICdxIQhBACEHA0AgByIEICdMBEAgCCEEDAILIBEgBEF/aiIHai0AACAHIA9qLQAARg0ACwsgCyAEayIHIApMDQEgBCARaiEVIAUgH2ogBGohCSAHIQoMAQsgByALa0EEaiIEIApMDQAgCyARaiEVIAggC2ohCSAEIQoLAkACQAJAIDJFIAAgBUH//wNxQQF0akGAgAhqLwEAIgdBAUdyDQAgIUUEQEEBISEgMEUNAUECISEgGiAYIBMQM0EEaiEOCyAhQQJHIAVBf2oiBCAjSXINAEECISEgHCAEEDJFDQAgEyAkIB8gBCAcSSIPGyAEaiIIKAAARw0AIAhBBGogLCAYIA8bIgUgExAzQQRqIQcgJCAAKAKQgBAiC2ohDwJAIAQgHEkEQCAFIAcgCGpGBEAgICAYIAcgExA9EDMgB2ohBwsgCCAPIBMQMSEFDAELIAggCCAgIBMQMSIFayAgRyALIBxPcg0AICwgD0EAIAVrIBMQPRAxIAVqIQULIAQgBCAFayIFICMgBSAjSxsiD2sgB2oiCCAOSSAHIA5LckUEQCAHIAQgDmtqIgQgHCAcIAQQMhshBQwCCyAPIBwgHCAPEDIiBBshBSAqIARFcg0BAkAgCiAIIA4gCCAOSRsiB08EQCAVIQQgCSEIIAohBwwBCyARIgQgDyAfaiIIa0H//wNKDQMLIA8gACAPQf//A3FBAXRqQYCACGovAQAiBUkEQCAEIRUgCCEJIAchCgwDCyAPIAVrIQUgBCEVIAghCSAHIQoMAQsgBSAHayEFCyApQX9qIilFDQAgBSAjTw0BCwsgCiAXRw0BIAkhDwsgDCAWayEKIAYEQCANIApB/wFuaiAKakEJaiAmSw0KCyANQQFqIQQCQCAKQQ9PBEAgDUHwAToAACAKQXFqIgVB/wFPBEAgBEH/ASAKQfJ9aiIFQf8BbiIEQQFqECgaIARBgX5sIAVqIQUgBCANakECaiEECyAEIAU6AAAgBEEBaiEEDAELIA0gCkEEdDoAAAsgBCAWIAQgCmoiCRA7IAkgDCAUa0H//wNxEC8gF0F8aiEFIAlBAmohCSAGBEAgCSAFQf8BbmpBBmogJksNCgsgDS0AACEHIAVBD08EQCANIAdBD2o6AAAgF0FtaiIFQf4DTwRAIAlB/wEgF0Hve2oiBUH+A24iCUEBdCIHQQJqECgaIAlBgnxsIAVqIQUgBCAHIApqakEEaiEJCyAFQf8BTwRAIAlB/wE6AAAgCUEBaiEJIAVBgX5qIQULIAkgBToAACAJQQFqIQkMBwsgDSAFIAdqOgAADAYLIBkgDCAZIAxJIBUgDCAQaklxIgQbIQ4gCSEPIBUiDCAOa0EDSA0AIBAgFyAEGyEZIBsgFCAEGyEUIBYhEQNAIA4gGWoiFkEDaiEzIA4gGUESIBlBEkgbIixqIS0CQANAAkACQAJ/AkAgDCAOayIEQRFKDQAgDiAMayAEIApqQXxqICwgLSAKIAxqQXxqSxtqIgRBAUgNACAKIARrIRAgBCAJaiEPIAQgDGoMAQsgCSEPIAohECAMCyIVIBBqIiUgL0sNACAAKAKQgBAiBSAlQX1qIhcgACgChIAQIhxrIgRBgYB8aiAFQYCABGogBEsbISMgACgCjIAQIRMgACgCiIAQISQgFygAACEdIAAoApSAECIHIARJBEADQCAAIAdB//8DcUEBdGpBgIAIaiAHIAAgByAcahA6QQJ0aiIFKAIAayIJQf//AyAJQf//A0kbOwEAIAUgBzYCACAHQQFqIgcgBEkNAAsLIAAgBDYClIAQIAAgFxA6QQJ0aigCACIFICNJDQAgHUH//wNxIB1BEHZGIB1B/wFxIB1BGHZGcSE0IBMg
JGohJyATIBxqIhpBBGohICAXQQhqITAgF0EEaiEbIBUgF2shKEEAISFBACAXIBVrIiprITUgFUF/aiE2IBAhCiAuISlBACEfIBIhCSAiIQwDQAJAAkACfwJAAkAgEyAFTQRAIAogNmovAAAgBSAcaiIIIDVqIApqQX9qLwAARw0FIB0gCCgAAEcNBQJAICpFBEBBACELDAELICggGiAIayIEICggBEobIhJBH3UgEnEhBEEAIQcDQCAHIgsgEkwEQCAEIQsMAgsgFyALQX9qIgdqLQAAIAcgCGotAABGDQALCyAIQQRqIQcgHiAbTQR/IBsFIAcoAAAgGygAAHMiBA0CIAdBBGohByAwCyIEIB5JBEADQCAHKAAAIAQoAABzIhIEQCASECUgBGogG2shBwwHCyAHQQRqIQcgBEEEaiIEIB5JDQALCwJAIAQgK08NACAHLwAAIAQvAABHDQAgB0ECaiEHIARBAmohBAsgBCAYSQR/IARBAWogBCAHLQAAIAQtAABGGwUgBAsgG2shBwwECyAdIAUgJGoiEigAAEcNBCASQQRqIQcgACgCkIAQITkCfyAbIBggFyATIAVraiIiICIgGEsbIghBfWoiCyAbTQ0AGiAHKAAAIBsoAABzIgQNAiAHQQRqIQcgMAsiBCALSQRAA0AgBygAACAEKAAAcyI6BEAgOhAlIARqIBtrDAULIAdBBGohByAEQQRqIgQgC0kNAAsLAkAgBCAIQX9qTw0AIAcvAAAgBC8AAEcNACAHQQJqIQcgBEECaiEECyAEIAhJBH8gBEEBaiAEIActAAAgBC0AAEYbBSAECyAbawwCCyAEECUhBwwCCyAEECULIQQgFyAEQQRqIgtqIAhHICIgGE9yRQRAIBohBAJ/AkAgHiAIIgdLBEAgGigAACAIKAAAcyIEDQEgCEEEaiEHICAhBAsgByAeSQRAA0AgBCgAACAHKAAAcyIiBEAgIhAlIAdqIAhrDAQLIARBBGohBCAHQQRqIgcgHkkNAAsLAkAgByArTw0AIAQvAAAgBy8AAEcNACAEQQJqIQQgB0ECaiEHCyAHIBhJBH8gB0EBaiAHIAQtAAAgBy0AAEYbBSAHCyAIawwBCyAEECULIAtqIQsLAkAgKkUEQEEAIQQMAQsgKCAkIDlqIBJrIgQgKCAEShsiIkEfdSAicSEIQQAhBwNAIAciBCAiTARAIAghBAwCCyAXIARBf2oiB2otAAAgByASai0AAEYNAAsLIAsgBGsiByAKTA0BIAQgF2ohDCAFIBxqIARqIQkgByEKDAELIAcgC2tBBGoiBCAKTA0AIAsgF2ohDCAIIAtqIQkgBCEKCwJAAkACQCAyRSAAIAVB//8DcUEBdGpBgIAIai8BACIHQQFHcg0AICFFBEBBASEhIDRFDQEgGyAYIB0QM0EEaiEfQQIhIQsgIUECRyAFQX9qIgQgI0lyDQBBAiEhIBMgBBAyRQ0AIB0gJCAcIAQgE0kiEhsgBGoiCCgAAEcNACAIQQRqICcgGCASGyIFIB0QM0EEaiEHICQgACgCkIAQIiJqIRICQCAEIBNJBEAgBSAHIAhqRgRAIBogGCAHIB0QPRAzIAdqIQcLIAggEiAdEDEhBQwBCyAIIAggGiAdEDEiBWsgGkcgIiATT3INACAnIBJBACAFayAdED0QMSAFaiEFCyAEIAQgBWsiBSAjIAUgI0sbIhJrIAdqIgggH0kgByAfS3JFBEAgByAEIB9raiIEIBMgEyAEEDIbIQUMAgsgEiATIBMgEhAyIgQbIQUgKiAERXINAQJAIAogCCAfIAggH0kbIgdPBEAgDCEEIAkhCCAKIQcMAQsgFyIEIBIgHGoiCGtB//8DSg0DCyASIAAgEkH//wNxQQF0akGAgAhqLwEAIgVJBEAgBCEMIAghCSAHIQoMAwsgEiAFayEFIAQhDCAIIQkgByEKDAELIAUgB2shBQsgKUF/aiIpRQ0AIAUgI08NAQsLIAogEEcNASAJIRIgDCEiCyAOIBFrIQQgBgRAIA0gBEH/AW5qIARqQQlqICZLDQcLIBUgDmsgGSAVIBZJGyEJIA1BAWohBQJAIARBD08EQCANQfABOgAAIARBcWoiB0H/AU8EQCAFQf8BIARB8n1qIgpB/wFuIgVBAWoQKBogBUGBfmwgCmohByAFIA1qQQJqIQULIAUgBzoAACAFQQFqIQUMAQsgDSAEQQR0OgAACyAFIBEgBCAFaiIHEDsgByAOIBRrQf//A3EQLyAJQXxqIQogB0ECaiEHIAYEQCAHIApB/wFuakEGaiAmSw0HCyANLQAAIQwCQCAKQQ9PBEAgDSAMQQ9qOgAAIAlBbWoiC0H+A08EQCAHQf8BIAlB73tqIgpB/gNuIgdBAXQiDEECahAoGiAHQYJ8bCAKaiELIAUgBCAMampBBGohBwsgC0H/AU8EQCAHQf8BOgAAIAtBgX5qIQsgB0EBaiEHCyAHIAs6AAAgB0EBaiEHDAELIA0gCiAMajoAAAsgFSAJIA5qIgRrIQkgBgRAIAcgCUH/AW5qIAlqQQlqICZLDQkLIAdBAWohBQJAIAlBD08EQCAHQfABOgAAIAlBcWoiCEH/AU8EQCAFQf8BIAlB8n1qIgpB/wFuIgVBAWoQKBogBUGBfmwgCmohCCAFIAdqQQJqIQULIAUgCDoAACAFQQFqIQUMAQsgByAJQQR0OgAACyAFIAQgBSAJaiIJEDsgCSAVIA9rQf//A3EQLyAQQXxqIQogCUECaiEJIAYEQCAJIApB/wFuakEGaiAmSw0JCyAHLQAAIQwgCkEPTwRAIAcgDEEPajoAACAQQW1qIgdB/gNPBEAgCUH/ASAQQe97aiIJQf4DbiIKQQF0IgxBAmoQKBogCkGCfGwgCWohByAFIAwgFWogBGtqQQRqIQkLIAdB/wFPBEAgCUH/AToAACAHQYF+aiEHIAlBAWohCQsgCSAHOgAAIAlBAWohCQwKCyAHIAogDGo6AAAMCQsgDCAzTw0BIAwhIiAJIRIgDCAWSQ0ACwJAIBUgFk8NACAQIBYgFWsiBGsiEEEDSgRAIAQgD2ohDyAWIRUMAQsgDCEVIAkhDyAKIRALIA4gEWshByAGBEAgDSAHQf8BbmogB2pBCWogJksNBQsgDUEBaiEEAkAgB0EPTwRAIA1B8AE6AAAgB0FxaiIFQf8BTwRAIARB/wEgB0HyfWoiBUH/AW4iBEEBahAoGiAEQYF+bCAFaiEFIAQgDWpBAmohBAsgBCAFOgAAIARBAWohBAwBCyANIAdBBHQ6AAALIAQgESAEIAdqIgUQOyAFIA4gFGtB//8DcRAvIBlBfGohCCAFQQJqIQUgBgRAIAUgCEH/AW5qQQZqICZLDQULIA0tAAAhFAJ/IAhBD08EQCANIBRBD2o6AAAgGUFtaiIIQf4DTwRAIAVB/wEgGUHve2oiBUH+A24iCEEBdCIUQQJqECgaIAhBgnxsIAVqIQggBCAHIBRqakEEaiEFCyAIQf8BTwRAIAVB/wE6AAAgCEGBfmohCCAFQQF
qIQULIAUgCDoAACAFQQFqDAELIA0gCCAUajoAACAFCyENIAwhIiAJIRIgFSEZIA8hGwwCCwJ/IBUgFk8EQCAZIQggEAwBCyAQIBUgDmsiCEERSg0AGiAQIAggEGpBfGogLCAtIBAgFWpBfGpLGyIIIA4gFWtqIgRBAUgNABogBCAPaiEPIAQgFWohFSAQIARrCyEZIA4gEWshByAGBEAgDSAHQf8BbmogB2pBCWogJksNBAsgDUEBaiEEAkAgB0EPTwRAIA1B8AE6AAAgB0FxaiIFQf8BTwRAIARB/wEgB0HyfWoiBUH/AW4iBEEBahAoGiAEQYF+bCAFaiEFIAQgDWpBAmohBAsgBCAFOgAAIARBAWohBAwBCyANIAdBBHQ6AAALIAQgESAEIAdqIgUQOyAFIA4gFGtB//8DcRAvIAhBfGohFCAFQQJqIQUgBgRAIAUgFEH/AW5qQQZqICZLDQQLIA0tAAAhEgJ/IBRBD08EQCANIBJBD2o6AAAgCEFtaiILQf4DTwRAIAVB/wEgCEHve2oiBUH+A24iFEEBdCISQQJqECgaIBRBgnxsIAVqIQsgBCAHIBJqakEEaiEFCyALQf8BTwRAIAVB/wE6AAAgC0GBfmohCyAFQQFqIQULIAUgCzoAACAIIA5qIREgFSEOIAVBAWoMAQsgDSASIBRqOgAAIAggDmohESAVIQ4gBQshDSAPIRQgDCEiIAkhEgwACwALAAsgDiEHIAxBAWoiDCAvTQ0BDAkLCyARDAULIAQhJSAHDAULICUgL00NAAsMBAsgACABIAIgAyAEIC4gCUGYFmooAgAgBiAFQQtKQQAgAC0AmoAQQQBHEJACDAQLIBYLISUgDQshCUEAIQcgBkECRw0CCyAxICVrIgdB8AFqQf8BbiEEAkAgBkUNACAEIAdqIAlqQQFqICZBBWogNyA4GyIETQ0AQQAhByAGQQFGDQIgCUF/cyAEaiIEIARB8AFqQf8BbmshBwsgByAlaiEFAkAgB0EPTwRAIAlB8AE6AAAgCUEBaiEEIAdBcWoiBkH/AUkEQCAEIgkgBjoAAAwCCyAEQf8BIAdB8n1qIgZB/wFuIgRBAWoQKBogBCAJakECaiIJIARBgX5sIAZqOgAADAELIAkgB0EEdDoAAAsgCUEBaiAlIAcQKiEEIAMgBSABazYCACAEIAdqIAJrCyIHQQBKDQELIABBAToAm4AQCyAHCzsBAX8gAEUgAEEDcXIEfyABBSAAQQA2ApyAECAAQv////8PNwKAgBAgAEEAOwGagBAgAEEJELABIAALCx8BAX8gAEGAgIDwB00EfyAAIABB/wFuakEQagUgAQsLxwIAIAAgAS0AADoAACAAIAEtAAE6AAEgACABLQACOgACIAAgAS0AAzoAAyAAIAEtAAQ6AAQgACABLQAFOgAFIAAgAS0ABjoABiAAIAEtAAc6AAcgACABLQAIOgAIIAAgAS0ACToACSAAIAEtAAo6AAogACABLQALOgALIAAgAS0ADDoADCAAIAEtAA06AA0gACABLQAOOgAOIAAgAS0ADzoADyAAIAEtABA6ABAgACABLQAROgARIAAgAS0AEjoAEiAAIAEtABM6ABMgACABLQAUOgAUIAAgAS0AFToAFSAAIAEtABY6ABYgACABLQAXOgAXIAAgAS0AGDoAGCAAIAEtABk6ABkgACABLQAaOgAaIAAgAS0AGzoAGyAAIAEtABw6ABwgACABLQAdOgAdIAAgAS0AHjoAHiAAIAEtAB86AB8gAEEgagsNACAAIABBBm5qQSBqCz4AEMICENUCQdQNQQJB+A9B8w9BCkELEANB3w1BBkGQDkH8DUEMQQ0QA0HoDUEBQfgNQfQNQQ5BDxADEPQCC0UBBH8gASAAIAEgAEsbIQMDQCAAIAFPBEAgAw8LIAAtAAAhBCACLQAAIQUgAEEBaiIGIQAgAkEBaiECIAQgBUYNAAsgBgsrAQF/EIsDIgRFBEBBQA8LIAQgACABIAIgAyAEEIYDEIUDIQAgBBCJAyAAC6QBAQF/IwBBQGoiBCQAIAQgADYCFCAEIAM2AgwgBCACNgIIIAEoAgAhACAEQgA3AyggBCAANgIYAkAgBEEIahCNBCICDQAgBEEIahCMBCIAQQFHBEAgBEEIahCIAhpBfSECAkACQCAAQQVqDggAAQEBAQEBAwELIAQoAgxFDQILIAAhAgwBCyABIAQoAhw2AgAgBEEIahCIAiECCyAEQUBrJAAgAgvABgEQf0F/IQUCQCAARQ0AIANFBEAgAkEBRw0BQX9BACAALQAAGw8LIAJFDQAgASADaiIIQWBqIQ8gACACaiIJQXBqIRAgCEF7aiERIAhBeWohCiAJQXtqIQwgCUF4aiESIAhBdGohDSAJQXFqIQ4gACECIAEhBQJAA0ACQCACQQFqIQMCQAJAAkAgAi0AACIHQQR2IgJBD0cEQCAFIA9LIAMgEE9yDQEgBSADKQAANwAAIAUgAykACDcACCACIAVqIgYgAiADaiICLwAAIgtrIQQgAkECaiECIAdBD3EiBUEPRgRAIAIhAwwDCyALQQhJBEAgAiEDDAMLIAQgAUkNAyAGIAQpAAA3AAAgBiAEKQAINwAIIAYgBC8AEDsAECAFIAZqQQRqIQUMBQtBACECIAMgDk8NBQNAAkAgAiADLQAAIgRqIQIgA0EBaiIDIA5PDQAgBEH/AUYNAQsLIAJBD2oiAiAFQX9zSyACIANBf3NLcg0FCyACIAVqIgYgDU1BACACIANqIgQgEk0bRQRAIAQgCUcgBiAIS3INBSAFIAMgAhBKGiAGIAFrIQUMBgsgBSADIAYQOyAHQQ9xIQUgBEECaiEDIAYgBC8AACILayEECyAFQQ9HBEAgAyECDAELIAMgDCADIAxLGyEHQQAhBQNAIANBAWohAiADIAdGDQIgBSADLQAAIhNqIQUgAiEDIBNB/wFGDQALIAVBD2oiBSAGQX9zSw0DCyAEIAFJDQAgBiAFQQRqIgdqIQUCfyALQQdNBEAgBkEAEDQgBiAELQAAOgAAIAYgBC0AAToAASAGIAQtAAI6AAIgBiAELQADOgADIAYgBCALQQJ0IgNB0BVqKAIAaiIEKAAANgAEIAQgA0HwFWooAgBrDAELIAYgBCkAADcAACAEQQhqCyEDIAZBCGohBCAFIA1LBEAgBSARSw0BIAQgCkkEQCAEIAMgChA7IAMgCiAEa2ohAyAKIQQLIAQgBU8NAgNAIAQgAy0AADoAACADQQFqIQMgBEEBaiIEIAVHDQALDAILIAQgAykAADcAACAHQRFJDQEgBkEQaiADQQhqIAUQOwwBCwsgAiEDCyADQX9zIABqDwsgBQsWAEEAIAIgAyAAIAEQmAIiACAAECEbCzkBAX8jAEEQayIEJAAgBCADNgIMIAIgBEEMaiAAIAEQmQIhACAEKAIMIQEgBEEQaiQAQQAgASAAGws5AQF/IwBBEGsiBCQAIAQgAzYCDCAAIAEgAiAEQQxqEIoEIQAgBCgCDCEBIARBEGokAEEAIA
EgABsLDQAgACACIAEgAxCaAguXAwEIfwJAIAFFDQAgAiADaiEKIAAgAWohBSAAQQFqIQEgAC0AAEEfcSEGIAIhBANAAkACfyAGQSBPBEACQCAGQQV2QX9qIgNBBkYEQCABIQBBBiEDA0AgAEEBaiIBIAVPDQcgAyAALQAAIgdqIQMgASEAIAdB/wFGDQALDAELIAEgBU8NBQsgAUEBaiEAIAQgBkEIdEGAPnEiCGsgAS0AACILayEHIAhBgD5HIAtB/wFHckUEQCABQQJqIAVPDQUgBCABLQACIAEtAAFBCHRya0GBQGohByABQQNqIQALIAMgBGpBA2ogCksNBCAHQX9qIgEgAkkNBCAAIAVPBH9BAAUgAC0AACEGIABBAWohAEEBCyEIIAQgB0YEQCAEIAEtAAAgA0EDaiIBECggAWohBCAADAILIAQgASADQQNqEMQEIQQgAAwBCyAEIAZBAWoiA2ogCksNAyABIANqIgAgBUsNAyAEIAEgAxBQIQQgACAFTw0BQQEhCCAALQAAIQYgAEEBagshASAIDQELCyAEIAJrIQkLIAkLnwEBAn8gACgCECECQXshAQJAAkACQAJAAkACQAJAIAAoAgwtAABBBXYOBQABAgMEBgtBdyEBIAJBAUcNBSAAQRI2AkAMBAtBdyEBIAJBAUcNBCAAQRM2AkAMAwtBdyEBIAJBAUcNAyAAQRQ2AkAMAgtBdyEBIAJBAUcNAiAAQRU2AkAMAQtBdyEBIAJBAUcNASAAQRY2AkALQQAhAQsgAQsHACAAKAIEC6QCAQR/IAAgAzYCMCAAIAI2AgggACABNgIEIABBADYCACAAQQA2AkwgAEEBNgJEIABBADYCLCABLQAAIQUgAS0AASECIAAgAUECajYCDCAAIAI2AhAgACABLQADIgc2AiggACABKAAEIgI2AhQgACABKAAIIgQ2AiQgASgADCEGIAAgAUEQajYCNCAAIAY2AhgCQCACRSAEQdbSqtUCS3IgBEEBSCAEIANLcnIgB0UgBUECR3JyDQAgAS0AAkEIcQ0AIAAgAiAEIAIgBG0iBWxrIgQ2AiAgACAFIARBAEpqNgIcIAIgA0oNAAJAIAEtAAJBAnEEQCACQRBqIAZGDQEMAgsgABCgAg0BIAAoAhwgACgCGEFwakEEbUoNAQsgABCHARoLCysBAX8jAEHQEWsiAyQAIANBADYCUCADQQhqIAAgASACEKICIANB0BFqJAALyQIBAn9BASEEAkAgAkEESA0AAkACQAJAIAMEQCADQYABIANBgAFKGyIDQdbSqtUCIANB1tKq1QJJGyEEDAELIAIiBEGAgAJIDQBBgIACIQQgACgCOCIDQX5qIgVBA00EQCAFQQJ0QcAUaigCACEECwJAAkACQAJAAkACQCABDgoAAQYCAwMEBAQFBgsgBEECdiEEDAcLIARBAXYhBAwFCyAEQQF0IQQMBAsgBEECdCEEDAMLIARBA3QhBAwCCyAEQQN0IQAgA0EFSwRAIAAhBAwCC0EBIAN0QTRxRQRAIAAhBAwCCyAEQQR0IQQMAQsgAUEBSA0BIAAoAjghAwsgA0EEIAQQsgFFDQAgBEGAgAQgBEGAgARIG0ECdCIAQYCABCAAQYCABEobIQQLIAIgBCAEIAJKGyIEQQVIDQAgBCAEQQRvayEECyAEC/UCAQN/IwBBEGsiBCQAIAAoAghBAjoAAAJ/IAAoAjgiA0EGTwRAIARBmtQBNgIAQegRIAQQT0GPEkEvEHJBewwBCyAAKAIIQQE6AAEgACAAKAIIIgJBAmo2AgwgAkEAOgACIAAoAgggACgCKDoAAyAAKAIIQQRqIAAoAhQQNCAAKAIIQQhqIAAoAiQQNCAAIAAoAghBEGo2AjQgACAAKAIcQQJ0QRBqNgIsIAAoAjxFBEAgACgCDCICIAItAABBAnI6AAAgAEEQNgIsCyAAKAIUQf8ATARAIAAoAgwiAiACLQAAQQJyOgAAIABBEDYCLAtCgMCAgYSMICADrUIDhoinIQJBASEDAkACQAJAIAFBf2oOAgEAAgtBBCEDCyAAKAIMIgEgAS0AACADcjoAAAsgACgCDCIBIAAoAjggACgCKCAAKAIkELIBRUEEdCABLQAAcjoAACAAKAIMIgAgAC0AACACcjoAAEEBCyEAIARBEGokACAAC/sBAQF/IwBBIGsiCSQAIAAgBjYCMCAAIAU2AgggACAENgIEIABBATYCACAAQQA2AkwgAEEBNgJEIAAgBzYCOCAAQgQ3AiggACADNgIUIAAgATYCPAJ/IANB8P///wdPBEAgCUHv////BzYCAEGGEyAJEE9BfwwBCyAGQQ9NBEAgCUEQNgIQQbATIAlBEGoQT0F/DAELIAFBCk8EQEHjE0EsEHJBdgwBCyACQQNPBEBBkBRBLhByQXYMAQsgACAAIAEgAyAIEKQCIgE2AiQgACADIAEgAyABbSICbGsiATYCICAAIAIgAUEASmo2AhxBAQshACAJQSBqJAAgAAtZAQF/IwBBoAZrIgUkACAFQQhqENADIAVBCGogACABIAIgAyAEENIDIQEgBUEIaiIAEPYBIABBgAJqIAAoApgDIAAoApwDIAAoAqADEKQBIAVBoAZqJAAgAQuQAQEBfyMAQUBqIgUkACAFIAA2AhQgBSADNgIMIAUgAjYCCCABKAIAIQAgBUEANgIwIAVCADcDKCAFIAA2AhgCQCAFQQhqIAQQqAQiBA0AIAVBCGoQqwQiAEEBRwRAIABBeyAAGyEEIAVBCGoQrwEaDAELIAEgBSgCHDYCACAFQQhqEK8BIQQLIAVBQGskACAECzEBAn8Cf0EAQbiAEBBMIgUiBhCSAkUNABogBiAAIAEgAiADIAQQsgQLIQAgBRA4IAALKwEBfyMAQaCAAWsiBSQAIAUgACABIAIgAyAEELMEIQAgBUGggAFqJAAgAAsqAQF/IAAgASAAKAIEIgNHBH8gAyABIAIQKhogACgCBAUgAQsgAmo2AgQLaQIBfwF+IAEgAG4hBUGM7AEtAABFBEAQhgFBjOwBQQE6AAALIAVBB3FFBEAgAiADIAUgACAEQaDsASgCABEPACEGIAMgACAFbCIAaiAAIAJqIAEgAGsQKhogBqcPCyADIAIgARAqGiAFCysAQYzsAS0AAEUEQBCGAUGM7AFBAToAAAsgACABIAIgA0GY7AEoAgARCAALxQsCEn8BfCMAQYCAAmsiCyQAIABB0BRqIQcgAEHaFGohCQJ/IABBA3RB8BRqKwMAIAK3oiIYmUQAAAAAAADgQWMEQCAYqgwBC0GAgICAeAshBiABIAJqIQggBy0AACEHIAktAAAhDkEAIQADQCALIABBAXRqQQA7AQAgAEEBaiIAIAd2RQ0ACwJ/QQAgAkEESA0AGkEAIARBwgBIDQAaIAhBfmohDCADIAQgBiAGIARKG2ohDSADQR86AAAgAyABLQAAOgABIAMgAS0AAToAAiADQQNqIQRBAiEGIAFBAmohACACQQ9OBEAgCEF0aiEPIAxBAmohEkEgI
AdrIRBBACEHA0ACfwJ/AkACQCAALQAAIgkgAEF/ai0AAEcEQCAALQACIQIgAC0AASEIDAELIAlBCHQgCXIgAC0AASIIIAAtAAIiAkEIdHJHDQAgAEECaiEIIABBA2ohBwwBCyAFQQAgACABIAsgCEEIdCAJciACQRB0ciAALQADQRh0ckGx893xeWwgEHZBAXRqIggvAQBqIgprIgJBH3EbRQRAIAggACABazsBAAsgAEEBaiEIIAJBf2oiCUH8vwRPBEBBACAEQQJqIgIgDUsNBhogBCAALQAAOgAAIARBAWohBCAIIAZBAWoiBkH/AXFBIEcNAxogBEEfOgAAQQAiBiAHQQFqIgcgDksNBhogAiEEIAgMAwsCQCAKLQAAIhMgCi0AASIUQQh0ciAKLQACIhVBEHRyIAotAANBGHRyIAAtAAAiESAALQABIhZBCHRyIAAtAAIiF0EQdHIgAC0AA0EYdHJGBEBBBCEHIApBBGohCAwBCyARIBNHIBQgFkdyIBUgF0dyRQRAIApBA2ohCEEDIQcMAQtBACAEQQJqIgAgDUsNBhogBCAROgAAIARBAWohBCAIIAZBAWoiBkH/AXFBIEcNAxogBEEfOgAAQQAiBiAHQQFqIgcgDksNBhogACEEIAgMAwsgACAHaiEHIAlFDQAgByASIAgQlwIMAQtBASECQQAhCSAHIAwgCBDFBAshCAJAIAZB/wFxBEAgBkF/c0GAfnIgBGogBkF/ajoAAAwBCyAEQX9qIQQLQQAgBCAIQX1qIgYgAGsiAEH/AW5qQQZqIA1LDQMaAn8gCUH+P00EQCAAQQZNBEAgBCAAQQV0IAlBCHZqOgAAIARBAmohACAEQQFqDAILIAQgCUEIdkFgajoAACAEQQFqIQIgAEF5aiIHQf8BTwRAIAJB/wEgAEH6fWoiAkH/AW4iAEEBahAoGiAAQYF+bCACaiEHIAAgBGoiAEECaiECIABBAWohBAsgAiAHOgAAIARBA2ohACAEQQJqDAELIAJBgEBqIQkgAEEGTQRAIARB/wE6AAEgBCAJQQh2OgACIAQgAEEFdEEfcjoAACAEQQRqIQAgBEEDagwBCyAEQf8BOgAAIARBAWohAiAAQXlqIgdB/wFPBEAgAkH/ASAAQfp9aiICQf8BbiIAQQFqECgaIABBgX5sIAJqIQcgACAEaiIAQQJqIQIgAEEBaiEECyACIAc6AAAgBCAJQQh2OgADIARB/wE6AAIgBEEFaiEAIARBBGoLIAk6AAAgBiAPSQRAIAsgBi0AACAIQX5qLQAAQQh0ciAIQX9qLQAAQRB0ciAILQAAQRh0ckGx893xeWwgEHZBAXRqIAYgAWs7AQALIABBHzoAACAAQQFqIQRBACEGQQAhByAIQX9qCyIAIA9JDQALCyAAIAxBAWpNBEADQEEAIARBAmoiASANSw0CGiAEIAAtAAA6AAAgBEEBaiEEIAZBAWoiBkH/AXFBIEYEQCAEQR86AABBACEGIAEhBAsgACAMTSEBIABBAWohACABDQALCwJAIAZB/wFxBEAgBkF/c0GAfnIgBGogBkF/ajoAAAwBCyAEQX9qIQQLIAMgAy0AAEEgcjoAACAEIANrCyEGIAtBgIACaiQAIAYLJgBBACACIAMgACABIARBAXRBf2pBFiAEQQlIGxCnAiIAIAAQIRsLOwEBfyMAQRBrIgUkACAFIAM2AgwgAiAFQQxqIAAgASAEEKgCIQAgBSgCDCEBIAVBEGokAEEAIAEgABsLOQEBfyMAQRBrIgQkACAEIAM2AgwgACABIAIgBEEMahDGBCEAIAQoAgwhASAEQRBqJABBACABIAAbC2kCAX8BfiABIABuIQVBjOwBLQAARQRAEIYBQYzsAUEBOgAACyAFQQdxRQRAIAIgAyAFIAAgBEGc7AEoAgARDwAhBiADIAAgBWwiAGogACACaiABIABrECoaIAanDwsgAyACIAEQKhogBQsrAEGM7AEtAABFBEAQhgFBjOwBQQE6AAALIAAgASACIANBlOwBKAIAEQgAC8YFARd/A0ACQCAAKAIAIgEoAkxFBEAgASgCJCIFIAEoAihBAnRqIQsgACgCCCEGIAEoAgghCCABKAIEIQkgASgCNCENIAEoAiAhDiABKAIcIQIgASgCMCEUIAEoAgAhDyABKAIMLQAAIQECQCAFIAAoAhRMBEAgACgCECEQIAAoAgwhBwwBCyAGEDggACALIAVBAXRqEHkiBjYCCCAAIAUgBmoiBzYCDCAAIAcgC2oiEDYCEAsCfyABQQJxIhFFIA9BAEdxIhIEQCAAKAIAIgQgBCgCxBFBAWoiAzYCxBEgAgwBCyACIAIgACgCACIEKAJEIgFtIgMgAiABIANsa0EASmoiASAAKAIEbCIDIAFqIgEgASACShsLIRNBACEMIAMgE04NASAJQRBqIRUgCEEQaiEWIAJBf2ohF0EAIQoDQCAEKALAEUEBSA0CIA4gBSADIBdGIA5BAEpxIgIbIQFBASAKIAIbIQoCQCAPBEAgAyAFbCECIBEEQCACIBZqIAIgCWogARBQGgwCCyAEIAEgCkEAIAsgAiAJaiAHIAYgEBC0ASEBDAELIBEEQCAIIAMgBWwiAmogAiAVaiABEFAaDAELIAQgASAKIAkgDSADQQJ0aigAACAIIAMgBWxqIAYgBxCzASEBCyAAKAIAIgIoAsARQQFIDQIgAUF/TARAIAIgATYCwBEMAwsCQCASBEAgDSADQQJ0aiACKAIsIgQQNCAAKAIAIQIgAUEAIAEgBGogFEwbRQRAIAJBADYCwBEMBgsgAiACKALEEUEBaiIDNgLEESACIAIoAiwgAWo2AiwgBCAIaiAHIAEQUBoMAQsgASAMaiEMIANBAWohAwsgAyATTg0CIAAoAgAhBAwACwALIAAoAggQOCAAEDhBAA8LIBINACAAKAIAIgEoAsARQQFIDQAgASABKAIsIAxqNgIsDAALAAvyAQEIfyMAQSBrIgIkACAAQoGAgIBwNwLAESAAQZQRaiIFEAkaIAVBABAIGgJAIAAoAkRBAUgNAANAAkAgACAEQQJ0aiIGQdAIaiAENgIAQRgQeSIBIAQ2AgQgASAANgIAIAEgACgCJCIDIAAoAihBAnRqIgcgA0EBdGoQeSIDNgIIIAEgACgCJCIINgIUIAEgAyAIaiIDNgIMIAEgAyAHajYCECAGQdAAaiAFQREgARAaIgENACAEQQFqIgQgACgCREgNAQwCCwsgAiABNgIQQaURIAJBEGoQTyACIAEQuAE2AgBB1REgAhBPCyACQSBqJAALHAAgACAAKAIIIAFrNgIIIAAgACgCBCABajYCBAuBAQEDfyMAQSBrIgEkACAAKAJIQQFOBEAgAEEBNgJMA0AgACACQQJ0aigCUCABQRxqEAsiAwRAIAEgAzYCEEHYEiABQRBqEE8gASADELgBNgIAQdURIAEQTwsgAkEBaiICIAAoAkhIDQALIABBlBFqEAoaCyAAQQA2AkggAUEgaiQAC3UBAn8jAEEQayICJAAC
QCAAKAJEIgFBgQJOBEAgAkGAAjYCAEG7ECACEE8MAQsgAUEATARAQfkQQSsQcgwBCyAAAn9BASABQQFGDQAaIAEgASAAKAJIRg0AGiAAELcCIAAQtQIgACgCRAs2AkgLIAJBEGokAAv/AgEIfyAAKAIsIQQgACgCKEECdCAAKAIkQQF0ahB5IQUgACgCHCIGQQFOBEAgBSAAKAIkaiEIA0ACQCAAKAIARQ0AIAAoAgwtAABBAnENACAAKAI0IANBAnRqIAQQNCAAKAIcIQYLQQAhByAAKAIkIgIhASAGQX9qIANGBEAgACgCICIBIAIgAUEASiIHGyEBCyAAKAIMLQAAQQJxIQYCQCAAKAIABEAgBgRAIAIgA2wiAiAAKAIIakEQaiAAKAIEIAJqIAEQUBoMAgsgACABIAcgBCAAKAIwIAAoAgQgAiADbGogACgCCCAEaiAFIAgQtAEiAQ0BIAUQOEEADwsgBgRAIAIgA2wiAiAAKAIIaiAAKAIEIAJqQRBqIAEQUBoMAQsgACABIAcgACgCBCAAKAI0IANBAnRqKAAAIAAoAgggAiADbGogBSAIELMBIQELIAFBAEgEQCAFEDggAQ8LIAEgBGohBCADQQFqIgMgACgCHCIGSA0ACwsgBRA4IAQLEQAgASAAKAIINgIAIAAoAgQLhwEBAn8CQCAAKAIMLQAAQQJxBEAgACgCFEEQaiAAKAIwSg0BC0F/IQIgABCHASIBQQBIDQACQCABDQBBACEBIAAoAhRBEGogACgCMEoNACAAKAIMIgEgAS0AAEECcjoAACAAQRA2AiwgABCHASIBQQBIDQELIAAoAghBDGogARA0IAEhAgsgAgtUACAAQZgQEF1FBEBBAA8LIABBoBAQXUUEQEEBDwsgAEGkEBBdRQRAQQIPCyAAQaoQEF1FBEBBAw8LIABBsRAQXUUEQEEEDwtBf0EFIABBthAQXRsLIgEBfiABIAKtIAOtQiCGhCAEIAARFAAiBUIgiKcQBCAFpwseAQF+IAEgAiADIAQgBSAAEQ8AIgZCIIinEAQgBqcLKQAgACgCACABKAIANgIAIAAoAgAgASgCBDYCBCAAIAAoAgBBCGo2AgALBABCAAsEAEEACz4BA38DQCAAQQR0IgFBhO0BaiABQYDtAWoiAjYCACABQYjtAWogAjYCACAAQQFqIgBBwABHDQALQTAQtgEaCxsAIAAgASgCCCAFEEMEQCABIAIgAyAEEIwBCwuWAgEGfyAAIAEoAgggBRBDBEAgASACIAMgBBCMAQ8LIAEtADUhByAAKAIMIQYgAUEAOgA1IAEtADQhCCABQQA6ADQgAEEQaiIJIAEgAiADIAQgBRCJASAHIAEtADUiCnIhByAIIAEtADQiC3IhCAJAIAZBAkgNACAJIAZBA3RqIQkgAEEYaiEGA0AgAS0ANg0BAkAgCwRAIAEoAhhBAUYNAyAALQAIQQJxDQEMAwsgCkUNACAALQAIQQFxRQ0CCyABQQA7ATQgBiABIAIgAyAEIAUQiQEgAS0ANSIKIAdyIQcgAS0ANCILIAhyIQggBkEIaiIGIAlJDQALCyABIAdB/wFxQQBHOgA1IAEgCEH/AXFBAEc6ADQLkgEAIAAgASgCCCAEEEMEQCABIAIgAxCLAQ8LAkAgACABKAIAIAQQQ0UNAAJAIAIgASgCEEcEQCABKAIUIAJHDQELIANBAUcNASABQQE2AiAPCyABIAI2AhQgASADNgIgIAEgASgCKEEBajYCKAJAIAEoAiRBAUcNACABKAIYQQJHDQAgAUEBOgA2CyABQQQ2AiwLCzQBAX8jAEEQayICJAAgAiAANgIEIAIgASkCADcCCCACQQRqIAJBCGoQvwIgAkEQaiQAIAALoQQBBH8gACABKAIIIAQQQwRAIAEgAiADEIsBDwsCQCAAIAEoAgAgBBBDBEACQCACIAEoAhBHBEAgASgCFCACRw0BCyADQQFHDQIgAUEBNgIgDwsgASADNgIgIAEoAixBBEcEQCAAQRBqIgUgACgCDEEDdGohCCABAn8CQANAAkAgBSAITw0AIAFBADsBNCAFIAEgAiACQQEgBBCJASABLQA2DQACQCABLQA1RQ0AIAEtADQEQEEBIQMgASgCGEEBRg0EQQEhB0EBIQYgAC0ACEECcQ0BDAQLQQEhByAGIQMgAC0ACEEBcUUNAwsgBUEIaiEFDAELCyAGIQNBBCAHRQ0BGgtBAws2AiwgA0EBcQ0CCyABIAI2AhQgASABKAIoQQFqNgIoIAEoAiRBAUcNASABKAIYQQJHDQEgAUEBOgA2DwsgACgCDCEGIABBEGoiBSABIAIgAyAEEHogBkECSA0AIAUgBkEDdGohBiAAQRhqIQUCQCAAKAIIIgBBAnFFBEAgASgCJEEBRw0BCwNAIAEtADYNAiAFIAEgAiADIAQQeiAFQQhqIgUgBkkNAAsMAQsgAEEBcUUEQANAIAEtADYNAiABKAIkQQFGDQIgBSABIAIgAyAEEHogBUEIaiIFIAZJDQAMAgsACwNAIAEtADYNASABKAIkQQFGBEAgASgCGEEBRg0CCyAFIAEgAiADIAQQeiAFQQhqIgUgBkkNAAsLC28BAn8gACABKAIIQQAQQwRAIAEgAiADEIoBDwsgACgCDCEEIABBEGoiBSABIAIgAxC3AQJAIARBAkgNACAFIARBA3RqIQQgAEEYaiEAA0AgACABIAIgAxC3ASABLQA2DQEgAEEIaiIAIARJDQALCwsZACAAIAEoAghBABBDBEAgASACIAMQigELCzIAIAAgASgCCEEAEEMEQCABIAIgAxCKAQ8LIAAoAggiACABIAIgAyAAKAIAKAIcEQgAC/MBACAAIAEoAgggBBBDBEAgASACIAMQiwEPCwJAIAAgASgCACAEEEMEQAJAIAIgASgCEEcEQCABKAIUIAJHDQELIANBAUcNAiABQQE2AiAPCyABIAM2AiACQCABKAIsQQRGDQAgAUEAOwE0IAAoAggiACABIAIgAkEBIAQgACgCACgCFBEMACABLQA1BEAgAUEDNgIsIAEtADRFDQEMAwsgAUEENgIsCyABIAI2AhQgASABKAIoQQFqNgIoIAEoAiRBAUcNASABKAIYQQJHDQEgAUEBOgA2DwsgACgCCCIAIAEgAiADIAQgACgCACgCGBELAAsLOAAgACABKAIIIAUQQwRAIAEgAiADIAQQjAEPCyAAKAIIIgAgASACIAMgBCAFIAAoAgAoAhQRDAALoAIBBH8jAEFAaiIBJAAgACgCACICQXxqKAIAIQMgAkF4aigCACEEIAFB7OQBNgIQIAEgADYCDCABQfjkATYCCEEAIQIgAUEUakEAQSsQKBogACAEaiEAAkAgA0H45AFBABBDBEAgAUEBNgI4IAMgAUEIaiAAIABBAUEAIAMoAgAoAhQRDAAgAEEAIAEoAiBBAUYbIQIMAQsgAyABQQhqIABBAUEAIAMoAgAoAhgRCwACQAJAIAEoAiwOAgABAgsgASgCHEEAIAEoAihBAUYbQQA
gASgCJEEBRhtBACABKAIwQQFGGyECDAELIAEoAiBBAUcEQCABKAIwDQEgASgCJEEBRw0BIAEoAihBAUcNAQsgASgCGCECCyABQUBrJAAgAgudAQEBfyMAQUBqIgMkAAJ/QQEgACABQQAQQw0AGkEAIAFFDQAaQQAgARDNAiIBRQ0AGiADQQhqQQRyQQBBNBAoGiADQQE2AjggA0F/NgIUIAMgADYCECADIAE2AgggASADQQhqIAIoAgBBASABKAIAKAIcEQgAIAMoAiAiAEEBRgRAIAIgAygCGDYCAAsgAEEBRgshACADQUBrJAAgAAsKACAAIAFBABBDCwwAIAAQjQEaIAAQOAsHACAAKAIECwkAIAAQjQEQOAsGAEG54wELPwEBf0EZEG0iAUEANgIIIAFCjICAgMABNwIAIAFBDGoiAUGx4wEpAAA3AAUgAUGs4wEpAAA3AAAgACABNgIAC4EBAQN/IwBBEGsiACQAAkAgAEEMaiAAQQhqEBQNAEH07AEgACgCDEECdEEEahBMIgE2AgAgAUUNACAAKAIIEEwiAUUEQEH07AFBADYCAAwBC0H07AEoAgAiAiAAKAIMQQJ0akEANgIAIAIgARATRQ0AQfTsAUEANgIACyAAQRBqJAALjgIBAX9BASECAkAgAAR/IAFB/wBNDQECQEHc7AEoAgBFBEAgAUGAf3FBgL8DRg0DDAELIAFB/w9NBEAgACABQT9xQYABcjoAASAAIAFBBnZBwAFyOgAAQQIPCyABQYCwA09BACABQYBAcUGAwANHG0UEQCAAIAFBP3FBgAFyOgACIAAgAUEMdkHgAXI6AAAgACABQQZ2QT9xQYABcjoAAUEDDwsgAUGAgHxqQf//P00EQCAAIAFBP3FBgAFyOgADIAAgAUESdkHwAXI6AAAgACABQQZ2QT9xQYABcjoAAiAAIAFBDHZBP3FBgAFyOgABQQQPCwtBsOwBQRk2AgBBfwUgAgsPCyAAIAE6AABBAQsJACAAKAI8EBULuAEBAX8gAUEARyECAkACQAJAIAFFIABBA3FFcg0AA0AgAC0AAEUNAiAAQQFqIQAgAUF/aiIBQQBHIQIgAUUNASAAQQNxDQALCyACRQ0BAkAgAC0AAEUgAUEESXINAANAIAAoAgAiAkF/cyACQf/9+3dqcUGAgYKEeHENASAAQQRqIQAgAUF8aiIBQQNLDQALCyABRQ0BCwNAIAAtAABFBEAgAA8LIABBAWohACABQX9qIgENAAsLQQALgwECA38BfgJAIABCgICAgBBUBEAgACEFDAELA0AgAUF/aiIBIABCCoAiBUJ2fiAAfKdBMHI6AAAgAEL/////nwFWIQIgBSEAIAINAAsLIAWnIgIEQANAIAFBf2oiASACQQpuIgNBdmwgAmpBMHI6AAAgAkEJSyEEIAMhAiAEDQALCyABCy0AIABQRQRAA0AgAUF/aiIBIACnQQdxQTByOgAAIABCA4giAEIAUg0ACwsgAQs1ACAAUEUEQANAIAFBf2oiASAAp0EPcUGw1AFqLQAAIAJyOgAAIABCBIgiAEIAUg0ACwsgAQvPAgEDfyMAQdABayIDJAAgAyACNgLMAUEAIQIgA0GgAWpBAEEoECgaIAMgAygCzAE2AsgBAkBBACABIANByAFqIANB0ABqIANBoAFqEI4BQQBIDQAgACgCTEEATgRAQQEhAgsgACgCACEEIAAsAEpBAEwEQCAAIARBX3E2AgALIARBIHEhBQJ/IAAoAjAEQCAAIAEgA0HIAWogA0HQAGogA0GgAWoQjgEMAQsgAEHQADYCMCAAIANB0ABqNgIQIAAgAzYCHCAAIAM2AhQgACgCLCEEIAAgAzYCLCAAIAEgA0HIAWogA0HQAGogA0GgAWoQjgEgBEUNABogAEEAQQAgACgCJBEBABogAEEANgIwIAAgBDYCLCAAQQA2AhwgAEEANgIQIAAoAhQaIABBADYCFEEACxogACAAKAIAIAVyNgIAIAJFDQALIANB0AFqJAAL1AIBB38jAEEgayIDJAAgAyAAKAIcIgQ2AhAgACgCFCEFIAMgAjYCHCADIAE2AhggAyAFIARrIgE2AhQgASACaiEEQQIhByADQRBqIQECfwJAAkAgACgCPCADQRBqQQIgA0EMahAFEI8BRQRAA0AgBCADKAIMIgVGDQIgBUF/TA0DIAEgBSABKAIEIghLIgZBA3RqIgkgBSAIQQAgBhtrIgggCSgCAGo2AgAgAUEMQQQgBhtqIgkgCSgCACAIazYCACAEIAVrIQQgACgCPCABQQhqIAEgBhsiASAHIAZrIgcgA0EMahAFEI8BRQ0ACwsgBEF/Rw0BCyAAIAAoAiwiATYCHCAAIAE2AhQgACABIAAoAjBqNgIQIAIMAQsgAEEANgIcIABCADcDECAAIAAoAgBBIHI2AgBBACAHQQJGDQAaIAIgASgCBGsLIQQgA0EgaiQAIAQLJAAgAEELTwR/IABBEGpBcHEiACAAQX9qIgAgAEELRhsFQQoLC0IBAX8jAEEQayIDJAAgACgCPCABpyABQiCIpyACQf8BcSADQQhqEA8QjwEhACADKQMIIQEgA0EQaiQAQn8gASAAGwshAQJ/IAAQ8gNBAWoiARBMIgJFBEBBAA8LIAIgACABECoLKgEBfyMAQRBrIgEkACABIAA2AgwgASgCDBCQARDgAiEAIAFBEGokACAACyoBAX8jAEEQayIAJAAgAEGmygE2AgxByMoBQQcgACgCDBAAIABBEGokAAsqAQF/IwBBEGsiACQAIABBh8oBNgIMQfDKAUEGIAAoAgwQACAAQRBqJAALKgEBfyMAQRBrIgAkACAAQZnIATYCDEGYywFBBSAAKAIMEAAgAEEQaiQACyoBAX8jAEEQayIAJAAgAEH7xwE2AgxBwMsBQQQgACgCDBAAIABBEGokAAudAQECfyACQXBJBEACQCACQQpNBEAgACACOgALIAAhAwwBCyAAIAIQ3gJBAWoiBBBtIgM2AgAgACAEQYCAgIB4cjYCCCAAIAI2AgQLIAIiAARAIAMgASAAECoaCyACIANqQQA6AAAPC0EIEA4iASICIgBB0OMBNgIAIABB/OMBNgIAIABBBGoQ1AIgAkGs5AE2AgAgAUG45AFBEBANAAsHACAAKAIICyoBAX8jAEEQayIAJAAgAEGHxgE2AgxBsM0BQQAgACgCDBAAIABBEGokAAsqAQF/IwBBEGsiACQAIABBmMUBNgIMQYjoASAAKAIMQQgQBiAAQRBqJAALKgEBfyMAQRBrIgAkACAAQZLFATYCDEH85wEgACgCDEEEEAYgAEEQaiQACy4BAX8jAEEQayIAJAAgAEGExQE2AgxB8OcBIAAoAgxBBEEAQX8QASAAQRBqJAALNgEBfyMAQRBrIgAkACAAQf/EATYCDEHk5wEgACgCDEEEQYCAgIB4Qf////8HEAEgAEEQaiQACy4BAX8jAEEQayIAJAAgAEHyxAE2AgxB2OcBIAAoAgxBBEEAQX8QASAAQRBqJAALNgEBfy
MAQRBrIgAkACAAQe7EATYCDEHM5wEgACgCDEEEQYCAgIB4Qf////8HEAEgAEEQaiQACzABAX8jAEEQayIAJAAgAEHfxAE2AgxBwOcBIAAoAgxBAkEAQf//AxABIABBEGokAAsyAQF/IwBBEGsiACQAIABB2cQBNgIMQbTnASAAKAIMQQJBgIB+Qf//ARABIABBEGokAAsvAQF/IwBBEGsiACQAIABBy8QBNgIMQZznASAAKAIMQQFBAEH/ARABIABBEGokAAswAQF/IwBBEGsiACQAIABBv8QBNgIMQajnASAAKAIMQQFBgH9B/wAQASAAQRBqJAALMAEBfyMAQRBrIgAkACAAQbrEATYCDEGQ5wEgACgCDEEBQYB/Qf8AEAEgAEEQaiQACyYBAX8jAEEQayIAJAAgAEGk7AE2AgwgACgCDBoQwwEgAEEQaiQAC+gLAg9/AX4jAEHwAGsiByQAIAcgACgC8OEBIgg2AlQgASACaiEOIAggACgCgOIBaiEPIAEhCgJAAkAgBUUNACAAKALE4AEhECAAKALA4AEhESAAKAK84AEhDSAAQQE2AozhASAHIABBtNABaigCADYCRCAHIABBrNABaiISKQIANwI8IAdBEGogAyAEEEUQIQRAQWwhAAwCCyAHQTxqIRMgB0EkaiAHQRBqIAAoAgAQaCAHQSxqIAdBEGogACgCCBBoIAdBNGogB0EQaiAAKAIEEGggDkFgaiEUA0ACQAJAIAVFIAdBEGoQI0ECS3JFBEAgBygCKCAHKAIkQQN0aiIALQACIQIgBygCOCAHKAI0QQN0aiIELQACIQMgBCgCBCEMIAAoAgQhBAJAIAcoAjAgBygCLEEDdGoiCC0AAiIARQRAQQAhCQwBCyAIKAIEIQggBkUgAEEZSXJFBEAgCCAHQRBqIABBICAHKAIUayIIIAggAEsbIggQQiAAIAhrIgB0aiEJIAdBEGoQIxogAEUNAyAHQRBqIAAQQiAJaiEJDAMLIAdBEGogABBCIAhqIQkgB0EQahAjGiAAQQFLDQILAkACQAJAAkACQCAJIARFaiIADgQEAQEAAQsgBygCPEF/aiIAIABFaiEJDAELIABBAnQgB2ooAjwiCCAIRWohCSAAQQFGDQELIAcgBygCQDYCRAsgByAHKAI8NgJAIAcgCTYCPAwDCyAHKAI8IQkMAgsgBQRAQWwhAAwFC0FsIQAgB0EQahAjQQJJDQQgEiATKQIANwIAIBIgEygCCDYCCCAHKAJUIQgMAwsgBykCPCEWIAcgCTYCPCAHIBY3A0ALIAIgA2ohACADBH8gB0EQaiADEEIFQQALIQggAEEUTwRAIAdBEGoQIxoLIAggDGohCyACBH8gB0EQaiACEEIFQQALIQggB0EQahAjGiAHIAcoAiggBygCJEEDdGoiAC8BACAHQRBqIAAtAAMQRmo2AiQgByAHKAI4IAcoAjRBA3RqIgAvAQAgB0EQaiAALQADEEZqNgI0IAdBEGoQIxogByAHKAIwIAcoAixBA3RqIgAvAQAgB0EQaiAALQADEEZqNgIsIAcgBCAIaiIANgJYIAcgCTYCYCAHIAs2AlwgBygCVCEMIAcgACAKaiIEIAlrIgI2AmgCfwJAIAogACALaiIDaiAUTQRAIAAgDGoiFSAPTQ0BCyAHIAcpA2A3AwggByAHKQNYNwMAIAogDiAHIAdB1ABqIA8gDSARIBAQkwEMAQsgCiAMEBwCQCAAQRFJDQAgCkEQaiAMQRBqIggQHCAKQSBqIAxBIGoQHCAAQXBqQSFIDQAgCkEwaiEAA0AgACAIQSBqIgwQHCAAQRBqIAhBMGoQHCAMIQggAEEgaiIAIARJDQALCyAHIBU2AlQgByAENgJsAkAgCSAEIA1rSwRAQWwgCSAEIBFrSw0CGiAQIAIgDWsiAGoiAiALaiAQTQRAIAQgAiALEEoaDAILIAQgAkEAIABrEEohAiAHIAAgC2oiCzYCXCAHIAIgAGsiBDYCbCAHIA02AmggDSECCyAJQRBPBEAgBCACEBwgBEEQaiACQRBqEBwgC0EhSA0BIAQgC2ohCCAEQSBqIQADQCAAIAJBIGoiBBAcIABBEGogAkEwahAcIAQhAiAAQSBqIgAgCEkNAAsMAQsgB0HsAGogB0HoAGogCRB8IAtBCUkNACALIAcoAmwiCGpBeGohBCAIIAcoAmgiAGtBD0wEQANAIAggABBnIABBCGohACAIQQhqIgggBEkNAAwCCwALIAggABAcIAhBEGogAEEQahAcIAtBKUgNACAIQSBqIQgDQCAIIABBIGoiAhAcIAhBEGogAEEwahAcIAIhACAIQSBqIgggBEkNAAsLIAMLIQAgBUF/aiEFIAAgCmohCiAAECFFDQALDAELQbp/IQAgDyAIayICIA4gCmtLDQAgCiAIIAIQKiACaiABayEACyAHQfAAaiQAIAALkBgCGX8CfiMAQdABayIHJAAgByAAKALw4QEiCDYCtAEgASACaiESIAggACgCgOIBaiETIAEhCgJAIAUEQCAAKALE4AEhECAAKALA4AEhFCAAKAK84AEhDiAAQQE2AozhASAHIABBtNABaigCADYCXCAHIABBrNABaiIXKQIANwJUIAcgEDYCZCAHIA42AmAgByABIA5rNgJoQWwhDyAHQShqIAMgBBBFECENASAFQQQgBUEESBshFiAHQTxqIAdBKGogACgCABBoIAdBxABqIAdBKGogACgCCBBoIAdBzABqIAdBKGogACgCBBBoQQAhCCAFQQBKIQICQCAFQQFIIAdBKGoQI0ECS3INACAHQeAAaiELIAdB5ABqIQwDQCAHKAJAIAcoAjxBA3RqIgAtAAIhAyAHKAJQIAcoAkxBA3RqIgItAAIhBCACKAIEIQ0gACgCBCEJQQAhAAJAAkAgBygCSCAHKAJEQQN0aiIKLQACIgIEQCAKKAIEIQACQCAGBEAgACAHQShqIAJBGCACQRhJGyIAEEIgAiAAayIKdGohACAHQShqECMaIApFDQEgB0EoaiAKEEIgAGohAAwBCyAHQShqIAIQQiAAaiEAIAdBKGoQIxoLIAJBAUsNAQsCQAJAAkACQAJAIAAgCUVqIgIOBAQBAQABCyAHKAJUQX9qIgAgAEVqIQAMAQsgAkECdCAHaigCVCIAIABFaiEAIAJBAUYNAQsgByAHKAJYNgJcCyAHIAcoAlQ2AlggByAANgJUDAILIAcoAlQhAAwBCyAHKQJUISAgByAANgJUIAcgIDcDWAsgAyAEaiECIAQEfyAHQShqIAQQQgVBAAshCiACQRRPBEAgB0EoahAjGgsgCiANaiEEIAMEfyAHQShqIAMQQgVBAAshAiAHQShqECMaIAcgAiAJaiIKIAcoAmhqIgMgBGo2AmggDCALIAAgA0sbKAIAIQkgByAHKAJAIAcoAjxBA3RqIgIvAQAgB0EoaiACLQADEEZqNgI8IAcgBygCUCAHKAJMQQN0aiICLwEAIAdBKGogAi0AAxBGajYCTCAHQShqECMaIAcoAkggB
ygCREEDdGoiAi8BACENIAdBKGogAi0AAxBGIREgB0HwAGogCEEEdGoiAiADIAlqIABrNgIMIAIgADYCCCACIAQ2AgQgAiAKNgIAIAcgDSARajYCRCAIQQFqIgggFkghAiAHQShqECMhACAIIBZODQEgAEEDSQ0ACwsgAg0BIAggBUghAiAHQShqECMhAAJAIAggBU4EQCABIQoMAQsgAEECSwRAIAEhCgwBCyASQWBqIRogB0HgAGohGyAHQeQAaiEcIAEhCgNAIAcoAkAgBygCPEEDdGoiAC0AAiEDIAcoAlAgBygCTEEDdGoiBC0AAiECIAQoAgQhDCAAKAIEIQRBACELAkACQCAHKAJIIAcoAkRBA3RqIgktAAIiAARAIAkoAgQhCQJAIAYEQCAJIAdBKGogAEEYIABBGEkbIgkQQiAAIAlrIgl0aiELIAdBKGoQIxogCUUNASAHQShqIAkQQiALaiELDAELIAdBKGogABBCIAlqIQsgB0EoahAjGgsgAEEBSw0BCwJAAkACQAJAAkAgCyAERWoiAA4EBAEBAAELIAcoAlRBf2oiACAARWohCwwBCyAAQQJ0IAdqKAJUIgkgCUVqIQsgAEEBRg0BCyAHIAcoAlg2AlwLIAcgBygCVDYCWCAHIAs2AlQMAgsgBygCVCELDAELIAcpAlQhICAHIAs2AlQgByAgNwNYCyACIANqIQAgAgR/IAdBKGogAhBCBUEACyECIABBFE8EQCAHQShqECMaCyACIAxqIRggAwR/IAdBKGogAxBCBUEACyEAIAdBKGoQIxogByAAIARqIh0gBygCaGoiGSAYajYCaCAcIBsgCyAZSxsoAgAhHiAHIAcoAkAgBygCPEEDdGoiAC8BACAHQShqIAAtAAMQRmo2AjwgByAHKAJQIAcoAkxBA3RqIgAvAQAgB0EoaiAALQADEEZqNgJMIAdBKGoQIxogByAHKAJIIAcoAkRBA3RqIgAvAQAgB0EoaiAALQADEEZqNgJEIAcgB0HwAGogCEEDcUEEdGoiESkDCCIgNwPAASAHIBEpAwAiITcDuAEgBygCtAEhACAHKAK8ASENIAcgCiAhpyIJaiIMICCnIhVrIgM2AsgBAn8CQCAAIAlqIh8gE00EQCAKIAkgDWoiBGogGk0NAQsgByAHKQPAATcDICAHIAcpA7gBNwMYIAogEiAHQRhqIAdBtAFqIBMgDiAUIBAQkwEMAQsgCiAAEBwCQCAJQRFJDQAgCkEQaiAAQRBqIgIQHCAKQSBqIABBIGoQHCAJQXBqQSFIDQAgCkEwaiEAA0AgACACQSBqIgkQHCAAQRBqIAJBMGoQHCAJIQIgAEEgaiIAIAxJDQALCyAHIB82ArQBIAcgDDYCzAECQCAVIAwgDmtLBEBBbCAVIAwgFGtLDQIaIBAgAyAOayIAaiICIA1qIBBNBEAgDCACIA0QShoMAgsgDCACQQAgAGsQSiECIAcgACANaiINNgK8ASAHIAIgAGsiDDYCzAEgByAONgLIASAOIQMLIBVBEE8EQCAMIAMQHCAMQRBqIANBEGoQHCANQSFIDQEgDCANaiEJIAxBIGohAANAIAAgA0EgaiICEBwgAEEQaiADQTBqEBwgAiEDIABBIGoiACAJSQ0ACwwBCyAHQcwBaiAHQcgBaiAVEHwgDUEJSQ0AIA0gBygCzAEiAmpBeGohCSACIAcoAsgBIgBrQQ9MBEADQCACIAAQZyAAQQhqIQAgAkEIaiICIAlJDQAMAgsACyACIAAQHCACQRBqIABBEGoQHCANQSlIDQAgAkEgaiECA0AgAiAAQSBqIgMQHCACQRBqIABBMGoQHCADIQAgAkEgaiICIAlJDQALCyAECyIAECEEQCAAIQ8MBAsgESAdNgIAIBEgGSAeaiALazYCDCARIAs2AgggESAYNgIEIAAgCmohCiAIQQFqIgggBUghAiAHQShqECMhACAIIAVODQEgAEEDSQ0ACwsgAg0BIAggFmsiDCAFSARAIBJBYGohDQNAIAcgB0HwAGogDEEDcUEEdGoiACkDCCIgNwPAASAHIAApAwAiITcDuAEgBygCtAEhACAHKAK8ASELIAcgCiAhpyIGaiIEICCnIglrIgI2AsgBAn8CQCAAIAZqIg8gE00EQCAKIAYgC2oiA2ogDU0NAQsgByAHKQPAATcDECAHIAcpA7gBNwMIIAogEiAHQQhqIAdBtAFqIBMgDiAUIBAQkwEMAQsgCiAAEBwCQCAGQRFJDQAgCkEQaiAAQRBqIggQHCAKQSBqIABBIGoQHCAGQXBqQSFIDQAgCkEwaiEAA0AgACAIQSBqIgYQHCAAQRBqIAhBMGoQHCAGIQggAEEgaiIAIARJDQALCyAHIA82ArQBIAcgBDYCzAECQCAJIAQgDmtLBEBBbCAJIAQgFGtLDQIaIBAgAiAOayIAaiICIAtqIBBNBEAgBCACIAsQShoMAgsgBCACQQAgAGsQSiECIAcgACALaiILNgK8ASAHIAIgAGsiBDYCzAEgByAONgLIASAOIQILIAlBEE8EQCAEIAIQHCAEQRBqIAJBEGoQHCALQSFIDQEgBCALaiEGIARBIGohAANAIAAgAkEgaiIEEBwgAEEQaiACQTBqEBwgBCECIABBIGoiACAGSQ0ACwwBCyAHQcwBaiAHQcgBaiAJEHwgC0EJSQ0AIAsgBygCzAEiCGpBeGohBCAIIAcoAsgBIgBrQQ9MBEADQCAIIAAQZyAAQQhqIQAgCEEIaiIIIARJDQAMAgsACyAIIAAQHCAIQRBqIABBEGoQHCALQSlIDQAgCEEgaiEIA0AgCCAAQSBqIgIQHCAIQRBqIABBMGoQHCACIQAgCEEgaiIIIARJDQALCyADCyIPECENAyAKIA9qIQogDEEBaiIMIAVHDQALCyAXIAcpAlQ3AgAgFyAHKAJcNgIIIAcoArQBIQgLQbp/IQ8gEyAIayIAIBIgCmtLDQAgCiAIIAAQKiAAaiABayEPCyAHQdABaiQAIA8LQQEDfyAAQQhqIQMgACgCBCECQQAhAANAIAEgAyAAQQN0ai0AAkEWS2ohASAAQQFqIgAgAnZFDQALIAFBCCACa3QLJQAgAEIANwIAIABBADsBCCAAQQA6AAsgACABNgIMIAAgAjoACguUAwEFf0G4fyEHAkACQCADRQ0AIAItAAAiBEUNAQJ/IAJBAWoiBSAEQRh0QRh1IgZBf0oNABogBkF/RgRAIANBA0gNAiAFLwAAQYD+AWohBCACQQNqDAELIANBAkgNASACLQABIARBCHRyQYCAfmohBCACQQJqCyEFIAEgBDYCACAFQQFqIgEgAiADaiIDSw0AQWwhByAAQRBqIAAgBS0AACIFQQZ2QSNBCSABIAMgAWtB4LABQfCxAUGAswEgACgCjOEBIAAoApziASAEEJQBIgYQISIIDQAgAEGYIGogAEEIaiAFQQR2QQNxQR9BCCABIAEgBmogCBsiASADIAFrQZC3AUGQuAFBkLkBIAAoAozhASAAKAKc4gEgBBCUASIG
ECEiCA0AIABBoDBqIABBBGogBUECdkEDcUE0QQkgASABIAZqIAgbIgEgAyABa0GguwFBgL0BQeC+ASAAKAKM4QEgACgCnOIBIAQQlAEiABAhDQAgACABaiACayEHCyAHDwsgAUEANgIAQQFBuH8gA0EBRhsLygYBCH9BbCEIAkAgAkEDSQ0AAkACQAJAAkAgAS0AACIEQQNxIglBAWsOAwMBAAILIAAoAojhAQ0AQWIPCyACQQVJDQJBAyEGIAEoAAAhBQJ/AkACQAJAIARBAnZBA3EiB0F+ag4CAQIACyAFQQ52Qf8HcSEEIAVBBHZB/wdxIQMgB0UMAgsgBUESdiEEQQQhBiAFQQR2Qf//AHEhA0EADAELIAVBBHZB//8PcSIDQYCACEsNAyABLQAEQQp0IAVBFnZyIQRBBSEGQQALIQUgBCAGaiIKIAJLDQICQCADQYEGSQ0AIAAoApziAUUNAEEAIQIDQCACQcT/AEkhByACQUBrIQIgBw0ACwsCfyAJQQNGBEAgASAGaiEBIABB4OIBaiECIAAoAgwhBiAFBEAgAiADIAEgBCAGEJMDDAILIAIgAyABIAQgBhCQAwwBCyAAQbjQAWohAiABIAZqIQEgAEHg4gFqIQYgAEGo0ABqIQcgBQRAIAcgBiADIAEgBCACEJEDDAELIAcgBiADIAEgBCACEI4DCxAhDQIgACADNgKA4gEgAEEBNgKI4QEgACAAQeDiAWo2AvDhASAJQQJGBEAgACAAQajQAGo2AgwLIAAgA2oiAEH44gFqQgA3AAAgAEHw4gFqQgA3AAAgAEHo4gFqQgA3AAAgAEHg4gFqQgA3AAAgCg8LQQIhAwJ/AkACQAJAIARBAnZBA3FBf2oOAwEAAgALQQEhAyAEQQN2DAILIAEvAABBBHYMAQtBAyEDIAEQlQFBBHYLIgQgA2oiBUEgaiACSwRAIAUgAksNAiAAQeDiAWogASADaiAEECohASAAIAQ2AoDiASAAIAE2AvDhASABIARqIgBCADcAGCAAQgA3ABAgAEIANwAIIABCADcAACAFDwsgACAENgKA4gEgACABIANqNgLw4QEgBQ8LQQIhAwJ/AkACQAJAIARBAnZBA3FBf2oOAwEAAgALQQEhAyAEQQN2DAILIAEvAABBBHYMAQsgAkEESSABEJUBIgJBj4CAAUtyDQFBAyEDIAJBBHYLIQIgAEHg4gFqIAEgA2otAAAgAkEgahAoIQEgACACNgKA4gEgACABNgLw4QEgA0EBaiEICyAIC8kDAQZ/IwBBgAFrIgMkAEFiIQgCQCACQQlJDQAgAEGY0ABqIAFBCGoiBCACQXhqIAAQzgEiBRAhIgYNACADQR82AnwgAyADQfwAaiADQfgAaiAEIAQgBWogBhsiBCABIAJqIgIgBGsQayIFECENACADKAJ8IgZBH0sNACADKAJ4IgdBCU8NACAAQYggaiADIAZB4KsBQeCsASAHEH0gA0E0NgJ8IAMgA0H8AGogA0H4AGogBCAFaiIEIAIgBGsQayIFECENACADKAJ8IgZBNEsNACADKAJ4IgdBCk8NACAAQZAwaiADIAZB4K0BQZCkASAHEH0gA0EjNgJ8IAMgA0H8AGogA0H4AGogBCAFaiIEIAIgBGsQayIFECENACADKAJ8IgZBI0sNACADKAJ4IgdBCk8NACAAIAMgBkHArwFBsKcBIAcQfSAEIAVqIgRBDGoiBSACSw0AIAQoAAAiBkF/aiACIAVrIgJPDQAgACAGNgKc0AEgBEEEaiIEKAAAIgVBf2ogAk8NACAAQaDQAWogBTYCACAEQQRqIgQoAAAiBUF/aiACTw0AIABBpNABaiAFNgIAIAQgAWtBBGohCAsgA0GAAWokACAICy0BAX8gAARAQbp/IQQgAyABTQR/IAAgAiADECgaIAMFIAQLDwtBtn9BACADGwstAQF/IAAEQEG6fyEEIAMgAU0EfyAAIAIgAxAqGiADBSAECw8LQbZ/QQAgAxsLpAICBH8BfiMAQRBrIgckAEG4fyEFAkAgBEH//wdLDQAgAEHY4AFqKQMAIQkgACADIAQQ+gIiBRAhIgYNACAAKAKc4gEhCCAAIAdBDGogAyADIAVqIAYbIgMgBEEAIAUgBhtrIgYQ+QIiBRAhDQAgCUKAgIAQViEEIAYgBWshBiADIAVqIQUCQAJAIAgEQCAAQQA2ApziASAHKAIMIQMMAQsCQAJAIAApA9jgAUKAgIAIWARAIAcoAgwhAwwBCyAHKAIMIgNBBEoNAQsgAEEANgKc4gEMAgsgACgCCBD3AiEIIABBADYCnOIBIAhBFEkNAQsgACABIAIgBSAGIAMgBBD2AiEFDAELIAAgASACIAUgBiADIAQQ9QIhBQsgB0EQaiQAIAULaQAgAEHQ4AFqIAEgAiAAKALs4QEQiAMiARAhBEAgAQ8LQbh/IQICQCABDQAgAEHs4AFqKAIAIgEEQEFgIQIgACgCmOIBIAFHDQELQQAhAiAAQfDgAWooAgBFDQAgAEGQ4QFqEIYCCyACC2wBAX8CfwJAAkAgAkEHTQ0AIAEoAABBt8jC4X5HDQAgACABKAAENgKY4gFBYiAAQRBqIAEgAhD7AiIDECENAhogAEKBgICAEDcDiOEBIAAgASADaiACIANrEMYBDAELIAAgASACEMYBC0EACwvIAwIHfwF+IwBBEGsiCSQAQbh/IQcCQCAEKAIAIghBBUEJIAAoAuzhASIFG0kNACADKAIAIgZBAUEFIAUbIAUQlwEiBRAhBEAgBSEHDAELIAggBUEDakkNACAAIAYgBRD/AiIHECENACAFIAZqIgYgCCAFayIIIAkQxwEiBRAhBEAgBSEHDAELIAEgAmohCiAAQZDhAWohCyABIQIDQCAIQX1qIgggBUkEQEG4fyEHDAILIAZBA2ohBkFsIQcCfwJAAkACQCAJKAIADgMBAgAFCyAAIAIgCiACayAGIAUQ/gIMAgsgAiAKIAJrIAYgBRD9AgwBCyACIAogAmsgBi0AACAJKAIIEPwCCyIHECENASAAKALw4AEEQCALIAIgBxCFAgsgCCAFayEIIAUgBmohBiACIAdqIQIgCSgCBEUEQCAGIAggCRDHASIFIQcgBRAhRQ0BDAILCyAAKQPQ4AEiDEJ/UgRAQWwhByAMIAIgAWusUg0BCyADIAAoAvDgAQR/QWohByAIQQRJDQEgCxCEAiEMIAYoAAAgDKdHDQEgCEF8aiEIIAZBBGoFIAYLNgIAIAQgCDYCACACIAFrIQcLIAlBEGokACAHCzAAIAAQyQECf0EAQQAQIQ0AGiABRSACRXJFBEBBYiAAIAEgAhCAAxAhDQEaC0EACws5ACABBEAgACAAKALE4AEgASgCBCABKAIIakc2ApziAQsgABDJAUEAECEgAUVyRQRAIAAgARCYAwsLLwACf0G4fyABQQhJDQAaQXIgACgABCIAQXdLDQAaQbh/IABBCGoiACAAIAFLGwsL3gIBB38jAEEQayIHJAAgBQR/IAUoAgQhCiAFKAIIBUE
ACyELAkACQCAAKALs4QEiCRBpIARLBEAgASEIDAELIAEhCANAAkAgAygAAEFwcUHQ1LTCAUYEQCADIAQQhAMiBhAhDQEgAyAGaiEDIAQgBmsiBCAJEGlPDQIgByAENgIIIAcgAzYCDAwDCyAHIAQ2AgggByADNgIMAkAgBQRAIAAgBRCDA0EAIQZBABAhRQ0BDAULIAAgCiALEIIDIgYQIQ0ECyAAIAgQhwNBACAAIAggAiAHQQxqIAdBCGoQgQMiBiIDa0EAIAMQIRtBCkYgDHEEQEG4fyEGDAQLIAYQIQ0DIAYgCGohCCAHKAIIIgQgACgC7OEBIgkQaUkNAiACIAZrIQJBASEMIAcoAgwhAwwBCwsgByAENgIIIAcgAzYCDAwBC0G4fyEGIAQNACAIIAFrIQYLIAdBEGokACAGCzMAAkACQAJAIAAoAqDiAUEBag4DAgABAAsgABDKAUEADwsgAEEANgKg4gELIAAoApTiAQtGAQJ/IAEgACgCuOABIgJHBEAgACACNgLE4AEgACABNgK44AEgACgCvOABIQMgACABNgK84AEgACABIAMgAmtqNgLA4AELC7EEAgR/An4gAEIANwMgIABCADcDGCAAQgA3AxAgAEIANwMIIABCADcDACADEGkiBCACSwRAIAQPCyABRQRAQX8PCwJAAkACQAJAAkACQAJ/IANBAUYEQCABIAJBARCXAQwBCyABKAAAIgZBqOq+aUcNASABIAIgAxCXAQsiAyACSw0FIAAgAzYCGEFyIQMgASAEaiIFQX9qLQAAIgJBCHENBSACQSBxIgZFBEBBcCEDIAUtAAAiBUGnAUsNBiAFQQdxrUIBIAVBA3ZBCmqthiIIQgOIfiAIfCEJIARBAWohBAsgAkEGdiEFIAJBAnYhB0EAIQMgAkEDcUF/ag4DAQIDBAtBdiEDIAZBcHFB0NS0wgFHDQRBCCEDIAJBCEkNBCAAQgA3AwAgAEIANwMgIABCADcDGCAAQgA3AxAgAEIANwMIIAEoAAQhASAAQQE2AhQgACABrTcDAEEADwsgASAEai0AACEDIARBAWohBAwCCyABIARqLwAAIQMgBEECaiEEDAELIAEgBGooAAAhAyAEQQRqIQQLIAdBAXEhAgJ+AkACQAJAAkAgBUF/ag4DAQIDAAtCfyAGRQ0DGiABIARqMQAADAMLIAEgBGovAACtQoACfAwCCyABIARqKAAArQwBCyABIARqKQAACyEIIAAgAjYCICAAIAM2AhwgACAINwMAQQAhAyAAQQA2AhQgACAIIAkgBhsiCDcDCCAAIAhCgIAIIAhCgIAIVBs+AhALIAMLXQEDfwJAIABFDQAgACgCiOIBDQAgAEH84QFqKAIAIQEgAEH44QFqKAIAIQIgACgC9OEBIQMgABDKASAAKAKo4gEgAyACIAEQZCAAQQA2AqjiASAAIAMgAiABEGQLC6kBAQF/IwBBIGsiASQAIABBgYCAwAA2ArTiASAAQQA2AojiASAAQQA2AuzhASAAQgA3A5DiASAAQQA2AtziASAAQgA3AsziASAAQQA2ArziASAAQQA2AsTgASAAQgA3ApziASAAQaTiAWpCADcCACAAQaziAWpBADYCACABQRBqEOABIAEgASkDGDcDCCABIAEpAxA3AwAgACABEN8BNgKM4gEgAUEgaiQACzkBAn9BmOMJQQBBABCHAiIABH8gAEEANgL84QEgAEEANgL44QEgAEEANgL04QEgABCKAyAABSABCws8AQF/IAAgAyAEIAUQzwEiBRAhBEAgBQ8LQbh/IQYgBSAESQR/IAEgAiADIAVqIAQgBWsgABDLAQUgBgsLPAEBfyAAIAMgBCAFEM4BIgUQIQRAIAUPC0G4fyEGIAUgBEkEfyABIAIgAyAFaiAEIAVrIAAQzAEFIAYLCz4AIAJFBEBBun8PCyAERQRAQWwPCyACIAQQlAMEQCAAIAEgAiADIAQgBRCNAw8LIAAgASACIAMgBCAFEIwDCwcAIAARCQALSwEBfyMAQRBrIgUkACAFQQhqIAQoAgAQNAJ/IAUtAAkEQCAAIAEgAiADIAQQzAEMAQsgACABIAIgAyAEEMsBCyEEIAVBEGokACAECzwBAX8gACADIAQgBRDPASIFECEEQCAFDwtBuH8hBiAFIARJBH8gASACIAMgBWogBCAFayAAEM0BBSAGCwv/AwEDfyMAQSBrIgUkACAFQQhqIAIgAxBFIgIQIUUEQCAFIAQoAgAQNCAEQQRqIQIgBS0AAiEDAkAgBUEIahAjIAAgAWoiB0F9aiIGIABNcg0AA0AgACACIAUoAgggBSgCDCADEClBAnRqIgQvAQA7AAAgBUEIaiAELQACECYgACAELQADaiIEIAIgBSgCCCAFKAIMIAMQKUECdGoiAC8BADsAACAFQQhqIAAtAAIQJiAEIAAtAANqIQAgBUEIahAjDQEgACAGSQ0ACwsCQCAFQQhqECMgACAHQX5qIgRLcg0AA0AgACACIAUoAgggBSgCDCADEClBAnRqIgYvAQA7AAAgBUEIaiAGLQACECYgACAGLQADaiEAIAVBCGoQIw0BIAAgBE0NAAsLIAAgBE0EQANAIAAgAiAFKAIIIAUoAgwgAxApQQJ0aiIGLwEAOwAAIAVBCGogBi0AAhAmIAAgBi0AA2oiACAETQ0ACwsCQCAAIAdPDQAgACACIAUoAgggBSgCDCADECkiA0ECdGoiAC0AADoAACAALQADQQFGBEAgBUEIaiAALQACECYMAQsgBSgCDEEfSw0AIAVBCGogAiADQQJ0ai0AAhAmIAUoAgxBIUkNACAFQSA2AgwLIAFBbCAFKAIMIAUoAhAgBSgCFBBLGyECCyAFQSBqJAAgAgtLAQF/IwBBEGsiBSQAIAVBCGogBCgCABA0An8gBS0ACQRAIAAgASACIAMgBBCSAwwBCyAAIAEgAiADIAQQzQELIQQgBUEQaiQAIAQLXQEBf0EPIQIgASAASQRAIAFBBHQgAG4hAgsgAEEIdiIBIAJBGGwiAEHMqAFqKAIAbCAAQcioAWooAgBqIgJBA3YgAmogAEHAqAFqKAIAIABBxKgBaigCACABbGpJC8wCAQR/IwBBQGoiCSQAIAkgAygCMDYCMCAJIAMpAig3AyggCSADKQIgNwMgIAkgAykCGDcDGCAJIAMpAhA3AxAgCSADKQIINwMIIAkgAykCADcDAAJAIARBAkgNACAJIARBAnRqKAIAIQQgCUE8aiAIEC8gCUEBOgA/IAkgAjoAPiAERQ0AQQAhAyAJKAI8IQoDQCAAIANBAnRqIAo2AQAgA0EBaiIDIARHDQALCyAGBEBBACEEA0AgCSAFIARBAXRqIgotAAEiC0ECdGoiDCgCACEDIAlBPGogCi0AAEEIdCAIakH//wNxEC8gCUECOgA/IAkgByALayIKIAJqOgA+IANBASABIAprdGohCiAJKAI8IQsDQCAAIANBAnRqIAs2AQAgA0EBaiIDIApJDQALIAwgCjYCACAEQQFqIgQgBkcNAA
sLIAlBQGskAAvdAgEJfyMAQdAAayIJJAAgCUFAayAFKAIwNgIAIAkgBSkCKDcDOCAJIAUpAiA3AzAgCSAFKQIYNwMoIAkgBSkCEDcDICAJIAUpAgA3AxAgCSAFKQIINwMYIAMEQCAHIAZrIQ8gByABayEQA0BBASABIAcgAiALQQF0aiIGLQABIgxrIghrIgp0IQ0gBi0AACEOIAlBEGogDEECdGoiDCgCACEGAkAgCiAPTwRAIAAgBkECdGogCiAIIAUgCEE0bGogCCAQaiIIQQEgCEEBShsiCCACIAQgCEECdGooAgAiCEEBdGogAyAIayAHIA4QlQMgBiANaiEIDAELIAlBDGogDhAvIAlBAToADyAJIAg6AA4gBiAGIA1qIghPDQAgCSgCDCEKA0AgACAGQQJ0aiAKNgEAIAZBAWoiBiAIRw0ACwsgDCAINgIAIAtBAWoiCyADRw0ACwsgCUHQAGokAAs+AQN/IAAEQCAAKAIAIABBvNABaigCACIBIABBwNABaigCACICIABBxNABaigCACIDEGQgACABIAIgAxBkCwvMAQEBfyAAIAEoArTQATYCmOIBIAAgASgCBCICNgLA4AEgACACNgK84AEgACACIAEoAghqIgI2ArjgASAAIAI2AsTgASABKAK40AEEQCAAQoGAgIAQNwOI4QEgACABQaTQAGo2AgwgACABQZQgajYCCCAAIAFBnDBqNgIEIAAgAUEMajYCACAAQazQAWogAUGo0AFqKAIANgIAIABBsNABaiABQazQAWooAgA2AgAgAEG00AFqIAFBsNABaigCADYCAA8LIABCADcDiOEBC6JIAS5/IwBB4ABrIhIkACAAKAKEASEGIAAoAgQhByAAKAKIASEFIAAoAgwhCCASIAAoAhg2AlwgACgCPCEbIABBQGsoAgAhHCAAQSxqIiYgAyAEQQIQWSADIAcgCGogA0ZqIg0gAyAEaiIMQXhqIi5JBEAgBUH/HyAFQf8fSRshLyAMQWBqITBBA0EEIAZBA0YbIi1Bf2ohJwNAAkACQAJAAkACQAJAAkACQAJAIAAoAgQiBSAAKAIYIgRqIA1LDQAgDSADayEdIAAoAoQBIQYgBCANIAVrIgdJBEADQCAAIAQgBWogDCAGQQEQQSAEaiIEIAdJDQALCyAdRSEhIAAgBzYCGAJAAkACQAJAAkAgBkF9ag4FAAECAwMBC0EAIQlBACANIAAoAgQiGWsiCEF/IAAoAnhBf2p0QX9zIiRrIgQgBCAISxshFiAAKAIgIA0gACgCfEEDEB5BAnRqIgooAgAhBSAIIAAoAhAgACgCFCAIIAAoAnQQJyIEayEYIARBASAEGyEVQQNBBCAdGyEeIAAoAigiHyAIICRxQQN0aiILQQRqIRQgACgCiAEiBEH/HyAEQf8fSRshDiANQQNqIQ8gCEEJaiERIAggACgCDCITayEgIBMgGWohGiAAKAIIIhAgE2ohFyAAKAKAASEiICchBiAhIQQDQAJAAn8CfyAEQQNGBEAgAigCAEF/agwBCyACIARBAnRqKAIACyIHQX9qIiMgIEkEQCANQQMQHyANIAdrQQMQH0cNAiAPIA8gB2sgDBAdDAELICMgGE8NASATIAggB2siB0F/c2pBA0kNASANQQMQHyAHIBBqIgdBAxAfRw0BIA8gB0EDaiAMIBcgGhAgC0EDaiIHIAZNDQAgGyAJQQN0aiIGIAc2AgQgBiAEICFrNgIAIAlBAWohCSAHIA5LDQUgByIGIA1qIAxGDQULIARBAWoiBCAeSQ0ACwJAIAZBAksNAEECIQYgGSAAKAIcIAAoAiQgEkHcAGogDRBAIgQgFUkNACAIIARrIgdB//8PSw0AAn8gBCATTwRAIA0gBCAZaiAMEB0MAQsgDSAEIBBqIAwgFyAaECALIgRBA0kNACAbIAQ2AgQgGyAHQQJqNgIAIAQgDk0EQEEBIQkgBCEGIAQgDWogDEcNAQtBASEJIAAgCEEBajYCGAwECyAKIAg2AgACQCAFIBVJDQAgCEECaiEYQX8gInRBf3MhCkEAIQ5BACEPA0ACfyAOIA8gDiAPSRsiBCAFaiATTwRAIAQgDWogBSAZaiAEaiAMEB0gBGohBCAZDAELIBAgGSAEIA1qIAUgEGogBGogDCAXIBoQICAEaiIEIAVqIBNJGwshCCAEIAZLBEAgGyAJQQN0aiIGIAQ2AgQgBiAYIAVrNgIAIAQgBWogESAEIBEgBWtLGyERIAlBAWohCSAEQYAgSw0CIAQhBiAEIA1qIAxGDQILIB8gBSAkcUEDdGohBwJAAkAgBSAIaiAEai0AACAEIA1qLQAASQRAIAsgBTYCACAFIBZLDQEgEkFAayELDAQLIBQgBTYCACAFIBZLBEAgByEUIAQhDwwCCyASQUBrIRQMAwsgBCEOIAdBBGoiCyEHCyAKRQ0BIApBf2ohCiAHKAIAIgUgFU8NAAsLIBRBADYCACALQQA2AgAgACARQXhqNgIYDAMLQQAhCUEAIA0gACgCBCITayIIQX8gACgCeEF/anRBf3MiFWsiBCAEIAhLGyEaIAAoAiAgDSAAKAJ8QQQQHkECdGoiDigCACEFIAggACgCECAAKAIUIAggACgCdBAnIgRrIQogBEEBIAQbIRdBA0EEIB0bIRggACgCKCIeIAggFXFBA3RqIhRBBGohGSAAKAKIASIEQf8fIARB/x9JGyEfIA1BBGohDyAIQQlqIREgCCAAKAIMIgtrISAgCyATaiEkIAAoAggiECALaiEWIAAoAoABISIgJyEGICEhBANAAkACfwJ/IARBA0YEQCACKAIAQX9qDAELIAIgBEECdGooAgALIgdBf2oiIyAgSQRAIA1BBBAfIA0gB2tBBBAfRw0CIA8gDyAHayAMEB0MAQsgIyAKTw0BIAsgCCAHayIHQX9zakEDSQ0BIA1BBBAfIAcgEGoiB0EEEB9HDQEgDyAHQQRqIAwgFiAkECALQQRqIgcgBk0NACAbIAlBA3RqIgYgBzYCBCAGIAQgIWs2AgAgCUEBaiEJIAcgH0sNBCAHIgYgDWogDEYNBAsgBEEBaiIEIBhJDQALIA4gCDYCAAJAIAUgF0kNACAIQQJqIRhBfyAidEF/cyEKQQAhDkEAIQ8DQAJ/IA4gDyAOIA9JGyIEIAVqIAtPBEAgBCANaiAFIBNqIARqIAwQHSAEaiEEIBMMAQsgECATIAQgDWogBSAQaiAEaiAMIBYgJBAgIARqIgQgBWogC0kbCyEIIAQgBksEQCAbIAlBA3RqIgYgBDYCBCAGIBggBWs2AgAgBCAFaiARIAQgESAFa0sbIREgCUEBaiEJIARBgCBLDQIgBCEGIAQgDWogDEYNAgsgHiAFIBVxQQN0aiEHAkACQCAFIAhqIARqLQAAIAQgDWotAABJBEAgFCAFNgIAIAUgGksNASASQUBrIRQMBAsgGSAFNgIAIAUgGksEQCAHIRkgBCEPDAILIBJBQGshGQwDCyAEIQ4gB0EEaiIUIQcLIApFDQEgCkF/aiEKI
AcoAgAiBSAXTw0ACwsgGUEANgIAIBRBADYCACAAIBFBeGo2AhgMAgtBACEJQQAgDSAAKAIEIhNrIghBfyAAKAJ4QX9qdEF/cyIVayIEIAQgCEsbIRogACgCICANIAAoAnxBBRAeQQJ0aiIOKAIAIQUgCCAAKAIQIAAoAhQgCCAAKAJ0ECciBGshCiAEQQEgBBshF0EDQQQgHRshGCAAKAIoIh4gCCAVcUEDdGoiGUEEaiEUIAAoAogBIgRB/x8gBEH/H0kbIR8gDUEEaiEPIAhBCWohESAIIAAoAgwiC2shICALIBNqISQgACgCCCIQIAtqIRYgACgCgAEhIiAnIQYgISEEA0ACQAJ/An8gBEEDRgRAIAIoAgBBf2oMAQsgAiAEQQJ0aigCAAsiB0F/aiIjICBJBEAgDUEEEB8gDSAHa0EEEB9HDQIgDyAPIAdrIAwQHQwBCyAjIApPDQEgCyAIIAdrIgdBf3NqQQNJDQEgDUEEEB8gByAQaiIHQQQQH0cNASAPIAdBBGogDCAWICQQIAtBBGoiByAGTQ0AIBsgCUEDdGoiBiAHNgIEIAYgBCAhazYCACAJQQFqIQkgByAfSw0DIAciBiANaiAMRg0DCyAEQQFqIgQgGEkNAAsgDiAINgIAAkAgBSAXSQ0AIAhBAmohGEF/ICJ0QX9zIQpBACEOQQAhDwNAAn8gDiAPIA4gD0kbIgQgBWogC08EQCAEIA1qIAUgE2ogBGogDBAdIARqIQQgEwwBCyAQIBMgBCANaiAFIBBqIARqIAwgFiAkECAgBGoiBCAFaiALSRsLIQggBCAGSwRAIBsgCUEDdGoiBiAENgIEIAYgGCAFazYCACAEIAVqIBEgBCARIAVrSxshESAJQQFqIQkgBEGAIEsNAiAEIQYgBCANaiAMRg0CCyAeIAUgFXFBA3RqIQcCQAJAIAUgCGogBGotAAAgBCANai0AAEkEQCAZIAU2AgAgBSAaSw0BIBJBQGshGQwECyAUIAU2AgAgBSAaSwRAIAchFCAEIQ8MAgsgEkFAayEUDAMLIAQhDiAHQQRqIhkhBwsgCkUNASAKQX9qIQogBygCACIFIBdPDQALCyAUQQA2AgAgGUEANgIAIAAgEUF4ajYCGAwBC0EAIQlBACANIAAoAgQiE2siCEF/IAAoAnhBf2p0QX9zIhVrIgQgBCAISxshGiAAKAIgIA0gACgCfEEGEB5BAnRqIg4oAgAhBSAIIAAoAhAgACgCFCAIIAAoAnQQJyIEayEKIARBASAEGyEXQQNBBCAdGyEYIAAoAigiHiAIIBVxQQN0aiIZQQRqIRQgACgCiAEiBEH/HyAEQf8fSRshHyANQQRqIQ8gCEEJaiERIAggACgCDCILayEgIAsgE2ohJCAAKAIIIhAgC2ohFiAAKAKAASEiICchBiAhIQQDQAJAAn8CfyAEQQNGBEAgAigCAEF/agwBCyACIARBAnRqKAIACyIHQX9qIiMgIEkEQCANQQQQHyANIAdrQQQQH0cNAiAPIA8gB2sgDBAdDAELICMgCk8NASALIAggB2siB0F/c2pBA0kNASANQQQQHyAHIBBqIgdBBBAfRw0BIA8gB0EEaiAMIBYgJBAgC0EEaiIHIAZNDQAgGyAJQQN0aiIGIAc2AgQgBiAEICFrNgIAIAlBAWohCSAHIB9LDQIgByIGIA1qIAxGDQILIARBAWoiBCAYSQ0ACyAOIAg2AgACQCAFIBdJDQAgCEECaiEYQX8gInRBf3MhCkEAIQ5BACEPA0ACfyAOIA8gDiAPSRsiBCAFaiALTwRAIAQgDWogBSATaiAEaiAMEB0gBGohBCATDAELIBAgEyAEIA1qIAUgEGogBGogDCAWICQQICAEaiIEIAVqIAtJGwshCCAEIAZLBEAgGyAJQQN0aiIGIAQ2AgQgBiAYIAVrNgIAIAQgBWogESAEIBEgBWtLGyERIAlBAWohCSAEQYAgSw0CIAQhBiAEIA1qIAxGDQILIB4gBSAVcUEDdGohBwJAAkAgBSAIaiAEai0AACAEIA1qLQAASQRAIBkgBTYCACAFIBpLDQEgEkFAayEZDAQLIBQgBTYCACAFIBpLBEAgByEUIAQhDwwCCyASQUBrIRQMAwsgBCEOIAdBBGoiGSEHCyAKRQ0BIApBf2ohCiAHKAIAIgUgF08NAAsLIBRBADYCACAZQQA2AgAgACARQXhqNgIYCyAJRQ0AIBwgAigCADYCECAcIAIoAgQ2AhQgAigCCCEEIBwgHTYCDCAcQQA2AgggHCAENgIYIBwgAyAdICZBAhBYIgU2AgAgGyAJQX9qQQN0aiIEKAIEIgcgL0sEQCAEKAIAIQoMAwtBASEEQQAgJkECEC0hBgNAIBwgBEEcbGpBgICAgAQ2AgAgBEEBaiIEIC1HDQALIAUgBmohCkEAIQggLSEHA0AgGyAIQQN0aiIEKAIEIQYgEkFAayACIAQoAgAiDyAhED8gByAGTQRAIA9BAWoQJCIOQQh0QYAgaiERA0AgB0F9aiEEAn8gACgCZEEBRgRAIAQQKyARagwBCyAAKAJgIAAoAjggDkECdGooAgAQK2sgACgCXGogBBA8QQJ0IgRBkKQBaigCACAOakEIdGogACgCNCAEaigCABAra0EzagshBSAcIAdBHGxqIgQgHTYCDCAEIA82AgQgBCAHNgIIIAQgBSAKajYCACAEIBIpA0A3AhAgBCASKAJINgIYIAdBAWoiByAGTQ0ACwsgCEEBaiIIIAlHDQALQQEhDwJAIAdBf2oiBEUEQEEAIQQMAQsDQEEBIQUgHCAPQX9qQRxsaiIHKAIIRQRAIAcoAgxBAWohBQsgDSAPaiILQX9qQQEgJkECEFIgBygCAGogBSAmQQIQLWogBUF/aiAmQQIQLWsiBiAcIA9BHGxqIhooAgAiGUwEQCAaIAU2AgwgGkIANwIEIBogBjYCACAaIAcoAhg2AhggGiAHKQIQNwIQIAYhGQsCQCALIC5LDQAgBCAPRgRAIA8hBAwDC0EAIR0gGigCCCIHRQRAIBooAgwhHQtBACAmQQIQLSEyIAAoAgQiBiAAKAIYIgVqIAtLDQAgACgChAEhCCAFIAsgBmsiCUkEQANAIAAgBSAGaiAMIAhBARBBIAVqIgUgCUkNAAsLIAdBAEchISAaQRBqISQgACAJNgIYAkACQAJAAkACQCAIQX1qDgUAAQIDAwELQQAhEEEAIAsgACgCBCIOayIJQX8gACgCeEF/anRBf3MiImsiBSAFIAlLGyEjIAAoAiAgCyAAKAJ8QQMQHkECdGoiJSgCACEGIAkgACgCECAAKAIUIAkgACgCdBAnIgVrISggBUEBIAUbIR5BBEEDIAcbISkgACgCKCIqIAkgInFBA3RqIhZBBGohEyAAKAKIASIFQf8fIAVB/x9JGyEVIAtBA2ohESAJQQlqIRQgCSAAKAIMIhdrISsgDiAXaiEfIAAoAggiGCAXaiEgIAAoAoABISwgJyEHICEhBQNAAkACfwJ/IAVBA0YEQCAkKAIAQX9qDAEL
IBogBUECdGooAhALIgpBf2oiCCArSQRAIAtBAxAfIAsgCmtBAxAfRw0CIBEgESAKayAMEB0MAQsgCCAoTw0BIBcgCSAKayIIQX9zakEDSQ0BIAtBAxAfIAggGGoiCEEDEB9HDQEgESAIQQNqIAwgICAfECALQQNqIgggB00NACAbIBBBA3RqIgcgCDYCBCAHIAUgIWs2AgAgEEEBaiEQIAggFUsNBSAIIgcgC2ogDEYNBQsgBUEBaiIFIClJDQALAkAgB0ECSw0AQQIhByAOIAAoAhwgACgCJCASQdwAaiALEEAiBSAeSQ0AIAkgBWsiCEH//w9LDQACfyAFIBdPBEAgCyAFIA5qIAwQHQwBCyALIAUgGGogDCAgIB8QIAsiBUEDSQ0AIBsgBTYCBCAbIAhBAmo2AgAgBSAVTQRAQQEhECAFIQcgBSALaiAMRw0BC0EBIRAgACAJQQFqNgIYDAQLICUgCTYCAAJAIAYgHkkNACAJQQJqISVBfyAsdEF/cyEVQQAhCUEAIQgDQAJ/IAkgCCAJIAhJGyIFIAZqIBdPBEAgBSALaiAGIA5qIAVqIAwQHSAFaiEFIA4MAQsgGCAOIAUgC2ogBiAYaiAFaiAMICAgHxAgIAVqIgUgBmogF0kbCyERIAUgB0sEQCAbIBBBA3RqIgcgBTYCBCAHICUgBms2AgAgBSAGaiAUIAUgFCAGa0sbIRQgEEEBaiEQIAVBgCBLDQIgBSEHIAUgC2ogDEYNAgsgKiAGICJxQQN0aiEKAkACQCAGIBFqIAVqLQAAIAUgC2otAABJBEAgFiAGNgIAIAYgI0sNASASQUBrIRYMBAsgEyAGNgIAIAYgI0sEQCAKIRMgBSEIDAILIBJBQGshEwwDCyAFIQkgCkEEaiIWIQoLIBVFDQEgFUF/aiEVIAooAgAiBiAeTw0ACwsgE0EANgIAIBZBADYCACAAIBRBeGo2AhgMAwtBACEQQQAgCyAAKAIEIhNrIglBfyAAKAJ4QX9qdEF/cyIeayIFIAUgCUsbIR8gACgCICALIAAoAnxBBBAeQQJ0aiIVKAIAIQYgCSAAKAIQIAAoAhQgCSAAKAJ0ECciBWshJSAFQQEgBRshIEEEQQMgBxshKCAAKAIoIikgCSAecUEDdGoiF0EEaiEOIAAoAogBIgVB/x8gBUH/H0kbISogC0EEaiERIAlBCWohFCAJIAAoAgwiFmshKyATIBZqISIgACgCCCIYIBZqISMgACgCgAEhLCAnIQcgISEFA0ACQAJ/An8gBUEDRgRAICQoAgBBf2oMAQsgGiAFQQJ0aigCEAsiCkF/aiIIICtJBEAgC0EEEB8gCyAKa0EEEB9HDQIgESARIAprIAwQHQwBCyAIICVPDQEgFiAJIAprIghBf3NqQQNJDQEgC0EEEB8gCCAYaiIIQQQQH0cNASARIAhBBGogDCAjICIQIAtBBGoiCCAHTQ0AIBsgEEEDdGoiByAINgIEIAcgBSAhazYCACAQQQFqIRAgCCAqSw0EIAgiByALaiAMRg0ECyAFQQFqIgUgKEkNAAsgFSAJNgIAAkAgBiAgSQ0AIAlBAmohJUF/ICx0QX9zIRVBACEJQQAhCANAAn8gCSAIIAkgCEkbIgUgBmogFk8EQCAFIAtqIAYgE2ogBWogDBAdIAVqIQUgEwwBCyAYIBMgBSALaiAGIBhqIAVqIAwgIyAiECAgBWoiBSAGaiAWSRsLIREgBSAHSwRAIBsgEEEDdGoiByAFNgIEIAcgJSAGazYCACAFIAZqIBQgBSAUIAZrSxshFCAQQQFqIRAgBUGAIEsNAiAFIQcgBSALaiAMRg0CCyApIAYgHnFBA3RqIQoCQAJAIAYgEWogBWotAAAgBSALai0AAEkEQCAXIAY2AgAgBiAfSw0BIBJBQGshFwwECyAOIAY2AgAgBiAfSwRAIAohDiAFIQgMAgsgEkFAayEODAMLIAUhCSAKQQRqIhchCgsgFUUNASAVQX9qIRUgCigCACIGICBPDQALCyAOQQA2AgAgF0EANgIAIAAgFEF4ajYCGAwCC0EAIRBBACALIAAoAgQiE2siCUF/IAAoAnhBf2p0QX9zIh5rIgUgBSAJSxshHyAAKAIgIAsgACgCfEEFEB5BAnRqIhUoAgAhBiAJIAAoAhAgACgCFCAJIAAoAnQQJyIFayElIAVBASAFGyEgQQRBAyAHGyEoIAAoAigiKSAJIB5xQQN0aiIXQQRqIQ4gACgCiAEiBUH/HyAFQf8fSRshKiALQQRqIREgCUEJaiEUIAkgACgCDCIWayErIBMgFmohIiAAKAIIIhggFmohIyAAKAKAASEsICchByAhIQUDQAJAAn8CfyAFQQNGBEAgJCgCAEF/agwBCyAaIAVBAnRqKAIQCyIKQX9qIgggK0kEQCALQQQQHyALIAprQQQQH0cNAiARIBEgCmsgDBAdDAELIAggJU8NASAWIAkgCmsiCEF/c2pBA0kNASALQQQQHyAIIBhqIghBBBAfRw0BIBEgCEEEaiAMICMgIhAgC0EEaiIIIAdNDQAgGyAQQQN0aiIHIAg2AgQgByAFICFrNgIAIBBBAWohECAIICpLDQMgCCIHIAtqIAxGDQMLIAVBAWoiBSAoSQ0ACyAVIAk2AgACQCAGICBJDQAgCUECaiElQX8gLHRBf3MhFUEAIQlBACEIA0ACfyAJIAggCSAISRsiBSAGaiAWTwRAIAUgC2ogBiATaiAFaiAMEB0gBWohBSATDAELIBggEyAFIAtqIAYgGGogBWogDCAjICIQICAFaiIFIAZqIBZJGwshESAFIAdLBEAgGyAQQQN0aiIHIAU2AgQgByAlIAZrNgIAIAUgBmogFCAFIBQgBmtLGyEUIBBBAWohECAFQYAgSw0CIAUhByAFIAtqIAxGDQILICkgBiAecUEDdGohCgJAAkAgBiARaiAFai0AACAFIAtqLQAASQRAIBcgBjYCACAGIB9LDQEgEkFAayEXDAQLIA4gBjYCACAGIB9LBEAgCiEOIAUhCAwCCyASQUBrIQ4MAwsgBSEJIApBBGoiFyEKCyAVRQ0BIBVBf2ohFSAKKAIAIgYgIE8NAAsLIA5BADYCACAXQQA2AgAgACAUQXhqNgIYDAELQQAhEEEAIAsgACgCBCITayIJQX8gACgCeEF/anRBf3MiHmsiBSAFIAlLGyEfIAAoAiAgCyAAKAJ8QQYQHkECdGoiFSgCACEGIAkgACgCECAAKAIUIAkgACgCdBAnIgVrISUgBUEBIAUbISBBBEEDIAcbISggACgCKCIpIAkgHnFBA3RqIhdBBGohDiAAKAKIASIFQf8fIAVB/x9JGyEqIAtBBGohESAJQQlqIRQgCSAAKAIMIhZrISsgEyAWaiEiIAAoAggiGCAWaiEjIAAoAoABISwgJyEHICEhBQNAAkACfwJ/IAVBA0YEQCAkKAIAQX9qDAELIBogBUECdGooAhALIgpBf2oiCCArSQRAIAtBBBAfIAsgCmtBBBAfRw0CIBEgESAKayAMEB0MAQsgCCA
lTw0BIBYgCSAKayIIQX9zakEDSQ0BIAtBBBAfIAggGGoiCEEEEB9HDQEgESAIQQRqIAwgIyAiECALQQRqIgggB00NACAbIBBBA3RqIgcgCDYCBCAHIAUgIWs2AgAgEEEBaiEQIAggKksNAiAIIgcgC2ogDEYNAgsgBUEBaiIFIChJDQALIBUgCTYCAAJAIAYgIEkNACAJQQJqISVBfyAsdEF/cyEVQQAhCUEAIQgDQAJ/IAkgCCAJIAhJGyIFIAZqIBZPBEAgBSALaiAGIBNqIAVqIAwQHSAFaiEFIBMMAQsgGCATIAUgC2ogBiAYaiAFaiAMICMgIhAgIAVqIgUgBmogFkkbCyERIAUgB0sEQCAbIBBBA3RqIgcgBTYCBCAHICUgBms2AgAgBSAGaiAUIAUgFCAGa0sbIRQgEEEBaiEQIAVBgCBLDQIgBSEHIAUgC2ogDEYNAgsgKSAGIB5xQQN0aiEKAkACQCAGIBFqIAVqLQAAIAUgC2otAABJBEAgFyAGNgIAIAYgH0sNASASQUBrIRcMBAsgDiAGNgIAIAYgH0sEQCAKIQ4gBSEIDAILIBJBQGshDgwDCyAFIQkgCkEEaiIXIQoLIBVFDQEgFUF/aiEVIAooAgAiBiAgTw0ACwsgDkEANgIAIBdBADYCACAAIBRBeGo2AhgLIBBFDQAgGyAQQX9qQQN0aiIFKAIEIgcgL0sgByAPakGAIE9yDQQgGSAyaiERQQAhBwNAIBJBQGsgJCAbIAdBA3RqIgYoAgAiCCAhED8gLSEOAn8gBwRAIAZBfGooAgBBAWohDgsgBigCBCIFIA5PCwRAIAhBAWoQJCIJQQh0QYAgaiEZA0AgBUF9aiEKIAUgD2ohBgJ/IAAoAmRBAUYEQCAKECsgGWoMAQsgACgCYCAAKAI4IAlBAnRqKAIAECtrIAAoAlxqIAoQPEECdCIKQZCkAWooAgAgCWpBCHRqIAAoAjQgCmooAgAQK2tBM2oLIBFqIQoCQAJAIAYgBE0EQCAKIBwgBkEcbGooAgBIDQEMAgsDQCAcIARBAWoiBEEcbGpBgICAgAQ2AgAgBCAGSQ0ACwsgHCAGQRxsaiIGIB02AgwgBiAINgIEIAYgBTYCCCAGIAo2AgAgBiASKQNANwIQIAYgEigCSDYCGAsgBUF/aiIFIA5PDQALCyAHQQFqIgcgEEcNAAsLIA9BAWoiDyAETQ0ACwsgHCAEQRxsaiIFKAIMIR0gBSgCBCEKIAUoAgAhMSAFKAIIIQcgEiAFKAIYNgJYIBIgBSkCEDcDUCASIAUpAgg3AyggEiAFKQIQNwMwIBIgBSgCGDYCOCASIAUpAgA3AyBBACAEIBJBIGoQPmsiBSAFIARLGyEEDAMLIA1BAWohDQwHCyAFKAIAIQpBACEEIA8gGigCCAR/IAQFIBooAgwLayIEQYAgTQ0BCyAcIB02AiggHCAHNgIkIBwgCjYCICAcIDE2AhwgHCASKAJYNgI0IBwgEikDUDcCLAwBCyAcIARBAWoiCUEcbGoiBSAdNgIMIAUgBzYCCCAFIAo2AgQgBSAxNgIAIAUgEikDUDcCECAFIBIoAlg2AhggCSEdIAQNAQtBASEdQQEhCQwBCwNAIBIgHCAEQRxsaiIFIghBGGooAgA2AhggEiAFKQIQNwMQIBIgBSkCCDcDCCASIAUpAgA3AwAgEhA+IQcgHCAdQX9qIh1BHGxqIgYgCCgCGDYCGCAGIAUpAhA3AhAgBiAFKQIINwIIIAYgBSkCADcCACAEIAdLIQVBACAEIAdrIgYgBiAESxshBCAFDQALIB0gCUsNAQsDQCAcIB1BHGxqIgQoAgwhBgJ/IAMgBmogBCgCCCIPRQ0AGgJAAkAgBCgCBCIIQQNPBEAgAiACKQIANwIEIAhBfmohBAwBCwJAAkACQAJAIAggBkVqIgUOBAUBAQABCyACKAIAQX9qIQQMAQsgAiAFQQJ0aigCACEEIAVBAkkNAQsgAiACKAIENgIICyACIAIoAgA2AgQLIAIgBDYCAAsgJiAGIAMgCCAPEFcgD0F9aiEOIAEoAgwhBAJAAkAgAyAGaiIFIDBNBEAgBCADEBwgASgCDCEEIAZBEE0EQCABIAQgBmo2AgwMAwsgBEEQaiADQRBqIgcQHCAEQSBqIANBIGoQHCAGQTFIDQEgBCAGaiEKIARBMGohBANAIAQgB0EgaiIFEBwgBEEQaiAHQTBqEBwgBSEHIARBIGoiBCAKSQ0ACwwBCyAEIAMgBSAwECILIAEgASgCDCAGajYCDCAGQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyABKAIEIgQgCEEBajYCACAEIAY7AQQgDkGAgARPBEAgAUECNgIkIAEgBCABKAIAa0EDdTYCKAsgBCAOOwEGIAEgBEEIajYCBCAGIA9qIANqIgMLIQ0gHUEBaiIdIAlNDQALCyAmQQIQUQsgDSAuSQ0ACwsgEkHgAGokACAMIANrC+NIAS9/IwBB4ABrIhEkACAAKAKEASEGIAAoAgQhCCAAKAKIASEFIAAoAgwhByARIAAoAhg2AlwgACgCPCEcIABBQGsoAgAhGyAAQSxqIicgAyAEQQAQWSADIAcgCGogA0ZqIg0gAyAEaiIMQXhqIi9JBEAgBUH/HyAFQf8fSRshMCAMQWBqITFBA0EEIAZBA0YbIi5Bf2ohKANAAkACQAJAAkACQAJAAkACQAJAIAAoAgQiBSAAKAIYIgRqIA1LDQAgDSADayEkIAAoAoQBIQYgBCANIAVrIghJBEADQCAAIAQgBWogDCAGQQEQQSAEaiIEIAhJDQALCyAkRSEZIAAgCDYCGAJAAkACQAJAAkAgBkF9ag4FAAECAwMBC0EAIQlBACANIAAoAgQiC2siB0F/IAAoAnhBf2p0QX9zIhVrIgQgBCAHSxshIyAAKAIgIA0gACgCfEEDEB5BAnRqIg4oAgAhBSAHIAAoAhAgACgCFCAHIAAoAnQQJyIEayETIARBASAEGyEXQQNBBCAkGyEdIAAoAigiHyAHIBVxQQN0aiIKQQRqIRggACgCiAEiBEH/HyAEQf8fSRshFiANQQNqIQ8gB0EJaiESIAcgACgCDCIeayEgIAsgHmohFCAAKAIIIhAgHmohGiAAKAKAASEhICghBiAZIQQDQAJAAn8CfyAEQQNGBEAgAigCAEF/agwBCyACIARBAnRqKAIACyIIQX9qIiIgIEkEQCANQQMQHyANIAhrQQMQH0cNAiAPIA8gCGsgDBAdDAELICIgE08NASAeIAcgCGsiCEF/c2pBA0kNASANQQMQHyAIIBBqIghBAxAfRw0BIA8gCEEDaiAMIBogFBAgC0EDaiIIIAZNDQAgHCAJQQN0aiIGIAg2AgQgBiAEIBlrNgIAIAlBAWohCSAIIBZLDQUgCCIGIA1qIAxGDQULIARBAWoiBCAdSQ0ACwJAIAZBAksNAEECIQYgCyAAKAIcIAAoAiQgEUHcAGogDRBAIgQgF0kNACAHIARrIghB//8PSw0AAn8gBC
AeTwRAIA0gBCALaiAMEB0MAQsgDSAEIBBqIAwgGiAUECALIgRBA0kNACAcIAQ2AgQgHCAIQQJqNgIAIAQgFk0EQEEBIQkgBCEGIAQgDWogDEcNAQtBASEJIAAgB0EBajYCGAwECyAOIAc2AgACQCAFIBdJDQAgB0ECaiETQX8gIXRBf3MhDkEAIQ9BACEHA0ACfyAPIAcgDyAHSRsiBCAFaiAeTwRAIAQgDWogBSALaiAEaiAMEB0gBGohBCALDAELIBAgCyAEIA1qIAUgEGogBGogDCAaIBQQICAEaiIEIAVqIB5JGwshFiAEIAZLBEAgHCAJQQN0aiIGIAQ2AgQgBiATIAVrNgIAIAQgBWogEiAEIBIgBWtLGyESIAlBAWohCSAEQYAgSw0CIAQhBiAEIA1qIAxGDQILIB8gBSAVcUEDdGohCAJAAkAgBSAWaiAEai0AACAEIA1qLQAASQRAIAogBTYCACAFICNLDQEgEUFAayEKDAQLIBggBTYCACAFICNLBEAgCCEYIAQhBwwCCyARQUBrIRgMAwsgBCEPIAhBBGoiCiEICyAORQ0BIA5Bf2ohDiAIKAIAIgUgF08NAAsLIBhBADYCACAKQQA2AgAgACASQXhqNgIYDAMLQQAhCUEAIA0gACgCBCIYayIHQX8gACgCeEF/anRBf3MiF2siBCAEIAdLGyEUIAAoAiAgDSAAKAJ8QQQQHkECdGoiFigCACEFIAcgACgCECAAKAIUIAcgACgCdBAnIgRrIQ4gBEEBIAQbIRpBA0EEICQbIRMgACgCKCIdIAcgF3FBA3RqIh5BBGohCyAAKAKIASIEQf8fIARB/x9JGyEfIA1BBGohDyAHQQlqIRIgByAAKAIMIgprISAgCiAYaiEVIAAoAggiECAKaiEjIAAoAoABISEgKCEGIBkhBANAAkACfwJ/IARBA0YEQCACKAIAQX9qDAELIAIgBEECdGooAgALIghBf2oiIiAgSQRAIA1BBBAfIA0gCGtBBBAfRw0CIA8gDyAIayAMEB0MAQsgIiAOTw0BIAogByAIayIIQX9zakEDSQ0BIA1BBBAfIAggEGoiCEEEEB9HDQEgDyAIQQRqIAwgIyAVECALQQRqIgggBk0NACAcIAlBA3RqIgYgCDYCBCAGIAQgGWs2AgAgCUEBaiEJIAggH0sNBCAIIgYgDWogDEYNBAsgBEEBaiIEIBNJDQALIBYgBzYCAAJAIAUgGkkNACAHQQJqIRNBfyAhdEF/cyEOQQAhD0EAIQcDQAJ/IA8gByAPIAdJGyIEIAVqIApPBEAgBCANaiAFIBhqIARqIAwQHSAEaiEEIBgMAQsgECAYIAQgDWogBSAQaiAEaiAMICMgFRAgIARqIgQgBWogCkkbCyEWIAQgBksEQCAcIAlBA3RqIgYgBDYCBCAGIBMgBWs2AgAgBCAFaiASIAQgEiAFa0sbIRIgCUEBaiEJIARBgCBLDQIgBCEGIAQgDWogDEYNAgsgHSAFIBdxQQN0aiEIAkACQCAFIBZqIARqLQAAIAQgDWotAABJBEAgHiAFNgIAIAUgFEsNASARQUBrIR4MBAsgCyAFNgIAIAUgFEsEQCAIIQsgBCEHDAILIBFBQGshCwwDCyAEIQ8gCEEEaiIeIQgLIA5FDQEgDkF/aiEOIAgoAgAiBSAaTw0ACwsgC0EANgIAIB5BADYCACAAIBJBeGo2AhgMAgtBACEJQQAgDSAAKAIEIhhrIgdBfyAAKAJ4QX9qdEF/cyIXayIEIAQgB0sbIRQgACgCICANIAAoAnxBBRAeQQJ0aiIWKAIAIQUgByAAKAIQIAAoAhQgByAAKAJ0ECciBGshDiAEQQEgBBshGkEDQQQgJBshEyAAKAIoIh0gByAXcUEDdGoiHkEEaiELIAAoAogBIgRB/x8gBEH/H0kbIR8gDUEEaiEPIAdBCWohEiAHIAAoAgwiCmshICAKIBhqIRUgACgCCCIQIApqISMgACgCgAEhISAoIQYgGSEEA0ACQAJ/An8gBEEDRgRAIAIoAgBBf2oMAQsgAiAEQQJ0aigCAAsiCEF/aiIiICBJBEAgDUEEEB8gDSAIa0EEEB9HDQIgDyAPIAhrIAwQHQwBCyAiIA5PDQEgCiAHIAhrIghBf3NqQQNJDQEgDUEEEB8gCCAQaiIIQQQQH0cNASAPIAhBBGogDCAjIBUQIAtBBGoiCCAGTQ0AIBwgCUEDdGoiBiAINgIEIAYgBCAZazYCACAJQQFqIQkgCCAfSw0DIAgiBiANaiAMRg0DCyAEQQFqIgQgE0kNAAsgFiAHNgIAAkAgBSAaSQ0AIAdBAmohE0F/ICF0QX9zIQ5BACEPQQAhBwNAAn8gDyAHIA8gB0kbIgQgBWogCk8EQCAEIA1qIAUgGGogBGogDBAdIARqIQQgGAwBCyAQIBggBCANaiAFIBBqIARqIAwgIyAVECAgBGoiBCAFaiAKSRsLIRYgBCAGSwRAIBwgCUEDdGoiBiAENgIEIAYgEyAFazYCACAEIAVqIBIgBCASIAVrSxshEiAJQQFqIQkgBEGAIEsNAiAEIQYgBCANaiAMRg0CCyAdIAUgF3FBA3RqIQgCQAJAIAUgFmogBGotAAAgBCANai0AAEkEQCAeIAU2AgAgBSAUSw0BIBFBQGshHgwECyALIAU2AgAgBSAUSwRAIAghCyAEIQcMAgsgEUFAayELDAMLIAQhDyAIQQRqIh4hCAsgDkUNASAOQX9qIQ4gCCgCACIFIBpPDQALCyALQQA2AgAgHkEANgIAIAAgEkF4ajYCGAwBC0EAIQlBACANIAAoAgQiGGsiB0F/IAAoAnhBf2p0QX9zIhdrIgQgBCAHSxshFCAAKAIgIA0gACgCfEEGEB5BAnRqIhYoAgAhBSAHIAAoAhAgACgCFCAHIAAoAnQQJyIEayEOIARBASAEGyEaQQNBBCAkGyETIAAoAigiHSAHIBdxQQN0aiIeQQRqIQsgACgCiAEiBEH/HyAEQf8fSRshHyANQQRqIQ8gB0EJaiESIAcgACgCDCIKayEgIAogGGohFSAAKAIIIhAgCmohIyAAKAKAASEhICghBiAZIQQDQAJAAn8CfyAEQQNGBEAgAigCAEF/agwBCyACIARBAnRqKAIACyIIQX9qIiIgIEkEQCANQQQQHyANIAhrQQQQH0cNAiAPIA8gCGsgDBAdDAELICIgDk8NASAKIAcgCGsiCEF/c2pBA0kNASANQQQQHyAIIBBqIghBBBAfRw0BIA8gCEEEaiAMICMgFRAgC0EEaiIIIAZNDQAgHCAJQQN0aiIGIAg2AgQgBiAEIBlrNgIAIAlBAWohCSAIIB9LDQIgCCIGIA1qIAxGDQILIARBAWoiBCATSQ0ACyAWIAc2AgACQCAFIBpJDQAgB0ECaiETQX8gIXRBf3MhDkEAIQ9BACEHA0ACfyAPIAcgDyAHSRsiBCAFaiAKTwRAIAQgDWogBSAYaiAEaiAMEB0gBGohBCAYDAELIBAgGCAEIA1qIAUgEGogBGogDCAjIBUQICAEa
iIEIAVqIApJGwshFiAEIAZLBEAgHCAJQQN0aiIGIAQ2AgQgBiATIAVrNgIAIAQgBWogEiAEIBIgBWtLGyESIAlBAWohCSAEQYAgSw0CIAQhBiAEIA1qIAxGDQILIB0gBSAXcUEDdGohCAJAAkAgBSAWaiAEai0AACAEIA1qLQAASQRAIB4gBTYCACAFIBRLDQEgEUFAayEeDAQLIAsgBTYCACAFIBRLBEAgCCELIAQhBwwCCyARQUBrIQsMAwsgBCEPIAhBBGoiHiEICyAORQ0BIA5Bf2ohDiAIKAIAIgUgGk8NAAsLIAtBADYCACAeQQA2AgAgACASQXhqNgIYCyAJRQ0AIBsgAigCADYCECAbIAIoAgQ2AhQgAigCCCEEIBsgJDYCDCAbQQA2AgggGyAENgIYIBsgAyAkICdBABBYIgU2AgAgHCAJQX9qQQN0aiIEKAIEIgggMEsEQCAEKAIAIQcMAwtBASEEQQAgJ0EAEC0hBgNAIBsgBEEcbGpBgICAgAQ2AgAgBEEBaiIEIC5HDQALIAUgBmohFkEAIQsgLiEIA0AgHCALQQN0aiIEKAIEIQcgEUFAayACIAQoAgAiDyAZED8gCCAHTQRAIA9BAWoQJCIGQQl0QbO0f2pBMyAGQRNLGyEYIAZBCHRBgCBqIQ4DQCAIQX1qIQQCfyAAKAJkQQFGBEAgBBAuIA5qDAELIAAoAmAgGGogACgCOCAGQQJ0aigCABAuayAAKAJcaiAEEDxBAnQiBEGQpAFqKAIAIAZqQQh0aiAAKAI0IARqKAIAEC5rCyEFIBsgCEEcbGoiBCAkNgIMIAQgDzYCBCAEIAg2AgggBCAFIBZqNgIAIAQgESkDQDcCECAEIBEoAkg2AhggCEEBaiIIIAdNDQALCyALQQFqIgsgCUcNAAtBASEPAkAgCEF/aiIERQRAQQAhBAwBCwNAQQEhBSAbIA9Bf2pBHGxqIggoAghFBEAgCCgCDEEBaiEFCyANIA9qIgpBf2pBASAnQQAQUiAIKAIAaiAFICdBABAtaiAFQX9qICdBABAtayIGIBsgD0EcbGoiGigCACIWTARAIBogBTYCDCAaQgA3AgQgGiAGNgIAIBogCCgCGDYCGCAaIAgpAhA3AhAgBiEWCyAKIC9LBH8gD0EBagUgBCAPRgRAIA8hBAwDCwJAIBsgD0EBaiIeQRxsaigCACAWQYABakwNAEEAISQgGigCCCIIRQRAIBooAgwhJAtBACAnQQAQLSEzIAAoAgQiBiAAKAIYIgVqIApLDQAgACgChAEhByAFIAogBmsiCUkEQANAIAAgBSAGaiAMIAdBARBBIAVqIgUgCUkNAAsLIAhBAEchGCAaQRBqISMgACAJNgIYAkACQAJAAkACQCAHQX1qDgUAAQIDAwELQQAhEEEAIAogACgCBCIOayIJQX8gACgCeEF/anRBf3MiImsiBSAFIAlLGyEmIAAoAiAgCiAAKAJ8QQMQHkECdGoiFCgCACEGIAkgACgCECAAKAIUIAkgACgCdBAnIgVrISUgBUEBIAUbIR9BBEEDIAgbISkgACgCKCIqIAkgInFBA3RqIhNBBGohEiAAKAKIASIFQf8fIAVB/x9JGyEZIApBA2ohCyAJQQlqIRcgCSAAKAIMIhVrISsgDiAVaiEgIAAoAggiHSAVaiEhIAAoAoABISwgKCEIIBghBQNAAkACfwJ/IAVBA0YEQCAjKAIAQX9qDAELIBogBUECdGooAhALIgdBf2oiLSArSQRAIApBAxAfIAogB2tBAxAfRw0CIAsgCyAHayAMEB0MAQsgLSAlTw0BIBUgCSAHayIHQX9zakEDSQ0BIApBAxAfIAcgHWoiB0EDEB9HDQEgCyAHQQNqIAwgISAgECALQQNqIgcgCE0NACAcIBBBA3RqIgggBzYCBCAIIAUgGGs2AgAgEEEBaiEQIAcgGUsNBSAHIgggCmogDEYNBQsgBUEBaiIFIClJDQALAkAgCEECSw0AQQIhCCAOIAAoAhwgACgCJCARQdwAaiAKEEAiBSAfSQ0AIAkgBWsiB0H//w9LDQACfyAFIBVPBEAgCiAFIA5qIAwQHQwBCyAKIAUgHWogDCAhICAQIAsiBUEDSQ0AIBwgBTYCBCAcIAdBAmo2AgAgBSAZTQRAQQEhECAFIQggBSAKaiAMRw0BC0EBIRAgACAJQQFqNgIYDAQLIBQgCTYCAAJAIAYgH0kNACAJQQJqISVBfyAsdEF/cyEUQQAhCUEAIQsDQAJ/IAkgCyAJIAtJGyIFIAZqIBVPBEAgBSAKaiAGIA5qIAVqIAwQHSAFaiEFIA4MAQsgHSAOIAUgCmogBiAdaiAFaiAMICEgIBAgIAVqIgUgBmogFUkbCyEZIAUgCEsEQCAcIBBBA3RqIgggBTYCBCAIICUgBms2AgAgBSAGaiAXIAUgFyAGa0sbIRcgEEEBaiEQIAVBgCBLDQIgBSEIIAUgCmogDEYNAgsgKiAGICJxQQN0aiEHAkACQCAGIBlqIAVqLQAAIAUgCmotAABJBEAgEyAGNgIAIAYgJksNASARQUBrIRMMBAsgEiAGNgIAIAYgJksEQCAHIRIgBSELDAILIBFBQGshEgwDCyAFIQkgB0EEaiITIQcLIBRFDQEgFEF/aiEUIAcoAgAiBiAfTw0ACwsgEkEANgIAIBNBADYCACAAIBdBeGo2AhgMAwtBACEQQQAgCiAAKAIEIhJrIglBfyAAKAJ4QX9qdEF/cyIfayIFIAUgCUsbISAgACgCICAKIAAoAnxBBBAeQQJ0aiIZKAIAIQYgCSAAKAIQIAAoAhQgCSAAKAJ0ECciBWshFCAFQQEgBRshIUEEQQMgCBshJSAAKAIoIikgCSAfcUEDdGoiFUEEaiEOIAAoAogBIgVB/x8gBUH/H0kbISogCkEEaiELIAlBCWohFyAJIAAoAgwiE2shKyASIBNqISIgACgCCCIdIBNqISYgACgCgAEhLCAoIQggGCEFA0ACQAJ/An8gBUEDRgRAICMoAgBBf2oMAQsgGiAFQQJ0aigCEAsiB0F/aiItICtJBEAgCkEEEB8gCiAHa0EEEB9HDQIgCyALIAdrIAwQHQwBCyAtIBRPDQEgEyAJIAdrIgdBf3NqQQNJDQEgCkEEEB8gByAdaiIHQQQQH0cNASALIAdBBGogDCAmICIQIAtBBGoiByAITQ0AIBwgEEEDdGoiCCAHNgIEIAggBSAYazYCACAQQQFqIRAgByAqSw0EIAciCCAKaiAMRg0ECyAFQQFqIgUgJUkNAAsgGSAJNgIAAkAgBiAhSQ0AIAlBAmohJUF/ICx0QX9zIRRBACEJQQAhCwNAAn8gCSALIAkgC0kbIgUgBmogE08EQCAFIApqIAYgEmogBWogDBAdIAVqIQUgEgwBCyAdIBIgBSAKaiAGIB1qIAVqIAwgJiAiECAgBWoiBSAGaiATSRsLIRkgBSAISwRAIBwgEEEDdGoiCCAFNgIEIAggJSAGazYCACAFIAZqIBcgBSAXIAZrSxshFyAQQQFqIRAgBUGA
IEsNAiAFIQggBSAKaiAMRg0CCyApIAYgH3FBA3RqIQcCQAJAIAYgGWogBWotAAAgBSAKai0AAEkEQCAVIAY2AgAgBiAgSw0BIBFBQGshFQwECyAOIAY2AgAgBiAgSwRAIAchDiAFIQsMAgsgEUFAayEODAMLIAUhCSAHQQRqIhUhBwsgFEUNASAUQX9qIRQgBygCACIGICFPDQALCyAOQQA2AgAgFUEANgIAIAAgF0F4ajYCGAwCC0EAIRBBACAKIAAoAgQiEmsiCUF/IAAoAnhBf2p0QX9zIh9rIgUgBSAJSxshICAAKAIgIAogACgCfEEFEB5BAnRqIhkoAgAhBiAJIAAoAhAgACgCFCAJIAAoAnQQJyIFayEUIAVBASAFGyEhQQRBAyAIGyElIAAoAigiKSAJIB9xQQN0aiIVQQRqIQ4gACgCiAEiBUH/HyAFQf8fSRshKiAKQQRqIQsgCUEJaiEXIAkgACgCDCITayErIBIgE2ohIiAAKAIIIh0gE2ohJiAAKAKAASEsICghCCAYIQUDQAJAAn8CfyAFQQNGBEAgIygCAEF/agwBCyAaIAVBAnRqKAIQCyIHQX9qIi0gK0kEQCAKQQQQHyAKIAdrQQQQH0cNAiALIAsgB2sgDBAdDAELIC0gFE8NASATIAkgB2siB0F/c2pBA0kNASAKQQQQHyAHIB1qIgdBBBAfRw0BIAsgB0EEaiAMICYgIhAgC0EEaiIHIAhNDQAgHCAQQQN0aiIIIAc2AgQgCCAFIBhrNgIAIBBBAWohECAHICpLDQMgByIIIApqIAxGDQMLIAVBAWoiBSAlSQ0ACyAZIAk2AgACQCAGICFJDQAgCUECaiElQX8gLHRBf3MhFEEAIQlBACELA0ACfyAJIAsgCSALSRsiBSAGaiATTwRAIAUgCmogBiASaiAFaiAMEB0gBWohBSASDAELIB0gEiAFIApqIAYgHWogBWogDCAmICIQICAFaiIFIAZqIBNJGwshGSAFIAhLBEAgHCAQQQN0aiIIIAU2AgQgCCAlIAZrNgIAIAUgBmogFyAFIBcgBmtLGyEXIBBBAWohECAFQYAgSw0CIAUhCCAFIApqIAxGDQILICkgBiAfcUEDdGohBwJAAkAgBiAZaiAFai0AACAFIApqLQAASQRAIBUgBjYCACAGICBLDQEgEUFAayEVDAQLIA4gBjYCACAGICBLBEAgByEOIAUhCwwCCyARQUBrIQ4MAwsgBSEJIAdBBGoiFSEHCyAURQ0BIBRBf2ohFCAHKAIAIgYgIU8NAAsLIA5BADYCACAVQQA2AgAgACAXQXhqNgIYDAELQQAhEEEAIAogACgCBCISayIJQX8gACgCeEF/anRBf3MiH2siBSAFIAlLGyEgIAAoAiAgCiAAKAJ8QQYQHkECdGoiGSgCACEGIAkgACgCECAAKAIUIAkgACgCdBAnIgVrIRQgBUEBIAUbISFBBEEDIAgbISUgACgCKCIpIAkgH3FBA3RqIhVBBGohDiAAKAKIASIFQf8fIAVB/x9JGyEqIApBBGohCyAJQQlqIRcgCSAAKAIMIhNrISsgEiATaiEiIAAoAggiHSATaiEmIAAoAoABISwgKCEIIBghBQNAAkACfwJ/IAVBA0YEQCAjKAIAQX9qDAELIBogBUECdGooAhALIgdBf2oiLSArSQRAIApBBBAfIAogB2tBBBAfRw0CIAsgCyAHayAMEB0MAQsgLSAUTw0BIBMgCSAHayIHQX9zakEDSQ0BIApBBBAfIAcgHWoiB0EEEB9HDQEgCyAHQQRqIAwgJiAiECALQQRqIgcgCE0NACAcIBBBA3RqIgggBzYCBCAIIAUgGGs2AgAgEEEBaiEQIAcgKksNAiAHIgggCmogDEYNAgsgBUEBaiIFICVJDQALIBkgCTYCAAJAIAYgIUkNACAJQQJqISVBfyAsdEF/cyEUQQAhCUEAIQsDQAJ/IAkgCyAJIAtJGyIFIAZqIBNPBEAgBSAKaiAGIBJqIAVqIAwQHSAFaiEFIBIMAQsgHSASIAUgCmogBiAdaiAFaiAMICYgIhAgIAVqIgUgBmogE0kbCyEZIAUgCEsEQCAcIBBBA3RqIgggBTYCBCAIICUgBms2AgAgBSAGaiAXIAUgFyAGa0sbIRcgEEEBaiEQIAVBgCBLDQIgBSEIIAUgCmogDEYNAgsgKSAGIB9xQQN0aiEHAkACQCAGIBlqIAVqLQAAIAUgCmotAABJBEAgFSAGNgIAIAYgIEsNASARQUBrIRUMBAsgDiAGNgIAIAYgIEsEQCAHIQ4gBSELDAILIBFBQGshDgwDCyAFIQkgB0EEaiIVIQcLIBRFDQEgFEF/aiEUIAcoAgAiBiAhTw0ACwsgDkEANgIAIBVBADYCACAAIBdBeGo2AhgLIBBFDQAgHCAQQX9qQQN0aiIFKAIEIgggMEsgCCAPakGAIE9yDQUgFiAzaiEZQQAhCANAIBFBQGsgIyAcIAhBA3RqIgYoAgAiCSAYED8gLiEHIAgEQCAGQXxqKAIAQQFqIQcLAkAgBigCBCIFIAdJDQAgCUEBahAkIhZBCXRBs7R/akEzIBZBE0sbIRIgFkEIdEGAIGohCgNAIAVBfWohCyAFIA9qIQYCfyAAKAJkQQFGBEAgCxAuIApqDAELIAAoAmAgEmogACgCOCAWQQJ0aigCABAuayAAKAJcaiALEDxBAnQiC0GQpAFqKAIAIBZqQQh0aiAAKAI0IAtqKAIAEC5rCyAZaiELAkAgBiAETQRAIAsgGyAGQRxsaigCAEgNAQwDCwNAIBsgBEEBaiIEQRxsakGAgICABDYCACAEIAZJDQALCyAbIAZBHGxqIgYgJDYCDCAGIAk2AgQgBiAFNgIIIAYgCzYCACAGIBEpA0A3AhAgBiARKAJINgIYIAVBf2oiBSAHTw0ACwsgCEEBaiIIIBBHDQALCyAeCyIPIARNDQALCyAbIARBHGxqIgUoAgwhJCAFKAIEIQcgBSgCACEyIAUoAgghCCARIAUoAhg2AlggESAFKQIQNwNQIBEgBSkCCDcDKCARIAUpAhA3AzAgESAFKAIYNgI4IBEgBSkCADcDIEEAIAQgEUEgahA+ayIFIAUgBEsbIQQMAwsgDUEBaiENDAcLIAUoAgAhB0EAIQQgDyAaKAIIBH8gBAUgGigCDAtrIgRBgCBNDQELIBsgJDYCKCAbIAg2AiQgGyAHNgIgIBsgMjYCHCAbIBEoAlg2AjQgGyARKQNQNwIsDAELIBsgBEEBaiIWQRxsaiIFICQ2AgwgBSAINgIIIAUgBzYCBCAFIDI2AgAgBSARKQNQNwIQIAUgESgCWDYCGCAWIQ4gBA0BC0EBIQ5BASEWDAELA0AgESAbIARBHGxqIgUiB0EYaigCADYCGCARIAUpAhA3AxAgESAFKQIINwMIIBEgBSkCADcDACARED4hCCAbIA5Bf2oiDkEcbGoiBiAHKAIYNgIYIAYgBSkCEDcCECAGIAUpAgg3AgggBiAFKQIANwIAIAQgCEshBUEAIAQgCGsiBiA
GIARLGyEEIAUNAAsgDiAWSw0BCwNAIBsgDkEcbGoiBCgCDCEGAn8gAyAGaiAEKAIIIg9FDQAaAkACQCAEKAIEIgdBA08EQCACIAIpAgA3AgQgB0F+aiEEDAELAkACQAJAAkAgByAGRWoiBQ4EBQEBAAELIAIoAgBBf2ohBAwBCyACIAVBAnRqKAIAIQQgBUECSQ0BCyACIAIoAgQ2AggLIAIgAigCADYCBAsgAiAENgIACyAnIAYgAyAHIA8QVyAPQX1qIQkgASgCDCEEAkACQCADIAZqIgUgMU0EQCAEIAMQHCABKAIMIQQgBkEQTQRAIAEgBCAGajYCDAwDCyAEQRBqIANBEGoiCBAcIARBIGogA0EgahAcIAZBMUgNASAEIAZqIQsgBEEwaiEEA0AgBCAIQSBqIgUQHCAEQRBqIAhBMGoQHCAFIQggBEEgaiIEIAtJDQALDAELIAQgAyAFIDEQIgsgASABKAIMIAZqNgIMIAZBgIAESQ0AIAFBATYCJCABIAEoAgQgASgCAGtBA3U2AigLIAEoAgQiBCAHQQFqNgIAIAQgBjsBBCAJQYCABE8EQCABQQI2AiQgASAEIAEoAgBrQQN1NgIoCyAEIAk7AQYgASAEQQhqNgIEIAYgD2ogA2oiAwshDSAOQQFqIg4gFk0NAAsLICdBABBRCyANIC9JDQALCyARQeAAaiQAIAwgA2sL+lsBNn8jAEHgAGsiFSQAIAAoAoQBIQYgACgCBCEHIAAoAogBIQUgACgCDCEJIBUgACgCGDYCXCAAKAI8IRkgAEFAaygCACEgIABBLGoiLSADIARBAhBZIAMgByAJaiADRmoiECADIARqIhJBeGoiN0kEQCAFQf8fIAVB/x9JGyE4IBJBYGohOUEDQQQgBkEDRhsiNkF/aiEuA0ACQAJAAkACQAJAAkACQAJAAkAgACgCBCIFIAAoAhgiBGogEEsNACAQIANrISIgACgChAEhBiAEIBAgBWsiB0kEQANAIAAgBCAFaiASIAZBABBBIARqIgQgB0kNAAsLICJFISggACAHNgIYAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkAgBkF9ag4FAAECAwMBC0EAIQlBACAQIAAoAgQiFGsiDkF/IAAoAnhBf2p0QX9zIhtrIgQgBCAOSxshHCAAKAIgIBAgACgCfEEDEB5BAnRqIiQoAgAhCCAAKAJwIhEoAgAiHSARKAIEIhNrIhZBfyARKAJ4QX9qdEF/cyIeayARKAIQIhogFiAaayAeSxshHyAAKAIQIAAoAhQgDiAAKAJ0ECciBEEBIAQbISUgEyAEIBZrIhhrISkgDiAaayAYayEqQQNBBCAiGyEmIAAoAigiIyAOIBtxQQN0aiIXQQRqIQ0gACgCiAEiBEH/HyAEQf8fSRshByAQQQNqIQYgDkEJaiELIA4gACgCDCIPayEsIA8gFGohISARKAJ8ISsgACgCgAEhJyAuIQwgKCEEA0ACQAJ/An8gBEEDRgRAIAIoAgBBf2oMAQsgAiAEQQJ0aigCAAsiCkF/aiIFICxJBEAgEEEDEB8gECAKa0EDEB9HDQIgBiAGIAprIBIQHQwBCyAFICpPDQEgDyAOIAprIgVBf3NqQQNJDQEgEEEDEB8gBSApaiIFQQMQH0cNASAGIAVBA2ogEiAdICEQIAtBA2oiBSAMTQ0AIBkgCUEDdGoiDCAFNgIEIAwgBCAoazYCACAJQQFqIQkgBSAHSw0NIAUiDCAQaiASRg0NCyAEQQFqIgQgJkkNAAsCQCAMQQJLDQBBAiEMIBQgACgCHCAAKAIkIBVB3ABqIBAQQCIEICVJDQAgDiAEayIFQf//D0sNACAQIAQgFGogEhAdIgRBA0kNACAZIAQ2AgQgGSAFQQJqNgIAIAQgB00EQEEBIQkgBCIMIBBqIBJHDQELQQEhCSAAIA5BAWo2AhgMDAsgJCAONgIAQX8gJ3RBf3MhDwJAIAggJUkEQCAPIQUMAQsgDkECaiEkQQAhB0EAIQYDQCAQIAcgBiAHIAZJGyIEaiAIIBRqIgUgBGogEhAdIARqIgQgDEsEQCAZIAlBA3RqIgwgBDYCBCAMICQgCGs2AgAgBCAIaiALIAQgCyAIa0sbIQsgCUEBaiEJIAQgEGogEkYgBEGAIEtyDQYgBCEMCyAjIAggG3FBA3RqIQoCQAJAIAQgBWotAAAgBCAQai0AAEkEQCAXIAg2AgAgCCAcSw0BIBVBQGshFyAPIQUMBAsgDSAINgIAIAggHEsEQCAKIQ0gBCEGDAILIBVBQGshDSAPIQUMAwsgBCEHIApBBGoiFyEKCyAPQX9qIgUgD08NASAFIQ8gCigCACIIICVPDQALCyANQQA2AgAgF0EANgIAIAVFDQogESgCICAQICtBAxAeQQJ0aigCACIKIBpNDQogESgCKCEHIA5BAmohFyAUIBhqIQ1BACEIQQAhDwNAIBAgCCAPIAggD0kbIgRqIAogE2ogBGogEiAdICEQICAEaiIEIAxLBEAgGSAJQQN0aiIGIAQ2AgQgBiAXIAogGGoiBms2AgAgBCAGaiALIAQgCyAGa0sbIQsgCUEBaiEJIARBgCBLDQwgBCIMIBBqIBJGDQwLIAogH00NCyAFQX9qIgVFDQsgBCAIIBMgDSAEIApqIBZJGyAKaiAEai0AACAEIBBqLQAASSIGGyEIIA8gBCAGGyEPIAcgCiAecUEDdGogBkECdGooAgAiCiAaSw0ACwwKC0EAIQlBACAQIAAoAgQiGmsiC0F/IAAoAnhBf2p0QX9zIhhrIgQgBCALSxshGyAAKAIgIBAgACgCfEEEEB5BAnRqIg8oAgAhCCAAKAJwIhEoAgAiHCARKAIEIhNrIhZBfyARKAJ4QX9qdEF/cyIdayARKAIQIhQgFiAUayAdSxshJCAAKAIQIAAoAhQgCyAAKAJ0ECciBEEBIAQbIR4gEyAEIBZrIiVrIR8gCyAUayAlayEpQQNBBCAiGyEqIAAoAigiJiALIBhxQQN0aiIXQQRqIQ0gACgCiAEiBEH/HyAEQf8fSRshIyAQQQRqIQYgC0EJaiEOIAsgACgCDCIHayEsIAcgGmohISARKAJ8ISsgACgCgAEhJyAuIQwgKCEEA0ACQAJ/An8gBEEDRgRAIAIoAgBBf2oMAQsgAiAEQQJ0aigCAAsiCkF/aiIFICxJBEAgEEEEEB8gECAKa0EEEB9HDQIgBiAGIAprIBIQHQwBCyAFIClPDQEgByALIAprIgVBf3NqQQNJDQEgEEEEEB8gBSAfaiIFQQQQH0cNASAGIAVBBGogEiAcICEQIAtBBGoiBSAMTQ0AIBkgCUEDdGoiDCAFNgIEIAwgBCAoazYCACAJQQFqIQkgBSAjSw0MIAUiDCAQaiASRg0MCyAEQQFqIgQgKkkNAAsgDyALNgIAQX8gJ3RBf3MhDwJAIAggHkkEQCAPIQUMAQsgC0ECaiEfQQAhB0EAIQYDQCAQIAcgBiAHIAZJGyIEaiAIIBpqIgUgBGogEhAdIARqIgQgDEsEQC
AZIAlBA3RqIgwgBDYCBCAMIB8gCGs2AgAgBCAIaiAOIAQgDiAIa0sbIQ4gCUEBaiEJIAQgEGogEkYgBEGAIEtyDQYgBCEMCyAmIAggGHFBA3RqIQoCQAJAIAQgBWotAAAgBCAQai0AAEkEQCAXIAg2AgAgCCAbSw0BIBVBQGshFyAPIQUMBAsgDSAINgIAIAggG0sEQCAKIQ0gBCEGDAILIBVBQGshDSAPIQUMAwsgBCEHIApBBGoiFyEKCyAPQX9qIgUgD08NASAFIQ8gCigCACIIIB5PDQALCyANQQA2AgAgF0EANgIAIAVFDQggESgCICAQICtBBBAeQQJ0aigCACIKIBRNDQggESgCKCEHIAtBAmohFyAaICVqIQ1BACEIQQAhDwNAIBAgCCAPIAggD0kbIgRqIAogE2ogBGogEiAcICEQICAEaiIEIAxLBEAgGSAJQQN0aiIGIAQ2AgQgBiAXIAogJWoiBms2AgAgBCAGaiAOIAQgDiAGa0sbIQ4gCUEBaiEJIARBgCBLDQogBCIMIBBqIBJGDQoLIAogJE0NCSAFQX9qIgVFDQkgBCAIIBMgDSAEIApqIBZJGyAKaiAEai0AACAEIBBqLQAASSIGGyEIIA8gBCAGGyEPIAcgCiAdcUEDdGogBkECdGooAgAiCiAUSw0ACwwIC0EAIQlBACAQIAAoAgQiGmsiC0F/IAAoAnhBf2p0QX9zIhhrIgQgBCALSxshGyAAKAIgIBAgACgCfEEFEB5BAnRqIg8oAgAhCCAAKAJwIhEoAgAiHCARKAIEIhNrIhZBfyARKAJ4QX9qdEF/cyIdayARKAIQIhQgFiAUayAdSxshJCAAKAIQIAAoAhQgCyAAKAJ0ECciBEEBIAQbIR4gEyAEIBZrIiVrIR8gCyAUayAlayEpQQNBBCAiGyEqIAAoAigiJiALIBhxQQN0aiINQQRqIRcgACgCiAEiBEH/HyAEQf8fSRshIyAQQQRqIQYgC0EJaiEOIAsgACgCDCIHayEsIAcgGmohISARKAJ8ISsgACgCgAEhJyAuIQwgKCEEA0ACQAJ/An8gBEEDRgRAIAIoAgBBf2oMAQsgAiAEQQJ0aigCAAsiCkF/aiIFICxJBEAgEEEEEB8gECAKa0EEEB9HDQIgBiAGIAprIBIQHQwBCyAFIClPDQEgByALIAprIgVBf3NqQQNJDQEgEEEEEB8gBSAfaiIFQQQQH0cNASAGIAVBBGogEiAcICEQIAtBBGoiBSAMTQ0AIBkgCUEDdGoiDCAFNgIEIAwgBCAoazYCACAJQQFqIQkgBSAjSw0LIAUiDCAQaiASRg0LCyAEQQFqIgQgKkkNAAsgDyALNgIAQX8gJ3RBf3MhDwJAIAggHkkEQCAPIQUMAQsgC0ECaiEfQQAhB0EAIQYDQCAQIAcgBiAHIAZJGyIEaiAIIBpqIgUgBGogEhAdIARqIgQgDEsEQCAZIAlBA3RqIgwgBDYCBCAMIB8gCGs2AgAgBCAIaiAOIAQgDiAIa0sbIQ4gCUEBaiEJIAQgEGogEkYgBEGAIEtyDQYgBCEMCyAmIAggGHFBA3RqIQoCQAJAIAQgBWotAAAgBCAQai0AAEkEQCANIAg2AgAgCCAbSw0BIBVBQGshDSAPIQUMBAsgFyAINgIAIAggG0sEQCAKIRcgBCEGDAILIBVBQGshFyAPIQUMAwsgBCEHIApBBGoiDSEKCyAPQX9qIgUgD08NASAFIQ8gCigCACIIIB5PDQALCyAXQQA2AgAgDUEANgIAIAVFDQYgESgCICAQICtBBRAeQQJ0aigCACIKIBRNDQYgESgCKCEHIAtBAmohFyAaICVqIQ1BACEIQQAhDwNAIBAgCCAPIAggD0kbIgRqIAogE2ogBGogEiAcICEQICAEaiIEIAxLBEAgGSAJQQN0aiIGIAQ2AgQgBiAXIAogJWoiBms2AgAgBCAGaiAOIAQgDiAGa0sbIQ4gCUEBaiEJIARBgCBLDQggBCIMIBBqIBJGDQgLIAogJE0NByAFQX9qIgVFDQcgBCAIIBMgDSAEIApqIBZJGyAKaiAEai0AACAEIBBqLQAASSIGGyEIIA8gBCAGGyEPIAcgCiAdcUEDdGogBkECdGooAgAiCiAUSw0ACwwGC0EAIQlBACAQIAAoAgQiGmsiC0F/IAAoAnhBf2p0QX9zIhhrIgQgBCALSxshGyAAKAIgIBAgACgCfEEGEB5BAnRqIg8oAgAhCCAAKAJwIhEoAgAiHCARKAIEIhNrIhZBfyARKAJ4QX9qdEF/cyIdayARKAIQIhQgFiAUayAdSxshJCAAKAIQIAAoAhQgCyAAKAJ0ECciBEEBIAQbIR4gEyAEIBZrIiVrIR8gCyAUayAlayEpQQNBBCAiGyEqIAAoAigiJiALIBhxQQN0aiINQQRqIRcgACgCiAEiBEH/HyAEQf8fSRshIyAQQQRqIQYgC0EJaiEOIAsgACgCDCIHayEsIAcgGmohISARKAJ8ISsgACgCgAEhJyAuIQwgKCEEA0ACQAJ/An8gBEEDRgRAIAIoAgBBf2oMAQsgAiAEQQJ0aigCAAsiCkF/aiIFICxJBEAgEEEEEB8gECAKa0EEEB9HDQIgBiAGIAprIBIQHQwBCyAFIClPDQEgByALIAprIgVBf3NqQQNJDQEgEEEEEB8gBSAfaiIFQQQQH0cNASAGIAVBBGogEiAcICEQIAtBBGoiBSAMTQ0AIBkgCUEDdGoiDCAFNgIEIAwgBCAoazYCACAJQQFqIQkgBSAjSw0KIAUiDCAQaiASRg0KCyAEQQFqIgQgKkkNAAsgDyALNgIAQX8gJ3RBf3MhDwJAIAggHkkEQCAPIQUMAQsgC0ECaiEfQQAhB0EAIQYDQCAQIAcgBiAHIAZJGyIEaiAIIBpqIgUgBGogEhAdIARqIgQgDEsEQCAZIAlBA3RqIgwgBDYCBCAMIB8gCGs2AgAgBCAIaiAOIAQgDiAIa0sbIQ4gCUEBaiEJIAQgEGogEkYgBEGAIEtyDQYgBCEMCyAmIAggGHFBA3RqIQoCQAJAIAQgBWotAAAgBCAQai0AAEkEQCANIAg2AgAgCCAbSw0BIBVBQGshDSAPIQUMBAsgFyAINgIAIAggG0sEQCAKIRcgBCEGDAILIBVBQGshFyAPIQUMAwsgBCEHIApBBGoiDSEKCyAPQX9qIgUgD08NASAFIQ8gCigCACIIIB5PDQALCyAXQQA2AgAgDUEANgIAIAVFDQQgESgCICAQICtBBhAeQQJ0aigCACIKIBRNDQQgESgCKCEHIAtBAmohFyAaICVqIQ1BACEIQQAhDwNAIBAgCCAPIAggD0kbIgRqIAogE2ogBGogEiAcICEQICAEaiIEIAxLBEAgGSAJQQN0aiIGIAQ2AgQgBiAXIAogJWoiBms2AgAgBCAGaiAOIAQgDiAGa0sbIQ4gCUEBaiEJIARBgCBLDQYgBCIMIBBqIBJGDQYLIAogJE0NBSAFQX9qIgVFDQUgBCAIIBMgDSAEIApqIBZJGyAKaiAEai0AACAEI
BBqLQAASSIGGyEIIA8gBCAGGyEPIAcgCiAdcUEDdGogBkECdGooAgAiCiAUSw0ACwwECyANQQA2AgAgF0EANgIADAYLIA1BADYCACAXQQA2AgAMBAsgF0EANgIAIA1BADYCAAwCCyAXQQA2AgAgDUEANgIACyAAIA5BeGo2AhgMAwsgACAOQXhqNgIYDAILIAAgDkF4ajYCGAwBCyAAIAtBeGo2AhgLIAlFDQAgICACKAIANgIQICAgAigCBDYCFCACKAIIIQQgICAiNgIMICBBADYCCCAgIAQ2AhggICADICIgLUECEFgiBTYCACAZIAlBf2pBA3RqIgQoAgQiCiA4SwRAIAQoAgAhCAwDC0EBIQRBACAtQQIQLSEGA0AgICAEQRxsakGAgICABDYCACAEQQFqIgQgNkcNAAsgBSAGaiEIQQAhBiA2IQoDQCAZIAZBA3RqIgQoAgQhByAVQUBrIAIgBCgCACIMICgQPyAKIAdNBEAgDEEBahAkIg9BCHRBgCBqIRcDQCAKQX1qIQQCfyAAKAJkQQFGBEAgBBArIBdqDAELIAAoAmAgACgCOCAPQQJ0aigCABArayAAKAJcaiAEEDxBAnQiBEGQpAFqKAIAIA9qQQh0aiAAKAI0IARqKAIAECtrQTNqCyEFICAgCkEcbGoiBCAiNgIMIAQgDDYCBCAEIAo2AgggBCAFIAhqNgIAIAQgFSkDQDcCECAEIBUoAkg2AhggCkEBaiIKIAdNDQALCyAGQQFqIgYgCUcNAAtBASEPAkAgCkF/aiIERQRAQQAhBAwBCwNAQQEhBSAgIA9Bf2pBHGxqIgcoAghFBEAgBygCDEEBaiEFCyAPIBBqIgtBf2pBASAtQQIQUiAHKAIAaiAFIC1BAhAtaiAFQX9qIC1BAhAtayIGICAgD0EcbGoiGigCACIXTARAIBogBTYCDCAaQgA3AgQgGiAGNgIAIBogBygCGDYCGCAaIAcpAhA3AhAgBiEXCwJAIAsgN0sNACAEIA9GBEAgDyEEDAMLQQAhIiAaKAIIIgZFBEAgGigCDCEiC0EAIC1BAhAtISwgACgCBCIHIAAoAhgiBWogC0sNACAAKAKEASEJIAUgCyAHayIMSQRAA0AgACAFIAdqIBIgCUEAEEEgBWoiBSAMSQ0ACwsgBkEARyEoIBpBEGohJSAAIAw2AhgCQAJAAkACQAJAAkACQAJAAkACQAJAAkACQCAJQX1qDgUAAQIDAwELQQAhDkEAIAsgACgCBCIWayIRQX8gACgCeEF/anRBf3MiJGsiBSAFIBFLGyEfIAAoAiAgCyAAKAJ8QQMQHkECdGoiKygCACENIAAoAnAiEygCACIpIBMoAgQiHGsiHUF/IBMoAnhBf2p0QX9zIiprIBMoAhAiGyAdIBtrICpLGyEnIAAoAhAgACgCFCARIAAoAnQQJyIFQQEgBRshHiAcIAUgHWsiIWshLyARIBtrICFrITBBBEEDIAYbITEgACgCKCIyIBEgJHFBA3RqIhhBBGohDCAAKAKIASIFQf8fIAVB/x9JGyEKIAtBA2ohByARQQlqIRQgESAAKAIMIiZrITMgFiAmaiEjIBMoAnwhNCAAKAKAASE1IC4hCSAoIQUDQAJAAn8CfyAFQQNGBEAgJSgCAEF/agwBCyAaIAVBAnRqKAIQCyIIQX9qIgYgM0kEQCALQQMQHyALIAhrQQMQH0cNAiAHIAcgCGsgEhAdDAELIAYgME8NASAmIBEgCGsiBkF/c2pBA0kNASALQQMQHyAGIC9qIgZBAxAfRw0BIAcgBkEDaiASICkgIxAgC0EDaiIGIAlNDQAgGSAOQQN0aiIJIAY2AgQgCSAFIChrNgIAIA5BAWohDiAGIApLDQ0gBiIJIAtqIBJGDQ0LIAVBAWoiBSAxSQ0ACwJAIAlBAksNAEECIQkgFiAAKAIcIAAoAiQgFUHcAGogCxBAIgUgHkkNACARIAVrIgZB//8PSw0AIAsgBSAWaiASEB0iBUEDSQ0AIBkgBTYCBCAZIAZBAmo2AgAgBSAKTQRAQQEhDiAFIgkgC2ogEkcNAQtBASEOIAAgEUEBajYCGAwMCyArIBE2AgBBfyA1dEF/cyEGAkAgDSAeSQRAIAYhBwwBCyARQQJqISZBACEKQQAhBQNAIAsgCiAFIAogBUkbIgdqIA0gFmoiKyAHaiASEB0gB2oiByAJSwRAIBkgDkEDdGoiCSAHNgIEIAkgJiANazYCACAHIA1qIBQgByAUIA1rSxshFCAOQQFqIQ4gByALaiASRiAHQYAgS3INBiAHIQkLIDIgDSAkcUEDdGohCAJAAkAgByArai0AACAHIAtqLQAASQRAIBggDTYCACANIB9LDQEgFUFAayEYIAYhBwwECyAMIA02AgAgDSAfSwRAIAghDCAHIQUMAgsgFUFAayEMIAYhBwwDCyAHIQogCEEEaiIYIQgLIAZBf2oiByAGTw0BIAchBiAIKAIAIg0gHk8NAAsLIAxBADYCACAYQQA2AgAgB0UNCiATKAIgIAsgNEEDEB5BAnRqKAIAIgggG00NCiATKAIoIQogEUECaiERIBYgIWohE0EAIQ1BACEGA0AgCyANIAYgDSAGSRsiBWogCCAcaiAFaiASICkgIxAgIAVqIgUgCUsEQCAZIA5BA3RqIgkgBTYCBCAJIBEgCCAhaiIJazYCACAFIAlqIBQgBSAUIAlrSxshFCAOQQFqIQ4gBUGAIEsNDCAFIgkgC2ogEkYNDAsgCCAnTQ0LIAdBf2oiB0UNCyAFIA0gHCATIAUgCGogHUkbIAhqIAVqLQAAIAUgC2otAABJIgwbIQ0gBiAFIAwbIQYgCiAIICpxQQN0aiAMQQJ0aigCACIIIBtLDQALDAoLQQAhDkEAIAsgACgCBCIbayITQX8gACgCeEF/anRBf3MiIWsiBSAFIBNLGyEkIAAoAiAgCyAAKAJ8QQQQHkECdGoiIygCACENIAAoAnAiFigCACIfIBYoAgQiHGsiHUF/IBYoAnhBf2p0QX9zIilrIBYoAhAiGCAdIBhrIClLGyErIAAoAhAgACgCFCATIAAoAnQQJyIFQQEgBRshKiAcIAUgHWsiHmshJyATIBhrIB5rIS9BBEEDIAYbITAgACgCKCIxIBMgIXFBA3RqIhRBBGohDCAAKAKIASIFQf8fIAVB/x9JGyEyIAtBBGohByATQQlqIREgEyAAKAIMIgprITMgCiAbaiEmIBYoAnwhNCAAKAKAASE1IC4hCSAoIQUDQAJAAn8CfyAFQQNGBEAgJSgCAEF/agwBCyAaIAVBAnRqKAIQCyIIQX9qIgYgM0kEQCALQQQQHyALIAhrQQQQH0cNAiAHIAcgCGsgEhAdDAELIAYgL08NASAKIBMgCGsiBkF/c2pBA0kNASALQQQQHyAGICdqIgZBBBAfRw0BIAcgBkEEaiASIB8gJhAgC0EEaiIGIAlNDQAgGSAOQQN0aiIJIAY2AgQgCSAFIChrNgIAIA5BAWohDiAGIDJLDQwgBiIJIAtqIBJGDQwLIAVB
AWoiBSAwSQ0ACyAjIBM2AgBBfyA1dEF/cyEGAkAgDSAqSQRAIAYhBwwBCyATQQJqISNBACEKQQAhBQNAIAsgCiAFIAogBUkbIgdqIA0gG2oiJyAHaiASEB0gB2oiByAJSwRAIBkgDkEDdGoiCSAHNgIEIAkgIyANazYCACAHIA1qIBEgByARIA1rSxshESAOQQFqIQ4gByALaiASRiAHQYAgS3INBiAHIQkLIDEgDSAhcUEDdGohCAJAAkAgByAnai0AACAHIAtqLQAASQRAIBQgDTYCACANICRLDQEgFUFAayEUIAYhBwwECyAMIA02AgAgDSAkSwRAIAghDCAHIQUMAgsgFUFAayEMIAYhBwwDCyAHIQogCEEEaiIUIQgLIAZBf2oiByAGTw0BIAchBiAIKAIAIg0gKk8NAAsLIAxBADYCACAUQQA2AgAgB0UNCCAWKAIgIAsgNEEEEB5BAnRqKAIAIgggGE0NCCAWKAIoIQogE0ECaiEUIBsgHmohE0EAIQ1BACEGA0AgCyANIAYgDSAGSRsiBWogCCAcaiAFaiASIB8gJhAgIAVqIgUgCUsEQCAZIA5BA3RqIgkgBTYCBCAJIBQgCCAeaiIJazYCACAFIAlqIBEgBSARIAlrSxshESAOQQFqIQ4gBUGAIEsNCiAFIgkgC2ogEkYNCgsgCCArTQ0JIAdBf2oiB0UNCSAFIA0gHCATIAUgCGogHUkbIAhqIAVqLQAAIAUgC2otAABJIgwbIQ0gBiAFIAwbIQYgCiAIIClxQQN0aiAMQQJ0aigCACIIIBhLDQALDAgLQQAhDkEAIAsgACgCBCIbayITQX8gACgCeEF/anRBf3MiIWsiBSAFIBNLGyEkIAAoAiAgCyAAKAJ8QQUQHkECdGoiIygCACENIAAoAnAiFigCACIfIBYoAgQiHGsiHUF/IBYoAnhBf2p0QX9zIilrIBYoAhAiGCAdIBhrIClLGyErIAAoAhAgACgCFCATIAAoAnQQJyIFQQEgBRshKiAcIAUgHWsiHmshJyATIBhrIB5rIS9BBEEDIAYbITAgACgCKCIxIBMgIXFBA3RqIhRBBGohDCAAKAKIASIFQf8fIAVB/x9JGyEyIAtBBGohByATQQlqIREgEyAAKAIMIgprITMgCiAbaiEmIBYoAnwhNCAAKAKAASE1IC4hCSAoIQUDQAJAAn8CfyAFQQNGBEAgJSgCAEF/agwBCyAaIAVBAnRqKAIQCyIIQX9qIgYgM0kEQCALQQQQHyALIAhrQQQQH0cNAiAHIAcgCGsgEhAdDAELIAYgL08NASAKIBMgCGsiBkF/c2pBA0kNASALQQQQHyAGICdqIgZBBBAfRw0BIAcgBkEEaiASIB8gJhAgC0EEaiIGIAlNDQAgGSAOQQN0aiIJIAY2AgQgCSAFIChrNgIAIA5BAWohDiAGIDJLDQsgBiIJIAtqIBJGDQsLIAVBAWoiBSAwSQ0ACyAjIBM2AgBBfyA1dEF/cyEGAkAgDSAqSQRAIAYhBwwBCyATQQJqISNBACEKQQAhBQNAIAsgCiAFIAogBUkbIgdqIA0gG2oiJyAHaiASEB0gB2oiByAJSwRAIBkgDkEDdGoiCSAHNgIEIAkgIyANazYCACAHIA1qIBEgByARIA1rSxshESAOQQFqIQ4gByALaiASRiAHQYAgS3INBiAHIQkLIDEgDSAhcUEDdGohCAJAAkAgByAnai0AACAHIAtqLQAASQRAIBQgDTYCACANICRLDQEgFUFAayEUIAYhBwwECyAMIA02AgAgDSAkSwRAIAghDCAHIQUMAgsgFUFAayEMIAYhBwwDCyAHIQogCEEEaiIUIQgLIAZBf2oiByAGTw0BIAchBiAIKAIAIg0gKk8NAAsLIAxBADYCACAUQQA2AgAgB0UNBiAWKAIgIAsgNEEFEB5BAnRqKAIAIgggGE0NBiAWKAIoIQogE0ECaiEUIBsgHmohE0EAIQ1BACEGA0AgCyANIAYgDSAGSRsiBWogCCAcaiAFaiASIB8gJhAgIAVqIgUgCUsEQCAZIA5BA3RqIgkgBTYCBCAJIBQgCCAeaiIJazYCACAFIAlqIBEgBSARIAlrSxshESAOQQFqIQ4gBUGAIEsNCCAFIgkgC2ogEkYNCAsgCCArTQ0HIAdBf2oiB0UNByAFIA0gHCATIAUgCGogHUkbIAhqIAVqLQAAIAUgC2otAABJIgwbIQ0gBiAFIAwbIQYgCiAIIClxQQN0aiAMQQJ0aigCACIIIBhLDQALDAYLQQAhDkEAIAsgACgCBCIbayITQX8gACgCeEF/anRBf3MiIWsiBSAFIBNLGyEkIAAoAiAgCyAAKAJ8QQYQHkECdGoiIygCACENIAAoAnAiFigCACIfIBYoAgQiHGsiHUF/IBYoAnhBf2p0QX9zIilrIBYoAhAiGCAdIBhrIClLGyErIAAoAhAgACgCFCATIAAoAnQQJyIFQQEgBRshKiAcIAUgHWsiHmshJyATIBhrIB5rIS9BBEEDIAYbITAgACgCKCIxIBMgIXFBA3RqIhRBBGohDCAAKAKIASIFQf8fIAVB/x9JGyEyIAtBBGohByATQQlqIREgEyAAKAIMIgprITMgCiAbaiEmIBYoAnwhNCAAKAKAASE1IC4hCSAoIQUDQAJAAn8CfyAFQQNGBEAgJSgCAEF/agwBCyAaIAVBAnRqKAIQCyIIQX9qIgYgM0kEQCALQQQQHyALIAhrQQQQH0cNAiAHIAcgCGsgEhAdDAELIAYgL08NASAKIBMgCGsiBkF/c2pBA0kNASALQQQQHyAGICdqIgZBBBAfRw0BIAcgBkEEaiASIB8gJhAgC0EEaiIGIAlNDQAgGSAOQQN0aiIJIAY2AgQgCSAFIChrNgIAIA5BAWohDiAGIDJLDQogBiIJIAtqIBJGDQoLIAVBAWoiBSAwSQ0ACyAjIBM2AgBBfyA1dEF/cyEGAkAgDSAqSQRAIAYhBwwBCyATQQJqISNBACEKQQAhBQNAIAsgCiAFIAogBUkbIgdqIA0gG2oiJyAHaiASEB0gB2oiByAJSwRAIBkgDkEDdGoiCSAHNgIEIAkgIyANazYCACAHIA1qIBEgByARIA1rSxshESAOQQFqIQ4gByALaiASRiAHQYAgS3INBiAHIQkLIDEgDSAhcUEDdGohCAJAAkAgByAnai0AACAHIAtqLQAASQRAIBQgDTYCACANICRLDQEgFUFAayEUIAYhBwwECyAMIA02AgAgDSAkSwRAIAghDCAHIQUMAgsgFUFAayEMIAYhBwwDCyAHIQogCEEEaiIUIQgLIAZBf2oiByAGTw0BIAchBiAIKAIAIg0gKk8NAAsLIAxBADYCACAUQQA2AgAgB0UNBCAWKAIgIAsgNEEGEB5BAnRqKAIAIgggGE0NBCAWKAIoIQogE0ECaiEUIBsgHmohE0EAIQ1BACEGA0AgCyANIAYgDSAGSRsiBWogCCAcaiAFaiASIB8gJhAgIAVqIgUgCUsEQCAZIA5BA3RqIgkgBTYCBCAJIBQgCCA
eaiIJazYCACAFIAlqIBEgBSARIAlrSxshESAOQQFqIQ4gBUGAIEsNBiAFIgkgC2ogEkYNBgsgCCArTQ0FIAdBf2oiB0UNBSAFIA0gHCATIAUgCGogHUkbIAhqIAVqLQAAIAUgC2otAABJIgwbIQ0gBiAFIAwbIQYgCiAIIClxQQN0aiAMQQJ0aigCACIIIBhLDQALDAQLIAxBADYCACAYQQA2AgAMBgsgDEEANgIAIBRBADYCAAwECyAMQQA2AgAgFEEANgIADAILIAxBADYCACAUQQA2AgALIAAgEUF4ajYCGAwDCyAAIBFBeGo2AhgMAgsgACARQXhqNgIYDAELIAAgFEF4ajYCGAsgDkUNACAZIA5Bf2pBA3RqIgUoAgQiCiA4SyAKIA9qQYAgT3INBCAXICxqIRdBACEKA0AgFUFAayAlIBkgCkEDdGoiBigCACIHICgQPyA2IQwCfyAKBEAgBkF8aigCAEEBaiEMCyAGKAIEIgUgDE8LBEAgB0EBahAkIglBCHRBgCBqIQ0DQCAFQX1qIQggBSAPaiEGAn8gACgCZEEBRgRAIAgQKyANagwBCyAAKAJgIAAoAjggCUECdGooAgAQK2sgACgCXGogCBA8QQJ0IghBkKQBaigCACAJakEIdGogACgCNCAIaigCABAra0EzagsgF2ohCAJAAkAgBiAETQRAIAggICAGQRxsaigCAEgNAQwCCwNAICAgBEEBaiIEQRxsakGAgICABDYCACAEIAZJDQALCyAgIAZBHGxqIgYgIjYCDCAGIAc2AgQgBiAFNgIIIAYgCDYCACAGIBUpA0A3AhAgBiAVKAJINgIYCyAFQX9qIgUgDE8NAAsLIApBAWoiCiAORw0ACwsgD0EBaiIPIARNDQALCyAgIARBHGxqIgUoAgwhIiAFKAIEIQggBSgCACE6IAUoAgghCiAVIAUoAhg2AlggFSAFKQIQNwNQIBUgBSkCCDcDKCAVIAUpAhA3AzAgFSAFKAIYNgI4IBUgBSkCADcDIEEAIAQgFUEgahA+ayIFIAUgBEsbIQQMAwsgEEEBaiEQDAcLIAUoAgAhCEEAIQQgDyAaKAIIBH8gBAUgGigCDAtrIgRBgCBNDQELICAgIjYCKCAgIAo2AiQgICAINgIgICAgOjYCHCAgIBUoAlg2AjQgICAVKQNQNwIsDAELICAgBEEBaiIJQRxsaiIFICI2AgwgBSAKNgIIIAUgCDYCBCAFIDo2AgAgBSAVKQNQNwIQIAUgFSgCWDYCGCAJISIgBA0BC0EBISJBASEJDAELA0AgFSAgIARBHGxqIgUiDEEYaigCADYCGCAVIAUpAhA3AxAgFSAFKQIINwMIIBUgBSkCADcDACAVED4hByAgICJBf2oiIkEcbGoiBiAMKAIYNgIYIAYgBSkCEDcCECAGIAUpAgg3AgggBiAFKQIANwIAIAQgB0shBUEAIAQgB2siBiAGIARLGyEEIAUNAAsgIiAJSw0BCwNAICAgIkEcbGoiBCgCDCEGAn8gAyAGaiAEKAIIIgxFDQAaAkACQCAEKAIEIgdBA08EQCACIAIpAgA3AgQgB0F+aiEEDAELAkACQAJAAkAgByAGRWoiBQ4EBQEBAAELIAIoAgBBf2ohBAwBCyACIAVBAnRqKAIAIQQgBUECSQ0BCyACIAIoAgQ2AggLIAIgAigCADYCBAsgAiAENgIACyAtIAYgAyAHIAwQVyAMQX1qIQ8gASgCDCEEAkACQCADIAZqIgUgOU0EQCAEIAMQHCABKAIMIQQgBkEQTQRAIAEgBCAGajYCDAwDCyAEQRBqIANBEGoiChAcIARBIGogA0EgahAcIAZBMUgNASAEIAZqIQggBEEwaiEEA0AgBCAKQSBqIgUQHCAEQRBqIApBMGoQHCAFIQogBEEgaiIEIAhJDQALDAELIAQgAyAFIDkQIgsgASABKAIMIAZqNgIMIAZBgIAESQ0AIAFBATYCJCABIAEoAgQgASgCAGtBA3U2AigLIAEoAgQiBCAHQQFqNgIAIAQgBjsBBCAPQYCABE8EQCABQQI2AiQgASAEIAEoAgBrQQN1NgIoCyAEIA87AQYgASAEQQhqNgIEIAYgDGogA2oiAwshECAiQQFqIiIgCU0NAAsLIC1BAhBRCyAQIDdJDQALCyAVQeAAaiQAIBIgA2sLu1wBN38jAEHgAGsiFyQAIAAoAoQBIQcgACgCBCEGIAAoAogBIREgACgCDCEFIBcgACgCGDYCXCAAKAI8IRsgAEFAaygCACEkIABBLGoiNSADIARBABBZIAMgBSAGaiADRmoiDSADIARqIhBBeGoiOEkEQCARQf8fIBFB/x9JGyE5IBBBYGohOkEDQQQgB0EDRhsiN0F/aiE2A0ACQAJAAkACQAJAAkACQAJAAkAgACgCBCIHIAAoAhgiBGogDUsNACANIANrIS4gACgChAEhBiAEIA0gB2siBUkEQANAIAAgBCAHaiAQIAZBABBBIARqIgQgBUkNAAsLIC5FISwgACAFNgIYAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkAgBkF9ag4FAAECAwMBC0EAIQtBACANIAAoAgQiGWsiD0F/IAAoAnhBf2p0QX9zIiZrIgQgBCAPSxshJyAAKAIgIA0gACgCfEEDEB5BAnRqIi8oAgAhCSAAKAJwIhYoAgAiKCAWKAIEIh1rIh5BfyAWKAJ4QX9qdEF/cyIpayAWKAIQIhwgHiAcayApSxshMCAAKAIQIAAoAhQgDyAAKAJ0ECciBEEBIAQbIR8gHSAEIB5rIiJrITEgDyAcayAiayEUQQNBBCAuGyEgIAAoAigiMiAPICZxQQN0aiIMQQRqIQogACgCiAEiBEH/HyAEQf8fSRshNCANQQNqISUgD0EJaiETIA8gACgCDCIrayEVIBkgK2ohLSAWKAJ8ISEgACgCgAEhByA2IREgLCEEA0ACQAJ/An8gBEEDRgRAIAIoAgBBf2oMAQsgAiAEQQJ0aigCAAsiCEF/aiIFIBVJBEAgDUEDEB8gDSAIa0EDEB9HDQIgJSAlIAhrIBAQHQwBCyAFIBRPDQEgKyAPIAhrIgVBf3NqQQNJDQEgDUEDEB8gBSAxaiIFQQMQH0cNASAlIAVBA2ogECAoIC0QIAtBA2oiBSARTQ0AIBsgC0EDdGoiBiAFNgIEIAYgBCAsazYCACALQQFqIQsgBSA0Sw0NIAUiESANaiAQRg0NCyAEQQFqIgQgIEkNAAsCQCARQQJLDQBBAiERIBkgACgCHCAAKAIkIBdB3ABqIA0QQCIEIB9JDQAgDyAEayIFQf//D0sNACANIAQgGWogEBAdIgRBA0kNACAbIAQ2AgQgGyAFQQJqNgIAIAQgNE0EQEEBIQsgBCIRIA1qIBBHDQELQQEhCyAAIA9BAWo2AhgMDAsgLyAPNgIAQX8gB3RBf3MhBQJAIAkgH0kEQCAFIQcMAQsgD0ECaiEUQQAhBkEAIRUDQCANIAYgFSAGIBVJGyIEaiAJIBlqIiAgBGogEBAdIARqIgQgEU
sEQCAbIAtBA3RqIgcgBDYCBCAHIBQgCWs2AgAgBCAJaiATIAQgEyAJa0sbIRMgC0EBaiELIAQgDWogEEYgBEGAIEtyDQYgBCERCyAyIAkgJnFBA3RqIQgCQAJAIAQgIGotAAAgBCANai0AAEkEQCAMIAk2AgAgCSAnSw0BIBdBQGshDCAFIQcMBAsgCiAJNgIAIAkgJ0sEQCAIIQogBCEVDAILIBdBQGshCiAFIQcMAwsgBCEGIAhBBGoiDCEICyAFQX9qIgcgBU8NASAHIQUgCCgCACIJIB9PDQALCyAKQQA2AgAgDEEANgIAIAdFDQogFigCICANICFBAxAeQQJ0aigCACIIIBxNDQogFigCKCEMIA9BAmohFSAZICJqIQpBACEJQQAhBQNAIA0gCSAFIAkgBUkbIgRqIAggHWogBGogECAoIC0QICAEaiIEIBFLBEAgGyALQQN0aiIGIAQ2AgQgBiAVIAggImoiBms2AgAgBCAGaiATIAQgEyAGa0sbIRMgC0EBaiELIARBgCBLDQwgBCIRIA1qIBBGDQwLIAggME0NCyAHQX9qIgdFDQsgBCAJIB0gCiAEIAhqIB5JGyAIaiAEai0AACAEIA1qLQAASSIGGyEJIAUgBCAGGyEFIAwgCCApcUEDdGogBkECdGooAgAiCCAcSw0ACwwKC0EAIQtBACANIAAoAgQiHGsiD0F/IAAoAnhBf2p0QX9zIiVrIgQgBCAPSxshJiAAKAIgIA0gACgCfEEEEB5BAnRqIi0oAgAhCSAAKAJwIhYoAgAiJyAWKAIEIh1rIh5BfyAWKAJ4QX9qdEF/cyIoayAWKAIQIhkgHiAZayAoSxshLyAAKAIQIAAoAhQgDyAAKAJ0ECciBEEBIAQbISkgHSAEIB5rIh9rITAgDyAZayAfayExQQNBBCAuGyEUIAAoAigiMiAPICVxQQN0aiIqQQRqIQwgACgCiAEiBEH/HyAEQf8fSRshICANQQRqISIgD0EJaiEKIA8gACgCDCI0ayEVIBwgNGohKyAWKAJ8ISEgACgCgAEhByA2IREgLCEEA0ACQAJ/An8gBEEDRgRAIAIoAgBBf2oMAQsgAiAEQQJ0aigCAAsiCEF/aiIFIBVJBEAgDUEEEB8gDSAIa0EEEB9HDQIgIiAiIAhrIBAQHQwBCyAFIDFPDQEgNCAPIAhrIgVBf3NqQQNJDQEgDUEEEB8gBSAwaiIFQQQQH0cNASAiIAVBBGogECAnICsQIAtBBGoiBSARTQ0AIBsgC0EDdGoiBiAFNgIEIAYgBCAsazYCACALQQFqIQsgBSAgSw0MIAUiESANaiAQRg0MCyAEQQFqIgQgFEkNAAsgLSAPNgIAQX8gB3RBf3MhBQJAIAkgKUkEQCAFIQcMAQsgD0ECaiEUQQAhBkEAIRUDQCANIAYgFSAGIBVJGyIEaiAJIBxqIiAgBGogEBAdIARqIgQgEUsEQCAbIAtBA3RqIgcgBDYCBCAHIBQgCWs2AgAgBCAJaiAKIAQgCiAJa0sbIQogC0EBaiELIAQgDWogEEYgBEGAIEtyDQYgBCERCyAyIAkgJXFBA3RqIQgCQAJAIAQgIGotAAAgBCANai0AAEkEQCAqIAk2AgAgCSAmSw0BIBdBQGshKiAFIQcMBAsgDCAJNgIAIAkgJksEQCAIIQwgBCEVDAILIBdBQGshDCAFIQcMAwsgBCEGIAhBBGoiKiEICyAFQX9qIgcgBU8NASAHIQUgCCgCACIJIClPDQALCyAMQQA2AgAgKkEANgIAIAdFDQggFigCICANICFBBBAeQQJ0aigCACIIIBlNDQggFigCKCEgIA9BAmohDCAcIB9qIRVBACEJQQAhBQNAIA0gCSAFIAkgBUkbIgRqIAggHWogBGogECAnICsQICAEaiIEIBFLBEAgGyALQQN0aiIGIAQ2AgQgBiAMIAggH2oiBms2AgAgBCAGaiAKIAQgCiAGa0sbIQogC0EBaiELIARBgCBLDQogBCIRIA1qIBBGDQoLIAggL00NCSAHQX9qIgdFDQkgBCAJIB0gFSAEIAhqIB5JGyAIaiAEai0AACAEIA1qLQAASSIGGyEJIAUgBCAGGyEFICAgCCAocUEDdGogBkECdGooAgAiCCAZSw0ACwwIC0EAIQtBACANIAAoAgQiHGsiD0F/IAAoAnhBf2p0QX9zIiVrIgQgBCAPSxshJiAAKAIgIA0gACgCfEEFEB5BAnRqIi0oAgAhCSAAKAJwIhYoAgAiJyAWKAIEIh1rIh5BfyAWKAJ4QX9qdEF/cyIoayAWKAIQIhkgHiAZayAoSxshLyAAKAIQIAAoAhQgDyAAKAJ0ECciBEEBIAQbISkgHSAEIB5rIh9rITAgDyAZayAfayExQQNBBCAuGyEUIAAoAigiMiAPICVxQQN0aiIqQQRqIQwgACgCiAEiBEH/HyAEQf8fSRshICANQQRqISIgD0EJaiEKIA8gACgCDCI0ayEVIBwgNGohKyAWKAJ8ISEgACgCgAEhByA2IREgLCEEA0ACQAJ/An8gBEEDRgRAIAIoAgBBf2oMAQsgAiAEQQJ0aigCAAsiCEF/aiIFIBVJBEAgDUEEEB8gDSAIa0EEEB9HDQIgIiAiIAhrIBAQHQwBCyAFIDFPDQEgNCAPIAhrIgVBf3NqQQNJDQEgDUEEEB8gBSAwaiIFQQQQH0cNASAiIAVBBGogECAnICsQIAtBBGoiBSARTQ0AIBsgC0EDdGoiBiAFNgIEIAYgBCAsazYCACALQQFqIQsgBSAgSw0LIAUiESANaiAQRg0LCyAEQQFqIgQgFEkNAAsgLSAPNgIAQX8gB3RBf3MhBQJAIAkgKUkEQCAFIQcMAQsgD0ECaiEUQQAhBkEAIRUDQCANIAYgFSAGIBVJGyIEaiAJIBxqIiAgBGogEBAdIARqIgQgEUsEQCAbIAtBA3RqIgcgBDYCBCAHIBQgCWs2AgAgBCAJaiAKIAQgCiAJa0sbIQogC0EBaiELIAQgDWogEEYgBEGAIEtyDQYgBCERCyAyIAkgJXFBA3RqIQgCQAJAIAQgIGotAAAgBCANai0AAEkEQCAqIAk2AgAgCSAmSw0BIBdBQGshKiAFIQcMBAsgDCAJNgIAIAkgJksEQCAIIQwgBCEVDAILIBdBQGshDCAFIQcMAwsgBCEGIAhBBGoiKiEICyAFQX9qIgcgBU8NASAHIQUgCCgCACIJIClPDQALCyAMQQA2AgAgKkEANgIAIAdFDQYgFigCICANICFBBRAeQQJ0aigCACIIIBlNDQYgFigCKCEgIA9BAmohDCAcIB9qIRVBACEJQQAhBQNAIA0gCSAFIAkgBUkbIgRqIAggHWogBGogECAnICsQICAEaiIEIBFLBEAgGyALQQN0aiIGIAQ2AgQgBiAMIAggH2oiBms2AgAgBCAGaiAKIAQgCiAGa0sbIQogC0EBaiELIARBgCBLDQggBCIRIA1qIBBGDQgLIAggL00NByAHQX9qIgdFDQcgBCAJIB0gFSAEIAhqIB5JGyAIaiAEai0AA
CAEIA1qLQAASSIGGyEJIAUgBCAGGyEFICAgCCAocUEDdGogBkECdGooAgAiCCAZSw0ACwwGC0EAIQtBACANIAAoAgQiHGsiD0F/IAAoAnhBf2p0QX9zIiVrIgQgBCAPSxshJiAAKAIgIA0gACgCfEEGEB5BAnRqIi0oAgAhCSAAKAJwIhYoAgAiJyAWKAIEIh1rIh5BfyAWKAJ4QX9qdEF/cyIoayAWKAIQIhkgHiAZayAoSxshLyAAKAIQIAAoAhQgDyAAKAJ0ECciBEEBIAQbISkgHSAEIB5rIh9rITAgDyAZayAfayExQQNBBCAuGyEUIAAoAigiMiAPICVxQQN0aiIqQQRqIQwgACgCiAEiBEH/HyAEQf8fSRshICANQQRqISIgD0EJaiEKIA8gACgCDCI0ayEVIBwgNGohKyAWKAJ8ISEgACgCgAEhByA2IREgLCEEA0ACQAJ/An8gBEEDRgRAIAIoAgBBf2oMAQsgAiAEQQJ0aigCAAsiCEF/aiIFIBVJBEAgDUEEEB8gDSAIa0EEEB9HDQIgIiAiIAhrIBAQHQwBCyAFIDFPDQEgNCAPIAhrIgVBf3NqQQNJDQEgDUEEEB8gBSAwaiIFQQQQH0cNASAiIAVBBGogECAnICsQIAtBBGoiBSARTQ0AIBsgC0EDdGoiBiAFNgIEIAYgBCAsazYCACALQQFqIQsgBSAgSw0KIAUiESANaiAQRg0KCyAEQQFqIgQgFEkNAAsgLSAPNgIAQX8gB3RBf3MhBQJAIAkgKUkEQCAFIQcMAQsgD0ECaiEUQQAhBkEAIRUDQCANIAYgFSAGIBVJGyIEaiAJIBxqIiAgBGogEBAdIARqIgQgEUsEQCAbIAtBA3RqIgcgBDYCBCAHIBQgCWs2AgAgBCAJaiAKIAQgCiAJa0sbIQogC0EBaiELIAQgDWogEEYgBEGAIEtyDQYgBCERCyAyIAkgJXFBA3RqIQgCQAJAIAQgIGotAAAgBCANai0AAEkEQCAqIAk2AgAgCSAmSw0BIBdBQGshKiAFIQcMBAsgDCAJNgIAIAkgJksEQCAIIQwgBCEVDAILIBdBQGshDCAFIQcMAwsgBCEGIAhBBGoiKiEICyAFQX9qIgcgBU8NASAHIQUgCCgCACIJIClPDQALCyAMQQA2AgAgKkEANgIAIAdFDQQgFigCICANICFBBhAeQQJ0aigCACIIIBlNDQQgFigCKCEgIA9BAmohDCAcIB9qIRVBACEJQQAhBQNAIA0gCSAFIAkgBUkbIgRqIAggHWogBGogECAnICsQICAEaiIEIBFLBEAgGyALQQN0aiIGIAQ2AgQgBiAMIAggH2oiBms2AgAgBCAGaiAKIAQgCiAGa0sbIQogC0EBaiELIARBgCBLDQYgBCIRIA1qIBBGDQYLIAggL00NBSAHQX9qIgdFDQUgBCAJIB0gFSAEIAhqIB5JGyAIaiAEai0AACAEIA1qLQAASSIGGyEJIAUgBCAGGyEFICAgCCAocUEDdGogBkECdGooAgAiCCAZSw0ACwwECyAKQQA2AgAgDEEANgIADAYLIAxBADYCACAqQQA2AgAMBAsgDEEANgIAICpBADYCAAwCCyAMQQA2AgAgKkEANgIACyAAIApBeGo2AhgMAwsgACAKQXhqNgIYDAILIAAgCkF4ajYCGAwBCyAAIBNBeGo2AhgLIAtFDQAgJCACKAIANgIQICQgAigCBDYCFCACKAIIIQQgJCAuNgIMICRBADYCCCAkIAQ2AhggJCADIC4gNUEAEFgiBjYCACAbIAtBf2pBA3RqIgQoAgQiCCA5SwRAIAQoAgAhBQwDC0EBIQRBACA1QQAQLSEFA0AgJCAEQRxsakGAgICABDYCACAEQQFqIgQgN0cNAAsgBSAGaiERQQAhCiA3IQgDQCAbIApBA3RqIgQoAgQhDCAXQUBrIAIgBCgCACIVICwQPyAIIAxNBEAgFUEBahAkIiBBCXRBs7R/akEzICBBE0sbIQYgIEEIdEGAIGohBQNAIAhBfWohBAJ/IAAoAmRBAUYEQCAEEC4gBWoMAQsgACgCYCAGaiAAKAI4ICBBAnRqKAIAEC5rIAAoAlxqIAQQPEECdCIEQZCkAWooAgAgIGpBCHRqIAAoAjQgBGooAgAQLmsLIQcgJCAIQRxsaiIEIC42AgwgBCAVNgIEIAQgCDYCCCAEIAcgEWo2AgAgBCAXKQNANwIQIAQgFygCSDYCGCAIQQFqIgggDE0NAAsLIApBAWoiCiALRw0AC0EBIRECQCAIQX9qIgRFBEBBACEEDAELA0BBASEHICQgEUF/akEcbGoiBigCCEUEQCAGKAIMQQFqIQcLIA0gEWoiEkF/akEBIDVBABBSIAYoAgBqIAcgNUEAEC1qIAdBf2ogNUEAEC1rIgUgJCARQRxsaiIzKAIAIhVMBEAgMyAHNgIMIDNCADcCBCAzIAU2AgAgMyAGKAIYNgIYIDMgBikCEDcCECAFIRULIBIgOEsEfyARQQFqBSAEIBFGBEAgESEEDAMLAkAgJCARQQFqIiBBHGxqKAIAIBVBgAFqTA0AQQAhLiAzKAIIIgpFBEAgMygCDCEuC0EAIDVBABAtITQgACgCBCILIAAoAhgiB2ogEksNACAAKAKEASEGIAcgEiALayIFSQRAA0AgACAHIAtqIBAgBkEAEEEgB2oiByAFSQ0ACwsgCkEARyEsIDNBEGohKiAAIAU2AhgCQAJAAkACQAJAAkACQAJAAkACQAJAAkACQCAGQX1qDgUAAQIDAwELQQAhE0EAIBIgACgCBCIPayIaQX8gACgCeEF/anRBf3MiImsiBSAFIBpLGyElIAAoAiAgEiAAKAJ8QQMQHkECdGoiLSgCACEOIAAoAnAiIygCACImICMoAgQiGWsiHEF/ICMoAnhBf2p0QX9zIidrICMoAhAiFiAcIBZrICdLGyEvIAAoAhAgACgCFCAaIAAoAnQQJyIFQQEgBRshHSAZIAUgHGsiHmshMCAaIBZrIB5rISFBBEEDIAobIRQgACgCKCIxIBogInFBA3RqIgxBBGohCSAAKAKIASIFQf8fIAVB/x9JGyEoIBJBA2ohHyAaQQlqIRggGiAAKAIMIilrIQggDyApaiErICMoAnwhMiAAKAKAASEKIDYhCyAsIQcDQAJAAn8CfyAHQQNGBEAgKigCAEF/agwBCyAzIAdBAnRqKAIQCyIFQX9qIgYgCEkEQCASQQMQHyASIAVrQQMQH0cNAiAfIB8gBWsgEBAdDAELIAYgIU8NASApIBogBWsiBUF/c2pBA0kNASASQQMQHyAFIDBqIgVBAxAfRw0BIB8gBUEDaiAQICYgKxAgC0EDaiIFIAtNDQAgGyATQQN0aiIGIAU2AgQgBiAHICxrNgIAIBNBAWohEyAFIChLDQ0gBSILIBJqIBBGDQ0LIAdBAWoiByAUSQ0ACwJAIAtBAksNAEECIQsgDyAAKAIcIAAoAiQgF0HcAGogEhBAIgUgHUkNACAaIAVrIgZB//8PSw0AIBIgBSAPaiAQEB0i
BUEDSQ0AIBsgBTYCBCAbIAZBAmo2AgAgBSAoTQRAQQEhEyAFIgsgEmogEEcNAQtBASETIAAgGkEBajYCGAwMCyAtIBo2AgBBfyAKdEF/cyEKAkAgDiAdSQRAIAohBgwBCyAaQQJqISFBACEIQQAhBwNAIBIgCCAHIAggB0kbIgVqIA4gD2oiFCAFaiAQEB0gBWoiBiALSwRAIBsgE0EDdGoiBSAGNgIEIAUgISAOazYCACAGIA5qIBggBiAYIA5rSxshGCATQQFqIRMgBiASaiAQRiAGQYAgS3INBiAGIQsLIDEgDiAicUEDdGohBQJAAkAgBiAUai0AACAGIBJqLQAASQRAIAwgDjYCACAOICVLDQEgF0FAayEMIAohBgwECyAJIA42AgAgDiAlSwRAIAUhCSAGIQcMAgsgF0FAayEJIAohBgwDCyAGIQggBUEEaiIMIQULIApBf2oiBiAKTw0BIAYhCiAFKAIAIg4gHU8NAAsLIAlBADYCACAMQQA2AgAgBkUNCiAjKAIgIBIgMkEDEB5BAnRqKAIAIgUgFk0NCiAjKAIoIQkgGkECaiEMIA8gHmohCEEAIQ5BACEKA0AgEiAOIAogDiAKSRsiB2ogBSAZaiAHaiAQICYgKxAgIAdqIgcgC0sEQCAbIBNBA3RqIgsgBzYCBCALIAwgBSAeaiILazYCACAHIAtqIBggByAYIAtrSxshGCATQQFqIRMgB0GAIEsNDCAHIgsgEmogEEYNDAsgBSAvTQ0LIAZBf2oiBkUNCyAHIA4gGSAIIAUgB2ogHEkbIAVqIAdqLQAAIAcgEmotAABJIhQbIQ4gCiAHIBQbIQogCSAFICdxQQN0aiAUQQJ0aigCACIFIBZLDQALDAoLQQAhE0EAIBIgACgCBCIWayIYQX8gACgCeEF/anRBf3MiH2siBSAFIBhLGyEiIAAoAiAgEiAAKAJ8QQQQHkECdGoiKygCACEOIAAoAnAiIygCACIlICMoAgQiGWsiHEF/ICMoAnhBf2p0QX9zIiZrICMoAhAiDyAcIA9rICZLGyEtIAAoAhAgACgCFCAYIAAoAnQQJyIFQQEgBRshJyAZIAUgHGsiHWshLyAYIA9rIB1rITBBBEEDIAobISEgACgCKCIxIBggH3FBA3RqIglBBGohDCAAKAKIASIFQf8fIAVB/x9JGyEUIBJBBGohHiAYQQlqIRogGCAAKAIMIihrIQggFiAoaiEpICMoAnwhMiAAKAKAASEKIDYhCyAsIQcDQAJAAn8CfyAHQQNGBEAgKigCAEF/agwBCyAzIAdBAnRqKAIQCyIFQX9qIgYgCEkEQCASQQQQHyASIAVrQQQQH0cNAiAeIB4gBWsgEBAdDAELIAYgME8NASAoIBggBWsiBUF/c2pBA0kNASASQQQQHyAFIC9qIgVBBBAfRw0BIB4gBUEEaiAQICUgKRAgC0EEaiIFIAtNDQAgGyATQQN0aiIGIAU2AgQgBiAHICxrNgIAIBNBAWohEyAFIBRLDQwgBSILIBJqIBBGDQwLIAdBAWoiByAhSQ0ACyArIBg2AgBBfyAKdEF/cyEKAkAgDiAnSQRAIAohBgwBCyAYQQJqISFBACEIQQAhBwNAIBIgCCAHIAggB0kbIgVqIA4gFmoiFCAFaiAQEB0gBWoiBiALSwRAIBsgE0EDdGoiBSAGNgIEIAUgISAOazYCACAGIA5qIBogBiAaIA5rSxshGiATQQFqIRMgBiASaiAQRiAGQYAgS3INBiAGIQsLIDEgDiAfcUEDdGohBQJAAkAgBiAUai0AACAGIBJqLQAASQRAIAkgDjYCACAOICJLDQEgF0FAayEJIAohBgwECyAMIA42AgAgDiAiSwRAIAUhDCAGIQcMAgsgF0FAayEMIAohBgwDCyAGIQggBUEEaiIJIQULIApBf2oiBiAKTw0BIAYhCiAFKAIAIg4gJ08NAAsLIAxBADYCACAJQQA2AgAgBkUNCCAjKAIgIBIgMkEEEB5BAnRqKAIAIgUgD00NCCAjKAIoIQkgGEECaiEMIBYgHWohCEEAIQ5BACEKA0AgEiAOIAogDiAKSRsiB2ogBSAZaiAHaiAQICUgKRAgIAdqIgcgC0sEQCAbIBNBA3RqIgsgBzYCBCALIAwgBSAdaiILazYCACAHIAtqIBogByAaIAtrSxshGiATQQFqIRMgB0GAIEsNCiAHIgsgEmogEEYNCgsgBSAtTQ0JIAZBf2oiBkUNCSAHIA4gGSAIIAUgB2ogHEkbIAVqIAdqLQAAIAcgEmotAABJIhQbIQ4gCiAHIBQbIQogCSAFICZxQQN0aiAUQQJ0aigCACIFIA9LDQALDAgLQQAhE0EAIBIgACgCBCIWayIYQX8gACgCeEF/anRBf3MiH2siBSAFIBhLGyEiIAAoAiAgEiAAKAJ8QQUQHkECdGoiKygCACEOIAAoAnAiIygCACIlICMoAgQiGWsiHEF/ICMoAnhBf2p0QX9zIiZrICMoAhAiDyAcIA9rICZLGyEtIAAoAhAgACgCFCAYIAAoAnQQJyIFQQEgBRshJyAZIAUgHGsiHWshLyAYIA9rIB1rITBBBEEDIAobISEgACgCKCIxIBggH3FBA3RqIglBBGohDCAAKAKIASIFQf8fIAVB/x9JGyEUIBJBBGohHiAYQQlqIRogGCAAKAIMIihrIQggFiAoaiEpICMoAnwhMiAAKAKAASEKIDYhCyAsIQcDQAJAAn8CfyAHQQNGBEAgKigCAEF/agwBCyAzIAdBAnRqKAIQCyIFQX9qIgYgCEkEQCASQQQQHyASIAVrQQQQH0cNAiAeIB4gBWsgEBAdDAELIAYgME8NASAoIBggBWsiBUF/c2pBA0kNASASQQQQHyAFIC9qIgVBBBAfRw0BIB4gBUEEaiAQICUgKRAgC0EEaiIFIAtNDQAgGyATQQN0aiIGIAU2AgQgBiAHICxrNgIAIBNBAWohEyAFIBRLDQsgBSILIBJqIBBGDQsLIAdBAWoiByAhSQ0ACyArIBg2AgBBfyAKdEF/cyEKAkAgDiAnSQRAIAohBgwBCyAYQQJqISFBACEIQQAhBwNAIBIgCCAHIAggB0kbIgVqIA4gFmoiFCAFaiAQEB0gBWoiBiALSwRAIBsgE0EDdGoiBSAGNgIEIAUgISAOazYCACAGIA5qIBogBiAaIA5rSxshGiATQQFqIRMgBiASaiAQRiAGQYAgS3INBiAGIQsLIDEgDiAfcUEDdGohBQJAAkAgBiAUai0AACAGIBJqLQAASQRAIAkgDjYCACAOICJLDQEgF0FAayEJIAohBgwECyAMIA42AgAgDiAiSwRAIAUhDCAGIQcMAgsgF0FAayEMIAohBgwDCyAGIQggBUEEaiIJIQULIApBf2oiBiAKTw0BIAYhCiAFKAIAIg4gJ08NAAsLIAxBADYCACAJQQA2AgAgBkUNBiAjKAIgIBIgMkEFEB5BAnRqKAIAIgUgD00NBiAjKAIoIQkgGEECaiEMIBYgHWohCEEAIQ5BACEKA0AgEiAOIAogDiA
KSRsiB2ogBSAZaiAHaiAQICUgKRAgIAdqIgcgC0sEQCAbIBNBA3RqIgsgBzYCBCALIAwgBSAdaiILazYCACAHIAtqIBogByAaIAtrSxshGiATQQFqIRMgB0GAIEsNCCAHIgsgEmogEEYNCAsgBSAtTQ0HIAZBf2oiBkUNByAHIA4gGSAIIAUgB2ogHEkbIAVqIAdqLQAAIAcgEmotAABJIhQbIQ4gCiAHIBQbIQogCSAFICZxQQN0aiAUQQJ0aigCACIFIA9LDQALDAYLQQAhE0EAIBIgACgCBCIWayIYQX8gACgCeEF/anRBf3MiH2siBSAFIBhLGyEiIAAoAiAgEiAAKAJ8QQYQHkECdGoiKygCACEOIAAoAnAiIygCACIlICMoAgQiGWsiHEF/ICMoAnhBf2p0QX9zIiZrICMoAhAiDyAcIA9rICZLGyEtIAAoAhAgACgCFCAYIAAoAnQQJyIFQQEgBRshJyAZIAUgHGsiHWshLyAYIA9rIB1rITBBBEEDIAobISEgACgCKCIxIBggH3FBA3RqIgxBBGohCSAAKAKIASIFQf8fIAVB/x9JGyEUIBJBBGohHiAYQQlqIRogGCAAKAIMIihrIQggFiAoaiEpICMoAnwhMiAAKAKAASEKIDYhCyAsIQcDQAJAAn8CfyAHQQNGBEAgKigCAEF/agwBCyAzIAdBAnRqKAIQCyIFQX9qIgYgCEkEQCASQQQQHyASIAVrQQQQH0cNAiAeIB4gBWsgEBAdDAELIAYgME8NASAoIBggBWsiBUF/c2pBA0kNASASQQQQHyAFIC9qIgVBBBAfRw0BIB4gBUEEaiAQICUgKRAgC0EEaiIFIAtNDQAgGyATQQN0aiIGIAU2AgQgBiAHICxrNgIAIBNBAWohEyAFIBRLDQogBSILIBJqIBBGDQoLIAdBAWoiByAhSQ0ACyArIBg2AgBBfyAKdEF/cyEKAkAgDiAnSQRAIAohBgwBCyAYQQJqISFBACEIQQAhBwNAIBIgCCAHIAggB0kbIgVqIA4gFmoiFCAFaiAQEB0gBWoiBiALSwRAIBsgE0EDdGoiBSAGNgIEIAUgISAOazYCACAGIA5qIBogBiAaIA5rSxshGiATQQFqIRMgBiASaiAQRiAGQYAgS3INBiAGIQsLIDEgDiAfcUEDdGohBQJAAkAgBiAUai0AACAGIBJqLQAASQRAIAwgDjYCACAOICJLDQEgF0FAayEMIAohBgwECyAJIA42AgAgDiAiSwRAIAUhCSAGIQcMAgsgF0FAayEJIAohBgwDCyAGIQggBUEEaiIMIQULIApBf2oiBiAKTw0BIAYhCiAFKAIAIg4gJ08NAAsLIAlBADYCACAMQQA2AgAgBkUNBCAjKAIgIBIgMkEGEB5BAnRqKAIAIgUgD00NBCAjKAIoIQkgGEECaiEMIBYgHWohCEEAIQ5BACEKA0AgEiAOIAogDiAKSRsiB2ogBSAZaiAHaiAQICUgKRAgIAdqIgcgC0sEQCAbIBNBA3RqIgsgBzYCBCALIAwgBSAdaiILazYCACAHIAtqIBogByAaIAtrSxshGiATQQFqIRMgB0GAIEsNBiAHIgsgEmogEEYNBgsgBSAtTQ0FIAZBf2oiBkUNBSAHIA4gGSAIIAUgB2ogHEkbIAVqIAdqLQAAIAcgEmotAABJIhQbIQ4gCiAHIBQbIQogCSAFICZxQQN0aiAUQQJ0aigCACIFIA9LDQALDAQLIAlBADYCACAMQQA2AgAMBgsgDEEANgIAIAlBADYCAAwECyAMQQA2AgAgCUEANgIADAILIAlBADYCACAMQQA2AgALIAAgGkF4ajYCGAwDCyAAIBpBeGo2AhgMAgsgACAaQXhqNgIYDAELIAAgGEF4ajYCGAsgE0UNACAbIBNBf2pBA3RqIgUoAgQiCCA5SyAIIBFqQYAgT3INBSAVIDRqIRVBACEIA0AgF0FAayAqIBsgCEEDdGoiBigCACIMICwQPyA3IQUgCARAIAZBfGooAgBBAWohBQsCQCAGKAIEIgcgBUkNACAMQQFqECQiIUEJdEGztH9qQTMgIUETSxshCiAhQQh0QYAgaiELA0AgB0F9aiEGIAcgEWohFAJ/IAAoAmRBAUYEQCAGEC4gC2oMAQsgACgCYCAKaiAAKAI4ICFBAnRqKAIAEC5rIAAoAlxqIAYQPEECdCIGQZCkAWooAgAgIWpBCHRqIAAoAjQgBmooAgAQLmsLIBVqIQYCQCAUIARNBEAgBiAkIBRBHGxqKAIASA0BDAMLA0AgJCAEQQFqIgRBHGxqQYCAgIAENgIAIAQgFEkNAAsLICQgFEEcbGoiCSAuNgIMIAkgDDYCBCAJIAc2AgggCSAGNgIAIAkgFykDQDcCECAJIBcoAkg2AhggB0F/aiIHIAVPDQALCyAIQQFqIgggE0cNAAsLICALIhEgBE0NAAsLICQgBEEcbGoiBigCDCEuIAYoAgQhBSAGKAIAITsgBigCCCEIIBcgBigCGDYCWCAXIAYpAhA3A1AgFyAGKQIINwMoIBcgBikCEDcDMCAXIAYoAhg2AjggFyAGKQIANwMgQQAgBCAXQSBqED5rIgYgBiAESxshBAwDCyANQQFqIQ0MBwsgBSgCACEFQQAhBCARIDMoAggEfyAEBSAzKAIMC2siBEGAIE0NAQsgJCAuNgIoICQgCDYCJCAkIAU2AiAgJCA7NgIcICQgFygCWDYCNCAkIBcpA1A3AiwMAQsgJCAEQQFqIhVBHGxqIgYgLjYCDCAGIAg2AgggBiAFNgIEIAYgOzYCACAGIBcpA1A3AhAgBiAXKAJYNgIYIBUhCSAEDQELQQEhCUEBIRUMAQsDQCAXICQgBEEcbGoiESIFQRhqKAIANgIYIBcgESkCEDcDECAXIBEpAgg3AwggFyARKQIANwMAIBcQPiEHICQgCUF/aiIJQRxsaiIGIAUoAhg2AhggBiARKQIQNwIQIAYgESkCCDcCCCAGIBEpAgA3AgAgBCAHSyEGQQAgBCAHayIFIAUgBEsbIQQgBg0ACyAJIBVLDQELA0AgJCAJQRxsaiIEKAIMIQoCfyADIApqIAQoAggiEUUNABoCQAJAIAQoAgQiC0EDTwRAIAIgAikCADcCBCALQX5qIQQMAQsCQAJAAkACQCALIApFaiIFDgQFAQEAAQsgAigCAEF/aiEEDAELIAIgBUECdGooAgAhBCAFQQJJDQELIAIgAigCBDYCCAsgAiACKAIANgIECyACIAQ2AgALIDUgCiADIAsgERBXIBFBfWohByABKAIMIQUCQAJAIAMgCmoiBCA6TQRAIAUgAxAcIAEoAgwhBCAKQRBNBEAgASAEIApqNgIMDAMLIARBEGogA0EQaiIIEBwgBEEgaiADQSBqEBwgCkExSA0BIAQgCmohBiAEQTBqIQQDQCAEIAhBIGoiBRAcIARBEGogCEEwahAcIAUhCCAEQSBqIgQgBkkNAAsMAQsgBSADIAQgOhAiCyABIAEoAgwgCmo2AgwgCkGAgARJDQAgAUEBNgIkIA
EgASgCBCABKAIAa0EDdTYCKAsgASgCBCIEIAtBAWo2AgAgBCAKOwEEIAdBgIAETwRAIAFBAjYCJCABIAQgASgCAGtBA3U2AigLIAQgBzsBBiABIARBCGo2AgQgCiARaiADaiIDCyENIAlBAWoiCSAVTQ0ACwsgNUEAEFELIA0gOEkNAAsLIBdB4ABqJAAgECADawsLAEGI7AEoAgAQOAtIACAAQUBrKAIAEHAEQCAAIAAoAgBB/wEQfjYCGAsgACAAKAIEQSMQfjYCHCAAIAAoAghBNBB+NgIgIAAgACgCDEEfEH42AiQL6T4BKX8jAEHwAGsiDCQAIAwgAigCCDYCSCAMIAIpAgA3A0AgACgChAEhBSAAKAIEIQkgACgCiAEhAiAAKAIMIQcgDCAAKAIYNgJsIAAoAjwhFyAAQUBrKAIAIRggAEEsaiIiIAMgBEECEFkgAyAHIAlqIANGaiIPIAMgBGoiEkF4aiIpSQRAIAJB/x8gAkH/H0kbISogEkFgaiErQQNBBCAFQQNGGyIoQX9qISMDQAJAAkACQAJAAkACQAJAAkACQCAAKAIEIgUgACgCGCICaiAPSw0AIA8gA2shGSAAKAKEASEJIAIgDyAFayIHSQRAA0AgACACIAVqIBIgCUEAEEEgAmoiAiAHSQ0ACwsgGUUhHSAAIAc2AhgCQAJAAkACQAJAIAlBfWoOBQABAgMDAQtBACEKQQAgDyAAKAIEIhNrIgZBfyAAKAJ4QX9qdEF/cyIQayICIAIgBksbIRUgACgCICAPIAAoAnxBAxAeQQJ0aiIaKAIAIQggACgCECAAKAIUIAYgACgCdBAnIgJBASACGyENQQNBBCAZGyEbIAAoAigiHCAGIBBxQQN0aiIOQQRqIRYgACgCiAEiAkH/HyACQf8fSRshCyAPQQNqIRQgBkEJaiEJIAYgACgCDGshHiAMKAJAQX9qIREgACgCgAEhHyAjIQUgHSECA0AgESEHIAJBA0cEQCAMQUBrIAJBAnRqKAIAIQcLAkAgB0F/aiAeTw0AIA9BAxAfIA8gB2tBAxAfRw0AIBQgFCAHayASEB1BA2oiByAFTQ0AIBcgCkEDdGoiBSAHNgIEIAUgAiAdazYCACAKQQFqIQogByALSw0FIAciBSAPaiASRg0FCyACQQFqIgIgG0kNAAsCQCAFQQJLDQBBAiEFIBMgACgCHCAAKAIkIAxB7ABqIA8QQCICIA1JDQAgBiACayIHQf//D0sNACAPIAIgE2ogEhAdIgJBA0kNACAXIAI2AgQgFyAHQQJqNgIAIAIgC00EQEEBIQogAiIFIA9qIBJHDQELQQEhCiAAIAZBAWo2AhgMBAsgGiAGNgIAAkAgCCANSQ0AIAZBAmohFEF/IB90QX9zIQtBACEGQQAhEQNAIA8gBiARIAYgEUkbIgJqIAggE2oiGiACaiASEB0gAmoiAiAFSwRAIBcgCkEDdGoiBSACNgIEIAUgFCAIazYCACACIAhqIAkgAiAJIAhrSxshCSAKQQFqIQogAkGAIEsNAiACIgUgD2ogEkYNAgsgHCAIIBBxQQN0aiEHAkACQCACIBpqLQAAIAIgD2otAABJBEAgDiAINgIAIAggFUsNASAMQdAAaiEODAQLIBYgCDYCACAIIBVLBEAgByEWIAIhEQwCCyAMQdAAaiEWDAMLIAIhBiAHQQRqIg4hBwsgC0UNASALQX9qIQsgBygCACIIIA1PDQALCyAWQQA2AgAgDkEANgIAIAAgCUF4ajYCGAwDC0EAIQpBACAPIAAoAgQiFWsiBkF/IAAoAnhBf2p0QX9zIhNrIgIgAiAGSxshDSAAKAIgIA8gACgCfEEEEB5BAnRqIhQoAgAhCCAAKAIQIAAoAhQgBiAAKAJ0ECciAkEBIAIbIRBBA0EEIBkbIRogACgCKCIbIAYgE3FBA3RqIg5BBGohFiAAKAKIASICQf8fIAJB/x9JGyEcIA9BBGohCyAGQQlqIQkgBiAAKAIMayEeIAwoAkBBf2ohESAAKAKAASEfICMhBSAdIQIDQCARIQcgAkEDRwRAIAxBQGsgAkECdGooAgAhBwsCQCAHQX9qIB5PDQAgD0EEEB8gDyAHa0EEEB9HDQAgCyALIAdrIBIQHUEEaiIHIAVNDQAgFyAKQQN0aiIFIAc2AgQgBSACIB1rNgIAIApBAWohCiAHIBxLDQQgByIFIA9qIBJGDQQLIAJBAWoiAiAaSQ0ACyAUIAY2AgACQCAIIBBJDQAgBkECaiEUQX8gH3RBf3MhC0EAIQZBACERA0AgDyAGIBEgBiARSRsiAmogCCAVaiIaIAJqIBIQHSACaiICIAVLBEAgFyAKQQN0aiIFIAI2AgQgBSAUIAhrNgIAIAIgCGogCSACIAkgCGtLGyEJIApBAWohCiACQYAgSw0CIAIiBSAPaiASRg0CCyAbIAggE3FBA3RqIQcCQAJAIAIgGmotAAAgAiAPai0AAEkEQCAOIAg2AgAgCCANSw0BIAxB0ABqIQ4MBAsgFiAINgIAIAggDUsEQCAHIRYgAiERDAILIAxB0ABqIRYMAwsgAiEGIAdBBGoiDiEHCyALRQ0BIAtBf2ohCyAHKAIAIgggEE8NAAsLIBZBADYCACAOQQA2AgAgACAJQXhqNgIYDAILQQAhCkEAIA8gACgCBCIVayIGQX8gACgCeEF/anRBf3MiE2siAiACIAZLGyENIAAoAiAgDyAAKAJ8QQUQHkECdGoiFCgCACEIIAAoAhAgACgCFCAGIAAoAnQQJyICQQEgAhshEEEDQQQgGRshGiAAKAIoIhsgBiATcUEDdGoiFkEEaiEOIAAoAogBIgJB/x8gAkH/H0kbIRwgD0EEaiELIAZBCWohCSAGIAAoAgxrIR4gDCgCQEF/aiERIAAoAoABIR8gIyEFIB0hAgNAIBEhByACQQNHBEAgDEFAayACQQJ0aigCACEHCwJAIAdBf2ogHk8NACAPQQQQHyAPIAdrQQQQH0cNACALIAsgB2sgEhAdQQRqIgcgBU0NACAXIApBA3RqIgUgBzYCBCAFIAIgHWs2AgAgCkEBaiEKIAcgHEsNAyAHIgUgD2ogEkYNAwsgAkEBaiICIBpJDQALIBQgBjYCAAJAIAggEEkNACAGQQJqIRRBfyAfdEF/cyELQQAhBkEAIREDQCAPIAYgESAGIBFJGyICaiAIIBVqIhogAmogEhAdIAJqIgIgBUsEQCAXIApBA3RqIgUgAjYCBCAFIBQgCGs2AgAgAiAIaiAJIAIgCSAIa0sbIQkgCkEBaiEKIAJBgCBLDQIgAiIFIA9qIBJGDQILIBsgCCATcUEDdGohBwJAAkAgAiAaai0AACACIA9qLQAASQRAIBYgCDYCACAIIA1LDQEgDEHQAGohFgwECyAOIAg2AgAgCCANSwRAIAchDiACIREMAgsgDEHQAGohDgwDCyACIQYgB0EEaiIWIQcLIAtFDQEgC0F/aiELIAcoAgAiCCAQTw0ACwsgDkEANgIAIBZBADYCACAAIAlBeGo2AhgMAQtBA
CEKQQAgDyAAKAIEIhVrIgZBfyAAKAJ4QX9qdEF/cyITayICIAIgBksbIQ0gACgCICAPIAAoAnxBBhAeQQJ0aiIUKAIAIQggACgCECAAKAIUIAYgACgCdBAnIgJBASACGyEQQQNBBCAZGyEaIAAoAigiGyAGIBNxQQN0aiIWQQRqIQ4gACgCiAEiAkH/HyACQf8fSRshHCAPQQRqIQsgBkEJaiEJIAYgACgCDGshHiAMKAJAQX9qIREgACgCgAEhHyAjIQUgHSECA0AgESEHIAJBA0cEQCAMQUBrIAJBAnRqKAIAIQcLAkAgB0F/aiAeTw0AIA9BBBAfIA8gB2tBBBAfRw0AIAsgCyAHayASEB1BBGoiByAFTQ0AIBcgCkEDdGoiBSAHNgIEIAUgAiAdazYCACAKQQFqIQogByAcSw0CIAciBSAPaiASRg0CCyACQQFqIgIgGkkNAAsgFCAGNgIAAkAgCCAQSQ0AIAZBAmohFEF/IB90QX9zIQtBACEGQQAhEQNAIA8gBiARIAYgEUkbIgJqIAggFWoiGiACaiASEB0gAmoiAiAFSwRAIBcgCkEDdGoiBSACNgIEIAUgFCAIazYCACACIAhqIAkgAiAJIAhrSxshCSAKQQFqIQogAkGAIEsNAiACIgUgD2ogEkYNAgsgGyAIIBNxQQN0aiEHAkACQCACIBpqLQAAIAIgD2otAABJBEAgFiAINgIAIAggDUsNASAMQdAAaiEWDAQLIA4gCDYCACAIIA1LBEAgByEOIAIhEQwCCyAMQdAAaiEODAMLIAIhBiAHQQRqIhYhBwsgC0UNASALQX9qIQsgBygCACIIIBBPDQALCyAOQQA2AgAgFkEANgIAIAAgCUF4ajYCGAsgCkUNACAYIAwoAkA2AhAgGCAMKAJENgIUIAwoAkghAiAYIBk2AgwgGEEANgIIIBggAjYCGCAYIAMgGSAiQQIQWCIFNgIAIBcgCkF/akEDdGoiAigCBCIHICpLBEAgAigCACELDAMLQQEhAkEAICJBAhAtIQkDQCAYIAJBHGxqQYCAgIAENgIAIAJBAWoiAiAoRw0ACyAFIAlqIQtBACEJICghBwNAIBcgCUEDdGoiAigCBCEFIAxB0ABqIAxBQGsgAigCACIRIB0QPyAHIAVNBEAgEUEBahAkIgZBCHRBgCBqIQ4DQCAHQX1qIQICfyAAKAJkQQFGBEAgAhArIA5qDAELIAAoAmAgACgCOCAGQQJ0aigCABArayAAKAJcaiACEDxBAnQiAkGQpAFqKAIAIAZqQQh0aiAAKAI0IAJqKAIAECtrQTNqCyEIIBggB0EcbGoiAiAZNgIMIAIgETYCBCACIAc2AgggAiAIIAtqNgIAIAIgDCkDUDcCECACIAwoAlg2AhggB0EBaiIHIAVNDQALCyAJQQFqIgkgCkcNAAtBASERAkAgB0F/aiICRQRAQQAhAgwBCwNAQQEhCCAYIBFBf2pBHGxqIgkoAghFBEAgCSgCDEEBaiEICyAPIBFqIg1Bf2pBASAiQQIQUiAJKAIAaiAIICJBAhAtaiAIQX9qICJBAhAtayIFIBggEUEcbGoiFCgCACIWTARAIBQgCDYCDCAUQgA3AgQgFCAFNgIAIBQgCSgCGDYCGCAUIAkpAhA3AhAgBSEWCwJAIA0gKUsNACACIBFGBEAgESECDAMLQQAhGSAUKAIIIglFBEAgFCgCDCEZC0EAICJBAhAtIS0gACgCBCIFIAAoAhgiCGogDUsNACAAKAKEASEHIAggDSAFayIKSQRAA0AgACAFIAhqIBIgB0EAEEEgCGoiCCAKSQ0ACwsgCUEARyEdIBRBEGohGiAAIAo2AhgCQAJAAkACQAJAIAdBfWoOBQABAgMDAQtBACEQQQAgDSAAKAIEIhtrIgZBfyAAKAJ4QX9qdEF/cyIeayIFIAUgBksbIR8gACgCICANIAAoAnxBAxAeQQJ0aiIhKAIAIQUgACgCECAAKAIUIAYgACgCdBAnIgdBASAHGyEcQQRBAyAJGyEkIAAoAigiJSAGIB5xQQN0aiIHQQRqIRMgACgCiAEiCUH/HyAJQf8fSRshDiANQQNqISAgBkEJaiEVIAYgACgCDGshJiAAKAKAASEnICMhCSAdIQgDQAJAAn8gCEEDRgRAIBooAgBBf2oMAQsgFCAIQQJ0aigCEAsiC0F/aiAmTw0AIA1BAxAfIA0gC2tBAxAfRw0AICAgICALayASEB1BA2oiCiAJTQ0AIBcgEEEDdGoiCSAKNgIEIAkgCCAdazYCACAQQQFqIRAgCiAOSw0FIAoiCSANaiASRg0FCyAIQQFqIgggJEkNAAsCQCAJQQJLDQBBAiEJIBsgACgCHCAAKAIkIAxB7ABqIA0QQCIKIBxJDQAgBiAKayIIQf//D0sNACANIAogG2ogEhAdIgpBA0kNACAXIAo2AgQgFyAIQQJqNgIAIAogDk0EQEEBIRAgCiIJIA1qIBJHDQELQQEhECAAIAZBAWo2AhgMBAsgISAGNgIAAkAgBSAcSQ0AIAZBAmohIEF/ICd0QX9zIQhBACEKQQAhDgNAIA0gCiAOIAogDkkbIgZqIAUgG2oiISAGaiASEB0gBmoiBiAJSwRAIBcgEEEDdGoiCSAGNgIEIAkgICAFazYCACAFIAZqIBUgBiAVIAVrSxshFSAQQQFqIRAgBkGAIEsNAiAGIgkgDWogEkYNAgsgJSAFIB5xQQN0aiELAkACQCAGICFqLQAAIAYgDWotAABJBEAgByAFNgIAIAUgH0sNASAMQdAAaiEHDAQLIBMgBTYCACAFIB9LBEAgCyETIAYhDgwCCyAMQdAAaiETDAMLIAYhCiALQQRqIgchCwsgCEUNASAIQX9qIQggCygCACIFIBxPDQALCyATQQA2AgAgB0EANgIAIAAgFUF4ajYCGAwDC0EAIRBBACANIAAoAgQiH2siBkF/IAAoAnhBf2p0QX9zIhtrIgUgBSAGSxshHCAAKAIgIA0gACgCfEEEEB5BAnRqIiAoAgAhBSAAKAIQIAAoAhQgBiAAKAJ0ECciB0EBIAcbIR5BBEEDIAkbISEgACgCKCIkIAYgG3FBA3RqIhNBBGohByAAKAKIASIJQf8fIAlB/x9JGyElIA1BBGohDiAGQQlqIRUgBiAAKAIMayEmIAAoAoABIScgIyEJIB0hCANAAkACfyAIQQNGBEAgGigCAEF/agwBCyAUIAhBAnRqKAIQCyILQX9qICZPDQAgDUEEEB8gDSALa0EEEB9HDQAgDiAOIAtrIBIQHUEEaiIKIAlNDQAgFyAQQQN0aiIJIAo2AgQgCSAIIB1rNgIAIBBBAWohECAKICVLDQQgCiIJIA1qIBJGDQQLIAhBAWoiCCAhSQ0ACyAgIAY2AgACQCAFIB5JDQAgBkECaiEgQX8gJ3RBf3MhCEEAIQpBACEOA0AgDSAKIA4gCiAOSRsiBmogBSAfaiIhIAZqIBIQHSAGaiIGIAlLBEAgFyAQQQN0aiIJIAY2AgQgCSAgIAVrNgIAIAUgBmogFSAG
IBUgBWtLGyEVIBBBAWohECAGQYAgSw0CIAYiCSANaiASRg0CCyAkIAUgG3FBA3RqIQsCQAJAIAYgIWotAAAgBiANai0AAEkEQCATIAU2AgAgBSAcSw0BIAxB0ABqIRMMBAsgByAFNgIAIAUgHEsEQCALIQcgBiEODAILIAxB0ABqIQcMAwsgBiEKIAtBBGoiEyELCyAIRQ0BIAhBf2ohCCALKAIAIgUgHk8NAAsLIAdBADYCACATQQA2AgAgACAVQXhqNgIYDAILQQAhEEEAIA0gACgCBCIfayIGQX8gACgCeEF/anRBf3MiG2siBSAFIAZLGyEcIAAoAiAgDSAAKAJ8QQUQHkECdGoiICgCACEFIAAoAhAgACgCFCAGIAAoAnQQJyIHQQEgBxshHkEEQQMgCRshISAAKAIoIiQgBiAbcUEDdGoiE0EEaiEHIAAoAogBIglB/x8gCUH/H0kbISUgDUEEaiEOIAZBCWohFSAGIAAoAgxrISYgACgCgAEhJyAjIQkgHSEIA0ACQAJ/IAhBA0YEQCAaKAIAQX9qDAELIBQgCEECdGooAhALIgtBf2ogJk8NACANQQQQHyANIAtrQQQQH0cNACAOIA4gC2sgEhAdQQRqIgogCU0NACAXIBBBA3RqIgkgCjYCBCAJIAggHWs2AgAgEEEBaiEQIAogJUsNAyAKIgkgDWogEkYNAwsgCEEBaiIIICFJDQALICAgBjYCAAJAIAUgHkkNACAGQQJqISBBfyAndEF/cyEIQQAhCkEAIQ4DQCANIAogDiAKIA5JGyIGaiAFIB9qIiEgBmogEhAdIAZqIgYgCUsEQCAXIBBBA3RqIgkgBjYCBCAJICAgBWs2AgAgBSAGaiAVIAYgFSAFa0sbIRUgEEEBaiEQIAZBgCBLDQIgBiIJIA1qIBJGDQILICQgBSAbcUEDdGohCwJAAkAgBiAhai0AACAGIA1qLQAASQRAIBMgBTYCACAFIBxLDQEgDEHQAGohEwwECyAHIAU2AgAgBSAcSwRAIAshByAGIQ4MAgsgDEHQAGohBwwDCyAGIQogC0EEaiITIQsLIAhFDQEgCEF/aiEIIAsoAgAiBSAeTw0ACwsgB0EANgIAIBNBADYCACAAIBVBeGo2AhgMAQtBACEQQQAgDSAAKAIEIh9rIgZBfyAAKAJ4QX9qdEF/cyIbayIFIAUgBksbIRwgACgCICANIAAoAnxBBhAeQQJ0aiIgKAIAIQUgACgCECAAKAIUIAYgACgCdBAnIgdBASAHGyEeQQRBAyAJGyEhIAAoAigiJCAGIBtxQQN0aiITQQRqIQcgACgCiAEiCUH/HyAJQf8fSRshJSANQQRqIQ4gBkEJaiEVIAYgACgCDGshJiAAKAKAASEnICMhCSAdIQgDQAJAAn8gCEEDRgRAIBooAgBBf2oMAQsgFCAIQQJ0aigCEAsiC0F/aiAmTw0AIA1BBBAfIA0gC2tBBBAfRw0AIA4gDiALayASEB1BBGoiCiAJTQ0AIBcgEEEDdGoiCSAKNgIEIAkgCCAdazYCACAQQQFqIRAgCiAlSw0CIAoiCSANaiASRg0CCyAIQQFqIgggIUkNAAsgICAGNgIAAkAgBSAeSQ0AIAZBAmohIEF/ICd0QX9zIQhBACEKQQAhDgNAIA0gCiAOIAogDkkbIgZqIAUgH2oiISAGaiASEB0gBmoiBiAJSwRAIBcgEEEDdGoiCSAGNgIEIAkgICAFazYCACAFIAZqIBUgBiAVIAVrSxshFSAQQQFqIRAgBkGAIEsNAiAGIgkgDWogEkYNAgsgJCAFIBtxQQN0aiELAkACQCAGICFqLQAAIAYgDWotAABJBEAgEyAFNgIAIAUgHEsNASAMQdAAaiETDAQLIAcgBTYCACAFIBxLBEAgCyEHIAYhDgwCCyAMQdAAaiEHDAMLIAYhCiALQQRqIhMhCwsgCEUNASAIQX9qIQggCygCACIFIB5PDQALCyAHQQA2AgAgE0EANgIAIAAgFUF4ajYCGAsgEEUNACAXIBBBf2pBA3RqIgUoAgQiByAqSyAHIBFqQYAgT3INBCAWIC1qIQ5BACEWA0AgDEHQAGogGiAXIBZBA3RqIgUoAgAiCSAdED8gKCEGAn8gFgRAIAVBfGooAgBBAWohBgsgBSgCBCIIIAZPCwRAIAlBAWoQJCIHQQh0QYAgaiETA0AgCEF9aiEKIAggEWohBQJ/IAAoAmRBAUYEQCAKECsgE2oMAQsgACgCYCAAKAI4IAdBAnRqKAIAECtrIAAoAlxqIAoQPEECdCIKQZCkAWooAgAgB2pBCHRqIAAoAjQgCmooAgAQK2tBM2oLIA5qIQoCQAJAIAUgAk0EQCAKIBggBUEcbGooAgBIDQEMAgsDQCAYIAJBAWoiAkEcbGpBgICAgAQ2AgAgAiAFSQ0ACwsgGCAFQRxsaiIFIBk2AgwgBSAJNgIEIAUgCDYCCCAFIAo2AgAgBSAMKQNQNwIQIAUgDCgCWDYCGAsgCEF/aiIIIAZPDQALCyAWQQFqIhYgEEcNAAsLIBFBAWoiESACTQ0ACwsgGCACQRxsaiIFKAIMIRkgBSgCBCELIAUoAgAhLCAFKAIIIQcgDCAFKAIYNgJoIAwgBSkCEDcDYCAMIAUpAgg3AyggDCAFKQIQNwMwIAwgBSgCGDYCOCAMIAUpAgA3AyBBACACIAxBIGoQPmsiBSAFIAJLGyECDAMLIA9BAWohDwwHCyAFKAIAIQtBACECIBEgFCgCCAR/IAIFIBQoAgwLayICQYAgTQ0BCyAYIBk2AiggGCAHNgIkIBggCzYCICAYICw2AhwgGCAMKAJoNgI0IBggDCkDYDcCLAwBCyAYIAJBAWoiCkEcbGoiBSAZNgIMIAUgBzYCCCAFIAs2AgQgBSAsNgIAIAUgDCkDYDcCECAFIAwoAmg2AhggCiEZIAINAQtBASEZQQEhCgwBCwNAIAwgGCACQRxsaiIFIhFBGGooAgA2AhggDCAFKQIQNwMQIAwgBSkCCDcDCCAMIAUpAgA3AwAgDBA+IQcgGCAZQX9qIhlBHGxqIgkgESgCGDYCGCAJIAUpAhA3AhAgCSAFKQIINwIIIAkgBSkCADcCACACIAdLIQVBACACIAdrIgkgCSACSxshAiAFDQALIBkgCksNAQsDQCAYIBlBHGxqIgIoAgwhCQJ/IAMgCWogAigCCCIGRQ0AGgJAIAIoAgQiEUEDTwRAIAwgDCkDQDcCRCAMIBFBfmo2AkAMAQsCQAJAAkACQCARIAlFaiICDgQEAQEAAQsgDCgCQEF/aiEHDAELIAxBQGsgAkECdGooAgAhByACQQJJDQELIAwgDCgCRDYCSAsgDCAMKAJANgJEIAwgBzYCQAsgIiAJIAMgESAGEFcgBkF9aiEIIAEoAgwhAgJAAkAgAyAJaiIFICtNBEAgAiADEBwgASgCDCECIAlBEE0EQCABIAIgCWo2AgwMAwsgAkEQaiADQRBqIgcQHCACQSBqIANBIGoQHCAJQTFIDQEgAiAJaiELIAJBMGohAgNAIAI
gB0EgaiIFEBwgAkEQaiAHQTBqEBwgBSEHIAJBIGoiAiALSQ0ACwwBCyACIAMgBSArECILIAEgASgCDCAJajYCDCAJQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyABKAIEIgIgEUEBajYCACACIAk7AQQgCEGAgARPBEAgAUECNgIkIAEgAiABKAIAa0EDdTYCKAsgAiAIOwEGIAEgAkEIajYCBCAGIAlqIANqIgMLIQ8gGUEBaiIZIApNDQALCyAiQQIQUQsgDyApSQ0ACwsgARDyASAAIAAoAgQgBGs2AgQgACAAKAIMIARqIgE2AgwgACABNgIYIAAgATYCECAiEJ4DIAxB8ABqJAALwD4BKX8jAEHgAGsiESQAIAAoAgQhBQJAIAAoAkgNACABKAIEIAEoAgBHDQAgACgCDCIJIAAoAhBHIARBgQhJciADIAVrIAlHcg0AIAAgASACIAMgBBCfAyAAKAIEIQULIAAoAoQBIQcgACgCiAEhCSAAKAIMISEgESAAKAIYNgJcIAAoAjwhGCAAQUBrKAIAIRkgAEEsaiIiIAMgBEECEFkgAyAFICFqIANGaiIPIAMgBGoiEkF4aiIpSQRAIAlB/x8gCUH/H0kbISogEkFgaiErQQNBBCAHQQNGGyIoQX9qISEDQAJAAkACQAJAAkACQAJAAkACQCAAKAIEIgkgACgCGCIEaiAPSw0AIA8gA2shGiAAKAKEASEHIAQgDyAJayIFSQRAA0AgACAEIAlqIBIgB0EAEEEgBGoiBCAFSQ0ACwsgGkUhHCAAIAU2AhgCQAJAAkACQAJAIAdBfWoOBQABAgMDAQtBACELQQAgDyAAKAIEIhNrIgZBfyAAKAJ4QX9qdEF/cyIQayIEIAQgBksbIRUgACgCICAPIAAoAnxBAxAeQQJ0aiIUKAIAIQggACgCECAAKAIUIAYgACgCdBAnIgRBASAEGyEOQQNBBCAaGyEfIAAoAigiFyAGIBBxQQN0aiIWQQRqIQogACgCiAEiBEH/HyAEQf8fSRshDSAPQQNqIQwgBkEJaiEHIAYgACgCDGshGyAAKAKAASEdICEhCSAcIQQDQAJAAn8gBEEDRgRAIAIoAgBBf2oMAQsgAiAEQQJ0aigCAAsiBUF/aiAbTw0AIA9BAxAfIA8gBWtBAxAfRw0AIAwgDCAFayASEB1BA2oiBSAJTQ0AIBggC0EDdGoiCSAFNgIEIAkgBCAcazYCACALQQFqIQsgBSANSw0FIAUiCSAPaiASRg0FCyAEQQFqIgQgH0kNAAsCQCAJQQJLDQBBAiEJIBMgACgCHCAAKAIkIBFB3ABqIA8QQCIEIA5JDQAgBiAEayIFQf//D0sNACAPIAQgE2ogEhAdIgRBA0kNACAYIAQ2AgQgGCAFQQJqNgIAIAQgDU0EQEEBIQsgBCIJIA9qIBJHDQELQQEhCyAAIAZBAWo2AhgMBAsgFCAGNgIAAkAgCCAOSQ0AIAZBAmohFEF/IB10QX9zIQ1BACEGQQAhDANAIA8gBiAMIAYgDEkbIgRqIAggE2oiHyAEaiASEB0gBGoiBCAJSwRAIBggC0EDdGoiCSAENgIEIAkgFCAIazYCACAEIAhqIAcgBCAHIAhrSxshByALQQFqIQsgBEGAIEsNAiAEIgkgD2ogEkYNAgsgFyAIIBBxQQN0aiEFAkACQCAEIB9qLQAAIAQgD2otAABJBEAgFiAINgIAIAggFUsNASARQUBrIRYMBAsgCiAINgIAIAggFUsEQCAFIQogBCEMDAILIBFBQGshCgwDCyAEIQYgBUEEaiIWIQULIA1FDQEgDUF/aiENIAUoAgAiCCAOTw0ACwsgCkEANgIAIBZBADYCACAAIAdBeGo2AhgMAwtBACELQQAgDyAAKAIEIhVrIgZBfyAAKAJ4QX9qdEF/cyITayIEIAQgBksbIQ4gACgCICAPIAAoAnxBBBAeQQJ0aiIMKAIAIQggACgCECAAKAIUIAYgACgCdBAnIgRBASAEGyEQQQNBBCAaGyEUIAAoAigiHyAGIBNxQQN0aiIKQQRqIRYgACgCiAEiBEH/HyAEQf8fSRshFyAPQQRqIQ0gBkEJaiEHIAYgACgCDGshGyAAKAKAASEdICEhCSAcIQQDQAJAAn8gBEEDRgRAIAIoAgBBf2oMAQsgAiAEQQJ0aigCAAsiBUF/aiAbTw0AIA9BBBAfIA8gBWtBBBAfRw0AIA0gDSAFayASEB1BBGoiBSAJTQ0AIBggC0EDdGoiCSAFNgIEIAkgBCAcazYCACALQQFqIQsgBSAXSw0EIAUiCSAPaiASRg0ECyAEQQFqIgQgFEkNAAsgDCAGNgIAAkAgCCAQSQ0AIAZBAmohFEF/IB10QX9zIQ1BACEGQQAhDANAIA8gBiAMIAYgDEkbIgRqIAggFWoiFyAEaiASEB0gBGoiBCAJSwRAIBggC0EDdGoiCSAENgIEIAkgFCAIazYCACAEIAhqIAcgBCAHIAhrSxshByALQQFqIQsgBEGAIEsNAiAEIgkgD2ogEkYNAgsgHyAIIBNxQQN0aiEFAkACQCAEIBdqLQAAIAQgD2otAABJBEAgCiAINgIAIAggDksNASARQUBrIQoMBAsgFiAINgIAIAggDksEQCAFIRYgBCEMDAILIBFBQGshFgwDCyAEIQYgBUEEaiIKIQULIA1FDQEgDUF/aiENIAUoAgAiCCAQTw0ACwsgFkEANgIAIApBADYCACAAIAdBeGo2AhgMAgtBACELQQAgDyAAKAIEIhVrIgZBfyAAKAJ4QX9qdEF/cyITayIEIAQgBksbIQ4gACgCICAPIAAoAnxBBRAeQQJ0aiIMKAIAIQggACgCECAAKAIUIAYgACgCdBAnIgRBASAEGyEQQQNBBCAaGyEUIAAoAigiHyAGIBNxQQN0aiIKQQRqIRYgACgCiAEiBEH/HyAEQf8fSRshFyAPQQRqIQ0gBkEJaiEHIAYgACgCDGshGyAAKAKAASEdICEhCSAcIQQDQAJAAn8gBEEDRgRAIAIoAgBBf2oMAQsgAiAEQQJ0aigCAAsiBUF/aiAbTw0AIA9BBBAfIA8gBWtBBBAfRw0AIA0gDSAFayASEB1BBGoiBSAJTQ0AIBggC0EDdGoiCSAFNgIEIAkgBCAcazYCACALQQFqIQsgBSAXSw0DIAUiCSAPaiASRg0DCyAEQQFqIgQgFEkNAAsgDCAGNgIAAkAgCCAQSQ0AIAZBAmohFEF/IB10QX9zIQ1BACEGQQAhDANAIA8gBiAMIAYgDEkbIgRqIAggFWoiFyAEaiASEB0gBGoiBCAJSwRAIBggC0EDdGoiCSAENgIEIAkgFCAIazYCACAEIAhqIAcgBCAHIAhrSxshByALQQFqIQsgBEGAIEsNAiAEIgkgD2ogEkYNAgsgHyAIIBNxQQN0aiEFAkACQCAEIBdqLQAAIAQgD2otAABJBEAgCiAINgIAIAggDksNASARQUBrIQoMBAsgFiAINgIAIAggDksEQCAFIRYgBCEMDAILIBFBQG
shFgwDCyAEIQYgBUEEaiIKIQULIA1FDQEgDUF/aiENIAUoAgAiCCAQTw0ACwsgFkEANgIAIApBADYCACAAIAdBeGo2AhgMAQtBACELQQAgDyAAKAIEIhVrIgZBfyAAKAJ4QX9qdEF/cyITayIEIAQgBksbIQ4gACgCICAPIAAoAnxBBhAeQQJ0aiIMKAIAIQggACgCECAAKAIUIAYgACgCdBAnIgRBASAEGyEQQQNBBCAaGyEUIAAoAigiHyAGIBNxQQN0aiIKQQRqIRYgACgCiAEiBEH/HyAEQf8fSRshFyAPQQRqIQ0gBkEJaiEHIAYgACgCDGshGyAAKAKAASEdICEhCSAcIQQDQAJAAn8gBEEDRgRAIAIoAgBBf2oMAQsgAiAEQQJ0aigCAAsiBUF/aiAbTw0AIA9BBBAfIA8gBWtBBBAfRw0AIA0gDSAFayASEB1BBGoiBSAJTQ0AIBggC0EDdGoiCSAFNgIEIAkgBCAcazYCACALQQFqIQsgBSAXSw0CIAUiCSAPaiASRg0CCyAEQQFqIgQgFEkNAAsgDCAGNgIAAkAgCCAQSQ0AIAZBAmohFEF/IB10QX9zIQ1BACEGQQAhDANAIA8gBiAMIAYgDEkbIgRqIAggFWoiFyAEaiASEB0gBGoiBCAJSwRAIBggC0EDdGoiCSAENgIEIAkgFCAIazYCACAEIAhqIAcgBCAHIAhrSxshByALQQFqIQsgBEGAIEsNAiAEIgkgD2ogEkYNAgsgHyAIIBNxQQN0aiEFAkACQCAEIBdqLQAAIAQgD2otAABJBEAgCiAINgIAIAggDksNASARQUBrIQoMBAsgFiAINgIAIAggDksEQCAFIRYgBCEMDAILIBFBQGshFgwDCyAEIQYgBUEEaiIKIQULIA1FDQEgDUF/aiENIAUoAgAiCCAQTw0ACwsgFkEANgIAIApBADYCACAAIAdBeGo2AhgLIAtFDQAgGSACKAIANgIQIBkgAigCBDYCFCACKAIIIQQgGSAaNgIMIBlBADYCCCAZIAQ2AhggGSADIBogIkECEFgiCTYCACAYIAtBf2pBA3RqIgQoAgQiBSAqSwRAIAQoAgAhDQwDC0EBIQRBACAiQQIQLSEHA0AgGSAEQRxsakGAgICABDYCACAEQQFqIgQgKEcNAAsgByAJaiENQQAhByAoIQUDQCAYIAdBA3RqIgQoAgQhCSARQUBrIAIgBCgCACIKIBwQPyAFIAlNBEAgCkEBahAkIgZBCHRBgCBqIQwDQCAFQX1qIQQCfyAAKAJkQQFGBEAgBBArIAxqDAELIAAoAmAgACgCOCAGQQJ0aigCABArayAAKAJcaiAEEDxBAnQiBEGQpAFqKAIAIAZqQQh0aiAAKAI0IARqKAIAECtrQTNqCyEIIBkgBUEcbGoiBCAaNgIMIAQgCjYCBCAEIAU2AgggBCAIIA1qNgIAIAQgESkDQDcCECAEIBEoAkg2AhggBUEBaiIFIAlNDQALCyAHQQFqIgcgC0cNAAtBASEJAkAgBUF/aiIERQRAQQAhBAwBCwNAQQEhCCAZIAlBf2pBHGxqIgUoAghFBEAgBSgCDEEBaiEICyAJIA9qIg5Bf2pBASAiQQIQUiAFKAIAaiAIICJBAhAtaiAIQX9qICJBAhAtayIHIBkgCUEcbGoiFCgCACIWTARAIBQgCDYCDCAUQgA3AgQgFCAHNgIAIBQgBSgCGDYCGCAUIAUpAhA3AhAgByEWCwJAIA4gKUsNACAEIAlGBEAgCSEEDAMLQQAhGiAUKAIIIgdFBEAgFCgCDCEaC0EAICJBAhAtIS0gACgCBCIFIAAoAhgiCGogDksNACAAKAKEASELIAggDiAFayIKSQRAA0AgACAFIAhqIBIgC0EAEEEgCGoiCCAKSQ0ACwsgB0EARyEcIBRBEGohHyAAIAo2AhgCQAJAAkACQAJAIAtBfWoOBQABAgMDAQtBACEQQQAgDiAAKAIEIhdrIgpBfyAAKAJ4QX9qdEF/cyIdayIFIAUgCksbISMgACgCICAOIAAoAnxBAxAeQQJ0aiIgKAIAIQwgACgCECAAKAIUIAogACgCdBAnIgVBASAFGyEbQQRBAyAHGyEkIAAoAigiJSAKIB1xQQN0aiIFQQRqIRMgACgCiAEiB0H/HyAHQf8fSRshBiAOQQNqIR4gCkEJaiEVIAogACgCDGshJiAAKAKAASEnICEhByAcIQgDQAJAAn8gCEEDRgRAIB8oAgBBf2oMAQsgFCAIQQJ0aigCEAsiDUF/aiAmTw0AIA5BAxAfIA4gDWtBAxAfRw0AIB4gHiANayASEB1BA2oiCyAHTQ0AIBggEEEDdGoiByALNgIEIAcgCCAcazYCACAQQQFqIRAgCyAGSw0FIAsiByAOaiASRg0FCyAIQQFqIgggJEkNAAsCQCAHQQJLDQBBAiEHIBcgACgCHCAAKAIkIBFB3ABqIA4QQCILIBtJDQAgCiALayIIQf//D0sNACAOIAsgF2ogEhAdIgtBA0kNACAYIAs2AgQgGCAIQQJqNgIAIAsgBk0EQEEBIRAgCyIHIA5qIBJHDQELQQEhECAAIApBAWo2AhgMBAsgICAKNgIAAkAgDCAbSQ0AIApBAmohHkF/ICd0QX9zIQhBACELQQAhCgNAIA4gCyAKIAsgCkkbIgZqIAwgF2oiICAGaiASEB0gBmoiBiAHSwRAIBggEEEDdGoiByAGNgIEIAcgHiAMazYCACAGIAxqIBUgBiAVIAxrSxshFSAQQQFqIRAgBkGAIEsNAiAGIgcgDmogEkYNAgsgJSAMIB1xQQN0aiENAkACQCAGICBqLQAAIAYgDmotAABJBEAgBSAMNgIAIAwgI0sNASARQUBrIQUMBAsgEyAMNgIAIAwgI0sEQCANIRMgBiEKDAILIBFBQGshEwwDCyAGIQsgDUEEaiIFIQ0LIAhFDQEgCEF/aiEIIA0oAgAiDCAbTw0ACwsgE0EANgIAIAVBADYCACAAIBVBeGo2AhgMAwtBACEQQQAgDiAAKAIEIiNrIgpBfyAAKAJ4QX9qdEF/cyIXayIFIAUgCksbIRsgACgCICAOIAAoAnxBBBAeQQJ0aiIeKAIAIQwgACgCECAAKAIUIAogACgCdBAnIgVBASAFGyEdQQRBAyAHGyEgIAAoAigiJCAKIBdxQQN0aiITQQRqIQUgACgCiAEiB0H/HyAHQf8fSRshJSAOQQRqIQYgCkEJaiEVIAogACgCDGshJiAAKAKAASEnICEhByAcIQgDQAJAAn8gCEEDRgRAIB8oAgBBf2oMAQsgFCAIQQJ0aigCEAsiDUF/aiAmTw0AIA5BBBAfIA4gDWtBBBAfRw0AIAYgBiANayASEB1BBGoiCyAHTQ0AIBggEEEDdGoiByALNgIEIAcgCCAcazYCACAQQQFqIRAgCyAlSw0EIAsiByAOaiASRg0ECyAIQQFqIgggIEkNAAsgHiAKNgIAAkAgDCAdSQ0AIApBAmohHkF/ICd0QX9zIQhBACELQQAhCgNAIA4gCyAKIAsgCkkbI
gZqIAwgI2oiICAGaiASEB0gBmoiBiAHSwRAIBggEEEDdGoiByAGNgIEIAcgHiAMazYCACAGIAxqIBUgBiAVIAxrSxshFSAQQQFqIRAgBkGAIEsNAiAGIgcgDmogEkYNAgsgJCAMIBdxQQN0aiENAkACQCAGICBqLQAAIAYgDmotAABJBEAgEyAMNgIAIAwgG0sNASARQUBrIRMMBAsgBSAMNgIAIAwgG0sEQCANIQUgBiEKDAILIBFBQGshBQwDCyAGIQsgDUEEaiITIQ0LIAhFDQEgCEF/aiEIIA0oAgAiDCAdTw0ACwsgBUEANgIAIBNBADYCACAAIBVBeGo2AhgMAgtBACEQQQAgDiAAKAIEIiNrIgpBfyAAKAJ4QX9qdEF/cyIXayIFIAUgCksbIRsgACgCICAOIAAoAnxBBRAeQQJ0aiIeKAIAIQwgACgCECAAKAIUIAogACgCdBAnIgVBASAFGyEdQQRBAyAHGyEgIAAoAigiJCAKIBdxQQN0aiITQQRqIQUgACgCiAEiB0H/HyAHQf8fSRshJSAOQQRqIQYgCkEJaiEVIAogACgCDGshJiAAKAKAASEnICEhByAcIQgDQAJAAn8gCEEDRgRAIB8oAgBBf2oMAQsgFCAIQQJ0aigCEAsiDUF/aiAmTw0AIA5BBBAfIA4gDWtBBBAfRw0AIAYgBiANayASEB1BBGoiCyAHTQ0AIBggEEEDdGoiByALNgIEIAcgCCAcazYCACAQQQFqIRAgCyAlSw0DIAsiByAOaiASRg0DCyAIQQFqIgggIEkNAAsgHiAKNgIAAkAgDCAdSQ0AIApBAmohHkF/ICd0QX9zIQhBACELQQAhCgNAIA4gCyAKIAsgCkkbIgZqIAwgI2oiICAGaiASEB0gBmoiBiAHSwRAIBggEEEDdGoiByAGNgIEIAcgHiAMazYCACAGIAxqIBUgBiAVIAxrSxshFSAQQQFqIRAgBkGAIEsNAiAGIgcgDmogEkYNAgsgJCAMIBdxQQN0aiENAkACQCAGICBqLQAAIAYgDmotAABJBEAgEyAMNgIAIAwgG0sNASARQUBrIRMMBAsgBSAMNgIAIAwgG0sEQCANIQUgBiEKDAILIBFBQGshBQwDCyAGIQsgDUEEaiITIQ0LIAhFDQEgCEF/aiEIIA0oAgAiDCAdTw0ACwsgBUEANgIAIBNBADYCACAAIBVBeGo2AhgMAQtBACEQQQAgDiAAKAIEIiNrIgpBfyAAKAJ4QX9qdEF/cyIXayIFIAUgCksbIRsgACgCICAOIAAoAnxBBhAeQQJ0aiIeKAIAIQwgACgCECAAKAIUIAogACgCdBAnIgVBASAFGyEdQQRBAyAHGyEgIAAoAigiJCAKIBdxQQN0aiITQQRqIQUgACgCiAEiB0H/HyAHQf8fSRshJSAOQQRqIQYgCkEJaiEVIAogACgCDGshJiAAKAKAASEnICEhByAcIQgDQAJAAn8gCEEDRgRAIB8oAgBBf2oMAQsgFCAIQQJ0aigCEAsiDUF/aiAmTw0AIA5BBBAfIA4gDWtBBBAfRw0AIAYgBiANayASEB1BBGoiCyAHTQ0AIBggEEEDdGoiByALNgIEIAcgCCAcazYCACAQQQFqIRAgCyAlSw0CIAsiByAOaiASRg0CCyAIQQFqIgggIEkNAAsgHiAKNgIAAkAgDCAdSQ0AIApBAmohHkF/ICd0QX9zIQhBACELQQAhCgNAIA4gCyAKIAsgCkkbIgZqIAwgI2oiICAGaiASEB0gBmoiBiAHSwRAIBggEEEDdGoiByAGNgIEIAcgHiAMazYCACAGIAxqIBUgBiAVIAxrSxshFSAQQQFqIRAgBkGAIEsNAiAGIgcgDmogEkYNAgsgJCAMIBdxQQN0aiENAkACQCAGICBqLQAAIAYgDmotAABJBEAgEyAMNgIAIAwgG0sNASARQUBrIRMMBAsgBSAMNgIAIAwgG0sEQCANIQUgBiEKDAILIBFBQGshBQwDCyAGIQsgDUEEaiITIQ0LIAhFDQEgCEF/aiEIIA0oAgAiDCAdTw0ACwsgBUEANgIAIBNBADYCACAAIBVBeGo2AhgLIBBFDQAgGCAQQX9qQQN0aiIHKAIEIgUgKksgBSAJakGAIE9yDQQgFiAtaiEMQQAhFgNAIBFBQGsgHyAYIBZBA3RqIgcoAgAiBSAcED8gKCEGAn8gFgRAIAdBfGooAgBBAWohBgsgBygCBCIIIAZPCwRAIAVBAWoQJCILQQh0QYAgaiETA0AgCEF9aiEKIAggCWohBwJ/IAAoAmRBAUYEQCAKECsgE2oMAQsgACgCYCAAKAI4IAtBAnRqKAIAECtrIAAoAlxqIAoQPEECdCIKQZCkAWooAgAgC2pBCHRqIAAoAjQgCmooAgAQK2tBM2oLIAxqIQoCQAJAIAcgBE0EQCAKIBkgB0EcbGooAgBIDQEMAgsDQCAZIARBAWoiBEEcbGpBgICAgAQ2AgAgBCAHSQ0ACwsgGSAHQRxsaiIHIBo2AgwgByAFNgIEIAcgCDYCCCAHIAo2AgAgByARKQNANwIQIAcgESgCSDYCGAsgCEF/aiIIIAZPDQALCyAWQQFqIhYgEEcNAAsLIAlBAWoiCSAETQ0ACwsgGSAEQRxsaiIJKAIMIRogCSgCBCENIAkoAgAhLCAJKAIIIQUgESAJKAIYNgJYIBEgCSkCEDcDUCARIAkpAgg3AyggESAJKQIQNwMwIBEgCSgCGDYCOCARIAkpAgA3AyBBACAEIBFBIGoQPmsiCSAJIARLGyEEDAMLIA9BAWohDwwHCyAHKAIAIQ1BACEEIAkgFCgCCAR/IAQFIBQoAgwLayIEQYAgTQ0BCyAZIBo2AiggGSAFNgIkIBkgDTYCICAZICw2AhwgGSARKAJYNgI0IBkgESkDUDcCLAwBCyAZIARBAWoiC0EcbGoiCSAaNgIMIAkgBTYCCCAJIA02AgQgCSAsNgIAIAkgESkDUDcCECAJIBEoAlg2AhggCyEaIAQNAQtBASEaQQEhCwwBCwNAIBEgGSAEQRxsaiIJIgpBGGooAgA2AhggESAJKQIQNwMQIBEgCSkCCDcDCCARIAkpAgA3AwAgERA+IQUgGSAaQX9qIhpBHGxqIgcgCigCGDYCGCAHIAkpAhA3AhAgByAJKQIINwIIIAcgCSkCADcCACAEIAVLIQlBACAEIAVrIgcgByAESxshBCAJDQALIBogC0sNAQsDQCAZIBpBHGxqIgQoAgwhBwJ/IAMgB2ogBCgCCCIGRQ0AGgJAAkAgBCgCBCIKQQNPBEAgAiACKQIANwIEIApBfmohBAwBCwJAAkACQAJAIAogB0VqIgkOBAUBAQABCyACKAIAQX9qIQQMAQsgAiAJQQJ0aigCACEEIAlBAkkNAQsgAiACKAIENgIICyACIAIoAgA2AgQLIAIgBDYCAAsgIiAHIAMgCiAGEFcgBkF9aiEIIAEoAgwhBAJAAkAgAyAHaiIJICtNBEAgBCADEBwgASgCDCEEIAdBEE0EQCABIAQgB2o2AgwMAwsg
BEEQaiADQRBqIgUQHCAEQSBqIANBIGoQHCAHQTFIDQEgBCAHaiENIARBMGohBANAIAQgBUEgaiIJEBwgBEEQaiAFQTBqEBwgCSEFIARBIGoiBCANSQ0ACwwBCyAEIAMgCSArECILIAEgASgCDCAHajYCDCAHQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyABKAIEIgQgCkEBajYCACAEIAc7AQQgCEGAgARPBEAgAUECNgIkIAEgBCABKAIAa0EDdTYCKAsgBCAIOwEGIAEgBEEIajYCBCAGIAdqIANqIgMLIQ8gGkEBaiIaIAtNDQALCyAiQQIQUQsgDyApSQ0ACwsgEUHgAGokACASIANrC/Y9ASl/IwBB4ABrIhEkACAAKAKEASEHIAAoAgQhISAAKAKIASEJIAAoAgwhBiARIAAoAhg2AlwgACgCPCEYIABBQGsoAgAhGSAAQSxqIiIgAyAEQQIQWSADIAYgIWogA0ZqIg8gAyAEaiISQXhqIilJBEAgCUH/HyAJQf8fSRshKiASQWBqIStBA0EEIAdBA0YbIihBf2ohIQNAAkACQAJAAkACQAJAAkACQAJAIAAoAgQiCSAAKAIYIgRqIA9LDQAgDyADayEaIAAoAoQBIQcgBCAPIAlrIgZJBEADQCAAIAQgCWogEiAHQQAQQSAEaiIEIAZJDQALCyAaRSEcIAAgBjYCGAJAAkACQAJAAkAgB0F9ag4FAAECAwMBC0EAIQtBACAPIAAoAgQiE2siBUF/IAAoAnhBf2p0QX9zIhBrIgQgBCAFSxshFSAAKAIgIA8gACgCfEEDEB5BAnRqIhQoAgAhCCAAKAIQIAAoAhQgBSAAKAJ0ECciBEEBIAQbIQ5BA0EEIBobIR8gACgCKCIXIAUgEHFBA3RqIhZBBGohCiAAKAKIASIEQf8fIARB/x9JGyENIA9BA2ohDCAFQQlqIQcgBSAAKAIMayEbIAAoAoABIR0gISEJIBwhBANAAkACfyAEQQNGBEAgAigCAEF/agwBCyACIARBAnRqKAIACyIGQX9qIBtPDQAgD0EDEB8gDyAGa0EDEB9HDQAgDCAMIAZrIBIQHUEDaiIGIAlNDQAgGCALQQN0aiIJIAY2AgQgCSAEIBxrNgIAIAtBAWohCyAGIA1LDQUgBiIJIA9qIBJGDQULIARBAWoiBCAfSQ0ACwJAIAlBAksNAEECIQkgEyAAKAIcIAAoAiQgEUHcAGogDxBAIgQgDkkNACAFIARrIgZB//8PSw0AIA8gBCATaiASEB0iBEEDSQ0AIBggBDYCBCAYIAZBAmo2AgAgBCANTQRAQQEhCyAEIgkgD2ogEkcNAQtBASELIAAgBUEBajYCGAwECyAUIAU2AgACQCAIIA5JDQAgBUECaiEUQX8gHXRBf3MhDUEAIQVBACEMA0AgDyAFIAwgBSAMSRsiBGogCCATaiIfIARqIBIQHSAEaiIEIAlLBEAgGCALQQN0aiIJIAQ2AgQgCSAUIAhrNgIAIAQgCGogByAEIAcgCGtLGyEHIAtBAWohCyAEQYAgSw0CIAQiCSAPaiASRg0CCyAXIAggEHFBA3RqIQYCQAJAIAQgH2otAAAgBCAPai0AAEkEQCAWIAg2AgAgCCAVSw0BIBFBQGshFgwECyAKIAg2AgAgCCAVSwRAIAYhCiAEIQwMAgsgEUFAayEKDAMLIAQhBSAGQQRqIhYhBgsgDUUNASANQX9qIQ0gBigCACIIIA5PDQALCyAKQQA2AgAgFkEANgIAIAAgB0F4ajYCGAwDC0EAIQtBACAPIAAoAgQiFWsiBUF/IAAoAnhBf2p0QX9zIhNrIgQgBCAFSxshDiAAKAIgIA8gACgCfEEEEB5BAnRqIgwoAgAhCCAAKAIQIAAoAhQgBSAAKAJ0ECciBEEBIAQbIRBBA0EEIBobIRQgACgCKCIfIAUgE3FBA3RqIhZBBGohCiAAKAKIASIEQf8fIARB/x9JGyEXIA9BBGohDSAFQQlqIQcgBSAAKAIMayEbIAAoAoABIR0gISEJIBwhBANAAkACfyAEQQNGBEAgAigCAEF/agwBCyACIARBAnRqKAIACyIGQX9qIBtPDQAgD0EEEB8gDyAGa0EEEB9HDQAgDSANIAZrIBIQHUEEaiIGIAlNDQAgGCALQQN0aiIJIAY2AgQgCSAEIBxrNgIAIAtBAWohCyAGIBdLDQQgBiIJIA9qIBJGDQQLIARBAWoiBCAUSQ0ACyAMIAU2AgACQCAIIBBJDQAgBUECaiEUQX8gHXRBf3MhDUEAIQVBACEMA0AgDyAFIAwgBSAMSRsiBGogCCAVaiIXIARqIBIQHSAEaiIEIAlLBEAgGCALQQN0aiIJIAQ2AgQgCSAUIAhrNgIAIAQgCGogByAEIAcgCGtLGyEHIAtBAWohCyAEQYAgSw0CIAQiCSAPaiASRg0CCyAfIAggE3FBA3RqIQYCQAJAIAQgF2otAAAgBCAPai0AAEkEQCAWIAg2AgAgCCAOSw0BIBFBQGshFgwECyAKIAg2AgAgCCAOSwRAIAYhCiAEIQwMAgsgEUFAayEKDAMLIAQhBSAGQQRqIhYhBgsgDUUNASANQX9qIQ0gBigCACIIIBBPDQALCyAKQQA2AgAgFkEANgIAIAAgB0F4ajYCGAwCC0EAIQtBACAPIAAoAgQiFWsiBUF/IAAoAnhBf2p0QX9zIhNrIgQgBCAFSxshDiAAKAIgIA8gACgCfEEFEB5BAnRqIgwoAgAhCCAAKAIQIAAoAhQgBSAAKAJ0ECciBEEBIAQbIRBBA0EEIBobIRQgACgCKCIfIAUgE3FBA3RqIgpBBGohFiAAKAKIASIEQf8fIARB/x9JGyEXIA9BBGohDSAFQQlqIQcgBSAAKAIMayEbIAAoAoABIR0gISEJIBwhBANAAkACfyAEQQNGBEAgAigCAEF/agwBCyACIARBAnRqKAIACyIGQX9qIBtPDQAgD0EEEB8gDyAGa0EEEB9HDQAgDSANIAZrIBIQHUEEaiIGIAlNDQAgGCALQQN0aiIJIAY2AgQgCSAEIBxrNgIAIAtBAWohCyAGIBdLDQMgBiIJIA9qIBJGDQMLIARBAWoiBCAUSQ0ACyAMIAU2AgACQCAIIBBJDQAgBUECaiEUQX8gHXRBf3MhDUEAIQVBACEMA0AgDyAFIAwgBSAMSRsiBGogCCAVaiIXIARqIBIQHSAEaiIEIAlLBEAgGCALQQN0aiIJIAQ2AgQgCSAUIAhrNgIAIAQgCGogByAEIAcgCGtLGyEHIAtBAWohCyAEQYAgSw0CIAQiCSAPaiASRg0CCyAfIAggE3FBA3RqIQYCQAJAIAQgF2otAAAgBCAPai0AAEkEQCAKIAg2AgAgCCAOSw0BIBFBQGshCgwECyAWIAg2AgAgCCAOSwRAIAYhFiAEIQwMAgsgEUFAayEWDAMLIAQhBSAGQQRqIgohBgsgDUUNASANQX9qIQ0gBigCACIIIBBPDQALCyAWQQA2AgAgCkEANgIAIAAgB0F4ajYCGAw
BC0EAIQtBACAPIAAoAgQiFWsiBUF/IAAoAnhBf2p0QX9zIhNrIgQgBCAFSxshDiAAKAIgIA8gACgCfEEGEB5BAnRqIgwoAgAhCCAAKAIQIAAoAhQgBSAAKAJ0ECciBEEBIAQbIRBBA0EEIBobIRQgACgCKCIfIAUgE3FBA3RqIgpBBGohFiAAKAKIASIEQf8fIARB/x9JGyEXIA9BBGohDSAFQQlqIQcgBSAAKAIMayEbIAAoAoABIR0gISEJIBwhBANAAkACfyAEQQNGBEAgAigCAEF/agwBCyACIARBAnRqKAIACyIGQX9qIBtPDQAgD0EEEB8gDyAGa0EEEB9HDQAgDSANIAZrIBIQHUEEaiIGIAlNDQAgGCALQQN0aiIJIAY2AgQgCSAEIBxrNgIAIAtBAWohCyAGIBdLDQIgBiIJIA9qIBJGDQILIARBAWoiBCAUSQ0ACyAMIAU2AgACQCAIIBBJDQAgBUECaiEUQX8gHXRBf3MhDUEAIQVBACEMA0AgDyAFIAwgBSAMSRsiBGogCCAVaiIXIARqIBIQHSAEaiIEIAlLBEAgGCALQQN0aiIJIAQ2AgQgCSAUIAhrNgIAIAQgCGogByAEIAcgCGtLGyEHIAtBAWohCyAEQYAgSw0CIAQiCSAPaiASRg0CCyAfIAggE3FBA3RqIQYCQAJAIAQgF2otAAAgBCAPai0AAEkEQCAKIAg2AgAgCCAOSw0BIBFBQGshCgwECyAWIAg2AgAgCCAOSwRAIAYhFiAEIQwMAgsgEUFAayEWDAMLIAQhBSAGQQRqIgohBgsgDUUNASANQX9qIQ0gBigCACIIIBBPDQALCyAWQQA2AgAgCkEANgIAIAAgB0F4ajYCGAsgC0UNACAZIAIoAgA2AhAgGSACKAIENgIUIAIoAgghBCAZIBo2AgwgGUEANgIIIBkgBDYCGCAZIAMgGiAiQQIQWCIJNgIAIBggC0F/akEDdGoiBCgCBCIGICpLBEAgBCgCACENDAMLQQEhBEEAICJBAhAtIQcDQCAZIARBHGxqQYCAgIAENgIAIARBAWoiBCAoRw0ACyAHIAlqIQ1BACEHICghBgNAIBggB0EDdGoiBCgCBCEJIBFBQGsgAiAEKAIAIgogHBA/IAYgCU0EQCAKQQFqECQiBUEIdEGAIGohDANAIAZBfWohBAJ/IAAoAmRBAUYEQCAEECsgDGoMAQsgACgCYCAAKAI4IAVBAnRqKAIAECtrIAAoAlxqIAQQPEECdCIEQZCkAWooAgAgBWpBCHRqIAAoAjQgBGooAgAQK2tBM2oLIQggGSAGQRxsaiIEIBo2AgwgBCAKNgIEIAQgBjYCCCAEIAggDWo2AgAgBCARKQNANwIQIAQgESgCSDYCGCAGQQFqIgYgCU0NAAsLIAdBAWoiByALRw0AC0EBIQkCQCAGQX9qIgRFBEBBACEEDAELA0BBASEIIBkgCUF/akEcbGoiBigCCEUEQCAGKAIMQQFqIQgLIAkgD2oiDkF/akEBICJBAhBSIAYoAgBqIAggIkECEC1qIAhBf2ogIkECEC1rIgcgGSAJQRxsaiIUKAIAIhZMBEAgFCAINgIMIBRCADcCBCAUIAc2AgAgFCAGKAIYNgIYIBQgBikCEDcCECAHIRYLAkAgDiApSw0AIAQgCUYEQCAJIQQMAwtBACEaIBQoAggiB0UEQCAUKAIMIRoLQQAgIkECEC0hLSAAKAIEIgYgACgCGCIIaiAOSw0AIAAoAoQBIQsgCCAOIAZrIgpJBEADQCAAIAYgCGogEiALQQAQQSAIaiIIIApJDQALCyAHQQBHIRwgFEEQaiEfIAAgCjYCGAJAAkACQAJAAkAgC0F9ag4FAAECAwMBC0EAIRBBACAOIAAoAgQiF2siCkF/IAAoAnhBf2p0QX9zIh1rIgYgBiAKSxshIyAAKAIgIA4gACgCfEEDEB5BAnRqIiAoAgAhDCAAKAIQIAAoAhQgCiAAKAJ0ECciBkEBIAYbIRtBBEEDIAcbISQgACgCKCIlIAogHXFBA3RqIgZBBGohEyAAKAKIASIHQf8fIAdB/x9JGyEFIA5BA2ohHiAKQQlqIRUgCiAAKAIMayEmIAAoAoABIScgISEHIBwhCANAAkACfyAIQQNGBEAgHygCAEF/agwBCyAUIAhBAnRqKAIQCyINQX9qICZPDQAgDkEDEB8gDiANa0EDEB9HDQAgHiAeIA1rIBIQHUEDaiILIAdNDQAgGCAQQQN0aiIHIAs2AgQgByAIIBxrNgIAIBBBAWohECALIAVLDQUgCyIHIA5qIBJGDQULIAhBAWoiCCAkSQ0ACwJAIAdBAksNAEECIQcgFyAAKAIcIAAoAiQgEUHcAGogDhBAIgsgG0kNACAKIAtrIghB//8PSw0AIA4gCyAXaiASEB0iC0EDSQ0AIBggCzYCBCAYIAhBAmo2AgAgCyAFTQRAQQEhECALIgcgDmogEkcNAQtBASEQIAAgCkEBajYCGAwECyAgIAo2AgACQCAMIBtJDQAgCkECaiEeQX8gJ3RBf3MhCEEAIQtBACEKA0AgDiALIAogCyAKSRsiBWogDCAXaiIgIAVqIBIQHSAFaiIFIAdLBEAgGCAQQQN0aiIHIAU2AgQgByAeIAxrNgIAIAUgDGogFSAFIBUgDGtLGyEVIBBBAWohECAFQYAgSw0CIAUiByAOaiASRg0CCyAlIAwgHXFBA3RqIQ0CQAJAIAUgIGotAAAgBSAOai0AAEkEQCAGIAw2AgAgDCAjSw0BIBFBQGshBgwECyATIAw2AgAgDCAjSwRAIA0hEyAFIQoMAgsgEUFAayETDAMLIAUhCyANQQRqIgYhDQsgCEUNASAIQX9qIQggDSgCACIMIBtPDQALCyATQQA2AgAgBkEANgIAIAAgFUF4ajYCGAwDC0EAIRBBACAOIAAoAgQiI2siCkF/IAAoAnhBf2p0QX9zIhdrIgYgBiAKSxshGyAAKAIgIA4gACgCfEEEEB5BAnRqIh4oAgAhDCAAKAIQIAAoAhQgCiAAKAJ0ECciBkEBIAYbIR1BBEEDIAcbISAgACgCKCIkIAogF3FBA3RqIhNBBGohBiAAKAKIASIHQf8fIAdB/x9JGyElIA5BBGohBSAKQQlqIRUgCiAAKAIMayEmIAAoAoABIScgISEHIBwhCANAAkACfyAIQQNGBEAgHygCAEF/agwBCyAUIAhBAnRqKAIQCyINQX9qICZPDQAgDkEEEB8gDiANa0EEEB9HDQAgBSAFIA1rIBIQHUEEaiILIAdNDQAgGCAQQQN0aiIHIAs2AgQgByAIIBxrNgIAIBBBAWohECALICVLDQQgCyIHIA5qIBJGDQQLIAhBAWoiCCAgSQ0ACyAeIAo2AgACQCAMIB1JDQAgCkECaiEeQX8gJ3RBf3MhCEEAIQtBACEKA0AgDiALIAogCyAKSRsiBWogDCAjaiIgIAVqIBIQHSAFaiIFIAdLBEAgGCAQQQN0aiIHIAU2AgQgByAeIAxrNgIAIAUgDGogFSAFIBUgDGtLGyEVIB
BBAWohECAFQYAgSw0CIAUiByAOaiASRg0CCyAkIAwgF3FBA3RqIQ0CQAJAIAUgIGotAAAgBSAOai0AAEkEQCATIAw2AgAgDCAbSw0BIBFBQGshEwwECyAGIAw2AgAgDCAbSwRAIA0hBiAFIQoMAgsgEUFAayEGDAMLIAUhCyANQQRqIhMhDQsgCEUNASAIQX9qIQggDSgCACIMIB1PDQALCyAGQQA2AgAgE0EANgIAIAAgFUF4ajYCGAwCC0EAIRBBACAOIAAoAgQiI2siCkF/IAAoAnhBf2p0QX9zIhdrIgYgBiAKSxshGyAAKAIgIA4gACgCfEEFEB5BAnRqIh4oAgAhDCAAKAIQIAAoAhQgCiAAKAJ0ECciBkEBIAYbIR1BBEEDIAcbISAgACgCKCIkIAogF3FBA3RqIhNBBGohBiAAKAKIASIHQf8fIAdB/x9JGyElIA5BBGohBSAKQQlqIRUgCiAAKAIMayEmIAAoAoABIScgISEHIBwhCANAAkACfyAIQQNGBEAgHygCAEF/agwBCyAUIAhBAnRqKAIQCyINQX9qICZPDQAgDkEEEB8gDiANa0EEEB9HDQAgBSAFIA1rIBIQHUEEaiILIAdNDQAgGCAQQQN0aiIHIAs2AgQgByAIIBxrNgIAIBBBAWohECALICVLDQMgCyIHIA5qIBJGDQMLIAhBAWoiCCAgSQ0ACyAeIAo2AgACQCAMIB1JDQAgCkECaiEeQX8gJ3RBf3MhCEEAIQtBACEKA0AgDiALIAogCyAKSRsiBWogDCAjaiIgIAVqIBIQHSAFaiIFIAdLBEAgGCAQQQN0aiIHIAU2AgQgByAeIAxrNgIAIAUgDGogFSAFIBUgDGtLGyEVIBBBAWohECAFQYAgSw0CIAUiByAOaiASRg0CCyAkIAwgF3FBA3RqIQ0CQAJAIAUgIGotAAAgBSAOai0AAEkEQCATIAw2AgAgDCAbSw0BIBFBQGshEwwECyAGIAw2AgAgDCAbSwRAIA0hBiAFIQoMAgsgEUFAayEGDAMLIAUhCyANQQRqIhMhDQsgCEUNASAIQX9qIQggDSgCACIMIB1PDQALCyAGQQA2AgAgE0EANgIAIAAgFUF4ajYCGAwBC0EAIRBBACAOIAAoAgQiI2siCkF/IAAoAnhBf2p0QX9zIhdrIgYgBiAKSxshGyAAKAIgIA4gACgCfEEGEB5BAnRqIh4oAgAhDCAAKAIQIAAoAhQgCiAAKAJ0ECciBkEBIAYbIR1BBEEDIAcbISAgACgCKCIkIAogF3FBA3RqIhNBBGohBiAAKAKIASIHQf8fIAdB/x9JGyElIA5BBGohBSAKQQlqIRUgCiAAKAIMayEmIAAoAoABIScgISEHIBwhCANAAkACfyAIQQNGBEAgHygCAEF/agwBCyAUIAhBAnRqKAIQCyINQX9qICZPDQAgDkEEEB8gDiANa0EEEB9HDQAgBSAFIA1rIBIQHUEEaiILIAdNDQAgGCAQQQN0aiIHIAs2AgQgByAIIBxrNgIAIBBBAWohECALICVLDQIgCyIHIA5qIBJGDQILIAhBAWoiCCAgSQ0ACyAeIAo2AgACQCAMIB1JDQAgCkECaiEeQX8gJ3RBf3MhCEEAIQtBACEKA0AgDiALIAogCyAKSRsiBWogDCAjaiIgIAVqIBIQHSAFaiIFIAdLBEAgGCAQQQN0aiIHIAU2AgQgByAeIAxrNgIAIAUgDGogFSAFIBUgDGtLGyEVIBBBAWohECAFQYAgSw0CIAUiByAOaiASRg0CCyAkIAwgF3FBA3RqIQ0CQAJAIAUgIGotAAAgBSAOai0AAEkEQCATIAw2AgAgDCAbSw0BIBFBQGshEwwECyAGIAw2AgAgDCAbSwRAIA0hBiAFIQoMAgsgEUFAayEGDAMLIAUhCyANQQRqIhMhDQsgCEUNASAIQX9qIQggDSgCACIMIB1PDQALCyAGQQA2AgAgE0EANgIAIAAgFUF4ajYCGAsgEEUNACAYIBBBf2pBA3RqIgcoAgQiBiAqSyAGIAlqQYAgT3INBCAWIC1qIQxBACEWA0AgEUFAayAfIBggFkEDdGoiBygCACIGIBwQPyAoIQUCfyAWBEAgB0F8aigCAEEBaiEFCyAHKAIEIgggBU8LBEAgBkEBahAkIgtBCHRBgCBqIRMDQCAIQX1qIQogCCAJaiEHAn8gACgCZEEBRgRAIAoQKyATagwBCyAAKAJgIAAoAjggC0ECdGooAgAQK2sgACgCXGogChA8QQJ0IgpBkKQBaigCACALakEIdGogACgCNCAKaigCABAra0EzagsgDGohCgJAAkAgByAETQRAIAogGSAHQRxsaigCAEgNAQwCCwNAIBkgBEEBaiIEQRxsakGAgICABDYCACAEIAdJDQALCyAZIAdBHGxqIgcgGjYCDCAHIAY2AgQgByAINgIIIAcgCjYCACAHIBEpA0A3AhAgByARKAJINgIYCyAIQX9qIgggBU8NAAsLIBZBAWoiFiAQRw0ACwsgCUEBaiIJIARNDQALCyAZIARBHGxqIgkoAgwhGiAJKAIEIQ0gCSgCACEsIAkoAgghBiARIAkoAhg2AlggESAJKQIQNwNQIBEgCSkCCDcDKCARIAkpAhA3AzAgESAJKAIYNgI4IBEgCSkCADcDIEEAIAQgEUEgahA+ayIJIAkgBEsbIQQMAwsgD0EBaiEPDAcLIAcoAgAhDUEAIQQgCSAUKAIIBH8gBAUgFCgCDAtrIgRBgCBNDQELIBkgGjYCKCAZIAY2AiQgGSANNgIgIBkgLDYCHCAZIBEoAlg2AjQgGSARKQNQNwIsDAELIBkgBEEBaiILQRxsaiIJIBo2AgwgCSAGNgIIIAkgDTYCBCAJICw2AgAgCSARKQNQNwIQIAkgESgCWDYCGCALIRogBA0BC0EBIRpBASELDAELA0AgESAZIARBHGxqIgkiCkEYaigCADYCGCARIAkpAhA3AxAgESAJKQIINwMIIBEgCSkCADcDACARED4hBiAZIBpBf2oiGkEcbGoiByAKKAIYNgIYIAcgCSkCEDcCECAHIAkpAgg3AgggByAJKQIANwIAIAQgBkshCUEAIAQgBmsiByAHIARLGyEEIAkNAAsgGiALSw0BCwNAIBkgGkEcbGoiBCgCDCEHAn8gAyAHaiAEKAIIIgVFDQAaAkACQCAEKAIEIgpBA08EQCACIAIpAgA3AgQgCkF+aiEEDAELAkACQAJAAkAgCiAHRWoiCQ4EBQEBAAELIAIoAgBBf2ohBAwBCyACIAlBAnRqKAIAIQQgCUECSQ0BCyACIAIoAgQ2AggLIAIgAigCADYCBAsgAiAENgIACyAiIAcgAyAKIAUQVyAFQX1qIQggASgCDCEEAkACQCADIAdqIgkgK00EQCAEIAMQHCABKAIMIQQgB0EQTQRAIAEgBCAHajYCDAwDCyAEQRBqIANBEGoiBhAcIARBIGogA0EgahAcIAdBMUgNASAEIAdqIQ0gBEEwaiEEA0AgBCAGQSBqIgkQHCAEQRBqIAZBMGoQH
CAJIQYgBEEgaiIEIA1JDQALDAELIAQgAyAJICsQIgsgASABKAIMIAdqNgIMIAdBgIAESQ0AIAFBATYCJCABIAEoAgQgASgCAGtBA3U2AigLIAEoAgQiBCAKQQFqNgIAIAQgBzsBBCAIQYCABE8EQCABQQI2AiQgASAEIAEoAgBrQQN1NgIoCyAEIAg7AQYgASAEQQhqNgIEIAUgB2ogA2oiAwshDyAaQQFqIhogC00NAAsLICJBAhBRCyAPIClJDQALCyARQeAAaiQAIBIgA2sLcgECfyABKAI4BEAgAgRAIAAQKw8LIAAQLg8LIAAQgAFBAnQiAEGwpwFqKAIAQQh0IQQgASgCBCIBKAIAIQMCfyACBEAgAxArIQIgACABaigCABArDAELIAMQLiECIAAgAWooAgAQLgshASACIARqIAFrC2YBAX8jAEEwayIGJAAgBkEYaiABEJYBIAZBCGogAhCWASAGQShqIAZBGGogBkEIaiADIAQgBSAAEQwAIAZBKGoQyAEhACAGQShqEMUBIAZBCGoQkgEgBkEYahCSASAGQTBqJAAgAAtfAQF/IwBB0BFrIggkACAIQQA2AlACQCAIQQhqIAAgASACIAMgBCAFIAYQvAIgBxCmAiIGQQBIDQAgCEEIaiABEKUCIgZBAEgNACAIQQhqELsCIQYLIAhB0BFqJAAgBgu3PgEpfyMAQeAAayIQJAAgACgChAEhBiAAKAIEISIgACgCiAEhBSAAKAIMIQggECAAKAIYNgJcIAAoAjwhFyAAQUBrKAIAIRYgAEEsaiIkIAMgBEEAEFkgAyAIICJqIANGaiIPIAMgBGoiEUF4aiIpSQRAIAVB/x8gBUH/H0kbISogEUFgaiErQQNBBCAGQQNGGyIoQX9qISIDQAJAAkACQAJAAkACQAJAAkACQCAAKAIEIgUgACgCGCIEaiAPSw0AIA8gA2shHSAAKAKEASEGIAQgDyAFayIISQRAA0AgACAEIAVqIBEgBkEAEEEgBGoiBCAISQ0ACwsgHUUhGyAAIAg2AhgCQAJAAkACQAJAIAZBfWoOBQABAgMDAQtBACELQQAgDyAAKAIEIh9rIgpBfyAAKAJ4QX9qdEF/cyINayIEIAQgCksbIRUgACgCICAPIAAoAnxBAxAeQQJ0aiISKAIAIQcgACgCECAAKAIUIAogACgCdBAnIgRBASAEGyEOQQNBBCAdGyEYIAAoAigiHCAKIA1xQQN0aiIGQQRqIRMgACgCiAEiBEH/HyAEQf8fSRshCSAPQQNqIQwgCkEJaiEUIAogACgCDGshGSAAKAKAASEaICIhBSAbIQQDQAJAAn8gBEEDRgRAIAIoAgBBf2oMAQsgAiAEQQJ0aigCAAsiCEF/aiAZTw0AIA9BAxAfIA8gCGtBAxAfRw0AIAwgDCAIayAREB1BA2oiCCAFTQ0AIBcgC0EDdGoiBSAINgIEIAUgBCAbazYCACALQQFqIQsgCCAJSw0FIAgiBSAPaiARRg0FCyAEQQFqIgQgGEkNAAsCQCAFQQJLDQBBAiEFIB8gACgCHCAAKAIkIBBB3ABqIA8QQCIEIA5JDQAgCiAEayIIQf//D0sNACAPIAQgH2ogERAdIgRBA0kNACAXIAQ2AgQgFyAIQQJqNgIAIAQgCU0EQEEBIQsgBCIFIA9qIBFHDQELQQEhCyAAIApBAWo2AhgMBAsgEiAKNgIAAkAgByAOSQ0AIApBAmohEkF/IBp0QX9zIQxBACEKQQAhCQNAIA8gCiAJIAogCUkbIgRqIAcgH2oiGCAEaiAREB0gBGoiBCAFSwRAIBcgC0EDdGoiBSAENgIEIAUgEiAHazYCACAEIAdqIBQgBCAUIAdrSxshFCALQQFqIQsgBEGAIEsNAiAEIgUgD2ogEUYNAgsgHCAHIA1xQQN0aiEIAkACQCAEIBhqLQAAIAQgD2otAABJBEAgBiAHNgIAIAcgFUsNASAQQUBrIQYMBAsgEyAHNgIAIAcgFUsEQCAIIRMgBCEJDAILIBBBQGshEwwDCyAEIQogCEEEaiIGIQgLIAxFDQEgDEF/aiEMIAgoAgAiByAOTw0ACwsgE0EANgIAIAZBADYCACAAIBRBeGo2AhgMAwtBACELQQAgDyAAKAIEIhVrIgpBfyAAKAJ4QX9qdEF/cyITayIEIAQgCksbIR8gACgCICAPIAAoAnxBBBAeQQJ0aiIMKAIAIQcgACgCECAAKAIUIAogACgCdBAnIgRBASAEGyENQQNBBCAdGyESIAAoAigiGCAKIBNxQQN0aiIOQQRqIQYgACgCiAEiBEH/HyAEQf8fSRshHCAPQQRqIQkgCkEJaiEUIAogACgCDGshGSAAKAKAASEaICIhBSAbIQQDQAJAAn8gBEEDRgRAIAIoAgBBf2oMAQsgAiAEQQJ0aigCAAsiCEF/aiAZTw0AIA9BBBAfIA8gCGtBBBAfRw0AIAkgCSAIayAREB1BBGoiCCAFTQ0AIBcgC0EDdGoiBSAINgIEIAUgBCAbazYCACALQQFqIQsgCCAcSw0EIAgiBSAPaiARRg0ECyAEQQFqIgQgEkkNAAsgDCAKNgIAAkAgByANSQ0AIApBAmohEkF/IBp0QX9zIQxBACEKQQAhCQNAIA8gCiAJIAogCUkbIgRqIAcgFWoiHCAEaiAREB0gBGoiBCAFSwRAIBcgC0EDdGoiBSAENgIEIAUgEiAHazYCACAEIAdqIBQgBCAUIAdrSxshFCALQQFqIQsgBEGAIEsNAiAEIgUgD2ogEUYNAgsgGCAHIBNxQQN0aiEIAkACQCAEIBxqLQAAIAQgD2otAABJBEAgDiAHNgIAIAcgH0sNASAQQUBrIQ4MBAsgBiAHNgIAIAcgH0sEQCAIIQYgBCEJDAILIBBBQGshBgwDCyAEIQogCEEEaiIOIQgLIAxFDQEgDEF/aiEMIAgoAgAiByANTw0ACwsgBkEANgIAIA5BADYCACAAIBRBeGo2AhgMAgtBACELQQAgDyAAKAIEIhVrIgpBfyAAKAJ4QX9qdEF/cyITayIEIAQgCksbIR8gACgCICAPIAAoAnxBBRAeQQJ0aiIMKAIAIQcgACgCECAAKAIUIAogACgCdBAnIgRBASAEGyENQQNBBCAdGyESIAAoAigiGCAKIBNxQQN0aiIOQQRqIQYgACgCiAEiBEH/HyAEQf8fSRshHCAPQQRqIQkgCkEJaiEUIAogACgCDGshGSAAKAKAASEaICIhBSAbIQQDQAJAAn8gBEEDRgRAIAIoAgBBf2oMAQsgAiAEQQJ0aigCAAsiCEF/aiAZTw0AIA9BBBAfIA8gCGtBBBAfRw0AIAkgCSAIayAREB1BBGoiCCAFTQ0AIBcgC0EDdGoiBSAINgIEIAUgBCAbazYCACALQQFqIQsgCCAcSw0DIAgiBSAPaiARRg0DCyAEQQFqIgQgEkkNAAsgDCAKNgIAAkAgByANSQ0AIApBAmohEkF/IBp0QX9zIQxBACEKQQAhCQNAIA8gCiAJIAogCUkbIgRqIAcgFWoiHCAEaiAR
EB0gBGoiBCAFSwRAIBcgC0EDdGoiBSAENgIEIAUgEiAHazYCACAEIAdqIBQgBCAUIAdrSxshFCALQQFqIQsgBEGAIEsNAiAEIgUgD2ogEUYNAgsgGCAHIBNxQQN0aiEIAkACQCAEIBxqLQAAIAQgD2otAABJBEAgDiAHNgIAIAcgH0sNASAQQUBrIQ4MBAsgBiAHNgIAIAcgH0sEQCAIIQYgBCEJDAILIBBBQGshBgwDCyAEIQogCEEEaiIOIQgLIAxFDQEgDEF/aiEMIAgoAgAiByANTw0ACwsgBkEANgIAIA5BADYCACAAIBRBeGo2AhgMAQtBACELQQAgDyAAKAIEIhVrIgpBfyAAKAJ4QX9qdEF/cyITayIEIAQgCksbIR8gACgCICAPIAAoAnxBBhAeQQJ0aiIMKAIAIQcgACgCECAAKAIUIAogACgCdBAnIgRBASAEGyENQQNBBCAdGyESIAAoAigiGCAKIBNxQQN0aiIOQQRqIQYgACgCiAEiBEH/HyAEQf8fSRshHCAPQQRqIQkgCkEJaiEUIAogACgCDGshGSAAKAKAASEaICIhBSAbIQQDQAJAAn8gBEEDRgRAIAIoAgBBf2oMAQsgAiAEQQJ0aigCAAsiCEF/aiAZTw0AIA9BBBAfIA8gCGtBBBAfRw0AIAkgCSAIayAREB1BBGoiCCAFTQ0AIBcgC0EDdGoiBSAINgIEIAUgBCAbazYCACALQQFqIQsgCCAcSw0CIAgiBSAPaiARRg0CCyAEQQFqIgQgEkkNAAsgDCAKNgIAAkAgByANSQ0AIApBAmohEkF/IBp0QX9zIQxBACEKQQAhCQNAIA8gCiAJIAogCUkbIgRqIAcgFWoiHCAEaiAREB0gBGoiBCAFSwRAIBcgC0EDdGoiBSAENgIEIAUgEiAHazYCACAEIAdqIBQgBCAUIAdrSxshFCALQQFqIQsgBEGAIEsNAiAEIgUgD2ogEUYNAgsgGCAHIBNxQQN0aiEIAkACQCAEIBxqLQAAIAQgD2otAABJBEAgDiAHNgIAIAcgH0sNASAQQUBrIQ4MBAsgBiAHNgIAIAcgH0sEQCAIIQYgBCEJDAILIBBBQGshBgwDCyAEIQogCEEEaiIOIQgLIAxFDQEgDEF/aiEMIAgoAgAiByANTw0ACwsgBkEANgIAIA5BADYCACAAIBRBeGo2AhgLIAtFDQAgFiACKAIANgIQIBYgAigCBDYCFCACKAIIIQQgFiAdNgIMIBZBADYCCCAWIAQ2AhggFiADIB0gJEEAEFgiBTYCACAXIAtBf2pBA3RqIgQoAgQiCCAqSwRAIAQoAgAhBQwDC0EBIQRBACAkQQAQLSEGA0AgFiAEQRxsakGAgICABDYCACAEQQFqIgQgKEcNAAsgBSAGaiEMQQAhBiAoIQgDQCAXIAZBA3RqIgQoAgQhCiAQQUBrIAIgBCgCACIJIBsQPyAIIApNBEAgCUEBahAkIgVBCXRBs7R/akEzIAVBE0sbIRQgBUEIdEGAIGohEwNAIAhBfWohBAJ/IAAoAmRBAUYEQCAEEC4gE2oMAQsgACgCYCAUaiAAKAI4IAVBAnRqKAIAEC5rIAAoAlxqIAQQPEECdCIEQZCkAWooAgAgBWpBCHRqIAAoAjQgBGooAgAQLmsLIQcgFiAIQRxsaiIEIB02AgwgBCAJNgIEIAQgCDYCCCAEIAcgDGo2AgAgBCAQKQNANwIQIAQgECgCSDYCGCAIQQFqIgggCk0NAAsLIAZBAWoiBiALRw0AC0EBIQoCQCAIQX9qIgRFBEBBACEEDAELA0BBASEHIBYgCkF/akEcbGoiBigCCEUEQCAGKAIMQQFqIQcLIAogD2oiDUF/akEBICRBABBSIAYoAgBqIAcgJEEAEC1qIAdBf2ogJEEAEC1rIgUgFiAKQRxsaiIYKAIAIhRMBEAgGCAHNgIMIBhCADcCBCAYIAU2AgAgGCAGKAIYNgIYIBggBikCEDcCECAFIRQLIA0gKUsEfyAKQQFqBSAEIApGBEAgCiEEDAMLAkAgFiAKQQFqIh9BHGxqKAIAIBRBgAFqTA0AQQAhHSAYKAIIIgVFBEAgGCgCDCEdC0EAICRBABAtIS0gACgCBCIGIAAoAhgiB2ogDUsNACAAKAKEASEIIAcgDSAGayIJSQRAA0AgACAGIAdqIBEgCEEAEEEgB2oiByAJSQ0ACwsgBUEARyEbIBhBEGohHCAAIAk2AhgCQAJAAkACQAJAIAhBfWoOBQABAgMDAQtBACEOQQAgDSAAKAIEIhlrIghBfyAAKAJ4QX9qdEF/cyIhayIGIAYgCEsbISUgACgCICANIAAoAnxBAxAeQQJ0aiIeKAIAIQkgACgCECAAKAIUIAggACgCdBAnIgZBASAGGyEaQQRBAyAFGyEjIAAoAigiICAIICFxQQN0aiIMQQRqIRMgACgCiAEiBUH/HyAFQf8fSRshCyANQQNqIRIgCEEJaiEVIAggACgCDGshJiAAKAKAASEnICIhBiAbIQcDQAJAAn8gB0EDRgRAIBwoAgBBf2oMAQsgGCAHQQJ0aigCEAsiBUF/aiAmTw0AIA1BAxAfIA0gBWtBAxAfRw0AIBIgEiAFayAREB1BA2oiBSAGTQ0AIBcgDkEDdGoiBiAFNgIEIAYgByAbazYCACAOQQFqIQ4gBSALSw0FIAUiBiANaiARRg0FCyAHQQFqIgcgI0kNAAsCQCAGQQJLDQBBAiEGIBkgACgCHCAAKAIkIBBB3ABqIA0QQCIFIBpJDQAgCCAFayIHQf//D0sNACANIAUgGWogERAdIgVBA0kNACAXIAU2AgQgFyAHQQJqNgIAIAUgC00EQEEBIQ4gBSIGIA1qIBFHDQELQQEhDiAAIAhBAWo2AhgMBAsgHiAINgIAAkAgCSAaSQ0AIAhBAmohHkF/ICd0QX9zIRJBACELQQAhCANAIA0gCyAIIAsgCEkbIgVqIAkgGWoiIyAFaiAREB0gBWoiByAGSwRAIBcgDkEDdGoiBSAHNgIEIAUgHiAJazYCACAHIAlqIBUgByAVIAlrSxshFSAOQQFqIQ4gB0GAIEsNAiAHIgYgDWogEUYNAgsgICAJICFxQQN0aiEFAkACQCAHICNqLQAAIAcgDWotAABJBEAgDCAJNgIAIAkgJUsNASAQQUBrIQwMBAsgEyAJNgIAIAkgJUsEQCAFIRMgByEIDAILIBBBQGshEwwDCyAHIQsgBUEEaiIMIQULIBJFDQEgEkF/aiESIAUoAgAiCSAaTw0ACwsgE0EANgIAIAxBADYCACAAIBVBeGo2AhgMAwtBACEOQQAgDSAAKAIEIiVrIghBfyAAKAJ4QX9qdEF/cyIZayIGIAYgCEsbIRogACgCICANIAAoAnxBBBAeQQJ0aiISKAIAIQkgACgCECAAKAIUIAggACgCdBAnIgZBASAGGyEhQQRBAyAFGyEeIAAoAigiIyAIIBlxQQN0aiITQQRqIQwgACgCiAEiBUH/HyAFQf8fSRshICANQQRqIQsgCEEJaiEVIAggACgCDGs
hJiAAKAKAASEnICIhBiAbIQcDQAJAAn8gB0EDRgRAIBwoAgBBf2oMAQsgGCAHQQJ0aigCEAsiBUF/aiAmTw0AIA1BBBAfIA0gBWtBBBAfRw0AIAsgCyAFayAREB1BBGoiBSAGTQ0AIBcgDkEDdGoiBiAFNgIEIAYgByAbazYCACAOQQFqIQ4gBSAgSw0EIAUiBiANaiARRg0ECyAHQQFqIgcgHkkNAAsgEiAINgIAAkAgCSAhSQ0AIAhBAmohHkF/ICd0QX9zIRJBACELQQAhCANAIA0gCyAIIAsgCEkbIgVqIAkgJWoiICAFaiAREB0gBWoiByAGSwRAIBcgDkEDdGoiBSAHNgIEIAUgHiAJazYCACAHIAlqIBUgByAVIAlrSxshFSAOQQFqIQ4gB0GAIEsNAiAHIgYgDWogEUYNAgsgIyAJIBlxQQN0aiEFAkACQCAHICBqLQAAIAcgDWotAABJBEAgEyAJNgIAIAkgGksNASAQQUBrIRMMBAsgDCAJNgIAIAkgGksEQCAFIQwgByEIDAILIBBBQGshDAwDCyAHIQsgBUEEaiITIQULIBJFDQEgEkF/aiESIAUoAgAiCSAhTw0ACwsgDEEANgIAIBNBADYCACAAIBVBeGo2AhgMAgtBACEOQQAgDSAAKAIEIiVrIghBfyAAKAJ4QX9qdEF/cyIZayIGIAYgCEsbIRogACgCICANIAAoAnxBBRAeQQJ0aiISKAIAIQkgACgCECAAKAIUIAggACgCdBAnIgZBASAGGyEhQQRBAyAFGyEeIAAoAigiIyAIIBlxQQN0aiITQQRqIQwgACgCiAEiBUH/HyAFQf8fSRshICANQQRqIQsgCEEJaiEVIAggACgCDGshJiAAKAKAASEnICIhBiAbIQcDQAJAAn8gB0EDRgRAIBwoAgBBf2oMAQsgGCAHQQJ0aigCEAsiBUF/aiAmTw0AIA1BBBAfIA0gBWtBBBAfRw0AIAsgCyAFayAREB1BBGoiBSAGTQ0AIBcgDkEDdGoiBiAFNgIEIAYgByAbazYCACAOQQFqIQ4gBSAgSw0DIAUiBiANaiARRg0DCyAHQQFqIgcgHkkNAAsgEiAINgIAAkAgCSAhSQ0AIAhBAmohHkF/ICd0QX9zIRJBACELQQAhCANAIA0gCyAIIAsgCEkbIgVqIAkgJWoiICAFaiAREB0gBWoiByAGSwRAIBcgDkEDdGoiBSAHNgIEIAUgHiAJazYCACAHIAlqIBUgByAVIAlrSxshFSAOQQFqIQ4gB0GAIEsNAiAHIgYgDWogEUYNAgsgIyAJIBlxQQN0aiEFAkACQCAHICBqLQAAIAcgDWotAABJBEAgEyAJNgIAIAkgGksNASAQQUBrIRMMBAsgDCAJNgIAIAkgGksEQCAFIQwgByEIDAILIBBBQGshDAwDCyAHIQsgBUEEaiITIQULIBJFDQEgEkF/aiESIAUoAgAiCSAhTw0ACwsgDEEANgIAIBNBADYCACAAIBVBeGo2AhgMAQtBACEOQQAgDSAAKAIEIiVrIghBfyAAKAJ4QX9qdEF/cyIZayIGIAYgCEsbIRogACgCICANIAAoAnxBBhAeQQJ0aiISKAIAIQkgACgCECAAKAIUIAggACgCdBAnIgZBASAGGyEhQQRBAyAFGyEeIAAoAigiIyAIIBlxQQN0aiITQQRqIQwgACgCiAEiBUH/HyAFQf8fSRshICANQQRqIQsgCEEJaiEVIAggACgCDGshJiAAKAKAASEnICIhBiAbIQcDQAJAAn8gB0EDRgRAIBwoAgBBf2oMAQsgGCAHQQJ0aigCEAsiBUF/aiAmTw0AIA1BBBAfIA0gBWtBBBAfRw0AIAsgCyAFayAREB1BBGoiBSAGTQ0AIBcgDkEDdGoiBiAFNgIEIAYgByAbazYCACAOQQFqIQ4gBSAgSw0CIAUiBiANaiARRg0CCyAHQQFqIgcgHkkNAAsgEiAINgIAAkAgCSAhSQ0AIAhBAmohHkF/ICd0QX9zIRJBACELQQAhCANAIA0gCyAIIAsgCEkbIgVqIAkgJWoiICAFaiAREB0gBWoiByAGSwRAIBcgDkEDdGoiBSAHNgIEIAUgHiAJazYCACAHIAlqIBUgByAVIAlrSxshFSAOQQFqIQ4gB0GAIEsNAiAHIgYgDWogEUYNAgsgIyAJIBlxQQN0aiEFAkACQCAHICBqLQAAIAcgDWotAABJBEAgEyAJNgIAIAkgGksNASAQQUBrIRMMBAsgDCAJNgIAIAkgGksEQCAFIQwgByEIDAILIBBBQGshDAwDCyAHIQsgBUEEaiITIQULIBJFDQEgEkF/aiESIAUoAgAiCSAhTw0ACwsgDEEANgIAIBNBADYCACAAIBVBeGo2AhgLIA5FDQAgFyAOQX9qQQN0aiIFKAIEIgggKksgCCAKakGAIE9yDQUgFCAtaiEUQQAhCANAIBBBQGsgHCAXIAhBA3RqIgYoAgAiCyAbED8gKCEFIAgEQCAGQXxqKAIAQQFqIQULAkAgBigCBCIHIAVJDQAgC0EBahAkIglBCXRBs7R/akEzIAlBE0sbIRMgCUEIdEGAIGohDQNAIAdBfWohDCAHIApqIQYCfyAAKAJkQQFGBEAgDBAuIA1qDAELIAAoAmAgE2ogACgCOCAJQQJ0aigCABAuayAAKAJcaiAMEDxBAnQiDEGQpAFqKAIAIAlqQQh0aiAAKAI0IAxqKAIAEC5rCyAUaiEMAkAgBiAETQRAIAwgFiAGQRxsaigCAEgNAQwDCwNAIBYgBEEBaiIEQRxsakGAgICABDYCACAEIAZJDQALCyAWIAZBHGxqIgYgHTYCDCAGIAs2AgQgBiAHNgIIIAYgDDYCACAGIBApA0A3AhAgBiAQKAJINgIYIAdBf2oiByAFTw0ACwsgCEEBaiIIIA5HDQALCyAfCyIKIARNDQALCyAWIARBHGxqIgYoAgwhHSAGKAIEIQUgBigCACEsIAYoAgghCCAQIAYoAhg2AlggECAGKQIQNwNQIBAgBikCCDcDKCAQIAYpAhA3AzAgECAGKAIYNgI4IBAgBikCADcDIEEAIAQgEEEgahA+ayIGIAYgBEsbIQQMAwsgD0EBaiEPDAcLIAUoAgAhBUEAIQQgCiAYKAIIBH8gBAUgGCgCDAtrIgRBgCBNDQELIBYgHTYCKCAWIAg2AiQgFiAFNgIgIBYgLDYCHCAWIBAoAlg2AjQgFiAQKQNQNwIsDAELIBYgBEEBaiIUQRxsaiIGIB02AgwgBiAINgIIIAYgBTYCBCAGICw2AgAgBiAQKQNQNwIQIAYgECgCWDYCGCAUIQwgBA0BC0EBIQxBASEUDAELA0AgECAWIARBHGxqIgUiCkEYaigCADYCGCAQIAUpAhA3AxAgECAFKQIINwMIIBAgBSkCADcDACAQED4hCCAWIAxBf2oiDEEcbGoiBiAKKAIYNgIYIAYgBSkCEDcCECAGIAUpAgg3AgggBiAFKQIANwIAIAQgCEshBUEAIAQgCGsiBiAGIARLGyEEIAUNAAsgDCAUSw
0BCwNAIBYgDEEcbGoiBCgCDCEGAn8gAyAGaiAEKAIIIgdFDQAaAkACQCAEKAIEIgpBA08EQCACIAIpAgA3AgQgCkF+aiEEDAELAkACQAJAAkAgCiAGRWoiBQ4EBQEBAAELIAIoAgBBf2ohBAwBCyACIAVBAnRqKAIAIQQgBUECSQ0BCyACIAIoAgQ2AggLIAIgAigCADYCBAsgAiAENgIACyAkIAYgAyAKIAcQVyAHQX1qIQkgASgCDCEEAkACQCADIAZqIgUgK00EQCAEIAMQHCABKAIMIQQgBkEQTQRAIAEgBCAGajYCDAwDCyAEQRBqIANBEGoiCBAcIARBIGogA0EgahAcIAZBMUgNASAEIAZqIQsgBEEwaiEEA0AgBCAIQSBqIgUQHCAEQRBqIAhBMGoQHCAFIQggBEEgaiIEIAtJDQALDAELIAQgAyAFICsQIgsgASABKAIMIAZqNgIMIAZBgIAESQ0AIAFBATYCJCABIAEoAgQgASgCAGtBA3U2AigLIAEoAgQiBCAKQQFqNgIAIAQgBjsBBCAJQYCABE8EQCABQQI2AiQgASAEIAEoAgBrQQN1NgIoCyAEIAk7AQYgASAEQQhqNgIEIAYgB2ogA2oiAwshDyAMQQFqIgwgFE0NAAsLICRBABBRCyAPIClJDQALCyAQQeAAaiQAIBEgA2sLcwEDfyAAIAEoAgAgASgCBCIFQQxsaiIEKQIANwIAIAAgBCgCCCIGNgIIIAYgACgCBCIEaiACTQRAIAEgBUEBajYCBA8LAkAgBCACSQRAIAAgAiAEayIENgIIIAQgA08NAQsgAEEANgIACyABIAIgAxDqAQtyAQF/IwBBIGsiBiQAIAYgBSkCEDcDGCAGIAUpAgg3AxAgBiAFKQIANwMIIAAgAiAGQQhqENYBIAEgAmoiAC0AAEEDdGogA60gBK1CIIaENwIAIAAgAC0AAEEBakF/IAUoAgh0QX9zcToAACAGQSBqJAALNwIBfwF+IAEEQANAIAAgAmoxAAAgA0LjyJW9y5vvjU9+fEIKfCEDIAJBAWoiAiABRw0ACwsgAwuRAQIEfwF+IwBBIGsiByQAIAJBAWoiCCADSQRAIAYoAgwhCQNAIAIgCWotAAAhCiAAKQMgIQsgAi0AACECIAcgBikCEDcDGCAHIAYpAgg3AxAgByAGKQIANwMIIAAgASACIAogCxDZASIBIAUgCCAEayAHQQhqEJkBIAgiAkEBaiIIIANJDQALCyAHQSBqJAAgAQvoBgIdfwJ+IwBBgAFrIgUkACAFIAAoAhA2AnggBSAAKQIINwNwIAUgACkCADcDaCACKAIIIQYgAigCBCEHIAIoAhAhGCAAKQMgISMgAigCDCEKIAAoAgwiECENIAVB6ABqEOgBIhEEQCAAKAIIIRIgACgCECENCwJ/AkAgAyAEaiIOIApBCCAKQQhLG2siGSADSQRAIAMhBwwBCyAHIAZrIQtBfyAYdEF/cyEbIBAgEmpBACARGyEcIA0gEmpBACARGyEdIAAoAgQiDyAQaiETQQAhBEEBIAZ0QQN0IR4gBkEfRiEfIAMiByEGA0ACfwJ+IAMgBkcEQCAiIAQtAAAgBCAKai0AACAjENkBDAELIAMgChCoAwsiIiALIBgQ2AEgG0cEQCAGIQQgBkEBagwBCyAGIA9rIRQgACgCFCEEIAUgAikCEDcDYCAFIAIpAgg3A1ggBSACKQIANwNQIAQgIiALENcBIAVB0ABqENYBIQQgIiALENUBISACQCAfRQRAIAQgHmohIUEAIRVBACEWQQAhDEEAIRoDQAJAIAQoAgQgIEcNACAEKAIAIgggDU0NAAJ/IBEEQCAGIBIgDyAIIBBJIgkbIAhqIhcgDiAcIA4gCRsgExAgIgggCkkNAiAGIAcgFyAdIBMgCRsQ1AEMAQsgBiAIIA9qIgkgDhAdIgggCkkNASAGIAcgCSATENQBCyEJIAggCWoiFyAaTQ0AIBchGiAEIQwgCSEWIAghFQsgBEEIaiIEICFJDQALIAwNAQsgBSACKQIQNwMYIAUgAikCCDcDECAFIAIpAgA3AwggACAiIAsgFCAFQQhqEJkBIAYhBCAGQQFqDAELQbp/IAEoAggiBCABKAIMRg0DGiAMKAIAIQggASgCACAEQQxsaiIMIBUgFmo2AgggDCAGIBZrIAdrNgIEIAwgFCAIazYCACABIARBAWo2AgggBSACKQIQNwNIIAVBQGsgAikCCDcDACAFIAIpAgA3AzggACAiIAsgFCAFQThqEJkBAn8gBiAGIBVqIgcgGUsNABogBSACKQIQNwMwIAUgAikCCDcDKCAFIAIpAgA3AyAgACAiIAYgByAPIAsgBUEgahCpAyEiIAdBf2oLIQQgBwsiBiAZTQ0ACwsgDiAHawshACAFQYABaiQAIAALRAEBfwJAIAEgACgCBGsiAyACTQ0AIAAoAhAiASADIAJrIgJJBEAgACACNgIQIAIhAQsgACgCDCABTw0AIAAgATYCDAsLOQEDfyABBEADQCAAIANBA3RqIgRBACAEKAIAIgQgAmsiBSAFIARLGzYCACADQQFqIgMgAUcNAAsLC0YBAX8gACgCBCEDIAAgAiABazYCBCAAIAIgA2sgAWsiASAAKAIIajYCCCAAIAAoAhAgAWs2AhAgACAAKAIMIAFrNgIMIAELXwECfyMAQRBrIgYkAEGI7AEgARDTAUEQahBMIgc2AgAgBkEIaiADIAQgARDTASIDIAEQeyAHIANBEGogAhB7IAUQpANBiOwBKAIAENsBIAAgBkEIahDaASAGQRBqJAALgAwBF38jAEEQayIPJAAgAigCBCEJIAIoAgAhBiADIAAoAgQiECAAKAIMIhFqIhQgA0ZqIgUgAyAEaiIOQXhqIhJJBEAgACgCCCITIAAoAhAiFWohGiARIBNqIRYgDkFgaiEXIBFBf2ohGANAAn9BACAFQQFqIgcgBiAQamsiBCAVTQ0AGkEAIBggBGtBA0kNABpBACAHKAAAIAQgEyAQIAQgEUkiBBtqIgooAABHDQAaIAVBBWogCkEEaiAOIBYgDiAEGyAUECBBBGoLIQQgD0H/k+vcAzYCDAJAIAAgBSAOIA9BDGoQmgEiCiAEIAogBEsiCBsiCkEDTQRAIAUgA2tBCHUgBWpBAWohBQwBCyAPKAIMQQAgCBshBCAFIAcgCBshBwJAAkAgBSASTw0AIAUgEGshDANAIAxBAWohDSAFQQFqIQgCQCAERQRAQQAhBAwBCyANIAZrIgsgFU0gGCALa0EDSXINACAIKAAAIAsgEyAQIAsgEUkiCxtqIhkoAABHDQAgBUEFaiAZQQRqIA4gFiAOIAsbIBQQICILQXtLDQAgC0EEaiILQQNsIApBA2wgBEEBahAka0EBakwNACAIIQdBACEEIAshCgsgD0H/k+vcAzYCCAJ/AkAgACAIIA4gD0EIahCaASILQQRJDQAgBEEBahAkIRkgC0ECdCAPKAIIIhtBAWoQJGsgCkECdCAZa0EEakwNACANIQwgCCEFI
AshCiAbDAELIAggEk8NAiAMQQJqIQwgBUECaiEIAkAgBEUEQEEAIQQMAQsgDCAGayINIBVNIBggDWtBA0lyDQAgCCgAACANIBMgECANIBFJIg0baiILKAAARw0AIAVBBmogC0EEaiAOIBYgDiANGyAUECAiBUF7Sw0AIAVBBGoiBUECdCAKQQJ0QQFyIARBAWoQJGtMDQAgCCEHQQAhBCAFIQoLIA9B/5Pr3AM2AgQgACAIIA4gD0EEahCaASINQQRJDQIgBEEBahAkIQUgDUECdCAPKAIEIgtBAWoQJGsgCkECdCAFa0EHakwNAiAIIQUgDSEKIAsLIQQgBSEHIAUgEkkNAAsMAQsgByEFCwJ/IARFBEAgBiEIIAkMAQsgBEF+aiEIAkAgBSADTQ0AIBMgECAFIBBrIAhrIgcgEUkiCRsgB2oiByAaIBQgCRsiDE0NAANAIAVBf2oiCS0AACAHQX9qIgctAABHDQEgCkEBaiEKIAcgDEsEQCAJIgUgA0sNAQsLIAkhBQsgBgshByAKQX1qIQ0gBSADayEMIAEoAgwhBgJAAkAgBSAXTQRAIAYgAxAcIAEoAgwhCSAMQRBNBEAgASAJIAxqNgIMDAMLIAlBEGogA0EQaiIGEBwgCUEgaiADQSBqEBwgDEExSA0BIAkgDGohCyAJQTBqIQMDQCADIAZBIGoiCRAcIANBEGogBkEwahAcIAkhBiADQSBqIgMgC0kNAAsMAQsgBiADIAUgFxAiCyABIAEoAgwgDGo2AgwgDEGAgARJDQAgAUEBNgIkIAEgASgCBCABKAIAa0EDdTYCKAsgASgCBCIDIARBAWo2AgAgAyAMOwEEIA1BgIAETwRAIAFBAjYCJCABIAMgASgCAGtBA3U2AigLIAMgDTsBBiABIANBCGo2AgQgByEJIAghBiAFIApqIgMhBSADIBJLDQADQAJAIAchBiAIIQcgAyAQayAGayIEIBVNIBggBGtBA0lyDQAgAygAACAEIBMgECAEIBFJIgQbaiIFKAAARw0AIANBBGogBUEEaiAOIBYgDiAEGyAUECAiCkEBaiEFIAEoAgwhBAJAIAMgF00EQCAEIAMQHAwBCyAEIAMgAyAXECILIAEoAgQiBEEBNgIAIARBADsBBCAFQYCABE8EQCABQQI2AiQgASAEIAEoAgBrQQN1NgIoCyAEIAU7AQYgASAEQQhqNgIEIAYhCCAHIQkgCkEEaiADaiIDIQUgAyASTQ0BDAILCyAGIQkgByEGIAMhBQsgBSASSQ0ACwsgAiAJNgIEIAIgBjYCACAPQRBqJAAgDiADawudJQEjfyACKAIEIR0gAigCACEUIAMgACgCBCIbIAAoAgwiHmoiISADRmoiByADIARqIgxBeGoiH0kEQCAAKAIIIiAgACgCECIjaiEnIB4gIGohJCAMQWBqISUgHkF/aiEmA0ACf0EAIAdBAWoiHCAUIBtqayIEICNNDQAaQQAgJiAEa0EDSQ0AGkEAIBwoAAAgBCAgIBsgBCAeSSIFG2oiBCgAAEcNABogB0EFaiAEQQRqIAwgJCAMIAUbICEQIEEEagshFQJAAkACQAJAAkAgACgChAFBe2oOAwECAgALIAAoAgQhECAAKAJ0IQUgACgCECEEIAAoAhQhCCAAKAKAASELIAAoAighDiAAKAIMIQogACgCCCENIAAgACgCeCIPIAAoAnwgB0EEECwiBiAEIAcgEGsiCUEBIAV0IgVrIAQgCSAEayAFSxsgCBsiEU0NAkEAIAlBASAPdCIEayIFIAUgCUsbIQ8gCiANaiEWIAogEGohEiAEQX9qIRMgB0EEaiEXQQEgC3QhC0H/k+vcAyEIQQMhBQNAAkACfyAGIApPBEAgBiAQaiIEIAVqLQAAIAUgB2otAABHDQIgByAEIAwQHQwBCyAGIA1qIgQoAAAgBygAAEcNASAXIARBBGogDCAWIBIQIEEEagsiBCAFTQ0AIAkgBmtBAmohCCAHIAQiBWogDEYNBQsgBiAPTQRAIAUhBAwFCyAOIAYgE3FBAnRqKAIAIgYgEU0EQCAFIQQMBQsgBSEEIAtBf2oiCw0ACwwDCyAAKAIEIRAgACgCdCEFIAAoAhAhBCAAKAIUIQggACgCgAEhCyAAKAIoIQ4gACgCDCEKIAAoAgghDSAAIAAoAngiDyAAKAJ8IAdBBRAsIgYgBCAHIBBrIglBASAFdCIFayAEIAkgBGsgBUsbIAgbIhFNDQFBACAJQQEgD3QiBGsiBSAFIAlLGyEPIAogDWohFiAKIBBqIRIgBEF/aiETIAdBBGohF0EBIAt0IQtB/5Pr3AMhCEEDIQUDQAJAAn8gBiAKTwRAIAYgEGoiBCAFai0AACAFIAdqLQAARw0CIAcgBCAMEB0MAQsgBiANaiIEKAAAIAcoAABHDQEgFyAEQQRqIAwgFiASECBBBGoLIgQgBU0NACAJIAZrQQJqIQggByAEIgVqIAxGDQQLIAYgD00EQCAFIQQMBAsgDiAGIBNxQQJ0aigCACIGIBFNBEAgBSEEDAQLIAUhBCALQX9qIgsNAAsMAgsgACgCBCEQIAAoAnQhBSAAKAIQIQQgACgCFCEIIAAoAoABIQsgACgCKCEOIAAoAgwhCiAAKAIIIQ0gACAAKAJ4Ig8gACgCfCAHQQYQLCIGIAQgByAQayIJQQEgBXQiBWsgBCAJIARrIAVLGyAIGyIRTQ0AQQAgCUEBIA90IgRrIgUgBSAJSxshDyAKIA1qIRYgCiAQaiESIARBf2ohEyAHQQRqIRdBASALdCELQf+T69wDIQhBAyEFA0ACQAJ/IAYgCk8EQCAGIBBqIgQgBWotAAAgBSAHai0AAEcNAiAHIAQgDBAdDAELIAYgDWoiBCgAACAHKAAARw0BIBcgBEEEaiAMIBYgEhAgQQRqCyIEIAVNDQAgCSAGa0ECaiEIIAcgBCIFaiAMRg0DCyAGIA9NBEAgBSEEDAMLIA4gBiATcUECdGooAgAiBiARTQRAIAUhBAwDCyAFIQQgC0F/aiILDQALDAELQQMhBEH/k+vcAyEICwJAIAQgFSAEIBVLIgUbIgRBA00EQCAHIANrQQh1IAdqQQFqIQcMAQsgCEEAIAUbIQkgByAcIAUbIRACQAJAIAcgH08NACAHIBtrIRwDQCAcQQFqIRUgB0EBaiEKAkAgCUUEQEEAIQkMAQsgFSAUayIFICNNICYgBWtBA0lyDQAgCigAACAFICAgGyAFIB5JIggbaiIFKAAARw0AIAdBBWogBUEEaiAMICQgDCAIGyAhECAiBUF7Sw0AIAVBBGoiBUEDbCAEQQNsIAlBAWoQJGtBAWpMDQAgCiEQQQAhCSAFIQQLAkACQAJAAkACQAJAIAAoAoQBQXtqDgMBAgIACyAAKAIEIQ8gACgCdCEIIAAoAhAhBSAAKAIUIQsgACgCgAEhDSAAKAIoIRIgACgCDCERIAAoAgghFiAAIAAoAngiEyAAKAJ8IApBBBAsIgYgBSAKIA9rIg5BASAIdCIIayAFIA4gBWsgCEsbIAsb
IhdNDQNBACAOQQEgE3QiBWsiCCAIIA5LGyETIBEgFmohGCAPIBFqIRkgBUF/aiEaIAdBBWohIkEBIA10IQ1B/5Pr3AMhC0EDIQgDQAJAAn8gBiARTwRAIAYgD2oiBSAIai0AACAIIApqLQAARw0CIAogBSAMEB0MAQsgBiAWaiIFKAAAIAooAABHDQEgIiAFQQRqIAwgGCAZECBBBGoLIgUgCE0NACAOIAZrQQJqIQsgBSEIIAUgCmogDEYNBAsgBiATTQRAIAghBQwECyASIAYgGnFBAnRqKAIAIgYgF00EQCAIIQUMBAsgCCEFIA1Bf2oiDQ0ACwwCCyAAKAIEIQ8gACgCdCEIIAAoAhAhBSAAKAIUIQsgACgCgAEhDSAAKAIoIRIgACgCDCERIAAoAgghFiAAIAAoAngiEyAAKAJ8IApBBRAsIgYgBSAKIA9rIg5BASAIdCIIayAFIA4gBWsgCEsbIAsbIhdNDQJBACAOQQEgE3QiBWsiCCAIIA5LGyETIBEgFmohGCAPIBFqIRkgBUF/aiEaIAdBBWohIkEBIA10IQ1B/5Pr3AMhC0EDIQgDQAJAAn8gBiARTwRAIAYgD2oiBSAIai0AACAIIApqLQAARw0CIAogBSAMEB0MAQsgBiAWaiIFKAAAIAooAABHDQEgIiAFQQRqIAwgGCAZECBBBGoLIgUgCE0NACAOIAZrQQJqIQsgBSEIIAUgCmogDEYNAwsgBiATTQRAIAghBQwDCyASIAYgGnFBAnRqKAIAIgYgF00EQCAIIQUMAwsgCCEFIA1Bf2oiDQ0ACwwBCyAAKAIEIQ8gACgCdCEIIAAoAhAhBSAAKAIUIQsgACgCgAEhDSAAKAIoIRIgACgCDCERIAAoAgghFiAAIAAoAngiEyAAKAJ8IApBBhAsIgYgBSAKIA9rIg5BASAIdCIIayAFIA4gBWsgCEsbIAsbIhdNDQFBACAOQQEgE3QiBWsiCCAIIA5LGyETIBEgFmohGCAPIBFqIRkgBUF/aiEaIAdBBWohIkEBIA10IQ1B/5Pr3AMhC0EDIQgDQAJAAn8gBiARTwRAIAYgD2oiBSAIai0AACAIIApqLQAARw0CIAogBSAMEB0MAQsgBiAWaiIFKAAAIAooAABHDQEgIiAFQQRqIAwgGCAZECBBBGoLIgUgCE0NACAOIAZrQQJqIQsgBSEIIAUgCmogDEYNAgsgBiATTQRAIAghBQwCCyASIAYgGnFBAnRqKAIAIgYgF00EQCAIIQUMAgsgCCEFIA1Bf2oiDQ0ACwsgBUEESQ0AIAlBAWoQJCEIIAVBAnQgC0EBahAkayAEQQJ0IAhrQQRqTA0AIBUhHCAKIQcgCyEJIAUhBAwBCyAKIB9PDQIgHEECaiEcIAdBAmohBUEAIQoCfyAEIAlFDQAaAkAgHCAUayIIICNNICYgCGtBA0lyDQAgBSgAACAIICAgGyAIIB5JIgYbaiIIKAAARw0AIAdBBmogCEEEaiAMICQgDCAGGyAhECAiCEF7Sw0AIAQgCEEEaiIIQQJ0IARBAnRBAXIgCSIKQQFqECRrTA0BGiAFIRBBACEKIAgMAQsgCSEKIAQLIQgCQAJAAkACQCAAKAKEAUF7ag4DAQICAAsgACgCBCENIAAoAnQhCSAAKAIQIQQgACgCFCELIAAoAoABIREgACgCKCEWIAAoAgwhDiAAKAIIIQ8gACAAKAJ4IhIgACgCfCAFQQQQLCIGIAQgBSANayIVQQEgCXQiCWsgBCAVIARrIAlLGyALGyITTQ0GQQAgFUEBIBJ0IgRrIgkgCSAVSxshEiAOIA9qIRcgDSAOaiEYIARBf2ohGSAHQQZqIRpBASARdCELQf+T69wDIQlBAyEHA0ACQAJ/IAYgDk8EQCAGIA1qIgQgB2otAAAgBSAHai0AAEcNAiAFIAQgDBAdDAELIAYgD2oiBCgAACAFKAAARw0BIBogBEEEaiAMIBcgGBAgQQRqCyIEIAdNDQAgFSAGa0ECaiEJIAUgBCIHaiAMRg0ECyAGIBJNBEAgByEEDAQLIBYgBiAZcUECdGooAgAiBiATTQRAIAchBAwECyAHIQQgC0F/aiILDQALDAILIAAoAgQhDSAAKAJ0IQkgACgCECEEIAAoAhQhCyAAKAKAASERIAAoAighFiAAKAIMIQ4gACgCCCEPIAAgACgCeCISIAAoAnwgBUEFECwiBiAEIAUgDWsiFUEBIAl0IglrIAQgFSAEayAJSxsgCxsiE00NBUEAIBVBASASdCIEayIJIAkgFUsbIRIgDiAPaiEXIA0gDmohGCAEQX9qIRkgB0EGaiEaQQEgEXQhC0H/k+vcAyEJQQMhBwNAAkACfyAGIA5PBEAgBiANaiIEIAdqLQAAIAUgB2otAABHDQIgBSAEIAwQHQwBCyAGIA9qIgQoAAAgBSgAAEcNASAaIARBBGogDCAXIBgQIEEEagsiBCAHTQ0AIBUgBmtBAmohCSAFIAQiB2ogDEYNAwsgBiASTQRAIAchBAwDCyAWIAYgGXFBAnRqKAIAIgYgE00EQCAHIQQMAwsgByEEIAtBf2oiCw0ACwwBCyAAKAIEIQ0gACgCdCEJIAAoAhAhBCAAKAIUIQsgACgCgAEhESAAKAIoIRYgACgCDCEOIAAoAgghDyAAIAAoAngiEiAAKAJ8IAVBBhAsIgYgBCAFIA1rIhVBASAJdCIJayAEIBUgBGsgCUsbIAsbIhNNDQRBACAVQQEgEnQiBGsiCSAJIBVLGyESIA4gD2ohFyANIA5qIRggBEF/aiEZIAdBBmohGkEBIBF0IQtB/5Pr3AMhCUEDIQcDQAJAAn8gBiAOTwRAIAYgDWoiBCAHai0AACAFIAdqLQAARw0CIAUgBCAMEB0MAQsgBiAPaiIEKAAAIAUoAABHDQEgGiAEQQRqIAwgFyAYECBBBGoLIgQgB00NACAVIAZrQQJqIQkgBSAEIgdqIAxGDQILIAYgEk0EQCAHIQQMAgsgFiAGIBlxQQJ0aigCACIGIBNNBEAgByEEDAILIAchBCALQX9qIgsNAAsLIARBBEkNAyAKQQFqECQhBiAFIQcgBEECdCAJQQFqECRrIAhBAnQgBmtBB2pMDQMLIAchECAJIQogBCEIIAcgH0kNAAsMAQsgCSEKIAQhCAsCfyAKRQRAIBQhBSAdDAELIApBfmohBQJAIBAgA00NACAgIBsgECAbayAFayIEIB5JIgcbIARqIgQgJyAhIAcbIgZNDQADQCAQQX9qIgctAAAgBEF/aiIELQAARw0BIAhBAWohCCAEIAZLBEAgByIQIANLDQELCyAHIRALIBQLIQYgCEF9aiEJIBAgA2shFCABKAIMIQQCQAJAIBAgJU0EQCAEIAMQHCABKAIMIQQgFEEQTQRAIAEgBCAUajYCDAwDCyAEQRBqIANBEGoiBxAcIARBIGogA0EgahAcIBRBMUgNASAEIBRqIR0gBEEwaiEEA0AgBCAHQSBqIgMQHCAEQRBqIAdBMGoQHCADIQcgBEEgaiIEIB1JDQALDAELIAQ
gAyAQICUQIgsgASABKAIMIBRqNgIMIBRBgIAESQ0AIAFBATYCJCABIAEoAgQgASgCAGtBA3U2AigLIAEoAgQiAyAKQQFqNgIAIAMgFDsBBCAJQYCABE8EQCABQQI2AiQgASADIAEoAgBrQQN1NgIoCyADIAk7AQYgASADQQhqNgIEIAYhHSAFIRQgCCAQaiIDIQcgAyAfSw0AA0ACQCAGIRQgBSEGIAMgG2sgFGsiBCAjTSAmIARrQQNJcg0AIAMoAAAgBCAgIBsgBCAeSSIFG2oiBCgAAEcNACADQQRqIARBBGogDCAkIAwgBRsgIRAgIgdBAWohBSABKAIMIQQCQCADICVNBEAgBCADEBwMAQsgBCADIAMgJRAiCyABKAIEIgRBATYCACAEQQA7AQQgBUGAgARPBEAgAUECNgIkIAEgBCABKAIAa0EDdTYCKAsgBCAFOwEGIAEgBEEIajYCBCAUIQUgBiEdIAdBBGogA2oiAyEHIAMgH00NAQwCCwsgFCEdIAYhFCADIQcLIAcgH0kNAAsLIAIgHTYCBCACIBQ2AgAgDCADawvXGgEifyACKAIEIRggAigCACEQIAMgACgCBCIZIAAoAgwiGmoiISADRmoiByADIARqIgpBeGoiHEkEQCAAKAIIIh0gACgCECIjaiEmIBogHWohJCAKQWBqISIgGkF/aiElA0ACf0EAIAdBAWoiESAQIBlqayIEICNNDQAaQQAgJSAEa0EDSQ0AGkEAIBEoAAAgBCAdIBkgBCAaSSIFG2oiBCgAAEcNABogB0EFaiAEQQRqIAogJCAKIAUbICEQIEEEagshGwJAAkACQAJAAkAgACgChAFBe2oOAwECAgALIAAoAgQhDSAAKAJ0IQYgACgCECEEIAAoAhQhCyAAKAKAASEJIAAoAighEiAAKAIMIQggACgCCCEOIAAgACgCeCIPIAAoAnwgB0EEECwiBSAEIAcgDWsiDEEBIAZ0IgZrIAQgDCAEayAGSxsgCxsiFE0NAkEAIAxBASAPdCIEayIGIAYgDEsbIQ8gCCAOaiEVIAggDWohEyAEQX9qIRYgB0EEaiEXQQEgCXQhCUH/k+vcAyELQQMhBgNAAkACfyAFIAhPBEAgBSANaiIEIAZqLQAAIAYgB2otAABHDQIgByAEIAoQHQwBCyAFIA5qIgQoAAAgBygAAEcNASAXIARBBGogCiAVIBMQIEEEagsiBCAGTQ0AIAwgBWtBAmohCyAEIQYgBCAHaiAKRg0FCyAFIA9NBEAgBiEEDAULIBIgBSAWcUECdGooAgAiBSAUTQRAIAYhBAwFCyAGIQQgCUF/aiIJDQALDAMLIAAoAgQhDSAAKAJ0IQYgACgCECEEIAAoAhQhCyAAKAKAASEJIAAoAighEiAAKAIMIQggACgCCCEOIAAgACgCeCIPIAAoAnwgB0EFECwiBSAEIAcgDWsiDEEBIAZ0IgZrIAQgDCAEayAGSxsgCxsiFE0NAUEAIAxBASAPdCIEayIGIAYgDEsbIQ8gCCAOaiEVIAggDWohEyAEQX9qIRYgB0EEaiEXQQEgCXQhCUH/k+vcAyELQQMhBgNAAkACfyAFIAhPBEAgBSANaiIEIAZqLQAAIAYgB2otAABHDQIgByAEIAoQHQwBCyAFIA5qIgQoAAAgBygAAEcNASAXIARBBGogCiAVIBMQIEEEagsiBCAGTQ0AIAwgBWtBAmohCyAEIQYgBCAHaiAKRg0ECyAFIA9NBEAgBiEEDAQLIBIgBSAWcUECdGooAgAiBSAUTQRAIAYhBAwECyAGIQQgCUF/aiIJDQALDAILIAAoAgQhDSAAKAJ0IQYgACgCECEEIAAoAhQhCyAAKAKAASEJIAAoAighEiAAKAIMIQggACgCCCEOIAAgACgCeCIPIAAoAnwgB0EGECwiBSAEIAcgDWsiDEEBIAZ0IgZrIAQgDCAEayAGSxsgCxsiFE0NAEEAIAxBASAPdCIEayIGIAYgDEsbIQ8gCCAOaiEVIAggDWohEyAEQX9qIRYgB0EEaiEXQQEgCXQhCUH/k+vcAyELQQMhBgNAAkACfyAFIAhPBEAgBSANaiIEIAZqLQAAIAYgB2otAABHDQIgByAEIAoQHQwBCyAFIA5qIgQoAAAgBygAAEcNASAXIARBBGogCiAVIBMQIEEEagsiBCAGTQ0AIAwgBWtBAmohCyAEIQYgBCAHaiAKRg0DCyAFIA9NBEAgBiEEDAMLIBIgBSAWcUECdGooAgAiBSAUTQRAIAYhBAwDCyAGIQQgCUF/aiIJDQALDAELQQMhBEH/k+vcAyELCwJAIAQgGyAEIBtLIgQbIgxBA00EQCAHIANrQQh1IAdqQQFqIQcMAQsgC0EAIAQbIQ0gByARIAQbIQsCQCAHIBxPDQAgByAZayEbA0AgG0EBaiEbIAdBAWohBgJAIA1FBEBBACENDAELIBsgEGsiBCAjTSAlIARrQQNJcg0AIAYoAAAgBCAdIBkgBCAaSSIFG2oiBCgAAEcNACAHQQVqIARBBGogCiAkIAogBRsgIRAgIgRBe0sNACAEQQRqIgRBA2wgDEEDbCANQQFqECRrQQFqTA0AIAYhC0EAIQ0gBCEMCwJAAkACQAJAIAAoAoQBQXtqDgMBAgIACyAAKAIEIQ4gACgCdCEIIAAoAhAhBSAAKAIUIQkgACgCgAEhFCAAKAIoIRUgACgCDCESIAAoAgghDyAAIAAoAngiEyAAKAJ8IAZBBBAsIgQgBSAGIA5rIhFBASAIdCIIayAFIBEgBWsgCEsbIAkbIhZNDQRBACARQQEgE3QiBWsiCCAIIBFLGyETIA8gEmohFyAOIBJqIR4gBUF/aiEfIAdBBWohIEEBIBR0IQlB/5Pr3AMhCEEDIQcDQAJAAn8gBCASTwRAIAQgDmoiBSAHai0AACAGIAdqLQAARw0CIAYgBSAKEB0MAQsgBCAPaiIFKAAAIAYoAABHDQEgICAFQQRqIAogFyAeECBBBGoLIgUgB00NACARIARrQQJqIQggBiAFIgdqIApGDQQLIAQgE00EQCAHIQUMBAsgFSAEIB9xQQJ0aigCACIEIBZNBEAgByEFDAQLIAchBSAJQX9qIgkNAAsMAgsgACgCBCEOIAAoAnQhCCAAKAIQIQUgACgCFCEJIAAoAoABIRQgACgCKCEVIAAoAgwhEiAAKAIIIQ8gACAAKAJ4IhMgACgCfCAGQQUQLCIEIAUgBiAOayIRQQEgCHQiCGsgBSARIAVrIAhLGyAJGyIWTQ0DQQAgEUEBIBN0IgVrIgggCCARSxshEyAPIBJqIRcgDiASaiEeIAVBf2ohHyAHQQVqISBBASAUdCEJQf+T69wDIQhBAyEHA0ACQAJ/IAQgEk8EQCAEIA5qIgUgB2otAAAgBiAHai0AAEcNAiAGIAUgChAdDAELIAQgD2oiBSgAACAGKAAARw0BICAgBUEEaiAKIBcgHhAgQQRqCyIFIAdNDQAgESAEa0ECaiEIIAYgBSIHaiAKRg0DCyAEIBNNBEAgByEFDAMLIBUgBC
AfcUECdGooAgAiBCAWTQRAIAchBQwDCyAHIQUgCUF/aiIJDQALDAELIAAoAgQhDiAAKAJ0IQggACgCECEFIAAoAhQhCSAAKAKAASEUIAAoAighFSAAKAIMIRIgACgCCCEPIAAgACgCeCITIAAoAnwgBkEGECwiBCAFIAYgDmsiEUEBIAh0IghrIAUgESAFayAISxsgCRsiFk0NAkEAIBFBASATdCIFayIIIAggEUsbIRMgDyASaiEXIA4gEmohHiAFQX9qIR8gB0EFaiEgQQEgFHQhCUH/k+vcAyEIQQMhBwNAAkACfyAEIBJPBEAgBCAOaiIFIAdqLQAAIAYgB2otAABHDQIgBiAFIAoQHQwBCyAEIA9qIgUoAAAgBigAAEcNASAgIAVBBGogCiAXIB4QIEEEagsiBSAHTQ0AIBEgBGtBAmohCCAGIAUiB2ogCkYNAgsgBCATTQRAIAchBQwCCyAVIAQgH3FBAnRqKAIAIgQgFk0EQCAHIQUMAgsgByEFIAlBf2oiCQ0ACwsgBUEESQ0BIA1BAWoQJCEEIAVBAnQgCEEBahAkayAMQQJ0IARrQQRqTA0BIAUhDCAIIQ0gBiIHIQsgByAcSQ0ACwsCfyANRQRAIBAhBiAYDAELIA1BfmohBgJAIAsgA00NACAdIBkgCyAZayAGayIEIBpJIgUbIARqIgQgJiAhIAUbIgdNDQADQCALQX9qIgUtAAAgBEF/aiIELQAARw0BIAxBAWohDCAEIAdLBEAgBSILIANLDQELCyAFIQsLIBALIQUgDEF9aiEYIAsgA2shECABKAIMIQQCQAJAIAsgIk0EQCAEIAMQHCABKAIMIQQgEEEQTQRAIAEgBCAQajYCDAwDCyAEQRBqIANBEGoiBxAcIARBIGogA0EgahAcIBBBMUgNASAEIBBqIQggBEEwaiEEA0AgBCAHQSBqIgMQHCAEQRBqIAdBMGoQHCADIQcgBEEgaiIEIAhJDQALDAELIAQgAyALICIQIgsgASABKAIMIBBqNgIMIBBBgIAESQ0AIAFBATYCJCABIAEoAgQgASgCAGtBA3U2AigLIAEoAgQiAyANQQFqNgIAIAMgEDsBBCAYQYCABE8EQCABQQI2AiQgASADIAEoAgBrQQN1NgIoCyADIBg7AQYgASADQQhqNgIEIAUhGCAGIRAgCyAMaiIDIQcgAyAcSw0AA0ACQCAFIRAgBiEFIAMgGWsgEGsiBCAjTSAlIARrQQNJcg0AIAMoAAAgBCAdIBkgBCAaSSIGG2oiBCgAAEcNACADQQRqIARBBGogCiAkIAogBhsgIRAgIgdBAWohBiABKAIMIQQCQCADICJNBEAgBCADEBwMAQsgBCADIAMgIhAiCyABKAIEIgRBATYCACAEQQA7AQQgBkGAgARPBEAgAUECNgIkIAEgBCABKAIAa0EDdTYCKAsgBCAGOwEGIAEgBEEIajYCBCAQIQYgBSEYIAdBBGogA2oiAyEHIAMgHE0NAQwCCwsgECEYIAUhECADIQcLIAcgHEkNAAsLIAIgGDYCBCACIBA2AgAgCiADawuAEAEdfyACKAIEIQogAigCACEIIAMgACgCBCISIAAoAgwiE2oiHCADRmoiBiADIARqIgxBeGoiHUkEQCAAKAIIIhogACgCECIeaiEhIBMgGmohHyAMQWBqIRsgE0F/aiEgA0ACQAJ/AkACfwJAIAZBAWoiBSAIIBJqayIEIB5NICAgBGtBA0lyDQAgBSgAACAEIBogEiAEIBNJIgQbaiIHKAAARw0AIAZBBWogB0EEaiAMIB8gDCAEGyAcECBBBGohBEEADAELAkACQAJAAkACQAJAIAAoAoQBQXtqDgMBAgIACyAAKAIEIQ4gACgCdCEFIAAoAhAhBCAAKAIUIQkgACgCgAEhDSAAKAIoIRQgACgCDCEPIAAoAgghESAAIAAoAngiECAAKAJ8IAZBBBAsIgcgBCAGIA5rIgtBASAFdCIFayAEIAsgBGsgBUsbIAkbIhVNDQNBACALQQEgEHQiBGsiBSAFIAtLGyEQIA8gEWohFiAOIA9qIRcgBEF/aiEYIAZBBGohGUEBIA10IQlB/5Pr3AMhDUEDIQUDQAJAAn8gByAPTwRAIAcgDmoiBCAFai0AACAFIAZqLQAARw0CIAYgBCAMEB0MAQsgByARaiIEKAAAIAYoAABHDQEgGSAEQQRqIAwgFiAXECBBBGoLIgQgBU0NACALIAdrQQJqIQ0gBiAEIgVqIAxGDQQLIAcgEE0EQCAFIQQMBAsgFCAHIBhxQQJ0aigCACIHIBVNBEAgBSEEDAQLIAUhBCAJQX9qIgkNAAsMAgsgACgCBCEOIAAoAnQhBSAAKAIQIQQgACgCFCEJIAAoAoABIQ0gACgCKCEUIAAoAgwhDyAAKAIIIREgACAAKAJ4IhAgACgCfCAGQQUQLCIHIAQgBiAOayILQQEgBXQiBWsgBCALIARrIAVLGyAJGyIVTQ0CQQAgC0EBIBB0IgRrIgUgBSALSxshECAPIBFqIRYgDiAPaiEXIARBf2ohGCAGQQRqIRlBASANdCEJQf+T69wDIQ1BAyEFA0ACQAJ/IAcgD08EQCAHIA5qIgQgBWotAAAgBSAGai0AAEcNAiAGIAQgDBAdDAELIAcgEWoiBCgAACAGKAAARw0BIBkgBEEEaiAMIBYgFxAgQQRqCyIEIAVNDQAgCyAHa0ECaiENIAYgBCIFaiAMRg0DCyAHIBBNBEAgBSEEDAMLIBQgByAYcUECdGooAgAiByAVTQRAIAUhBAwDCyAFIQQgCUF/aiIJDQALDAELIAAoAgQhDiAAKAJ0IQUgACgCECEEIAAoAhQhCSAAKAKAASENIAAoAighFCAAKAIMIQ8gACgCCCERIAAgACgCeCIQIAAoAnwgBkEGECwiByAEIAYgDmsiC0EBIAV0IgVrIAQgCyAEayAFSxsgCRsiFU0NAUEAIAtBASAQdCIEayIFIAUgC0sbIRAgDyARaiEWIA4gD2ohFyAEQX9qIRggBkEEaiEZQQEgDXQhCUH/k+vcAyENQQMhBQNAAkACfyAHIA9PBEAgByAOaiIEIAVqLQAAIAUgBmotAABHDQIgBiAEIAwQHQwBCyAHIBFqIgQoAAAgBigAAEcNASAZIARBBGogDCAWIBcQIEEEagsiBCAFTQ0AIAsgB2tBAmohDSAGIAQiBWogDEYNAgsgByAQTQRAIAUhBAwCCyAUIAcgGHFBAnRqKAIAIgcgFU0EQCAFIQQMAgsgBSEEIAlBf2oiCQ0ACwsgBEEDSw0BCyAGIANrQQh1IAZqQQFqIQYMBAsgDQ0BIAYhBUEACyENIAghCSAKDAELIA1BfmohCQJAAkAgBiADTQ0AIBogEiAGIBJrIAlrIgUgE0kiChsgBWoiByAhIBwgChsiCk0NAANAIAZBf2oiBS0AACAHQX9qIgctAABHDQEgBEEBaiEEIAcgCk0NAiAFIgYgA0sNAAsMAQsgBiEFCyAICyEHIARBfWohCyAFIANrIQogASgCDCEIAkACQ
CAFIBtNBEAgCCADEBwgASgCDCEIIApBEE0EQCABIAggCmo2AgwMAwsgCEEQaiADQRBqIgYQHCAIQSBqIANBIGoQHCAKQTFIDQEgCCAKaiEOIAhBMGohAwNAIAMgBkEgaiIIEBwgA0EQaiAGQTBqEBwgCCEGIANBIGoiAyAOSQ0ACwwBCyAIIAMgBSAbECILIAEgASgCDCAKajYCDCAKQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyABKAIEIgMgDUEBajYCACADIAo7AQQgC0GAgARPBEAgAUECNgIkIAEgAyABKAIAa0EDdTYCKAsgAyALOwEGIAEgA0EIajYCBCAHIQogCSEIIAQgBWoiAyEGIAMgHUsNAANAAkAgByEIIAkhByADIBJrIAhrIgQgHk0gICAEa0EDSXINACADKAAAIAQgGiASIAQgE0kiBBtqIgUoAABHDQAgA0EEaiAFQQRqIAwgHyAMIAQbIBwQICIGQQFqIQUgASgCDCEEAkAgAyAbTQRAIAQgAxAcDAELIAQgAyADIBsQIgsgASgCBCIEQQE2AgAgBEEAOwEEIAVBgIAETwRAIAFBAjYCJCABIAQgASgCAGtBA3U2AigLIAQgBTsBBiABIARBCGo2AgQgCCEJIAchCiAGQQRqIANqIgMhBiADIB1NDQEMAgsLIAghCiAHIQggAyEGCyAGIB1JDQALCyACIAo2AgQgAiAINgIAIAwgA2sL+QcBFX8jAEEQayIOJAAgAigCBCEIIAIoAgAhBiADIAAoAnAiBSgCACIRIAMgACgCBCINIAAoAgwiDGoiEmtqIAUoAgQiEyAFKAIMaiIXRmoiBSADIARqIgpBeGoiFEkEQCATIAwgE2ogEWsiGGshFSAKQWBqIQ8DQAJAAn8CQAJ/AkAgDCAFQQFqIgcgBiANamsiBEF/c2pBA0kNACATIAQgGGtqIAcgBmsgBCAMSSIEGyIJKAAAIAcoAABHDQAgBUEFaiAJQQRqIAogESAKIAQbIBIQIEEEaiELQQAMAQsgDkH/k+vcAzYCDCAAIAUgCiAOQQxqEGoiC0EDTQRAIAUgA2tBCHUgBWpBAWohBQwECyAOKAIMIhANASAFIQdBAAshECAGIQkgCAwBCwJAIAUgA00EQCAFIQcMAQsgBSEHIBUgDSAFIA0gEGprQQJqIgQgDEkiCRsgBGoiBCAXIBIgCRsiCU0NAANAIAVBf2oiBy0AACAEQX9qIgQtAABHBEAgBSEHDAILIAtBAWohCyAEIAlNDQEgByIFIANLDQALCyAQQX5qIQkgBgshBCALQX1qIRYgByADayEIIAEoAgwhBQJAAkAgByAPTQRAIAUgAxAcIAEoAgwhBiAIQRBNBEAgASAGIAhqNgIMDAMLIAZBEGogA0EQaiIFEBwgBkEgaiADQSBqEBwgCEExSA0BIAYgCGohGSAGQTBqIQMDQCADIAVBIGoiBhAcIANBEGogBUEwahAcIAYhBSADQSBqIgMgGUkNAAsMAQsgBSADIAcgDxAiCyABIAEoAgwgCGo2AgwgCEGAgARJDQAgAUEBNgIkIAEgASgCBCABKAIAa0EDdTYCKAsgASgCBCIDIBBBAWo2AgAgAyAIOwEEIBZBgIAETwRAIAFBAjYCJCABIAMgASgCAGtBA3U2AigLIAMgFjsBBiABIANBCGo2AgQgBCEIIAkhBiAHIAtqIgMhBSADIBRLDQADQAJAIAQhBiAJIQQgDCADIA1rIAZrIgVBf3NqQQNJDQAgBSAVIA0gBSAMSSIFG2oiBygAACADKAAARw0AIANBBGogB0EEaiAKIBEgCiAFGyASECAiC0EBaiEHIAEoAgwhBQJAIAMgD00EQCAFIAMQHAwBCyAFIAMgAyAPECILIAEoAgQiBUEBNgIAIAVBADsBBCAHQYCABE8EQCABQQI2AiQgASAFIAEoAgBrQQN1NgIoCyAFIAc7AQYgASAFQQhqNgIEIAYhCSAEIQggC0EEaiADaiIDIQUgAyAUTQ0BDAILCyAGIQggBCEGIAMhBQsgBSAUSQ0ACwsgAiAINgIEIAIgBjYCACAOQRBqJAAgCiADawuaCgEVfyMAQRBrIg8kACACKAIEIQkgAigCACEIIAMgACgCcCIFKAIAIhIgAyAAKAIEIhAgACgCDCINaiITa2ogBSgCBCIUIAUoAgxqIhhGaiIGIAMgBGoiDEF4aiIRSQRAIBQgDSAUaiASayIWayEXIAxBYGohFQNAAn9BACANIAZBAWoiBCAIIBBqayIFQX9zakEDSQ0AGkEAIBQgBSAWa2ogBCAIayAFIA1JIgUbIgcoAAAgBCgAAEcNABogBkEFaiAHQQRqIAwgEiAMIAUbIBMQIEEEagshBSAPQf+T69wDNgIMAkAgACAGIAwgD0EMahBqIgcgBSAHIAVLIgobIgdBA00EQCAGIANrQQh1IAZqQQFqIQYMAQsgBiAEIAobIgshBSAPKAIMQQAgChsiDiEKIAchBAJAIAYgEU8NAANAAkAgDSAGQQFqIgUgEGsgCGsiBEF/c2pBA0kNACAUIAQgFmtqIAUgCGsgBCANSSIEGyIKKAAAIAUoAABHDQAgBkEFaiAKQQRqIAwgEiAMIAQbIBMQICIEQXtLDQAgBEEEaiIEQQNsIAdBA2wgDkEBahAka0EBakwNAEEAIQ4gBSELIAQhBwsgD0H/k+vcAzYCCAJAIAAgBSAMIA9BCGoQaiIEQQRJDQAgDkEBahAkIQYgBEECdCAPKAIIIgpBAWoQJGsgB0ECdCAGa0EEakwNACAFIQYgBCEHIAohDiAFIQsgBSARSQ0BDAILCyALIQUgDiEKIAchBAsCfyAKRQRAIAUhBiAJIQcgCAwBCwJAIAUgA00EQCAFIQYMAQsgBSEGIBcgECAFIAogEGprQQJqIgcgDUkiCRsgB2oiByAYIBMgCRsiCU0NAANAIAVBf2oiBi0AACAHQX9qIgctAABHBEAgBSEGDAILIARBAWohBCAHIAlNDQEgBiIFIANLDQALCyAIIQcgCkF+agshBSAEQX1qIQ4gBiADayELIAEoAgwhCAJAAkAgBiAVTQRAIAggAxAcIAEoAgwhCSALQRBNBEAgASAJIAtqNgIMDAMLIAlBEGogA0EQaiIIEBwgCUEgaiADQSBqEBwgC0ExSA0BIAkgC2ohGSAJQTBqIQMDQCADIAhBIGoiCRAcIANBEGogCEEwahAcIAkhCCADQSBqIgMgGUkNAAsMAQsgCCADIAYgFRAiCyABIAEoAgwgC2o2AgwgC0GAgARJDQAgAUEBNgIkIAEgASgCBCABKAIAa0EDdTYCKAsgASgCBCIDIApBAWo2AgAgAyALOwEEIA5BgIAETwRAIAFBAjYCJCABIAMgASgCAGtBA3U2AigLIAMgDjsBBiABIANBCGo2AgQgByEJIAUhCCAEIAZqIgMhBiADIBFLDQADQAJAIAchCCAFIQcgDSADIBBrIAhrIgRBf3NqQQNJDQAgBCAXIBAgBCANSSIFG2oiBCgAACAD
KAAARw0AIANBBGogBEEEaiAMIBIgDCAFGyATECAiBkEBaiEFIAEoAgwhBAJAIAMgFU0EQCAEIAMQHAwBCyAEIAMgAyAVECILIAEoAgQiBEEBNgIAIARBADsBBCAFQYCABE8EQCABQQI2AiQgASAEIAEoAgBrQQN1NgIoCyAEIAU7AQYgASAEQQhqNgIEIAghBSAHIQkgBkEEaiADaiIDIQYgAyARTQ0BDAILCyAIIQkgByEIIAMhBgsgBiARSQ0ACwsgAiAJNgIEIAIgCDYCACAPQRBqJAAgDCADawvmCwEVfyMAQRBrIg0kACACKAIEIQogAigCACEIIAMgACgCcCIGKAIAIhIgAyAAKAIEIhAgACgCDCIOaiITa2ogBigCBCIUIAYoAgxqIhlGaiIFIAMgBGoiC0F4aiIRSQRAIBQgDiAUaiASayIWayEYIAtBYGohFQNAAn9BACAOIAVBAWoiBCAIIBBqayIGQX9zakEDSQ0AGkEAIBQgBiAWa2ogBCAIayAGIA5JIgYbIgkoAAAgBCgAAEcNABogBUEFaiAJQQRqIAsgEiALIAYbIBMQIEEEagshBiANQf+T69wDNgIMAkAgACAFIAsgDUEMahBqIgkgBiAJIAZLIgYbIglBA00EQCAFIANrQQh1IAVqQQFqIQUMAQsgDSgCDEEAIAYbIQwgBSAEIAYbIQQCQCAFIBFPDQADQAJAIA4gBUEBaiIGIBBrIAhrIgdBf3NqQQNJDQAgFCAHIBZraiAGIAhrIAcgDkkiBxsiDygAACAGKAAARw0AIAVBBWogD0EEaiALIBIgCyAHGyATECAiB0F7Sw0AIAdBBGoiB0EDbCAJQQNsIAxBAWoQJGtBAWpMDQBBACEMIAYhBCAHIQkLIA1B/5Pr3AM2AggCfwJAIAAgBiALIA1BCGoQaiIHQQRJDQAgDEEBahAkIRcgB0ECdCANKAIIIg9BAWoQJGsgCUECdCAXa0EEakwNACAPIQwgByEJIAYMAQsgBiARTw0CAkAgDiAFQQJqIgYgEGsgCGsiB0F/c2pBA0kNACAUIAcgFmtqIAYgCGsgByAOSSIHGyIPKAAAIAYoAABHDQAgBUEGaiAPQQRqIAsgEiALIAcbIBMQICIFQXtLDQAgBUEEaiIFQQJ0IAlBAnRBAXIgDEEBahAka0wNAEEAIQwgBiEEIAUhCQsgDUH/k+vcAzYCBCAAIAYgCyANQQRqEGoiBUEESQ0CIAxBAWoQJCEPIAVBAnQgDSgCBCIHQQFqECRrIAlBAnQgD2tBB2pMDQIgByEMIAUhCSAGCyIFIQQgBSARSQ0ACwsCfyAMRQRAIAQhBSAKIQYgCAwBCwJAIAQgA00EQCAEIQUMAQsgGCAQIAQiBSAMIBBqa0ECaiIGIA5JIgobIAZqIgYgGSATIAobIgpNDQADQCAEQX9qIgUtAAAgBkF/aiIGLQAARwRAIAQhBQwCCyAJQQFqIQkgBiAKTQ0BIAUhBCAFIANLDQALCyAIIQYgDEF+agshBCAJQX1qIQ8gBSADayEHIAEoAgwhCAJAAkAgBSAVTQRAIAggAxAcIAEoAgwhCiAHQRBNBEAgASAHIApqNgIMDAMLIApBEGogA0EQaiIIEBwgCkEgaiADQSBqEBwgB0ExSA0BIAcgCmohFyAKQTBqIQMDQCADIAhBIGoiChAcIANBEGogCEEwahAcIAohCCADQSBqIgMgF0kNAAsMAQsgCCADIAUgFRAiCyABIAEoAgwgB2o2AgwgB0GAgARJDQAgAUEBNgIkIAEgASgCBCABKAIAa0EDdTYCKAsgASgCBCIDIAxBAWo2AgAgAyAHOwEEIA9BgIAETwRAIAFBAjYCJCABIAMgASgCAGtBA3U2AigLIAMgDzsBBiABIANBCGo2AgQgBiEKIAQhCCAFIAlqIgMhBSADIBFLDQADQAJAIAYhCCAEIQYgDiADIBBrIAhrIgRBf3NqQQNJDQAgBCAYIBAgBCAOSSIFG2oiBCgAACADKAAARw0AIANBBGogBEEEaiALIBIgCyAFGyATECAiCUEBaiEFIAEoAgwhBAJAIAMgFU0EQCAEIAMQHAwBCyAEIAMgAyAVECILIAEoAgQiBEEBNgIAIARBADsBBCAFQYCABE8EQCABQQI2AiQgASAEIAEoAgBrQQN1NgIoCyAEIAU7AQYgASAEQQhqNgIEIAghBCAGIQogCUEEaiADaiIDIQUgAyARTQ0BDAILCyAIIQogBiEIIAMhBQsgBSARSQ0ACwsgAiAKNgIEIAIgCDYCACANQRBqJAAgCyADawvpCwEVfyMAQRBrIg0kACACKAIEIQogAigCACEIIAMgACgCcCIGKAIAIhIgAyAAKAIEIhAgACgCDCIOaiITa2ogBigCBCIUIAYoAgxqIhlGaiIFIAMgBGoiC0F4aiIRSQRAIBQgDiAUaiASayIWayEYIAtBYGohFQNAAn9BACAOIAVBAWoiBCAIIBBqayIGQX9zakEDSQ0AGkEAIBQgBiAWa2ogBCAIayAGIA5JIgYbIgkoAAAgBCgAAEcNABogBUEFaiAJQQRqIAsgEiALIAYbIBMQIEEEagshBiANQf+T69wDNgIMAkAgACAFIAsgDUEMahCbASIJIAYgCSAGSyIGGyIJQQNNBEAgBSADa0EIdSAFakEBaiEFDAELIA0oAgxBACAGGyEMIAUgBCAGGyEEAkAgBSARTw0AA0ACQCAOIAVBAWoiBiAQayAIayIHQX9zakEDSQ0AIBQgByAWa2ogBiAIayAHIA5JIgcbIg8oAAAgBigAAEcNACAFQQVqIA9BBGogCyASIAsgBxsgExAgIgdBe0sNACAHQQRqIgdBA2wgCUEDbCAMQQFqECRrQQFqTA0AQQAhDCAGIQQgByEJCyANQf+T69wDNgIIAn8CQCAAIAYgCyANQQhqEJsBIgdBBEkNACAMQQFqECQhFyAHQQJ0IA0oAggiD0EBahAkayAJQQJ0IBdrQQRqTA0AIA8hDCAHIQkgBgwBCyAGIBFPDQICQCAOIAVBAmoiBiAQayAIayIHQX9zakEDSQ0AIBQgByAWa2ogBiAIayAHIA5JIgcbIg8oAAAgBigAAEcNACAFQQZqIA9BBGogCyASIAsgBxsgExAgIgVBe0sNACAFQQRqIgVBAnQgCUECdEEBciAMQQFqECRrTA0AQQAhDCAGIQQgBSEJCyANQf+T69wDNgIEIAAgBiALIA1BBGoQmwEiBUEESQ0CIAxBAWoQJCEPIAVBAnQgDSgCBCIHQQFqECRrIAlBAnQgD2tBB2pMDQIgByEMIAUhCSAGCyIFIQQgBSARSQ0ACwsCfyAMRQRAIAQhBSAKIQYgCAwBCwJAIAQgA00EQCAEIQUMAQsgGCAQIAQiBSAMIBBqa0ECaiIGIA5JIgobIAZqIgYgGSATIAobIgpNDQADQCAEQX9qIgUtAAAgBkF/aiIGLQAARwRAIAQhBQwCCyAJQQFqIQkgBiAKTQ0BIAUhBCAFIANLDQALCyAIIQYgDEF+agshBCAJQX1qIQ8
gBSADayEHIAEoAgwhCAJAAkAgBSAVTQRAIAggAxAcIAEoAgwhCiAHQRBNBEAgASAHIApqNgIMDAMLIApBEGogA0EQaiIIEBwgCkEgaiADQSBqEBwgB0ExSA0BIAcgCmohFyAKQTBqIQMDQCADIAhBIGoiChAcIANBEGogCEEwahAcIAohCCADQSBqIgMgF0kNAAsMAQsgCCADIAUgFRAiCyABIAEoAgwgB2o2AgwgB0GAgARJDQAgAUEBNgIkIAEgASgCBCABKAIAa0EDdTYCKAsgASgCBCIDIAxBAWo2AgAgAyAHOwEEIA9BgIAETwRAIAFBAjYCJCABIAMgASgCAGtBA3U2AigLIAMgDzsBBiABIANBCGo2AgQgBiEKIAQhCCAFIAlqIgMhBSADIBFLDQADQAJAIAYhCCAEIQYgDiADIBBrIAhrIgRBf3NqQQNJDQAgBCAYIBAgBCAOSSIFG2oiBCgAACADKAAARw0AIANBBGogBEEEaiALIBIgCyAFGyATECAiCUEBaiEFIAEoAgwhBAJAIAMgFU0EQCAEIAMQHAwBCyAEIAMgAyAVECILIAEoAgQiBEEBNgIAIARBADsBBCAFQYCABE8EQCABQQI2AiQgASAEIAEoAgBrQQN1NgIoCyAEIAU7AQYgASAEQQhqNgIEIAghBCAGIQogCUEEaiADaiIDIQUgAyARTQ0BDAILCyAIIQogBiEIIAMhBQsgBSARSQ0ACwsgAiAKNgIEIAIgCDYCACANQRBqJAAgCyADawvcDQESfyACKAIAIgUgAigCBCIHQQAgByADIAAoAgQgACgCDGoiFCADRmoiBiAUayIJSyIKGyAFIAlLIgkbIRZBACAFIAkbIQlBACAHIAobIQogBiADIARqIg5BeGoiFUkEQCAOQWBqIRMDQAJAAn8CQAJ/IAlFIAZBAWoiCCAJaygAACAIKAAAR3JFBEAgBkEFaiIEIAQgCWsgDhAdQQRqIQVBAAwBCwJAAkACQAJAAkACQCAAKAKEAUF7ag4DAQICAAsgACgCBCEPIAAoAnQhBSAAKAIQIQQgACgCFCEIIAAoAoABIQwgACgCKCEQIAAgACgCeCINIAAoAnwgBkEEECwiByAEIAYgD2siC0EBIAV0IgVrIAQgCyAEayAFSxsgCBsiEU0NA0EAIAtBASANdCIEayIFIAUgC0sbIQ0gBEF/aiESQQEgDHQhCEH/k+vcAyEMQQMhBANAAkAgByAPaiIFIARqLQAAIAQgBmotAABHDQAgBiAFIA4QHSIFIARNDQAgCyAHa0ECaiEMIAUiBCAGaiAORg0ECyAHIA1NBEAgBCEFDAQLIBAgByAScUECdGooAgAiByARTQRAIAQhBQwECyAEIQUgCEF/aiIIDQALDAILIAAoAgQhDyAAKAJ0IQUgACgCECEEIAAoAhQhCCAAKAKAASEMIAAoAighECAAIAAoAngiDSAAKAJ8IAZBBRAsIgcgBCAGIA9rIgtBASAFdCIFayAEIAsgBGsgBUsbIAgbIhFNDQJBACALQQEgDXQiBGsiBSAFIAtLGyENIARBf2ohEkEBIAx0IQhB/5Pr3AMhDEEDIQQDQAJAIAcgD2oiBSAEai0AACAEIAZqLQAARw0AIAYgBSAOEB0iBSAETQ0AIAsgB2tBAmohDCAFIgQgBmogDkYNAwsgByANTQRAIAQhBQwDCyAQIAcgEnFBAnRqKAIAIgcgEU0EQCAEIQUMAwsgBCEFIAhBf2oiCA0ACwwBCyAAKAIEIQ8gACgCdCEFIAAoAhAhBCAAKAIUIQggACgCgAEhDCAAKAIoIRAgACAAKAJ4Ig0gACgCfCAGQQYQLCIHIAQgBiAPayILQQEgBXQiBWsgBCALIARrIAVLGyAIGyIRTQ0BQQAgC0EBIA10IgRrIgUgBSALSxshDSAEQX9qIRJBASAMdCEIQf+T69wDIQxBAyEEA0ACQCAHIA9qIgUgBGotAAAgBCAGai0AAEcNACAGIAUgDhAdIgUgBE0NACALIAdrQQJqIQwgBSIEIAZqIA5GDQILIAcgDU0EQCAEIQUMAgsgECAHIBJxQQJ0aigCACIHIBFNBEAgBCEFDAILIAQhBSAIQX9qIggNAAsLIAVBA0sNAQsgBiADa0EIdSAGakEBaiEGDAQLIAwNASAGIQhBAAshDCAKIQcgCQwBCwJAIAYgA00EQCAGIQgMAQsgBiEIIAZBAiAMayIEaiAUTQ0AA0AgBkF/aiIILQAAIAQgBmpBf2otAABHBEAgBiEIDAILIAVBAWohBSAIIANNDQEgBCAIIgZqIBRLDQALCyAJIQcgDEF+agshBCAFQX1qIQsgCCADayEKIAEoAgwhBgJAAkAgCCATTQRAIAYgAxAcIAEoAgwhBiAKQRBNBEAgASAGIApqNgIMDAMLIAZBEGogA0EQaiIJEBwgBkEgaiADQSBqEBwgCkExSA0BIAYgCmohDyAGQTBqIQMDQCADIAlBIGoiBhAcIANBEGogCUEwahAcIAYhCSADQSBqIgMgD0kNAAsMAQsgBiADIAggExAiCyABIAEoAgwgCmo2AgwgCkGAgARJDQAgAUEBNgIkIAEgASgCBCABKAIAa0EDdTYCKAsgASgCBCIDIAxBAWo2AgAgAyAKOwEEIAtBgIAETwRAIAFBAjYCJCABIAMgASgCAGtBA3U2AigLIAMgCzsBBiABIANBCGo2AgQgBSAIaiEDIAdFBEAgByEKIAQhCSADIQYMAQsgByEKIAQhCSADIgYgFUsNAANAIAchCSAEIQcgAygAACADIAlrKAAARwRAIAkhCiAHIQkgAyEGDAILIANBBGoiBCAEIAlrIA4QHSIGQQFqIQUgASgCDCEEAkAgAyATTQRAIAQgAxAcDAELIAQgAyADIBMQIgsgASgCBCIEQQE2AgAgBEEAOwEEIAVBgIAETwRAIAFBAjYCJCABIAQgASgCAGtBA3U2AigLIAQgBTsBBiABIARBCGo2AgQgBkEEaiADaiEDIAdFBEAgByEKIAMhBgwCCyAJIQQgByEKIAMiBiAVTQ0ACwsgBiAVSQ0ACwsgAiAKIBYgChs2AgQgAiAJIBYgCRs2AgAgDiADawtJAQF/IwBBIGsiAiQAIAJBCGogARCWASACQRhqIAJBCGogABEEACACQRhqEMgBIQAgAkEYahDFASACQQhqEJIBIAJBIGokACAAC4gWARZ/IAIoAgAiBSACKAIEIgZBACAGIAMgACgCBCAAKAIMaiIYIANGaiIHIBhrIgpLIgkbIAUgCksiChshGkEAIAUgChshCkEAIAYgCRshFCAHIAMgBGoiDkF4aiIVSQRAIA5BYGohFwNAQQAhDUEAIAprIRkgCkUgB0EBaiIPIAprKAAAIA8oAABHckUEQCAHQQVqIgQgBCAZaiAOEB1BBGohDQsCQAJAAkACQAJAIAAoAoQBQXtqDgMBAgIACyAAKAIEIQwgACgCdCEFIAAoAhAhBCAAKAIUIQkgACgCgAEhCCAAKAIoIRIgACAAKAJ4IhAgACgCfCAHQQ
QQLCIGIAQgByAMayILQQEgBXQiBWsgBCALIARrIAVLGyAJGyIRTQ0CQQAgC0EBIBB0IgRrIgUgBSALSxshECAEQX9qIRNBASAIdCEIQf+T69wDIQlBAyEEA0ACQCAGIAxqIgUgBGotAAAgBCAHai0AAEcNACAHIAUgDhAdIgUgBE0NACALIAZrQQJqIQkgByAFIgRqIA5GDQULIAYgEE0EQCAEIQUMBQsgEiAGIBNxQQJ0aigCACIGIBFNBEAgBCEFDAULIAQhBSAIQX9qIggNAAsMAwsgACgCBCEMIAAoAnQhBSAAKAIQIQQgACgCFCEJIAAoAoABIQggACgCKCESIAAgACgCeCIQIAAoAnwgB0EFECwiBiAEIAcgDGsiC0EBIAV0IgVrIAQgCyAEayAFSxsgCRsiEU0NAUEAIAtBASAQdCIEayIFIAUgC0sbIRAgBEF/aiETQQEgCHQhCEH/k+vcAyEJQQMhBANAAkAgBiAMaiIFIARqLQAAIAQgB2otAABHDQAgByAFIA4QHSIFIARNDQAgCyAGa0ECaiEJIAcgBSIEaiAORg0ECyAGIBBNBEAgBCEFDAQLIBIgBiATcUECdGooAgAiBiARTQRAIAQhBQwECyAEIQUgCEF/aiIIDQALDAILIAAoAgQhDCAAKAJ0IQUgACgCECEEIAAoAhQhCSAAKAKAASEIIAAoAighEiAAIAAoAngiECAAKAJ8IAdBBhAsIgYgBCAHIAxrIgtBASAFdCIFayAEIAsgBGsgBUsbIAkbIhFNDQBBACALQQEgEHQiBGsiBSAFIAtLGyEQIARBf2ohE0EBIAh0IQhB/5Pr3AMhCUEDIQQDQAJAIAYgDGoiBSAEai0AACAEIAdqLQAARw0AIAcgBSAOEB0iBSAETQ0AIAsgBmtBAmohCSAHIAUiBGogDkYNAwsgBiAQTQRAIAQhBQwDCyASIAYgE3FBAnRqKAIAIgYgEU0EQCAEIQUMAwsgBCEFIAhBf2oiCA0ACwwBC0EDIQVB/5Pr3AMhCQsCQCAFIA0gBSANSyIEGyILQQNNBEAgByADa0EIdSAHakEBaiEHDAELIAlBACAEGyEMIAcgDyAEGyEJAkAgByAVTw0AA0AgB0EBaiEFAkAgDEUEQEEAIQwMAQsgCkUgBSgAACAFIBlqKAAAR3INACAHQQVqIgQgBCAZaiAOEB0iBEF7Sw0AIARBBGoiBEEDbCALQQNsIAxBAWoQJGtBAWpMDQAgBSEJQQAhDCAEIQsLAkACQAJAAkAgACgChAFBe2oOAwECAgALIAAoAgQhEiAAKAJ0IQYgACgCECEEIAAoAhQhCCAAKAKAASENIAAoAighECAAIAAoAngiESAAKAJ8IAVBBBAsIgcgBCAFIBJrIg9BASAGdCIGayAEIA8gBGsgBksbIAgbIhNNDQRBACAPQQEgEXQiBGsiBiAGIA9LGyERIARBf2ohFkEBIA10IQhB/5Pr3AMhDUEDIQQDQAJAIAcgEmoiBiAEai0AACAEIAVqLQAARw0AIAUgBiAOEB0iBiAETQ0AIA8gB2tBAmohDSAFIAYiBGogDkYNBAsgByARTQRAIAQhBgwECyAQIAcgFnFBAnRqKAIAIgcgE00EQCAEIQYMBAsgBCEGIAhBf2oiCA0ACwwCCyAAKAIEIRIgACgCdCEGIAAoAhAhBCAAKAIUIQggACgCgAEhDSAAKAIoIRAgACAAKAJ4IhEgACgCfCAFQQUQLCIHIAQgBSASayIPQQEgBnQiBmsgBCAPIARrIAZLGyAIGyITTQ0DQQAgD0EBIBF0IgRrIgYgBiAPSxshESAEQX9qIRZBASANdCEIQf+T69wDIQ1BAyEEA0ACQCAHIBJqIgYgBGotAAAgBCAFai0AAEcNACAFIAYgDhAdIgYgBE0NACAPIAdrQQJqIQ0gBSAGIgRqIA5GDQMLIAcgEU0EQCAEIQYMAwsgECAHIBZxQQJ0aigCACIHIBNNBEAgBCEGDAMLIAQhBiAIQX9qIggNAAsMAQsgACgCBCESIAAoAnQhBiAAKAIQIQQgACgCFCEIIAAoAoABIQ0gACgCKCEQIAAgACgCeCIRIAAoAnwgBUEGECwiByAEIAUgEmsiD0EBIAZ0IgZrIAQgDyAEayAGSxsgCBsiE00NAkEAIA9BASARdCIEayIGIAYgD0sbIREgBEF/aiEWQQEgDXQhCEH/k+vcAyENQQMhBANAAkAgByASaiIGIARqLQAAIAQgBWotAABHDQAgBSAGIA4QHSIGIARNDQAgDyAHa0ECaiENIAUgBiIEaiAORg0CCyAHIBFNBEAgBCEGDAILIBAgByAWcUECdGooAgAiByATTQRAIAQhBgwCCyAEIQYgCEF/aiIIDQALCyAGQQRJDQEgDEEBahAkIQQgBkECdCANQQFqECRrIAtBAnQgBGtBBGpMDQEgBiELIA0hDCAFIgchCSAFIBVJDQALCwJ/IAxFBEAgCSEHIAohBiAUDAELAkAgCSADTQRAIAkhBwwBC0ECIAxrIgQgCSIHaiAYTQ0AA0AgCUF/aiIHLQAAIAQgCWpBf2otAABHBEAgCSEHDAILIAtBAWohCyAHIANNDQEgByEJIAQgB2ogGEsNAAsLIAxBfmohBiAKCyEFIAtBfWohCSAHIANrIQogASgCDCEEAkACQCAHIBdNBEAgBCADEBwgASgCDCEEIApBEE0EQCABIAQgCmo2AgwMAwsgBEEQaiADQRBqIggQHCAEQSBqIANBIGoQHCAKQTFIDQEgBCAKaiEUIARBMGohBANAIAQgCEEgaiIDEBwgBEEQaiAIQTBqEBwgAyEIIARBIGoiBCAUSQ0ACwwBCyAEIAMgByAXECILIAEgASgCDCAKajYCDCAKQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyABKAIEIgMgDEEBajYCACADIAo7AQQgCUGAgARPBEAgAUECNgIkIAEgAyABKAIAa0EDdTYCKAsgAyAJOwEGIAEgA0EIajYCBCAHIAtqIQMgBUUEQCAFIRQgBiEKIAMhBwwBCyAFIRQgBiEKIAMiByAVSw0AA0AgBSEKIAYhBSADKAAAIAMgCmsoAABHBEAgCiEUIAUhCiADIQcMAgsgA0EEaiIEIAQgCmsgDhAdIgdBAWohBiABKAIMIQQCQCADIBdNBEAgBCADEBwMAQsgBCADIAMgFxAiCyABKAIEIgRBATYCACAEQQA7AQQgBkGAgARPBEAgAUECNgIkIAEgBCABKAIAa0EDdTYCKAsgBCAGOwEGIAEgBEEIajYCBCAHQQRqIANqIQMgBUUEQCAFIRQgAyEHDAILIAohBiAFIRQgAyIHIBVNDQALCyAHIBVJDQALCyACIBQgGiAUGzYCBCACIAogGiAKGzYCACAOIANrC6keARd/IAIoAgAiBSACKAIEIgZBACAGIAMgACgCBCAAKAIMaiIaIANGaiIIIBprIgdLIgsbIAUgB0siBxshG0EAIAUgBxshE0EAIAYgCxshFSAIIAMgBGoiEEF4a
iIWSQRAIBBBYGohGQNAQQAhDEEAIBNrIRcgE0UgCEEBaiIOIBNrKAAAIA4oAABHckUEQCAIQQVqIgQgBCAXaiAQEB1BBGohDAsCQAJAAkACQAJAIAAoAoQBQXtqDgMBAgIACyAAKAIEIQogACgCdCEFIAAoAhAhBCAAKAIUIQcgACgCgAEhCSAAKAIoIQ0gACAAKAJ4Ig8gACgCfCAIQQQQLCIGIAQgCCAKayILQQEgBXQiBWsgBCALIARrIAVLGyAHGyIRTQ0CQQAgC0EBIA90IgRrIgUgBSALSxshDyAEQX9qIRJBASAJdCEHQf+T69wDIQlBAyEEA0ACQCAGIApqIgUgBGotAAAgBCAIai0AAEcNACAIIAUgEBAdIgUgBE0NACALIAZrQQJqIQkgCCAFIgRqIBBGDQULIAYgD00EQCAEIQUMBQsgDSAGIBJxQQJ0aigCACIGIBFNBEAgBCEFDAULIAQhBSAHQX9qIgcNAAsMAwsgACgCBCEKIAAoAnQhBSAAKAIQIQQgACgCFCEHIAAoAoABIQkgACgCKCENIAAgACgCeCIPIAAoAnwgCEEFECwiBiAEIAggCmsiC0EBIAV0IgVrIAQgCyAEayAFSxsgBxsiEU0NAUEAIAtBASAPdCIEayIFIAUgC0sbIQ8gBEF/aiESQQEgCXQhB0H/k+vcAyEJQQMhBANAAkAgBiAKaiIFIARqLQAAIAQgCGotAABHDQAgCCAFIBAQHSIFIARNDQAgCyAGa0ECaiEJIAggBSIEaiAQRg0ECyAGIA9NBEAgBCEFDAQLIA0gBiAScUECdGooAgAiBiARTQRAIAQhBQwECyAEIQUgB0F/aiIHDQALDAILIAAoAgQhCiAAKAJ0IQUgACgCECEEIAAoAhQhByAAKAKAASEJIAAoAighDSAAIAAoAngiDyAAKAJ8IAhBBhAsIgYgBCAIIAprIgtBASAFdCIFayAEIAsgBGsgBUsbIAcbIhFNDQBBACALQQEgD3QiBGsiBSAFIAtLGyEPIARBf2ohEkEBIAl0IQdB/5Pr3AMhCUEDIQQDQAJAIAYgCmoiBSAEai0AACAEIAhqLQAARw0AIAggBSAQEB0iBSAETQ0AIAsgBmtBAmohCSAIIAUiBGogEEYNAwsgBiAPTQRAIAQhBQwDCyANIAYgEnFBAnRqKAIAIgYgEU0EQCAEIQUMAwsgBCEFIAdBf2oiBw0ACwwBC0EDIQVB/5Pr3AMhCQsCQCAFIAwgBSAMSyIEGyIFQQNNBEAgCCADa0EIdSAIakEBaiEIDAELIAggDiAEGyELIAlBACAEGyIMIQ4gBSEJAkAgCCAWTw0AA0AgCEEBaiEJAkAgDEUEQEEAIQwMAQsgE0UgCSgAACAJIBdqKAAAR3INACAIQQVqIgQgBCAXaiAQEB0iBEF7Sw0AIARBBGoiBEEDbCAFQQNsIAxBAWoQJGtBAWpMDQAgCSELQQAhDCAEIQULAkACQAJAAkACQAJAIAAoAoQBQXtqDgMBAgIACyAAKAIEIQ8gACgCdCEHIAAoAhAhBiAAKAIUIQ4gACgCgAEhCiAAKAIoIREgACAAKAJ4IhIgACgCfCAJQQQQLCIEIAYgCSAPayINQQEgB3QiB2sgBiANIAZrIAdLGyAOGyIUTQ0DQQAgDUEBIBJ0IgZrIgcgByANSxshEiAGQX9qIRhBASAKdCEKQf+T69wDIQ5BAyEGA0ACQCAEIA9qIgcgBmotAAAgBiAJai0AAEcNACAJIAcgEBAdIgcgBk0NACANIARrQQJqIQ4gCSAHIgZqIBBGDQQLIAQgEk0EQCAGIQcMBAsgESAEIBhxQQJ0aigCACIEIBRNBEAgBiEHDAQLIAYhByAKQX9qIgoNAAsMAgsgACgCBCEPIAAoAnQhByAAKAIQIQYgACgCFCEOIAAoAoABIQogACgCKCERIAAgACgCeCISIAAoAnwgCUEFECwiBCAGIAkgD2siDUEBIAd0IgdrIAYgDSAGayAHSxsgDhsiFE0NAkEAIA1BASASdCIGayIHIAcgDUsbIRIgBkF/aiEYQQEgCnQhCkH/k+vcAyEOQQMhBgNAAkAgBCAPaiIHIAZqLQAAIAYgCWotAABHDQAgCSAHIBAQHSIHIAZNDQAgDSAEa0ECaiEOIAkgByIGaiAQRg0DCyAEIBJNBEAgBiEHDAMLIBEgBCAYcUECdGooAgAiBCAUTQRAIAYhBwwDCyAGIQcgCkF/aiIKDQALDAELIAAoAgQhDyAAKAJ0IQcgACgCECEGIAAoAhQhDiAAKAKAASEKIAAoAighESAAIAAoAngiEiAAKAJ8IAlBBhAsIgQgBiAJIA9rIg1BASAHdCIHayAGIA0gBmsgB0sbIA4bIhRNDQFBACANQQEgEnQiBmsiByAHIA1LGyESIAZBf2ohGEEBIAp0IQpB/5Pr3AMhDkEDIQYDQAJAIAQgD2oiByAGai0AACAGIAlqLQAARw0AIAkgByAQEB0iByAGTQ0AIA0gBGtBAmohDiAJIAciBmogEEYNAgsgBCASTQRAIAYhBwwCCyARIAQgGHFBAnRqKAIAIgQgFE0EQCAGIQcMAgsgBiEHIApBf2oiCg0ACwsgB0EESQ0AIAxBAWoQJCEEIAdBAnQgDkEBahAkayAFQQJ0IARrQQRqTA0AIAkhCCAOIQwgByEFDAELIAkgFk8EQCAMIQ4gBSEJDAMLIAhBAmohBkEAIQ4CfyAFIAxFDQAaAkAgE0UgBigAACAGIBdqKAAAR3INACAIQQZqIgQgBCAXaiAQEB0iBEF7Sw0AIAwhDiAFIARBBGoiBEECdCAFQQJ0QQFyIAxBAWoQJGtMDQEaIAYhC0EAIQ4gBAwBCyAMIQ4gBQshCQJAAkACQAJAIAAoAoQBQXtqDgMBAgIACyAAKAIEIQ0gACgCdCEFIAAoAhAhBCAAKAIUIQcgACgCgAEhDCAAKAIoIQ8gACAAKAJ4IhEgACgCfCAGQQQQLCIIIAQgBiANayIKQQEgBXQiBWsgBCAKIARrIAVLGyAHGyISTQ0FQQAgCkEBIBF0IgRrIgUgBSAKSxshESAEQX9qIRRBASAMdCEHQf+T69wDIQxBAyEEA0ACQCAIIA1qIgUgBGotAAAgBCAGai0AAEcNACAGIAUgEBAdIgUgBE0NACAKIAhrQQJqIQwgBiAFIgRqIBBGDQQLIAggEU0EQCAEIQUMBAsgDyAIIBRxQQJ0aigCACIIIBJNBEAgBCEFDAQLIAQhBSAHQX9qIgcNAAsMAgsgACgCBCENIAAoAnQhBSAAKAIQIQQgACgCFCEHIAAoAoABIQwgACgCKCEPIAAgACgCeCIRIAAoAnwgBkEFECwiCCAEIAYgDWsiCkEBIAV0IgVrIAQgCiAEayAFSxsgBxsiEk0NBEEAIApBASARdCIEayIFIAUgCksbIREgBEF/aiEUQQEgDHQhB0H/k+vcAyEMQQMhBANAAkAgCCANaiIFIARqLQAAIAQgBmotAABHDQAgBiAFIBAQHSIFIARNDQAgCiAIa0ECaiEMIAYg
BSIEaiAQRg0DCyAIIBFNBEAgBCEFDAMLIA8gCCAUcUECdGooAgAiCCASTQRAIAQhBQwDCyAEIQUgB0F/aiIHDQALDAELIAAoAgQhDSAAKAJ0IQUgACgCECEEIAAoAhQhByAAKAKAASEMIAAoAighDyAAIAAoAngiESAAKAJ8IAZBBhAsIgggBCAGIA1rIgpBASAFdCIFayAEIAogBGsgBUsbIAcbIhJNDQNBACAKQQEgEXQiBGsiBSAFIApLGyERIARBf2ohFEEBIAx0IQdB/5Pr3AMhDEEDIQQDQAJAIAggDWoiBSAEai0AACAEIAZqLQAARw0AIAYgBSAQEB0iBSAETQ0AIAogCGtBAmohDCAGIAUiBGogEEYNAgsgCCARTQRAIAQhBQwCCyAPIAggFHFBAnRqKAIAIgggEk0EQCAEIQUMAgsgBCEFIAdBf2oiBw0ACwsgBUEESQ0CIA5BAWoQJCEEIAYhCCAFQQJ0IAxBAWoQJGsgCUECdCAEa0EHakwNAgsgCCELIAwhDiAFIQkgCCAWSQ0ACwsCfyAORQRAIAshBSAVIQYgEwwBCwJAIAsgA00EQCALIQUMAQtBAiAOayIEIAsiBWogGk0NAANAIAtBf2oiBS0AACAEIAtqQX9qLQAARwRAIAshBQwCCyAJQQFqIQkgBSADTQ0BIAUhCyAEIAVqIBpLDQALCyATIQYgDkF+agshBCAJQX1qIRMgBSADayELIAEoAgwhBwJAAkAgBSAZTQRAIAcgAxAcIAEoAgwhCCALQRBNBEAgASAIIAtqNgIMDAMLIAhBEGogA0EQaiIHEBwgCEEgaiADQSBqEBwgC0ExSA0BIAggC2ohFSAIQTBqIQgDQCAIIAdBIGoiAxAcIAhBEGogB0EwahAcIAMhByAIQSBqIgggFUkNAAsMAQsgByADIAUgGRAiCyABIAEoAgwgC2o2AgwgC0GAgARJDQAgAUEBNgIkIAEgASgCBCABKAIAa0EDdTYCKAsgASgCBCIDIA5BAWo2AgAgAyALOwEEIBNBgIAETwRAIAFBAjYCJCABIAMgASgCAGtBA3U2AigLIAMgEzsBBiABIANBCGo2AgQgBSAJaiEDIAZFBEAgBiEVIAQhEyADIQgMAQsgBiEVIAQhEyADIgggFksNAANAIAYhEyAEIQYgAygAACADIBNrKAAARwRAIBMhFSAGIRMgAyEIDAILIANBBGoiBCAEIBNrIBAQHSIHQQFqIQUgASgCDCEEAkAgAyAZTQRAIAQgAxAcDAELIAQgAyADIBkQIgsgASgCBCIEQQE2AgAgBEEAOwEEIAVBgIAETwRAIAFBAjYCJCABIAQgASgCAGtBA3U2AigLIAQgBTsBBiABIARBCGo2AgQgB0EEaiADaiEDIAZFBEAgBiEVIAMhCAwCCyATIQQgBiEVIAMiCCAWTQ0ACwsgCCAWSQ0ACwsgAiAVIBsgFRs2AgQgAiATIBsgExs2AgAgECADawvyAgEPfwJAIAAoAnAiBygCICABIAcoAnwgBhBaQQJ0aigCACIGIAcoAhAiCk0NACAHKAIAIg8gBygCBCIMayILQX8gBygCeEF/anRBf3MiDWsgCiALIAprIA1LGyEOIAAoAgQiCSAAKAIMaiEQIAEgCWsiCEECaiERIAhBAWohEiAJIAAoAhAgC2siE2ohFCAHKAIoIRVBACEAQQAhCQNAIAEgCSAAIAkgAEkbIgdqIAYgDGogB2ogAiAPIBAQICAHaiIHIARLBEAgByAEa0ECdCASIAYgE2oiCGsQJCADKAIAQQFqECRrSgRAIAMgESAIazYCACAHIQQLIAEgB2ogAkYNAgsgFSAGIA1xQQN0aiEIAkAgDCAUIAYgB2ogC0kbIAZqIAdqLQAAIAEgB2otAABJBEAgBiAOTQ0DIAhBBGohCCAHIQkgACEHDAELIAYgDk0NAgsgCCgCACIGIApNDQEgByEAIAVBf2oiBQ0ACwsgBAvDAwETfyMAQRBrIgwkACAAKAIoIhJBfyAAKAJ4QX9qdEF/cyITIAFxQQN0aiIIQQRqIQoCQCADRSAIKAIAIgYgAUEBIAAoAnR0IglrIAAoAhAiByABIAdrIAlLGyIUTXINACAAKAIIIg0gACgCDCIHaiIVIAIgByABSyIQGyEOIAAoAgQiCyAHaiEWIA0gCyAQGyABaiEPQQAhAiAFQQFGIRdBACEJA0ACQCAQIAVBAUdyRUEAIAIgCSACIAlJGyIAIAZqIgEgB0kbRQRAIAAgD2ogDSALIAEgB0kbIAsgFxsgBmoiESAAaiAOEB0gAGohAAwBCyAGIA1qIgEgBiALaiAAIA9qIAAgAWogDiAVIBYQICAAaiIAIAZqIAdJGyERCyAAIA9qIhggDkYNASASIAYgE3FBA3RqIQECQAJAIAAgEWotAAAgGC0AAEkEQCAIIAY2AgAgBiAESw0BIAxBDGohCAwECyAKIAY2AgAgBiAESwRAIAEhCiAAIQkMAgsgDEEMaiEKDAMLIAFBBGoiASEIIAAhAgsgASgCACIGIBRNDQEgA0F/aiIDDQALCyAKQQA2AgAgCEEANgIAIAxBEGokAAv7CgEQfyMAQRBrIgwkACACKAIAIgYgAigCBCIIQQAgCCADIAAoAgQgACgCDGoiEiADRmoiBSASayIHSyIJGyAGIAdLIgcbIRNBACAGIAcbIQdBACAIIAkbIQggBSADIARqIg1BeGoiD0kEQCANQWBqIREDQEEAIQZBACAHayEOIAdFIAVBAWoiCSAHaygAACAJKAAAR3JFBEAgBUEFaiIEIAQgDmogDRAdQQRqIQYLIAxB/5Pr3AM2AgwCQCAAIAUgDSAMQQxqEJwBIgQgBiAEIAZLIgYbIgtBA00EQCAFIANrQQh1IAVqQQFqIQUMAQsgDCgCDEEAIAYbIQQgBSAJIAYbIQYCQCAFIA9PDQADQCAFQQFqIQkCQCAERQRAQQAhBAwBCyAHRSAJKAAAIAkgDmooAABHcg0AIAVBBWoiCiAKIA5qIA0QHSIKQXtLDQAgCkEEaiIKQQNsIAtBA2wgBEEBahAka0EBakwNACAJIQZBACEEIAohCwsgDEH/k+vcAzYCCAJ/AkAgACAJIA0gDEEIahCcASIKQQRJDQAgBEEBahAkIRAgCkECdCAMKAIIIhRBAWoQJGsgC0ECdCAQa0EEakwNACAJIQUgCiELIBQMAQsgCSAPTw0CIAVBAmohCQJAIARFBEBBACEEDAELIAdFIAkoAAAgCSAOaigAAEdyDQAgBUEGaiIFIAUgDmogDRAdIgVBe0sNACAFQQRqIgVBAnQgC0ECdEEBciAEQQFqECRrTA0AIAkhBkEAIQQgBSELCyAMQf+T69wDNgIEIAAgCSANIAxBBGoQnAEiCkEESQ0CIARBAWoQJCEFIApBAnQgDCgCBCIQQQFqECRrIAtBAnQgBWtBB2pMDQIgCSEFIAohCyAQCyEEIAUhBiAFIA9JDQALCwJ/IARFBEAgBiEFIAchCSAIDAELAkAgBiADTQRAIAYhBQwBC0E
CIARrIgggBiIFaiASTQ0AA0AgBkF/aiIFLQAAIAYgCGpBf2otAABHBEAgBiEFDAILIAtBAWohCyAFIANNDQEgBSEGIAUgCGogEksNAAsLIARBfmohCSAHCyEGIAtBfWohDiAFIANrIQogASgCDCEHAkACQCAFIBFNBEAgByADEBwgASgCDCEIIApBEE0EQCABIAggCmo2AgwMAwsgCEEQaiADQRBqIgcQHCAIQSBqIANBIGoQHCAKQTFIDQEgCCAKaiEQIAhBMGohAwNAIAMgB0EgaiIIEBwgA0EQaiAHQTBqEBwgCCEHIANBIGoiAyAQSQ0ACwwBCyAHIAMgBSARECILIAEgASgCDCAKajYCDCAKQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyABKAIEIgMgBEEBajYCACADIAo7AQQgDkGAgARPBEAgAUECNgIkIAEgAyABKAIAa0EDdTYCKAsgAyAOOwEGIAEgA0EIajYCBCAFIAtqIQMgBkUEQCAGIQggCSEHIAMhBQwBCyAGIQggCSEHIAMhBSADIA9LDQADQCAGIQcgCSEGIAMoAAAgAyAHaygAAEcEQCAHIQggBiEHIAMhBQwCCyADQQRqIgQgBCAHayANEB0iCEEBaiEFIAEoAgwhBAJAIAMgEU0EQCAEIAMQHAwBCyAEIAMgAyARECILIAEoAgQiBEEBNgIAIARBADsBBCAFQYCABE8EQCABQQI2AiQgASAEIAEoAgBrQQN1NgIoCyAEIAU7AQYgASAEQQhqNgIEIAhBBGogA2ohAyAGRQRAIAYhCCADIQUMAgsgByEJIAYhCCADIQUgAyAPTQ0ACwsgBSAPSQ0ACwsgAiAIIBMgCBs2AgQgAiAHIBMgBxs2AgAgDEEQaiQAIA0gA2sLphQBF38gACgCfCERIAAoAiAhEiAAKAIIIQ0gACgCiAEiCSAJRWohFyADIARqIg5BeGohEyACKAIEIQYgAigCACEJAkAgACgCECAAKAIUIAMgACgCBCIMayAEaiIEIAAoAnQiBxAnIg8gACgCDCIASQRAIBMgA0sEQCANIA8gACAAIA9JGyIUaiEVIAwgFGohFiANIA9qIRwgDkFgaiEQIBRBf2ohGCADIQADQCASIAMgESAFEB5BAnRqIgQoAgAhCiAEIAMgDGsiGTYCAAJAAkACQAJAIAMgCSAMamtBAWoiBCAPTSAYIARrQQNJckUEQCAEIA0gDCAEIBRJIgcbaiIEKAAAIANBAWoiCygAAEYNAQsgCiAPTwRAIA0gDCAKIBRJIgQbIApqIgcoAAAgAygAAEYNAgsgAyAXIAMgAGtBCHVqaiEDDAMLIANBBWogBEEEaiAOIBUgDiAHGyAWECAiGkEBaiEKIAsgAGshCCABKAIMIQQCQAJAIAsgEE0EQCAEIAAQHCABKAIMIQcgCEEQTQRAIAEgByAIajYCDAwDCyAHQRBqIABBEGoiBBAcIAdBIGogAEEgahAcIAhBMUgNASAHIAhqIRsgB0EwaiEAA0AgACAEQSBqIgcQHCAAQRBqIARBMGoQHCAHIQQgAEEgaiIAIBtJDQALDAELIAQgACALIBAQIgsgASABKAIMIAhqNgIMIAhBgIAESQ0AIAFBATYCJCABIAEoAgQgASgCAGtBA3U2AigLIAEoAgQiAEEBNgIAIAAgCDsBBCAKQYCABE8EQCABQQI2AiQgASAAIAEoAgBrQQN1NgIoCyAAIAo7AQYgASAAQQhqNgIEIBpBBGogC2ohAAwBCyADQQRqIAdBBGogDiAVIA4gBBsgFhAgQQRqIQYCQCAHIBwgFiAEGyILTQRAIAMhBAwBCyADIQggAyEEIAMgAE0NAANAIAhBf2oiBC0AACAHQX9qIgctAABHBEAgCCEEDAILIAZBAWohBiAHIAtNDQEgBCEIIAQgAEsNAAsLIBkgCmshCCAGQX1qIRogBCAAayELIAEoAgwhBwJAAkAgBCAQTQRAIAcgABAcIAEoAgwhCiALQRBNBEAgASAKIAtqNgIMDAMLIApBEGogAEEQaiIHEBwgCkEgaiAAQSBqEBwgC0ExSA0BIAogC2ohGyAKQTBqIQADQCAAIAdBIGoiChAcIABBEGogB0EwahAcIAohByAAQSBqIgAgG0kNAAsMAQsgByAAIAQgEBAiCyABIAEoAgwgC2o2AgwgC0GAgARJDQAgAUEBNgIkIAEgASgCBCABKAIAa0EDdTYCKAsgASgCBCIAIAhBA2o2AgAgACALOwEEIBpBgIAETwRAIAFBAjYCJCABIAAgASgCAGtBA3U2AigLIAAgGjsBBiABIABBCGo2AgQgBCAGaiEAIAkhBiAIIQkLIAAgE0sEQCAAIQMMAQsgEiADQQJqIBEgBRAeQQJ0aiAZQQJqNgIAIBIgAEF+aiIDIBEgBRAeQQJ0aiADIAxrNgIAIAkhByAGIQQDQAJAIAQhCSAHIQQgACAMayIGIAlrIgMgD00gGCADa0EDSXINACADIA0gDCADIBRJIgcbaiIDKAAAIAAoAABHDQAgAEEEaiADQQRqIA4gFSAOIAcbIBYQICIIQQFqIQcgASgCDCEDAkAgACAQTQRAIAMgABAcDAELIAMgACAAIBAQIgsgASgCBCIDQQE2AgAgA0EAOwEEIAdBgIAETwRAIAFBAjYCJCABIAMgASgCAGtBA3U2AigLIAMgBzsBBiABIANBCGo2AgQgEiAAIBEgBRAeQQJ0aiAGNgIAIAkhByAEIQYgCEEEaiAAaiIAIQMgACATTQ0BDAILCyAJIQYgBCEJIAAhAwsgAyATSQ0ACyAAIQMLIAIgCTYCAAwBCyAJIAZBACAGIAMgDCAEQQEgB3QiB2sgACAEIABrIAdLGyIUaiIQIANGaiIAIBBrIgRLIggbIAkgBEsiBBshFkEAIAkgBBshB0EAIAYgCBshCSAAQQFqIgQgE0kEQCAXQQFqIRcgDkFgaiEPA0AgACARIAUQHiEGIAAoAAAhCyAEIBEgBRAeIQggBCgAACEVIBIgCEECdGoiCigCACEIIBIgBkECdGoiDSgCACEGIA0gACAMayIYNgIAIAogBCAMazYCAAJ/AkAgB0UgAEECaiINIAdrIgooAAAgDSgAAEdyRQRAIAogAC0AASAKQX9qLQAARiIEayEGIA0gBGshAEEAIRUMAQsCQAJAAkAgBiAUSwRAIAsgBiAMaiIGKAAARg0BCyAIIBRNDQEgFSAIIAxqIgYoAABHDQEgBCEACyAAIAZrIgpBAmohFUEAIQQgBiAQTSAAIANNcg0BA0AgAEF/aiIILQAAIAZBf2oiCy0AAEcNAiAEQQFqIQQgCCADSwRAIAghACALIgYgEEsNAQsLIAchCSALIQYgCiEHIAghAAwCCyAEIBcgACADa0EHdmoiBmohBCAAIAZqDAILIAchCSAKIQcLIAAgBGpBBGogBCAGakEEaiAOEB0gBGoiC0EBaiEKIAAgA2shCCABKAIMIQQCQAJAIAAgD00EQCAEIAMQHCABKAIMIQYgCEEQTQ
RAIAEgBiAIaiIGNgIMDAMLIAZBEGogA0EQaiIEEBwgBkEgaiADQSBqEBwgCEExSA0BIAYgCGohGSAGQTBqIQMDQCADIARBIGoiBhAcIANBEGogBEEwahAcIAYhBCADQSBqIgMgGUkNAAsMAQsgBCADIAAgDxAiCyABIAEoAgwgCGoiBjYCDCAIQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyABKAIEIgMgFUEBajYCACADIAg7AQQgCkGAgARPBEAgAUECNgIkIAEgAyABKAIAa0EDdTYCKAsgAyAKOwEGIAEgA0EIajYCBCALQQRqIABqIgNBAWohBAJAIAMgE0sNACASIA0gESAFEB5BAnRqIBhBAmo2AgAgEiADQX5qIgAgESAFEB5BAnRqIAAgDGs2AgAgCUUEQEEAIQkMAQsgAygAACADIAlrKAAARw0AQQAgCWshBANAIAkhACAHIQkgACEHIANBBGoiACAAIARqIA4QHSEEIBIgAyARIAUQHkECdGogAyAMazYCACAEQQFqIQgCQCADIA9NBEAgBiADEBwMAQsgBiADIAMgDxAiCyABKAIEIgBBATYCACAAQQA7AQQgCEGAgARPBEAgAUECNgIkIAEgACABKAIAa0EDdTYCKAsgACAIOwEGIAEgAEEIajYCBAJAIAlFIAMgBGpBBGoiAyATS3INACADKAAAIAMgCWsoAABHDQBBACAJayEEIAEoAgwhBgwBCwsgA0EBaiEECyADCyEAIAQgE0kNAAsLIAIgByAWIAcbNgIAIAkgFiAJGyEGCyACIAY2AgQgDiADawsiACAAIAEgAiADIAQgACgChAEiAEEEIABBe2pBA0kbEL4DC486ARt/AkACQAJAAkACQCAAKAKEAUF7ag4DAwIBAAsgAigCBCEFIAIoAgAhCiADIAAoAnAiBigCACIRIAMgACgCBCIOIAAoAgwiD2oiEmtqIAYoAgQiEyAGKAIMIhdqIhxGaiIHIAMgBGoiDUF4aiIWSQRAIAAoAogBIgQgBEVqIRggACgCfCEUIAYoAnwhHSAAKAIgIRUgBigCICEeIBMgEyARayAPaiIZayEfIA1BYGohDCAPQX9qIRoDQCAVIAcgFEEEEB5BAnRqIgAoAgAhCyAAIAcgDmsiGzYCAAJAAkACQCAaIAdBAWoiACAKIA5qayIEa0EDSQ0AIBMgBCAZa2ogACAKayAEIA9JIgQbIgYoAAAgACgAAEcNACAHQQVqIAZBBGogDSARIA0gBBsgEhAgIglBAWohCyAAIANrIQggASgCDCEEAkACQCAAIAxNBEAgBCADEBwgASgCDCEGIAhBEE0EQCABIAYgCGo2AgwMAwsgBkEQaiADQRBqIgQQHCAGQSBqIANBIGoQHCAIQTFIDQEgBiAIaiEQIAZBMGohAwNAIAMgBEEgaiIGEBwgA0EQaiAEQTBqEBwgBiEEIANBIGoiAyAQSQ0ACwwBCyAEIAMgACAMECILIAEgASgCDCAIajYCDCAIQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyAJQQRqIQQgASgCBCIDQQE2AgAgAyAIOwEEIAtBgIAESQ0BIAFBAjYCJCABIAMgASgCAGtBA3U2AigMAQsCQCALIA9NBEACQCAeIAcgHUEEEB5BAnRqKAIAIgggF00NACAIIBNqIgYoAAAgBygAAEcNACAHQQRqIAZBBGogDSARIBIQIEEEaiEEIBsgCGshCwJAIAcgA00EQCAHIQAMAQsgByEFIAchACAIIBdMDQADQCAFQX9qIgAtAAAgBkF/aiIGLQAARwRAIAUhAAwCCyAEQQFqIQQgACADTQ0BIAAhBSAGIBxLDQALCyALIBlrIQYgBEF9aiELIAAgA2shCSABKAIMIQUCQAJAIAAgDE0EQCAFIAMQHCABKAIMIQggCUEQTQRAIAEgCCAJajYCDAwDCyAIQRBqIANBEGoiBRAcIAhBIGogA0EgahAcIAlBMUgNASAIIAlqIRAgCEEwaiEDA0AgAyAFQSBqIggQHCADQRBqIAVBMGoQHCAIIQUgA0EgaiIDIBBJDQALDAELIAUgAyAAIAwQIgsgASABKAIMIAlqNgIMIAlBgIAESQ0AIAFBATYCJCABIAEoAgQgASgCAGtBA3U2AigLIAEoAgQiAyAGQQNqNgIAIAMgCTsBBCALQYCABEkNAiABQQI2AiQgASADIAEoAgBrQQN1NgIoDAILIAcgByADa0EIdSAYamohBwwDCyALIA5qIggoAAAgBygAAEcEQCAHIAcgA2tBCHUgGGpqIQcMAwsgB0EEaiAIQQRqIA0QHUEEaiEEAkAgByADTQRAIAchAAwBCyAHIQYgCCEFIAchACALIA9MDQADQCAGQX9qIgAtAAAgBUF/aiIFLQAARwRAIAYhAAwCCyAEQQFqIQQgACADTQ0BIAAhBiAFIBJLDQALCyAHIAhrIQYgBEF9aiELIAAgA2shCSABKAIMIQUCQAJAIAAgDE0EQCAFIAMQHCABKAIMIQggCUEQTQRAIAEgCCAJajYCDAwDCyAIQRBqIANBEGoiBRAcIAhBIGogA0EgahAcIAlBMUgNASAIIAlqIRAgCEEwaiEDA0AgAyAFQSBqIggQHCADQRBqIAVBMGoQHCAIIQUgA0EgaiIDIBBJDQALDAELIAUgAyAAIAwQIgsgASABKAIMIAlqNgIMIAlBgIAESQ0AIAFBATYCJCABIAEoAgQgASgCAGtBA3U2AigLIAEoAgQiAyAGQQNqNgIAIAMgCTsBBCALQYCABE8EQCABQQI2AiQgASADIAEoAgBrQQN1NgIoCyAKIQUgBiEKDAELIAohBSAGIQoLIAMgCzsBBiABIANBCGo2AgQgACAEaiIDIBZLBEAgAyEHDAELIBUgB0ECaiAUQQQQHkECdGogG0ECajYCACAVIANBfmoiACAUQQQQHkECdGogACAOazYCACAKIQQgBSEAA0ACQCAAIQogBCEAIBogAyAOayIHIAprIgRrQQNJDQAgBCAfIA4gBCAPSSIFG2oiBCgAACADKAAARw0AIANBBGogBEEEaiANIBEgDSAFGyASECAiBkEBaiEFIAEoAgwhBAJAIAMgDE0EQCAEIAMQHAwBCyAEIAMgAyAMECILIAEoAgQiBEEBNgIAIARBADsBBCAFQYCABE8EQCABQQI2AiQgASAEIAEoAgBrQQN1NgIoCyAEIAU7AQYgASAEQQhqNgIEIBUgAyAUQQQQHkECdGogBzYCACAKIQQgACEFIAZBBGogA2oiAyEHIAMgFk0NAQwCCwsgCiEFIAAhCiADIQcLIAcgFkkNAAsLDAMLIAIoAgQhBSACKAIAIQogAyAAKAJwIgYoAgAiESADIAAoAgQiDiAAKAIMIg9qIhJraiAGKAIEIhMgBigCDCIXaiIcRmoiByADIARqIg1BeGoiFkkEQCAAKAKIASIEIARFaiEYIAAoAnwhFCAGKAJ8IR0gACgCICEVIAYoAiAhHiATIBMgEWsgD
2oiGWshHyANQWBqIQwgD0F/aiEaA0AgFSAHIBRBBxAeQQJ0aiIAKAIAIQsgACAHIA5rIhs2AgACQAJAAkAgGiAHQQFqIgAgCiAOamsiBGtBA0kNACATIAQgGWtqIAAgCmsgBCAPSSIEGyIGKAAAIAAoAABHDQAgB0EFaiAGQQRqIA0gESANIAQbIBIQICIJQQFqIQsgACADayEIIAEoAgwhBAJAAkAgACAMTQRAIAQgAxAcIAEoAgwhBiAIQRBNBEAgASAGIAhqNgIMDAMLIAZBEGogA0EQaiIEEBwgBkEgaiADQSBqEBwgCEExSA0BIAYgCGohECAGQTBqIQMDQCADIARBIGoiBhAcIANBEGogBEEwahAcIAYhBCADQSBqIgMgEEkNAAsMAQsgBCADIAAgDBAiCyABIAEoAgwgCGo2AgwgCEGAgARJDQAgAUEBNgIkIAEgASgCBCABKAIAa0EDdTYCKAsgCUEEaiEEIAEoAgQiA0EBNgIAIAMgCDsBBCALQYCABEkNASABQQI2AiQgASADIAEoAgBrQQN1NgIoDAELAkAgCyAPTQRAAkAgHiAHIB1BBxAeQQJ0aigCACIIIBdNDQAgCCATaiIGKAAAIAcoAABHDQAgB0EEaiAGQQRqIA0gESASECBBBGohBCAbIAhrIQsCQCAHIANNBEAgByEADAELIAchBSAHIQAgCCAXTA0AA0AgBUF/aiIALQAAIAZBf2oiBi0AAEcEQCAFIQAMAgsgBEEBaiEEIAAgA00NASAAIQUgBiAcSw0ACwsgCyAZayEGIARBfWohCyAAIANrIQkgASgCDCEFAkACQCAAIAxNBEAgBSADEBwgASgCDCEIIAlBEE0EQCABIAggCWo2AgwMAwsgCEEQaiADQRBqIgUQHCAIQSBqIANBIGoQHCAJQTFIDQEgCCAJaiEQIAhBMGohAwNAIAMgBUEgaiIIEBwgA0EQaiAFQTBqEBwgCCEFIANBIGoiAyAQSQ0ACwwBCyAFIAMgACAMECILIAEgASgCDCAJajYCDCAJQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyABKAIEIgMgBkEDajYCACADIAk7AQQgC0GAgARJDQIgAUECNgIkIAEgAyABKAIAa0EDdTYCKAwCCyAHIAcgA2tBCHUgGGpqIQcMAwsgCyAOaiIIKAAAIAcoAABHBEAgByAHIANrQQh1IBhqaiEHDAMLIAdBBGogCEEEaiANEB1BBGohBAJAIAcgA00EQCAHIQAMAQsgByEGIAghBSAHIQAgCyAPTA0AA0AgBkF/aiIALQAAIAVBf2oiBS0AAEcEQCAGIQAMAgsgBEEBaiEEIAAgA00NASAAIQYgBSASSw0ACwsgByAIayEGIARBfWohCyAAIANrIQkgASgCDCEFAkACQCAAIAxNBEAgBSADEBwgASgCDCEIIAlBEE0EQCABIAggCWo2AgwMAwsgCEEQaiADQRBqIgUQHCAIQSBqIANBIGoQHCAJQTFIDQEgCCAJaiEQIAhBMGohAwNAIAMgBUEgaiIIEBwgA0EQaiAFQTBqEBwgCCEFIANBIGoiAyAQSQ0ACwwBCyAFIAMgACAMECILIAEgASgCDCAJajYCDCAJQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyABKAIEIgMgBkEDajYCACADIAk7AQQgC0GAgARPBEAgAUECNgIkIAEgAyABKAIAa0EDdTYCKAsgCiEFIAYhCgwBCyAKIQUgBiEKCyADIAs7AQYgASADQQhqNgIEIAAgBGoiAyAWSwRAIAMhBwwBCyAVIAdBAmogFEEHEB5BAnRqIBtBAmo2AgAgFSADQX5qIgAgFEEHEB5BAnRqIAAgDms2AgAgCiEEIAUhAANAAkAgACEKIAQhACAaIAMgDmsiByAKayIEa0EDSQ0AIAQgHyAOIAQgD0kiBRtqIgQoAAAgAygAAEcNACADQQRqIARBBGogDSARIA0gBRsgEhAgIgZBAWohBSABKAIMIQQCQCADIAxNBEAgBCADEBwMAQsgBCADIAMgDBAiCyABKAIEIgRBATYCACAEQQA7AQQgBUGAgARPBEAgAUECNgIkIAEgBCABKAIAa0EDdTYCKAsgBCAFOwEGIAEgBEEIajYCBCAVIAMgFEEHEB5BAnRqIAc2AgAgCiEEIAAhBSAGQQRqIANqIgMhByADIBZNDQEMAgsLIAohBSAAIQogAyEHCyAHIBZJDQALCwwCCyACKAIEIQUgAigCACEKIAMgACgCcCIGKAIAIhEgAyAAKAIEIg4gACgCDCIPaiISa2ogBigCBCITIAYoAgwiF2oiHEZqIgcgAyAEaiINQXhqIhZJBEAgACgCiAEiBCAERWohGCAAKAJ8IRQgBigCfCEdIAAoAiAhFSAGKAIgIR4gEyATIBFrIA9qIhlrIR8gDUFgaiEMIA9Bf2ohGgNAIBUgByAUQQYQHkECdGoiACgCACELIAAgByAOayIbNgIAAkACQAJAIBogB0EBaiIAIAogDmprIgRrQQNJDQAgEyAEIBlraiAAIAprIAQgD0kiBBsiBigAACAAKAAARw0AIAdBBWogBkEEaiANIBEgDSAEGyASECAiCUEBaiELIAAgA2shCCABKAIMIQQCQAJAIAAgDE0EQCAEIAMQHCABKAIMIQYgCEEQTQRAIAEgBiAIajYCDAwDCyAGQRBqIANBEGoiBBAcIAZBIGogA0EgahAcIAhBMUgNASAGIAhqIRAgBkEwaiEDA0AgAyAEQSBqIgYQHCADQRBqIARBMGoQHCAGIQQgA0EgaiIDIBBJDQALDAELIAQgAyAAIAwQIgsgASABKAIMIAhqNgIMIAhBgIAESQ0AIAFBATYCJCABIAEoAgQgASgCAGtBA3U2AigLIAlBBGohBCABKAIEIgNBATYCACADIAg7AQQgC0GAgARJDQEgAUECNgIkIAEgAyABKAIAa0EDdTYCKAwBCwJAIAsgD00EQAJAIB4gByAdQQYQHkECdGooAgAiCCAXTQ0AIAggE2oiBigAACAHKAAARw0AIAdBBGogBkEEaiANIBEgEhAgQQRqIQQgGyAIayELAkAgByADTQRAIAchAAwBCyAHIQUgByEAIAggF0wNAANAIAVBf2oiAC0AACAGQX9qIgYtAABHBEAgBSEADAILIARBAWohBCAAIANNDQEgACEFIAYgHEsNAAsLIAsgGWshBiAEQX1qIQsgACADayEJIAEoAgwhBQJAAkAgACAMTQRAIAUgAxAcIAEoAgwhCCAJQRBNBEAgASAIIAlqNgIMDAMLIAhBEGogA0EQaiIFEBwgCEEgaiADQSBqEBwgCUExSA0BIAggCWohECAIQTBqIQMDQCADIAVBIGoiCBAcIANBEGogBUEwahAcIAghBSADQSBqIgMgEEkNAAsMAQsgBSADIAAgDBAiCyABIAEoAgwgCWo2AgwgCUGAgARJDQAgAUEBNgIkIAEgASgCBCABKAIAa0EDdTYCKAsgASgCBCID
IAZBA2o2AgAgAyAJOwEEIAtBgIAESQ0CIAFBAjYCJCABIAMgASgCAGtBA3U2AigMAgsgByAHIANrQQh1IBhqaiEHDAMLIAsgDmoiCCgAACAHKAAARwRAIAcgByADa0EIdSAYamohBwwDCyAHQQRqIAhBBGogDRAdQQRqIQQCQCAHIANNBEAgByEADAELIAchBiAIIQUgByEAIAsgD0wNAANAIAZBf2oiAC0AACAFQX9qIgUtAABHBEAgBiEADAILIARBAWohBCAAIANNDQEgACEGIAUgEksNAAsLIAcgCGshBiAEQX1qIQsgACADayEJIAEoAgwhBQJAAkAgACAMTQRAIAUgAxAcIAEoAgwhCCAJQRBNBEAgASAIIAlqNgIMDAMLIAhBEGogA0EQaiIFEBwgCEEgaiADQSBqEBwgCUExSA0BIAggCWohECAIQTBqIQMDQCADIAVBIGoiCBAcIANBEGogBUEwahAcIAghBSADQSBqIgMgEEkNAAsMAQsgBSADIAAgDBAiCyABIAEoAgwgCWo2AgwgCUGAgARJDQAgAUEBNgIkIAEgASgCBCABKAIAa0EDdTYCKAsgASgCBCIDIAZBA2o2AgAgAyAJOwEEIAtBgIAETwRAIAFBAjYCJCABIAMgASgCAGtBA3U2AigLIAohBSAGIQoMAQsgCiEFIAYhCgsgAyALOwEGIAEgA0EIajYCBCAAIARqIgMgFksEQCADIQcMAQsgFSAHQQJqIBRBBhAeQQJ0aiAbQQJqNgIAIBUgA0F+aiIAIBRBBhAeQQJ0aiAAIA5rNgIAIAohBCAFIQADQAJAIAAhCiAEIQAgGiADIA5rIgcgCmsiBGtBA0kNACAEIB8gDiAEIA9JIgUbaiIEKAAAIAMoAABHDQAgA0EEaiAEQQRqIA0gESANIAUbIBIQICIGQQFqIQUgASgCDCEEAkAgAyAMTQRAIAQgAxAcDAELIAQgAyADIAwQIgsgASgCBCIEQQE2AgAgBEEAOwEEIAVBgIAETwRAIAFBAjYCJCABIAQgASgCAGtBA3U2AigLIAQgBTsBBiABIARBCGo2AgQgFSADIBRBBhAeQQJ0aiAHNgIAIAohBCAAIQUgBkEEaiADaiIDIQcgAyAWTQ0BDAILCyAKIQUgACEKIAMhBwsgByAWSQ0ACwsMAQsgAigCBCEFIAIoAgAhCiADIAAoAnAiBigCACIRIAMgACgCBCIOIAAoAgwiD2oiEmtqIAYoAgQiEyAGKAIMIhdqIhxGaiIHIAMgBGoiDUF4aiIWSQRAIAAoAogBIgQgBEVqIRggACgCfCEUIAYoAnwhHSAAKAIgIRUgBigCICEeIBMgEyARayAPaiIZayEfIA1BYGohDCAPQX9qIRoDQCAVIAcgFEEFEB5BAnRqIgAoAgAhCyAAIAcgDmsiGzYCAAJAAkACQCAaIAdBAWoiACAKIA5qayIEa0EDSQ0AIBMgBCAZa2ogACAKayAEIA9JIgQbIgYoAAAgACgAAEcNACAHQQVqIAZBBGogDSARIA0gBBsgEhAgIglBAWohCyAAIANrIQggASgCDCEEAkACQCAAIAxNBEAgBCADEBwgASgCDCEGIAhBEE0EQCABIAYgCGo2AgwMAwsgBkEQaiADQRBqIgQQHCAGQSBqIANBIGoQHCAIQTFIDQEgBiAIaiEQIAZBMGohAwNAIAMgBEEgaiIGEBwgA0EQaiAEQTBqEBwgBiEEIANBIGoiAyAQSQ0ACwwBCyAEIAMgACAMECILIAEgASgCDCAIajYCDCAIQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyAJQQRqIQQgASgCBCIDQQE2AgAgAyAIOwEEIAtBgIAESQ0BIAFBAjYCJCABIAMgASgCAGtBA3U2AigMAQsCQCALIA9NBEACQCAeIAcgHUEFEB5BAnRqKAIAIgggF00NACAIIBNqIgYoAAAgBygAAEcNACAHQQRqIAZBBGogDSARIBIQIEEEaiEEIBsgCGshCwJAIAcgA00EQCAHIQAMAQsgByEFIAchACAIIBdMDQADQCAFQX9qIgAtAAAgBkF/aiIGLQAARwRAIAUhAAwCCyAEQQFqIQQgACADTQ0BIAAhBSAGIBxLDQALCyALIBlrIQYgBEF9aiELIAAgA2shCSABKAIMIQUCQAJAIAAgDE0EQCAFIAMQHCABKAIMIQggCUEQTQRAIAEgCCAJajYCDAwDCyAIQRBqIANBEGoiBRAcIAhBIGogA0EgahAcIAlBMUgNASAIIAlqIRAgCEEwaiEDA0AgAyAFQSBqIggQHCADQRBqIAVBMGoQHCAIIQUgA0EgaiIDIBBJDQALDAELIAUgAyAAIAwQIgsgASABKAIMIAlqNgIMIAlBgIAESQ0AIAFBATYCJCABIAEoAgQgASgCAGtBA3U2AigLIAEoAgQiAyAGQQNqNgIAIAMgCTsBBCALQYCABEkNAiABQQI2AiQgASADIAEoAgBrQQN1NgIoDAILIAcgByADa0EIdSAYamohBwwDCyALIA5qIggoAAAgBygAAEcEQCAHIAcgA2tBCHUgGGpqIQcMAwsgB0EEaiAIQQRqIA0QHUEEaiEEAkAgByADTQRAIAchAAwBCyAHIQYgCCEFIAchACALIA9MDQADQCAGQX9qIgAtAAAgBUF/aiIFLQAARwRAIAYhAAwCCyAEQQFqIQQgACADTQ0BIAAhBiAFIBJLDQALCyAHIAhrIQYgBEF9aiELIAAgA2shCSABKAIMIQUCQAJAIAAgDE0EQCAFIAMQHCABKAIMIQggCUEQTQRAIAEgCCAJajYCDAwDCyAIQRBqIANBEGoiBRAcIAhBIGogA0EgahAcIAlBMUgNASAIIAlqIRAgCEEwaiEDA0AgAyAFQSBqIggQHCADQRBqIAVBMGoQHCAIIQUgA0EgaiIDIBBJDQALDAELIAUgAyAAIAwQIgsgASABKAIMIAlqNgIMIAlBgIAESQ0AIAFBATYCJCABIAEoAgQgASgCAGtBA3U2AigLIAEoAgQiAyAGQQNqNgIAIAMgCTsBBCALQYCABE8EQCABQQI2AiQgASADIAEoAgBrQQN1NgIoCyAKIQUgBiEKDAELIAohBSAGIQoLIAMgCzsBBiABIANBCGo2AgQgACAEaiIDIBZLBEAgAyEHDAELIBUgB0ECaiAUQQUQHkECdGogG0ECajYCACAVIANBfmoiACAUQQUQHkECdGogACAOazYCACAKIQQgBSEAA0ACQCAAIQogBCEAIBogAyAOayIHIAprIgRrQQNJDQAgBCAfIA4gBCAPSSIFG2oiBCgAACADKAAARw0AIANBBGogBEEEaiANIBEgDSAFGyASECAiBkEBaiEFIAEoAgwhBAJAIAMgDE0EQCAEIAMQHAwBCyAEIAMgAyAMECILIAEoAgQiBEEBNgIAIARBADsBBCAFQYCABE8EQCABQQI2AiQgASAEIAEoAgBrQQN1NgIoCyAEIAU7AQYgASAEQQhqNgIEIBUgAyAUQQUQHkECdGo
gBzYCACAKIQQgACEFIAZBBGogA2oiAyEHIAMgFk0NAQwCCwsgCiEFIAAhCiADIQcLIAcgFkkNAAsLIAIgBTYCBCACIAo2AgAgDSADaw8LIAIgBTYCBCACIAo2AgAgDSADawuKJgEUfwJ/AkACQAJAAkAgACgChAFBe2oOAwMCAQALIAIoAgAiCSACKAIEIghBACAIIAMgACgCBCILIAMgC2sgBGoiBUEBIAAoAnR0IgZrIAAoAgwiByAFIAdrIAZLGyIUaiISIANGaiIFIBJrIgZLIgcbIAkgBksiBhshFUEAIAkgBhshCUEAIAggBxshCCAFQQFqIgYgAyAEaiIEQXhqIhNJBEAgACgCfCENIAAoAiAhDiAEQWBqIRAgACgCiAEiACAARWpBAWohFgNAIAUgDUEEEB4hACAFKAAAIQwgBiANQQQQHiEHIAYoAAAhESAOIAdBAnRqIgooAgAhByAOIABBAnRqIg8oAgAhACAPIAUgC2siFzYCACAKIAYgC2s2AgACfwJAIAlFIAVBAmoiDyAJayIKKAAAIA8oAABHckUEQCAKIAUtAAEgCkF/ai0AAEYiBmshACAPIAZrIQVBACERDAELAkACQAJAIAAgFEsEQCAMIAAgC2oiACgAAEYNAQsgByAUTQ0BIBEgByALaiIAKAAARw0BIAYhBQsgBSAAayIKQQJqIRFBACEGIAAgEk0gBSADTXINAQNAIAVBf2oiBy0AACAAQX9qIgwtAABHDQIgBkEBaiEGIAcgA0sEQCAHIQUgDCIAIBJLDQELCyAJIQggDCEAIAohCSAHIQUMAgsgBiAWIAUgA2tBB3ZqIgBqIQYgACAFagwCCyAJIQggCiEJCyAFIAZqQQRqIAAgBmpBBGogBBAdIAZqIgxBAWohCiAFIANrIQcgASgCDCEAAkACQCAFIBBNBEAgACADEBwgASgCDCEAIAdBEE0EQCABIAAgB2oiADYCDAwDCyAAQRBqIANBEGoiBhAcIABBIGogA0EgahAcIAdBMUgNASAAIAdqIRggAEEwaiEDA0AgAyAGQSBqIgAQHCADQRBqIAZBMGoQHCAAIQYgA0EgaiIDIBhJDQALDAELIAAgAyAFIBAQIgsgASABKAIMIAdqIgA2AgwgB0GAgARJDQAgAUEBNgIkIAEgASgCBCABKAIAa0EDdTYCKAsgASgCBCIDIBFBAWo2AgAgAyAHOwEEIApBgIAETwRAIAFBAjYCJCABIAMgASgCAGtBA3U2AigLIAMgCjsBBiABIANBCGo2AgQgDEEEaiAFaiIDQQFqIQYCQCADIBNLDQAgDiAPIA1BBBAeQQJ0aiAXQQJqNgIAIA4gA0F+aiIFIA1BBBAeQQJ0aiAFIAtrNgIAIAhFBEBBACEIDAELIAMoAAAgAyAIaygAAEcNAEEAIAhrIQYDQCAIIQUgCSEIIAUhCSADQQRqIgUgBSAGaiAEEB0hBSAOIAMgDUEEEB5BAnRqIAMgC2s2AgAgBUEBaiEGAkAgAyAQTQRAIAAgAxAcDAELIAAgAyADIBAQIgsgASgCBCIAQQE2AgAgAEEAOwEEIAZBgIAETwRAIAFBAjYCJCABIAAgASgCAGtBA3U2AigLIAAgBjsBBiABIABBCGo2AgQCQCAIRSADIAVqQQRqIgMgE0tyDQAgAygAACADIAhrKAAARw0AQQAgCGshBiABKAIMIQAMAQsLIANBAWohBgsgAwshBSAGIBNJDQALCyACIAkgFSAJGzYCACAIIBUgCBshBSACQQRqDAMLIAIoAgAiCSACKAIEIghBACAIIAMgACgCBCILIAMgC2sgBGoiBUEBIAAoAnR0IgZrIAAoAgwiByAFIAdrIAZLGyIUaiISIANGaiIFIBJrIgZLIgcbIAkgBksiBhshFUEAIAkgBhshCUEAIAggBxshCCAFQQFqIgYgAyAEaiIEQXhqIhNJBEAgACgCfCENIAAoAiAhDiAEQWBqIRAgACgCiAEiACAARWpBAWohFgNAIAUgDUEHEB4hACAFKAAAIQwgBiANQQcQHiEHIAYoAAAhESAOIAdBAnRqIgooAgAhByAOIABBAnRqIg8oAgAhACAPIAUgC2siFzYCACAKIAYgC2s2AgACfwJAIAlFIAVBAmoiDyAJayIKKAAAIA8oAABHckUEQCAKIAUtAAEgCkF/ai0AAEYiBmshACAPIAZrIQVBACERDAELAkACQAJAIAAgFEsEQCAMIAAgC2oiACgAAEYNAQsgByAUTQ0BIBEgByALaiIAKAAARw0BIAYhBQsgBSAAayIKQQJqIRFBACEGIAAgEk0gBSADTXINAQNAIAVBf2oiBy0AACAAQX9qIgwtAABHDQIgBkEBaiEGIAcgA0sEQCAHIQUgDCIAIBJLDQELCyAJIQggDCEAIAohCSAHIQUMAgsgBiAWIAUgA2tBB3ZqIgBqIQYgACAFagwCCyAJIQggCiEJCyAFIAZqQQRqIAAgBmpBBGogBBAdIAZqIgxBAWohCiAFIANrIQcgASgCDCEAAkACQCAFIBBNBEAgACADEBwgASgCDCEAIAdBEE0EQCABIAAgB2oiADYCDAwDCyAAQRBqIANBEGoiBhAcIABBIGogA0EgahAcIAdBMUgNASAAIAdqIRggAEEwaiEDA0AgAyAGQSBqIgAQHCADQRBqIAZBMGoQHCAAIQYgA0EgaiIDIBhJDQALDAELIAAgAyAFIBAQIgsgASABKAIMIAdqIgA2AgwgB0GAgARJDQAgAUEBNgIkIAEgASgCBCABKAIAa0EDdTYCKAsgASgCBCIDIBFBAWo2AgAgAyAHOwEEIApBgIAETwRAIAFBAjYCJCABIAMgASgCAGtBA3U2AigLIAMgCjsBBiABIANBCGo2AgQgDEEEaiAFaiIDQQFqIQYCQCADIBNLDQAgDiAPIA1BBxAeQQJ0aiAXQQJqNgIAIA4gA0F+aiIFIA1BBxAeQQJ0aiAFIAtrNgIAIAhFBEBBACEIDAELIAMoAAAgAyAIaygAAEcNAEEAIAhrIQYDQCAIIQUgCSEIIAUhCSADQQRqIgUgBSAGaiAEEB0hBSAOIAMgDUEHEB5BAnRqIAMgC2s2AgAgBUEBaiEGAkAgAyAQTQRAIAAgAxAcDAELIAAgAyADIBAQIgsgASgCBCIAQQE2AgAgAEEAOwEEIAZBgIAETwRAIAFBAjYCJCABIAAgASgCAGtBA3U2AigLIAAgBjsBBiABIABBCGo2AgQCQCAIRSADIAVqQQRqIgMgE0tyDQAgAygAACADIAhrKAAARw0AQQAgCGshBiABKAIMIQAMAQsLIANBAWohBgsgAwshBSAGIBNJDQALCyACIAkgFSAJGzYCACAIIBUgCBshBSACQQRqDAILIAIoAgAiCSACKAIEIghBACAIIAMgACgCBCILIAMgC2sgBGoiBUEBIAAoAnR0IgZrIAAoAgwiByAFIAdrIAZLGyIUaiISIANGaiIFIBJrIgZLIgcbIAkgBksiBhshFUEAIAkgBhshCUEAIAggBxshCCAFQQFqIg
YgAyAEaiIEQXhqIhNJBEAgACgCfCENIAAoAiAhDiAEQWBqIRAgACgCiAEiACAARWpBAWohFgNAIAUgDUEGEB4hACAFKAAAIQwgBiANQQYQHiEHIAYoAAAhESAOIAdBAnRqIgooAgAhByAOIABBAnRqIg8oAgAhACAPIAUgC2siFzYCACAKIAYgC2s2AgACfwJAIAlFIAVBAmoiDyAJayIKKAAAIA8oAABHckUEQCAKIAUtAAEgCkF/ai0AAEYiBmshACAPIAZrIQVBACERDAELAkACQAJAIAAgFEsEQCAMIAAgC2oiACgAAEYNAQsgByAUTQ0BIBEgByALaiIAKAAARw0BIAYhBQsgBSAAayIKQQJqIRFBACEGIAAgEk0gBSADTXINAQNAIAVBf2oiBy0AACAAQX9qIgwtAABHDQIgBkEBaiEGIAcgA0sEQCAHIQUgDCIAIBJLDQELCyAJIQggDCEAIAohCSAHIQUMAgsgBiAWIAUgA2tBB3ZqIgBqIQYgACAFagwCCyAJIQggCiEJCyAFIAZqQQRqIAAgBmpBBGogBBAdIAZqIgxBAWohCiAFIANrIQcgASgCDCEAAkACQCAFIBBNBEAgACADEBwgASgCDCEAIAdBEE0EQCABIAAgB2oiADYCDAwDCyAAQRBqIANBEGoiBhAcIABBIGogA0EgahAcIAdBMUgNASAAIAdqIRggAEEwaiEDA0AgAyAGQSBqIgAQHCADQRBqIAZBMGoQHCAAIQYgA0EgaiIDIBhJDQALDAELIAAgAyAFIBAQIgsgASABKAIMIAdqIgA2AgwgB0GAgARJDQAgAUEBNgIkIAEgASgCBCABKAIAa0EDdTYCKAsgASgCBCIDIBFBAWo2AgAgAyAHOwEEIApBgIAETwRAIAFBAjYCJCABIAMgASgCAGtBA3U2AigLIAMgCjsBBiABIANBCGo2AgQgDEEEaiAFaiIDQQFqIQYCQCADIBNLDQAgDiAPIA1BBhAeQQJ0aiAXQQJqNgIAIA4gA0F+aiIFIA1BBhAeQQJ0aiAFIAtrNgIAIAhFBEBBACEIDAELIAMoAAAgAyAIaygAAEcNAEEAIAhrIQYDQCAIIQUgCSEIIAUhCSADQQRqIgUgBSAGaiAEEB0hBSAOIAMgDUEGEB5BAnRqIAMgC2s2AgAgBUEBaiEGAkAgAyAQTQRAIAAgAxAcDAELIAAgAyADIBAQIgsgASgCBCIAQQE2AgAgAEEAOwEEIAZBgIAETwRAIAFBAjYCJCABIAAgASgCAGtBA3U2AigLIAAgBjsBBiABIABBCGo2AgQCQCAIRSADIAVqQQRqIgMgE0tyDQAgAygAACADIAhrKAAARw0AQQAgCGshBiABKAIMIQAMAQsLIANBAWohBgsgAwshBSAGIBNJDQALCyACIAkgFSAJGzYCACAIIBUgCBshBSACQQRqDAELIAIoAgAiCSACKAIEIghBACAIIAMgACgCBCILIAMgC2sgBGoiBUEBIAAoAnR0IgZrIAAoAgwiByAFIAdrIAZLGyIUaiISIANGaiIFIBJrIgZLIgcbIAkgBksiBhshFUEAIAkgBhshCUEAIAggBxshCCAFQQFqIgYgAyAEaiIEQXhqIhNJBEAgACgCfCENIAAoAiAhDiAEQWBqIRAgACgCiAEiACAARWpBAWohFgNAIAUgDUEFEB4hACAFKAAAIQwgBiANQQUQHiEHIAYoAAAhESAOIAdBAnRqIgooAgAhByAOIABBAnRqIg8oAgAhACAPIAUgC2siFzYCACAKIAYgC2s2AgACfwJAIAlFIAVBAmoiDyAJayIKKAAAIA8oAABHckUEQCAKIAUtAAEgCkF/ai0AAEYiBmshACAPIAZrIQVBACERDAELAkACQAJAIAAgFEsEQCAMIAAgC2oiACgAAEYNAQsgByAUTQ0BIBEgByALaiIAKAAARw0BIAYhBQsgBSAAayIKQQJqIRFBACEGIAAgEk0gBSADTXINAQNAIAVBf2oiBy0AACAAQX9qIgwtAABHDQIgBkEBaiEGIAcgA0sEQCAHIQUgDCIAIBJLDQELCyAJIQggDCEAIAohCSAHIQUMAgsgBiAWIAUgA2tBB3ZqIgBqIQYgACAFagwCCyAJIQggCiEJCyAFIAZqQQRqIAAgBmpBBGogBBAdIAZqIgxBAWohCiAFIANrIQcgASgCDCEAAkACQCAFIBBNBEAgACADEBwgASgCDCEAIAdBEE0EQCABIAAgB2oiADYCDAwDCyAAQRBqIANBEGoiBhAcIABBIGogA0EgahAcIAdBMUgNASAAIAdqIRggAEEwaiEDA0AgAyAGQSBqIgAQHCADQRBqIAZBMGoQHCAAIQYgA0EgaiIDIBhJDQALDAELIAAgAyAFIBAQIgsgASABKAIMIAdqIgA2AgwgB0GAgARJDQAgAUEBNgIkIAEgASgCBCABKAIAa0EDdTYCKAsgASgCBCIDIBFBAWo2AgAgAyAHOwEEIApBgIAETwRAIAFBAjYCJCABIAMgASgCAGtBA3U2AigLIAMgCjsBBiABIANBCGo2AgQgDEEEaiAFaiIDQQFqIQYCQCADIBNLDQAgDiAPIA1BBRAeQQJ0aiAXQQJqNgIAIA4gA0F+aiIFIA1BBRAeQQJ0aiAFIAtrNgIAIAhFBEBBACEIDAELIAMoAAAgAyAIaygAAEcNAEEAIAhrIQYDQCAIIQUgCSEIIAUhCSADQQRqIgUgBSAGaiAEEB0hBSAOIAMgDUEFEB5BAnRqIAMgC2s2AgAgBUEBaiEGAkAgAyAQTQRAIAAgAxAcDAELIAAgAyADIBAQIgsgASgCBCIAQQE2AgAgAEEAOwEEIAZBgIAETwRAIAFBAjYCJCABIAAgASgCAGtBA3U2AigLIAAgBjsBBiABIABBCGo2AgQCQCAIRSADIAVqQQRqIgMgE0tyDQAgAygAACADIAhrKAAARw0AQQAgCGshBiABKAIMIQAMAQsLIANBAWohBgsgAwshBSAGIBNJDQALCyACIAkgFSAJGzYCACAIIBUgCBshBSACQQRqCyAFNgIAIAQgA2sLYAEFfyAAKAIEIgQgACgCGGoiAkEDaiIDIAFBemoiBUkEQCAAKAKEASEGIAAoAnwhASAAKAIgIQADQCAAIAIgASAGEB5BAnRqIAIgBGs2AgAgAyICQQNqIgMgBUkNAAsLC/4dARl/IAAoAnghFSAAKAJ8IRMgACgCKCEWIAAoAiAhFCADIARqIg1BeGohFyACKAIEIQcgAigCACEIAkAgACgCDCIGIAAoAhAgACgCFCADIAAoAgQiC2sgBGoiBCAAKAJ0IgoQJyIQSwRAIBcgA0sEQCAAKAIIIg4gBiAQIAYgEEsbIg9qIRggCyAPaiERIA4gEGohGyANQWBqIRIgD0F/aiEcIAMhAANAIBYgAyAVIAUQHkECdGoiBCgCACEKIBQgAyATQQgQHkECdGoiBigCACEMIAYgAyALayIaNgIAIAQgGjYCAAJAAkACQAJAAkACQAJAIBpBAWoiGSAIayIEIBBNI
BwgBGtBA0lyRQRAIA4gCyAEIA9JIgYbIARqIgkoAAAgA0EBaiIEKAAARg0BCyAMIBBNDQMgDiALIAwgD0kiBBsgDGoiCSkAACADKQAAUg0DIANBCGogCUEIaiANIBggDSAEGyARECBBCGohBiAJIBsgESAEGyIHSw0BIAMhBAwCCyADQQVqIAlBBGogDSAYIA0gBhsgERAgIglBAWohDCAEIABrIQogASgCDCEDAkACQCAEIBJNBEAgAyAAEBwgASgCDCEDIApBEE0EQCABIAMgCmo2AgwMAwsgA0EQaiAAQRBqIgYQHCADQSBqIABBIGoQHCAKQTFIDQEgAyAKaiEZIANBMGohAwNAIAMgBkEgaiIAEBwgA0EQaiAGQTBqEBwgACEGIANBIGoiAyAZSQ0ACwwBCyADIAAgBCASECILIAEgASgCDCAKajYCDCAKQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyAJQQRqIQYgASgCBCIDQQE2AgAgAyAKOwEEIAxBgIAESQ0EIAFBAjYCJCABIAMgASgCAGtBA3U2AigMBAsgAyEEIAMgAE0NAANAIANBf2oiBC0AACAJQX9qIgktAABHBEAgAyEEDAILIAZBAWohBiAJIAdNDQEgBCIDIABLDQALCyAaIAxrIQogBkF9aiEMIAQgAGshByABKAIMIQMCQAJAIAQgEk0EQCADIAAQHCABKAIMIQMgB0EQTQRAIAEgAyAHajYCDAwDCyADQRBqIABBEGoiCRAcIANBIGogAEEgahAcIAdBMUgNASADIAdqIRkgA0EwaiEDA0AgAyAJQSBqIgAQHCADQRBqIAlBMGoQHCAAIQkgA0EgaiIDIBlJDQALDAELIAMgACAEIBIQIgsgASABKAIMIAdqNgIMIAdBgIAESQ0AIAFBATYCJCABIAEoAgQgASgCAGtBA3U2AigLIAEoAgQiAyAKQQNqNgIAIAMgBzsBBCAMQYCABEkNASABQQI2AiQgASADIAEoAgBrQQN1NgIoDAELAkACQCAKIBBNDQAgDiALIAogD0kiHRsgCmoiCSgAACADKAAARw0AIBQgA0EBaiIEIBNBCBAeQQJ0aiIGKAIAIQwgBiAZNgIAAkACQCAMIBBNDQAgDiALIAwgD0kiHhsgDGoiBykAACAEKQAAUg0AIANBCWogB0EIaiANIBggDSAeGyARECBBCGohBiAZIAxrIQogByAbIBEgHhsiCU0gBCAATXINAQNAIARBf2oiAy0AACAHQX9qIgctAABHDQIgBkEBaiEGIAcgCU0EQCADIQQMAwsgAyIEIABLDQALDAELIANBBGogCUEEaiANIBggDSAdGyARECBBBGohBiAaIAprIQogCSAbIBEgHRsiB00EQCADIQQMAQsgAyAATQRAIAMhBAwBCwNAIANBf2oiBC0AACAJQX9qIgktAABHBEAgAyEEDAILIAZBAWohBiAJIAdNDQEgBCIDIABLDQALCyAGQX1qIQwgBCAAayEHIAEoAgwhAwJAAkAgBCASTQRAIAMgABAcIAEoAgwhAyAHQRBNBEAgASADIAdqNgIMDAMLIANBEGogAEEQaiIJEBwgA0EgaiAAQSBqEBwgB0ExSA0BIAMgB2ohGSADQTBqIQMDQCADIAlBIGoiABAcIANBEGogCUEwahAcIAAhCSADQSBqIgMgGUkNAAsMAQsgAyAAIAQgEhAiCyABIAEoAgwgB2o2AgwgB0GAgARJDQAgAUEBNgIkIAEgASgCBCABKAIAa0EDdTYCKAsgASgCBCIDIApBA2o2AgAgAyAHOwEEIAxBgIAESQ0BIAFBAjYCJCABIAMgASgCAGtBA3U2AigMAQsgAyAAa0EIdSADakEBaiEDDAMLIAghByAKIQgMAQsgCCEHIAohCAsgAyAMOwEGIAEgA0EIajYCBCAEIAZqIgAgF0sEQCAAIQMMAQsgFCALIBpBAmoiA2oiBCATQQgQHkECdGogAzYCACAUIABBfmoiBiATQQgQHkECdGogBiALazYCACAWIAQgFSAFEB5BAnRqIAM2AgAgFiAAQX9qIgMgFSAFEB5BAnRqIAMgC2s2AgAgCCEGIAchBANAAkAgBCEIIAYhBCAAIAtrIgYgCGsiAyAQTSAcIANrQQNJcg0AIAMgDiALIAMgD0kiBxtqIgMoAAAgACgAAEcNACAAQQRqIANBBGogDSAYIA0gBxsgERAgIgpBAWohByABKAIMIQMCQCAAIBJNBEAgAyAAEBwMAQsgAyAAIAAgEhAiCyABKAIEIgNBATYCACADQQA7AQQgB0GAgARPBEAgAUECNgIkIAEgAyABKAIAa0EDdTYCKAsgAyAHOwEGIAEgA0EIajYCBCAWIAAgFSAFEB5BAnRqIAY2AgAgFCAAIBNBCBAeQQJ0aiAGNgIAIAghBiAEIQcgCkEEaiAAaiIAIQMgACAXTQ0BDAILCyAIIQcgBCEIIAAhAwsgAyAXSQ0ACyAAIQMLIAIgCDYCAAwBCyAIIAdBACAHIAMgCyAEQQEgCnQiAGsgBiAEIAZrIABLGyIQaiISIANGaiIEIBJrIgBLIgYbIAggAEsiABshGEEAIAggABshAEEAIAcgBhshCiAEIBdJBEAgDUFgaiERA0AgBCATQQgQHiEIIBYgBCAVIAUQHkECdGoiBigCACEPIBQgCEECdGoiCCgCACEOIAYgBCALayIMNgIAIAggDDYCAAJAAkAgAEUgBEEBaiIIIABrKAAAIAgoAABHckUEQCAEQQVqIgQgBCAAayANEB0iCUEBaiEPIAggA2shByABKAIMIQQCQAJAIAggEU0EQCAEIAMQHCABKAIMIQYgB0EQTQRAIAEgBiAHajYCDAwDCyAGQRBqIANBEGoiBBAcIAZBIGogA0EgahAcIAdBMUgNASAGIAdqIQ4gBkEwaiEDA0AgAyAEQSBqIgYQHCADQRBqIARBMGoQHCAGIQQgA0EgaiIDIA5JDQALDAELIAQgAyAIIBEQIgsgASABKAIMIAdqNgIMIAdBgIAESQ0AIAFBATYCJCABIAEoAgQgASgCAGtBA3U2AigLIAlBBGohBiABKAIEIgNBATYCACADIAc7AQQgD0GAgARJDQEgAUECNgIkIAEgAyABKAIAa0EDdTYCKAwBCwJAAkACQAJAAkAgDiAQSwRAIAsgDmoiCSkAACAEKQAAUg0BIARBCGogCUEIaiANEB1BCGohBiAEIAlrIQcgBCADTQRAIAQhCAwGCyAOIBBMBEAgBCEIDAYLA0AgBEF/aiIILQAAIAlBf2oiCS0AAEcEQCAEIQgMBwsgBkEBaiEGIAggA00NBiAIIQQgCSASSw0ACwwFCyAPIBBLDQEMAgsgDyAQTQ0BCyALIA9qIgkoAAAgBCgAAEYNAQsgBCADa0EIdSAEakEBaiEEDAMLIBQgCCATQQgQHkECdGoiBigCACEOIAYgDEEBajYCAAJAIA4gEE0NACALIA5qIgopAAAgCCkAAFINACAEQQlqIApBCGogDRAdQQhqIQYg
CCAKayEHIA4gEEwgCCADTXINAQNAIAhBf2oiBC0AACAKQX9qIgotAABHDQIgBkEBaiEGIAQgA00EQCAEIQgMAwsgBCEIIAogEksNAAsMAQsgBEEEaiAJQQRqIA0QHUEEaiEGIAQgCWshByAEIANNBEAgBCEIDAELIA8gEEwEQCAEIQgMAQsDQCAEQX9qIggtAAAgCUF/aiIJLQAARwRAIAQhCAwCCyAGQQFqIQYgCCADTQ0BIAghBCAJIBJLDQALCyAGQX1qIQ8gCCADayEJIAEoAgwhBAJAAkAgCCARTQRAIAQgAxAcIAEoAgwhCiAJQRBNBEAgASAJIApqNgIMDAMLIApBEGogA0EQaiIEEBwgCkEgaiADQSBqEBwgCUExSA0BIAkgCmohDiAKQTBqIQMDQCADIARBIGoiChAcIANBEGogBEEwahAcIAohBCADQSBqIgMgDkkNAAsMAQsgBCADIAggERAiCyABIAEoAgwgCWo2AgwgCUGAgARJDQAgAUEBNgIkIAEgASgCBCABKAIAa0EDdTYCKAsgASgCBCIDIAdBA2o2AgAgAyAJOwEEIA9BgIAETwRAIAFBAjYCJCABIAMgASgCAGtBA3U2AigLIAAhCiAHIQALIAMgDzsBBiABIANBCGo2AgQgBiAIaiIDIBdLBEAgAyEEDAELIBQgCyAMQQJqIgRqIgggE0EIEB5BAnRqIAQ2AgAgFCADQX5qIgYgE0EIEB5BAnRqIAYgC2s2AgAgFiAIIBUgBRAeQQJ0aiAENgIAIBYgA0F/aiIEIBUgBRAeQQJ0aiAEIAtrNgIAIAAhBiAKIQgDQAJAIAghACAGIQggAEUgAygAACADIABrKAAAR3INACADQQRqIgQgBCAAayANEB0hByAWIAMgFSAFEB5BAnRqIAMgC2siBDYCACAUIAMgE0EIEB5BAnRqIAQ2AgAgB0EBaiEGIAEoAgwhBAJAIAMgEU0EQCAEIAMQHAwBCyAEIAMgAyARECILIAEoAgQiBEEBNgIAIARBADsBBCAGQYCABE8EQCABQQI2AiQgASAEIAEoAgBrQQN1NgIoCyAEIAY7AQYgASAEQQhqNgIEIAAhBiAIIQogB0EEaiADaiIDIQQgAyAXTQ0BDAILCyAAIQogCCEAIAMhBAsgBCAXSQ0ACwsgAiAAIBggABs2AgAgCiAYIAobIQcLIAIgBzYCBCANIANrCyIAIAAgASACIAMgBCAAKAKEASIAQQQgAEF7akEDSRsQwwMLm0kBHn8CQAJAAkACQAJAIAAoAoQBQXtqDgMDAgEACyACKAIEIQggAigCACENIAMgACgCcCIGKAIAIg8gAyAAKAIEIgwgAyAMayAEaiIFQQEgACgCdHQiB2sgACgCDCIKIAUgCmsgB0sbIgtqIg5raiAGKAIEIhAgBigCDCIaaiIWRmoiBSADIARqIgpBeGoiG0kEQCAAKAJ4IRcgACgCfCETIAYoAnghHiAGKAJ8IRwgACgCKCEYIAAoAiAhFCAGKAIoIR8gBigCICEdIBAgCyAQaiAPayIZayEgIApBYGohEQNAIAUgE0EIEB4hACAFIBdBBBAeIQQgBSAcQQgQHiEHIAUgHkEEEB4hISAUIABBAnRqIgAoAgAhCSAYIARBAnRqIgQoAgAhBiAEIAUgDGsiFTYCACAAIBU2AgACQAJAAkAgCyAVQQFqIhIgDWsiAEF/c2pBA0kNACAQIAAgGWtqIAAgDGogACALSSIEGyIiKAAAIAVBAWoiACgAAEcNACAFQQVqICJBBGogCiAPIAogBBsgDhAgIglBAWohByAAIANrIQYgASgCDCEEAkACQCAAIBFNBEAgBCADEBwgASgCDCEEIAZBEE0EQCABIAQgBmo2AgwMAwsgBEEQaiADQRBqIgUQHCAEQSBqIANBIGoQHCAGQTFIDQEgBCAGaiESIARBMGohAwNAIAMgBUEgaiIEEBwgA0EQaiAFQTBqEBwgBCEFIANBIGoiAyASSQ0ACwwBCyAEIAMgACARECILIAEgASgCDCAGajYCDCAGQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyAJQQRqIQQgASgCBCIDQQE2AgAgAyAGOwEEIAdBgIAESQ0BIAFBAjYCJCABIAMgASgCAGtBA3U2AigMAQsCQAJAAkACQAJAAkAgCSALSwRAIAkgDGoiBykAACAFKQAAUg0BIAVBCGogB0EIaiAKEB1BCGohBCAFIAdrIQYgBSADTQRAIAUhAAwHCyAJIAtMBEAgBSEADAcLA0AgBUF/aiIALQAAIAdBf2oiBy0AAEcEQCAFIQAMCAsgBEEBaiEEIAAgA00NByAAIQUgByAOSw0ACwwGCwJAIB0gB0ECdGooAgAiACAaTA0AIAAgEGoiBykAACAFKQAAUg0AIAVBCGogB0EIaiAKIA8gDhAgQQhqIQQgFSAAayAZayEGIAUgA00EQCAFIQAMBwsDQCAFQX9qIgAtAAAgB0F/aiIHLQAARwRAIAUhAAwICyAEQQFqIQQgACADTQ0HIAAhBSAHIBZLDQALDAYLIAYgC00NAQwCCyAGIAtLDQELIB8gIUECdGooAgAiACAaTA0BIAAgEGoiBygAACAFKAAARw0BIAAgGWohBgwCCyAGIAxqIgcoAAAgBSgAAEYNAQsgBSADa0EIdSAFakEBaiEFDAMLIAVBAWoiACATQQgQHiEEIAAgHEEIEB4hCCAUIARBAnRqIgQoAgAhCSAEIBI2AgACQCAJIAtLBEAgCSAMaiIIKQAAIAApAABSDQEgBUEJaiAIQQhqIAoQHUEIaiEEIAAgCGshBiAJIAtMIAAgA01yDQIDQCAAQX9qIgUtAAAgCEF/aiIILQAARw0DIARBAWohBCAFIANNBEAgBSEADAQLIAUhACAIIA5LDQALDAILIB0gCEECdGooAgAiCSAaTA0AIAkgEGoiCCkAACAAKQAAUg0AIAVBCWogCEEIaiAKIA8gDhAgQQhqIQQgEiAJayAZayEGIAAgA00NAQNAIABBf2oiBS0AACAIQX9qIggtAABHDQIgBEEBaiEEIAUgA00EQCAFIQAMAwsgBSEAIAggFksNAAsMAQsgB0EEaiEAIAVBBGohBCAGIAtJBEAgBCAAIAogDyAOECBBBGohBCAVIAZrIQYgBSADTQRAIAUhAAwCCyAHIBZNBEAgBSEADAILA0AgBUF/aiIALQAAIAdBf2oiBy0AAEcEQCAFIQAMAwsgBEEBaiEEIAAgA00NAiAAIQUgByAWSw0ACwwBCyAEIAAgChAdQQRqIQQgBSAHayEGIAUgA00EQCAFIQAMAQsgByAOTQRAIAUhAAwBCwNAIAVBf2oiAC0AACAHQX9qIgctAABHBEAgBSEADAILIARBAWohBCAAIANNDQEgACEFIAcgDksNAAsLIARBfWohByAAIANrIQkgASgCDCEFAkACQCAAIBFNBEAgBSADEBwgASgCDCEIIAlBEE0EQCABIAggCWo2AgwMAwsgCEEQaiADQRBqIgUQHCA
IQSBqIANBIGoQHCAJQTFIDQEgCCAJaiESIAhBMGohAwNAIAMgBUEgaiIIEBwgA0EQaiAFQTBqEBwgCCEFIANBIGoiAyASSQ0ACwwBCyAFIAMgACARECILIAEgASgCDCAJajYCDCAJQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyABKAIEIgMgBkEDajYCACADIAk7AQQgB0GAgARPBEAgAUECNgIkIAEgAyABKAIAa0EDdTYCKAsgDSEIIAYhDQsgAyAHOwEGIAEgA0EIajYCBCAAIARqIgMgG0sEQCADIQUMAQsgFCAMIBVBAmoiAGoiBCATQQgQHkECdGogADYCACAUIANBfmoiBSATQQgQHkECdGogBSAMazYCACAYIAQgF0EEEB5BAnRqIAA2AgAgGCADQX9qIgAgF0EEEB5BAnRqIAAgDGs2AgAgDSEEIAghAANAAkAgACENIAQhACALIAMgDGsiBSANayIEQX9zakEDSQ0AIAQgICAMIAQgC0kiCBtqIgQoAAAgAygAAEcNACADQQRqIARBBGogCiAPIAogCBsgDhAgIgZBAWohCCABKAIMIQQCQCADIBFNBEAgBCADEBwMAQsgBCADIAMgERAiCyABKAIEIgRBATYCACAEQQA7AQQgCEGAgARPBEAgAUECNgIkIAEgBCABKAIAa0EDdTYCKAsgBCAIOwEGIAEgBEEIajYCBCAYIAMgF0EEEB5BAnRqIAU2AgAgFCADIBNBCBAeQQJ0aiAFNgIAIA0hBCAAIQggBkEEaiADaiIDIQUgAyAbTQ0BDAILCyANIQggACENIAMhBQsgBSAbSQ0ACwsMAwsgAigCBCEIIAIoAgAhDSADIAAoAnAiBigCACIPIAMgACgCBCIMIAMgDGsgBGoiBUEBIAAoAnR0IgdrIAAoAgwiCiAFIAprIAdLGyILaiIOa2ogBigCBCIQIAYoAgwiGmoiFkZqIgUgAyAEaiIKQXhqIhtJBEAgACgCeCEXIAAoAnwhEyAGKAJ4IR4gBigCfCEcIAAoAighGCAAKAIgIRQgBigCKCEfIAYoAiAhHSAQIAsgEGogD2siGWshICAKQWBqIREDQCAFIBNBCBAeIQAgBSAXQQcQHiEEIAUgHEEIEB4hByAFIB5BBxAeISEgFCAAQQJ0aiIAKAIAIQkgGCAEQQJ0aiIEKAIAIQYgBCAFIAxrIhU2AgAgACAVNgIAAkACQAJAIAsgFUEBaiISIA1rIgBBf3NqQQNJDQAgECAAIBlraiAAIAxqIAAgC0kiBBsiIigAACAFQQFqIgAoAABHDQAgBUEFaiAiQQRqIAogDyAKIAQbIA4QICIJQQFqIQcgACADayEGIAEoAgwhBAJAAkAgACARTQRAIAQgAxAcIAEoAgwhBCAGQRBNBEAgASAEIAZqNgIMDAMLIARBEGogA0EQaiIFEBwgBEEgaiADQSBqEBwgBkExSA0BIAQgBmohEiAEQTBqIQMDQCADIAVBIGoiBBAcIANBEGogBUEwahAcIAQhBSADQSBqIgMgEkkNAAsMAQsgBCADIAAgERAiCyABIAEoAgwgBmo2AgwgBkGAgARJDQAgAUEBNgIkIAEgASgCBCABKAIAa0EDdTYCKAsgCUEEaiEEIAEoAgQiA0EBNgIAIAMgBjsBBCAHQYCABEkNASABQQI2AiQgASADIAEoAgBrQQN1NgIoDAELAkACQAJAAkACQAJAIAkgC0sEQCAJIAxqIgcpAAAgBSkAAFINASAFQQhqIAdBCGogChAdQQhqIQQgBSAHayEGIAUgA00EQCAFIQAMBwsgCSALTARAIAUhAAwHCwNAIAVBf2oiAC0AACAHQX9qIgctAABHBEAgBSEADAgLIARBAWohBCAAIANNDQcgACEFIAcgDksNAAsMBgsCQCAdIAdBAnRqKAIAIgAgGkwNACAAIBBqIgcpAAAgBSkAAFINACAFQQhqIAdBCGogCiAPIA4QIEEIaiEEIBUgAGsgGWshBiAFIANNBEAgBSEADAcLA0AgBUF/aiIALQAAIAdBf2oiBy0AAEcEQCAFIQAMCAsgBEEBaiEEIAAgA00NByAAIQUgByAWSw0ACwwGCyAGIAtNDQEMAgsgBiALSw0BCyAfICFBAnRqKAIAIgAgGkwNASAAIBBqIgcoAAAgBSgAAEcNASAAIBlqIQYMAgsgBiAMaiIHKAAAIAUoAABGDQELIAUgA2tBCHUgBWpBAWohBQwDCyAFQQFqIgAgE0EIEB4hBCAAIBxBCBAeIQggFCAEQQJ0aiIEKAIAIQkgBCASNgIAAkAgCSALSwRAIAkgDGoiCCkAACAAKQAAUg0BIAVBCWogCEEIaiAKEB1BCGohBCAAIAhrIQYgCSALTCAAIANNcg0CA0AgAEF/aiIFLQAAIAhBf2oiCC0AAEcNAyAEQQFqIQQgBSADTQRAIAUhAAwECyAFIQAgCCAOSw0ACwwCCyAdIAhBAnRqKAIAIgkgGkwNACAJIBBqIggpAAAgACkAAFINACAFQQlqIAhBCGogCiAPIA4QIEEIaiEEIBIgCWsgGWshBiAAIANNDQEDQCAAQX9qIgUtAAAgCEF/aiIILQAARw0CIARBAWohBCAFIANNBEAgBSEADAMLIAUhACAIIBZLDQALDAELIAdBBGohACAFQQRqIQQgBiALSQRAIAQgACAKIA8gDhAgQQRqIQQgFSAGayEGIAUgA00EQCAFIQAMAgsgByAWTQRAIAUhAAwCCwNAIAVBf2oiAC0AACAHQX9qIgctAABHBEAgBSEADAMLIARBAWohBCAAIANNDQIgACEFIAcgFksNAAsMAQsgBCAAIAoQHUEEaiEEIAUgB2shBiAFIANNBEAgBSEADAELIAcgDk0EQCAFIQAMAQsDQCAFQX9qIgAtAAAgB0F/aiIHLQAARwRAIAUhAAwCCyAEQQFqIQQgACADTQ0BIAAhBSAHIA5LDQALCyAEQX1qIQcgACADayEJIAEoAgwhBQJAAkAgACARTQRAIAUgAxAcIAEoAgwhCCAJQRBNBEAgASAIIAlqNgIMDAMLIAhBEGogA0EQaiIFEBwgCEEgaiADQSBqEBwgCUExSA0BIAggCWohEiAIQTBqIQMDQCADIAVBIGoiCBAcIANBEGogBUEwahAcIAghBSADQSBqIgMgEkkNAAsMAQsgBSADIAAgERAiCyABIAEoAgwgCWo2AgwgCUGAgARJDQAgAUEBNgIkIAEgASgCBCABKAIAa0EDdTYCKAsgASgCBCIDIAZBA2o2AgAgAyAJOwEEIAdBgIAETwRAIAFBAjYCJCABIAMgASgCAGtBA3U2AigLIA0hCCAGIQ0LIAMgBzsBBiABIANBCGo2AgQgACAEaiIDIBtLBEAgAyEFDAELIBQgDCAVQQJqIgBqIgQgE0EIEB5BAnRqIAA2AgAgFCADQX5qIgUgE0EIEB5BAnRqIAUgDGs2AgAgGCAEIBdBBxAeQQJ0aiAANgIAIBggA0F/aiIAIBdBBxAeQQJ0aiAAIAxrNgIAIA
0hBCAIIQADQAJAIAAhDSAEIQAgCyADIAxrIgUgDWsiBEF/c2pBA0kNACAEICAgDCAEIAtJIggbaiIEKAAAIAMoAABHDQAgA0EEaiAEQQRqIAogDyAKIAgbIA4QICIGQQFqIQggASgCDCEEAkAgAyARTQRAIAQgAxAcDAELIAQgAyADIBEQIgsgASgCBCIEQQE2AgAgBEEAOwEEIAhBgIAETwRAIAFBAjYCJCABIAQgASgCAGtBA3U2AigLIAQgCDsBBiABIARBCGo2AgQgGCADIBdBBxAeQQJ0aiAFNgIAIBQgAyATQQgQHkECdGogBTYCACANIQQgACEIIAZBBGogA2oiAyEFIAMgG00NAQwCCwsgDSEIIAAhDSADIQULIAUgG0kNAAsLDAILIAIoAgQhCCACKAIAIQ0gAyAAKAJwIgYoAgAiDyADIAAoAgQiDCADIAxrIARqIgVBASAAKAJ0dCIHayAAKAIMIgogBSAKayAHSxsiC2oiDmtqIAYoAgQiECAGKAIMIhpqIhZGaiIFIAMgBGoiCkF4aiIbSQRAIAAoAnghFyAAKAJ8IRMgBigCeCEeIAYoAnwhHCAAKAIoIRggACgCICEUIAYoAighHyAGKAIgIR0gECALIBBqIA9rIhlrISAgCkFgaiERA0AgBSATQQgQHiEAIAUgF0EGEB4hBCAFIBxBCBAeIQcgBSAeQQYQHiEhIBQgAEECdGoiACgCACEJIBggBEECdGoiBCgCACEGIAQgBSAMayIVNgIAIAAgFTYCAAJAAkACQCALIBVBAWoiEiANayIAQX9zakEDSQ0AIBAgACAZa2ogACAMaiAAIAtJIgQbIiIoAAAgBUEBaiIAKAAARw0AIAVBBWogIkEEaiAKIA8gCiAEGyAOECAiCUEBaiEHIAAgA2shBiABKAIMIQQCQAJAIAAgEU0EQCAEIAMQHCABKAIMIQQgBkEQTQRAIAEgBCAGajYCDAwDCyAEQRBqIANBEGoiBRAcIARBIGogA0EgahAcIAZBMUgNASAEIAZqIRIgBEEwaiEDA0AgAyAFQSBqIgQQHCADQRBqIAVBMGoQHCAEIQUgA0EgaiIDIBJJDQALDAELIAQgAyAAIBEQIgsgASABKAIMIAZqNgIMIAZBgIAESQ0AIAFBATYCJCABIAEoAgQgASgCAGtBA3U2AigLIAlBBGohBCABKAIEIgNBATYCACADIAY7AQQgB0GAgARJDQEgAUECNgIkIAEgAyABKAIAa0EDdTYCKAwBCwJAAkACQAJAAkACQCAJIAtLBEAgCSAMaiIHKQAAIAUpAABSDQEgBUEIaiAHQQhqIAoQHUEIaiEEIAUgB2shBiAFIANNBEAgBSEADAcLIAkgC0wEQCAFIQAMBwsDQCAFQX9qIgAtAAAgB0F/aiIHLQAARwRAIAUhAAwICyAEQQFqIQQgACADTQ0HIAAhBSAHIA5LDQALDAYLAkAgHSAHQQJ0aigCACIAIBpMDQAgACAQaiIHKQAAIAUpAABSDQAgBUEIaiAHQQhqIAogDyAOECBBCGohBCAVIABrIBlrIQYgBSADTQRAIAUhAAwHCwNAIAVBf2oiAC0AACAHQX9qIgctAABHBEAgBSEADAgLIARBAWohBCAAIANNDQcgACEFIAcgFksNAAsMBgsgBiALTQ0BDAILIAYgC0sNAQsgHyAhQQJ0aigCACIAIBpMDQEgACAQaiIHKAAAIAUoAABHDQEgACAZaiEGDAILIAYgDGoiBygAACAFKAAARg0BCyAFIANrQQh1IAVqQQFqIQUMAwsgBUEBaiIAIBNBCBAeIQQgACAcQQgQHiEIIBQgBEECdGoiBCgCACEJIAQgEjYCAAJAIAkgC0sEQCAJIAxqIggpAAAgACkAAFINASAFQQlqIAhBCGogChAdQQhqIQQgACAIayEGIAkgC0wgACADTXINAgNAIABBf2oiBS0AACAIQX9qIggtAABHDQMgBEEBaiEEIAUgA00EQCAFIQAMBAsgBSEAIAggDksNAAsMAgsgHSAIQQJ0aigCACIJIBpMDQAgCSAQaiIIKQAAIAApAABSDQAgBUEJaiAIQQhqIAogDyAOECBBCGohBCASIAlrIBlrIQYgACADTQ0BA0AgAEF/aiIFLQAAIAhBf2oiCC0AAEcNAiAEQQFqIQQgBSADTQRAIAUhAAwDCyAFIQAgCCAWSw0ACwwBCyAHQQRqIQAgBUEEaiEEIAYgC0kEQCAEIAAgCiAPIA4QIEEEaiEEIBUgBmshBiAFIANNBEAgBSEADAILIAcgFk0EQCAFIQAMAgsDQCAFQX9qIgAtAAAgB0F/aiIHLQAARwRAIAUhAAwDCyAEQQFqIQQgACADTQ0CIAAhBSAHIBZLDQALDAELIAQgACAKEB1BBGohBCAFIAdrIQYgBSADTQRAIAUhAAwBCyAHIA5NBEAgBSEADAELA0AgBUF/aiIALQAAIAdBf2oiBy0AAEcEQCAFIQAMAgsgBEEBaiEEIAAgA00NASAAIQUgByAOSw0ACwsgBEF9aiEHIAAgA2shCSABKAIMIQUCQAJAIAAgEU0EQCAFIAMQHCABKAIMIQggCUEQTQRAIAEgCCAJajYCDAwDCyAIQRBqIANBEGoiBRAcIAhBIGogA0EgahAcIAlBMUgNASAIIAlqIRIgCEEwaiEDA0AgAyAFQSBqIggQHCADQRBqIAVBMGoQHCAIIQUgA0EgaiIDIBJJDQALDAELIAUgAyAAIBEQIgsgASABKAIMIAlqNgIMIAlBgIAESQ0AIAFBATYCJCABIAEoAgQgASgCAGtBA3U2AigLIAEoAgQiAyAGQQNqNgIAIAMgCTsBBCAHQYCABE8EQCABQQI2AiQgASADIAEoAgBrQQN1NgIoCyANIQggBiENCyADIAc7AQYgASADQQhqNgIEIAAgBGoiAyAbSwRAIAMhBQwBCyAUIAwgFUECaiIAaiIEIBNBCBAeQQJ0aiAANgIAIBQgA0F+aiIFIBNBCBAeQQJ0aiAFIAxrNgIAIBggBCAXQQYQHkECdGogADYCACAYIANBf2oiACAXQQYQHkECdGogACAMazYCACANIQQgCCEAA0ACQCAAIQ0gBCEAIAsgAyAMayIFIA1rIgRBf3NqQQNJDQAgBCAgIAwgBCALSSIIG2oiBCgAACADKAAARw0AIANBBGogBEEEaiAKIA8gCiAIGyAOECAiBkEBaiEIIAEoAgwhBAJAIAMgEU0EQCAEIAMQHAwBCyAEIAMgAyARECILIAEoAgQiBEEBNgIAIARBADsBBCAIQYCABE8EQCABQQI2AiQgASAEIAEoAgBrQQN1NgIoCyAEIAg7AQYgASAEQQhqNgIEIBggAyAXQQYQHkECdGogBTYCACAUIAMgE0EIEB5BAnRqIAU2AgAgDSEEIAAhCCAGQQRqIANqIgMhBSADIBtNDQEMAgsLIA0hCCAAIQ0gAyEFCyAFIBtJDQALCwwBCyACKAIEIQggAigCACENIAMgACgCcCIGKAIAIg8gAyAAKAIEIgwgAyAMa
yAEaiIFQQEgACgCdHQiB2sgACgCDCIKIAUgCmsgB0sbIgpqIg5raiAGKAIEIhAgBigCDCIaaiIWRmoiBSADIARqIgtBeGoiG0kEQCAAKAJ4IRcgACgCfCETIAYoAnghHiAGKAJ8IRwgACgCKCEYIAAoAiAhFCAGKAIoIR8gBigCICEdIBAgCiAQaiAPayIZayEgIAtBYGohEQNAIAUgE0EIEB4hACAFIBdBBRAeIQQgBSAcQQgQHiEHIAUgHkEFEB4hISAUIABBAnRqIgAoAgAhCSAYIARBAnRqIgQoAgAhBiAEIAUgDGsiFTYCACAAIBU2AgACQAJAAkAgCiAVQQFqIhIgDWsiAEF/c2pBA0kNACAQIAAgGWtqIAAgDGogACAKSSIEGyIiKAAAIAVBAWoiACgAAEcNACAFQQVqICJBBGogCyAPIAsgBBsgDhAgIglBAWohByAAIANrIQYgASgCDCEEAkACQCAAIBFNBEAgBCADEBwgASgCDCEEIAZBEE0EQCABIAQgBmo2AgwMAwsgBEEQaiADQRBqIgUQHCAEQSBqIANBIGoQHCAGQTFIDQEgBCAGaiESIARBMGohAwNAIAMgBUEgaiIEEBwgA0EQaiAFQTBqEBwgBCEFIANBIGoiAyASSQ0ACwwBCyAEIAMgACARECILIAEgASgCDCAGajYCDCAGQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyAJQQRqIQQgASgCBCIDQQE2AgAgAyAGOwEEIAdBgIAESQ0BIAFBAjYCJCABIAMgASgCAGtBA3U2AigMAQsCQAJAAkACQAJAAkAgCSAKSwRAIAkgDGoiBykAACAFKQAAUg0BIAVBCGogB0EIaiALEB1BCGohBCAFIAdrIQYgBSADTQRAIAUhAAwHCyAJIApMBEAgBSEADAcLA0AgBUF/aiIALQAAIAdBf2oiBy0AAEcEQCAFIQAMCAsgBEEBaiEEIAAgA00NByAAIQUgByAOSw0ACwwGCwJAIB0gB0ECdGooAgAiACAaTA0AIAAgEGoiBykAACAFKQAAUg0AIAVBCGogB0EIaiALIA8gDhAgQQhqIQQgFSAAayAZayEGIAUgA00EQCAFIQAMBwsDQCAFQX9qIgAtAAAgB0F/aiIHLQAARwRAIAUhAAwICyAEQQFqIQQgACADTQ0HIAAhBSAHIBZLDQALDAYLIAYgCk0NAQwCCyAGIApLDQELIB8gIUECdGooAgAiACAaTA0BIAAgEGoiBygAACAFKAAARw0BIAAgGWohBgwCCyAGIAxqIgcoAAAgBSgAAEYNAQsgBSADa0EIdSAFakEBaiEFDAMLIAVBAWoiACATQQgQHiEEIAAgHEEIEB4hCCAUIARBAnRqIgQoAgAhCSAEIBI2AgACQCAJIApLBEAgCSAMaiIIKQAAIAApAABSDQEgBUEJaiAIQQhqIAsQHUEIaiEEIAAgCGshBiAJIApMIAAgA01yDQIDQCAAQX9qIgUtAAAgCEF/aiIILQAARw0DIARBAWohBCAFIANNBEAgBSEADAQLIAUhACAIIA5LDQALDAILIB0gCEECdGooAgAiCSAaTA0AIAkgEGoiCCkAACAAKQAAUg0AIAVBCWogCEEIaiALIA8gDhAgQQhqIQQgEiAJayAZayEGIAAgA00NAQNAIABBf2oiBS0AACAIQX9qIggtAABHDQIgBEEBaiEEIAUgA00EQCAFIQAMAwsgBSEAIAggFksNAAsMAQsgB0EEaiEAIAVBBGohBCAGIApJBEAgBCAAIAsgDyAOECBBBGohBCAVIAZrIQYgBSADTQRAIAUhAAwCCyAHIBZNBEAgBSEADAILA0AgBUF/aiIALQAAIAdBf2oiBy0AAEcEQCAFIQAMAwsgBEEBaiEEIAAgA00NAiAAIQUgByAWSw0ACwwBCyAEIAAgCxAdQQRqIQQgBSAHayEGIAUgA00EQCAFIQAMAQsgByAOTQRAIAUhAAwBCwNAIAVBf2oiAC0AACAHQX9qIgctAABHBEAgBSEADAILIARBAWohBCAAIANNDQEgACEFIAcgDksNAAsLIARBfWohByAAIANrIQkgASgCDCEFAkACQCAAIBFNBEAgBSADEBwgASgCDCEIIAlBEE0EQCABIAggCWo2AgwMAwsgCEEQaiADQRBqIgUQHCAIQSBqIANBIGoQHCAJQTFIDQEgCCAJaiESIAhBMGohAwNAIAMgBUEgaiIIEBwgA0EQaiAFQTBqEBwgCCEFIANBIGoiAyASSQ0ACwwBCyAFIAMgACARECILIAEgASgCDCAJajYCDCAJQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyABKAIEIgMgBkEDajYCACADIAk7AQQgB0GAgARPBEAgAUECNgIkIAEgAyABKAIAa0EDdTYCKAsgDSEIIAYhDQsgAyAHOwEGIAEgA0EIajYCBCAAIARqIgMgG0sEQCADIQUMAQsgFCAMIBVBAmoiAGoiBCATQQgQHkECdGogADYCACAUIANBfmoiBSATQQgQHkECdGogBSAMazYCACAYIAQgF0EFEB5BAnRqIAA2AgAgGCADQX9qIgAgF0EFEB5BAnRqIAAgDGs2AgAgDSEEIAghAANAAkAgACENIAQhACAKIAMgDGsiBSANayIEQX9zakEDSQ0AIAQgICAMIAQgCkkiCBtqIgQoAAAgAygAAEcNACADQQRqIARBBGogCyAPIAsgCBsgDhAgIgZBAWohCCABKAIMIQQCQCADIBFNBEAgBCADEBwMAQsgBCADIAMgERAiCyABKAIEIgRBATYCACAEQQA7AQQgCEGAgARPBEAgAUECNgIkIAEgBCABKAIAa0EDdTYCKAsgBCAIOwEGIAEgBEEIajYCBCAYIAMgF0EFEB5BAnRqIAU2AgAgFCADIBNBCBAeQQJ0aiAFNgIAIA0hBCAAIQggBkEEaiADaiIDIQUgAyAbTQ0BDAILCyANIQggACENIAMhBQsgBSAbSQ0ACwsgAiAINgIEIAIgDTYCACALIANrDwsgAiAINgIEIAIgDTYCACAKIANrC+42ARN/An8CQAJAAkACQCAAKAKEAUF7ag4DAwIBAAsgAigCACIIIAIoAgQiB0EAIAcgAyAAKAIEIg0gAyANayAEaiIFQQEgACgCdHQiBmsgACgCDCIJIAUgCWsgBksbIg5qIhIgA0ZqIgUgEmsiBksiCRsgCCAGSyIGGyEXQQAgCCAGGyEIQQAgByAJGyEHIAUgAyAEaiIEQXhqIhVJBEAgACgCeCETIAAoAnwhECAAKAIoIRQgACgCICERIARBYGohDwNAIAUgEEEIEB4hACAUIAUgE0EEEB5BAnRqIgYoAgAhCyARIABBAnRqIgAoAgAhDCAGIAUgDWsiFjYCACAAIBY2AgACQAJAIAhFIAVBAWoiACAIaygAACAAKAAAR3JFBEAgBUEFaiIFIAUgCGsgBBAdIgtBAWohCiAAIANrIQkgASgCDCEFAkACQCAAIA9NBEAgBSAD
EBwgASgCDCEGIAlBEE0EQCABIAYgCWo2AgwMAwsgBkEQaiADQRBqIgUQHCAGQSBqIANBIGoQHCAJQTFIDQEgBiAJaiEMIAZBMGohAwNAIAMgBUEgaiIGEBwgA0EQaiAFQTBqEBwgBiEFIANBIGoiAyAMSQ0ACwwBCyAFIAMgACAPECILIAEgASgCDCAJajYCDCAJQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyALQQRqIQYgASgCBCIDQQE2AgAgAyAJOwEEIApBgIAESQ0BIAFBAjYCJCABIAMgASgCAGtBA3U2AigMAQsCQAJAAkACQAJAIAwgDksEQCAMIA1qIgopAAAgBSkAAFINASAFQQhqIApBCGogBBAdQQhqIQYgBSAKayEJIAUgA00EQCAFIQAMBgsgDCAOTARAIAUhAAwGCwNAIAVBf2oiAC0AACAKQX9qIgotAABHBEAgBSEADAcLIAZBAWohBiAAIANNDQYgACEFIAogEksNAAsMBQsgCyAOSw0BDAILIAsgDk0NAQsgCyANaiIKKAAAIAUoAABGDQELIAUgA2tBCHUgBWpBAWohBQwDCyARIAAgEEEIEB5BAnRqIgcoAgAhDCAHIBZBAWo2AgACQCAMIA5NDQAgDCANaiIHKQAAIAApAABSDQAgBUEJaiAHQQhqIAQQHUEIaiEGIAAgB2shCSAMIA5MIAAgA01yDQEDQCAAQX9qIgUtAAAgB0F/aiIHLQAARw0CIAZBAWohBiAFIANNBEAgBSEADAMLIAUhACAHIBJLDQALDAELIAVBBGogCkEEaiAEEB1BBGohBiAFIAprIQkgBSADTQRAIAUhAAwBCyALIA5MBEAgBSEADAELA0AgBUF/aiIALQAAIApBf2oiCi0AAEcEQCAFIQAMAgsgBkEBaiEGIAAgA00NASAAIQUgCiASSw0ACwsgBkF9aiEKIAAgA2shCyABKAIMIQUCQAJAIAAgD00EQCAFIAMQHCABKAIMIQcgC0EQTQRAIAEgByALajYCDAwDCyAHQRBqIANBEGoiBRAcIAdBIGogA0EgahAcIAtBMUgNASAHIAtqIQwgB0EwaiEDA0AgAyAFQSBqIgcQHCADQRBqIAVBMGoQHCAHIQUgA0EgaiIDIAxJDQALDAELIAUgAyAAIA8QIgsgASABKAIMIAtqNgIMIAtBgIAESQ0AIAFBATYCJCABIAEoAgQgASgCAGtBA3U2AigLIAEoAgQiAyAJQQNqNgIAIAMgCzsBBCAKQYCABE8EQCABQQI2AiQgASADIAEoAgBrQQN1NgIoCyAIIQcgCSEICyADIAo7AQYgASADQQhqNgIEIAAgBmoiAyAVSwRAIAMhBQwBCyARIA0gFkECaiIAaiIFIBBBCBAeQQJ0aiAANgIAIBEgA0F+aiIGIBBBCBAeQQJ0aiAGIA1rNgIAIBQgBSATQQQQHkECdGogADYCACAUIANBf2oiACATQQQQHkECdGogACANazYCACAIIQYgByEAA0ACQCAAIQggBiEAIAhFIAMoAAAgAyAIaygAAEdyDQAgA0EEaiIFIAUgCGsgBBAdIQkgFCADIBNBBBAeQQJ0aiADIA1rIgU2AgAgESADIBBBCBAeQQJ0aiAFNgIAIAlBAWohByABKAIMIQUCQCADIA9NBEAgBSADEBwMAQsgBSADIAMgDxAiCyABKAIEIgVBATYCACAFQQA7AQQgB0GAgARPBEAgAUECNgIkIAEgBSABKAIAa0EDdTYCKAsgBSAHOwEGIAEgBUEIajYCBCAIIQYgACEHIAlBBGogA2oiAyEFIAMgFU0NAQwCCwsgCCEHIAAhCCADIQULIAUgFUkNAAsLIAIgCCAXIAgbNgIAIAcgFyAHGyEIIAJBBGoMAwsgAigCACIIIAIoAgQiB0EAIAcgAyAAKAIEIg0gAyANayAEaiIFQQEgACgCdHQiBmsgACgCDCIJIAUgCWsgBksbIg5qIhIgA0ZqIgUgEmsiBksiCRsgCCAGSyIGGyEXQQAgCCAGGyEIQQAgByAJGyEHIAUgAyAEaiIEQXhqIhVJBEAgACgCeCETIAAoAnwhECAAKAIoIRQgACgCICERIARBYGohDwNAIAUgEEEIEB4hACAUIAUgE0EHEB5BAnRqIgYoAgAhCyARIABBAnRqIgAoAgAhDCAGIAUgDWsiFjYCACAAIBY2AgACQAJAIAhFIAVBAWoiACAIaygAACAAKAAAR3JFBEAgBUEFaiIFIAUgCGsgBBAdIgtBAWohCiAAIANrIQkgASgCDCEFAkACQCAAIA9NBEAgBSADEBwgASgCDCEGIAlBEE0EQCABIAYgCWo2AgwMAwsgBkEQaiADQRBqIgUQHCAGQSBqIANBIGoQHCAJQTFIDQEgBiAJaiEMIAZBMGohAwNAIAMgBUEgaiIGEBwgA0EQaiAFQTBqEBwgBiEFIANBIGoiAyAMSQ0ACwwBCyAFIAMgACAPECILIAEgASgCDCAJajYCDCAJQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyALQQRqIQYgASgCBCIDQQE2AgAgAyAJOwEEIApBgIAESQ0BIAFBAjYCJCABIAMgASgCAGtBA3U2AigMAQsCQAJAAkACQAJAIAwgDksEQCAMIA1qIgopAAAgBSkAAFINASAFQQhqIApBCGogBBAdQQhqIQYgBSAKayEJIAUgA00EQCAFIQAMBgsgDCAOTARAIAUhAAwGCwNAIAVBf2oiAC0AACAKQX9qIgotAABHBEAgBSEADAcLIAZBAWohBiAAIANNDQYgACEFIAogEksNAAsMBQsgCyAOSw0BDAILIAsgDk0NAQsgCyANaiIKKAAAIAUoAABGDQELIAUgA2tBCHUgBWpBAWohBQwDCyARIAAgEEEIEB5BAnRqIgcoAgAhDCAHIBZBAWo2AgACQCAMIA5NDQAgDCANaiIHKQAAIAApAABSDQAgBUEJaiAHQQhqIAQQHUEIaiEGIAAgB2shCSAMIA5MIAAgA01yDQEDQCAAQX9qIgUtAAAgB0F/aiIHLQAARw0CIAZBAWohBiAFIANNBEAgBSEADAMLIAUhACAHIBJLDQALDAELIAVBBGogCkEEaiAEEB1BBGohBiAFIAprIQkgBSADTQRAIAUhAAwBCyALIA5MBEAgBSEADAELA0AgBUF/aiIALQAAIApBf2oiCi0AAEcEQCAFIQAMAgsgBkEBaiEGIAAgA00NASAAIQUgCiASSw0ACwsgBkF9aiEKIAAgA2shCyABKAIMIQUCQAJAIAAgD00EQCAFIAMQHCABKAIMIQcgC0EQTQRAIAEgByALajYCDAwDCyAHQRBqIANBEGoiBRAcIAdBIGogA0EgahAcIAtBMUgNASAHIAtqIQwgB0EwaiEDA0AgAyAFQSBqIgcQHCADQRBqIAVBMGoQHCAHIQUgA0EgaiIDIAxJDQALDAELIAUgAyAAIA8QIgsgASABKAIMIAtqNgIMIAtBgIAESQ0AIAFBATYCJCABIAE
oAgQgASgCAGtBA3U2AigLIAEoAgQiAyAJQQNqNgIAIAMgCzsBBCAKQYCABE8EQCABQQI2AiQgASADIAEoAgBrQQN1NgIoCyAIIQcgCSEICyADIAo7AQYgASADQQhqNgIEIAAgBmoiAyAVSwRAIAMhBQwBCyARIA0gFkECaiIAaiIFIBBBCBAeQQJ0aiAANgIAIBEgA0F+aiIGIBBBCBAeQQJ0aiAGIA1rNgIAIBQgBSATQQcQHkECdGogADYCACAUIANBf2oiACATQQcQHkECdGogACANazYCACAIIQYgByEAA0ACQCAAIQggBiEAIAhFIAMoAAAgAyAIaygAAEdyDQAgA0EEaiIFIAUgCGsgBBAdIQkgFCADIBNBBxAeQQJ0aiADIA1rIgU2AgAgESADIBBBCBAeQQJ0aiAFNgIAIAlBAWohByABKAIMIQUCQCADIA9NBEAgBSADEBwMAQsgBSADIAMgDxAiCyABKAIEIgVBATYCACAFQQA7AQQgB0GAgARPBEAgAUECNgIkIAEgBSABKAIAa0EDdTYCKAsgBSAHOwEGIAEgBUEIajYCBCAIIQYgACEHIAlBBGogA2oiAyEFIAMgFU0NAQwCCwsgCCEHIAAhCCADIQULIAUgFUkNAAsLIAIgCCAXIAgbNgIAIAcgFyAHGyEIIAJBBGoMAgsgAigCACIIIAIoAgQiB0EAIAcgAyAAKAIEIg0gAyANayAEaiIFQQEgACgCdHQiBmsgACgCDCIJIAUgCWsgBksbIg5qIhIgA0ZqIgUgEmsiBksiCRsgCCAGSyIGGyEXQQAgCCAGGyEIQQAgByAJGyEHIAUgAyAEaiIEQXhqIhVJBEAgACgCeCETIAAoAnwhECAAKAIoIRQgACgCICERIARBYGohDwNAIAUgEEEIEB4hACAUIAUgE0EGEB5BAnRqIgYoAgAhCyARIABBAnRqIgAoAgAhDCAGIAUgDWsiFjYCACAAIBY2AgACQAJAIAhFIAVBAWoiACAIaygAACAAKAAAR3JFBEAgBUEFaiIFIAUgCGsgBBAdIgtBAWohCiAAIANrIQkgASgCDCEFAkACQCAAIA9NBEAgBSADEBwgASgCDCEGIAlBEE0EQCABIAYgCWo2AgwMAwsgBkEQaiADQRBqIgUQHCAGQSBqIANBIGoQHCAJQTFIDQEgBiAJaiEMIAZBMGohAwNAIAMgBUEgaiIGEBwgA0EQaiAFQTBqEBwgBiEFIANBIGoiAyAMSQ0ACwwBCyAFIAMgACAPECILIAEgASgCDCAJajYCDCAJQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyALQQRqIQYgASgCBCIDQQE2AgAgAyAJOwEEIApBgIAESQ0BIAFBAjYCJCABIAMgASgCAGtBA3U2AigMAQsCQAJAAkACQAJAIAwgDksEQCAMIA1qIgopAAAgBSkAAFINASAFQQhqIApBCGogBBAdQQhqIQYgBSAKayEJIAUgA00EQCAFIQAMBgsgDCAOTARAIAUhAAwGCwNAIAVBf2oiAC0AACAKQX9qIgotAABHBEAgBSEADAcLIAZBAWohBiAAIANNDQYgACEFIAogEksNAAsMBQsgCyAOSw0BDAILIAsgDk0NAQsgCyANaiIKKAAAIAUoAABGDQELIAUgA2tBCHUgBWpBAWohBQwDCyARIAAgEEEIEB5BAnRqIgcoAgAhDCAHIBZBAWo2AgACQCAMIA5NDQAgDCANaiIHKQAAIAApAABSDQAgBUEJaiAHQQhqIAQQHUEIaiEGIAAgB2shCSAMIA5MIAAgA01yDQEDQCAAQX9qIgUtAAAgB0F/aiIHLQAARw0CIAZBAWohBiAFIANNBEAgBSEADAMLIAUhACAHIBJLDQALDAELIAVBBGogCkEEaiAEEB1BBGohBiAFIAprIQkgBSADTQRAIAUhAAwBCyALIA5MBEAgBSEADAELA0AgBUF/aiIALQAAIApBf2oiCi0AAEcEQCAFIQAMAgsgBkEBaiEGIAAgA00NASAAIQUgCiASSw0ACwsgBkF9aiEKIAAgA2shCyABKAIMIQUCQAJAIAAgD00EQCAFIAMQHCABKAIMIQcgC0EQTQRAIAEgByALajYCDAwDCyAHQRBqIANBEGoiBRAcIAdBIGogA0EgahAcIAtBMUgNASAHIAtqIQwgB0EwaiEDA0AgAyAFQSBqIgcQHCADQRBqIAVBMGoQHCAHIQUgA0EgaiIDIAxJDQALDAELIAUgAyAAIA8QIgsgASABKAIMIAtqNgIMIAtBgIAESQ0AIAFBATYCJCABIAEoAgQgASgCAGtBA3U2AigLIAEoAgQiAyAJQQNqNgIAIAMgCzsBBCAKQYCABE8EQCABQQI2AiQgASADIAEoAgBrQQN1NgIoCyAIIQcgCSEICyADIAo7AQYgASADQQhqNgIEIAAgBmoiAyAVSwRAIAMhBQwBCyARIA0gFkECaiIAaiIFIBBBCBAeQQJ0aiAANgIAIBEgA0F+aiIGIBBBCBAeQQJ0aiAGIA1rNgIAIBQgBSATQQYQHkECdGogADYCACAUIANBf2oiACATQQYQHkECdGogACANazYCACAIIQYgByEAA0ACQCAAIQggBiEAIAhFIAMoAAAgAyAIaygAAEdyDQAgA0EEaiIFIAUgCGsgBBAdIQkgFCADIBNBBhAeQQJ0aiADIA1rIgU2AgAgESADIBBBCBAeQQJ0aiAFNgIAIAlBAWohByABKAIMIQUCQCADIA9NBEAgBSADEBwMAQsgBSADIAMgDxAiCyABKAIEIgVBATYCACAFQQA7AQQgB0GAgARPBEAgAUECNgIkIAEgBSABKAIAa0EDdTYCKAsgBSAHOwEGIAEgBUEIajYCBCAIIQYgACEHIAlBBGogA2oiAyEFIAMgFU0NAQwCCwsgCCEHIAAhCCADIQULIAUgFUkNAAsLIAIgCCAXIAgbNgIAIAcgFyAHGyEIIAJBBGoMAQsgAigCACIIIAIoAgQiB0EAIAcgAyAAKAIEIg0gAyANayAEaiIFQQEgACgCdHQiBmsgACgCDCIJIAUgCWsgBksbIg5qIhIgA0ZqIgUgEmsiBksiCRsgCCAGSyIGGyEXQQAgCCAGGyEIQQAgByAJGyEHIAUgAyAEaiIEQXhqIhVJBEAgACgCeCETIAAoAnwhECAAKAIoIRQgACgCICERIARBYGohDwNAIAUgEEEIEB4hACAUIAUgE0EFEB5BAnRqIgYoAgAhCyARIABBAnRqIgAoAgAhDCAGIAUgDWsiFjYCACAAIBY2AgACQAJAIAhFIAVBAWoiACAIaygAACAAKAAAR3JFBEAgBUEFaiIFIAUgCGsgBBAdIgtBAWohCiAAIANrIQkgASgCDCEFAkACQCAAIA9NBEAgBSADEBwgASgCDCEGIAlBEE0EQCABIAYgCWo2AgwMAwsgBkEQaiADQRBqIgUQHCAGQSBqIANBIGoQHCAJQTFIDQEgBiAJaiEMIAZBMGohAwNAIAMgBUEgaiIGEBwgA0EQai
AFQTBqEBwgBiEFIANBIGoiAyAMSQ0ACwwBCyAFIAMgACAPECILIAEgASgCDCAJajYCDCAJQYCABEkNACABQQE2AiQgASABKAIEIAEoAgBrQQN1NgIoCyALQQRqIQYgASgCBCIDQQE2AgAgAyAJOwEEIApBgIAESQ0BIAFBAjYCJCABIAMgASgCAGtBA3U2AigMAQsCQAJAAkACQAJAIAwgDksEQCAMIA1qIgopAAAgBSkAAFINASAFQQhqIApBCGogBBAdQQhqIQYgBSAKayEJIAUgA00EQCAFIQAMBgsgDCAOTARAIAUhAAwGCwNAIAVBf2oiAC0AACAKQX9qIgotAABHBEAgBSEADAcLIAZBAWohBiAAIANNDQYgACEFIAogEksNAAsMBQsgCyAOSw0BDAILIAsgDk0NAQsgCyANaiIKKAAAIAUoAABGDQELIAUgA2tBCHUgBWpBAWohBQwDCyARIAAgEEEIEB5BAnRqIgcoAgAhDCAHIBZBAWo2AgACQCAMIA5NDQAgDCANaiIHKQAAIAApAABSDQAgBUEJaiAHQQhqIAQQHUEIaiEGIAAgB2shCSAMIA5MIAAgA01yDQEDQCAAQX9qIgUtAAAgB0F/aiIHLQAARw0CIAZBAWohBiAFIANNBEAgBSEADAMLIAUhACAHIBJLDQALDAELIAVBBGogCkEEaiAEEB1BBGohBiAFIAprIQkgBSADTQRAIAUhAAwBCyALIA5MBEAgBSEADAELA0AgBUF/aiIALQAAIApBf2oiCi0AAEcEQCAFIQAMAgsgBkEBaiEGIAAgA00NASAAIQUgCiASSw0ACwsgBkF9aiEKIAAgA2shCyABKAIMIQUCQAJAIAAgD00EQCAFIAMQHCABKAIMIQcgC0EQTQRAIAEgByALajYCDAwDCyAHQRBqIANBEGoiBRAcIAdBIGogA0EgahAcIAtBMUgNASAHIAtqIQwgB0EwaiEDA0AgAyAFQSBqIgcQHCADQRBqIAVBMGoQHCAHIQUgA0EgaiIDIAxJDQALDAELIAUgAyAAIA8QIgsgASABKAIMIAtqNgIMIAtBgIAESQ0AIAFBATYCJCABIAEoAgQgASgCAGtBA3U2AigLIAEoAgQiAyAJQQNqNgIAIAMgCzsBBCAKQYCABE8EQCABQQI2AiQgASADIAEoAgBrQQN1NgIoCyAIIQcgCSEICyADIAo7AQYgASADQQhqNgIEIAAgBmoiAyAVSwRAIAMhBQwBCyARIA0gFkECaiIAaiIFIBBBCBAeQQJ0aiAANgIAIBEgA0F+aiIGIBBBCBAeQQJ0aiAGIA1rNgIAIBQgBSATQQUQHkECdGogADYCACAUIANBf2oiACATQQUQHkECdGogACANazYCACAIIQYgByEAA0ACQCAAIQggBiEAIAhFIAMoAAAgAyAIaygAAEdyDQAgA0EEaiIFIAUgCGsgBBAdIQkgFCADIBNBBRAeQQJ0aiADIA1rIgU2AgAgESADIBBBCBAeQQJ0aiAFNgIAIAlBAWohByABKAIMIQUCQCADIA9NBEAgBSADEBwMAQsgBSADIAMgDxAiCyABKAIEIgVBATYCACAFQQA7AQQgB0GAgARPBEAgAUECNgIkIAEgBSABKAIAa0EDdTYCKAsgBSAHOwEGIAEgBUEIajYCBCAIIQYgACEHIAlBBGogA2oiAyEFIAMgFU0NAQwCCwsgCCEHIAAhCCADIQULIAUgFUkNAAsLIAIgCCAXIAgbNgIAIAcgFyAHGyEIIAJBBGoLIAg2AgAgBCADawuMAQEIfyAAKAIEIgQgACgCGGoiAkECaiABQXhqIgFNBEAgACgCeCEFIAAoAoQBIQYgACgCfCEHIAAoAighCCAAKAIgIQADQCACIAdBCBAeIQMgCCACIAUgBhAeQQJ0aiACIARrIgk2AgAgACADQQJ0aiAJNgIAIAJBBWohAyACQQNqIQIgAyABTQ0ACwsLgwUBAn8jAEHQAGsiCyQAQbp/IQwgC0E4aiAAIAEQ/wEQIUUEQCALQShqIAIgAyAJQX9qIgBqIgItAAAQYyALQRhqIAQgACAFaiIBLQAAEGMgC0EIaiAGIAAgB2oiBC0AABBjIAtBOGogCCAAQQN0aiIALwEEIAQtAABBAnRBsKcBaigCABBHIAtBOGoQOSALQThqIAAvAQYgAi0AAEECdEGQpAFqKAIAEEcgC0E4ahA5AkAgCgRAIAEtAAAiASABQRggAUEYSRsiAmsiAQRAIAtBOGogACgCACABEEcgC0E4ahA5CyALQThqIAAoAgAgAXYgAhBHDAELIAtBOGogACgCACABLQAAEEcLIAtBOGoQOSAJQQJPBEAgCUF+aiEMA0AgByAMai0AACECIAMgDGotAAAhBCALQThqIAtBGGogBSAMai0AACIAEGwgC0E4aiALQShqIAQQbCALQThqEDkgC0E4aiALQQhqIAIQbCALQThqEDkgC0E4aiAIIAxBA3RqIgEvAQQgAkECdEGwpwFqKAIAIgIQRyACIARBAnRBkKQBaigCACICakEZTwRAIAtBOGoQOQsgC0E4aiABLwEGIAIQRyALQThqEDkCQCAKBEAgACAAQRggAEEYSRsiAmsiAARAIAtBOGogASgCACAAEEcgC0E4ahA5CyALQThqIAEoAgAgAHYgAhBHDAELIAtBOGogASgCACAAEEcLIAtBOGoQOSAMQX9qIgwgCUkNAAsLIAtBOGogCygCKCALKAI0EHQgC0E4aiALKAIYIAsoAiQQdCALQThqIAsoAgggCygCFBB0IAtBOGoQ/QEiAEG6fyAAGyEMCyALQdAAaiQAIAwLLwAgACACQQN0aigCBCIAQRB2QQFqIgJBCHRBfyABdCAAayACQRB0akEIdCABdmsLTwEEfwNAIANBASAAIARBAnRqKAIAIgNBCHQiBSACbiIGIAUgAkkbIAYgAxtBAnRBkJwBaigCACADbGohAyAEQQFqIgQgAU0NAAsgA0EIdgtKAQF/IwBB8ARrIgQkACAEIAMgAiABEKcBIgMgACACIAEQpgEiAhAhRQRAIARB8ABqQYAEIAQgASADEKgBIQILIARB8ARqJAAgAguKAQEIfyMAQRBrIgMkACADIAAQc0F/IQUCQCAALwACIAJJDQAgAygCDCIHQQh0QYACaiEIIAMoAgghCUEAIQADQCAJIAcgABDJAyEGIAEgAEECdGooAgAiCgRAIAYgCE8NAiAGIApsIARqIQQLIABBAWoiACACTQ0ACyAEQQh2IQULIANBEGokACAFC18BAn9BCCABayEFQQAhAQNAIARBASAAIAFBAXRqLwEAIgQgBEH//wNGG0EQdEEQdSAFdEECdEGQnAFqKAIAIAIgAUECdGooAgBsaiEEIAFBAWoiASADTQ0ACyAEQQh2C2wBAX8CQAJAAkACQCACQf8fS0ECQQEgAkEfSxtqIgNBf2oOAwABAgMLIAAgAkEDdEEBcjoAAAwCCyAAIAJBBHRBBXJB9f8DcRAvDAELIAAgAkEEdEENchBNCyAAI
ANqIAEtAAA6AAAgA0EBagtBACAALQAAQQJHBEAgAkEANgIAIANBADYCACABQQA2AgAPCyABIAAoAAQ2AgAgAyAAKAAINgIAIAIgACgADDYCAAuLAQEBfyMAQSBrIgEkACAAQQBBmAYQKCIAQQA2AqADIABBADYCnAMgAEEANgKYAyABQRBqEOABIAEgASkDGDcDCCABIAEpAxA3AwAgACABEN8BNgIIIAAoAugFRQRAIAAQ9gEgAEEMaiIABEAgAEEAQfgAECgiAEEBNgIgIABBAzYCLAsLIAFBIGokAAtOACAAIAFB+AAQKiIAIAIoAhg2AhwgACACKQIQNwIUIAAgAikCCDcCDCAAIAIpAgA3AgQgACACKQIcNwIgIAAgAigCJDYCKCAAQQM2AiwLqQEBAn8jAEHQAWsiBiQAIAZBqAFqIgcgBSAERSAEaq0Q9QMgB0EBNgIcIAdCADcCICAGIAYpA7ABNwMQIAYgBikDuAE3AxggBiAGKQPAATcDICAGIAYpA8gBNwMoIAYgBikDqAE3AwggBkEwaiAAQQxqIAZBCGoQ0QMgACAGQTBqIAStEN4DIgUQIQR/IAUFIAAgASACIAMgBBDxAwshACAGQdABaiQAIAALJwECfyAAKAIQIgEgACgCDCICSQRAIAFBACACIAFrECgaCyAAEO0BCyYAIAAQ5QEgAEEANgJwIABBADYCSCAAQQA2AhQgACAAKAIMNgIYC2IBA38jAEEgayICJAAgARB7IAJBFGogAkEcaiACQRhqEM8DQYjsASACKAIUIgMQTCIENgIAIAEQeyAEIAMQowIgAkEIaiADQYjsASgCABDbASAAIAJBCGoQ2gEgAkEgaiQACzQAIABBADYCICAAIAE2AhAgACABNgIIIAAgATYCACAAIAEgAmo2AgQgABDmASAAQQA2AhwLQwECfkIBIQIgAFBFBEBC48iVvcub741PIQEDQEIBIAEgAEIBg1AbIAJ+IQIgASABfiEBIABCAYgiAEIAUg0ACwsgAgvEAgEDfyACKAIYQQFHBEBBBCACKAIEdCEFCyACKAIIIQYgAigCEEEDRgRAIAIoAgAiBEERIARBEUkbIQQLIANBAUYEQCAAQoGAgIAQNwIMIABCADcCBCAAQQE2AgAgARDuAQsgACAENgIcIAAQ1AMgASABKAIINgIMIAAgAUEEIAZ0EJ4BNgIgIAAgASAFEJ4BNgIoIAAgAUEEIAR0QQAgBBsQngE2AiQgASgCGEUEQCABENMDIAIoAhhBB08EQCAAIAFBgAgQVTYCLCAAIAFBkAEQVTYCMCAAIAFB1AEQVTYCNCAAIAFBgAEQVTYCOCAAIAFBiIACEFU2AjwgAEFAayABQZyABxBVNgIACyAAIAIpAgA3AnQgACACKAIYNgKMASAAIAIpAhA3AoQBIAAgAikCCDcCfEFAQQAgASgCGBsPC0FACzQAIABBADYCgAggAEHoI2pChICAgIABNwIAIABB4CNqQoCAgIAQNwIAIABB2CNqQgA3AgALLAECf0EBQQAgACgCBCIBIAAoAghrIgIgAiABSxt0QQggAXRqQQAgACgCABsLhQEBA38gACgCGCIBQQFHBEBBBCAAKAIEdCEDCyAAKAIIIQICfwJAIAAoAhBBA0YEQEGIjAlBACABQQZLGyEBQQQgAnQhAkGAgCAgACgCACIAQRFPDQIaIABFDQFBBCAAdAwCC0GIjAlBACABQQZLGyEBQQQgAnQhAgtBAAsgASADaiACamoLlQEBAn8gACABNgIUIAAoAgghBSAAKAIMIgRFBEAgAEHAADYCDEHAACEECyADQQdPBEAgACACIAQgBCACSRs2AgwLIAAoAgQiBEUEQCAAIAFBeWoiAkEGIAJBBksbIgQ2AgQLIAAoAhBFBEAgAEEAIAEgBGsiAiACIAFLGzYCEAsgACAFQQMgBRsiACAEIAAgBEkbNgIIC/AIAhB/AX4jAEHQAGsiBSQAIABBATYCuAMgAUHUAGohBiABKAJUBEAgBiABKAIEIAEoAhggASgCHBDcAyAAIAEoAmBBf2qtENcDNwOIBAsgASgCFCEIIAE1AgQhEyABQQRqIgkQ2wMhDiAFIAYpAhA3A0ggBUFAayAGKQIINwMAIAUgBikCADcDOAJ/QgEgE4YiEyACIBMgAlQbpyIEQQEgBBsiBEGAgAggBEGAgAhJGyILIQRBACAFKAI4RQ0AGiAEIAUoAkRuCyEMIAUgACgCwAQ2AjAgBSAAKQK4BDcDKCAFIABBsARqIg8pAgA3AyAgBSgCICAFKAIka0GAgID4eUshByAAQYACaiIEIgMgAygCDCADKAIUQQAQ5AEEfyADKAIcQQFqBUEACzYCHCAAKAKkAyENIAUgBikCEDcDGCAFIAYpAgg3AxAgBSAGKQIANwMIIAVBCGoQ2gMhAyAEKAIAIAAoAoQCEOcBIRACQAJ/QQAgBCIKKAIMIAQoAhQgAyAMQQxsIhEgDiALQSBqIhIgC0EDQQQgCEEDRhtuIghBC2xqampqQfj9AEHg9wAgDRtqIgMQ5AFFDQAaIAooAhxBgAFKCyAQIANJcgRAIA0EQEFAIQMMAgsgBCAAKAKYAyAAKAKcAyAAKAKgAxCkAQJ/IAQhByAAKAKcAxpBQCADIAAoApgDIAAoAqADEIcCIgpFDQAaIAcgCiADENYDQQALIgMQIQ0BIAAgBEHwIxCfASIDNgKoBCADRQRAQUAhAwwCCyAAIARB8CMQnwEiAzYCrAQgA0UEQEFAIQMMAgsgACAEQYAwEJ8BNgLABUEBIQdBQCEDIAAoAqwERQ0BCyAEEOYBIABBhAFqIAFB+AAQKhogACAJKAIYNgK8BSAAIAkpAhA3ArQFIAAgCSkCCDcCrAUgACAJKQIANwKkBSAAQgA3A7ACIAAgAkIBfDcDqAIgAEIANwO4AiACQn9RBEAgAEEANgKkAQsgACALNgKkAiAAQcACahCGAiAAQQA2AvwBIABBATYCACAAKAKoBBDZAyAEIBIQYCEDIABBADYCyAUgACALNgLcAyAAIAM2AsQDIARBABBgIQMgAEEANgLcBSAAIAM2AsQFIAAgBEEAEGA2AtgFIAYoAgAiCgRAIAAgBEEBIAEoAlggASgCXGt0IgMQYCIGNgKABCAGQQAgAxAoGgsCQCAAIgMoAgBBAUcNACADKALYAQ0AIANCADcDmAQgA0IANwOgBAsgACAINgLYAyAAIAQgCBBgNgLMAyAAIAQgCBBgNgLQAyAAIAQgCBBgNgLUAyAAIAQgCEEDdBBVNgK8AyAPIAQgCSAHENgDIgNBACADECEiBxshAyAHIApFcg0AIAAgBEEIIAEoAlh0IgEQVSIHNgL8A0EAIQMgB0EAIAEQKBogBCAREFUhASAAIAw2ApQEIAAgATYCkAQgAEIANwPoAyAAQgA3A/ADIABBADYC+AMgAEHoA2oQ5QELIAVB0ABqJAAgAwtMAQF/IwBBgAFrIgMkACADQQhqIAFB+AAQKhoCQCAAIANBCGogAhDdAyIBECEN
AEEAIQFBABAhDQAgAEEANgL8AQsgA0GAAWokACABC7MFAQZ/IAFBEG0hCCABQRBOBEADQCAAIAZBAnQiBWoiAUEAIAJBACABKAIAIgFBAUYbIAFqIgEgAmsiAyADIAFLGzYCACAAIAVBBHJqIgFBACACQQAgASgCACIDQQFGGyADaiIDIAJrIgQgBCADSxs2AgAgAUEAIAJBACABKAIEIgFBAUYbIAFqIgEgAmsiAyADIAFLGzYCBCAAIAVBDHJqIgFBACACQQAgASgCACIDQQFGGyADaiIDIAJrIgQgBCADSxs2AgAgAUEAIAJBACABKAIEIgNBAUYbIANqIgMgAmsiBCAEIANLGzYCBCABQQAgAkEAIAEoAggiA0EBRhsgA2oiAyACayIEIAQgA0sbNgIIIAFBACACQQAgASgCDCIBQQFGGyABaiIBIAJrIgMgAyABSxs2AgwgACAFQRxyaiIBQQAgAkEAIAEoAgAiA0EBRhsgA2oiAyACayIEIAQgA0sbNgIAIAFBACACQQAgASgCBCIDQQFGGyADaiIDIAJrIgQgBCADSxs2AgQgAUEAIAJBACABKAIIIgNBAUYbIANqIgMgAmsiBCAEIANLGzYCCCABQQAgAkEAIAEoAgwiA0EBRhsgA2oiAyACayIEIAQgA0sbNgIMIAFBACACQQAgASgCECIDQQFGGyADaiIDIAJrIgQgBCADSxs2AhAgAUEAIAJBACABKAIUIgNBAUYbIANqIgMgAmsiBCAEIANLGzYCFCABQQAgAkEAIAEoAhgiA0EBRhsgA2oiAyACayIEIAQgA0sbNgIYIAFBACACQQAgASgCHCIBQQFGGyABaiIBIAJrIgMgAyABSxs2AhwgACAFQTxyaiIBQQAgAkEAIAEoAgAiAUEBRhsgAWoiASACayIFIAUgAUsbNgIAIAZBEGohBiAHQQFqIgcgCEcNAAsLC8sDAQV/IwBBEGsiCSQAIAcgAhDpASENIAEgAEGECBAqIQoCfyADBEAgBCAFIAYgBxCdAQwBC0EGQT8gACgCgAgiAUECRhsgB08EQCAEIAUgBiAHEJ0BDAELQbp/IAdB//8AS0EEQQMgB0H/B0sbaiILIAVPDQAaIAJBBEkgB0GBCElxIQwgCSABNgIMIAUgC2shAyAEIAtqIQICfyALQQNGIAFBAkZxIAdBgAJJciIBBEAgAiADIAYgB0EAIAggCiAJQQxqIAwQ+AEMAQsgAiADIAYgB0EBIAggCiAJQQxqIAwQ+AELIQMgCSgCDCECIAMQISADRSADIAcgDWtPcnIEQCAKIABBhAgQKhogBCAFIAYgBxCdAQwBCyADQQFGBEAgCiAAQYQIECoaIAQgBiAHEM4DDAELIAJFBEAgCkEBNgKACAtBA0ECIAIbIQACQAJAAkACQCALQX1qDgMAAQIDCyAEIAdBBHRBBEEAIAEbciAAckEEcyADQQ50ahCjAQwCCyAEIAdBBHQgAHJBCHIgA0ESdGoQTQwBCyAEIAdBBHQgAHJBDHIgA0EWdGoQTSAEIANBCnY6AAQLIAMgC2oLIQAgCUEQaiQAIAALMwEBfwJAAkACQCAAKAJAQX9qDgICAAELQQEPCyAAKAIcQQFHDQAgACgCGEEARyEBCyABC/8GARJ/IwBB8AFrIggkACADKAIEIRUgACgCFCENIAAoAhAhDiAAKAIYIQ8gACgCBCEJIAAoAgAhEwJAIAEgAiADKAIcIhAgAxDhAyAEIAUgACgCCCIDIAAoAgwgA2sgBhDgAyIDECEiBw0AIAMgBGohCkG6fyEDIAQgBWoiCyAEIAogBxsiB2tBBEgNAAJ/IAkgE2siA0EDdSIFQf8ATQRAIAcgBToAACAHQQFqDAELIAVB//0BTQRAIAcgBToAASAHIAVBCHZBgAFzOgAAIAdBAmoMAQsgB0H/AToAACAHQQFqIAVBgIJ+akH//wNxEC8gB0EDagshCiACQYQIaiERIANFBEAgESABQYQIakHgGxAqGiAKIARrIQMMAQsgABDzAyAIQSM2AgwgCEEQaiAIQQxqIA4gBSAGEIMBIQMgAkHgI2oiByABQeAjaigCADYCACAKQQFqIgAgCyAAayACQbQZaiIWQQkgByAIQRBqIAgoAgwiByADIAVBCSABQbQZaiIDQZCaAUEGQQEgEBCiASIUIAhBEGogByAOIAVBkJoBQQZBIyADQaQKIAYQoQEiAxAhIgcNACAIQR82AgwgCEEQaiAIQQxqIA8gBSAGEIMBIQwgCCgCDCEJIAJB2CNqIhIgAUHYI2ooAgA2AgAgACAAIANqIAcbIgcgCyAHayARQQggEiAIQRBqIAkgDCAFQQggAUGECGoiA0HgmgFBBSAJQR1JIBAQogEiDCAIQRBqIAkgDyAFQeCaAUEFQRwgA0GEBiAGEKEBIgMQISIJDQAgCEE0NgIMIAhBEGogCEEMaiANIAUgBhCDASESIAJB3CNqIhcgAUHcI2ooAgA2AgAgByADIAdqIAkbIgkgCyAJayACQYgOaiIYQQkgFyAIQRBqIAgoAgwiAiASIAVBCSABQYgOaiIDQaCbAUEGQQEgEBCiASIBIAhBEGogAiANIAVBoJsBQQZBNCADQawLIAYQoQEiAxAhIgINACAKIAxBBHQgFEEGdGogAUECdGo6AAAgCSADIAlqIAIbIgYgCyAGayAYIA0gESAPIBYgDiATIAUgFUEZSxDIAyIDECENACADIAZqIQUgByAAQQAgFEECRhsgDEECRhsiACAJIAIbIAAgAUECRhsiAARAQQAhAyAFIABrQQRIDQELIAUgBGshAwsgCEHwAWokACADC6kCAQx/IwBBIGsiBiQAAkAgBEEUdiAEQf//P3FBAEdqIg5FDQAgAyAEaiELQQEgAigCFHQhDCABKAIIIQUDQCAFIAEoAgxPDQEgBiAAKAIQNgIYIAYgACkCCDcDECAGIAApAgA3AwggCyADIAlBFHRqIgRBgIBAayALIARrQYCAwABJGyIHIARrIQ0gBkEIaiAHEO8BBEAgAigCBCEPIAAgDCAEEK0DIRAgACgCFEEBIA90IBAQrAMLIAAgByAMEKsDIAAgASACIAQgDRCqAyIEECEEQCAEIQgMAgsCfyAFIAEoAggiB0kEQCABKAIAIAVBDGxqIgUgBSgCBCAKajYCBCAEDAELIAogDWoLIQogByEFIAlBAWoiCSAORw0ACwsgBkEgaiQAIAgLNAECf0G6fyEFIANBA2oiBiABTQR/IAAgA0EDdCAEahCjASAAQQNqIAIgAxAqGiAGBSAFCwshACABIABrIAMoAgAgAmpLBEAgA0EANgIAIARBADYCAAsLPgECf0EBIQIgAUECTwR/IAAtAAAhAwJAA0AgAyAAIAJqLQAARw0BIAJBAWoiAiABRw0AC0EBDwtBAAUgAgsLTwEBfwJAIAAgASACIAMgBCAFIAcQ4gMiAEUgBiAFTUEAIABBun9GG3IEfyAIBSAAECFFDQEgAAsPCyAAQQAgACAGIAYgAygCHBDpAWtJGwuEAwE
PfyAAKAKwAyEJIABBvANqIgcoAgQiASAHKAIAIgprIgQEQCAAKAKsAyAJQRRsaiELIAogAWsiASAEIAEgBEobQQN2IARBfyAEQX9KGyIBQQEgAUEBSBtsIgFBASABQQFLGyEMIAcoAighDQNAIAsgA0EUbGoiASAKIANBA3RqIgUoAgAiAjYCBCABIAUvAQQiBjYCCCABIAUvAQYiCEEDaiIFNgIMAkAgAyANRw0AAkACQCAHKAIkQX9qDgIAAQILIAEgBkGAgARyIgY2AggMAQsgASAIQYOABGoiBTYCDAsCQCABAn8gAkEDTQRAIAEgAiAGRWoiCDYCECABIAsgAyACayIOIAMgDiACQQNGG0F/aiAGGyICQRRsakEEaiACQX9zQQJ0QdCwAWogAkF/ShsoAgAiAjYCBCAIQQRHDQIgAkF/agwBCyACQX1qCzYCBAsgASAGIA9qIgE2AgAgASAFaiEPIANBAWoiAyAMRw0ACwsgACAEQQN1IAlqNgKwAwurAwEHfyMAQRBrIgUkACACQQZLBEAgAEG8A2oiBxDyASAAIAAoAqgEIgY2ApgFIAAgACgCxAE2ApwFIAEgACgCtARrIgQgACgCyAQiA0GAA2pLBEAgACAEIAQgA2tBgH1qIgRBwAEgBEHAAUkbazYCyAQLIABBsARqIgQQ7AEhCCAAKAKsBCIDIAYoAuQjNgLkIyADQegjaiAGQegjaigCADYCACADQewjaiAGQewjaigCADYCACADQeQjaiEDIAchBgJAIAEgAmoCfyAAKAKcBCAAKAKgBEkEQCAAQZgEaiAEIAcgAyABIAIQ6wEMAQsgAEHYAWoiCSgCAARAIAVCADcCBCAFIAAoApAENgIAIAUgACgClAQ2AgwgAEHoA2ogBSAJIAEgAhDjAyIDECENAiAFIAQgByAAKAKsBEHkI2ogASACEOsBDAELIAQgByADIAEgAiAAKAKgASAIEPMBEQIACyIAayEBIAYoAgwgASAAECoaIAYgBigCDCAAajYCDEEAIQMLIAVBEGokACADDwsgAEGYBGogAiAAKAKYARDqASAFQRBqJABBAQvrAQECfwJAAkACQEEBIAAgAyAEEOkDIgVBAUZBAnQgBRAhGw4FAAICAgECCyAAKAKoAwRAIAAQ6ANBAA8LIABBvANqIAAoAqgEIAAoAqwEIABBhAFqIAEgAiAEIAAoAsAFEOcDIgZBGEsNACAAKAK4Aw0AIAMgBBDmA0UNACABIAMtAAA6AABBASEGCyAGECEhAiAAKAKoBCEBAkAgBkECSQRAIAEhBQwBCyACBEAgASEFDAELIAAoAqwEIQUgACABNgKsBCAAIAU2AqgECyAFQdgjaigCAEECRgRAIAVBATYC2CMLIAYhBQsgBQtrAQJ/IAAoAiBBASABKAIMdCACEKABAkAgASgCHCIEQQFGDQBBASABKAIIdCEBIAAoAighAyAEQQZGBEAgAyABIAIQ3wMMAQsgAyABIAIQoAELIAAoAhwiAQRAIAAoAiRBASABdCACEKABCwtSAQF/IAAgACgCBCIEIAMgBGsiAyACayADQX8gAXRBf3NxayIBajYCBCAAIAAoAgggAWo2AgggACAAKAIQIAFrNgIQIAAgACgCDCABazYCDCABC5cBAQF/IwBBIGsiBSQAIAUgACgCEDYCGCAFIAApAgg3AxAgBSAAKQIANwMIIAVBCGogBBDvAQRAIAAgAigCCCACKAIcEPQBQQEgAigCBHQgAxDsAyEDIAEQ7gEgACACIAMQ6wMgARDtASAAQQA2AnAgAEEANgIUIABBACAAKAIYIgAgA2siASABIABLGzYCGAsgBUEgaiQAC/ECAQ1/IAAoAogBIQUgACgCpAIhByAAKAKoAQRAIABBwAJqIAMgBBCFAgsgAEGEAWohDEEBIAV0IQ0gAEGgBWohDiAAQcQEaiEPIABBgAJqIRAgAEGwBGohESABIQUCQANAIAJBBkkEQEG6fw8LIBEgECAMIAMgAyAEIAcgBCAHSRsiCGoiChDtAyAAKAK0BCAKIA0gDyAOEOUDIAAoAsgEIAAoAsAEIglJBEAgACAJNgLIBAsgACAFQQNqIAJBfWogAyAIEOoDIgYQIQ0BIAcgBE8hBwJAAn8CQAJAAkAgBg4CAAECCyAFIAIgAyAIIAcQ5AMiBhAhRQ0DDAULQQIhCyAHIQkgCEEDdAwBCyAGQQN0IQlBBCELIAcLIQMgBSADIAlyIAtyEKMBIAZBA2ohBgsgAEEANgK4AyACIAZrIQIgBSAGaiEFIAohAyAEIAgiB2siBA0ACyAFIAFLBEAgAEEDNgIACyAFIAFrIQYLIAYLrgEBA39BRCEDIAEhBSABIQQCQAJAAkACQCAAKAIADgQDAAECAQsgASACIABBhAFqQgBBABDxASIDECENAiAAQQI2AgAgASADaiEFIAIgA2shAgtBun8hAyACQQRJDQEgBUEBEE0gAkF9aiECIAVBA2ohBAsgACgCqAEEQEG6fyEDIAJBBEkNASAEIABBwAJqEIQCpxBNIARBBGohBAsgAEEANgIAIAQgAWshAwsgAwvtAQICfwF+QUQhBgJAAkACQAJAIAAoAgAOAgMAAQsgASACIABBhAFqIAApA6gCQn98IAAoAvwBEPEBIgUQIQ0BIABBAjYCACABIAVqIQEgAiAFayECCyAERQ0AIABBsARqIAMgBBDwAUUEQCAAIAAoArwENgLIBAsgACgC2AEEQCAAQegDaiADIAQQ8AEaCyAAIAEgAiADIAQQ7gMiBhAhDQEgACAAKQOwAiAErXwiBzcDsAIgACAAKQO4AiAFIAZqIgGtfDcDuAJBuH8gASAHQgF8IAApA6gCIgdWGyABIAdCAFIbDwsgBSEGCyAGC1sBAX4gACABIAIgAyAEEPADIgMQIQRAIAMPCyAAIAEgA2ogAiADaxDvAyIBECEEQCABDwsCfyAAKQOoAiIFUEUEQEG4fyAFIAApA7ACQgF8Ug0BGgsgASADagsLkAEBA38gACEBAkACQCAAQQNxRQ0AIAAtAABFBEBBAA8LA0AgAUEBaiIBQQNxRQ0BIAEtAAANAAsMAQsDQCABIgJBBGohASACKAIAIgNBf3MgA0H//ft3anFBgIGChHhxRQ0ACyADQf8BcUUEQCACIABrDwsDQCACLQABIQMgAkEBaiIBIQIgAw0ACwsgASAAawviAQEIfyAAKAIUIQMgACgCECEEIAAoAgQiAiAAKAIAIgVrIgEEQCAAKAIYIQYgBSACayICIAEgAiABShtBA3YgAUF/IAFBf0obIgFBASABQQFIG2wiAUEBIAFBAUsbIQdBACEBA0AgBSABQQN0aiICLwEGIQggASAEaiACLwEEEIABOgAAIAEgBmogAigCABAkOgAAIAEgA2ogCBA8OgAAIAFBAWoiASAHRw0ACwsgACgCJCIBQQFGBH8gBCAAKAIoakEjOgAAIAAoAiQFIAELQQJGBEAgAyAAKAIoakE0OgAACwvJAQEDfwJAQn8gAiACUBsiAk
KAgICAAloEQCABKAIAIQQMAQtBBiEDIAKnIgRBwABPBEAgBEF/ahAkQQFqIQMLIAEoAgAiBCADTQ0AIAEgAzYCACADIQQLIAEoAgggBEEBaiIDSwRAIAEgAzYCCAsgBCABKAIEIgUgASgCGBD0ASIDSQRAIAEgBCAFaiADazYCBAsgBEEJTQRAIAFBCjYCAAsgACABKQIANwIAIAAgASgCGDYCGCAAIAEpAhA3AhAgACABKQIINwIIC9MBAgJ/AX4jAEFAaiIDJAAgA0J/IAIgAlAbIgVCgYAQVCAFQoGACFRqIAVCgYABVGpBhAVsQRZBACABQQMgARsgAUEASBsgAUEWShtBHGxqIgRBmIUBaigCADYCOCADIARBkIUBaikCADcDMCADIARBiIUBaikCADcDKCADIARBgIUBaikCADcDICABQX9MBEAgA0EAIAFrNgI0CyADIAMoAjg2AhggAyADKQMwNwMQIAMgAykDKDcDCCADIAMpAyA3AwAgACADIAIQ9AMgA0FAayQACyIBAX8CQCABRQ0AIAAoAgAgAUsNACAAKAIEIAFPIQILIAILSwEEfwJAIABFDQAgAEEMaiIBIAAQ9gMhAiABIAAoArAlIgEgAEG0JWooAgAiAyAAQbglaigCACIEEKQBIAINACAAIAEgAyAEEGQLCzQBAn8gAEEBQQEQWyAAEDkgACgCDCICIAAoAhBJBH8gAiAAKAIIayAAKAIEQQBHagUgAQsLJAAgACABNgIMIAAgATYCCCAAQgA3AgAgACABIAJqQXxqNgIQC/UBAQV/AkAgAUERSSADQQxJcg0AIABBBmoiByABQXpqIAIgA0EDakECdiIGIAQQcSIFECEEQCAFDwsgBUUNACAAIAVB//8DcRAvIAUgB2oiBSAAIAFqIgcgBWsgAiAGaiIIIAYgBBBxIgEQIQRAIAEPCyABRQ0AIABBAmogAUH//wNxEC8gASAFaiIFIAcgBWsgBiAIaiIIIAYgBBBxIgEQIQRAIAEPCyABRQ0AIABBBGogAUH//wNxEC8gASAFaiIFIAcgBWsgBiAIaiIBIAIgA2ogAWsgBBBxIgEQIQRAIAEPCyABRQ0AIAEgBWogAGshCQsgCQtGAQN/IAJBAEgEQEEBDwsDQCAEIAEgA0ECdCIFaigCAEEARyAAIAVqLQACRXFyIQQgAiADRyEFIANBAWohAyAFDQALIARFCyoBAX8jAEEQayIAJAAgAEEANgIMQZTpASgCAEG/EkEAELkBIABBEGokAAv4BgEHfyMAQUBqIgckAAJAIAAgAUEDdGoiBC0AByIFIAJNBEAgBSECDAELIARBB2ohBkEBIAUgAmsiCXQhCEEAIQQgBSEDA0AgBiACOgAAIAQgCGpBfyAFIANrdGohBCAAIAFBf2oiAUEDdGoiA0EHaiEGIAMtAAciAyACSw0ACwNAIANB/wFxIAJHRQRAIAAgAUF/aiIBQQN0ai0AByEDDAELCyAHQvDhw4ePnrz4cDcDMCAHQvDhw4ePnrz4cDcDKCAHQvDhw4ePnrz4cDcDICAHQvDhw4ePnrz4cDcDGCAHQvDhw4ePnrz4cDcDECAHQvDhw4ePnrz4cDcDCCAHQvDhw4ePnrz4cDcDACAEIAl1IQUCQCABQX9MDQAgAiEGIAEhBANAIAYgA0H/AXEiA0sEQCAHIAIgA2tBAnRqIAQ2AgAgAyEGCyAEQQFIDQEgACAEQX9qIgRBA3RqLQAHIQMMAAsACyAFQQBKBEADQAJAAkAgBRAkQQFqIgRBAkkEQCAEIQMMAQsgByAEQQJ0aigCACEIA0ACQCAHIARBf2oiBkECdGooAgAhCSAIQfDhw4d/RwRAIAlB8OHDh39GDQEgACAIQQN0aigCACAAIAlBA3RqKAIAQQF0TQ0BC0EBIQMgCSEIIAYiBEEBSw0BDAILCyAEIgNBDEsNAQsDQAJAIAcgA0ECdGooAgBB8OHDh39HBEAgAyEEDAELQQ0hBCADQQFqIgNBDUcNAQsLIAcgBEF/aiIGQQJ0aigCACEJCyAHIARBAnRqIggoAgAhAyAJQfDhw4d/RgRAIAcgBkECdGogAzYCAAtBfyAGdCAFaiEFIAAgA0EDdGoiBiAGLQAHQQFqOgAHIAggAwR/IAggA0F/aiIDNgIAIANB8OHDh38gACADQQN0ai0AByACIARrRhsFQfDhw4d/CzYCACAFQQBKDQALCyAFQX9KDQAgBygCBCEEA0AgBUF/IAVBf0obIQYgBSEDA0AgBEHw4cOHf0YEQCABIQQDQCAEIgFBf2ohBCAAIAFBA3RqLQAHIAJGDQALIAAgAUEBaiIEQQN0aiIGIAYtAAdBf2o6AAcgA0EBaiEFIANBfkoNAwwCCyAAIARBAWoiBEEDdGoiBSAFLQAHQX9qOgAHIAMgBkchBSADQQFqIQMgBQ0ACwsLIAdBQGskACACC74CAQd/IwBBgAJrIgQkACAEQQBBgAIQKCEFA0AgBSABIANBAnRqKAIAQQFqECRBA3RqIgQgBCgCAEEBajYCACADQQFqIgMgAk0NAAtBHiEDIAUoAvABIQQDQCAFIANBf2oiA0EDdGoiByAHKAIAIARqIgQ2AgAgAw0AC0EAIQMDQCAFIANBA3RqIgQgBCgCADYCBCADQQFqIgNBIEcNAAsDQCABIAZBAnRqKAIAIghBAWoQJEEDdCAFaiIEIgNBDGogAygCDCIDQQFqNgIAAkAgAyAEKAIIIgRNDQADQCAIIAAgA0F/aiIHQQN0aiIJKAIATQ0BIAAgA0EDdGogCSkCADcCACAHIgMgBEsNAAsgBCEDCyAAIANBA3RqIgMgBjoABiADIAg2AgAgBkEBaiIGIAJNDQALIAVBgAJqJAAL4wYBDH8jAEFAaiIHJABBfyEFAkACQAJAIARBA3ENAEFSIQUgAkH/AUsNACADQQsgAxshDCAEQQBBgCAQKCEIIARBCGoiBiABIAIQ/gMgAiEDA0AgAyIFQX9qIQMgBiAFQQN0aigCACIBRQ0ACyAIIAEgBiADQQN0aiIBKAIAajYCiBAgAUGAAjsBBCAGIAVBA3RqQYACOwEEIAVB/wFqIgpBgAJNDQEgBUF+aiEDQYECIQEDQCAGIAFBA3RqQYCAgIAENgIAIAFBAWoiASAKTQ0ACyAIQYCAgIB4NgIAQYACIQFBgQIhCEGBAiEEA0AgBiAIQQN0aiAGIAMgBiADQQN0aigCACIJIAYgAUEDdGooAgAiC0kiDWsiCCABIAkgC09qIgkgBiAIQQN0aigCACILIAYgCUEDdGooAgAiDkkiDxtBA3RqIhAoAgAgBiADIAEgDRtBA3RqIgEoAgBqNgIAIBAgBDsBBCABIAQ7AQQgCSALIA5PaiEBIAggD2shAyAKIARBAWoiBEH//wNxIghPDQALDAILIAdBQGskACAFDwsgCEGAgICAeDYCAAtBACEDIAYgCkEDdGpBADoAByAFQf4BaiIBQYACTwRAA0AgBiABQQN0aiIEIAYgBC8BBEEDdGotAAdBAWo6AAcgA
UF/aiIBQf8BSw0ACwsDQCAGIANBA3RqIgEgBiABLwEEQQN0ai0AB0EBajoAByADQQFqIgMgBU0NAAsgBiAFIAwQ/QMhBEEAIQMgB0EAOwE4IAdCADcDMCAHQgA3AyggB0IANwMgIAdBADsBGCAHQgA3AxAgB0IANwMIIAdCADcDAEF/IQEgBEEMTQRAA0AgB0EgaiAGIANBA3RqLQAHQQF0aiIBIAEvAQBBAWo7AQAgA0EBaiIDIAVNDQALIAQEQEEAIQUgBCEDA0AgByADQQF0IgFqIAU7AQAgB0EgaiABai8BACAFakH+/wNxQQF2IQUgA0F/aiIDDQALC0EAIQVBACEDA0AgACAGIANBA3RqIgEtAAZBAnRqIAEtAAc6AAIgA0EBaiIDIAJNDQALA0AgByAAIAVBAnRqIgEtAAJBAXRqIgMgAy8BACIDQQFqOwEAIAEgAzsBACAFQQFqIgUgAk0NAAsgBCEBCyAHQUBrJAAgAQvdAgEFfyMAQZACayIGJABBUiEFAkAgA0H/AUsNACAGQQA6AIMCQQEhBSAEQQFqIghBAUsEQANAIAZBgwJqIAVqIAggBWs6AAAgBCAFRiEJIAVBAWohBSAJRQ0ACwsCfyADBEADQCAGIAdqIAIgB0ECdGotAAIgBkGDAmpqLQAAOgAAIAdBAWoiByADRw0ACyAAQQFqIAFBf2ogBiADEPoBDAELIABBAWogAUF/aiAGQQAQ+gELIgUQIQ0AIAVBAkkgBSADQQF2T3JFBEAgACAFOgAAIAVBAWohBQwBC0F/IQUgA0GAAUsNAEG6fyEFIANBAWpBAXYiAiABTw0AIAJBAWohBSAAIANB/wBqOgAAQQAhByADIAZqQQA6AAAgA0UNAANAIAdBAXYgAGogBiAHQQFyai0AACAGIAdqLQAAQQR0ajoAASAHQQJqIgcgA0kNAAsLIAZBkAJqJAAgBQt/AQR/IwBBkARrIgQkACAEQf8BNgIIAkAgBEEQaiAEQQhqIARBDGogASACEGsiBhAhBEAgBiEFDAELQVQhBSAEKAIMIgdBBksNACADIARBEGogBCgCCCAHEIMEIgUQIQ0AIAAgASAGaiACIAZrIAMQggQhBQsgBEGQBGokACAFC+8FAQN/IwBBMGsiBCQAAkAgAy8BAgRAIARBGGogASACEEUiARAhDQEgBEEQaiAEQRhqIAMQggEgBEEIaiAEQRhqIAMQggFBACEBAkAgBEEYahAjBEBBACEDDAELA0AgACABaiICIARBEGogBEEYahBiOgAAIAIgBEEIaiAEQRhqEGI6AAEgBEEYahAjBEAgAUECciEDDAILIAIgBEEQaiAEQRhqEGI6AAIgAiAEQQhqIARBGGoQYjoAAyABQQRqIQMgBEEYahAjIQIgAUH3AUsNASADIQEgAkUNAAsLAn8DQEG6fyEBIANB/QFLDQMgACADaiICIARBEGogBEEYahBiOgAAIAIiBkEBaiEFIARBGGoQI0EDRgRAQQIhAyAEQQhqDAILIANB/AFLDQMgBiAEQQhqIARBGGoQYjoAASADQQJqIQMgBEEYahAjQQNHDQALIAAgA2ohBUEDIQMgBEEQagshASAFIAEgBEEYahBiOgAAIAIgA2ogAGshAQwBCyAEQRhqIAEgAhBFIgEQIQ0AIARBEGogBEEYaiADEIIBIARBCGogBEEYaiADEIIBQQAhAQJAIARBGGoQIwRAQQAhAwwBCwNAIAAgAWoiAiAEQRBqIARBGGoQYToAACACIARBCGogBEEYahBhOgABIARBGGoQIwRAIAFBAnIhAwwCCyACIARBEGogBEEYahBhOgACIAIgBEEIaiAEQRhqEGE6AAMgAUEEaiEDIARBGGoQIyECIAFB9wFLDQEgAyEBIAJFDQALCwJ/A0BBun8hASADQf0BSw0CIAAgA2oiAiAEQRBqIARBGGoQYToAACACIgZBAWohBSAEQRhqECNBA0YEQEECIQMgBEEIagwCCyADQfwBSw0CIAYgBEEIaiAEQRhqEGE6AAEgA0ECaiEDIARBGGoQI0EDRw0ACyAAIANqIQVBAyEDIARBEGoLIQEgBSABIARBGGoQYToAACACIANqIABrIQELIARBMGokACABC68DAQp/IwBBgARrIgkkAEFSIQUCQCACQf8BSw0AIABBBGohCkGAgAQgA0F/anRBEHUhC0EBIAN0IghBf2oiDCEHQQEhBQNAAkAgASAEQQF0Ig1qLwEAIgZB//8DRgRAIAogB0ECdGogBDoAAiAHQX9qIQdBASEGDAELIAVBACALIAZBEHRBEHVKGyEFCyAJIA1qIAY7AQAgAiAERyEGIARBAWohBCAGDQALIAAgBTsBAiAAIAM7AQAgCEEDdiAIQQF2akEDaiEGQQAhBEEAIQUDQCABIAVBAXRqLgEAIgBBAU4EQCAAQf//A3EiAEEBIABBAUsbIQtBACEAA0AgCiAEQQJ0aiAFOgACA0AgBCAGaiAMcSIEIAdLDQALIABBAWoiACALRw0ACwsgAiAFRyEAIAVBAWohBSAADQALQX8hBSAEDQAgCEEBIAhBAUsbIQJBACEFQQAhBANAIAkgCiAEQQJ0aiIALQACQQF0aiIBIAEvAQAiAUEBajsBACAAIAMgARAkayIHOgADIAAgASAHdCAIazsBACAEQQFqIgQgAkcNAAsLIAlBgARqJAAgBQsjAQF/IAAgACgCBCIBQQFqNgIEIAAgACgCAEEBIAF0cjYCAAtZAQF/IAAgAC0ASiIBQX9qIAFyOgBKIAAoAgAiAUEIcQRAIAAgAUEgcjYCAEF/DwsgAEIANwIEIAAgACgCLCIBNgIcIAAgATYCFCAAIAEgACgCMGo2AhBBAAuzAgECfyMAQUBqIgYkAAJAIANBA0kNACAGQShqIAAgARD/ARAhDQAgAiADakF/aiIALQAAIQECQCADQQFxBEAgBkEYaiAEIAEQYyAGQQhqIAQgAEF/ai0AABBjIAZBKGogBkEYaiAAQX5qIgMtAAAQbCAFBEAgBkEoahD+AQwCCyAGQShqEDkMAQsgBkEIaiAEIAEQYyAGQRhqIAQgAEF/aiIDLQAAEGMLIAMgAksEQANAIAZBKGogBkEIaiADQX9qLQAAEGwgBkEoaiAGQRhqIANBfmoiAy0AABBsAkAgBQRAIAZBKGoQ/gEMAQsgBkEoahA5CyADIAJLDQALCyAGQShqIAYoAgggBigCFBB0IAZBKGogBigCGCAGKAIkEHQgBkEoahD9ASEHCyAGQUBrJAAgBwskACAAQQA2AQQgAEEAOwEAIAAgATsBAiAAIAFBA3RqQgA3AggLzgQCBn8EfiADQQNsIAFBAWp2IQggAyABdiEKA0ACQCACIAVBAnRqKAIAIgZFBEAgACAFQQF0akEAOwEADAELAkACQCAGIApNBEAgACAFQQF0akH//wM7AQAMAQsgACAFQQF0aiEJIAYgCEsNASAJQQE7AQALIAMgBmshAyAHQQFqIQcMAQsgCUH+/wM7AQALIAVBAWoiBSAETQ0A
CwJAAkBBASABdCIJIAdrIgZFDQAgAyAGbiAISwRAIANBA2wgBkEBdG4hBkEAIQUDQAJAIAAgBUEBdGoiCC8BAEH+/wNHDQAgAiAFQQJ0aigCACIKIAZLDQAgCEEBOwEAIAMgCmshAyAHQQFqIQcLIAVBAWoiBSAETQ0ACyAJIAdrIQYLIAcgBEEBaiIHRgRAQQAhBUEAIQFBACEDA0AgAiAFQQJ0aigCACIHIAEgByABSyIHGyEBIAUgAyAHGyEDIAVBAWoiBSAETQ0ACyAAIANBAXRqIgAgAC8BACAGajsBAAwBCyADRQRAQQAhAiAGRQ0CQQAhBQNAIAAgBUEBdGoiAS4BACIDQQFOBEAgASADQQFqOwEAIAZBf2ohBgsgBUEBaiAHcCEFIAYNAAsMAgsgBq1BPiABa60iC4ZCfyALQn98hkJ/hSIMfCADrYAhDUEAIQUDQCAAIAVBAXRqIgEvAQBB/v8DRgRAIAwgC4ghDiANIAIgBUECdGo1AgB+IAx8IgwgC4inIA6nayIDRQRAQX8PCyABIAM7AQALIAVBAWoiBSAETQ0ACwtBACECCyACC0QBAX9BfyEFIARBA3EEfyAFBSABKAIAQf4BTQRAIAAgASACIANBASAEEIMCDwsgAUH/ATYCACAAIAEgAiADIAQQgwELC1gBAX8jAEEQayIEJAACf0EBIAAgASAEQQxqEMAERQ0AGkECIAMoAgAgBCgCDEkNABpBASAAIAEgAhChBEUNABogAyAEKAIMNgIAQQALIQAgBEEQaiQAIAALiQIBA38CQAJAIAAoAhwiAygCNCIERQRAQQEhBSADIAAoAihBASADKAIkdEEBIAAoAiARAQAiBDYCNCAERQ0BCyADKAIoIgBFBEAgA0IANwIsIANBASADKAIkdCIANgIoCyAAIAJNBEAgBCABIABrIAAQKhogA0EANgIwDAILIAQgAygCMCIFaiABIAJrIAIgACAFayIAIAAgAksbIgAQKhogAiAAayICBEAgAygCNCABIAJrIAIQKhogAyACNgIwDAILQQAhBSADQQAgAygCMCAAaiIBIAEgAygCKCICRhs2AjAgAygCLCIBIAJPDQAgAyAAIAFqNgIsCyAFDwsgAyADKAIoNgIsQQALsjcBHX8jAEEQayISJABBfiEUAkAgAEUNACAAKAIcIgFFDQAgACgCDCIORQ0AIAAoAgAiBkUEQCAAKAIEDQELIAEoAgAiAkELRgRAIAFBDDYCAEEMIQILIAFB2ABqIRsgAUHwBWohFyABQfAAaiEZIAFB1ABqIRogAUHsAGohGCABQbAKaiEWIAEoAjwhBCABKAI4IQUgACgCBCIcIQcgACgCECIMIRMCQANAAkBBfCEUQQEhAwJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAn8CQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAIAIOHwgJCg0QAwIBABobHBwdHh8gIQclJgY3BTknKARFLkYvCyABKAIQIQMMGAsgASgCECEDDBYLIAEoAhAhAwwUCyABKAIQIQMMEgsgASgCCCEJDCQLIAEoAkghCQwyCyABKAJIIQkMLwsgASgCaCEJDBwLIAEoAggiA0UNISAEQRBJBEADQCAHRQ08IAdBf2ohByAGLQAAIAR0IAVqIQUgBEEISSECIARBCGohBCAGQQFqIQYgAg0ACwsgA0ECcUUgBUGflgJHckUEQEEAIQUgAUEAQQBBABA1IgM2AhggEkGflgI7AAwgAyASQQxqQQIQNSEDIAFBATYCACABIAM2AhhBACEEIAEoAgAhAgw8CyABQQA2AhAgASgCICICBEAgAkF/NgIwCwJAIANBAXEEQCAFQQh0QYD+A3EgBUEIdmpBH3BFDQELIABBnu8ANgIYIAFBHTYCACABKAIAIQIMPAsgBUEPcUEIRwRAIABBte8ANgIYIAFBHTYCACABKAIAIQIMPAsgBUEEdiIDQQ9xIghBCGohAiABKAIkIglFBEAgASACNgIkDDoLIAIgCU0NOSAEQXxqIQQgAEHQ7wA2AhggAUEdNgIAIAMhBSABKAIAIQIMOwsgBEEQSQRAA0AgB0UNOyAHQX9qIQcgBi0AACAEdCAFaiEFIARBCEkhAyAEQQhqIQQgBkEBaiEGIAMNAAsLIAEgBTYCECAFQf8BcUEIRwRAIABBte8ANgIYIAFBHTYCACABKAIAIQIMOwsgBUGAwANxBEAgAEHk7wA2AhggAUEdNgIAIAEoAgAhAgw7CyABKAIgIgMEQCADIAVBCHZBAXE2AgALIAVBgARxBEAgEiAFOwAMIAEgASgCGCASQQxqQQIQNTYCGAsgAUECNgIAQQAhBEEAIQUMAQsgBEEfSw0BCyAGIQIDQCAHRQRAQQAhByACIQYgDyEDDDsLIAdBf2ohByACLQAAIAR0IAVqIQUgBEEYSSEDIARBCGohBCACQQFqIgYhAiADDQALCyABKAIgIgMEQCADIAU2AgQLIAEtABFBAnEEQCASIAU2AAwgASABKAIYIBJBDGpBBBA1NgIYCyABQQM2AgBBACEEQQAhBQwBCyAEQQ9LDQELIAYhAgNAIAdFBEBBACEHIAIhBiAPIQMMOAsgB0F/aiEHIAItAAAgBHQgBWohBSAEQQhJIQMgBEEIaiEEIAJBAWoiBiECIAMNAAsLIAEoAiAiCQRAIAkgBUEIdjYCDCAJIAVB/wFxNgIICyABKAIQIgNBgARxBEAgEiAFOwAMIAEgASgCGCASQQxqQQIQNTYCGAsgAUEENgIAQQAhBEEAIQVBACICIANBgAhxRQ0BGgwDCyABKAIQIgNBgAhxDQEgASgCICEJIAQLIQQgCQRAIAlBADYCEAsMAwsgBSECIARBD0sNAQsDQCAHRQRAQQAhByACIQUgDyEDDDMLIAdBf2ohByAGLQAAIAR0IAJqIQIgBEEISSEFIARBCGohBCAGQQFqIgghBiAFDQALIAghBiACIQULIAEgBTYCQCABKAIgIgIEQCACIAU2AhQLQQAhBCADQYAEcQRAIBIgBTsADCABIAEoAhggEkEMakECEDU2AhgLQQAhBQsgAUEFNgIACwJAIANBgAhxRQ0AIAcgASgCQCICIAIgB0sbIggEQAJAIAEoAiAiCUUNACAJKAIQIgpFDQAgCiAJKAIUIAJrIgNqIAYgCSgCGCICIANrIAggAyAIaiACSxsQKhogASgCECEDCyADQYAEcQRAIAEgASgCGCAGIAgQNTYCGAsgASABKAJAIAhrIgI2AkAgByAIayEHIAYgCGohBgsgAkUNACAPIQMMLwsgAUEGNgIAIAFBADYCQAsCQCADQYAQcQRAQQAhAyAHRQ0tA0AgA0EBaiECIAMgBmotAAAhCAJAIAEoAiAiA0UNACADKAIcIgpFDQAgASgCQCIJIAMoAiB
PDQAgASAJQQFqNgJAIAkgCmogCDoAAAsgByACSwRAIAIhAyAIDQELCyABKAIQIgNBgARxBEAgASABKAIYIAYgAhA1NgIYCyACIAZqIQYgByACayEHIAhFDQEgDyEDDC8LIAEoAiAiAkUNACACQQA2AhwLIAFBBzYCACABQQA2AkALAkAgA0GAIHEEQEEAIQMgB0UNLANAIANBAWohAiADIAZqLQAAIQgCQCABKAIgIgNFDQAgAygCJCIKRQ0AIAEoAkAiCSADKAIoTw0AIAEgCUEBajYCQCAJIApqIAg6AAALIAcgAksEQCACIQMgCA0BCwsgASgCECIDQYAEcQRAIAEgASgCGCAGIAIQNTYCGAsgAiAGaiEGIAcgAmshByAIRQ0BIA8hAwwuCyABKAIgIgJFDQAgAkEANgIkCyABQQg2AgALIANBgARxBEAgBEEPTQRAA0AgB0UNLCAHQX9qIQcgBi0AACAEdCAFaiEFIARBCEkhAiAEQQhqIQQgBkEBaiEGIAINAAsLIAUgAS8BGEcNF0EAIQVBACEECyABKAIgIgIEQCACQQE2AjAgAiADQQl2QQFxNgIsCyABQQBBAEEAEDUiAzYCGCAAIAM2AjAgAUELNgIAIAEoAgAhAgwqCyAEQSBJBEADQCAHRQ0qIAdBf2ohByAGLQAAIAR0IAVqIQUgBEEYSSEDIARBCGohBCAGQQFqIQYgAw0ACwsgASAFQQh0QYCA/AdxIAVBGHRyIAVBCHZBgP4DcSAFQRh2cnIiAzYCGCAAIAM2AjAgAUEKNgIAQQAhBUEAIQQLIAEoAgxFBEAgACAMNgIQIAAgDjYCDCAAIAc2AgQgACAGNgIAIAEgBDYCPCABIAU2AjhBAiEUDCsLIAFBAEEAQQAQZSIDNgIYIAAgAzYCMCABQQs2AgALIAEoAgQNFCAEQQJLBH8gBAUgB0UNJyAHQX9qIQcgBi0AACAEdCAFaiEFIAZBAWohBiAEQQhqCyEDIAEgBUEBcTYCBEENIQQCQAJAAkACQCAFQQF2QQNxQQFrDgMAAQIDCyABQaDzADYCTCABQomAgIDQADcCVCABQaCDATYCUEETIQQMAgtBECEEDAELIABBkfAANgIYQR0hBAsgASAENgIAIANBfWohBCAFQQN2IQUgASgCACECDCcLIAUgBEEHcXYhBSAEQXhxIgRBH00EQANAIAdFDScgB0F/aiEHIAYtAAAgBHQgBWohBSAEQRhJIQMgBEEIaiEEIAZBAWohBiADDQALCyAFQf//A3EiAyAFQX9zQRB2RwRAIABBpPAANgIYIAFBHTYCACABKAIAIQIMJwsgAUEONgIAIAEgAzYCQEEAIQVBACEECyABQQ82AgALIAEoAkAiAwRAIAwgByADIAMgB0sbIgMgAyAMSxsiA0UEQCAPIQMMJwsgDiAGIAMQKiECIAEgASgCQCADazYCQCACIANqIQ4gDCADayEMIAMgBmohBiAHIANrIQcgASgCACECDCULIAFBCzYCACABKAIAIQIMJAsgBEEOSQRAA0AgB0UNJCAHQX9qIQcgBi0AACAEdCAFaiEFIARBBkkhAyAEQQhqIQQgBkEBaiEGIAMNAAsLIAEgBUEfcSIDQYECajYCYCABIAVBBXZBH3EiAkEBajYCZCABIAVBCnZBD3FBBGoiCDYCXCAEQXJqIQQgBUEOdiEFIANBHU1BACACQR5JG0UEQCAAQcHwADYCGCABQR02AgAgASgCACECDCQLIAFBETYCAEEAIQIgAUEANgJoDAELIAEoAmgiAiABKAJcIghPDQELIAIhAwNAIARBAk0EQCAHRQ0iIAdBf2ohByAGLQAAIAR0IAVqIQUgBkEBaiEGIARBCGohBAsgASADQQFqIgI2AmggASADQQF0QfDwAGovAQBBAXRqIAVBB3E7AXAgBEF9aiEEIAVBA3YhBSACIQMgAiAISQ0ACwsgAkETSQRAA0AgASACQQF0QfDwAGovAQBBAXRqQQA7AXAgAkEBaiICQRNHDQALIAFBEzYCaAsgAUEHNgJUIAEgFjYCTCABIBY2AmxBACEJQQAgGUETIBggGiAXEKwBIg8EQCAAQZbxADYCGCABQR02AgAgASgCACECDCELIAFBEjYCACABQQA2AmhBACEPCyAJIAEoAmAiHSABKAJkaiIQSQRAQX8gASgCVHRBf3MhFSABKAJMIQ0DQCAEIQogByECIAYhAwJAIAQgDSAFIBVxIhFBAnRqLQABIgtPBEAgBCEIDAELA0AgAkUNCiADLQAAIAp0IQsgA0EBaiEDIAJBf2ohAiAKQQhqIgghCiAIIA0gBSALaiIFIBVxIhFBAnRqLQABIgtJDQALCwJAIA0gEUECdGovAQIiBEEPTQRAIAEgCUEBaiIGNgJoIAEgCUEBdGogBDsBcCAIIAtrIQQgBSALdiEFIAYhCQwBCwJ/An8CQAJAAkAgBEFwag4CAAECCyAIIAtBAmoiBkkEQANAIAJFDSUgAkF/aiECIAMtAAAgCHQgBWohBSADQQFqIQMgCEEIaiIIIAZJDQALCyAIIAtrIQQgBSALdiEIIAlFBEAgAEGv8QA2AhggAUEdNgIAIAMhBiACIQcgCCEFIAEoAgAhAgwnCyAEQX5qIQQgCEECdiEFIAhBA3FBA2ohByAJQQF0IAFqLwFuDAMLIAggC0EDaiIGSQRAA0AgAkUNJCACQX9qIQIgAy0AACAIdCAFaiEFIANBAWohAyAIQQhqIgggBkkNAAsLIAggC2tBfWohBCAFIAt2IgZBA3YhBSAGQQdxQQNqDAELIAggC0EHaiIGSQRAA0AgAkUNIyACQX9qIQIgAy0AACAIdCAFaiEFIANBAWohAyAIQQhqIgggBkkNAAsLIAggC2tBeWohBCAFIAt2IgZBB3YhBSAGQf8AcUELagshB0EACyEGIAcgCWogEEsEQCAAQa/xADYCGCABQR02AgAgAyEGIAIhByABKAIAIQIMIwsDQCABIAlBAXRqIAY7AXAgCUEBaiEJIAdBf2oiBw0ACyABIAk2AmgLIAMhBiACIQcgCSAQSQ0ACwsgAS8B8ARFBEAgAEHJ8QA2AhggAUEdNgIAIAEoAgAhAgwgCyABQQk2AlQgASAWNgJMIAEgFjYCbEEBIBkgHSAYIBogFxCsASIPBEAgAEHu8QA2AhggAUEdNgIAIAEoAgAhAgwgCyABQQY2AlggASABKAJsNgJQQQIgASABKAJgQQF0akHwAGogASgCZCAYIBsgFxCsASIPBEAgAEGK8gA2AhggAUEdNgIAIAEoAgAhAgwgCyABQRM2AgBBACEPCyABQRQ2AgALIAxBggJJIAdBBklyRQRAIAAgDDYCECAAIA42AgwgACAHNgIEIAAgBjYCACABIAQ2AjwgASAFNgI4IAAgExCRBCABKAI8IQQgASgCOCEFIAAoAgQhByAAKAIAIQYgACgCECEMIAAoAgwhDiABKAIAQQtHDRYgAUF/NgLENyABKAIAIQIMHgsgAUEANgLENyAEIQkgByECIAYhAw
JAIAQgASgCTCIQIAVBfyABKAJUdEF/cyINcSILQQJ0ai0AASIKTwRAIAQhCAwBCwNAIAJFDQggAy0AACAJdCEKIANBAWohAyACQX9qIQIgCUEIaiIIIQkgCCAQIAUgCmoiBSANcSILQQJ0ai0AASIKSQ0ACwsgCiEEIBAgC0ECdGoiBi8BAiERIAYtAAAiDUUgDUHwAXFyDQ0gAiEHIAMhBgJAIAQgECAFQX8gBCANanRBf3MiFXEgBHYgEWoiDUECdGotAAEiCmogCCIJTQRAIAghCwwBCwNAIAdFDQcgBi0AACAJdCEKIAZBAWohBiAHQX9qIQcgCUEIaiILIQkgBCAQIAUgCmoiBSAVcSAEdiARaiINQQJ0ai0AASIKaiALSw0ACwsgECANQQJ0aiIDLQAAIQ0gAy8BAiERIAEgBDYCxDcgCyAEayEIIAUgBHYhBQwOCyAMRQ0SIA4gASgCQDoAACABQRQ2AgAgDEF/aiEMIA5BAWohDiABKAIAIQIMHAsgASgCCCIJBEAgBEEfTQRAA0AgB0UNHSAHQX9qIQcgBi0AACAEdCAFaiEFIARBGEkhAiAEQQhqIQQgBkEBaiEGIAINAAsLIAAgEyAMayICIAAoAhRqNgIUIAEgASgCHCACajYCHAJAIAJFBEAgASgCECEIIAEoAhghAgwBCyAOIAJrIQogASgCGCETIAECfyABKAIQIggEQCATIAogAhA1DAELIBMgCiACEGULIgI2AhggACACNgIwCyAFIAVBCHRBgID8B3EgBUEYdHIgBUEIdkGA/gNxIAVBGHZyciAIGyACRw0KQQAhBSAMIRNBACEECyABQRs2AgALAkAgCUUNACABKAIQRQ0AIARBH00EQANAIAdFDRwgB0F/aiEHIAYtAAAgBHQgBWohBSAEQRhJIQIgBEEIaiEEIAZBAWohBiACDQALCyAFIAEoAhxHDQpBACEFQQAhBAsgAUEcNgIADBsLIAFBDDYCAAwRCyAGIAdqIQYgBCAHQQN0aiEEDBcLIAIgA2ohBiAIIAJBA3RqIQQMFgsgBiAHaiEGIAQgB0EDdGohBAwVC0F9IQMMFgtBfiEUDBYLIABB/e8ANgIYIAFBHTYCACABKAIAIQIMEwsgAUEaNgIAIAUgBEEHcXYhBSAEQXhxIQQgASgCACECDBILIABB8PIANgIYIAFBHTYCACAMIRMgASgCACECDBELIABBhfMANgIYIAFBHTYCACABKAIAIQIMEAtBACEEIAMhBiACIQcLIAEgEUH//wNxNgJAIAEgBCAKajYCxDcgCCAKayEEIAUgCnYhBSANRQRAIAFBGTYCACABKAIAIQIMDwsgDUEgcQRAIAFBCzYCACABQX82AsQ3IAEoAgAhAgwPCyANQcAAcQRAIABBoPIANgIYIAFBHTYCACABKAIAIQIMDwsgAUEVNgIAIAEgDUEPcSIJNgJICyAGIQggByEKAkAgCUUEQCABKAJAIQMMAQsgCCEDIAQiAiAJSQRAA0AgB0UNDCAHQX9qIQcgAy0AACACdCAFaiEFIANBAWoiBiEDIAJBCGoiAiAJSQ0ACwsgASABKALENyAJajYCxDcgASABKAJAIAVBfyAJdEF/c3FqIgM2AkAgAiAJayEEIAUgCXYhBQsgAUEWNgIAIAEgAzYCyDcLIAQhCSAHIQIgBiEDAkAgBCABKAJQIhAgBUF/IAEoAlh0QX9zIg1xIgtBAnRqLQABIgpPBEAgBCEIDAELA0AgAkUNCSADLQAAIAl0IQogA0EBaiEDIAJBf2ohAiAJQQhqIgghCSAIIBAgBSAKaiIFIA1xIgtBAnRqLQABIgpJDQALCyAQIAtBAnRqIgYvAQIhEQJAIAYtAAAiDUHwAXEEQCABKALENyEEIAMhBiACIQcgCiEJDAELIAIhByADIQYCQCAKIBAgBUF/IAogDWp0QX9zIhVxIAp2IBFqIg1BAnRqLQABIglqIAgiBE0EQCAIIQsMAQsDQCAHRQ0JIAYtAAAgBHQhCSAGQQFqIQYgB0F/aiEHIARBCGoiCyEEIAogECAFIAlqIgUgFXEgCnYgEWoiDUECdGotAAEiCWogC0sNAAsLIBAgDUECdGoiAy0AACENIAMvAQIhESABIAEoAsQ3IApqIgQ2AsQ3IAsgCmshCCAFIAp2IQULIAEgBCAJajYCxDcgCCAJayEEIAUgCXYhBSANQcAAcQRAIABBvPIANgIYIAFBHTYCACABKAIAIQIMDQsgAUEXNgIAIAEgDUEPcSIJNgJIIAEgEUH//wNxNgJECyAGIQggByEKIAkEQCAIIQMgBCICIAlJBEADQCAHRQ0HIAdBf2ohByADLQAAIAJ0IAVqIQUgA0EBaiIGIQMgAkEIaiICIAlJDQALCyABIAEoAsQ3IAlqNgLENyABIAEoAkQgBUF/IAl0QX9zcWo2AkQgBSAJdiEFIAIgCWshBAsgAUEYNgIACyAMDQELQQAhDCAPIQMMCgsCQCABKAJEIgMgEyAMayICSwRAAkAgAyACayICIAEoAixNDQAgASgCwDdFDQAgAEHS8gA2AhggAUEdNgIAIAEoAgAhAgwLCwJ/IAIgASgCMCIDSwRAIAEoAiggAiADayICawwBCyADIAJrCyEIIAEoAkAiFCACIAIgFEsbIQMgASgCNCAIaiECDAELIA4gA2shAiABKAJAIhQhAwsgASAUIAwgAyADIAxLGyIIazYCQCAIIQMDQCAOIAItAAA6AAAgDkEBaiEOIAJBAWohAiADQX9qIgMNAAsgDCAIayEMIAEoAkANACABQRQ2AgAgASgCACECDAgLIAEoAgAhAgwHCyAIIApqIQYgBCAKQQN0aiEEDAULIAIgA2ohBiAIIAJBA3RqIQQMBAsgBiAHaiEGIAQgB0EDdGohBAwDCyAIIApqIQYgBCAKQQN0aiEEDAILQQAhByADIQYgCCEEIA8hAwwDCyABQYACIAh0NgIUQQAhBCABQQBBAEEAEGUiAzYCGCAAIAM2AjAgAUEJQQsgBUGAwABxGzYCAEEAIQUgASgCACECDAELC0EAIQcgDyEDCyAAIAw2AhAgACAONgIMIAAgBzYCBCAAIAY2AgAgASAENgI8IAEgBTYCOAJAAkAgASgCKEUEQCAMIBNGDQEgASgCAEEZSw0BCyAAIA4gEyAMaxCLBA0BIAAoAhAhDCAAKAIEIQcLIAAgACgCCCAcIAdrajYCCCAAIBMgDGsiAiAAKAIUajYCFCABIAEoAhwgAmo2AhwCQCACRQ0AIAEoAghFDQAgACgCDCACayEGIAEoAhghBCABAn8gASgCEARAIAQgBiACEDUMAQsgBCAGIAIQZQsiAjYCGCAAIAI2AjALIAAgASgCPCABKAIEQQBHQQZ0aiABKAIAIgBBC0ZBB3RqQYACIABBDkZBCHQgAEETRhtqNgIsIANBeyADGyEUDAELIAFBHjYCAAsgEkEQaiQAIBQLkAEBA38gAEUEQEF+DwsgAEEANgIYIAAoAiAiAUUEQCAAQQA2AiggAEEbNgIgQ
RshAQsgACgCJEUEQCAAQRw2AiQLIAAoAihBAUHMNyABEQEAIgJFBEBBfA8LIAAgAjYCHEEAIQEgAkEANgI0IAAQjgQiAwR/IAAoAiggAiAAKAIkEQQAIABBADYCHCADBSABCwteAQJ/QX4hAgJAIABFDQAgACgCHCIBRQ0AAkAgASgCNCICRQ0AIAEoAiRBD0YNACAAKAIoIAIgACgCJBEEACABQQA2AjQLIAFBDzYCJCABQQE2AgggABCPBCECCyACCzEBAn9BfiEBAkAgAEUNACAAKAIcIgJFDQAgAkEANgIwIAJCADcCKCAAEJAEIQELIAELlQEBA39BfiECAkAgAEUNACAAKAIcIgFFDQBBACECIAFBADYCHCAAQQA2AgggAEIANwIUIAEoAggiAwRAIAAgA0EBcTYCMAsgAUIANwI4IAFBADYCICABQYCAAjYCFCABQQA2AgwgAUIANwIAIAFCgYCAgHA3AsA3IAEgAUGwCmoiADYCbCABIAA2AlAgASAANgJMCyACC9QLARV/IAAoAgxBf2oiBCAAKAIQIgMgAWtqIREgACgCHCIJKAIwIgogCSgCKCISaiETIAkoAjRBf2ohDEF/IAkoAlh0QX9zIRRBfyAJKAJUdEF/cyEVIAMgBGpB/31qIQ0gACgCAEF/aiIIIAAoAgRqQXtqIQ4gCSgCUCEPIAkoAkwhECAJKAI8IQUgCSgCOCEBIAkoAiwhFgNAIAVBDk0EQCAILQABIAV0IAFqIAgtAAIgBUEIanRqIQEgBUEQaiEFIAhBAmohCAsgBSAQIAEgFXFBAnRqIgMtAAEiAmshBSABIAJ2IQEgAy8BAiEHAkACQAJAIAMtAAAiAkUNACAJAn8CQAJAA0AgAkH/AXEhAyACQRBxBEAgB0H//wNxIQcCfyADQQ9xIgZFBEAgCCEDIAEMAQsCfyAFIAZPBEAgBSECIAgMAQsgBUEIaiECIAgtAAEgBXQgAWohASAIQQFqCyEDIAIgBmshBSABQX8gBnRBf3NxIAdqIQcgASAGdgshAiAFQQ5NBEAgAy0AASAFdCACaiADLQACIAVBCGp0aiECIAVBEGohBSADQQJqIQMLIAUgDyACIBRxQQJ0aiIILQABIgFrIQUgAiABdiEBIAgvAQIhBiAILQAAIgJBEHENAgNAIAJBwABxRQRAIAUgDyABQX8gAnRBf3NxIAZB//8DcWpBAnRqIgItAAEiBmshBSABIAZ2IQEgAi8BAiEGIAItAAAiAkEQcUUNAQwECwtBvPIAIQcgAyEIDAMLIANBwABxRQRAIAUgECABQX8gA3RBf3NxIAdB//8DcWpBAnRqIgMtAAEiAmshBSABIAJ2IQEgAy8BAiEHIAMtAAAiAkUNBQwBCwtBoPIAIQdBCyADQSBxDQIaDAELIAZB//8DcSELAn8gBSACQQ9xIgJPBEAgBSEGIAMMAQsgAy0AASAFdCABaiEBIANBAWogBUEIaiIGIAJPDQAaIAMtAAIgBnQgAWohASAFQRBqIQYgA0ECagshCCABQX8gAnRBf3NxIQMgBiACayEFIAEgAnYhAQJAIAMgC2oiCyAEIBFrIgNLBEACQCALIANrIgMgFk0NACAJKALAN0UNAEHS8gAhBwwDCwJAAkAgCkUEQCAMIBIgA2tqIQIgAyEGIAcgA00NAgNAIAQgAi0AAToAASAEQQFqIQQgAkEBaiECIAZBf2oiBg0ACwwBCyAKIANJBEAgDCATIANraiECIAMgCmsiAyEGIAcgA00NAgNAIAQgAi0AAToAASAEQQFqIQQgAkEBaiECIAZBf2oiBg0ACyAMIQIgByADayIHIAoiBk0EQAwDCwNAIAQgAi0AAToAASAEQQFqIQQgAkEBaiECIAZBf2oiBg0ACyAEIAtrIQIgByAKayEHDAILIAwgCiADa2ohAiADIQYgByADTQ0BA0AgBCACLQABOgABIARBAWohBCACQQFqIQIgBkF/aiIGDQALCyAEIAtrIQIgByADayEHCyAHQQNPBEADQCAEIAItAAE6AAEgBCACLQACOgACIAQgAi0AAzoAAyAEQQNqIQQgAkEDaiECIAdBfWoiB0ECSw0ACwsgB0UNBSAEIAItAAE6AAEgB0EBRw0BIARBAWohBAwFCyAEIAtrIQMDQCAEIgIgAyIGLQABOgABIAIgAy0AAjoAAiACIAMtAAM6AAMgAkEDaiEEIANBA2ohAyAHQX1qIgdBAksNAAsgB0UNBCACIAYtAAQ6AAQgB0EBRgRAIAJBBGohBAwFCyACIAYtAAU6AAUgAkEFaiEEDAQLIAQgAi0AAjoAAiAEQQJqIQQMAwsgACAHNgIYQR0LNgIADAILIAQgBzoAASAEQQFqIQQLIAQgDU8NACAIIA5JDQELCyAAIARBAWo2AgwgACANIARrQYECajYCECAAIAggBUEDdmsiA0EBajYCACAAIA4gA2tBBWo2AgQgCSAFQQdxIgA2AjwgCSABQX8gAHRBf3NxNgI4CzgBA38DQCACIABBAXFyIgNBAXQhAiABQQFKIQQgAEEBdiEAIAFBf2ohASAEDQALIANB/////wdxC6oDAQR/IwBBIGsiBCQAIAQgAi8BAEEBdCIDOwECIAQgAi8BAiADQf7/A3FqQQF0IgM7AQQgBCACLwEEIANB/v8DcWpBAXQiAzsBBiAEIAIvAQYgA0H+/wNxakEBdCIDOwEIIAQgAi8BCCADQf7/A3FqQQF0IgM7AQogBCACLwEKIANB/v8DcWpBAXQiAzsBDCAEIAIvAQwgA0H+/wNxakEBdCIDOwEOIAQgAi8BDiADQf7/A3FqQQF0IgM7ARAgBCACLwEQIANB/v8DcWpBAXQiAzsBEiAEIAIvARIgA0H+/wNxakEBdCIDOwEUIAQgAi8BFCADQf7/A3FqQQF0IgM7ARYgBCACLwEWIANB/v8DcWpBAXQiAzsBGCAEIAMgAi8BGGpBAXQiAzsBGiAEIAIvARogA2pBAXQiAzsBHCAEIAIvARwgA2pBAXQ7AR5BACECIAFBAE4EQANAIAAgAkECdGoiBi8BAiIDBEAgBCADQQF0aiIFIAUvAQAiBUEBajsBACAGIAUgAxCSBDsBAAsgASACRyEDIAJBAWohAiADDQALCyAEQSBqJAAL7gQBC38gAygCECEGIAMoAgghCCADKAIEIQwgAygCACEJIABB1BZqQgA3AQAgAEHMFmpCADcBACAAQcQWakIANwEAIABBvBZqQgA3AQAgASAAIAAoAtQoQQJ0akHcFmooAgBBAnRqQQA7AQICQCAAKALUKCIDQbsESg0AIANBAWohAwNAIAEgACADQQJ0akHcFmooAgAiBUECdCINaiIKIAEgCi8BAkECdGovAQIiBEEBaiAGIAYgBEobIgs7AQIgBiAETCEOAkAgBSACSg0AIAAgC0EBdGpBvBZqIgQgBC8BAEEBajsBAEEAIQQgBSAITgRAIAwgBSAIa0ECdGooAgAhBAsgACAAKAKoLSAKLwEAIgUgBCALamxqNgKoLSAJRQ0A
IAAgACgCrC0gBCAJIA1qLwECaiAFbGo2AqwtCyAHIA5qIQcgA0EBaiIDQb0ERw0ACyAHRQ0AIAAgBkEBdGpBvBZqIQQDQCAGIQMDQCAAIAMiBUF/aiIDQQF0akG8FmoiCC8BACIJRQ0ACyAIIAlBf2o7AQAgACAFQQF0akG8FmoiAyADLwEAQQJqOwEAIAQgBC8BAEF/aiIDOwEAIAdBAkohBSAHQX5qIQcgBQ0ACyAGRQ0AQb0EIQUDQCADQf//A3EhByAFIQMDQCAHBEAgACADQX9qIgNBAnRqQdwWaigCACIEIAJKDQEgASAEQQJ0aiIFLwECIgQgBkcEQCAAIAAoAqgtIAUvAQAgBiAEa2xqNgKoLSAFIAY7AQILIAdBf2ohByADIQUMAQsLIAZBf2oiBkUNASAAIAZBAXRqQbwWai8BACEDDAALAAsLUwEBfyMAQSBrIgQkACAEIAE2AhggBCAANgIUIARBvAg2AhAgBEGACTYCCCAEIAI2AgwgBEEQaiAEQQhqEKoEIAMgBCgCDCACazYCACAEQSBqJAALkwUBBX8gAC8BuC0gAUH//QNqQf//A3EiBiAAKAK8LSIEdHIhBQJAIARBDE4EQCAAIAU7AbgtIAAgACgCFCIEQQFqNgIUIAQgACgCCGogBToAACAAIAAoAhQiBEEBajYCFCAEIAAoAghqIABBuS1qLQAAOgAAIAAoArwtIgVBdWohBCAGQRAgBWt2IQUMAQsgBEEFaiEECyAAIAQ2ArwtIAJBf2pB//8DcSIHIAR0IQYCfyAEQQxOBEAgACAFIAZyIgQ7AbgtIAAgACgCFCIFQQFqNgIUIAUgACgCCGogBDoAACAAIAAoAhQiBEEBajYCFCAEIAAoAghqIABBuS1qLQAAOgAAIAAoArwtIgVBdWohBCAHQRAgBWt2DAELIARBBWohBCAFIAZyCyEFIAAgBDYCvC0gACAFIANB/P8DakH//wNxIgYgBHRyIgU7AbgtAkAgBEENTgRAIAAgACgCFCIEQQFqNgIUIAQgACgCCGogBToAACAAIAAoAhQiBEEBajYCFCAEIAAoAghqIABBuS1qLQAAOgAAIAAoArwtIgVBdGohBCAGQRAgBWt2IQUMAQsgBEEEaiEECyAAIAQ2ArwtQQAhBiAAQbktaiEHA0AgACAFIAAgBkGA5QBqLQAAQQJ0akH+FGovAQAiCCAEdHIiBTsBuC0gAAJ/IARBDk4EQCAAIAAoAhQiBEEBajYCFCAEIAAoAghqIAU6AAAgACAAKAIUIgRBAWo2AhQgBCAAKAIIaiAHLQAAOgAAIAAgCEEQIAAoArwtIgRrdiIFOwG4LSAEQXNqDAELIARBA2oLIgQ2ArwtIAZBAWoiBiADRw0ACyAAIABBlAFqIAFBf2oQiQIgACAAQYgTaiACQX9qEIkCC68CACAAIABBlAFqIABBnBZqKAIAEIoCIAAgAEGIE2ogAEGoFmooAgAQigIgACAAQbAWahCuASAAIAAoAqgtAn9BEiAAQboVai8BAA0AGkERIABBghVqLwEADQAaQRAgAEG2FWovAQANABpBDyAAQYYVai8BAA0AGkEOIABBshVqLwEADQAaQQ0gAEGKFWovAQANABpBDCAAQa4Vai8BAA0AGkELIABBjhVqLwEADQAaQQogAEGqFWovAQANABpBCSAAQZIVai8BAA0AGkEIIABBphVqLwEADQAaQQcgAEGWFWovAQANABpBBiAAQaIVai8BAA0AGkEFIABBmhVqLwEADQAaQQQgAEGeFWovAQANABpBA0ECIABB/hRqLwEAGwsiAEEDbGpBEWo2AqgtIAALjgEBAn9B/4D/n38hAQNAAkAgAUEBcUUNACAAIAJBAnRqLwGUAUUNAEEADwsgAUEBdiEBIAJBAWoiAkEgRw0AC0EBIQECQCAALwG4AQ0AIAAvAbwBDQAgAC8ByAENAEEgIQIDQCAAIAJBAnRqLwGUAUUEQEEAIQEgAkEBaiICQYACRw0BDAILC0EBIQELIAELrAEBAX8CQCAAAn8gACgCvC0iAUEQRgRAIAAgACgCFCIBQQFqNgIUIAEgACgCCGogAC0AuC06AAAgACAAKAIUIgFBAWo2AhQgASAAKAIIaiAAQbktai0AADoAACAAQQA7AbgtQQAMAQsgAUEISA0BIAAgACgCFCIBQQFqNgIUIAEgACgCCGogAC0AuC06AAAgACAAQbktai0AADsBuC0gACgCvC1BeGoLNgK8LQsLvwEBAn8gABCMAiAAIAAoAhQiA0EBajYCFCADIAAoAghqIAI6AAAgACAAKAIUIgNBAWo2AhQgAyAAKAIIaiACQQh2OgAAIAAgACgCFCIDQQFqNgIUIAMgACgCCGogAkF/cyIDOgAAIAAgACgCFCIEQQFqNgIUIAQgACgCCGogA0EIdjoAACACBEADQCABLQAAIQMgACAAKAIUIgRBAWo2AhQgBCAAKAIIaiADOgAAIAFBAWohASACQX9qIgINAAsLC/0GAQt/IwBBEGsiCiQAAkAgACgCCCAAKAIEIgNrQQRMBEAgABCxAUUNASAAKAIEIQMLA0AgA0EBaiEIIAMtAAAiB0EDcUUEQCAHQQJ2IgZBAWohBCAAKAIIIgsgCGsiBUEVSSAHQT9LciABKAIIIgwgASgCBCICayIJQRBJckUEQCACIAMoAAE2AAAgAiADKAAFNgAEIAIgAygACTYACCACIAMoAA02AAwgASACIARqNgIEIAQgCGohAwwCCwJAIAdB8AFJBEAgCCEGDAELIAsgCCAGQUVqIgdqIgZrIQUgB0ECdEHADWooAgAgCCgAAHFBAWohBAsCQCAEIAVNDQAgDCACayAFSQ0DA0AgASACIAYgBRAqIAVqNgIEIAAoAgAiAiAAKAIMIAIoAgAoAhARBAAgACgCACICIApBDGogAigCACgCDBEDACEGIAAgCigCDCIHNgIMIAdFDQQgACAGIAdqNgIIIAEoAgggASgCBCICayEJIAQgBWsiBCAHTQ0BIAkgByIFTw0ACwwDCyAJIARJDQIgASACIAYgBBAqIARqNgIEIAAoAgggBCAGaiIDa0EESg0BIAAgAzYCBCAAELEBRQ0CIAAoAgQhAwwBCyABKAIEIgYgASgCAGsgB0EBdEHACWovAQAiBUELdiIJQQJ0QcANaigCACAIKAAAcSAFQYAOcWoiBEF/ak0NAQJAIARBCEkgBUH/AXEiB0EQS3IgASgCCCAGayICQRBJckUEQCAGIAYgBGsiAigAADYAACAGIAIoAAQ2AAQgBiACKAAINgAIIAYgAigADDYADAwBCwJAAkAgAiAHQQpqTwRAIAYgBGshBSAGIQMgByECIARBB0wNAQwCCyACIAdJDQQgBiAEayEDIAYhBSAHIQIDQCAFIAMtAAA6AAAgBUEBaiEFIANBAWohAyACQQFKIQQgAkF/aiECIAQNAAsMAgsDQCADIAUoAAA2AAAgAyAFKAAENgAEIAIgBGshAiADIARqIgMgBWsiBEEISA0ACwsgAkE
ATA0AA0AgAyAFKAAANgAAIAMgBSgABDYABCADQQhqIQMgBUEIaiEFIAJBCEohBCACQXhqIQIgBA0ACwsgASAGIAdqNgIEIAAoAgggCCAJaiIDa0EESg0AIAAgAzYCBCAAELEBRQ0BIAAoAgQhAwwACwALIApBEGokAAuoBgEJfwNAAkACQAJAIAAoAnQiBkGDAk8EQCAAQQA2AmAMAQsgABB2IAAoAnQiBkGDAk9BBHJFBEBBAA8LIAYEQCAAQQA2AmAgBkECSw0BIAAoAmwhBwwCCyAAQQA2ArQtIAAgACgCXCIBQQBOBH8gACgCOCABagVBAAsgACgCbCABa0EBEEQgACAAKAJsNgJcIAAoAgAQNkEDQQIgACgCACgCEBsPCyAAKAJsIgdFBEBBACEHDAELIAAoAjggB2oiCEF/aiIBLQAAIgMgCC0AAEcNACADIAEtAAJHDQAgAyABLQADRw0AIAhBggJqIQlBfyEBA0ACQCABIAhqIgItAAQgA0cEQCACQQRqIQUMAQsgAi0ABSADRwRAIAJBBWohBQwBCyACLQAGIANHBEAgAkEGaiEFDAELIAItAAcgA0cEQCACQQdqIQUMAQsgAyAIIAFBCGoiBGoiBS0AAEcNACACLQAJIANHBEAgAkEJaiEFDAELIAItAAogA0cEQCACQQpqIQUMAQsgAkELaiEFIAFB9gFKDQAgBCEBIAMgBS0AAEYNAQsLIAAgBiAFIAlrQYICaiIBIAEgBksbIgE2AmAgAUEDSQ0AIAAoAqQtIAAoAqAtIgRBAXRqQQE7AQAgACAEQQFqNgKgLSAEIAAoApgtaiABQX1qIgE6AAAgAUH/AXFBoOUAai0AAEECdEGACHIgAGoiASABLwGYAUEBajsBmAEgACgCYCEBIABBADYCYCAAIAAvAYgTQQFqOwGIEyAAIAAoAnQgAWs2AnQgACABIAAoAmxqIgY2AmwMAQsgACgCOCAHai0AACEBIAAoAqQtIAAoAqAtIgRBAXRqQQA7AQAgACAEQQFqNgKgLSAEIAAoApgtaiABOgAAIAAgAUECdGoiASABLwGUAUEBajsBlAEgACAAKAJ0QX9qNgJ0IAAgACgCbEEBaiIGNgJsCyAAKAKgLSAAKAKcLUF/akcNAEEAIQEgACAAKAJcIgRBAE4EfyAAKAI4IARqBUEACyAGIARrQQAQRCAAIAAoAmw2AlwgACgCABA2IAAoAgAoAhANAAsgAQu/AgEDfwJAA0ACQAJAIAAoAnQNACAAEHYgACgCdA0ADAELIABBADYCYCAAKAI4IAAoAmxqLQAAIQEgACgCpC0gACgCoC0iAkEBdGpBADsBACAAIAJBAWo2AqAtIAIgACgCmC1qIAE6AAAgACABQQJ0aiIBIAEvAZQBQQFqOwGUASAAIAAoAnRBf2o2AnQgACAAKAJsQQFqIgI2AmwgACgCoC0gACgCnC1Bf2pHDQEgACAAKAJcIgFBAE4EfyAAKAI4IAFqBUEACyACIAFrQQAQRCAAIAAoAmw2AlwgACgCABA2IAAoAgAoAhANAQwCCwsgAEEANgK0LSAAIAAoAlwiAUEATgR/IAAoAjggAWoFQQALIAAoAmwgAWtBARBEIAAgACgCbDYCXCAAKAIAEDZBA0ECIAAoAgAoAhAbDwsgAwuGAQEBfyACIAAoAgQiAyADIAJLGyICBEAgACADIAJrNgIEIAEgACgCACACECohAQJAAkACQCAAKAIcKAIYQX9qDgIAAQILIAAgACgCMCABIAIQZTYCMAwBCyAAIAAoAjAgASACEDU2AjALIAAgACgCACACajYCACAAIAAoAgggAmo2AggLIAIL2goBB38CQANAAkACQAJAIAAoAnRBhQJLDQAgABB2IAEgACgCdCICQYYCT3JFBEBBAA8LIAJFDQIgAkECSw0AIAAgACgCYCICNgJ4IAAgACgCcDYCZEECIQQgAEECNgJgDAELQQIhBCAAIAAoAlQgACgCbCIDIAAoAjhqLQACIAAoAkggACgCWHRzcSICNgJIIAAoAkAgAyAAKAI0cUEBdGogACgCRCACQQF0aiICLwEAIgU7AQAgAiADOwEAIAAgACgCYCICNgJ4IAAgACgCcDYCZCAAQQI2AmAgBUUNAAJAIAIgACgCgAFPDQAgAyAFayAAKAIsQfp9aksNACAAIAAgBRCPAiIENgJgIARBBUsNACAAKAKIAUEBRwRAIARBA0cNAUEDIQQgACgCbCAAKAJwa0GBIEkNAQtBAiEEIABBAjYCYAsgACgCeCECCyACQQNJIAQgAktyRQRAIAAoAnQhBSAAKAKkLSAAKAKgLSIDQQF0aiAAKAJsIgYgACgCZEF/c2oiBDsBACAAIANBAWo2AqAtIAMgACgCmC1qIAJBfWoiAjoAACACQf8BcUGg5QBqLQAAQQJ0QYAIciAAaiICQZgBaiACLwGYAUEBajsBACAAIARBf2pB//8DcSICIAJBB3ZBgAJqIAJBgAJJG0Gg6ABqLQAAQQJ0akGIE2oiAiACLwEAQQFqOwEAIAAgACgCeCICQX5qIgQ2AnggACAAKAJ0IAJrQQFqNgJ0IAUgBmpBfWohBSAAKAJsIQIgACgCnC0hBiAAKAKgLSEIA0AgACACIgNBAWoiAjYCbCACIAVNBEAgACAAKAJUIAMgACgCOGotAAMgACgCSCAAKAJYdHNxIgc2AkggACgCQCAAKAI0IAJxQQF0aiAAKAJEIAdBAXRqIgcvAQA7AQAgByACOwEACyAAIARBf2oiBDYCeCAEDQALIABBAjYCYCAAQQA2AmggACADQQJqIgU2AmwgCCAGQX9qRw0CQQAhAkEAIQQgACAAKAJcIgNBAE4EfyAAKAI4IANqBSAECyAFIANrQQAQRCAAIAAoAmw2AlwgACgCABA2IAAoAgAoAhANAgwDCyAAKAJoBEAgACgCbCAAKAI4akF/ai0AACECIAAoAqQtIAAoAqAtIgNBAXRqQQA7AQAgACADQQFqNgKgLSADIAAoApgtaiACOgAAIAAgAkECdGoiAkGUAWogAi8BlAFBAWo7AQAgACgCoC0gACgCnC1Bf2pGBEBBACECIAAgACgCXCIDQQBOBH8gACgCOCADagUgAgsgACgCbCADa0EAEEQgACAAKAJsNgJcIAAoAgAQNgsgACAAKAJsQQFqNgJsIAAgACgCdEF/ajYCdCAAKAIAKAIQDQJBAA8FIABBATYCaCAAIAAoAmxBAWo2AmwgACAAKAJ0QX9qNgJ0DAILAAsLIAAoAmgEQCAAKAJsIAAoAjhqQX9qLQAAIQIgACgCpC0gACgCoC0iA0EBdGpBADsBACAAIANBAWo2AqAtIAMgACgCmC1qIAI6AAAgACACQQJ0aiICQZQBaiACLwGUAUEBajsBACAAQQA2AmgLIAAgACgCbCIDQQIgA0ECSRs2ArQtIAFBBEYEQEEAIQQgACAAKAJcIgFBAE4EfyAAKAI4IAFqBSAECyADIAFrQQEQRCAAIAAoAmw2AlwgACgCABA2QQNBAiAAKA
IAKAIQGw8LIAAoAqAtBEBBACECQQAhBCAAIAAoAlwiAUEATgR/IAAoAjggAWoFIAQLIAMgAWtBABBEIAAgACgCbDYCXCAAKAIAEDYgACgCACgCEEUNAQtBASECCyACC7wIAQ1/AkADQAJAAkACQCAAKAJ0QYUCTQRAIAAQdiABIAAoAnQiAkGGAk9yRQRAQQAPCyACRQ0DIAJBA0kNAQsgACAAKAJUIAAoAmwiBCAAKAI4ai0AAiAAKAJIIAAoAlh0c3EiAjYCSCAAKAJAIAQgACgCNHFBAXRqIAAoAkQgAkEBdGoiAi8BACIDOwEAIAIgBDsBACADRQ0AIAQgA2sgACgCLEH6fWpLDQAgACAAIAMQjwIiAzYCYAwBCyAAKAJgIQMLAkAgA0EDTwRAIAAoAqQtIAAoAqAtIgJBAXRqIAAoAmwgACgCcGsiBDsBACAAIAJBAWo2AqAtIAIgACgCmC1qIANBfWoiAjoAACACQf8BcUGg5QBqLQAAQQJ0QYAIciAAaiICQZgBaiACLwGYAUEBajsBACAAIARBf2pB//8DcSICIAJBB3ZBgAJqIAJBgAJJG0Gg6ABqLQAAQQJ0akGIE2oiAiACLwEAQQFqOwEAIAAgACgCdCAAKAJgIgNrIgI2AnQgACgCnC1Bf2ohByAAKAKgLSEIAkAgAkEDSQ0AIAMgACgCgAFLDQAgACADQX9qIgU2AmAgACgCSCEGIAAoAmwhAyAAKAI0IQkgACgCQCEKIAAoAkQhCyAAKAJUIQwgACgCOCENIAAoAlghDgNAIAAgAyICQQFqIgM2AmwgACACIA1qLQADIAYgDnRzIAxxIgY2AkggCiADIAlxQQF0aiALIAZBAXRqIgQvAQA7AQAgBCADOwEAIAAgBUF/aiIFNgJgIAUNAAsgACACQQJqIgM2AmwgByAIRw0EDAILIABBADYCYCAAIAAoAmwgA2oiAzYCbCAAIAAoAjggA2oiBC0AACICNgJIIAAgACgCVCAELQABIAIgACgCWHRzcTYCSCAHIAhHDQMMAQsgACgCOCAAKAJsai0AACEDIAAoAqQtIAAoAqAtIgJBAXRqQQA7AQAgACACQQFqNgKgLSACIAAoApgtaiADOgAAIAAgA0ECdGoiAkGUAWogAi8BlAFBAWo7AQAgACAAKAJ0QX9qNgJ0IAAgACgCbEEBaiIDNgJsIAAoAqAtIAAoApwtQX9qRw0CC0EAIQRBACEGIAAgACgCXCICQQBOBH8gACgCOCACagUgBgsgAyACa0EAEEQgACAAKAJsNgJcIAAoAgAQNiAAKAIAKAIQDQEMAgsLIAAgACgCbCICQQIgAkECSRs2ArQtIAFBBEYEQEEAIQUgACAAKAJcIgFBAE4EfyAAKAI4IAFqBSAFCyACIAFrQQEQRCAAIAAoAmw2AlwgACgCABA2QQNBAiAAKAIAKAIQGw8LIAAoAqAtBEBBACEEQQAhBSAAIAAoAlwiAUEATgR/IAAoAjggAWoFIAULIAIgAWtBABBEIAAgACgCbDYCXCAAKAIAEDYgACgCACgCEEUNAQtBASEECyAEC7YBAQF/IwBBQGoiAyQAIAMgATYCECADIAA2AgwgA0G8CDYCCCADIAI2AhwgAyACNgIYIANCADcAMSADQgA3AiwgAyADQQhqNgIoQQAhACADQQA2AiQCQCADQShqIANBJGoQrQRFDQAgAyACIAMoAiRqNgIgIANBKGogA0EYahCbBCADLQA4RQ0AIAMoAhwgAygCIEYhAAsgAygCKCIBIAMoAjQgASgCACgCEBEEACADQUBrJAAgAAvYAwEFfyAAKAIMQXtqIgJB//8DIAJB//8DSRshBQJAA0ACQCAAKAJ0IgJBAU0EQCAAEHYgACgCdCICIAFyRQRAQQAPCyACRQ0BCyAAQQA2AnQgACAAKAJsIAJqIgI2AmwgAkEAIAIgACgCXCIDIAVqIgRJGwR/IAIFIAAgBDYCbCAAIAIgBGs2AnRBACEEQQAhAiAAIANBAE4EfyAAKAI4IANqBSACCyAFQQAQRCAAIAAoAmw2AlwgACgCABA2IAAoAgAoAhBFDQMgACgCXCEDIAAoAmwLIANrIgYgACgCLEH6fWpJDQFBACEEQQAhAiAAIANBAE4EfyAAKAI4IANqBSACCyAGQQAQRCAAIAAoAmw2AlwgACgCABA2IAAoAgAoAhANAQwCCwtBACECIABBADYCtC0gAUEERgRAIAAgACgCXCIBQQBOBH8gACgCOCABagUgAgsgACgCbCABa0EBEEQgACAAKAJsNgJcIAAoAgAQNkEDQQIgACgCACgCEBsPCyAAKAJsIgMgACgCXCIBSgRAQQAhBCAAIAFBAE4EfyAAKAI4IAFqBSACCyADIAFrQQAQRCAAIAAoAmw2AlwgACgCABA2IAAoAgAoAhBFDQELQQEhBAsgBAtiACAAQQA2ArwtIABBADsBuC0gAEG4FmpBwOkBNgIAIAAgAEH8FGo2ArAWIABBrBZqQazpATYCACAAIABBiBNqNgKkFiAAQaAWakGY6QE2AgAgACAAQZQBajYCmBYgABCNAguoAQECfyAAIAAoAixBAXQ2AjwgACgCRCIBIAAoAkxBAXRBfmoiAmpBADsBACABQQAgAhAoGiAAQQA2ArQtIABCgICAgCA3AnQgAEIANwJoIABCgICAgCA3AlwgAEEANgJIIAAgACgChAFBDGwiAUG01wBqLwEANgKQASAAIAFBsNcAai8BADYCjAEgACABQbLXAGovAQA2AoABIAAgAUG21wBqLwEANgJ8C6oBAQJ/QX4hAgJAIABFDQAgACgCHCIBRQ0AIAAoAiBFDQAgACgCJEUNACAAQQI2AiwgAEEANgIIIABCADcCFCABQQA2AhQgASABKAIINgIQIAEoAhgiAkF/TARAIAFBACACayICNgIYCyABQSpB8QAgAhs2AgQgAAJ/IAJBAkYEQEEAQQBBABA1DAELQQBBAEEAEGULNgIwQQAhAiABQQA2AiggARCjBAsgAgsGACABEDgLCQAgASACbBBMC9ADAQN/QXohAgJAQaCEAS0AAEExRw0AQX4hAiAARQ0AIABBADYCGCAAKAIgIgNFBEAgAEEANgIoIABBGzYCIEEbIQMLIAAoAiRFBEAgAEEcNgIkC0EGIAEgAUF/RhsiBEEJSw0AQXwhAiAAKAIoQQFBxC0gAxEBACIBRQ0AIAAgATYCHCABQgE3AhggASAANgIAIAFB//8BNgI0IAFCgICCgPABNwIsIAFC//+BgNAANwJUIAFCgICCgPABNwJMIAEgACgCKEGAgAJBAiAAKAIgEQEANgI4IAEgACgCKCABKAIsQQIgACgCIBEBADYCQCAAKAIoIAEoAkxBAiAAKAIgEQEAIQIgAUEANgLALSABIAI2AkQgAUGAgAE2ApwtIAEgACgCKEGAgAFBBCAAKAIgEQEAIgI2AgggASABKAKcLSIDQQJ0NgIMAkACQCABKAI4RQ0AIAEoAkBFIAJFcg0AIAEoAkQNAQsgAUGaBTYCB
CAAQbOEATYCGCAAEK8BGkF8DwsgAUEANgKIASABIAQ2AoQBIAFBCDoAJCABIAIgA0EDbGo2ApgtIAEgAiADQX5xajYCpC0gABClBCIBRQRAIAAoAhwQpAQLIAEhAgsgAgvhBgAgAEF/cyEAAkAgAkUgAUEDcUVyDQADQCABLQAAIABB/wFxc0ECdEGwF2ooAgAgAEEIdnMhACABQQFqIQEgAkF/aiICRQ0BIAFBA3ENAAsLIAJBH0sEQANAIAEoAhwgASgCGCABKAIUIAEoAhAgASgCDCABKAIIIAEoAgQgASgCACAAcyIAQQZ2QfwHcUGwJ2ooAgAgAEH/AXFBAnRBsC9qKAIAcyAAQQ52QfwHcUGwH2ooAgBzIABBFnZB/AdxQbAXaigCAHNzIgBBBnZB/AdxQbAnaigCACAAQf8BcUECdEGwL2ooAgBzIABBDnZB/AdxQbAfaigCAHMgAEEWdkH8B3FBsBdqKAIAc3MiAEEGdkH8B3FBsCdqKAIAIABB/wFxQQJ0QbAvaigCAHMgAEEOdkH8B3FBsB9qKAIAcyAAQRZ2QfwHcUGwF2ooAgBzcyIAQQZ2QfwHcUGwJ2ooAgAgAEH/AXFBAnRBsC9qKAIAcyAAQQ52QfwHcUGwH2ooAgBzIABBFnZB/AdxQbAXaigCAHNzIgBBBnZB/AdxQbAnaigCACAAQf8BcUECdEGwL2ooAgBzIABBDnZB/AdxQbAfaigCAHMgAEEWdkH8B3FBsBdqKAIAc3MiAEEGdkH8B3FBsCdqKAIAIABB/wFxQQJ0QbAvaigCAHMgAEEOdkH8B3FBsB9qKAIAcyAAQRZ2QfwHcUGwF2ooAgBzcyIAQQZ2QfwHcUGwJ2ooAgAgAEH/AXFBAnRBsC9qKAIAcyAAQQ52QfwHcUGwH2ooAgBzIABBFnZB/AdxQbAXaigCAHNzIgBBBnZB/AdxQbAnaigCACAAQf8BcUECdEGwL2ooAgBzIABBDnZB/AdxQbAfaigCAHMgAEEWdkH8B3FBsBdqKAIAcyEAIAFBIGohASACQWBqIgJBH0sNAAsLIAJBA0sEQANAIAEoAgAgAHMiAEEGdkH8B3FBsCdqKAIAIABB/wFxQQJ0QbAvaigCAHMgAEEOdkH8B3FBsB9qKAIAcyAAQRZ2QfwHcUGwF2ooAgBzIQAgAUEEaiEBIAJBfGoiAkEDSw0ACwsgAgRAA0AgAS0AACAAQf8BcXNBAnRBsBdqKAIAIABBCHZzIQAgAUEBaiEBIAJBf2oiAg0ACwsgAEF/cwvTBQELfyMAQaAQayICJAAgASACQZsQagJ/IAAgACgCACgCCBEAACIDQf8ATQRAIAIgAzoAmxAgAkGcEGoMAQsgA0H//wBNBEAgAiADQQd2OgCcECACIANBgAFyOgCbECACQZ0QagwBCyADQf///wBNBEAgAiADQQ52OgCdECACIANBgAFyOgCbECACIANBB3ZBgAFyOgCcECACQZ4QagwBCyACIANBgAFyOgCbECACIANBDnZBgAFyOgCdECACIANBB3ZBgAFyOgCcECADQRV2IQQgA0H/////AE0EQCACIAQ6AJ4QIAJBnxBqDAELIAIgA0EcdjoAnxAgAiAEQYABcjoAnhAgAkGgEGoLIAJBmxBqayILIAEoAgAoAggRBgAgAkEANgKQEAJAIANFDQADQCAAIAJBDGogACgCACgCDBEDACEIAn8gAigCDCIEIANBgIAEIANBgIAESRsiBk8EQCAGDAELAn8gCUUEQCAGEG0hCQsgCQsgCCAEECohCCAAIAQgACgCACgCEBEEAANAIAQgCGogACACQQxqIAAoAgAoAgwRAwAgBiAEayIFIAIoAgwiByAFIAdJGyIFECoaIAAgBSAAKAIAKAIQEQQAIAYgBCAFaiIESw0AC0EACyEMIAIgBjYCDEGAAiEFA0ACQCAFIgRBAXQhBSAEQf//AEsNACAEIAZJDQELCyACQRBqIQcCQCAEQYEISQ0AIAIoApAQIgcNACACQYCAAhBtIgc2ApAQCyAHQQAgBRAoIQcgASABIAYgBkEGbmpBIGoiBQJ/IApFBEAgBRBtIQoLIAoLIAEoAgAoAgwRAQAiBSAIIAIoAgwgBSAHIAQQtQQgBWsiBCABKAIAKAIIEQYAIAAgDCAAKAIAKAIQEQQAIAQgC2ohCyADIAZrIgMNAAsgCQRAIAkQOAsgChA4IAIoApAQIgBFDQAgABA4CyACQaAQaiQAC8wWAQh/QX4hAgJAAkACQCAARQ0AIAAoAhwiAUUNAAJAAkAgACgCDEUNACAAKAIARQRAIAAoAgQNAQsgASgCBCICQZoFR0EBcg0BCyAAQaaEATYCGEF+DwsgACgCEEUNASABIAA2AgAgASgCKBogAUEENgIoAkACQAJAAkACQAJAAkACQAJAAkACQCACQSpGBEAgASgCGEECRgRAIABBAEEAQQAQNTYCMCABIAEoAhQiAkEBajYCFCACIAEoAghqQR86AAAgASABKAIUIgJBAWo2AhQgAiABKAIIakGLAToAACABIAEoAhQiAkEBajYCFCACIAEoAghqQQg6AAAgASgCHCICRQRAIAEgASgCFCICQQFqNgIUIAIgASgCCGpBADoAACABIAEoAhQiAkEBajYCFCACIAEoAghqQQA6AAAgASABKAIUIgJBAWo2AhQgAiABKAIIakEAOgAAIAEgASgCFCICQQFqNgIUIAIgASgCCGpBADoAACABIAEoAhQiAkEBajYCFCACIAEoAghqQQA6AABBAiECIAEoAoQBIgNBCUcEQEEEIAEoAogBQQFKQQJ0IANBAkgbIQILIAEgASgCFCIDQQFqNgIUIAMgASgCCGogAjoAACABIAEoAhQiAkEBajYCFCACIAEoAghqQQM6AAAgAUHxADYCBAwNCyACKAIkIQMgAigCHCEEIAIoAhAhBSACKAIsIQYgAigCACEHIAEgASgCFCIIQQFqNgIUQQIhAiAIIAEoAghqIAZBAEdBAXQgB0EAR3IgBUEAR0ECdHIgBEEAR0EDdHIgA0EAR0EEdHI6AAAgASgCHCgCBCEDIAEgASgCFCIEQQFqNgIUIAQgASgCCGogAzoAACABKAIcKAIEIQMgASABKAIUIgRBAWo2AhQgBCABKAIIaiADQQh2OgAAIAEoAhwvAQYhAyABIAEoAhQiBEEBajYCFCAEIAEoAghqIAM6AAAgASgCHC0AByEDIAEgASgCFCIEQQFqNgIUIAQgASgCCGogAzoAACABKAKEASIDQQlHBEBBBCABKAKIAUEBSkECdCADQQJIGyECCyABIAEoAhQiA0EBajYCFCADIAEoAghqIAI6AAAgASgCHCgCDCECIAEgASgCFCIDQQFqNgIUIAMgASgCCGogAjoAAAJ/IAEoAhwiBCgCEARAIAQoAhQhAiABIAEoAhQiA0EBajYCFCADIAEoAghqIAI6AAAgASgCHCgCFCECIAEgASgCFCIDQQFqNgIUIAMgASgCCGogAkEIdjoAACABKAIcIQQLIAQoAiwL
BEAgACAAKAIwIAEoAgggASgCFBA1NgIwCyABQcUANgIEIAFBADYCIAwCCyABKAIwQQx0QYCQfmohBEEAIQICQCABKAKIAUEBSg0AIAEoAoQBIgNBAkgNAEHAACECIANBBkgNAEGAAUHAASADQQZGGyECCyABQfEANgIEIAEgAiAEciICQSByIAIgASgCbBsiAkEfcCACckEfcxB1IAEoAmwEQCABIAAvATIQdSABIAAvATAQdQsgAEEAQQBBABBlNgIwIAEoAgQhAgsgAkHFAEcNASABKAIcIQQLAkAgBCgCEARAIAEoAhQhAiABKAIgIgUgBC8BFE8NASACIQMDQCABKAIMIAJGBEACQCACIANNDQAgBCgCLEUNACAAIAAoAjAgASgCCCADaiACIANrEDU2AjALIAAQNiABKAIcIQQgASgCFCICIAEoAgxGDQMgASgCICEFIAIhAwsgBCgCECAFai0AACEEIAEgAkEBajYCFCABKAIIIAJqIAQ6AAAgASABKAIgQQFqIgU2AiAgBSABKAIcIgQvARRPBEAgAyECDAMFIAEoAhQhAgwBCwALAAsgAUHJADYCBAwCCwJAIAQoAixFDQAgASgCFCIDIAJNDQAgACAAKAIwIAEoAgggAmogAyACaxA1NgIwCyABKAIgIAQoAhRGBEAgAUHJADYCBCABQQA2AiAMAgsgASgCBCECCyACQckARw0BIAEoAhwhBAsgBCgCHEUNAiABKAIUIgIhAwJAA0ACQCABKAIMIAJGBEACQCACIANNDQAgASgCHCgCLEUNACAAIAAoAjAgASgCCCADaiACIANrEDU2AjALIAAQNiABKAIUIgIgASgCDEYNASACIQMLQQEhBSABKAIcKAIcIQQgASABKAIgIgZBAWo2AiAgBCAGai0AACEEIAEgAkEBajYCFCABKAIIIAJqIAQ6AAAgBARAIAEoAhQhAgwCBSADIQIMAwsACwtBACEFCwJAIAEoAhwiBCgCLEUNACABKAIUIgMgAk0NACAAIAAoAjAgASgCCCACaiADIAJrEDU2AjALIAUNASABKAIEIQILIAJB2wBHDQMgASgCHCEEDAILIAFBADYCIAsgAUHbADYCBAsgBCgCJEUNASABKAIUIgIhAwJAA0ACQCABKAIMIAJGBEACQCACIANNDQAgASgCHCgCLEUNACAAIAAoAjAgASgCCCADaiACIANrEDU2AjALIAAQNiABKAIUIgIgASgCDEYNASACIQMLQQEhBSABKAIcKAIkIQQgASABKAIgIgZBAWo2AiAgBCAGai0AACEEIAEgAkEBajYCFCABKAIIIAJqIAQ6AAAgBARAIAEoAhQhAgwCBSADIQIMAwsACwtBACEFCwJAIAEoAhwiBCgCLEUNACABKAIUIgMgAk0NACAAIAAoAjAgASgCCCACaiADIAJrEDU2AjALIAUNASABKAIEIQILIAJB5wBHDQIgASgCHCEEDAELIAFB5wA2AgQLIAQoAiwEQCABKAIUIgVBAmoiAiABKAIMIgRLBH8gABA2IAEoAgwhBCABKAIUIgVBAmoFIAILIARLDQEgACgCMCECIAEgBUEBajYCFCABKAIIIAVqIAI6AAAgACgCMCECIAEgASgCFCIDQQFqNgIUIAMgASgCCGogAkEIdjoAACAAQQBBAEEAEDU2AjAgAUHxADYCBAwBCyABQfEANgIECwJAIAEoAhQEQCAAEDYgACgCEARAIAAoAgQhAgwCCwwECyAAKAIEIgINAEEAIQILAkACQAJAIAEoAgQiA0GaBUYEQCACRQ0BDAULIAINAQsgA0GaBUcNACABKAJ0RQ0BCwJ/AkACQAJAIAEoAogBQX5qDgIAAQILIAEQnQQMAgsgARCcBAwBCyABQQQgASgChAFBDGxBuNcAaigCABEDAAsiAkF+cUECRgRAIAFBmgU2AgQLIAJBfXFFBEBBACECIAAoAhANAgwECyACQQFHDQAgAUEAQQBBABCOAiAAEDYgACgCEA0ADAMLQQEhAiABKAIYIgNBAUgNACAAKAIwIQICQCADQQJGBEAgASABKAIUIgNBAWo2AhQgAyABKAIIaiACOgAAIAAoAjAhAiABIAEoAhQiA0EBajYCFCADIAEoAghqIAJBCHY6AAAgAC8BMiECIAEgASgCFCIDQQFqNgIUIAMgASgCCGogAjoAACAALQAzIQIgASABKAIUIgNBAWo2AhQgAyABKAIIaiACOgAAIAAoAgghAiABIAEoAhQiA0EBajYCFCADIAEoAghqIAI6AAAgACgCCCECIAEgASgCFCIDQQFqNgIUIAMgASgCCGogAkEIdjoAACAALwEKIQIgASABKAIUIgNBAWo2AhQgAyABKAIIaiACOgAAIAAtAAshAiABIAEoAhQiA0EBajYCFCADIAEoAghqIAI6AAAMAQsgASACQRB2EHUgASAALwEwEHULIAAQNiABKAIYIgBBAU4EQCABQQAgAGs2AhgLIAEoAhRFIQILIAIPCyAAQceEATYCGEF7DwsgAUF/NgIoQQAL3QEBBn8CQCAAKAKAgBAiBSAAKAKEgBAiAyAAKAKMgBAiBGpBBGpJDQAgACgClIAQIgIgBSADa0F9aiIGTw0AA0AgACACQf//A3FBAXRqQYCACGogAiAAIAIgA2oQOkECdGoiBCgCAGsiB0H//wMgB0H//wNJGzsBACAEIAI2AgAgAkEBaiICIAZJDQALIAAoAoyAECEECyAAIAQ2ApCAECAAIAM2AoiAECAAQQA2ApyAECAAIAE2AoCAECAAIAUgA2siAjYCjIAQIAAgAjYClIAQIAAgASACazYChIAQC9kDAQR/IwBBEGsiAyQAIAFBADYCACAAKAIAIgIgA0EMaiACKAIAKAIMEQMAIQICQCADKAIMRQ0AIAIsAAAhAiAAKAIAIgRBASAEKAIAKAIQEQQAIAEgASgCACACQf8AcXI2AgACQCACQX9KDQAgACgCACICIANBDGogAigCACgCDBEDACECIAMoAgxFDQEgAiwAACECIAAoAgAiBEEBIAQoAgAoAhARBAAgASABKAIAIAJB/wBxQQd0cjYCACACQX9KDQAgACgCACICIANBDGogAigCACgCDBEDACECIAMoAgxFDQEgAiwAACECIAAoAgAiBEEBIAQoAgAoAhARBAAgASABKAIAIAJB/wBxQQ50cjYCACACQX9KDQAgACgCACICIANBDGogAigCACgCDBEDACECIAMoAgxFDQEgAiwAACECIAAoAgAiBEEBIAQoAgAoAhARBAAgASABKAIAIAJB/wBxQRV0cjYCACACQX9KDQAgACgCACICIANBDGogAigCACgCDBEDACECIAMoAgxFDQEgAiwAACEFIAAoAgAiAEEBIAAoAgAoAhARBAAgASABKAIAIAVBHHRyNgIAIAVBf0oNAEEAIQUMAQtBASEFCyADQRBqJAAgBQvhSQE3fwJAIAAoAoCAECIJIAAoAoSAECILayAAKAKQgBBrIghBgIAETwRAIABBADYCnIA
QDAELAkAgCA0AIAMoAgBBgSBIDQAgACAAKAKcgBBBoIAQECoiACABEKwEIAAgBTsBmIAQDAELAkAgBEEATEEAIAZBAkYbDQAgAygCACIIQYCAgPAHSw0AIAAgCCAJajYCgIAQQQkgBSAFQQFIGyIFQQwgBUEMSBsiG0EMbCIJQZQWaigCACEuAkACfyAbQQlNBEAgA0EANgIAIAIgBGoiOkF7aiA6IAZBAkYiOxshKSABIAhqITMgASEoIAIhCQJAIAhBDUgNACAzQXRqIjIgAUkNAEGANCAbdkEBcSE0IDNBe2oiGEF/aiEvIBhBfWohIkEAIRsDQCAAKAKUgBAhBCAAKAKIgBAhEyAAKAKcgBAhFCAoIQwDQCAAKAKQgBAiBSAMIAtrIh9BgYB8aiAFQYCABGogH0sbIRUgACgCjIAQIRAgDCgAACEOIAQgH0kEQANAIAAgBEH//wNxQQF0akGAgAhqIAQgACAEIAtqEDpBAnRqIgUoAgBrIghB//8DIAhB//8DSRs7AQAgBSAENgIAIARBAWoiBCAfSQ0ACwsgACAfNgKUgBAgDEEIaiEhIAxBBGohEkEDIQgCQCAAIAwQOkECdCIjaigCACIHIBVJBEAgLiENDAELIA5B//8DcSAOQRB2RiAOQf8BcSAOQRh2RnEhJCAQIBNqIQ8gCyAQaiIdQQRqIREgDEF/aiEmQQAhJSAuIQ1BACEcA0ACQAJAAn8CQAJAIBAgB00EQCAIICZqLwAAIAcgC2oiCiAIakF/ai8AAEcNBSAOIAooAABHDQUgCkEEaiEEICIgEk0EfyASBSAEKAAAIBIoAABzIgUNAiAEQQRqIQQgIQsiBSAiSQRAA0AgBCgAACAFKAAAcyIWBEAgFhAlIAVqIBJrIQQMBwsgBEEEaiEEIAVBBGoiBSAiSQ0ACwsCQCAFIC9PDQAgBC8AACAFLwAARw0AIARBAmohBCAFQQJqIQULIAUgGEkEfyAFQQFqIAUgBC0AACAFLQAARhsFIAULIBJrIQQMBAsgDiAHIBNqIgQoAABHDQQgBEEEaiEEAn8gEiAYIAwgECAHa2oiICAgIBhLGyIWQX1qIgogEk0NABogBCgAACASKAAAcyIFDQIgBEEEaiEEICELIgUgCkkEQANAIAQoAAAgBSgAAHMiJwRAICcQJSAFaiASawwFCyAEQQRqIQQgBUEEaiIFIApJDQALCwJAIAUgFkF/ak8NACAELwAAIAUvAABHDQAgBEECaiEEIAVBAmohBQsgBSAWSQR/IAVBAWogBSAELQAAIAUtAABGGwUgBQsgEmsMAgsgBRAlIQQMAgsgBRAlCyEEIAcgC2ogHgJ/IARBBGoiCiAMaiAWRyAgIBhPckUEQCAdIQUCfwJAAn8gIiAWIgRLBEAgHSgAACAWKAAAcyIEDQIgESEFIBZBBGohBAsgBCAiSQsEQANAIAUoAAAgBCgAAHMiHgRAIB4QJSAEaiAWawwECyAFQQRqIQUgBEEEaiIEICJJDQALCwJAIAQgL08NACAFLwAAIAQvAABHDQAgBUECaiEFIARBAmohBAsgBCAYSQR/IARBAWogBCAFLQAAIAQtAABGGwUgBAsgFmsMAQsgBBAlCyAKaiEKCyAKIAhKIgQLGyEeIAogCCAEGyEIDAELIARBBGoiBCAIIAQgCEoiBBshCCAKIB4gBBshHgsgDUF/aiENAkACQCA0RSAAIAdB//8DcUEBdGpBgIAIai8BACIEQQFHcg0AICVFBEBBASElICRFDQFBAiElIBIgGCAOEDNBBGohHAsgJUECRyAHQX9qIgUgFUlyDQBBAiElIBAgBRAyRQ0AIA4gEyALIAUgEEkiFhsgBWoiCigAAEcNACAKQQRqIA8gGCAWGyIHIA4QM0EEaiEEIBMgACgCkIAQIiBqIRYCQCAFIBBJBEAgByAEIApqRgRAIB0gGCAEIA4QPRAzIARqIQQLIAogFiAOEDEhBwwBCyAKIAogHSAOEDEiB2sgHUcgICAQT3INACAPIBZBACAHayAOED0QMSAHaiEHCyAFIAUgB2siCiAVIAogFUsbIgprIARqIhYgHEkgBCAcS3JFBEAgBCAFIBxraiIEIBAgECAEEDIbIQcMAgsgECAKEDJFBEAgECEHDAILAkAgCCAWIBwgFiAcSRsiBE8EQCAeIQUgCCEEDAELIAwgCiALaiIFa0H//wNKDQQLIAogACAKQf//A3FBAXRqQYCACGovAQAiCEkEQCAFIR4gBCEIDAQLIAogCGshByAFIR4gBCEIDAELIAcgBGshBwsgDUUNASAHIBVPDQALCwJAIA1FIB8gFWtB/v8DS3INACAfIBQgI2ooAgAiCiAVaiAUKAKAgBAgFCgChIAQIh1rIhFrIg9rQf//A0sNAANAIA1FDQEgDiAKIB1qIgQoAABGBEAgBEEEaiEEAn8CQAJ/IBIgGCAMIBEgCmtqIgUgBSAYSxsiEEF9aiIWIBJNDQAaIAQoAAAgEigAAHMiBQ0BIARBBGohBCAhCyIFIBZJBEADQCAEKAAAIAUoAABzIgcEQCAHECUgBWogEmsMBAsgBEEEaiEEIAVBBGoiBSAWSQ0ACwsCQCAFIBBBf2pPDQAgBC8AACAFLwAARw0AIARBAmohBCAFQQJqIQULIAUgEEkEfyAFQQFqIAUgBC0AACAFLQAARhsFIAULIBJrDAELIAUQJQtBBGoiBCAIIAQgCEoiBBshCCALIA9qIB4gBBshHgsgDUF/aiENIAogFCAKQf//A3FBAXRqQYCACGovAQAiBGshCiAfIA8gBGsiD2tBgIAESQ0ACwsgCEEDSgRAICghHyAJIQ4gDCEdIB4iCSEWIAghEgJ/An8CQAJAAkADQCAJIR4CQCAMIAgiDWoiKCAyTQRAIAAoApCAECIEIChBfmoiESAAKAKEgBAiIWsiIEGBgHxqIARBgIAEaiAgSxshIyAAKAKMgBAhFCAAKAKIgBAhJiAAKAKcgBAhJyARKAAAIRMgACgClIAQIgQgIEkEQANAIAAgBEH//wNxQQF0akGAgAhqIAQgACAEICFqEDpBAnRqIgUoAgBrIghB//8DIAhB//8DSRs7AQAgBSAENgIAIARBAWoiBCAgSQ0ACwsgESAMayEqIAAgIDYClIAQIBFBCGohMCARQQRqIRUgDCARayEkAkAgACAREDpBAnQiLGooAgAiByAjSQRAIC4hECANIQgMAQsgE0H//wNxIBNBEHZGIBNB/wFxIBNBGHZGcSE1IBQgJmohMSAUICFqIhxBBGohJUEAIS1BACAqayE2IAxBf2ohNyANIQggLiEQQQAhCQNAAkACQAJ/AkACQCAUIAdNBEAgCCA3ai8AACAHICFqIgsgNmogCGpBf2ovAABHDQUgEyALKAAARw0FAkAgKkUEQEEAIQoMAQsgJCAcIAtrIgQgJCAEShsiD0EfdSAPcSEFQQAhBANAIAQiCiAPTARAIAUhCgwCCyARIApBf2oiBGotAAAgBCALai0AAEYNAAsLIAtBBGohBCAiIBVNBH
8gFQUgBCgAACAVKAAAcyIFDQIgBEEEaiEEIDALIgUgIkkEQANAIAQoAAAgBSgAAHMiDwRAIA8QJSAFaiAVayEEDAcLIARBBGohBCAFQQRqIgUgIkkNAAsLAkAgBSAvTw0AIAQvAAAgBS8AAEcNACAEQQJqIQQgBUECaiEFCyAFIBhJBH8gBUEBaiAFIAQtAAAgBS0AAEYbBSAFCyAVayEEDAQLIBMgByAmaiIKKAAARw0EIApBBGohBCAAKAKQgBAhOAJ/IBUgGCARIBQgB2tqIisgKyAYSxsiC0F9aiIPIBVNDQAaIAQoAAAgFSgAAHMiBQ0CIARBBGohBCAwCyIFIA9JBEADQCAEKAAAIAUoAABzIjkEQCA5ECUgBWogFWsMBQsgBEEEaiEEIAVBBGoiBSAPSQ0ACwsCQCAFIAtBf2pPDQAgBC8AACAFLwAARw0AIARBAmohBCAFQQJqIQULIAUgC0kEfyAFQQFqIAUgBC0AACAFLQAARhsFIAULIBVrDAILIAUQJSEEDAILIAUQJQshBCARIARBBGoiD2ogC0cgKyAYT3JFBEAgHCEFAn8CQAJ/ICIgCyIESwRAIBwoAAAgCygAAHMiBA0CICUhBSALQQRqIQQLIAQgIkkLBEADQCAFKAAAIAQoAABzIisEQCArECUgBGogC2sMBAsgBUEEaiEFIARBBGoiBCAiSQ0ACwsCQCAEIC9PDQAgBS8AACAELwAARw0AIAVBAmohBSAEQQJqIQQLIAQgGEkEfyAEQQFqIAQgBS0AACAELQAARhsFIAQLIAtrDAELIAQQJQsgD2ohDwsCQCAqRQRAQQAhBQwBCyAkICYgOGogCmsiBCAkIARKGyIrQR91ICtxIQtBACEEA0AgBCIFICtMBEAgCyEFDAILIBEgBUF/aiIEai0AACAEIApqLQAARg0ACwsgDyAFayIEIAhMDQEgBSARaiEZIAcgIWogBWohGyAEIQgMAQsgBCAKa0EEaiIEIAhMDQAgCiARaiEZIAogC2ohGyAEIQgLIBBBf2ohEAJAAkAgNEUgACAHQf//A3FBAXRqQYCACGovAQAiBEEBR3INACAtRQRAQQEhLSA1RQ0BQQIhLSAVIBggExAzQQRqIQkLIC1BAkcgB0F/aiIFICNJcg0AQQIhLSAUIAUQMkUNACATICYgISAFIBRJIgobIAVqIgsoAABHDQAgC0EEaiAxIBggChsiByATEDNBBGohBCAmIAAoApCAECIPaiEKAkAgBSAUSQRAIAcgBCALakYEQCAcIBggBCATED0QMyAEaiEECyALIAogExAxIQcMAQsgCyALIBwgExAxIgdrIBxHIA8gFE9yDQAgMSAKQQAgB2sgExA9EDEgB2ohBwsgBSAFIAdrIgsgIyALICNLGyIKayAEaiILIAlJIAQgCUtyRQRAIAQgBSAJa2oiBCAUIBQgBBAyGyEHDAILIAogFCAUIAoQMiIEGyEHICogBEVyDQECQCAIIAsgCSALIAlJGyIETwRAIBkhBSAbIQsgCCEEDAELIBEiBSAKICFqIgtrQf//A0oNBAsgCiAAIApB//8DcUEBdGpBgIAIai8BACIISQRAIAUhGSALIRsgBCEIDAQLIAogCGshByAFIRkgCyEbIAQhCAwBCyAHIARrIQcLIBBFDQEgByAjTw0ACwsCQCAgICNrQf7/A0sEQCAbIQkMAQsgEEUEQCAbIQkMAQsgICAnICxqKAIAIg8gI2ogJygCgIAQICcoAoSAECIHayIlayILa0H//wNLBEAgGyEJDAELIBshCQNAIBBFDQECQCATIAcgD2oiCigAAEcNACAKQQRqIQQCfwJAAn8gFSAYIBEgJSAPa2oiBSAFIBhLGyIbQX1qIhwgFU0NABogBCgAACAVKAAAcyIFDQEgBEEEaiEEIDALIgUgHEkEQANAIAQoAAAgBSgAAHMiFARAIBQQJSAFaiAVawwECyAEQQRqIQQgBUEEaiIFIBxJDQALCwJAIAUgG0F/ak8NACAELwAAIAUvAABHDQAgBEECaiEEIAVBAmohBQsgBSAbSQR/IAVBAWogBSAELQAAIAUtAABGGwUgBQsgFWsMAQsgBRAlC0EEaiEUAkAgKkUEQEEAIQUMAQsgJCAHICcoAoyAEGogCmsiBCAkIARKGyIcQR91IBxxIRtBACEEA0AgBCIFIBxMBEAgGyEFDAILIBEgBUF/aiIEai0AACAEIApqLQAARg0ACwsgFCAFayIEIAhMDQAgBSARaiEZIAsgIWogBWohCSAEIQgLIBBBf2ohECAPICcgD0H//wNxQQF0akGAgAhqLwEAIgRrIQ8gICALIARrIgtrQYCABEkNAAsLIAggDUcNASAJIRsLIAwgH2shBCAGBEAgDiAEQf8BbmogBGpBCWogKUsNBQsgDkEBaiEFAkAgBEEPTwRAIA5B8AE6AAAgBEFxaiIHQf8BTwRAIAVB/wEgBEHyfWoiCEH/AW4iBUEBahAoGiAFQYF+bCAIaiEHIAUgDmpBAmohBQsgBSAHOgAAIAVBAWohBQwBCyAOIARBBHQ6AAALIAUgHyAEIAVqIgkQOyAJIAwgHmtB//8DcRAvIA1BfGohCCAJQQJqIQkgBgRAIAkgCEH/AW5qQQZqIClLDQULIA4tAAAhDCAIQQ9PBEAgDiAMQQ9qOgAAIA1BbWoiB0H+A08EQCAJQf8BIA1B73tqIghB/gNuIglBAXQiDEECahAoGiAJQYJ8bCAIaiEHIAUgBCAMampBBGohCQsgB0H/AU8EQCAJQf8BOgAAIAdBgX5qIQcgCUEBaiEJCyAJIAc6AAAgCUEBaiEJDAQLIA4gCCAMajoAAAwDCyAdIAwgHSAMSSAZIAwgEmpJcSIEGyERIAkhGyAZIgwgEWtBA0gNACASIA0gBBshFSAWIB4gBBshHiAfIRYDQCARIBVqIh9BA2ohNSARIBVBEiAVQRJIGyIwaiExAkACQANAAn8CQCAMIBFrIgRBEUoNACARIAxrIAQgCGpBfGogMCAxIAggDGpBfGpLG2oiBEEBSA0AIAggBGshEiAEIAxqIRkgBCAJagwBCyAMIRkgCCESIAkLIRsCQCASIBlqIiggMk0EQCAAKAKQgBAiBCAoQX1qIg0gACgChIAQIiFrIiBBgYB8aiAEQYCABGogIEsbISMgACgCjIAQIRQgACgCiIAQISYgACgCnIAQIScgDSgAACETIAAoApSAECIEICBJBEADQCAAIARB//8DcUEBdGpBgIAIaiAEIAAgBCAhahA6QQJ0aiIFKAIAayIIQf//AyAIQf//A0kbOwEAIAUgBDYCACAEQQFqIgQgIEkNAAsLIA0gGWshKiAAICA2ApSAECANQQhqIS0gDUEEaiEdIBkgDWshJAJAIAAgDRA6QQJ0IjZqKAIAIgcgI0kEQCAuIRAgEiEIDAELIBNB//8DcSATQRB2RiATQf8BcSATQRh2RnEhNyAUICZqISsgFCAhaiIcQQRqISVBACEMQQAgKmshOCAZQX9qITkgEiEIIC4hEEEAI
QkDQAJAAkACfwJAAkAgFCAHTQRAIAggOWovAAAgByAhaiILIDhqIAhqQX9qLwAARw0FIBMgCygAAEcNBQJAICpFBEBBACEKDAELICQgHCALayIEICQgBEobIg9BH3UgD3EhBUEAIQQDQCAEIgogD0wEQCAFIQoMAgsgDSAKQX9qIgRqLQAAIAQgC2otAABGDQALCyALQQRqIQQgIiAdTQR/IB0FIAQoAAAgHSgAAHMiBQ0CIARBBGohBCAtCyIFICJJBEADQCAEKAAAIAUoAABzIg8EQCAPECUgBWogHWshBAwHCyAEQQRqIQQgBUEEaiIFICJJDQALCwJAIAUgL08NACAELwAAIAUvAABHDQAgBEECaiEEIAVBAmohBQsgBSAYSQR/IAVBAWogBSAELQAAIAUtAABGGwUgBQsgHWshBAwECyATIAcgJmoiCigAAEcNBCAKQQRqIQQgACgCkIAQITwCfyAdIBggDSAUIAdraiIsICwgGEsbIgtBfWoiDyAdTQ0AGiAEKAAAIB0oAABzIgUNAiAEQQRqIQQgLQsiBSAPSQRAA0AgBCgAACAFKAAAcyI9BEAgPRAlIAVqIB1rDAULIARBBGohBCAFQQRqIgUgD0kNAAsLAkAgBSALQX9qTw0AIAQvAAAgBS8AAEcNACAEQQJqIQQgBUECaiEFCyAFIAtJBH8gBUEBaiAFIAQtAAAgBS0AAEYbBSAFCyAdawwCCyAFECUhBAwCCyAFECULIQQgDSAEQQRqIg9qIAtHICwgGE9yRQRAIBwhBQJ/AkACfyAiIAsiBEsEQCAcKAAAIAsoAABzIgQNAiAlIQUgC0EEaiEECyAEICJJCwRAA0AgBSgAACAEKAAAcyIsBEAgLBAlIARqIAtrDAQLIAVBBGohBSAEQQRqIgQgIkkNAAsLAkAgBCAvTw0AIAUvAAAgBC8AAEcNACAFQQJqIQUgBEECaiEECyAEIBhJBH8gBEEBaiAEIAUtAAAgBC0AAEYbBSAECyALawwBCyAEECULIA9qIQ8LAkAgKkUEQEEAIQUMAQsgJCAmIDxqIAprIgQgJCAEShsiLEEfdSAscSELQQAhBANAIAQiBSAsTARAIAshBQwCCyANIAVBf2oiBGotAAAgBCAKai0AAEYNAAsLIA8gBWsiBCAITA0BIAUgDWohFyAHICFqIAVqIRogBCEIDAELIAQgCmtBBGoiBCAITA0AIAogDWohFyAKIAtqIRogBCEICyAQQX9qIRACQAJAIDRFIAAgB0H//wNxQQF0akGAgAhqLwEAIgRBAUdyDQAgDEUEQEEBIQwgN0UNAUECIQwgHSAYIBMQM0EEaiEJCyAMQQJHIAdBf2oiBSAjSXINAEECIQwgFCAFEDJFDQAgEyAmICEgBSAUSSIKGyAFaiILKAAARw0AIAtBBGogKyAYIAobIgogExAzQQRqIQQgJiAAKAKQgBAiD2ohDAJAIAUgFEkEQCAKIAQgC2pGBEAgHCAYIAQgExA9EDMgBGohBAsgCyAMIBMQMSEHDAELIAsgCyAcIBMQMSIHayAcRyAPIBRPcg0AICsgDEEAIAdrIBMQPRAxIAdqIQcLIAUgBSAHayIMICMgDCAjSxsiCmsgBGoiCyAJSSAEIAlLckUEQCAEIAUgCWtqIgQgFCAUIAQQMhshB0ECIQwMAgsgCiAUIBQgChAyIgQbIQdBAiEMICogBEVyDQECQCAIIAsgCSALIAlJGyIETwRAIBchBSAaIQsgCCEEDAELIA0iBSAKICFqIgtrQf//A0oNBAsgCiAAIApB//8DcUEBdGpBgIAIai8BACIISQRAIAUhFyALIRogBCEIDAQLIAogCGshByAFIRcgCyEaIAQhCAwBCyAHIARrIQcLIBBFDQEgByAjTw0ACwsCQAJAIBBFICAgI2tB/v8DS3INACAgICcgNmooAgAiDyAjaiAnKAKAgBAgJygChIAQIgprIhxrIgtrQf//A0sNACAXIQwgGiEJA0AgEEUNAgJAIBMgCiAPaiIaKAAARw0AIBpBBGohBAJ/AkACfyAdIBggDSAcIA9raiIFIAUgGEsbIhdBfWoiByAdTQ0AGiAEKAAAIB0oAABzIgUNASAEQQRqIQQgLQsiBSAHSQRAA0AgBCgAACAFKAAAcyIlBEAgJRAlIAVqIB1rDAQLIARBBGohBCAFQQRqIgUgB0kNAAsLAkAgBSAXQX9qTw0AIAQvAAAgBS8AAEcNACAEQQJqIQQgBUECaiEFCyAFIBdJBH8gBUEBaiAFIAQtAAAgBS0AAEYbBSAFCyAdawwBCyAFECULQQRqISUCQCAqRQRAQQAhBQwBCyAkIAogJygCjIAQaiAaayIEICQgBEobIgdBH3UgB3EhF0EAIQQDQCAEIgUgB0wEQCAXIQUMAgsgDSAFQX9qIgRqLQAAIAQgGmotAABGDQALCyAlIAVrIgQgCEwNACAFIA1qIQwgCyAhaiAFaiEJIAQhCAsgEEF/aiEQIA8gJyAPQf//A3FBAXRqQYCACGovAQAiBGshDyAgIAsgBGsiC2tBgIAESQ0ACwwBCyAXIQwgGiEJCyAIIBJHDQEgCSEaIAwhFwsgESAWayEFIAYEQCAOIAVB/wFuaiAFakEJaiApSw0ECyAZIBFrIBUgGSAfSRshCSAOQQFqIQcCQCAFQQ9PBEAgDkHwAToAACAFQXFqIgRB/wFPBEAgB0H/ASAFQfJ9aiIIQf8BbiIEQQFqECgaIAQgDmpBAmohByAEQYF+bCAIaiEECyAHIAQ6AAAgB0EBaiEHDAELIA4gBUEEdDoAAAsgByAWIAUgB2oiBBA7IAQgESAea0H//wNxEC8gCUF8aiEIIARBAmohBCAGBEAgBCAIQf8BbmpBBmogKUsNBAsgDi0AACEMAkAgCEEPTwRAIA4gDEEPajoAACAJQW1qIghB/gNPBEAgBEH/ASAJQe97aiIEQf4DbiIIQQF0IgxBAmoQKBogCEGCfGwgBGohCCAHIAUgDGpqQQRqIQQLIAhB/wFPBEAgBEH/AToAACAIQYF+aiEIIARBAWohBAsgBCAIOgAAIARBAWohBAwBCyAOIAggDGo6AAALIBkgCSARaiIFayEIIAYEQCAEIAhB/wFuaiAIakEJaiApSw0HCyAEQQFqIQcCQCAIQQ9PBEAgBEHwAToAACAIQXFqIg1B/wFPBEAgB0H/ASAIQfJ9aiIMQf8BbiIJQQFqECgaIAQgCWpBAmohByAJQYF+bCAMaiENCyAHIA06AAAgB0EBaiEHDAELIAQgCEEEdDoAAAsgByAFIAcgCGoiCRA7IAkgGSAba0H//wNxEC8gEkF8aiEIIAlBAmohCSAGBEAgCSAIQf8BbmpBBmogKUsNBwsgBC0AACEMIAhBD08EQCAEIAxBD2o6AAACfyASQW1qIgRB/gNPBEAgCUH/ASASQe97aiIEQf4DbiIIQQF0IglBAmoQKBogByAJIBlqIAVrakEEaiEJIAhBgnxsIARqIQQLIARB/wFPCwRAIAlB/wE6AAAgCUEBaiEJIARB
gX5qIQQLIAkgBDoAACAJQQFqIQkMCAsgBCAIIAxqOgAADAcLIAwgNU8NASAMIRcgCSEaIAwgH0kNAAsCQCAZIB9PDQAgEiAfIBlrIgRrIhJBA0oEQCAEIBtqIRsgHyEZDAELIAwhGSAJIRsgCCESCyARIBZrIQQgBgRAIA4gBEH/AW5qIARqQQlqIClLDQILIA5BAWohBQJAIARBD08EQCAOQfABOgAAIARBcWoiB0H/AU8EQCAFQf8BIARB8n1qIhdB/wFuIgVBAWoQKBogBUGBfmwgF2ohByAFIA5qQQJqIQULIAUgBzoAACAFQQFqIQUMAQsgDiAEQQR0OgAACyAFIBYgBCAFaiIaEDsgGiARIB5rQf//A3EQLyAVQXxqIRcgGkECaiEHIAYEQCAHIBdB/wFuakEGaiApSw0CCyAOLQAAIRoCfyAXQQ9PBEAgDiAaQQ9qOgAAAn8gFUFtaiINQf4DTwRAIAdB/wEgFUHve2oiF0H+A24iGkEBdCIeQQJqECgaIAUgBCAeampBBGohByAaQYJ8bCAXaiENCyANQf8BTwsEQCAHQf8BOgAAIAdBAWohByANQYF+aiENCyAHIA06AAAgB0EBagwBCyAOIBcgGmo6AAAgBwshDiAMIRcgCSEaIBkhHSAbIRYMAwsCfyAZIB9PBEAgFSENIBIMAQsgEiAZIBFrIg1BEUoNABogEiANIBJqQXxqIDAgMSASIBlqQXxqSxsiDSARIBlraiIEQQFIDQAaIAQgG2ohGyAEIBlqIRkgEiAEawshFSARIBZrIQQgBgRAIA4gBEH/AW5qIARqQQlqIClLDQELIA5BAWohBQJAIARBD08EQCAOQfABOgAAIARBcWoiB0H/AU8EQCAFQf8BIARB8n1qIhdB/wFuIgVBAWoQKBogBUGBfmwgF2ohByAFIA5qQQJqIQULIAUgBzoAACAFQQFqIQUMAQsgDiAEQQR0OgAACyAFIBYgBCAFaiIaEDsgGiARIB5rQf//A3EQLyANQXxqIRcgGkECaiEHIAYEQCAHIBdB/wFuakEGaiApSw0BCyAOLQAAIRoCfyAXQQ9PBEAgDiAaQQ9qOgAAAn8gDUFtaiIQQf4DTwRAIAdB/wEgDUHve2oiF0H+A24iGkEBdCIeQQJqECgaIAUgBCAeampBBGohByAaQYJ8bCAXaiEQCyAQQf8BTwsEQCAHQf8BOgAAIAdBAWohByAQQYF+aiEQCyAHIBA6AAAgDSARaiEWIBkhESAHQQFqDAELIA4gFyAaajoAACANIBFqIRYgGSERIAcLIQ4gGyEeIAwhFyAJIRoMAQsLCyAWDAMLIAUhKCAEDAMLICggMksNBiAAKAKEgBAhCwwFCyAfCyEoIA4LIQlBACEHIAZBAkYNAwwGCyAfIQQgDEEBaiIMIDJNDQALCwsgMyAoayIEQfABakH/AW4hBQJAIAZFDQAgBCAFaiAJakEBaiApQQVqIDogOxsiBU0NAEEAIQcgBkEBRg0DIAlBf3MgBWoiBCAEQfABakH/AW5rIQQLIAQgKGohBgJAIARBD08EQCAJQfABOgAAIAlBAWohBSAEQXFqIghB/wFJBEAgBSIJIAg6AAAMAgsgBUH/ASAEQfJ9aiIIQf8BbiIFQQFqECgaIAUgCWpBAmoiCSAFQYF+bCAIajoAAAwBCyAJIARBBHQ6AAALIAlBAWogKCAEECohBSADIAYgAWs2AgAgBCAFaiACawwBCyAAIAEgAiADIAQgLiAJQZgWaigCACAGIAVBC0pBASAALQCagBBBAEcQkAILIgdBAEoNAQsgAEEBOgCbgBALIAcPCyAAIAEgAiADIAQgBSAGEJECCzAAIAAoApyAEEUEQCAAIAEgAiADIAQgBSAGEJECDwsgACABIAIgAyAEIAUgBhCuBAt+AQF/IAAoAoCAECAAKAKEgBBrIgJBgYCAgARPBEAgAEEAQYCACBAoQYCACGpB/wFBgIAIECgaQQAhAgsgACABNgKAgBAgACACQYCABGoiAjYClIAQIAAgAjYCkIAQIAAgAjYCjIAQIAAgASACayIBNgKEgBAgACABNgKIgBALTwEBfyAALQCbgBAEQCAAEJICGiAAIAEQsAEPCyAAQQA2ApyAECAAKAKEgBAhAiAAQQA2AoSAECAAIAAoAoCAECACazYCgIAQIAAgARCwAQtQAQJ/IwBBEGsiBiQAIAYgAzYCDCAAQQNxRQRAIAAgBRCxBCAAIAEQsAQgACABIAIgBkEMaiAEIAUgAxCTAiAEShCvBCEHCyAGQRBqJAAgBwvyKAETfyAFQQEgBUEBShshBiAAIgVFIABBB3FyBH9BAAUgBUEAQaCAARAoCyEIAkACQAJAAkAgAxCTAiAETARAIANBioAESg0BIANBgICA8AdLDQIgASADaiEMIAgoAoCAASEAIAhBAzsBhoABIAggACADajYCgIABIAggCCgCkIABIANqNgKQgAECQCADQQ1IBEAgAiEDIAEhAAwBCyAMQXVqIRAgDEF0aiEUIAEgASgAAEEDEDAgCEEDIAEgAGsiCxBJIAxBe2oiEUF/aiETIBFBfWohDyAGQQZ0IgVBAXIhEiABQQFqIgQoAABBAxAwIQogASEJIAIhBgNAIARBAWohDSAKIAhBAxBIIQcgBSEOIBIhAwJAA0AgDSgAAEEDEDAhACAEIAtrIAogCEEDEFwgByALaiIKKAAAIAQoAABGDQEgDkEGdSEVIAAgCEEDEEghByADIQ4gA0EBaiEDIAAhCiAVIA0iBGoiDSAQTQ0ACyAGIQMgCSEADAILA0AgCiINIAFNIAQiACAJTXJFBEAgAEF/aiIELQAAIA1Bf2oiCi0AAEYNAQsLIAZBAWohAwJAIAAgCWsiBEEPTwRAIAZB8AE6AAAgBEFxaiIKQf8BTgRAIANB/wEgAEHvAWoiAyAKQf0DIApB/QNIGyIHIAlqa0H/AW5BAWoQKBogBiADIAlrIAdrQf8BbiIHakECaiEDIAQgB0GBfmxqQfJ9aiEKCyADIAo6AAAgA0EBaiEDDAELIAYgBEEEdDoAAAsgAyAJIAMgBGoiChA7A0AgCiAAIA1rQf//A3EQLyANQQRqIQMCfwJAAn8gDyAAQQRqIglNBEAgCQwBCyADKAAAIAkoAABzIgMNASANQQhqIQMgAEEIagsiBCAPSQRAA0AgAygAACAEKAAAcyIHBEAgBxAlIARqIAlrDAQLIANBBGohAyAEQQRqIgQgD0kNAAsLAkAgBCATTw0AIAMvAAAgBC8AAEcNACADQQJqIQMgBEECaiEECyAEIBFJBH8gBEEBaiAEIAMtAAAgBC0AAEYbBSAECyAJawwBCyADECULIQQgCkECaiEDIAAgBGpBBGohACAGLQAAIQkCQCAEQQ9PBEAgBiAJQQ9qOgAAIANBfxA0IARBcWoiBEH8B08EQANAIANBBGoiA0F/EDQgBEGEeGoiBEH7B0sNAAsLIAMgBEH//wNxQf8BbiIGaiIDIAZBgX5sIARqOgAAIANBAWohAwwBCyAGIAQ
gCWo6AAALIAAgEE8NAiAAQX5qIgQgBCgAAEEDEDAgCEEDIAsQSSAAKAAAQQMQMCIEIAhBAxBIIQYgACALayAEIAhBAxBcIAYgC2oiDSgAACAAKAAARgRAIANBADoAACADQQFqIQogAyEGDAELCyAAQQFqIgQoAABBAxAwIQogACEJIAMhBiAEIBRNDQALCwJAIAwgAGsiBEEPTwRAIANB8AE6AAAgA0EBaiEBIARBcWoiBUH/AUkEQCABIgMgBToAAAwCCyABQf8BIARB8n1qIgFB/wFuQQFqECgaIAFB/wFuIgUgA2pBAmoiAyAFQYF+bCABajoAAAwBCyADIARBBHQ6AAALDAQLIANBioAETARAIANBgICA8AdLDQIgAiAEaiEPIAEgA2ohDCAIKAKAgAEhACAIQQM7AYaAASAIIAAgA2o2AoCAASAIIAgoApCAASADajYCkIABAkAgA0ENSARAIAIhAyABIQAMAQsgDEF1aiERIAxBdGohFSABIAEoAABBAxAwIAhBAyABIABrIgsQSSAMQXtqIhRBf2ohFyAUQX1qIRAgBkEGdCIJQQFyIRIgAUEBaiIEKAAAQQMQMCEKIAEhBSACIQYDQCAEQQFqIQ0gCiAIQQMQSCEHIAkhDiASIQMCQANAIA0oAABBAxAwIQAgBCALayAKIAhBAxBcIAcgC2oiCigAACAEKAAARg0BIA5BBnUhFiAAIAhBAxBIIQcgAyEOIANBAWohAyAAIQogFiANIgRqIg0gEU0NAAsgBiEDIAUhAAwCCwNAIAoiDSABTSAEIgAgBU1yRQRAIABBf2oiBC0AACANQX9qIgotAABGDQELCyAGIAAgBWsiA2ogA0H/AW5qQQlqIA9LBEBBAA8LIAZBAWohBAJAIANBD08EQCAGQfABOgAAIANBcWoiCkH/AU4EQCAEQf8BIABB7wFqIgQgCkH9AyAKQf0DSBsiByAFamtB/wFuQQFqECgaIAYgBCAFayAHa0H/AW4iB2pBAmohBCADIAdBgX5sakHyfWohCgsgBCAKOgAAIARBAWohBAwBCyAGIANBBHQ6AAALIAQgBSADIARqIgoQOwNAIAogACANa0H//wNxEC8gDUEEaiEDIAoCfwJAAn8gECAAQQRqIgVNBEAgBQwBCyADKAAAIAUoAABzIgMNASANQQhqIQMgAEEIagsiBCAQSQRAA0AgAygAACAEKAAAcyIHBEAgBxAlIARqIAVrDAQLIANBBGohAyAEQQRqIgQgEEkNAAsLAkAgBCAXTw0AIAMvAAAgBC8AAEcNACADQQJqIQMgBEECaiEECyAEIBRJBH8gBEEBaiAEIAMtAAAgBC0AAEYbBSAECyAFawwBCyADECULIgRB8AFqQf8BbmpBCGogD0sEQEEADwsgCkECaiEDIAAgBGpBBGohACAGLQAAIQUCQCAEQQ9PBEAgBiAFQQ9qOgAAIANBfxA0IARBcWoiBEH8B08EQANAIANBBGoiA0F/EDQgBEGEeGoiBEH7B0sNAAsLIAMgBEH//wNxQf8BbiIFaiIDIAVBgX5sIARqOgAAIANBAWohAwwBCyAGIAQgBWo6AAALIAAgEU8NAiAAQX5qIgQgBCgAAEEDEDAgCEEDIAsQSSAAKAAAQQMQMCIEIAhBAxBIIQUgACALayAEIAhBAxBcIAUgC2oiDSgAACAAKAAARgRAIANBADoAACADQQFqIQogAyEGDAELCyAAQQFqIgQoAABBAxAwIQogACEFIAMhBiAEIBVNDQALCyADIAwgAGsiBGogBEHwAWpB/wFuakEBaiAPSw0CAkAgBEEPTwRAIANB8AE6AAAgA0EBaiEBIARBcWoiBUH/AUkEQCABIgMgBToAAAwCCyABQf8BIARB8n1qIgFB/wFuQQFqECgaIAFB/wFuIgUgA2pBAmoiAyAFQYF+bCABajoAAAwBCyADIARBBHQ6AAALDAQLIANBgICA8AdLDQEgAiAEaiEPIAEgA2oiEEF1aiERIBBBdGohFSAIKAKAgAEhACAIQQFBAiABQf//A0sbIgs7AYaAASAIIAAgA2o2AoCAASAIIAgoApCAASADajYCkIABIAEgASgAACALEDAgCCALIAEgAGsiDBBJIBBBe2oiF0F/aiEYIBdBfWohFCAGQQZ0IgpBAXIhDSABQQFqIgMoAAAgCxAwIQQgAUGAgARJIRYgAiEFIAEhBgNAAkACQCAWRQRAIAMgFUsNAiADQQFqIQ4gCiEJIA0hBwNAIAQgCBCFASEAIA4oAABBARAwIRIgAyAEIAhBASAMEEkgAEH//wNqIANPBEAgACgAACADKAAARg0DCyAJQQZ1IQAgByEJIAdBAWohByASIQQgACAOIgNqIg4gEU0NAAsMAgsgAyAVSw0BIANBAWohDiAEIAggCxBIIQAgCiEJIA0hBwNAIA4oAAAgCxAwIRIgAyAMayITIAQgCCALEFwgAEH//wNqIBNPBEAgACAMaiIAKAAAIAMoAABGDQILIAlBBnUhEyASIAggCxBIIQAgByEJIAdBAWohByASIQQgEyAOIgNqIg4gEU0NAAsMAQsDQCAAIgQgAU0gAyIJIAZNckUEQCAJQX9qIgMtAAAgBEF/aiIALQAARg0BCwtBACETIAUgCSAGayIDaiADQf8BbmpBCWogD0sNAyAFQQFqIQACQCADQQ9PBEAgBUHwAToAACADQXFqIgdB/wFOBEAgAEH/ASAJQe8BaiIAIAdB/QMgB0H9A0gbIgcgBmprQf8BbkEBahAoGiAFIAAgBmsgB2tB/wFuIgdqQQJqIQAgAyAHQYF+bGpB8n1qIQcLIAAgBzoAACAAQQFqIQAMAQsgBSADQQR0OgAACyAAIAYgACADaiIHEDsgCSEGA0AgByAGIARrQf//A3EQLyAEQQRqIQMgBwJ/AkACfyAUIAZBBGoiAE0EQCAADAELIAMoAAAgACgAAHMiAw0BIARBCGohAyAGQQhqCyIEIBRJBEADQCADKAAAIAQoAABzIgkEQCAJECUgBGogAGsMBAsgA0EEaiEDIARBBGoiBCAUSQ0ACwsCQCAEIBhPDQAgAy8AACAELwAARw0AIANBAmohAyAEQQJqIQQLIAQgF0kEfyAEQQFqIAQgAy0AACAELQAARhsFIAQLIABrDAELIAMQJQsiAEHwAWpB/wFuakEIaiAPSw0EIAdBAmohAyAAIAZqQQRqIQYgBS0AACEEAn8gAEEPTwRAIAUgBEEPajoAACADQX8QNCAAQXFqIgRB/AdPBEADQCADQQRqIgNBfxA0IARBhHhqIgRB+wdLDQALCyADIARB//8DcUH/AW4iAGoiAyAAQYF+bCAEajoAACADQQFqDAELIAUgACAEajoAACADCyEFIAYgEU8NASAGQX5qIgAgACgAACALEDAgCCALIAwQSSAGKAAAIQACQAJAIBZFBEAgAEEBEDAiACAIEIUBIQQgBiAAIAhBASAMEEkgBEH//wNqIAZJDQEgBCgAACAGKAAARw0BDAILIAAgCxAwIgMgCCALEEghAC
AGIAxrIgQgAyAIIAsQXCAAQf//A2ogBEkNACAAIAxqIgQoAAAgBigAAEYNAQsgBkEBaiIDKAAAIAsQMCEEDAMLIAVBADoAACAFQQFqIQcMAAsACwtBACETIAUgECAGayIBaiABQfABakH/AW5qQQFqIA9LDQECQCABQQ9PBEAgBUHwAToAACAFQQFqIQAgAUFxaiIDQf8BSQRAIAAiBSADOgAADAILIABB/wEgAUHyfWoiAEH/AW5BAWoQKBogAEH/AW4iAyAFakECaiIFIANBgX5sIABqOgAADAELIAUgAUEEdDoAAAsgBUEBaiAGIAEQKiABaiACayETDAELIANBgICA8AdLDQAgASADaiIPQXVqIRAgD0F0aiEUIAgoAoCAASEAIAhBAUECIAFB//8DSxsiCzsBhoABIAggACADajYCgIABIAggCCgCkIABIANqNgKQgAEgASABKAAAIAsQMCAIIAsgASAAayIMEEkgD0F7aiITQX9qIRcgE0F9aiERIAZBBnQiCkEBciENIAFBAWoiAygAACALEDAhBCABQYCABEkhFSACIQUgASEGA0ACQCAVRQRAIAMgFEsNBCADQQFqIQ4gCiEJIA0hBwNAIAQgCBCFASEAIA4oAABBARAwIRIgAyAEIAhBASAMEEkgAEH//wNqIANPBEAgACgAACADKAAARg0DCyAJQQZ1IQAgByEJIAdBAWohByASIQQgACAOIgNqIg4gEE0NAAsMBAsgAyAUSw0DIANBAWohDiAEIAggCxBIIQAgCiEJIA0hBwNAIA4oAAAgCxAwIRIgAyAMayIWIAQgCCALEFwgAEH//wNqIBZPBEAgACAMaiIAKAAAIAMoAABGDQILIAlBBnUhFiASIAggCxBIIQAgByEJIAdBAWohByASIQQgFiAOIgNqIg4gEE0NAAsMAwsDQCAAIgQgAU0gAyIJIAZNckUEQCAJQX9qIgMtAAAgBEF/aiIALQAARg0BCwsgBUEBaiEDAkAgCSAGayIAQQ9PBEAgBUHwAToAACAAQXFqIgdB/wFOBEAgA0H/ASAJQe8BaiIDIAdB/QMgB0H9A0gbIgcgBmprQf8BbkEBahAoGiAFIAMgBmsgB2tB/wFuIgdqQQJqIQMgACAHQYF+bGpB8n1qIQcLIAMgBzoAACADQQFqIQMMAQsgBSAAQQR0OgAACyADIAYgACADaiIHEDsgCSEGA0AgByAGIARrQf//A3EQLyAEQQRqIQMCfwJAAn8gESAGQQRqIgBNBEAgAAwBCyADKAAAIAAoAABzIgMNASAEQQhqIQMgBkEIagsiBCARSQRAA0AgAygAACAEKAAAcyIJBEAgCRAlIARqIABrDAQLIANBBGohAyAEQQRqIgQgEUkNAAsLAkAgBCAXTw0AIAMvAAAgBC8AAEcNACADQQJqIQMgBEECaiEECyAEIBNJBH8gBEEBaiAEIAMtAAAgBC0AAEYbBSAECyAAawwBCyADECULIQAgB0ECaiEDIAAgBmpBBGohBiAFLQAAIQQCfyAAQQ9PBEAgBSAEQQ9qOgAAIANBfxA0IABBcWoiBEH8B08EQANAIANBBGoiA0F/EDQgBEGEeGoiBEH7B0sNAAsLIAMgBEH//wNxQf8BbiIAaiIDIABBgX5sIARqOgAAIANBAWoMAQsgBSAAIARqOgAAIAMLIQUgBiAQTw0DIAZBfmoiACAAKAAAIAsQMCAIIAsgDBBJIAYoAAAhAAJAAkAgFUUEQCAAQQEQMCIAIAgQhQEhBCAGIAAgCEEBIAwQSSAEQf//A2ogBkkNASAEKAAAIAYoAABHDQEMAgsgACALEDAiAyAIIAsQSCEAIAYgDGsiBCADIAggCxBcIABB//8DaiAESQ0AIAAgDGoiBCgAACAGKAAARg0BCyAGQQFqIgMoAAAgCxAwIQQMAgsgBUEAOgAAIAVBAWohBwwACwALAAsgEw8LAkAgDyAGayIBQQ9PBEAgBUHwAToAACAFQQFqIQAgAUFxaiIDQf8BSQRAIAAiBSADOgAADAILIABB/wEgAUHyfWoiAEH/AW5BAWoQKBogAEH/AW4iAyAFakECaiIFIANBgX5sIABqOgAADAELIAUgAUEEdDoAAAsgBUEBaiAGIAEQKiABaiACaw8LIANBAWogACAEECogBGogAmsLJgAgAEEXNgIQIABBGDYCDCAAQRk2AgggAEEaNgIEIABBwBU2AgAL1QgBCX8gBAR/QRBBICAEQRB2IgUbQXhBACAFIAQgBRsiBUEIdiIEG2pBfEEAIAQgBSAEGyIFQQR2IgQbakF+QQAgBCAFIAQbIgVBAnYiBBtqIAQgBSAEG0EBS2sFQSELIQsgACABaiEJAkAgAUEPSQ0AIAlBfGohDCAJQXFqIQ0gACIGQQFqIgEhBANAIAEoAAAhB0EgIQEDQCAEIgUgAUEFdmoiBCANSwRAIAYhAAwDCyADIAdBvc/W8QFsIAt2QQF0aiIILwEAIQogBCgAACEHIAggBSAAazsBACABQQFqIQEgBSgAACAAIApqIgooAABHDQALIAUgBmsiCEF/aiEBAkACQCAIQT1OBEAgAkEBaiEEQQAhBwNAIAQgAToAACAEQQFqIQQgB0EBaiEHIAFBCHYiAQ0ACyACIAdBAnRBbGo6AAAMAQsgAiABQQJ0OgAAIAJBAWohBCAIQRBKDQAgAiAGKAAANgABIAIgBigABDYABSACIAYoAAg2AAkgAiAGKAAMNgANDAELIAQgBiAIECoaCyAEIAhqIQIDQCAKQQRqIQdBACEEAkACQCAMIAVBBGoiAUkNAANAIAEoAAAiBiAEIAdqKAAAIghGBEAgBEEEaiEEIAFBBGoiASAMTQ0BDAILCyAEQXhBACAGIAhzIgRBEHQiASAEIAEbIgZBCHQiBBtBD0EfIAEbakF8QQAgBCAGIAQbIgRBBHQiARtqQX5BACABIAQgARsiBEECdCIBG2ogASAEIAEbQf////8HcUEAR2tBA3VqIQQMAQsgASAJTw0AIAkgBCABa2ohBgNAIAQgB2otAAAgAS0AAEcNASAEQQFqIQQgAUEBaiIBIAlHDQALIAYhBAsgBSAKayEGIARBBGohAQJAIARBwABIBEAgASEHDAELIAEhBANAIAIgBjsAASACQf4BOgAAIAJBA2ohAiAEQYMBSiEIIARBQGoiByEEIAgNAAsLIAdBwQBOBEAgAiAGOwABIAJB7gE6AAAgB0FEaiEHIAJBA2ohAgsgASAFaiEFAn8gB0ELSiAGQf8PS3JFBEAgAiAGOgABIAIgBkEDdkHgAXEgB0ECdGpB8QFqOgAAIAJBAmoMAQsgAiAGOwABIAIgB0ECdEF+ajoAACACQQNqCyECIAUgDU8EQCAFIQAMAwsgAyAFQX9qIgEoAABBvc/W8QFsIAt2QQF0aiAFIABrIgRBf2o7AQAgACADIAUoAABBvc/W8QFsIAt2QQF0aiIGLwEAaiIKKAAAIQcgBiAEOwEAIAcgBSgAAEYNAAsgBUEBaiEEIAFBAmohASAFIQYMAAsAC
yAAIAlJBH8gCSAAayIDQX9qIQEgAgJ/IANBPU4EQCACQQFqIQRBACEHA0AgBCABOgAAIARBAWohBCAHQQFqIQcgAUEIdiIBDQALIAdBAnRBbGoMAQsgAkEBaiEEIAFBAnQLOgAAIAQgACADECogA2oFIAILC+sCAhV/AX5CsH8hGSACQQdxBH4gGQUgAwRAIAJBA3YhBSADQQN0IQkDQCAFBEAgCEEDdCIGIAVsIQogBkEHciILIAVsIQwgBkEGciINIAVsIQ4gBkEFciIPIAVsIRAgBkEEciIRIAVsIRIgBkEDciITIAVsIRQgBkECciIVIAVsIRYgBkEBciIXIAVsIRhBACEEA0AgASAGIAQgCWwiB2pqIAAgBCAKamotAAA6AAAgASAHIBdqaiAAIAQgGGpqLQAAOgAAIAEgByAVamogACAEIBZqai0AADoAACABIAcgE2pqIAAgBCAUamotAAA6AAAgASAHIBFqaiAAIAQgEmpqLQAAOgAAIAEgByAPamogACAEIBBqai0AADoAACABIAcgDWpqIAAgBCAOamotAAA6AAAgASAHIAtqaiAAIAQgDGpqLQAAOgAAIARBAWoiBCAFRw0ACwsgCEEBaiIIIANHDQALCyACIANsrQsLNAEBfkKwfyEFAkAgAkEHcQ0AIAAgBCACIAMQtgQiBUIAUw0AIAQgASACIAMQuAQhBQsgBQv2AgINfwJ+QrB/IREgAkEHcQR+IBEFIAIgA2whByADQQN0IgUEQCADQQdsIQkgA0EGbCEKIANBBWwhCyADQQJ0IQwgA0EDbCENIANBAXQhDiAFQX9qIAdPIQ8DQCAPRQRAIAZBA3YhEEEAIQggBSECA0AgASAIIBBqIgRqIAAgBiAIamopAwAiEUIHiCARhUKqgaiFoJWA1QCDIhIgEYUgEkIHhoUiEUIOiCARhULMmYOAwJkzgyISIBGFIBJCDoaFIhFCHIggEYVC8OHDhw+DIhIgEYUiETwAACABIAMgBGpqIBFCCIg8AAAgASAEIA5qaiARQhCIPAAAIAEgBCANamogEUIYiDwAACABIAQgDGpqIBEgEkIchoUiEUIgiDwAACABIAQgC2pqIBFCKIg8AAAgASAEIApqaiARQjCIPAAAIAEgBCAJamogEUI4iDwAACACIgggBWoiAkF/aiAHSQ0ACwsgBkEIaiIGIAVJDQALCyAHrQsLVQEBfkKwfyEFAkAgAkEHcQ0AIAAgASACIAMQvAQiBUIAUw0AIAEgBCACIAMQuwQiBUIAUw0AIAJBB3EEfkKwfwUgBCABIAMgAkEDdhC6BAshBQsgBQtZAQN/A0AgAgRAIAIgBGwhBkEAIQUDQCABIAVBA3QgBGogA2xqIAAgBSAGaiADbGogAxAqGiAFQQFqIgUgAkcNAAsLIARBAWoiBEEIRw0ACyACIANsQQN0rQvAAgIHfwJ+QrB/IQsgAiADbCIEQQdxBH4gCwUgBEEDdiICBEAgAkEHbCEFIAJBBmwhBiACQQVsIQcgAkECdCEIIAJBA2whCSACQQF0IQpBACEDA0AgASADaiAAIANBA3RqKQMAIgtCB4ggC4VCqoGohaCVgNUAgyIMIAuFIAxCB4aFIgtCDoggC4VCzJmDgMCZM4MiDCALhSAMQg6GhSILQhyIIAuFQvDhw4cPgyIMIAuFIgs8AAAgASACIANqaiALQgiIPAAAIAEgAyAKamogC0IQiDwAACABIAMgCWpqIAtCGIg8AAAgASADIAhqaiALIAxCHIaFIgtCIIg8AAAgASADIAdqaiALQiiIPAAAIAEgAyAGamogC0IwiDwAACABIAMgBWpqIAtCOIg8AAAgA0EBaiIDIAJHDQALCyAErQsLrQMBEn8CQCACRQ0AIAJBCE8EQANAIAMEQCADIAVsIQcgBUEHciIIIANsIQkgBUEGciIKIANsIQsgBUEFciIMIANsIQ0gBUEEciIOIANsIQ8gBUEDciIQIANsIREgBUECciISIANsIRMgBUEBciIUIANsIRVBACEEA0AgASAFIAIgBGwiBmpqIAAgBCAHamotAAA6AAAgASAGIBRqaiAAIAQgFWpqLQAAOgAAIAEgBiASamogACAEIBNqai0AADoAACABIAYgEGpqIAAgBCARamotAAA6AAAgASAGIA5qaiAAIAQgD2pqLQAAOgAAIAEgBiAMamogACAEIA1qai0AADoAACABIAYgCmpqIAAgBCALamotAAA6AAAgASAGIAhqaiAAIAQgCWpqLQAAOgAAIARBAWoiBCADRw0ACwsgBUEPaiEEIAVBCGohBSAEIAJJDQALCyACQXhxIgUgAk8NAANAIAMEQCADIAVsIQZBACEEA0AgASACIARsIAVqaiAAIAQgBmpqLQAAOgAAIARBAWoiBCADRw0ACwsgBUEBaiIFIAJHDQALCyACIANsrQuCAQEGfyABIAEgAG4iBiAAbGshByAAIAFNBEAgBkEBIAZBAUsbIQgDQCAABEAgACAEbCEJQQAhBQNAIAMgBSAJamogAiAFIAZsIARqai0AADoAACAFQQFqIgUgAEcNAAsLIARBAWoiBCAIRw0ACwsgAyABIAdrIgBqIAAgAmogBxAqGgsNACAAIAEgAiADEL0EC4IBAQZ/IAEgASAAbiIGIABsayEHIAAEQCAGQQEgBkEBSxshCANAIAAgAU0EQCAEIAZsIQlBACEFA0AgAyAFIAlqaiACIAAgBWwgBGpqLQAAOgAAIAVBAWoiBSAIRw0ACwsgBEEBaiIEIABHDQALCyADIAEgB2siAGogACACaiAHECoaC7gBAQN/AkAgAUEBSA0AIAAsAAAiBEH/AHEhAwJAIARBf0oNACABQQJIDQEgACwAASIEQQd0QYD/AHEgA3IhAyAEQX9KDQAgAUEDSA0BIAAsAAIiBEEOdEGAgP8AcSADciEDIARBf0oNACABQQRIDQEgACwAAyIEQRV0QYCAgP8AcSADciEDIARBf0oNACABQQVIDQEgAC0ABCIAQQ9LDQEgAEEcdCADciEDCyACIAM2AgBBASEFCyAFCw0AIAAgASACIAMQvwQLlAIBA38gACABEDcaIAJBA3YiBEH4////AXEhAyABIAJBB3EiBWohAiAAIAVqIQACQAJAAkACQAJAAkACQAJAIARBB3FBf2oOBwYFBAMCAQAHCyAAIAIQNyEAIAJBCGohAgsgACACEDchACACQQhqIQILIAAgAhA3IQAgAkEIaiECCyAAIAIQNyEAIAJBCGohAgsgACACEDchACACQQhqIQILIAAgAhA3IQAgAkEIaiECCyAAIAIQNyEAIAJBCGohAgsgAwRAA0AgACACEDcgAkEIahA3IAJBEGoQNyACQRhqEDcgAkEgahA3IAJBKGoQNyACQTBqEDcgAkE4ahA3IQAgAkFAayECIANBeGoiAw0ACwsgAAstACACBEADQCAAIAEtAAA6AAAgAEEBaiEAIAFBAWohASACQX9qIgINAAsLIAALvQUBA38gACABayIDQQlPBEAgACABIAIQ
UA8LAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAIANBfmpBH3cOEAABDAIMDAwDBAUGBwgJCgsMCyACQQFNDQwDQCAAIAEQeCEAIAJBfmoiAkEBSw0ACwwMCyACQQNNDQsDQCAAIAEQdyEAIAJBfGoiAkEDSw0ACwwLCyACQQdNDQoDQCAAIAEQNyEAIAJBeGoiAkEHSw0ACwwKCyACQQ9NDQkDQCAAIAEQViEAIAJBcGoiAkEPSw0ACwwJCyACQRJJDQggAUEQaiEDA0AgACABEFYgAxB4IQAgAkFuaiICQRFLDQALDAgLIAJBFEkNByABQRBqIQMDQCAAIAEQViADEHchACACQWxqIgJBE0sNAAsMBwsgAkEWSQ0GIAFBFGohAyABQRBqIQQDQCAAIAEQViAEEHcgAxB4IQAgAkFqaiICQRVLDQALDAYLIAJBGEkNBSABQRBqIQMDQCAAIAEQViADEDchACACQWhqIgJBF0sNAAsMBQsgAkEaSQ0EIAFBGGohAyABQRBqIQQDQCAAIAEQViAEEDcgAxB4IQAgAkFmaiICQRlLDQALDAQLIAJBHEkNAyABQRhqIQMgAUEQaiEEA0AgACABEFYgBBA3IAMQdyEAIAJBZGoiAkEbSw0ACwwDCyACQR5JDQIgAUEcaiEDIAFBGGohBCABQRBqIQUDQCAAIAEQViAFEDcgBBB3IAMQeCEAIAJBYmoiAkEdSw0ACwwCCyACQR9NDQEDQCAAIAEQlAIhACACQWBqIgJBH0sNAAsMAQsgAkUNAQNAIAAgAS0AADoAACAAQQFqIQAgAUEBaiEBIAJBf2oiAg0ACwwBCyACRQ0AA0AgACABLQAAOgAAIABBAWohACABQQFqIQEgAkF/aiICDQALCyAAC7EBAgJ/An4gAEF/ai0AACEDAkACQCABQXhqIgQgAE0NACADrUL/AYNCgYKEiJCgwIABfiEFA0AgAikAACIGIAVRBEAgAkEIaiECIABBCGoiACAESQ0BDAILCyAGp0H/AXEgA0cNAQNAIABBAWohACACLQABIQEgAkEBaiECIAEgA0YNAAsMAQsgACABTw0AA0AgAi0AACADRw0BIAJBAWohAiAAQQFqIgAgAUkNAAsLIAALJgEBf0ECIQQgAygCACABEJUCTwR/IAAgASACIAMQlQRBAAUgBAsLC8zcATgAQYAIC4MGTjZzbmFwcHk0U2lua0UAABh0AAAABAAATjZzbmFwcHk2U291cmNlRQAAAAAYdAAAGAQAAAAAAABsBAAAAQAAAAIAAAADAAAABAAAAAUAAABONnNuYXBweTE1Qnl0ZUFycmF5U291cmNlRQAAjHIAAFAEAAAsBAAAAAAAALQEAAAGAAAABwAAAAgAAAAJAAAATjZzbmFwcHkyMlVuY2hlY2tlZEJ5dGVBcnJheVNpbmtFAAAAjHIAAJAEAAAQBAAAAQAECAEQASACAAUIAhACIAMABggDEAMgBAAHCAQQBCAFAAgIBRAFIAYACQgGEAYgBwAKCAcQByAIAAsICBAIIAkABAkJEAkgCgAFCQoQCiALAAYJCxALIAwABwkMEAwgDQAICQ0QDSAOAAkJDhAOIA8ACgkPEA8gEAALCRAQECARAAQKERARIBIABQoSEBIgEwAGChMQEyAUAAcKFBAUIBUACAoVEBUgFgAJChYQFiAXAAoKFxAXIBgACwoYEBggGQAECxkQGSAaAAULGhAaIBsABgsbEBsgHAAHCxwQHCAdAAgLHRAdIB4ACQseEB4gHwAKCx8QHyAgAAsLIBAgICEABAwhECEgIgAFDCIQIiAjAAYMIxAjICQABwwkECQgJQAIDCUQJSAmAAkMJhAmICcACgwnECcgKAALDCgQKCApAAQNKRApICoABQ0qECogKwAGDSsQKyAsAAcNLBAsIC0ACA0tEC0gLgAJDS4QLiAvAAoNLxAvIDAACw0wEDAgMQAEDjEQMSAyAAUOMhAyIDMABg4zEDMgNAAHDjQQNCA1AAgONRA1IDYACQ42EDYgNwAKDjcQNyA4AAsOOBA4IDkABA85EDkgOgAFDzoQOiA7AAYPOxA7IDwABw88EDwgAQgIDz0QPSABEAkPPhA+IAEYCg8/ED8gASALD0AQQCAAAAAA/wAAAP//AAD///8A/////2RlY29tcHJlc3MAY29tcHJlc3MAZnJlZV9yZXN1bHQAdmkAAHhzAABpaWlpaWlpAEGQDgvUBigHAAAwBwAAMAcAAMxzAADMcwAAzHMAABh0AAC2BwAAQHQAAEgHAAAAAAAAAQAAAIgHAAAAAAAATlN0M19fMjEyYmFzaWNfc3RyaW5nSWNOU18xMWNoYXJfdHJhaXRzSWNFRU5TXzlhbGxvY2F0b3JJY0VFRUUAABh0AACQBwAATlN0M19fMjIxX19iYXNpY19zdHJpbmdfY29tbW9uSUxiMUVFRQBOMTBlbXNjcmlwdGVuM3ZhbEUAAAAAGHQAANQHAABOMTBlbXNjcmlwdGVuMTFtZW1vcnlfdmlld0loRUUAaWlpAAAoBwAAMAcAABgIAAAgCAAAJAgAACoIAAAxCAAANggAAGJsb3NjbHoAbHo0AGx6NGhjAHNuYXBweQB6bGliAHpzdGQARXJyb3IuICBudGhyZWFkcyBjYW5ub3QgYmUgbGFyZ2VyIHRoYW4gQkxPU0NfTUFYX1RIUkVBRFMgKCVkKQBFcnJvci4gIG50aHJlYWRzIG11c3QgYmUgYSBwb3NpdGl2ZSBpbnRlZ2VyAEVSUk9SOyByZXR1cm4gY29kZSBmcm9tIHB0aHJlYWRfY3JlYXRlKCkgaXMgJWQKAAlFcnJvciBkZXRhaWw6ICVzCgBCbG9zYyBoYXMgbm90IGJlZW4gY29tcGlsZWQgd2l0aCAnJXMnIABjb21wcmVzc2lvbiBzdXBwb3J0LiAgUGxlYXNlIHVzZSBvbmUgaGF2aW5nIGl0LgBFcnJvciBhbGxvY2F0aW5nIG1lbW9yeSEARVJST1I7IHJldHVybiBjb2RlIGZyb20gcHRocmVhZF9qb2luKCkgaXMgJWQKAElucHV0IGJ1ZmZlciBzaXplIGNhbm5vdCBleGNlZWQgJWQgYnl0ZXMKAE91dHB1dCBidWZmZXIgc2l6ZSBzaG91bGQgYmUgbGFyZ2VyIHRoYW4gJWQgYnl0ZXMKAGBjbGV2ZWxgIHBhcmFtZXRlciBtdXN0IGJlIGJldHdlZW4gMCBhbmQgOSEKAGBzaHVmZmxlYCBwYXJhbWV0ZXIgbXVzdCBiZSBlaXRoZXIgMCwgMSBvciAyIQoAAAAAAQAAgAAAAAABAAAAAQAACgoLDA0ODg4O/wAICBAgICAgQABB9hQLUfC/mpmZmZmZuT+amZmZmZnJPzMzMzMzM9M/mpmZmZmZ2T8zMzMzMzPjP83MzMzMzOw/ZmZmZmZm7j8AAAAAAADwPwAAAAAAAPA/Z2VuZXJpYwBB1BULGQEAAAACAAAAAQAAAAAAAAAEAAAABAAAAAQAQfwVC64B///
///z///8BAAAAAgAAAAMAAAAAAAAAAgAAABAAAAAAAAAAAgAAABAAAAAAAAAAAgAAABAAAAAAAAAABAAAABAAAAAAAAAACAAAABAAAAAAAAAAEAAAABAAAAAAAAAAIAAAABAAAAAAAAAAQAAAABAAAAAAAAAAgAAAABAAAAAAAAAAAAEAABAAAAABAAAAYAAAAEAAAAABAAAAAAIAAIAAAAABAAAAAEAAAAAQAEG0FwvxQJYwB3csYQ7uulEJmRnEbQeP9GpwNaVj6aOVZJ4yiNsOpLjceR7p1eCI2dKXK0y2Cb18sX4HLbjnkR2/kGQQtx3yILBqSHG5895BvoR91Noa6+TdbVG11PTHhdODVphsE8Coa2R6+WL97Mllik9cARTZbAZjYz0P+vUNCI3IIG47XhBpTORBYNVycWei0eQDPEfUBEv9hQ3Sa7UKpfqotTVsmLJC1sm720D5vKzjbNgydVzfRc8N1txZPdGrrDDZJjoA3lGAUdfIFmHQv7X0tCEjxLNWmZW6zw+lvbieuAIoCIgFX7LZDMYk6Quxh3xvLxFMaFirHWHBPS1mtpBB3HYGcdsBvCDSmCoQ1e+JhbFxH7W2BqXkv58z1LjooskHeDT5AA+OqAmWGJgO4bsNan8tPW0Il2xkkQFcY+b0UWtrYmFsHNgwZYVOAGLy7ZUGbHulARvB9AiCV8QP9cbZsGVQ6bcS6ri+i3yIufzfHd1iSS3aFfN804xlTNT7WGGyTc5RtTp0ALyj4jC71EGl30rXldg9bcTRpPv01tNq6WlD/NluNEaIZ63QuGDacy0EROUdAzNfTAqqyXwN3TxxBVCqQQInEBALvoYgDMkltWhXs4VvIAnUZrmf5GHODvneXpjJ2SkimNCwtKjXxxc9s1mBDbQuO1y9t61susAgg7jttrO/mgzitgOa0rF0OUfV6q930p0VJtsEgxbccxILY+OEO2SUPmptDahaanoLzw7knf8JkyeuAAqxngd9RJMP8NKjCIdo8gEe/sIGaV1XYvfLZ2WAcTZsGecGa252G9T+4CvTiVp62hDMSt1nb9+5+fnvvo5DvrcX1Y6wYOij1tZ+k9GhxMLYOFLy30/xZ7vRZ1e8pt0GtT9LNrJI2isN2EwbCq/2SgM2YHoEQcPvYN9V32eo745uMXm+aUaMs2HLGoNmvKDSbyU24mhSlXcMzANHC7u5FgIiLyYFVb47usUoC72yklq0KwRqs1yn/9fCMc/QtYue2Swdrt5bsMJkmybyY+yco2p1CpNtAqkGCZw/Ng7rhWcHchNXAAWCSr+VFHq44q4rsXs4G7YMm47Skg2+1eW379x8Id/bC9TS04ZC4tTx+LPdaG6D2h/NFr6BWya59uF3sG93R7cY5loIiHBqD//KOwZmXAsBEf+eZY9prmL40/9rYUXPbBZ44gqg7tIN11SDBE7CswM5YSZnp/cWYNBNR2lJ23duPkpq0a7cWtbZZgvfQPA72DdTrrypxZ673n/Pskfp/7UwHPK9vYrCusowk7NTpqO0JAU20LqTBtfNKVfeVL9n2SMuemazuEphxAIbaF2UK28qN74LtKGODMMb3wVaje8CLQAAAABBMRsZgmI2MsNTLSsExWxkRfR3fYanWlbHlkFPCIrZyEm7wtGK6O/6y9n04wxPtaxNfq61ji2Dns8cmIdREsJKECPZU9Nw9HiSQe9hVdeuLhTmtTfXtZgcloSDBVmYG4IYqQCb2/otsJrLNqldXXfmHGxs/98/QdSeDlrNoiSEleMVn4wgRrKnYXepvqbh6PHn0PPoJIPew2Wyxdqqrl1d659GRCjMa29p/XB2rmsxOe9aKiAsCQcLbTgcEvM2Rt+yB13GcVRw7TBla/T38yq7tsIxonWRHIk0oAeQ+7yfF7qNhA553qklOO+yPP9583O+SOhqfRvFQTwq3lgFT3nwRH5i6YctT8LGHFTbAYoVlEC7Do2D6COmwtk4vw3FoDhM9Lshj6eWCs6WjRMJAMxcSDHXRYti+m7KU+F3VF27uhVsoKPWP42Ilw6WkVCY194RqczH0vrh7JPL+vVc12JyHeZ5a961VECfhE9ZWBIOFhkjFQ/acDgkm0EjPadr/WXmWuZ8JQnLV2Q40E6jrpEB4p+KGCHMpzNg/bwqr+Ekre7QP7QtgxKfbLIJhqskSMnqFVPQKUZ++2h3ZeL2eT8vt0gkNnQbCR01KhIE8rxTS7ONSFJw3mV5Me9+YP7z5ue/wv3+fJHQ1T2gy8z6NoqDuweRmnhUvLE5ZaeoS5iDOwqpmCLJ+rUJiMuuEE9d718ObPRGzT/ZbYwOwnRDElrzAiNB6sFwbMGAQXfYR9c2lwbmLY7FtQClhIQbvBqKQXFbu1pomOh3Q9nZbFoeTy0VX342DJwtGyfdHAA+EgCYuVMxg6CQYq6L0VO1khbF9N1X9O/ElKfC79WW2fbpvAeuqI0ct2veMZwq7yqF7XlryqxIcNNvG134LipG4eE23magB8V/Y1ToVCJl803l87ICpMKpG2eRhDAmoJ8puK7F5Pmf3v06zPPWe/3oz7xrqYD9WrKZPgmfsn84hKuwJBws8RUHNTJGKh5zdzEHtOFwSPXQa1E2g0Z6d7JdY07X+ssP5uHSzLXM+Y2E1+BKEpavCyONtshwoJ2JQbuERl0jAwdsOBrEPxUxhQ4OKEKYT2cDqVR+wPp5VYHLYkwfxTiBXvQjmJ2nDrPclhWqGwBU5VoxT/yZYmLX2FN5zhdP4UlWfvpQlS3Xe9QczGITio0tUruWNJHoux/Q2aAG7PN+Xq3CZUdukUhsL6BTdeg2EjqpBwkjalQkCCtlPxHkeaeWpUi8j2YbkaQnKoq94LzL8qGN0Oti3v3AI+/m2b3hvBT80KcNP4OKJn6ykT+5JNBw+BXLaTtG5kJ6d/1btWtl3PRafsU3CVPudjhI97GuCbjwnxKhM8w/inL9JJMAAAAAN2rCAW7UhANZvkYC3KgJB+vCywayfI0EhRZPBbhREw6PO9EP1oWXDeHvVQxk+RoJU5PYCAotngo9R1wLcKMmHEfJ5B0ed6IfKR1gHqwLLxubYe0awt+rGPW1aRnI8jUS/5j3E6YmsRGRTHMQFFo8FSMw/hR6jrgWTeR6F+BGTTjXLI85jpLJO7n4Czo87kQ/C4SGPlI6wDxlUAI9WBdeNm99nDc2w9o1AakYNIS/VzGz1ZUw6mvTMt0BETOQ5Wskp4+pJf4x7yfJWy0mTE1iI3snoCIimeYgFfMkISi0eCof3rorRmD8KXEKPij0HHEtw3azLJrI9S6tojcvwI2acPfnWHGuWR5zmTPcchwlk3crT1F2cvEXdEWb1XV43Il+T7ZLfxYIDX0hYs98pHSAeZMeQnjKoAR6/crGe7AuvGyHRH5t3vo4b+mQ+m5shrVrW+x3agJSMWg1OPNpCH+vYj8VbWNmqythUcHpYNTXpmXjvWRkugMiZo1p4Gcgy9dIF6EVSU4fU0t5dZFK/GPeT8sJHE6St1pMpd2YTZiaxEav8AZH9k5ARcEkgkREMs1Bc1gPQCrmSU
IdjItDUGjxVGcCM1U+vHVXCda3VozA+FO7qjpS4hR8UNV+vlHoOeJa31MgW4btZlmxh6RYNJHrXQP7KVxaRW9ebS+tX4AbNeG3cffg7s+x4tmlc+Ncszzma9n+5zJnuOUFDXrkOEom7w8g5O5WnqLsYfRg7eTiL+jTiO3pijar671caerwuBP9x9LR/J5sl/6pBlX/LBAa+ht62PtCxJ75da5c+EjpAPN/g8LyJj2E8BFXRvGUQQn0oyvL9fqVjffN/0/2YF142Vc3utgOifzaOeM+27z1cd6Ln7Pf0iH13eVLN9zYDGvX72ap1rbY79SBsi3VBKRi0DPOoNFqcObTXRok0hD+XsUnlJzEfiraxklAGMfMVlfC+zyVw6KC08GV6BHAqK9Ny5/Fj8rGe8nI8RELyXQHRMxDbYbNGtPAzy25As5Alq+Rd/xtkC5CK5IZKOmTnD6mlqtUZJfy6iKVxYDglPjHvJ/PrX6elhM4nKF5+p0kb7WYEwV3mUq7MZt90fOaMDWJjQdfS4xe4Q2OaYvPj+ydgIrb90KLgkkEibUjxoiIZJqDvw5YguawHoDR2tyBVMyThGOmUYU6GBeHDXLVhqDQ4qmXuiCozgRmqvlupKt8eOuuSxIprxKsb60lxq2sGIHxpy/rM6Z2VXWkQT+3pcQp+KDzQzqhqv18o52XvqLQc8S15xkGtL6nQLaJzYK3DNvNsjuxD7NiD0mxVWWLsGgi17tfSBW6BvZTuDGckbm0it68g+AcvdpeWr/tNJi+AAAAAGVnvLiLyAmq7q+1EleXYo8y8N433F9rJbk4153vKLTFik8IfWTgvW8BhwHXuL/WSt3YavIzd9/gVhBjWJ9XGVD6MKXoFJ8Q+nH4rELIwHvfrafHZ0MIcnUmb87NcH+tlRUYES37t6Q/ntAYhyfozxpCj3OirCDGsMlHegg+rzKgW8iOGLVnOwrQAIeyaThQLwxf7Jfi8FmFh5flPdGHhmW04DrdWk+Pzz8oM3eGEOTq43dYUg3Y7UBov1H4ofgr8MSfl0gqMCJaT1ee4vZvSX+TCPXHfadA1RjA/G1O0J81K7cjjcUYlp+gfyonGUf9unwgQQKSj/QQ9+hIqD1YFJtYP6gjtpAdMdP3oYlqz3YUD6jKrOEHf76EYMMG0nCgXrcXHOZZuKn0PN8VTIXnwtHggH5pDi/Le2tId8OiDw3Lx2ixcynHBGFMoLjZ9ZhvRJD/0/x+UGbuGzfaVk0nuQ4oQAW2xu+wpKOIDBwasNuBf9dnOZF40iv0H26TA/cmO2aQmoOIPy+R7ViTKVRgRLQxB/gM36hNHrrP8abs35L+ibguRmcXm1QCcCfsu0jwcd4vTMkwgPnbVedFY5ygP2v5x4PTF2g2wXIPinnLN13krlDhXED/VE4lmOj2c4iLrhbvNxb4QIIEnSc+vCQf6SFBeFWZr9fgi8qwXDM7tlntXtHlVbB+UEfVGez/bCE7YglGh9rn6TLIgo6OcNSe7Six+VGQX1bkgjoxWDqDCY+n5m4zHwjBhg1tpjq1pOFAvcGG/AUvKUkXSk71r/N2IjKWEZ6KeL4rmB3ZlyBLyfR4Lq5IwMAB/dKlZkFqHF6W93k5Kk+Xlp9d8vEj5QUZa01gftf1jtFi5+u23l9SjgnCN+m1etlGAGi8IbzQ6jHfiI9WYzBh+dYiBJ5qmr2mvQfYwQG/Nm60rVMJCBWaTnId/ynOpRGGe7d04ccPzdkQkqi+rCpGERk4I3algHVmxtgQAXpg/q7PcpvJc8oi8aRXR5YY76k5rf3MXhFFBu5NdmOJ8c6NJkTc6EH4ZFF5L/k0HpNB2rEmU7/WmuvpxvmzjKFFC2IO8BkHaUyhvlGbPNs2J4Q1mZKWUP4uLpm5VCb83uieEnFdjHcW4TTOLjapq0mKEUXmPwMggYO7dpHg4xP2XFv9WelJmD5V8SEGgmxEYT7Uqs6Lxs+pN344QX/WXSbDbrOJdnzW7srEb9YdWQqxoeHkHhTzgXmoS9dpyxOyDnerXKHCuTnGfgGA/qmc5ZkVJAs2oDZuURyOpxZmhsJx2j4s3m8sSbnTlPCBBAmV5rixe0kNox4usRtIPtJDLVlu+8P22+mmkWdRH6mwzHrODHSUYblm8QYF3gAAAAB3BzCW7g5hLJkJUboHbcQZcGr0j+ljpTWeZJWjDtuIMnncuKTg1ekel9LZiAm2TCt+sXy957gtB5C/HZEdtxBkarAg8vO5cUiEvkHeGtrUfW3d5Ov01LVRg9OFxxNsmFZka6jA/WL5eoplyewUAVxPYwZs2foPPWONCA31O24gyExpEF7VYEHkomdxcjwD5NFLBNRH0g2F/aUKtWs1taj6QrKYbNu7ydasvPlAMths40XfXHXc1g3Pq9E9WSbZMKxR3gA6yNdRgL/QYRYhtPS1VrPEI8+6lZm4vaUPKAK4nl8FiAjGDNmysQvpJC9vfIdYaEwRwWEdq7ZmLT123EGQAdtxBpjSILzv1RAqcbGFiQa2tR+fv+Sl6LjUM3gHyaIPAPk0lgmojuEOmBh/ag27CG09LZFkbJfmY1wBa2tR9BxsYWKFZTDY8mIATmwGle0bAaV7ggj0wfUPxFdlsNnGErfpUIu+uOr8uYh8Yt0d3xXaLUmM03zz+9RMZU2yYVg6tVHOo7wAdNS7MOJK36VBPdiV16TRxG3T1vT7Q2npajRu2fytZ4hG2mC40EQELXMzAx3lqgpMX90NfMlQBXE8JwJBqr4LEBDJDCCGV2i1JSBvhbO5ZtQJzmHkn17e+Q4p2cmYsNCYIsfXqLRZsz0XLrQNgbe9XDvAumyt7biDIJq/s7YDtuIMdLHSmurVRzmd0nevBNsmFXPcFoPjYwsSlGQ7hA1taj56alqo5A7PC5MJ/50KAK4nfQeesfAPk0SHCKPSHgHyaGkGwv73YlddgGVnyxlsNnFuawbn/tQbdonTK+AQ2npaZ91KzPm532+Ovu/5F7e+Q2CwjtXW1qPoodGTfjjYwsRP3/JS0btn8aa8V2c/tQbdSLI2S9gNK9qvChtMNgNK9kEEemDfYO/DqGffVTFuju9Gab55y2GzjLxmgxolb9KgUmjiNswMd5W7C0cDIgIWuVUFJi/Fuju+sr0LKCu0WpJcs2oEwtf/p7XQzzEs2Z6LW96uHZtkwrDsY/ImdWqjnAJtkwqcCQap6w42P3IHZ4UFAFcTlb9KguK4ehR7sSuuDLYbOJLSjpvl1b4NfNzvtwvb3yGG09LU8dTiQmjds/gf2oNugb4Wzfa5JltvsHfhGLdHd4gIWub/D2pwZgY7yhEBC1yPZZ7/+GKuaWFr/9MWbM9FoArieNcN0u5OBINUOQOzwqdnJmHQYBb3SWlHTT5ud9uu0WpK2dZa3EDfC2Y32DvwqbyuU967nsVHss9/MLX/6b298hzKusKKU7OTMCS0o6a60DYFzdcGk1TeVykj2We/s2Z6LsRhSrhdaBsCKm8rlLQLvjfDDI6hWgXfGy0C740AAAAAGRsxQTI2YoIrLVPDZGzFBH139EVWWqeGT0GWx8jZigjRw
rtJ+u/oiuP02custU8Mta5+TZ6DLY6HmBzPSsISUVPZIxB49HDTYe9Bki6u11U3teYUHJi11wWDhJaCG5hZmwCpGLAt+tupNsua5nddXf9sbBzUQT/fzVoOnpWEJKKMnxXjp7JGIL6pd2Hx6OGm6PPQ58PegyTaxbJlXV2uqkRGn+tva8wodnD9aTkxa64gKlrvCwcJLBIcOG3fRjbzxl0Hsu1wVHH0a2Uwuyrz96IxwraJHJF1kAegNBefvPsOhI26JaneeTyy7zhz83n/auhIvkHFG31Y3io88HlPBelifkTCTy2H21QcxpQVigGNDrtApiPog7842cI4oMUNIbv0TAqWp48TjZbOXMwACUXXMUhu+mKLd+FTyrq7XVSjoGwViI0/1pGWDpfe15hQx8ypEezh+tL1+suTcmLXXGt55h1AVLXeWU+EnxYOElgPFSMZJDhw2j0jQZtl/WunfOZa5lfLCSVO0DhkAZGuoxiKn+Izp8whKrz9YK0k4a+0P9DunxKDLYYJsmzJSCSr0FMV6vt+RiniZXdoLz959jYkSLcdCRt0BBIqNUtTvPJSSI2zeWXecGB+7zHn5vP+/v3Cv9XQkXzMy6A9g4o2+pqRB7uxvFR4qKdlOTuDmEsimKkKCbX6yRCuy4hf711PRvRsDm3ZP810wg6M81oSQ+pBIwLBbHDB2HdBgJc210eOLeYGpQC1xbwbhIRxQYoaaFq7W0N36JhabNnZFS1PHgw2fl8nGy2cPgAc3bmYABKggzFTi65ikJK1U9Hd9MUWxO/0V+/Cp5T22ZbVrge86bccjaicMd5rhSrvKspree3TcEis+F0bb+FGKi5m3jbhf8UHoFToVGNN82UiArLz5RupwqQwhJFnKZ+gJuTFrrj93p/51vPMOs/o/XuAqWu8mbJa/bKfCT6rhDh/LBwksDUHFfEeKkYyBzF3c0hw4bRRa9D1ekaDNmNdsnfL+tdO0uHmD/nMtczg14SNr5YSSraNIwudoHDIhLtBiQMjXUYaOGwHMRU/xCgODoVnT5hCflSpA1V5+sBMYsuBgTjFH5gj9F6zDqedqhWW3OVUABv8TzFa12Jimc55U9hJ4U8XUPp+VnvXLZVizBzULY2KEzSWu1Ifu+iRBqDZ0F5+8+xHZcKtbEiRbnVToC86EjboIwkHqQgkVGoRP2Urlqd55I+8SKWkkRtmvYoqJ/LLvODr0I2hwP3eYtnm7yMUvOG9DafQ/CaKgz8/kbJ+cNAkuWnLFfhC5kY7W/13etxla7XFflr07lMJN/dIOHa4Ca6xoRKf8Io/zDOTJP1yAAAAAAHCajcDhNRuAka+WQcJqNwGy8LrBI18sgVPFoUOE1G4D9E7jw2XhdYMVe/hCRr5ZAjYk1MKni0KC1xHPRwmo3Ad5MlHH6J3Hh5gHSkbLwusGu1hmxir38IZabX1EjXyyBP3mP8RsSamEHNMkRU8WhQU/jAjFriOehd65E04TUbgOY8s1zvJko46C/i5P0TuPD6GhAs8wDpSPQJQZTZeF1g3nH1vNdrDNjQYqQExV7+EMJXVszLTa+ozEQHdJGvlkCWpj6cn7zH+Ji1bySNiTUwioCd7IOaZIiEk8xUqeLQoK7reHyn8YEYoPgpxLXEc9CyzdsMu9ciaLzeirXCajcBxWOf3cx5ZrnLcM5l3kyUcdlFPK3QX8XJ11ZtFfonceH9Ltk99DQgWfM9iIXmAdKR4Qh6TegSgynvGyv1svC6wbX5Eh284+t5u+pDpa7WGbGp37FtoMVICafM4NWKvfwhjbRU/YSurZmDpwVFlptfUZGS942YiA7pn4GmNSNfLIEkVoRdLUx9OSpF1eU/eY/xOHAnLTFq3kk2Y3aVGxJqYRwbwr0VATvZEgiTBQc0yREAPWHNCSeYqQ4uMHVTxaFBVMwJnV3W8Pla31glT+MCMUjqqu1B8FOJRvn7VWuI56FsgU99ZZu2GWKSHsV3rkTRcKfsDXm9FWl+tL23hNRuA4Pdxt+Kxz+7jc6XZ5jyzXOf+2WvluGcy5HoNBe8mSjju5CAP7KKeVu1g9GHoL+Lk6e2I0+urNorqaVy9/RO48PzR0sf+l2ye/1UGqfoaECz72Hob+Z7EQvhcrnXzAOlI8sKDf/CEPSbxRlcR9AlBlPXLK6P3jZX69k//zdl4XWDYujdX2vyJDts+4znecfW837Ofi931IdLcN0vl12sM2NapZu/U79i21S2ygdBipATRoM4z0+ZwatIkGl3FXv4QxJyUJ8baKn7HGEBJwldWzMOVPPvB04KiwBHolctNr6jKj8WfyMl7xskLEfHMRAd0zYZtQ8/A0xrOArktka+WQJBt/HeSK0Iuk+koGZamPpyXZFSrlSLq8pTggMWfvMf4nn6tz5w4E5ad+nmhmLVvJJl3BRObMbtKmvPRfY2JNTCMS18Hjg3hXo/Pi2mKgJ3si0L324kESYKIxiO1g5pkiIJYDr+AHrDmgdza0YSTzFSFUaZjhxcYOobVcg2p4tCgqCC6l6pmBM6rpG75rut4fK8pEkutb6wSrK3GJafxgRimM+svpHVVdqW3P0Gg+CnEoTpD86N8/aqivpedtcRz0LQGGee2QKe+t4LNibLN2wyzD7E7sUkPYrCLZVW71yJouhVIX7hT9ga5kZwxvN6KtL0c4IO/Wl7avpg07QAAAAC4vGdlqgnIixK1r+6PYpdXN97wMiVrX9yd1zi5xbQo730IT4pvveBk1wGHAUrWv7jyatjd4N93M1hjEFZQGVef6KUw+voQnxRCrPhx33vAyGfHp611cghDzc5vJpWtf3AtERgVP6S3+4cY0J4az+gnonOPQrDGIKwIekfJoDKvPhiOyFsKO2e1socA0C9QOGmX7F8MhVnw4j3ll4dlhofR3TrgtM+PT1p3Myg/6uQQhlJYd+NA7dgN+FG/aPAr+KFIl5/EWiIwKuKeV09/SW/2x/UIk9VAp31t/MAYNZ/QTo0jtyuflhjFJyp/oLr9RxkCQSB8EPSPkqhI6PebFFg9I6g/WDEdkLaJoffTFHbPaqzKqA++fwfhBsNghF6gcNLmHBe39Km4WUwV3zzRwueFaX6A4HvLLw7Dd0hryw0PonOxaMdhBMcp2bigTERvmPX80/+Q7mZQflbaNxsOuSdNtgVAKKSw78YcDIijgduwGjln138r0niRk24f9Dsm9wODmpBmkS8/iCmTWO20RGBUDPgHMR5NqN+m8c+6/pLf7EYuuIlUmxdn7CdwAnHwSLvJTC/e2/mAMGNF51VrP6Cc04PH+cE2aBd5ig9y5F03y1zhUK5OVP9A9uiYJa6LiHMWN+8WBIJA+Lw+J50h6R8kmVV4QYvg168zXLDK7Vm2O1Xl0V5HUH6w/+wZ1WI7IWzah0YJyDLp53COjoIo7Z7UkFH5sYLkVl86WDE6p48Jgx8zbuYNhsEItTqmbb1A4aQF/IbBF0kpL6/1TkoyInbzip4Rlpgrvnggl9kdePTJS8BIri7S/QHAakFmpfeWXhxPKjl5XZ+Wl+Uj8fJNaxkF9dd+YOdi0Y5f3rbr
wgmOUnq16TdoAEbZ0LwhvIjfMeowY1aPItb5YZpqngQHvaa9vwHB2K20bjYVCAlTHXJOmqXOKf+3e4YRD8fhdJIQ2c0qrL6oOBkRRoCldiPYxmZ1YHoBEHLPrv7Kc8mbV6TxIu8Ylkf9rTmpRRFezHZN7gbO8Ylj3EQmjWT4Qej5L3lRQZMeNFMmsdrrmta/s/nG6QtFoYwZ8A5ioUxpBzybUb6EJzbblpKZNS4u/lAmVLmZnuje/IxdcRI04RZ3qTYuzhGKSasDP+ZFu4OBIOPgkXZbXPYTSelZ/fFVPphsggYh1D5hRMaLzqp+N6nP1n9BOG7DJl18domzxMru1lkd1m/hobEK8xQe5EuoeYETy2nXq3cOsrnCoVwBfsY5nKn+gCQVmeU2oDYLjhxRboZmFqc+2nHCLG/eLJTTuUkJBIHwsbjmlaMNSXsbsS4eQ9I+SPtuWS3p2/bDUWeRpsywqR90DM56ZrlhlN4FBvEAAAAAAAAAAB0AAAAEAAQACAAEAB4AAAAEAAUAEAAIAB4AAAAEAAYAIAAgAB4AAAAEAAQAEAAQAB8AAAAIABAAIAAgAB8AAAAIABAAgACAAB8AAAAIACAAgAAAAR8AAAAgAIAAAgEABB8AAAAgAAIBAgEAEB8AQfDYAAsJAgAAAAMAAAAHAEGC2QALdQUAEAAFAAgABQAYAAUABAAFABQABQAMAAUAHAAFAAIABQASAAUACgAFABoABQAGAAUAFgAFAA4ABQAeAAUAAQAFABEABQAJAAUAGQAFAAUABQAVAAUADQAFAB0ABQADAAUAEwAFAAsABQAbAAUABwAFABcABQBBkNoAC2UBAAAAAQAAAAIAAAACAAAAAwAAAAMAAAAEAAAABAAAAAUAAAAFAAAABgAAAAYAAAAHAAAABwAAAAgAAAAIAAAACQAAAAkAAAAKAAAACgAAAAsAAAALAAAADAAAAAwAAAANAAAADQBBgNsAC/8IDAAIAIwACABMAAgAzAAIACwACACsAAgAbAAIAOwACAAcAAgAnAAIAFwACADcAAgAPAAIALwACAB8AAgA/AAIAAIACACCAAgAQgAIAMIACAAiAAgAogAIAGIACADiAAgAEgAIAJIACABSAAgA0gAIADIACACyAAgAcgAIAPIACAAKAAgAigAIAEoACADKAAgAKgAIAKoACABqAAgA6gAIABoACACaAAgAWgAIANoACAA6AAgAugAIAHoACAD6AAgABgAIAIYACABGAAgAxgAIACYACACmAAgAZgAIAOYACAAWAAgAlgAIAFYACADWAAgANgAIALYACAB2AAgA9gAIAA4ACACOAAgATgAIAM4ACAAuAAgArgAIAG4ACADuAAgAHgAIAJ4ACABeAAgA3gAIAD4ACAC+AAgAfgAIAP4ACAABAAgAgQAIAEEACADBAAgAIQAIAKEACABhAAgA4QAIABEACACRAAgAUQAIANEACAAxAAgAsQAIAHEACADxAAgACQAIAIkACABJAAgAyQAIACkACACpAAgAaQAIAOkACAAZAAgAmQAIAFkACADZAAgAOQAIALkACAB5AAgA+QAIAAUACACFAAgARQAIAMUACAAlAAgApQAIAGUACADlAAgAFQAIAJUACABVAAgA1QAIADUACAC1AAgAdQAIAPUACAANAAgAjQAIAE0ACADNAAgALQAIAK0ACABtAAgA7QAIAB0ACACdAAgAXQAIAN0ACAA9AAgAvQAIAH0ACAD9AAgAEwAJABMBCQCTAAkAkwEJAFMACQBTAQkA0wAJANMBCQAzAAkAMwEJALMACQCzAQkAcwAJAHMBCQDzAAkA8wEJAAsACQALAQkAiwAJAIsBCQBLAAkASwEJAMsACQDLAQkAKwAJACsBCQCrAAkAqwEJAGsACQBrAQkA6wAJAOsBCQAbAAkAGwEJAJsACQCbAQkAWwAJAFsBCQDbAAkA2wEJADsACQA7AQkAuwAJALsBCQB7AAkAewEJAPsACQD7AQkABwAJAAcBCQCHAAkAhwEJAEcACQBHAQkAxwAJAMcBCQAnAAkAJwEJAKcACQCnAQkAZwAJAGcBCQDnAAkA5wEJABcACQAXAQkAlwAJAJcBCQBXAAkAVwEJANcACQDXAQkANwAJADcBCQC3AAkAtwEJAHcACQB3AQkA9wAJAPcBCQAPAAkADwEJAI8ACQCPAQkATwAJAE8BCQDPAAkAzwEJAC8ACQAvAQkArwAJAK8BCQBvAAkAbwEJAO8ACQDvAQkAHwAJAB8BCQCfAAkAnwEJAF8ACQBfAQkA3wAJAN8BCQA/AAkAPwEJAL8ACQC/AQkAfwAJAH8BCQD/AAkA/wEJAAAABwBAAAcAIAAHAGAABwAQAAcAUAAHADAABwBwAAcACAAHAEgABwAoAAcAaAAHABgABwBYAAcAOAAHAHgABwAEAAcARAAHACQABwBkAAcAFAAHAFQABwA0AAcAdAAHAAMACACDAAgAQwAIAMMACAAjAAgAowAIAGMACADjAAgAQaDkAAtNAQAAAAEAAAABAAAAAQAAAAIAAAACAAAAAgAAAAIAAAADAAAAAwAAAAMAAAADAAAABAAAAAQAAAAEAAAABAAAAAUAAAAFAAAABQAAAAUAQYDlAAsTEBESAAgHCQYKBQsEDAMNAg4BDwBBoeUAC+wCAQIDBAUGBwgICQkKCgsLDAwMDA0NDQ0ODg4ODw8PDxAQEBAQEBAQERERERERERESEhISEhISEhMTExMTExMTFBQUFBQUFBQUFBQUFBQUFBUVFRUVFRUVFRUVFRUVFRUWFhYWFhYWFhYWFhYWFhYWFxcXFxcXFxcXFxcXFxcXFxgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhsbGxsbGxsbGxsbGxsbGxsbGxsbGxsbGxsbGxsbGxscAAAAAAEAAAACAAAAAwAAAAQAAAAFAAAABgAAAAcAAAAIAAAACgAAAAwAAAAOAAAAEAAAABQAAAAYAAAAHAAAACAAAAAoAAAAMAAAADgAAABAAAAAUAAAAGAAAABwAAAAgAAAAKAAAADAAAAA4ABBoegAC/UEAQIDBAQFBQYGBgYHBwcHCAgICAgICAgJCQkJCQkJCQoKCgoKCgoKCgoKCgoKCgoLCwsLCwsLCwsLCwsLCwsLDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwNDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PAAAQERISExMUFBQUFRUVFRYWFhYWFhYWFxcXFxc
XFxcYGBgYGBgYGBgYGBgYGBgYGRkZGRkZGRkZGRkZGRkZGRoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGxsbGxsbGxsbGxsbGxsbGxsbGxsbGxsbGxsbGxsbGxscHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHQAAAAABAAAAAgAAAAMAAAAEAAAABgAAAAgAAAAMAAAAEAAAABgAAAAgAAAAMAAAAEAAAABgAAAAgAAAAMAAAAAAAQAAgAEAAAACAAAAAwAAAAQAAAAGAAAACAAAAAwAAAAQAAAAGAAAACAAAAAwAAAAQAAAAGAAQaDtAAvEAwEAAgADAAQABQAHAAkADQARABkAIQAxAEEAYQCBAMEAAQGBAQECAQMBBAEGAQgBDAEQARgBIAEwAUABYAAAAAADAAQABQAGAAcACAAJAAoACwANAA8AEQATABcAGwAfACMAKwAzADsAQwBTAGMAcwCDAKMAwwDjAAIBAAAAAAAAEAAQABAAEAARABEAEgASABMAEwAUABQAFQAVABYAFgAXABcAGAAYABkAGQAaABoAGwAbABwAHAAdAB0AQABAABAAEAAQABAAEAAQABAAEAARABEAEQARABIAEgASABIAEwATABMAEwAUABQAFAAUABUAFQAVABUAEABIAE4AaW5jb3JyZWN0IGhlYWRlciBjaGVjawB1bmtub3duIGNvbXByZXNzaW9uIG1ldGhvZABpbnZhbGlkIHdpbmRvdyBzaXplAHVua25vd24gaGVhZGVyIGZsYWdzIHNldABoZWFkZXIgY3JjIG1pc21hdGNoAGludmFsaWQgYmxvY2sgdHlwZQBpbnZhbGlkIHN0b3JlZCBibG9jayBsZW5ndGhzAHRvbyBtYW55IGxlbmd0aCBvciBkaXN0YW5jZSBzeW1ib2xzAEHw8AAL4xMQABEAEgAAAAgABwAJAAYACgAFAAsABAAMAAMADQACAA4AAQAPAGludmFsaWQgY29kZSBsZW5ndGhzIHNldABpbnZhbGlkIGJpdCBsZW5ndGggcmVwZWF0AGludmFsaWQgY29kZSAtLSBtaXNzaW5nIGVuZC1vZi1ibG9jawBpbnZhbGlkIGxpdGVyYWwvbGVuZ3RocyBzZXQAaW52YWxpZCBkaXN0YW5jZXMgc2V0AGludmFsaWQgbGl0ZXJhbC9sZW5ndGggY29kZQBpbnZhbGlkIGRpc3RhbmNlIGNvZGUAaW52YWxpZCBkaXN0YW5jZSB0b28gZmFyIGJhY2sAaW5jb3JyZWN0IGRhdGEgY2hlY2sAaW5jb3JyZWN0IGxlbmd0aCBjaGVjawAAAAAAYAcAAAAIUAAACBAAFAhzABIHHwAACHAAAAgwAAAJwAAQBwoAAAhgAAAIIAAACaAAAAgAAAAIgAAACEAAAAngABAHBgAACFgAAAgYAAAJkAATBzsAAAh4AAAIOAAACdAAEQcRAAAIaAAACCgAAAmwAAAICAAACIgAAAhIAAAJ8AAQBwQAAAhUAAAIFAAVCOMAEwcrAAAIdAAACDQAAAnIABEHDQAACGQAAAgkAAAJqAAACAQAAAiEAAAIRAAACegAEAcIAAAIXAAACBwAAAmYABQHUwAACHwAAAg8AAAJ2AASBxcAAAhsAAAILAAACbgAAAgMAAAIjAAACEwAAAn4ABAHAwAACFIAAAgSABUIowATByMAAAhyAAAIMgAACcQAEQcLAAAIYgAACCIAAAmkAAAIAgAACIIAAAhCAAAJ5AAQBwcAAAhaAAAIGgAACZQAFAdDAAAIegAACDoAAAnUABIHEwAACGoAAAgqAAAJtAAACAoAAAiKAAAISgAACfQAEAcFAAAIVgAACBYAQAgAABMHMwAACHYAAAg2AAAJzAARBw8AAAhmAAAIJgAACawAAAgGAAAIhgAACEYAAAnsABAHCQAACF4AAAgeAAAJnAAUB2MAAAh+AAAIPgAACdwAEgcbAAAIbgAACC4AAAm8AAAIDgAACI4AAAhOAAAJ/ABgBwAAAAhRAAAIEQAVCIMAEgcfAAAIcQAACDEAAAnCABAHCgAACGEAAAghAAAJogAACAEAAAiBAAAIQQAACeIAEAcGAAAIWQAACBkAAAmSABMHOwAACHkAAAg5AAAJ0gARBxEAAAhpAAAIKQAACbIAAAgJAAAIiQAACEkAAAnyABAHBAAACFUAAAgVABAIAgETBysAAAh1AAAINQAACcoAEQcNAAAIZQAACCUAAAmqAAAIBQAACIUAAAhFAAAJ6gAQBwgAAAhdAAAIHQAACZoAFAdTAAAIfQAACD0AAAnaABIHFwAACG0AAAgtAAAJugAACA0AAAiNAAAITQAACfoAEAcDAAAIUwAACBMAFQjDABMHIwAACHMAAAgzAAAJxgARBwsAAAhjAAAIIwAACaYAAAgDAAAIgwAACEMAAAnmABAHBwAACFsAAAgbAAAJlgAUB0MAAAh7AAAIOwAACdYAEgcTAAAIawAACCsAAAm2AAAICwAACIsAAAhLAAAJ9gAQBwUAAAhXAAAIFwBACAAAEwczAAAIdwAACDcAAAnOABEHDwAACGcAAAgnAAAJrgAACAcAAAiHAAAIRwAACe4AEAcJAAAIXwAACB8AAAmeABQHYwAACH8AAAg/AAAJ3gASBxsAAAhvAAAILwAACb4AAAgPAAAIjwAACE8AAAn+AGAHAAAACFAAAAgQABQIcwASBx8AAAhwAAAIMAAACcEAEAcKAAAIYAAACCAAAAmhAAAIAAAACIAAAAhAAAAJ4QAQBwYAAAhYAAAIGAAACZEAEwc7AAAIeAAACDgAAAnRABEHEQAACGgAAAgoAAAJsQAACAgAAAiIAAAISAAACfEAEAcEAAAIVAAACBQAFQjjABMHKwAACHQAAAg0AAAJyQARBw0AAAhkAAAIJAAACakAAAgEAAAIhAAACEQAAAnpABAHCAAACFwAAAgcAAAJmQAUB1MAAAh8AAAIPAAACdkAEgcXAAAIbAAACCwAAAm5AAAIDAAACIwAAAhMAAAJ+QAQBwMAAAhSAAAIEgAVCKMAEwcjAAAIcgAACDIAAAnFABEHCwAACGIAAAgiAAAJpQAACAIAAAiCAAAIQgAACeUAEAcHAAAIWgAACBoAAAmVABQHQwAACHoAAAg6AAAJ1QASBxMAAAhqAAAIKgAACbUAAAgKAAAIigAACEoAAAn1ABAHBQAACFYAAAgWAEAIAAATBzMAAAh2AAAINgAACc0AEQcPAAAIZgAACCYAAAmtAAAIBgAACIYAAAhGAAAJ7QAQBwkAAAheAAAIHgAACZ0AFAdjAAAIfgAACD4AAAndABIHGwAACG4AAAguAAAJvQAACA4AAAiOAAAITgAACf0AYAcAAAAIUQAACBEAFQ
iDABIHHwAACHEAAAgxAAAJwwAQBwoAAAhhAAAIIQAACaMAAAgBAAAIgQAACEEAAAnjABAHBgAACFkAAAgZAAAJkwATBzsAAAh5AAAIOQAACdMAEQcRAAAIaQAACCkAAAmzAAAICQAACIkAAAhJAAAJ8wAQBwQAAAhVAAAIFQAQCAIBEwcrAAAIdQAACDUAAAnLABEHDQAACGUAAAglAAAJqwAACAUAAAiFAAAIRQAACesAEAcIAAAIXQAACB0AAAmbABQHUwAACH0AAAg9AAAJ2wASBxcAAAhtAAAILQAACbsAAAgNAAAIjQAACE0AAAn7ABAHAwAACFMAAAgTABUIwwATByMAAAhzAAAIMwAACccAEQcLAAAIYwAACCMAAAmnAAAIAwAACIMAAAhDAAAJ5wAQBwcAAAhbAAAIGwAACZcAFAdDAAAIewAACDsAAAnXABIHEwAACGsAAAgrAAAJtwAACAsAAAiLAAAISwAACfcAEAcFAAAIVwAACBcAQAgAABMHMwAACHcAAAg3AAAJzwARBw8AAAhnAAAIJwAACa8AAAgHAAAIhwAACEcAAAnvABAHCQAACF8AAAgfAAAJnwAUB2MAAAh/AAAIPwAACd8AEgcbAAAIbwAACC8AAAm/AAAIDwAACI8AAAhPAAAJ/wAQBQEAFwUBARMFEQAbBQEQEQUFABkFAQQVBUEAHQUBQBAFAwAYBQECFAUhABwFASASBQkAGgUBCBYFgQBABQAAEAUCABcFgQETBRkAGwUBGBEFBwAZBQEGFQVhAB0FAWAQBQQAGAUBAxQFMQAcBQEwEgUNABoFAQwWBcEAQAUAADEuMi44AHN0cmVhbSBlcnJvcgBpbnN1ZmZpY2llbnQgbWVtb3J5AGJ1ZmZlciBlcnJvcgBB5IQBC6EVazgHAA2yBwCc8gcAcGQIAGCuCgCwcQsAMKoMABMAAAAMAAAADQAAAAEAAAAGAAAAAQAAAAEAAAATAAAADQAAAA4AAAABAAAABwAAAAAAAAABAAAAFAAAAA8AAAAQAAAAAQAAAAYAAAAAAAAAAQAAABUAAAAQAAAAEQAAAAEAAAAFAAAAAAAAAAIAAAAVAAAAEgAAABIAAAABAAAABQAAAAAAAAACAAAAFQAAABIAAAATAAAAAgAAAAUAAAACAAAAAwAAABUAAAATAAAAEwAAAAMAAAAFAAAABAAAAAMAAAAVAAAAEwAAABMAAAADAAAABQAAAAgAAAAEAAAAFQAAABMAAAATAAAAAwAAAAUAAAAQAAAABQAAABUAAAATAAAAFAAAAAQAAAAFAAAAEAAAAAUAAAAWAAAAFAAAABUAAAAEAAAABQAAABAAAAAFAAAAFgAAABUAAAAWAAAABAAAAAUAAAAQAAAABQAAABYAAAAVAAAAFgAAAAUAAAAFAAAAEAAAAAUAAAAWAAAAFQAAABYAAAAFAAAABQAAACAAAAAGAAAAFgAAABYAAAAXAAAABQAAAAUAAAAgAAAABgAAABYAAAAXAAAAFwAAAAYAAAAFAAAAIAAAAAYAAAAWAAAAFgAAABYAAAAFAAAABQAAADAAAAAHAAAAFwAAABcAAAAWAAAABQAAAAQAAABAAAAABwAAABcAAAAXAAAAFgAAAAYAAAADAAAAQAAAAAgAAAAXAAAAGAAAABYAAAAHAAAAAwAAAAABAAAJAAAAGQAAABkAAAAXAAAABwAAAAMAAAAAAQAACQAAABoAAAAaAAAAGAAAAAcAAAADAAAAAAIAAAkAAAAbAAAAGwAAABkAAAAJAAAAAwAAAOcDAAAJAAAAEgAAAAwAAAANAAAAAQAAAAUAAAABAAAAAQAAABIAAAANAAAADgAAAAEAAAAGAAAAAAAAAAEAAAASAAAADgAAAA4AAAABAAAABQAAAAAAAAACAAAAEgAAABAAAAAQAAAAAQAAAAQAAAAAAAAAAgAAABIAAAAQAAAAEQAAAAIAAAAFAAAAAgAAAAMAAAASAAAAEgAAABIAAAADAAAABQAAAAIAAAADAAAAEgAAABIAAAATAAAAAwAAAAUAAAAEAAAABAAAABIAAAASAAAAEwAAAAQAAAAEAAAABAAAAAQAAAASAAAAEgAAABMAAAAEAAAABAAAAAgAAAAFAAAAEgAAABIAAAATAAAABQAAAAQAAAAIAAAABQAAABIAAAASAAAAEwAAAAYAAAAEAAAACAAAAAUAAAASAAAAEgAAABMAAAAFAAAABAAAAAwAAAAGAAAAEgAAABMAAAATAAAABwAAAAQAAAAMAAAABgAAABIAAAASAAAAEwAAAAQAAAAEAAAAEAAAAAcAAAASAAAAEgAAABMAAAAEAAAAAwAAACAAAAAHAAAAEgAAABIAAAATAAAABgAAAAMAAACAAAAABwAAABIAAAATAAAAEwAAAAYAAAADAAAAgAAAAAgAAAASAAAAEwAAABMAAAAIAAAAAwAAAAABAAAIAAAAEgAAABMAAAATAAAABgAAAAMAAACAAAAACQAAABIAAAATAAAAEwAAAAgAAAADAAAAAAEAAAkAAAASAAAAEwAAABMAAAAKAAAAAwAAAAACAAAJAAAAEgAAABMAAAATAAAADAAAAAMAAAAAAgAACQAAABIAAAATAAAAEwAAAA0AAAADAAAA5wMAAAkAAAARAAAADAAAAAwAAAABAAAABQAAAAEAAAABAAAAEQAAAAwAAAANAAAAAQAAAAYAAAAAAAAAAQAAABEAAAANAAAADwAAAAEAAAAFAAAAAAAAAAEAAAARAAAADwAAABAAAAACAAAABQAAAAAAAAACAAAAEQAAABEAAAARAAAAAgAAAAQAAAAAAAAAAgAAABEAAAAQAAAAEQAAAAMAAAAEAAAAAgAAAAMAAAARAAAAEQAAABEAAAADAAAABAAAAAQAAAAEAAAAEQAAABEAAAARAAAAAwAAAAQAAAAIAAAABQAAABEAAAARAAAAEQAAAAQAAAAEAAAACAAAAAUAAAARAAAAEQAAABEAAAAFAAAABAAAAAgAAAAFAAAAEQAAABEAAAARAAAABgAAAAQAAAAIAAAABQAAABEAAAARAAAAEQAAAAUAAAAEAAAACAAAAAYAAAARAAAAEgAAABEAAAAHAAAABAAAAAwAAAAGAAAAEQAAABIAAAARAAAAAwAAAAQAAAAMAAAABwAAABEAAAASAAAAEQAAAAQAAAADAAAAIAAAAAcAAAARAAAAEgAAABEAAAAGAAAAAwAAAAABAAAHAAAAEQAAABIAAAARAAAABgAAAAMAAACAAAAACAAAABEAAAASAAAAEQAAAAgAAAADAAAAAAEAAAgAAAARAAAAEgAAABEAAAAKAAAAAwAAAAACAAAIAAAAEQAAABIAAAARAAAABQAAAAMAAAAAAQAACQAAABEAAAASAAAAEQAAAAcAAAADAAAAAAIAAAkAAAARAAAAEgAAABEAAAAJAAAAAwAAAAACAAAJAAAAEQAAABIAAAARAAAACwAAAAMAAADnAwAACQAAAA4AAAAMAAAADQAAAAEAAAAFAAAAA
QAAAAEAAAAOAAAADgAAAA8AAAABAAAABQAAAAAAAAABAAAADgAAAA4AAAAPAAAAAQAAAAQAAAAAAAAAAQAAAA4AAAAOAAAADwAAAAIAAAAEAAAAAAAAAAIAAAAOAAAADgAAAA4AAAAEAAAABAAAAAIAAAADAAAADgAAAA4AAAAOAAAAAwAAAAQAAAAEAAAABAAAAA4AAAAOAAAADgAAAAQAAAAEAAAACAAAAAUAAAAOAAAADgAAAA4AAAAGAAAABAAAAAgAAAAFAAAADgAAAA4AAAAOAAAACAAAAAQAAAAIAAAABQAAAA4AAAAPAAAADgAAAAUAAAAEAAAACAAAAAYAAAAOAAAADwAAAA4AAAAJAAAABAAAAAgAAAAGAAAADgAAAA8AAAAOAAAAAwAAAAQAAAAMAAAABwAAAA4AAAAPAAAADgAAAAQAAAADAAAAGAAAAAcAAAAOAAAADwAAAA4AAAAFAAAAAwAAACAAAAAIAAAADgAAAA8AAAAPAAAABgAAAAMAAABAAAAACAAAAA4AAAAPAAAADwAAAAcAAAADAAAAAAEAAAgAAAAOAAAADwAAAA8AAAAFAAAAAwAAADAAAAAJAAAADgAAAA8AAAAPAAAABgAAAAMAAACAAAAACQAAAA4AAAAPAAAADwAAAAcAAAADAAAAAAEAAAkAAAAOAAAADwAAAA8AAAAIAAAAAwAAAAABAAAJAAAADgAAAA8AAAAPAAAACAAAAAMAAAAAAgAACQAAAA4AAAAPAAAADwAAAAkAAAADAAAAAAIAAAkAAAAOAAAADwAAAA8AAAAKAAAAAwAAAOcDAAAJAAAAIAAAACAAAAAhAAAAIgAAACMAAAAkAAAAJQAAACYAAAAnAAAAKAAAACkAAAApAAAAKgAAACsAAAAsAAAALQAAAC4AAAAvAAAAMAAAADAAAAAxAAAAMQAAADIAAAAzAAAANAAAADUAAAA2AAAANwAAADgAAAA4AEGQmgEL+gEEAAMAAgACAAIAAgACAAIAAgACAAIAAgACAAEAAQABAAIAAgACAAIAAgACAAIAAgACAAMAAgABAAEAAQABAAEA//////////8AAAAAAAAAAAEAAQABAAEAAQABAAIAAgACAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAP////////////8AAAAAAAABAAQAAwACAAIAAgACAAIAAgABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAP//////////////////AEGVnAEL+AcIAAAABwAAagYAAAAGAACtBQAAagUAADEFAAAABQAA1AQAAK0EAACKBAAAagQAAEwEAAAxBAAAFwQAAAAEAADpAwAA1AMAAMADAACtAwAAmwMAAIoDAAB5AwAAagMAAFsDAABMAwAAPgMAADEDAAAkAwAAFwMAAAsDAAAAAwAA9AIAAOkCAADeAgAA1AIAAMoCAADAAgAAtgIAAK0CAACkAgAAmwIAAJICAACKAgAAggIAAHkCAAByAgAAagIAAGICAABbAgAAUwIAAEwCAABFAgAAPgIAADcCAAAxAgAAKgIAACQCAAAeAgAAFwIAABECAAALAgAABQIAAAACAAD6AQAA9AEAAO8BAADpAQAA5AEAAN4BAADZAQAA1AEAAM8BAADKAQAAxQEAAMABAAC7AQAAtgEAALIBAACtAQAAqAEAAKQBAACfAQAAmwEAAJcBAACSAQAAjgEAAIoBAACGAQAAggEAAH4BAAB5AQAAdQEAAHIBAABuAQAAagEAAGYBAABiAQAAXgEAAFsBAABXAQAAUwEAAFABAABMAQAASQEAAEUBAABCAQAAPgEAADsBAAA3AQAANAEAADEBAAAuAQAAKgEAACcBAAAkAQAAIQEAAB4BAAAaAQAAFwEAABQBAAARAQAADgEAAAsBAAAIAQAABQEAAAIBAAAAAQAA/QAAAPoAAAD3AAAA9AAAAPEAAADvAAAA7AAAAOkAAADmAAAA5AAAAOEAAADeAAAA3AAAANkAAADXAAAA1AAAANEAAADPAAAAzAAAAMoAAADHAAAAxQAAAMIAAADAAAAAvgAAALsAAAC5AAAAtgAAALQAAACyAAAArwAAAK0AAACrAAAAqAAAAKYAAACkAAAAogAAAJ8AAACdAAAAmwAAAJkAAACXAAAAlQAAAJIAAACQAAAAjgAAAIwAAACKAAAAiAAAAIYAAACEAAAAggAAAIAAAAB+AAAAewAAAHkAAAB3AAAAdQAAAHMAAAByAAAAcAAAAG4AAABsAAAAagAAAGgAAABmAAAAZAAAAGIAAABgAAAAXgAAAF0AAABbAAAAWQAAAFcAAABVAAAAUwAAAFIAAABQAAAATgAAAEwAAABKAAAASQAAAEcAAABFAAAAQwAAAEIAAABAAAAAPgAAAD0AAAA7AAAAOQAAADcAAAA2AAAANAAAADIAAAAxAAAALwAAAC4AAAAsAAAAKgAAACkAAAAnAAAAJQAAACQAAAAiAAAAIQAAAB8AAAAeAAAAHAAAABoAAAAZAAAAFwAAABYAAAAUAAAAEwAAABEAAAAQAAAADgAAAA0AAAALAAAACgAAAAgAAAAHAAAABQAAAAQAAAACAAAAAQBBkKUBC1EBAAAAAQAAAAEAAAABAAAAAgAAAAIAAAADAAAAAwAAAAQAAAAEAAAABQAAAAcAAAAIAAAACQAAAAoAAAALAAAADAAAAA0AAAAOAAAADwAAABAAQfGlAQu/AQECAwQFBgcICQoLDA0ODxAQERESEhMTFBQUFBUVFRUWFhYWFhYWFhcXFxcXFxcXGBgYGBgYGBgYGBgYGBgYGAABAgMEBQYHCAkKCwwNDg8QERITFBUWFxgZGhscHR4fICAhISIiIyMkJCQkJSUlJSYmJiYmJiYmJycnJycnJycoKCgoKCgoKCgoKCgoKCgoKSkpKSkpKSkpKSkpKSkpKSoqKioqKioqKioqKioqKioqKioqKioqKioqKioqKioqAEHwpwELTQEAAAABAAAAAQAAAAEAAAACAAAAAgAAAAMAAAADAAAABAAAAAYAAAAHAAAACAAAAAkAAAAKAAAACwAAAAwAAAANAAAADgAAAA8AAAAQAEHIqAELDQEAAAABAAAAAgAAAAIAQeCoAQvTBgEAAAABAAAAAgAAAAIAAAAmAAAAggAAACEFAABKAAAAZwgAACYAAADAAQAAgAAAAEkFAABKAAAAvggAACkAAAAsAgAAgAAAAEkFAABKAAAAvggAAC8AAADKAgAAgAAAAIoFAABKAAAAhAkAADUAAABzAwAAgAAAAJ0FAABKAAAAoAkAAD0AAACBAwAAgAAAAOsFAABLAAAAPgoAAEQAAACeAwAAgAAAAE0GAABLAAAAqgoAAEsAAACzAwAAgAAAAMEGAABNAAAAHw0AAE0AAABTBAAAgAAAACMIAABRAAAApg8AAFQAAACZBAAAgAAAAEsJAABX
AAAAsRIAAFgAAADaBAAAgAAAAG8JAABdAAAAIxQAAFQAAABFBQAAgAAAAFQKAABqAAAAjBQAAGoAAACvBQAAgAAAAHYJAAB8AAAAThAAAHwAAADSAgAAgAAAAGMHAACRAAAAkAcAAJIAAAAAAAAAAQAAAAIAAAAEAAAAAAAAAAIAAAAEAAAACAAAAAAAAAABAAAAAQAAAAUAAAANAAAAHQAAAD0AAAB9AAAA/QAAAP0BAAD9AwAA/QcAAP0PAAD9HwAA/T8AAP1/AAD9/wAA/f8BAP3/AwD9/wcA/f8PAP3/HwD9/z8A/f9/AP3//wD9//8B/f//A/3//wf9//8P/f//H/3//z/9//9/AAAAAAEAAAACAAAAAwAAAAQAAAAFAAAABgAAAAcAAAAIAAAACQAAAAoAAAALAAAADAAAAA0AAAAOAAAADwAAABAAAAARAAAAEgAAABMAAAAUAAAAFQAAABYAAAAXAAAAGAAAABkAAAAaAAAAGwAAABwAAAAdAAAAHgAAAB8AAAADAAAABAAAAAUAAAAGAAAABwAAAAgAAAAJAAAACgAAAAsAAAAMAAAADQAAAA4AAAAPAAAAEAAAABEAAAASAAAAEwAAABQAAAAVAAAAFgAAABcAAAAYAAAAGQAAABoAAAAbAAAAHAAAAB0AAAAeAAAAHwAAACAAAAAhAAAAIgAAACMAAAAlAAAAJwAAACkAAAArAAAALwAAADMAAAA7AAAAQwAAAFMAAABjAAAAgwAAAAMBAAADAgAAAwQAAAMIAAADEAAAAyAAAANAAAADgAAAAwABAEHErwELlQEBAAAAAgAAAAMAAAAEAAAABQAAAAYAAAAHAAAACAAAAAkAAAAKAAAACwAAAAwAAAANAAAADgAAAA8AAAAQAAAAEgAAABQAAAAWAAAAGAAAABwAAAAgAAAAKAAAADAAAABAAAAAgAAAAAABAAAAAgAAAAQAAAAIAAAAEAAAACAAAABAAAAAgAAAAAABAAEAAAAEAAAACABB5LABC4sBAQAAAAIAAAADAAAABAAAAAUAAAAGAAAABwAAAAgAAAAJAAAACgAAAAsAAAAMAAAADQAAAA4AAAAPAAAAEAAAABIAAAAUAAAAFgAAABgAAAAcAAAAIAAAACgAAAAwAAAAQAAAAIAAAAAAAQAAAAIAAAAEAAAACAAAABAAAAAgAAAAQAAAAIAAAAAAAQBBsLIBC9YEAQAAAAEAAAABAAAAAQAAAAIAAAACAAAAAwAAAAMAAAAEAAAABgAAAAcAAAAIAAAACQAAAAoAAAALAAAADAAAAA0AAAAOAAAADwAAABAAAAABAAEBBgAAAAAAAAQAAAAAEAAABAAAAAAgAAAFAQAAAAAAAAUDAAAAAAAABQQAAAAAAAAFBgAAAAAAAAUHAAAAAAAABQkAAAAAAAAFCgAAAAAAAAUMAAAAAAAABg4AAAAAAAEFEAAAAAAAAQUUAAAAAAABBRYAAAAAAAIFHAAAAAAAAwUgAAAAAAAEBTAAAAAgAAYFQAAAAAAABwWAAAAAAAAIBgABAAAAAAoGAAQAAAAADAYAEAAAIAAABAAAAAAAAAAEAQAAAAAAAAUCAAAAIAAABQQAAAAAAAAFBQAAACAAAAUHAAAAAAAABQgAAAAgAAAFCgAAAAAAAAULAAAAAAAABg0AAAAgAAEFEAAAAAAAAQUSAAAAIAABBRYAAAAAAAIFGAAAACAAAwUgAAAAAAADBSgAAAAAAAYEQAAAABAABgRAAAAAIAAHBYAAAAAAAAkGAAIAAAAACwYACAAAMAAABAAAAAAQAAAEAQAAACAAAAUCAAAAIAAABQMAAAAgAAAFBQAAACAAAAUGAAAAIAAABQgAAAAgAAAFCQAAACAAAAULAAAAIAAABQwAAAAAAAAGDwAAACAAAQUSAAAAIAABBRQAAAAgAAIFGAAAACAAAgUcAAAAIAADBSgAAAAgAAQFMAAAAAAAEAYAAAEAAAAPBgCAAAAAAA4GAEAAAAAADQYAIABBlLcBC4MEAQAAAAEAAAAFAAAADQAAAB0AAAA9AAAAfQAAAP0AAAD9AQAA/QMAAP0HAAD9DwAA/R8AAP0/AAD9fwAA/f8AAP3/AQD9/wMA/f8HAP3/DwD9/x8A/f8/AP3/fwD9//8A/f//Af3//wP9//8H/f//D/3//x/9//8//f//fwAAAAABAAAAAgAAAAMAAAAEAAAABQAAAAYAAAAHAAAACAAAAAkAAAAKAAAACwAAAAwAAAANAAAADgAAAA8AAAAQAAAAEQAAABIAAAATAAAAFAAAABUAAAAWAAAAFwAAABgAAAAZAAAAGgAAABsAAAAcAAAAHQAAAB4AAAAfAAAAAQABAQUAAAAAAAAFAAAAAAAABgQ9AAAAAAAJBf0BAAAAAA8F/X8AAAAAFQX9/x8AAAADBQUAAAAAAAcEfQAAAAAADAX9DwAAAAASBf3/AwAAABcF/f9/AAAABQUdAAAAAAAIBP0AAAAAAA4F/T8AAAAAFAX9/w8AAAACBQEAAAAQAAcEfQAAAAAACwX9BwAAAAARBf3/AQAAABYF/f8/AAAABAUNAAAAEAAIBP0AAAAAAA0F/R8AAAAAEwX9/wcAAAABBQEAAAAQAAYEPQAAAAAACgX9AwAAAAAQBf3/AAAAABwF/f//DwAAGwX9//8HAAAaBf3//wMAABkF/f//AQAAGAX9//8AQaC7AQvTAQMAAAAEAAAABQAAAAYAAAAHAAAACAAAAAkAAAAKAAAACwAAAAwAAAANAAAADgAAAA8AAAAQAAAAEQAAABIAAAATAAAAFAAAABUAAAAWAAAAFwAAABgAAAAZAAAAGgAAABsAAAAcAAAAHQAAAB4AAAAfAAAAIAAAACEAAAAiAAAAIwAAACUAAAAnAAAAKQAAACsAAAAvAAAAMwAAADsAAABDAAAAUwAAAGMAAACDAAAAAwEAAAMCAAADBAAAAwgAAAMQAAADIAAAA0AAAAOAAAADAAEAQYC+AQtRAQAAAAEAAAABAAAAAQAAAAIAAAACAAAAAwAAAAMAAAAEAAAABAAAAAUAAAAHAAAACAAAAAkAAAAKAAAACwAAAAwAAAANAAAADgAAAA8AAAAQAEHgvgELhgQBAAEBBgAAAAAAAAYDAAAAAAAABAQAAAAgAAAFBQAAAAAAAAUGAAAAAAAABQgAAAAAAAAFCQAAAAAAAAULAAAAAAAABg0AAAAAAAAGEAAAAAAAAAYTAAAAAAAABhYAAAAAAAAGGQAAAAAAAAYcAAAAAAAABh8AAAAAAAAGIgAAAAAAAQYlAAAAAAABBikAAAAAAAIGLwAAAAAAAwY7AAAAAAAEBlMAAAAAAAcGgwAAAAAACQYDAgAAEAAABAQAAAAAAAAEBQAAACAAAAUGAAAAAAAABQcAAAAgAAAFCQAAAAAAAAUKAAAAAAAABgwAAAAAAAAGDwAAAAAAAAYSAAAAAAAABhUAAAAAAAAGGAAAAAAAAAYbAAAAAAAABh4AAAAAAAAGIQAAAAAAAQYjAAAAAAABBicAAAA
AAAIGKwAAAAAAAwYzAAAAAAAEBkMAAAAAAAUGYwAAAAAACAYDAQAAIAAABAQAAAAwAAAEBAAAABAAAAQFAAAAIAAABQcAAAAgAAAFCAAAACAAAAUKAAAAIAAABQsAAAAAAAAGDgAAAAAAAAYRAAAAAAAABhQAAAAAAAAGFwAAAAAAAAYaAAAAAAAABh0AAAAAAAAGIAAAAAAAEAYDAAEAAAAPBgOAAAAAAA4GA0AAAAAADQYDIAAAAAAMBgMQAAAAAAsGAwgAAAAACgYDBABB8MIBC5EOCAAAAAgAAAAIAAAABwAAAAgAAAAJAAAACgAAAAsAAAAAAAAAAQAAAAIAAAABAAAABAAAAAQAAAAEAAAABAAAAAAAAAABAAAAAwAAAAcAAAAPAAAAHwAAAD8AAAB/AAAA/wAAAP8BAAD/AwAA/wcAAP8PAAD/HwAA/z8AAP9/AAD//wAA//8BAP//AwD//wcA//8PAP//HwD//z8A//9/AP///wD///8B////A////wf///8P////H////z////9/dm9pZABib29sAGNoYXIAc2lnbmVkIGNoYXIAdW5zaWduZWQgY2hhcgBzaG9ydAB1bnNpZ25lZCBzaG9ydABpbnQAdW5zaWduZWQgaW50AGxvbmcAdW5zaWduZWQgbG9uZwBmbG9hdABkb3VibGUAc3RkOjpzdHJpbmcAc3RkOjpiYXNpY19zdHJpbmc8dW5zaWduZWQgY2hhcj4Ac3RkOjp3c3RyaW5nAHN0ZDo6dTE2c3RyaW5nAHN0ZDo6dTMyc3RyaW5nAGVtc2NyaXB0ZW46OnZhbABlbXNjcmlwdGVuOjptZW1vcnlfdmlldzxjaGFyPgBlbXNjcmlwdGVuOjptZW1vcnlfdmlldzxzaWduZWQgY2hhcj4AZW1zY3JpcHRlbjo6bWVtb3J5X3ZpZXc8dW5zaWduZWQgY2hhcj4AZW1zY3JpcHRlbjo6bWVtb3J5X3ZpZXc8c2hvcnQ+AGVtc2NyaXB0ZW46Om1lbW9yeV92aWV3PHVuc2lnbmVkIHNob3J0PgBlbXNjcmlwdGVuOjptZW1vcnlfdmlldzxpbnQ+AGVtc2NyaXB0ZW46Om1lbW9yeV92aWV3PHVuc2lnbmVkIGludD4AZW1zY3JpcHRlbjo6bWVtb3J5X3ZpZXc8bG9uZz4AZW1zY3JpcHRlbjo6bWVtb3J5X3ZpZXc8dW5zaWduZWQgbG9uZz4AZW1zY3JpcHRlbjo6bWVtb3J5X3ZpZXc8aW50OF90PgBlbXNjcmlwdGVuOjptZW1vcnlfdmlldzx1aW50OF90PgBlbXNjcmlwdGVuOjptZW1vcnlfdmlldzxpbnQxNl90PgBlbXNjcmlwdGVuOjptZW1vcnlfdmlldzx1aW50MTZfdD4AZW1zY3JpcHRlbjo6bWVtb3J5X3ZpZXc8aW50MzJfdD4AZW1zY3JpcHRlbjo6bWVtb3J5X3ZpZXc8dWludDMyX3Q+AGVtc2NyaXB0ZW46Om1lbW9yeV92aWV3PGZsb2F0PgBlbXNjcmlwdGVuOjptZW1vcnlfdmlldzxkb3VibGU+AAAAGHQAAFBlAABOMTBlbXNjcmlwdGVuMTFtZW1vcnlfdmlld0lkRUUAABh0AAB4ZQAATjEwZW1zY3JpcHRlbjExbWVtb3J5X3ZpZXdJZkVFAAAYdAAAoGUAAE4xMGVtc2NyaXB0ZW4xMW1lbW9yeV92aWV3SW1FRQAAGHQAAMhlAABOMTBlbXNjcmlwdGVuMTFtZW1vcnlfdmlld0lsRUUAABh0AADwZQAATjEwZW1zY3JpcHRlbjExbWVtb3J5X3ZpZXdJakVFAAAYdAAAGGYAAE4xMGVtc2NyaXB0ZW4xMW1lbW9yeV92aWV3SWlFRQAAGHQAAEBmAABOMTBlbXNjcmlwdGVuMTFtZW1vcnlfdmlld0l0RUUAABh0AABoZgAATjEwZW1zY3JpcHRlbjExbWVtb3J5X3ZpZXdJc0VFAAAYdAAAkGYAAE4xMGVtc2NyaXB0ZW4xMW1lbW9yeV92aWV3SWFFRQAAGHQAALhmAABOMTBlbXNjcmlwdGVuMTFtZW1vcnlfdmlld0ljRUUAAEB0AADwZgAAAAAAAAEAAACIBwAAAAAAAE5TdDNfXzIxMmJhc2ljX3N0cmluZ0lEaU5TXzExY2hhcl90cmFpdHNJRGlFRU5TXzlhbGxvY2F0b3JJRGlFRUVFAAAAQHQAAExnAAAAAAAAAQAAAIgHAAAAAAAATlN0M19fMjEyYmFzaWNfc3RyaW5nSURzTlNfMTFjaGFyX3RyYWl0c0lEc0VFTlNfOWFsbG9jYXRvcklEc0VFRUUAAABAdAAAqGcAAAAAAAABAAAAiAcAAAAAAABOU3QzX18yMTJiYXNpY19zdHJpbmdJd05TXzExY2hhcl90cmFpdHNJd0VFTlNfOWFsbG9jYXRvckl3RUVFRQAAQHQAAABoAAAAAAAAAQAAAIgHAAAAAAAATlN0M19fMjEyYmFzaWNfc3RyaW5nSWhOU18xMWNoYXJfdHJhaXRzSWhFRU5TXzlhbGxvY2F0b3JJaEVFRUUAABEACgAREREAAAAABQAAAAAAAAkAAAAACwAAAAAAAAAAEQAPChEREQMKBwABAAkLCwAACQYLAAALAAYRAAAAERERAEGR0QELIQsAAAAAAAAAABEACgoREREACgAAAgAJCwAAAAkACwAACwBBy9EBCwEMAEHX0QELFQwAAAAADAAAAAAJDAAAAAAADAAADABBhdIBCwEOAEGR0gELFQ0AAAAEDQAAAAAJDgAAAAAADgAADgBBv9IBCwEQAEHL0gELHg8AAAAADwAAAAAJEAAAAAAAEAAAEAAAEgAAABISEgBBgtMBCw4SAAAAEhISAAAAAAAACQBBs9MBCwELAEG/0wELFQoAAAAACgAAAAAJCwAAAAAACwAACwBB7dMBCwEMAEH50wELJwwAAAAADAAAAAAJDAAAAAAADAAADAAALSsgICAwWDB4AChudWxsKQBBsNQBC2cwMTIzNDU2Nzg5QUJDREVGGRJEOwI/LEcUPTMwChsGRktFNw9JDo4XA0AdPGkrNh9KLRwBICUpIQgMFRYiLhA4Pgs0MRhkdHV2L0EJfzkRI0MyQomKiwUEJignDSoeNYwHGkiTE5SVAEGg1QEL9hNJbGxlZ2FsIGJ5dGUgc2VxdWVuY2UARG9tYWluIGVycm9yAFJlc3VsdCBub3QgcmVwcmVzZW50YWJsZQBOb3QgYSB0dHkAUGVybWlzc2lvbiBkZW5pZWQAT3BlcmF0aW9uIG5vdCBwZXJtaXR0ZWQATm8gc3VjaCBmaWxlIG9yIGRpcmVjdG9yeQBObyBzdWNoIHByb2Nlc3MARmlsZSBleGlzdHMAVmFsdWUgdG9vIGxhcmdlIGZvciBkYXRhIHR5cGUATm8gc3BhY2UgbGVmdCBvbiBkZXZpY2UAT3V0IG9mIG1lbW9yeQBSZXNvdXJjZSBidXN5AEludGVycnVwdG
VkIHN5c3RlbSBjYWxsAFJlc291cmNlIHRlbXBvcmFyaWx5IHVuYXZhaWxhYmxlAEludmFsaWQgc2VlawBDcm9zcy1kZXZpY2UgbGluawBSZWFkLW9ubHkgZmlsZSBzeXN0ZW0ARGlyZWN0b3J5IG5vdCBlbXB0eQBDb25uZWN0aW9uIHJlc2V0IGJ5IHBlZXIAT3BlcmF0aW9uIHRpbWVkIG91dABDb25uZWN0aW9uIHJlZnVzZWQASG9zdCBpcyBkb3duAEhvc3QgaXMgdW5yZWFjaGFibGUAQWRkcmVzcyBpbiB1c2UAQnJva2VuIHBpcGUASS9PIGVycm9yAE5vIHN1Y2ggZGV2aWNlIG9yIGFkZHJlc3MAQmxvY2sgZGV2aWNlIHJlcXVpcmVkAE5vIHN1Y2ggZGV2aWNlAE5vdCBhIGRpcmVjdG9yeQBJcyBhIGRpcmVjdG9yeQBUZXh0IGZpbGUgYnVzeQBFeGVjIGZvcm1hdCBlcnJvcgBJbnZhbGlkIGFyZ3VtZW50AEFyZ3VtZW50IGxpc3QgdG9vIGxvbmcAU3ltYm9saWMgbGluayBsb29wAEZpbGVuYW1lIHRvbyBsb25nAFRvbyBtYW55IG9wZW4gZmlsZXMgaW4gc3lzdGVtAE5vIGZpbGUgZGVzY3JpcHRvcnMgYXZhaWxhYmxlAEJhZCBmaWxlIGRlc2NyaXB0b3IATm8gY2hpbGQgcHJvY2VzcwBCYWQgYWRkcmVzcwBGaWxlIHRvbyBsYXJnZQBUb28gbWFueSBsaW5rcwBObyBsb2NrcyBhdmFpbGFibGUAUmVzb3VyY2UgZGVhZGxvY2sgd291bGQgb2NjdXIAU3RhdGUgbm90IHJlY292ZXJhYmxlAFByZXZpb3VzIG93bmVyIGRpZWQAT3BlcmF0aW9uIGNhbmNlbGVkAEZ1bmN0aW9uIG5vdCBpbXBsZW1lbnRlZABObyBtZXNzYWdlIG9mIGRlc2lyZWQgdHlwZQBJZGVudGlmaWVyIHJlbW92ZWQARGV2aWNlIG5vdCBhIHN0cmVhbQBObyBkYXRhIGF2YWlsYWJsZQBEZXZpY2UgdGltZW91dABPdXQgb2Ygc3RyZWFtcyByZXNvdXJjZXMATGluayBoYXMgYmVlbiBzZXZlcmVkAFByb3RvY29sIGVycm9yAEJhZCBtZXNzYWdlAEZpbGUgZGVzY3JpcHRvciBpbiBiYWQgc3RhdGUATm90IGEgc29ja2V0AERlc3RpbmF0aW9uIGFkZHJlc3MgcmVxdWlyZWQATWVzc2FnZSB0b28gbGFyZ2UAUHJvdG9jb2wgd3JvbmcgdHlwZSBmb3Igc29ja2V0AFByb3RvY29sIG5vdCBhdmFpbGFibGUAUHJvdG9jb2wgbm90IHN1cHBvcnRlZABTb2NrZXQgdHlwZSBub3Qgc3VwcG9ydGVkAE5vdCBzdXBwb3J0ZWQAUHJvdG9jb2wgZmFtaWx5IG5vdCBzdXBwb3J0ZWQAQWRkcmVzcyBmYW1pbHkgbm90IHN1cHBvcnRlZCBieSBwcm90b2NvbABBZGRyZXNzIG5vdCBhdmFpbGFibGUATmV0d29yayBpcyBkb3duAE5ldHdvcmsgdW5yZWFjaGFibGUAQ29ubmVjdGlvbiByZXNldCBieSBuZXR3b3JrAENvbm5lY3Rpb24gYWJvcnRlZABObyBidWZmZXIgc3BhY2UgYXZhaWxhYmxlAFNvY2tldCBpcyBjb25uZWN0ZWQAU29ja2V0IG5vdCBjb25uZWN0ZWQAQ2Fubm90IHNlbmQgYWZ0ZXIgc29ja2V0IHNodXRkb3duAE9wZXJhdGlvbiBhbHJlYWR5IGluIHByb2dyZXNzAE9wZXJhdGlvbiBpbiBwcm9ncmVzcwBTdGFsZSBmaWxlIGhhbmRsZQBSZW1vdGUgSS9PIGVycm9yAFF1b3RhIGV4Y2VlZGVkAE5vIG1lZGl1bSBmb3VuZABXcm9uZyBtZWRpdW0gdHlwZQBObyBlcnJvciBpbmZvcm1hdGlvbgAAYmFzaWNfc3RyaW5nAHN0ZDo6ZXhjZXB0aW9uAAAAAADccQAAPAAAAD0AAAA+AAAAGHQAAORxAABTdDlleGNlcHRpb24AAAAAAAAAAAhyAAAQAAAAPwAAAEAAAACMcgAAFHIAANxxAABTdDExbG9naWNfZXJyb3IAAAAAADhyAAAQAAAAQQAAAEAAAACMcgAARHIAAAhyAABTdDEybGVuZ3RoX2Vycm9yAFN0OXR5cGVfaW5mbwAAABh0AABVcgAAjHIAAAFzAABkcgAAjHIAAKxyAABscgAAAAAAANByAABCAAAAQwAAAEQAAABFAAAARgAAAEcAAABIAAAASQAAAE4xMF9fY3h4YWJpdjExN19fY2xhc3NfdHlwZV9pbmZvRQAAAIxyAADccgAAeHIAAE4xMF9fY3h4YWJpdjEyMF9fc2lfY2xhc3NfdHlwZV9pbmZvRQBOMTBfX2N4eGFiaXYxMTZfX3NoaW1fdHlwZV9pbmZvRQAAAAAAAABAcwAAQgAAAEoAAABEAAAARQAAAEsAAACMcgAATHMAAGxyAABOMTBfX2N4eGFiaXYxMjNfX2Z1bmRhbWVudGFsX3R5cGVfaW5mb0UAdgAAACxzAAB0cwAAYgAAACxzAACAcwAAYwAAACxzAACMcwAAaAAAACxzAACYcwAAYQAAACxzAACkcwAAcwAAACxzAACwcwAAdAAAACxzAAC8cwAAaQAAACxzAADIcwAAagAAACxzAADUcwAAbAAAACxzAADgcwAAbQAAACxzAADscwAAZgAAACxzAAD4cwAAZAAAACxzAAAEdAAAAAAAAHhyAABCAAAATAAAAEQAAABFAAAARgAAAE0AAABOAAAATwAAAAAAAABgdAAAQgAAAFAAAABEAAAARQAAAEYAAABRAAAAUgAAAFMAAACMcgAAbHQAAHhyAABOMTBfX2N4eGFiaXYxMjFfX3ZtaV9jbGFzc190eXBlX2luZm9FAAAAcHUAQZjpAQtBgC0AAAAyAAABAQAAHgEAAA8AAACALAAAAC0AAAAAAAAeAAAADwAAAAAAAAAwLAAAAAAAABMAAAAHAAAAAAAAAAUAQeTpAQsBOQBB/OkBCwo6AAAAOwAAAC12AEGU6gELAQIAQaPqAQsF//////8AQejqAQsJoH5QAAAAAAAFAEH86gELAVQAQZTrAQsOOgAAAFUAAACYegAAAAQAQazrAQsBAQBBu+sBCwUK/////w==\");\n\nvar _a;\nvar BloscShuffle;\n(function(BloscShuffle2) {\n BloscShuffle2[BloscShuffle2[\"NOSHUFFLE\"] = 0] = \"NOSHUFFLE\";\n BloscShuffle2[BloscShuffle2[\"SHUFFLE\"] = 1] = \"SHUFFLE\";\n BloscShuffle2[BloscShuffle2[\"BITSHUFFLE\"] = 2] = 
\"BITSHUFFLE\";\n BloscShuffle2[BloscShuffle2[\"AUTOSHUFFLE\"] = -1] = \"AUTOSHUFFLE\";\n})(BloscShuffle || (BloscShuffle = {}));\nconst COMPRESSORS = new Set([\"blosclz\", \"lz4\", \"lz4hc\", \"snappy\", \"zlib\", \"zstd\"]);\nlet emscriptenModule;\nconst init = () => blosc_codec({ noInitialRun: true, wasmBinary });\nconst Blosc = (_a = class {\n constructor(clevel = 5, cname = \"lz4\", shuffle = 1, blocksize = 0) {\n if (clevel < 0 || clevel > 9) {\n throw new Error(`Invalid compression level: '${clevel}'. It should be between 0 and 9`);\n }\n if (!COMPRESSORS.has(cname)) {\n throw new Error(`Invalid compressor '${cname}'. Valid compressors include\n 'blosclz', 'lz4', 'lz4hc','snappy', 'zlib', 'zstd'.`);\n }\n if (shuffle < -1 || shuffle > 2) {\n throw new Error(`Invalid shuffle ${shuffle}. Must be one of 0 (NOSHUFFLE),\n 1 (SHUFFLE), 2 (BITSHUFFLE), -1 (AUTOSHUFFLE).`);\n }\n this.blocksize = blocksize;\n this.clevel = clevel;\n this.cname = cname;\n this.shuffle = shuffle;\n }\n static fromConfig({ blocksize, clevel, cname, shuffle }) {\n return new _a(clevel, cname, shuffle, blocksize);\n }\n async encode(data) {\n if (!emscriptenModule) {\n emscriptenModule = init();\n }\n const module = await emscriptenModule;\n const view = module.compress(data, this.cname, this.clevel, this.shuffle, this.blocksize);\n const result = new Uint8Array(view);\n module.free_result();\n return result;\n }\n async decode(data, out) {\n if (!emscriptenModule) {\n emscriptenModule = init();\n }\n const module = await emscriptenModule;\n const view = module.decompress(data);\n const result = new Uint8Array(view);\n module.free_result();\n if (out !== void 0) {\n out.set(result);\n return out;\n }\n return result;\n }\n}, _a.codecId = \"blosc\", _a.COMPRESSORS = [...COMPRESSORS], _a.NOSHUFFLE = 0, _a.SHUFFLE = 1, _a.BITSHUFFLE = 2, _a.AUTOSHUFFLE = -1, _a);\n\nexport default Blosc;\n","import { Zlib, GZip, Blosc } from 'numcodecs';\nimport { addCodec } from './zarr-core';\n\naddCodec(Zlib.codecId, () => Zlib);\naddCodec(GZip.codecId, () => GZip);\naddCodec(Blosc.codecId, () => Blosc);\n\nexport * from './zarr-core';\n","import JSZip from \"jszip\";\nimport { KeyError } from \"zarr\";\nimport { AsyncStore, ValidStoreType } from \"zarr/types/storage/types\";\n\n/**\n * Preserves (double) slashes earlier in the path, so this works better\n * for URLs. 
From https://stackoverflow.com/a/46427607/4178400\n * @param args parts of a path or URL to join.\n */\nfunction joinUrlParts(...args: string[]) {\n return args\n .map((part, i) => {\n if (i === 0) return part.trim().replace(/[/]*$/g, \"\");\n return part.trim().replace(/(^[/]*|[/]*$)/g, \"\");\n })\n .filter((x) => x.length)\n .join(\"/\");\n}\nclass ReadOnlyStore {\n async keys() {\n return [];\n }\n\n async deleteItem() {\n return false;\n }\n\n async setItem() {\n console.warn(\"Cannot write to read-only store.\");\n return false;\n }\n}\n\nexport class FileStore\n extends ReadOnlyStore\n implements AsyncStore\n{\n private _map: Map;\n private _rootPrefix: string;\n private _rootName: string;\n\n constructor(fileMap: Map, rootName: string, rootPrefix = \"\") {\n super();\n this._map = fileMap;\n this._rootPrefix = rootPrefix;\n this._rootName = rootName;\n }\n\n get rootName() {\n return this._rootName;\n }\n\n private _key(key: string) {\n return joinUrlParts(this._rootPrefix, key);\n }\n\n async getItem(key: string) {\n const file = this._map.get(this._key(key));\n if (!file) {\n throw new KeyError(key);\n }\n const buffer = await file.arrayBuffer();\n return buffer;\n }\n\n async containsItem(key: string) {\n const path = this._key(key);\n return this._map.has(path);\n }\n}\n\nexport class ZipStore implements AsyncStore {\n private _rootName: string;\n private _zip: ReturnType;\n private _needsInitialGroup: boolean;\n\n private createGroup() {\n return JSON.stringify({ zarr_format: 2 });\n }\n\n constructor(name: string, zip?: JSZip) {\n this._rootName = name;\n this._rootName = `${name}.zarr`;\n this._zip = zip ? zip : new JSZip();\n this._zip.folder(this._rootName);\n this._needsInitialGroup = zip ? false : true;\n }\n\n async keys(): Promise {\n // TODO -zarr: not sure if this is correct\n // currently all files with full path\n return Object.values(this._zip.files)\n .filter((f) => !f.dir)\n .map((f) => f.name);\n }\n\n async getItem(key: string): Promise {\n // This is to cover the case where we're using the store to write\n // and zarr.js expects .zgroup to be present at the root group\n // before attempting to write it\n if (key === `${this._rootName}/.zgroup` && this._needsInitialGroup) {\n const initialGroup = this.createGroup();\n this._zip.file(key, initialGroup);\n return initialGroup;\n }\n const item = this._zip.file(key);\n\n if (!item) {\n throw new Error(`No item with key ${key}`);\n }\n\n return item.async(\"arraybuffer\");\n }\n\n async containsItem(key: string) {\n return this._zip.file(key) !== null;\n }\n\n async setItem(item: string, value: ValidStoreType) {\n this._zip.file(item, value);\n return true;\n }\n\n async deleteItem(item: string) {\n this._zip.remove(item);\n return true;\n }\n\n get zip() {\n return this._zip;\n }\n\n get rootName() {\n return this._rootName;\n }\n\n // listDir?: (path?: string) => Promise;\n // rmDir?: (path?: string) => Promise;\n\n // getSize?: (path?: string) => Promise;\n // rename?: (path?: string) => Promise;\n}\n\nexport type CustomStore = FileStore | ZipStore;\n\nexport const fListToStore = async (\n files: FileList,\n zipFile: boolean\n): Promise => {\n if (zipFile) {\n const file = files[0];\n const zip = await new JSZip().loadAsync(file);\n // find folder of pattern \"projectName.zarr/\"\n const rootFile = zip.folder(/.*\\.zarr\\/$/);\n\n if (rootFile.length !== 1) {\n throw new Error(\"Could not determine zarr root\");\n }\n\n // \"projectName.zarr/\" -> \"projectName\"\n const fileName = 
rootFile[0].name.split(\".\")[0];\n\n return new ZipStore(fileName, zip);\n } else {\n const rootName = files[0].webkitRelativePath.split(\"/\")[0];\n\n /*\n * You can't randomly access files from a directory by path name\n * without the Native File System API, so we need to get objects for _all_\n * the files right away for Zarr. This is unfortunate because we need to iterate\n * over all File objects and create an in-memory index.\n *\n * fMap is simple key-value mapping from 'some/file/path' -> File\n */\n const fMap: Map = new Map();\n\n for (const file of files) {\n if (file.name === \".DS_Store\") continue;\n // TODO: check browser compat with webkitRelativePath vs path\n fMap.set(file.webkitRelativePath, file);\n }\n\n return new FileStore(fMap, rootName);\n }\n};\n\nexport class PseudoFileList {\n private _files: File[];\n\n constructor(files: File[]) {\n let self = this;\n this._files = files;\n\n return new Proxy(this, {\n get(target, prop) {\n if (!isNaN(Number(prop)) && !(prop in target)) {\n return self._files[Number(prop)];\n } else {\n return Reflect.get(target, prop);\n }\n },\n }) satisfies FileList;\n }\n\n public item(elem: number) {\n return this._files[elem];\n }\n\n public get length() {\n return this._files.length;\n }\n\n [key: number]: File;\n\n [Symbol.iterator](): IterableIterator {\n return this._files[Symbol.iterator]();\n }\n}\n","import { Group, NestedArray, group } from \"zarr\";\n//import { Blosc } from \"numcodecs\";\n\nimport { Tensor } from \"@tensorflow/tfjs\";\nimport { PreprocessOptions } from \"utils/models/types\";\nimport { availableClassifierModels } from \"utils/models/availableClassificationModels\";\nimport { availableSegmenterModels } from \"utils/models/availableSegmentationModels\";\nimport { ZipStore } from \"../zarrStores\";\nimport { LoadCB } from \"../types\";\nimport { ClassifierState, ProjectState, SegmenterState } from \"store/types\";\nimport { Colors } from \"utils/common/types\";\nimport {\n Kind,\n AnnotationObject,\n Category,\n ImageObject,\n} from \"store/data/types\";\n\n/* \n =====================\n Project Serialization\n =====================\n */\n\nconst serializeImageColors = async (colorsGroup: Group, colors: Colors) => {\n const numChannels = colors.color.shape[0];\n const rangeMins = new Float32Array(numChannels);\n const rangeMaxs = new Float32Array(numChannels);\n const visibilities = new Uint8Array(numChannels);\n\n for (let i = 0; i < numChannels; i++) {\n rangeMins[i] = colors.range[i][0];\n rangeMaxs[i] = colors.range[i][1];\n visibilities[i] = Number(colors.visible[i]);\n }\n\n await writeArray(colorsGroup, \"range_min\", rangeMins);\n await writeArray(colorsGroup, \"range_max\", rangeMaxs);\n await writeArray(colorsGroup, \"visible_B\", visibilities);\n\n await writeTensor(colorsGroup, \"color\", colors.color, [\n colors.color.shape[0],\n colors.color.shape[1],\n ]);\n};\n\nconst serializeThings = async (\n thingsGroup: Group,\n things: Array,\n loadCb: LoadCB\n) => {\n const thingNames = things.map((thing) => thing.name);\n\n thingsGroup.attrs.setItem(\"thing_names\", thingNames);\n loadCb(0, `serializing ${things.length} images`);\n\n for (let i = 0; i < things.length; i++) {\n let thing = things[i];\n let thingGroup = await thingsGroup.createGroup(thingNames[i]);\n let data = await writeTensor(thingGroup, thingNames[i], thing.data, [\n thing.shape.planes,\n thing.shape.height,\n thing.shape.width,\n thing.shape.channels,\n ]);\n await data.attrs.setItem(\"bit_depth\", thing.bitDepth);\n await 
thingGroup.attrs.setItem(\"thing_id\", thing.id);\n await thingGroup.attrs.setItem(\"active_plane\", thing.activePlane);\n await thingGroup.attrs.setItem(\"class_category_id\", thing.categoryId);\n await thingGroup.attrs.setItem(\"classifier_partition\", thing.partition);\n await thingGroup.attrs.setItem(\"kind\", thing.kind);\n\n if (thing.kind === \"Image\") {\n await thingGroup.attrs.setItem(\n \"contents\",\n (thing as ImageObject).containing\n );\n\n let colorGroup = await thingGroup.createGroup(\"colors\");\n await serializeImageColors(colorGroup, (thing as ImageObject).colors);\n } else {\n await thingGroup.attrs.setItem(\n \"bbox\",\n (thing as AnnotationObject).boundingBox\n );\n await thingGroup.attrs.setItem(\n \"mask\",\n (thing as AnnotationObject).encodedMask\n );\n await thingGroup.attrs.setItem(\n \"image_id\",\n (thing as AnnotationObject).imageId\n );\n }\n loadCb(\n (i + 1) / thingNames.length,\n `serialized image ${i + 1}/${thingNames.length}`\n );\n }\n};\n\nconst serializeCategories = async (\n categoryGroup: Group,\n categories: Category[]\n) => {\n await categoryGroup.attrs.setItem(\n \"category_id\",\n categories.map((cat) => cat.id)\n );\n await categoryGroup.attrs.setItem(\n \"color\",\n categories.map((cat) => cat.color)\n );\n await categoryGroup.attrs.setItem(\n \"name\",\n categories.map((cat) => cat.name)\n );\n await categoryGroup.attrs.setItem(\n \"kind\",\n categories.map((cat) => cat.kind)\n );\n await categoryGroup.attrs.setItem(\n \"contents\",\n categories.map((cat) => cat.containing)\n );\n};\nconst serializeKinds = async (kindGroup: Group, kinds: Kind[]) => {\n await kindGroup.attrs.setItem(\n \"kind_id\",\n kinds.map((k) => k.id)\n );\n await kindGroup.attrs.setItem(\n \"contents\",\n kinds.map((k) => k.containing)\n );\n await kindGroup.attrs.setItem(\n \"categories\",\n kinds.map((k) => k.categories)\n );\n await kindGroup.attrs.setItem(\n \"unknown_category_id\",\n kinds.map((k) => k.unknownCategoryId)\n );\n};\n\nconst _serializeProject = async (\n projectGroup: Group,\n project: ProjectState,\n data: {\n kinds: Array;\n categories: Array;\n things: Array;\n },\n loadCb: LoadCB\n) => {\n await projectGroup.attrs.setItem(\"name\", project.name);\n\n const thingsGroup = await projectGroup.createGroup(\"things\");\n\n await serializeThings(thingsGroup, data.things, loadCb);\n\n const categoriesGroup = await projectGroup.createGroup(\"categories\");\n await serializeCategories(categoriesGroup, data.categories);\n\n const kindsGroup = await projectGroup.createGroup(\"kinds\");\n await serializeKinds(kindsGroup, data.kinds);\n};\n\n/*\n ========================\n Classifier Serialization\n ========================\n */\n\nconst serializePreprocessOptions = async (\n preprocessOptionsGroup: Group,\n preprocessOptions: PreprocessOptions\n) => {\n await preprocessOptionsGroup.attrs.setItem(\n \"shuffle_B\",\n Number(preprocessOptions.shuffle)\n );\n\n const rescaleOptionsGroup = await preprocessOptionsGroup.createGroup(\n \"rescale_options\"\n );\n\n await rescaleOptionsGroup.attrs.setItem(\n \"rescale_B\",\n Number(preprocessOptions.rescaleOptions.rescale)\n );\n\n await rescaleOptionsGroup.attrs.setItem(\n \"center_B\",\n Number(preprocessOptions.rescaleOptions.center)\n );\n\n const cropOptionsGroup = await preprocessOptionsGroup.createGroup(\n \"crop_options\"\n );\n await cropOptionsGroup.attrs.setItem(\n \"num_crops\",\n preprocessOptions.cropOptions.numCrops\n );\n await cropOptionsGroup.attrs.setItem(\n \"crop_schema\",\n 
preprocessOptions.cropOptions.cropSchema\n );\n};\n\nconst serializeClassifier = async (\n classifierGroup: Group,\n classifier: ClassifierState\n) => {\n const classifierModel =\n availableClassifierModels[classifier.selectedModelIdx];\n\n await classifierGroup.attrs.setItem(\"name\", classifierModel.name);\n\n const { planes, height, width, channels } = classifier.inputShape;\n await writeArray(\n classifierGroup,\n \"input_shape\",\n new Uint8Array([planes, height, width, channels])\n );\n\n await classifierGroup.attrs.setItem(\n \"training_percent\",\n classifier.trainingPercentage\n );\n\n await classifierGroup.attrs.setItem(\"metrics\", classifier.metrics);\n\n const optSettingsGroup = await classifierGroup.createGroup(\n \"optimizer_settings\"\n );\n\n await optSettingsGroup.attrs.setItem(\"epochs\", classifier.fitOptions.epochs);\n\n await optSettingsGroup.attrs.setItem(\n \"batch_size\",\n classifier.fitOptions.batchSize\n );\n\n await optSettingsGroup.attrs.setItem(\n \"optimization_algorithm\",\n classifier.optimizationAlgorithm\n );\n\n await optSettingsGroup.attrs.setItem(\n \"learning_rate\",\n classifier.learningRate\n );\n\n await optSettingsGroup.attrs.setItem(\n \"loss_function\",\n classifier.lossFunction\n );\n\n const preprocessOptionsGroup = await classifierGroup.createGroup(\n \"preprocess_options\"\n );\n await serializePreprocessOptions(\n preprocessOptionsGroup,\n classifier.preprocessOptions\n );\n};\n\nconst serializeSegmenter = async (\n segmenterGroup: Group,\n segmenter: SegmenterState\n) => {\n const segmenterModel = availableSegmenterModels[segmenter.selectedModelIdx];\n\n await segmenterGroup.attrs.setItem(\"name\", segmenterModel.name);\n};\n\n/*\n ===========\n Entry Point\n ===========\n */\n\nexport const serializeProject = async (\n name: string,\n projectSlice: ProjectState,\n data: {\n kinds: Array;\n categories: Array;\n things: Array;\n },\n classifierSlice: ClassifierState,\n segmenterSlice: SegmenterState,\n loadCb: LoadCB\n) => {\n const zipStore = new ZipStore(name);\n const root = await group(zipStore, zipStore.rootName);\n\n // yarn/npm start/build must be run with REACT_APP_VERSION=$npm_package_version\n const piximiVersion = process.env.REACT_APP_VERSION;\n\n if (!piximiVersion) {\n throw Error(\"Missing Piximi version\");\n }\n\n root.attrs.setItem(\"version\", piximiVersion);\n\n const projectGroup = await root.createGroup(\"project\");\n await _serializeProject(projectGroup, projectSlice, data, loadCb);\n\n const classifierGroup = await root.createGroup(\"classifier\");\n await serializeClassifier(classifierGroup, classifierSlice);\n\n const segmenterGroup = await root.createGroup(\"segmenter\");\n await serializeSegmenter(segmenterGroup, segmenterSlice);\n\n return zipStore.zip;\n};\n\n/*\n ==========================\n File Serialization Helpers\n ==========================\n */\n\n/*\n * tensor.dataSync() returns either a Float32Array, Uint8Array\n * or Int32Array. 
The reason for recasting it is because the returned\n * data's buffer (rawData.buffer) sometimes has extra padding bytes.\n * recasting it as its own TypedArray sets a new underlying buffer\n * of the appropriate byteLength\n */\nconst cleanBuffer = (tensor: Tensor) => {\n const rawData = tensor.dataSync();\n\n if (rawData instanceof Float32Array) {\n return Float32Array.from(rawData);\n } else if (rawData instanceof Int32Array) {\n return Int32Array.from(rawData);\n } else if (rawData instanceof Uint8Array) {\n return Uint8Array.from(rawData);\n } else {\n return rawData;\n }\n};\n\nconst writeArray = async (\n group: Group,\n name: string,\n value: Float32Array | Uint8Array | Int32Array | Uint16Array | Uint32Array,\n shape?: number[]\n) => {\n const nested = new NestedArray(value, shape);\n return group.createDataset(name, undefined, nested, {\n chunks: false,\n fillValue: 0.0,\n //compressor: { id: Blosc.codecId },\n });\n};\n\nconst writeTensor = async (\n group: Group,\n name: string,\n tensor: Tensor,\n shape?: number[]\n) => {\n return writeArray(\n group,\n name,\n cleanBuffer(tensor),\n shape ? shape : tensor.shape\n );\n};\n","import { ChangeEvent, useState } from \"react\";\nimport { useDispatch, useSelector } from \"react-redux\";\n\nimport { Grid, TextField } from \"@mui/material\";\n\nimport { projectSlice } from \"store/project\";\n// TODO: implement segmenter serialization\n\nimport { HotkeyView } from \"utils/common/enums\";\nimport { useHotkeys } from \"hooks\";\nimport { saveAs } from \"file-saver\";\nimport { applicationSettingsSlice } from \"store/applicationSettings\";\nimport { DialogWithAction } from \"../DialogWithAction\";\nimport { selectDataProject } from \"store/data/selectors\";\nimport { logger } from \"utils/common/helpers\";\nimport { serializeProject } from \"utils/file-io/serialize\";\nimport { AlertState } from \"utils/common/types\";\nimport { AlertType } from \"utils/common/enums\";\nimport { selectProject } from \"store/project/selectors\";\nimport { selectClassifier } from \"store/classifier/selectors\";\nimport { selectSegmenter } from \"store/segmenter/selectors\";\n\ntype SaveProjectDialogProps = {\n onClose: () => void;\n open: boolean;\n};\n\nexport const SaveProjectDialog = ({\n onClose,\n open,\n}: SaveProjectDialogProps) => {\n const dispatch = useDispatch();\n\n const classifier = useSelector(selectClassifier);\n const segmenter = useSelector(selectSegmenter);\n\n const project = useSelector(selectProject);\n const data = useSelector(selectDataProject);\n\n const [projectName, setProjectName] = useState(project.name);\n\n const onLoadProgress = (loadPercent: number, loadMessage: string) => {\n dispatch(\n projectSlice.actions.sendLoadPercent({ loadPercent, loadMessage })\n );\n };\n\n const onSaveProjectClick = async () => {\n serializeProject(\n projectName,\n project,\n data,\n classifier,\n segmenter,\n onLoadProgress\n )\n .then((zip) => {\n return zip.generateAsync(\n {\n type: \"blob\",\n compression: \"DEFLATE\",\n compressionOptions: { level: 4 },\n },\n // onUpdate callback\n (meta: { percent: number }) => {\n onLoadProgress(\n meta.percent / 100,\n `compressing ${meta.percent.toFixed(2)}%`\n );\n // process.env.REACT_APP_LOG_LEVEL === \"1\" &&\n logger(`zipping %${Math.floor(meta.percent)}`);\n }\n );\n })\n .then((blob) => {\n onLoadProgress(-1, \"saving project...\");\n saveAs(blob, `${projectName}.zip`);\n // don't use onLoadProgress here, it may be sleeping\n // and ignoring updates; this *must* go through\n 
dispatch(projectSlice.actions.setLoadPercent({ loadPercent: 1 }));\n })\n .catch((err: Error) => {\n process.env.REACT_APP_LOG_LEVEL === \"1\" && console.error(err);\n\n process.env.NODE_ENV !== \"production\" &&\n process.env.REACT_APP_LOG_LEVEL === \"1\" &&\n console.error(err);\n const warning: AlertState = {\n alertType: AlertType.Warning,\n name: \"Could not parse project file\",\n description: `Error while parsing the project file: ${err.name}\\n${err.message}`,\n };\n\n dispatch(\n applicationSettingsSlice.actions.updateAlertState({\n alertState: warning,\n })\n );\n });\n\n onClose();\n };\n\n const onCancel = () => {\n onClose();\n };\n\n const onNameChange = (event: ChangeEvent) => {\n setProjectName(event.target.value);\n };\n\n useHotkeys(\n \"enter\",\n () => {\n onSaveProjectClick();\n },\n HotkeyView.SaveProjectDialog,\n { enableOnTags: [\"INPUT\"] },\n [onSaveProjectClick]\n );\n\n return (\n \n \n \n \n \n }\n onConfirm={onSaveProjectClick}\n confirmText=\"Save Project\"\n />\n );\n};\n","import React, { ChangeEvent, useState } from \"react\";\n\nimport { Grid, TextField } from \"@mui/material\";\n\nimport { useHotkeys } from \"hooks\";\n\nimport { HotkeyView } from \"utils/common/enums\";\nimport { DialogWithAction } from \"../DialogWithAction\";\n\ntype ExportAnnotationsDialogProps = {\n onClose: () => void;\n open: boolean;\n handleSave: (userProjectName: string) => void;\n defaultName: string;\n};\n\nexport const ExportAnnotationsDialog = ({\n onClose,\n open,\n handleSave,\n defaultName,\n}: ExportAnnotationsDialogProps) => {\n const [projectName, setProjectName] = useState(defaultName);\n\n const onCancel = () => {\n setProjectName(defaultName);\n onClose();\n };\n\n const onNameChange = (event: ChangeEvent) => {\n setProjectName(event.target.value);\n };\n\n useHotkeys(\n \"enter\",\n () => {\n handleSave(projectName);\n onClose();\n },\n HotkeyView.ExportAnnotationsDialog,\n { enableOnTags: [\"INPUT\"] },\n [handleSave]\n );\n\n return (\n \n \n \n \n \n }\n onClose={onCancel}\n onConfirm={() => handleSave(projectName)}\n confirmText=\"Export Annotations\"\n isOpen={open}\n />\n );\n};\n","import { batch, useDispatch, useSelector } from \"react-redux\";\nimport { dataSlice } from \"store/data/dataSlice\";\nimport { DialogWithAction } from \"../DialogWithAction\";\nimport { Box, TextField } from \"@mui/material\";\nimport { selectAllKindIds } from \"store/data/selectors\";\nimport { ChangeEvent, useCallback, useState } from \"react\";\nimport { generateUnknownCategory } from \"utils/common/helpers\";\nimport { Kind } from \"store/data/types\";\n\ntype CreateCategoriesDialogProps = {\n onClose: () => void;\n withContainedThings?: string[];\n withContainedCategories?: string[];\n open: boolean;\n changesPermanent?: boolean;\n secondaryAction?: () => void;\n};\n\nexport const CreateKindDialog = ({\n onClose,\n withContainedThings = [],\n withContainedCategories = [],\n open,\n changesPermanent,\n secondaryAction,\n}: CreateCategoriesDialogProps) => {\n const dispatch = useDispatch();\n const existingKinds = useSelector(selectAllKindIds);\n const [name, setName] = useState(\"\");\n const [errorHelperText, setErrorHelperText] = useState(\" \");\n const [isInvalidName, setIsInvalidName] = useState(false);\n\n const handleNameChange = (event: ChangeEvent) => {\n setName(event.target.value);\n validateInput(event.target.value);\n };\n\n const validateInput = useCallback(\n (categoryName: string) => {\n let validInput = true;\n let helperText = \" \";\n\n if (categoryName 
=== \"\") {\n helperText = \"Please type a kind name.\";\n validInput = false;\n } else if (\n existingKinds\n .map((kind) => kind.toString().toUpperCase())\n .includes(categoryName.toUpperCase())\n ) {\n helperText =\n \"Kind names must be unique. A kind with this name already exits.\";\n validInput = false;\n }\n setErrorHelperText(helperText);\n setIsInvalidName(!validInput);\n return validInput;\n },\n [existingKinds]\n );\n\n const handleConfirm = () => {\n const newUnknownCategory = generateUnknownCategory(name);\n const kind: Kind = {\n id: name,\n categories: [...withContainedCategories, newUnknownCategory.id],\n containing: withContainedThings,\n unknownCategoryId: newUnknownCategory.id,\n };\n\n batch(() => {\n dispatch(\n dataSlice.actions.addCategories({\n categories: [newUnknownCategory],\n isPermanent: changesPermanent,\n })\n );\n\n dispatch(\n dataSlice.actions.addKinds({\n kinds: [kind],\n isPermanent: changesPermanent,\n })\n );\n });\n secondaryAction && secondaryAction();\n onClose();\n };\n\n return (\n \n \n \n }\n onConfirm={() => handleConfirm()}\n confirmDisabled={isInvalidName}\n />\n );\n};\n","import { Box, IconButton } from \"@mui/material\";\nimport InfoIcon from \"@mui/icons-material/Info\";\n\ntype EvaluateMetricInfoBoxProps = {\n metric: string;\n value: number;\n link: string;\n};\n\nexport const EvaluationMetricsInfoBox = (props: EvaluateMetricInfoBoxProps) => {\n const { metric, value, link } = props;\n\n return (\n \n ({\n backgroundColor: theme.palette.info.light,\n display: \"flex\",\n flexDirection: \"column\",\n alignItems: { xs: \"center\", md: \"flex-start\" },\n m: 1,\n width: 130,\n borderRadius: 2,\n })}\n >\n ({\n fontSize: 16,\n color: theme.palette.info.dark,\n mt: 1,\n pl: 1,\n pr: 1,\n })}\n >\n {metric}:\n \n ({\n fontSize: 15,\n color: theme.palette.background.default,\n pl: 1,\n })}\n >\n {Number.isNaN(value) ? \"N/A\" : value.toFixed(3)}\n \n ({ color: theme.palette.background.default })}\n onClick={() => window.open(link)}\n >\n \n \n \n \n );\n};\n","let updateQueue = makeQueue();\nconst raf = fn => schedule(fn, updateQueue);\nlet writeQueue = makeQueue();\n\nraf.write = fn => schedule(fn, writeQueue);\n\nlet onStartQueue = makeQueue();\n\nraf.onStart = fn => schedule(fn, onStartQueue);\n\nlet onFrameQueue = makeQueue();\n\nraf.onFrame = fn => schedule(fn, onFrameQueue);\n\nlet onFinishQueue = makeQueue();\n\nraf.onFinish = fn => schedule(fn, onFinishQueue);\n\nlet timeouts = [];\n\nraf.setTimeout = (handler, ms) => {\n let time = raf.now() + ms;\n\n let cancel = () => {\n let i = timeouts.findIndex(t => t.cancel == cancel);\n if (~i) timeouts.splice(i, 1);\n __raf.count -= ~i ? 1 : 0;\n };\n\n let timeout = {\n time,\n handler,\n cancel\n };\n timeouts.splice(findTimeout(time), 0, timeout);\n __raf.count += 1;\n start();\n return timeout;\n};\n\nlet findTimeout = time => ~(~timeouts.findIndex(t => t.time > time) || ~timeouts.length);\n\nraf.cancel = fn => {\n updateQueue.delete(fn);\n writeQueue.delete(fn);\n};\n\nraf.sync = fn => {\n sync = true;\n raf.batchedUpdates(fn);\n sync = false;\n};\n\nraf.throttle = fn => {\n let lastArgs;\n\n function queuedFn() {\n try {\n fn(...lastArgs);\n } finally {\n lastArgs = null;\n }\n }\n\n function throttled(...args) {\n lastArgs = args;\n raf.onStart(queuedFn);\n }\n\n throttled.handler = fn;\n\n throttled.cancel = () => {\n onStartQueue.delete(queuedFn);\n lastArgs = null;\n };\n\n return throttled;\n};\n\nlet nativeRaf = typeof window != 'undefined' ? 
window.requestAnimationFrame : () => {};\n\nraf.use = impl => nativeRaf = impl;\n\nraf.now = typeof performance != 'undefined' ? () => performance.now() : Date.now;\n\nraf.batchedUpdates = fn => fn();\n\nraf.catch = console.error;\nraf.frameLoop = 'always';\n\nraf.advance = () => {\n if (raf.frameLoop !== 'demand') {\n console.warn('Cannot call the manual advancement of rafz whilst frameLoop is not set as demand');\n } else {\n update();\n }\n};\n\nlet ts = -1;\nlet sync = false;\n\nfunction schedule(fn, queue) {\n if (sync) {\n queue.delete(fn);\n fn(0);\n } else {\n queue.add(fn);\n start();\n }\n}\n\nfunction start() {\n if (ts < 0) {\n ts = 0;\n\n if (raf.frameLoop !== 'demand') {\n nativeRaf(loop);\n }\n }\n}\n\nfunction loop() {\n if (~ts) {\n nativeRaf(loop);\n raf.batchedUpdates(update);\n }\n}\n\nfunction update() {\n let prevTs = ts;\n ts = raf.now();\n let count = findTimeout(ts);\n\n if (count) {\n eachSafely(timeouts.splice(0, count), t => t.handler());\n __raf.count -= count;\n }\n\n onStartQueue.flush();\n updateQueue.flush(prevTs ? Math.min(64, ts - prevTs) : 16.667);\n onFrameQueue.flush();\n writeQueue.flush();\n onFinishQueue.flush();\n}\n\nfunction makeQueue() {\n let next = new Set();\n let current = next;\n return {\n add(fn) {\n __raf.count += current == next && !next.has(fn) ? 1 : 0;\n next.add(fn);\n },\n\n delete(fn) {\n __raf.count -= current == next && next.has(fn) ? 1 : 0;\n return next.delete(fn);\n },\n\n flush(arg) {\n if (current.size) {\n next = new Set();\n __raf.count -= current.size;\n eachSafely(current, fn => fn(arg) && next.add(fn));\n __raf.count += next.size;\n current = next;\n }\n }\n\n };\n}\n\nfunction eachSafely(values, each) {\n values.forEach(value => {\n try {\n each(value);\n } catch (e) {\n raf.catch(e);\n }\n });\n}\n\nconst __raf = {\n count: 0,\n\n clear() {\n ts = -1;\n timeouts = [];\n onStartQueue = makeQueue();\n updateQueue = makeQueue();\n onFrameQueue = makeQueue();\n writeQueue = makeQueue();\n onFinishQueue = makeQueue();\n __raf.count = 0;\n }\n\n};\n\nexport { __raf, raf };\n","import { raf } from '@react-spring/rafz';\nexport { raf } from '@react-spring/rafz';\nimport * as React from 'react';\nimport { useEffect, useState, useRef } from 'react';\n\nfunction noop() {}\nconst defineHidden = (obj, key, value) => Object.defineProperty(obj, key, {\n value,\n writable: true,\n configurable: true\n});\nconst is = {\n arr: Array.isArray,\n obj: a => !!a && a.constructor.name === 'Object',\n fun: a => typeof a === 'function',\n str: a => typeof a === 'string',\n num: a => typeof a === 'number',\n und: a => a === undefined\n};\nfunction isEqual(a, b) {\n if (is.arr(a)) {\n if (!is.arr(b) || a.length !== b.length) return false;\n\n for (let i = 0; i < a.length; i++) {\n if (a[i] !== b[i]) return false;\n }\n\n return true;\n }\n\n return a === b;\n}\nconst each = (obj, fn) => obj.forEach(fn);\nfunction eachProp(obj, fn, ctx) {\n if (is.arr(obj)) {\n for (let i = 0; i < obj.length; i++) {\n fn.call(ctx, obj[i], `${i}`);\n }\n\n return;\n }\n\n for (const key in obj) {\n if (obj.hasOwnProperty(key)) {\n fn.call(ctx, obj[key], key);\n }\n }\n}\nconst toArray = a => is.und(a) ? [] : is.arr(a) ? 
a : [a];\nfunction flush(queue, iterator) {\n if (queue.size) {\n const items = Array.from(queue);\n queue.clear();\n each(items, iterator);\n }\n}\nconst flushCalls = (queue, ...args) => flush(queue, fn => fn(...args));\n\nlet createStringInterpolator$1;\nlet to;\nlet colors$1 = null;\nlet skipAnimation = false;\nlet willAdvance = noop;\nconst assign = globals => {\n if (globals.to) to = globals.to;\n if (globals.now) raf.now = globals.now;\n if (globals.colors !== undefined) colors$1 = globals.colors;\n if (globals.skipAnimation != null) skipAnimation = globals.skipAnimation;\n if (globals.createStringInterpolator) createStringInterpolator$1 = globals.createStringInterpolator;\n if (globals.requestAnimationFrame) raf.use(globals.requestAnimationFrame);\n if (globals.batchedUpdates) raf.batchedUpdates = globals.batchedUpdates;\n if (globals.willAdvance) willAdvance = globals.willAdvance;\n if (globals.frameLoop) raf.frameLoop = globals.frameLoop;\n};\n\nvar globals = /*#__PURE__*/Object.freeze({\n __proto__: null,\n get createStringInterpolator () { return createStringInterpolator$1; },\n get to () { return to; },\n get colors () { return colors$1; },\n get skipAnimation () { return skipAnimation; },\n get willAdvance () { return willAdvance; },\n assign: assign\n});\n\nconst startQueue = new Set();\nlet currentFrame = [];\nlet prevFrame = [];\nlet priority = 0;\nconst frameLoop = {\n get idle() {\n return !startQueue.size && !currentFrame.length;\n },\n\n start(animation) {\n if (priority > animation.priority) {\n startQueue.add(animation);\n raf.onStart(flushStartQueue);\n } else {\n startSafely(animation);\n raf(advance);\n }\n },\n\n advance,\n\n sort(animation) {\n if (priority) {\n raf.onFrame(() => frameLoop.sort(animation));\n } else {\n const prevIndex = currentFrame.indexOf(animation);\n\n if (~prevIndex) {\n currentFrame.splice(prevIndex, 1);\n startUnsafely(animation);\n }\n }\n },\n\n clear() {\n currentFrame = [];\n startQueue.clear();\n }\n\n};\n\nfunction flushStartQueue() {\n startQueue.forEach(startSafely);\n startQueue.clear();\n raf(advance);\n}\n\nfunction startSafely(animation) {\n if (!currentFrame.includes(animation)) startUnsafely(animation);\n}\n\nfunction startUnsafely(animation) {\n currentFrame.splice(findIndex(currentFrame, other => other.priority > animation.priority), 0, animation);\n}\n\nfunction advance(dt) {\n const nextFrame = prevFrame;\n\n for (let i = 0; i < currentFrame.length; i++) {\n const animation = currentFrame[i];\n priority = animation.priority;\n\n if (!animation.idle) {\n willAdvance(animation);\n animation.advance(dt);\n\n if (!animation.idle) {\n nextFrame.push(animation);\n }\n }\n }\n\n priority = 0;\n prevFrame = currentFrame;\n prevFrame.length = 0;\n currentFrame = nextFrame;\n return currentFrame.length > 0;\n}\n\nfunction findIndex(arr, test) {\n const index = arr.findIndex(test);\n return index < 0 ? 
arr.length : index;\n}\n\nconst colors = {\n transparent: 0x00000000,\n aliceblue: 0xf0f8ffff,\n antiquewhite: 0xfaebd7ff,\n aqua: 0x00ffffff,\n aquamarine: 0x7fffd4ff,\n azure: 0xf0ffffff,\n beige: 0xf5f5dcff,\n bisque: 0xffe4c4ff,\n black: 0x000000ff,\n blanchedalmond: 0xffebcdff,\n blue: 0x0000ffff,\n blueviolet: 0x8a2be2ff,\n brown: 0xa52a2aff,\n burlywood: 0xdeb887ff,\n burntsienna: 0xea7e5dff,\n cadetblue: 0x5f9ea0ff,\n chartreuse: 0x7fff00ff,\n chocolate: 0xd2691eff,\n coral: 0xff7f50ff,\n cornflowerblue: 0x6495edff,\n cornsilk: 0xfff8dcff,\n crimson: 0xdc143cff,\n cyan: 0x00ffffff,\n darkblue: 0x00008bff,\n darkcyan: 0x008b8bff,\n darkgoldenrod: 0xb8860bff,\n darkgray: 0xa9a9a9ff,\n darkgreen: 0x006400ff,\n darkgrey: 0xa9a9a9ff,\n darkkhaki: 0xbdb76bff,\n darkmagenta: 0x8b008bff,\n darkolivegreen: 0x556b2fff,\n darkorange: 0xff8c00ff,\n darkorchid: 0x9932ccff,\n darkred: 0x8b0000ff,\n darksalmon: 0xe9967aff,\n darkseagreen: 0x8fbc8fff,\n darkslateblue: 0x483d8bff,\n darkslategray: 0x2f4f4fff,\n darkslategrey: 0x2f4f4fff,\n darkturquoise: 0x00ced1ff,\n darkviolet: 0x9400d3ff,\n deeppink: 0xff1493ff,\n deepskyblue: 0x00bfffff,\n dimgray: 0x696969ff,\n dimgrey: 0x696969ff,\n dodgerblue: 0x1e90ffff,\n firebrick: 0xb22222ff,\n floralwhite: 0xfffaf0ff,\n forestgreen: 0x228b22ff,\n fuchsia: 0xff00ffff,\n gainsboro: 0xdcdcdcff,\n ghostwhite: 0xf8f8ffff,\n gold: 0xffd700ff,\n goldenrod: 0xdaa520ff,\n gray: 0x808080ff,\n green: 0x008000ff,\n greenyellow: 0xadff2fff,\n grey: 0x808080ff,\n honeydew: 0xf0fff0ff,\n hotpink: 0xff69b4ff,\n indianred: 0xcd5c5cff,\n indigo: 0x4b0082ff,\n ivory: 0xfffff0ff,\n khaki: 0xf0e68cff,\n lavender: 0xe6e6faff,\n lavenderblush: 0xfff0f5ff,\n lawngreen: 0x7cfc00ff,\n lemonchiffon: 0xfffacdff,\n lightblue: 0xadd8e6ff,\n lightcoral: 0xf08080ff,\n lightcyan: 0xe0ffffff,\n lightgoldenrodyellow: 0xfafad2ff,\n lightgray: 0xd3d3d3ff,\n lightgreen: 0x90ee90ff,\n lightgrey: 0xd3d3d3ff,\n lightpink: 0xffb6c1ff,\n lightsalmon: 0xffa07aff,\n lightseagreen: 0x20b2aaff,\n lightskyblue: 0x87cefaff,\n lightslategray: 0x778899ff,\n lightslategrey: 0x778899ff,\n lightsteelblue: 0xb0c4deff,\n lightyellow: 0xffffe0ff,\n lime: 0x00ff00ff,\n limegreen: 0x32cd32ff,\n linen: 0xfaf0e6ff,\n magenta: 0xff00ffff,\n maroon: 0x800000ff,\n mediumaquamarine: 0x66cdaaff,\n mediumblue: 0x0000cdff,\n mediumorchid: 0xba55d3ff,\n mediumpurple: 0x9370dbff,\n mediumseagreen: 0x3cb371ff,\n mediumslateblue: 0x7b68eeff,\n mediumspringgreen: 0x00fa9aff,\n mediumturquoise: 0x48d1ccff,\n mediumvioletred: 0xc71585ff,\n midnightblue: 0x191970ff,\n mintcream: 0xf5fffaff,\n mistyrose: 0xffe4e1ff,\n moccasin: 0xffe4b5ff,\n navajowhite: 0xffdeadff,\n navy: 0x000080ff,\n oldlace: 0xfdf5e6ff,\n olive: 0x808000ff,\n olivedrab: 0x6b8e23ff,\n orange: 0xffa500ff,\n orangered: 0xff4500ff,\n orchid: 0xda70d6ff,\n palegoldenrod: 0xeee8aaff,\n palegreen: 0x98fb98ff,\n paleturquoise: 0xafeeeeff,\n palevioletred: 0xdb7093ff,\n papayawhip: 0xffefd5ff,\n peachpuff: 0xffdab9ff,\n peru: 0xcd853fff,\n pink: 0xffc0cbff,\n plum: 0xdda0ddff,\n powderblue: 0xb0e0e6ff,\n purple: 0x800080ff,\n rebeccapurple: 0x663399ff,\n red: 0xff0000ff,\n rosybrown: 0xbc8f8fff,\n royalblue: 0x4169e1ff,\n saddlebrown: 0x8b4513ff,\n salmon: 0xfa8072ff,\n sandybrown: 0xf4a460ff,\n seagreen: 0x2e8b57ff,\n seashell: 0xfff5eeff,\n sienna: 0xa0522dff,\n silver: 0xc0c0c0ff,\n skyblue: 0x87ceebff,\n slateblue: 0x6a5acdff,\n slategray: 0x708090ff,\n slategrey: 0x708090ff,\n snow: 0xfffafaff,\n springgreen: 0x00ff7fff,\n steelblue: 0x4682b4ff,\n tan: 
0xd2b48cff,\n teal: 0x008080ff,\n thistle: 0xd8bfd8ff,\n tomato: 0xff6347ff,\n turquoise: 0x40e0d0ff,\n violet: 0xee82eeff,\n wheat: 0xf5deb3ff,\n white: 0xffffffff,\n whitesmoke: 0xf5f5f5ff,\n yellow: 0xffff00ff,\n yellowgreen: 0x9acd32ff\n};\n\nconst NUMBER = '[-+]?\\\\d*\\\\.?\\\\d+';\nconst PERCENTAGE = NUMBER + '%';\n\nfunction call(...parts) {\n return '\\\\(\\\\s*(' + parts.join(')\\\\s*,\\\\s*(') + ')\\\\s*\\\\)';\n}\n\nconst rgb = new RegExp('rgb' + call(NUMBER, NUMBER, NUMBER));\nconst rgba = new RegExp('rgba' + call(NUMBER, NUMBER, NUMBER, NUMBER));\nconst hsl = new RegExp('hsl' + call(NUMBER, PERCENTAGE, PERCENTAGE));\nconst hsla = new RegExp('hsla' + call(NUMBER, PERCENTAGE, PERCENTAGE, NUMBER));\nconst hex3 = /^#([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/;\nconst hex4 = /^#([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})([0-9a-fA-F]{1})$/;\nconst hex6 = /^#([0-9a-fA-F]{6})$/;\nconst hex8 = /^#([0-9a-fA-F]{8})$/;\n\nfunction normalizeColor(color) {\n let match;\n\n if (typeof color === 'number') {\n return color >>> 0 === color && color >= 0 && color <= 0xffffffff ? color : null;\n }\n\n if (match = hex6.exec(color)) return parseInt(match[1] + 'ff', 16) >>> 0;\n\n if (colors$1 && colors$1[color] !== undefined) {\n return colors$1[color];\n }\n\n if (match = rgb.exec(color)) {\n return (parse255(match[1]) << 24 | parse255(match[2]) << 16 | parse255(match[3]) << 8 | 0x000000ff) >>> 0;\n }\n\n if (match = rgba.exec(color)) {\n return (parse255(match[1]) << 24 | parse255(match[2]) << 16 | parse255(match[3]) << 8 | parse1(match[4])) >>> 0;\n }\n\n if (match = hex3.exec(color)) {\n return parseInt(match[1] + match[1] + match[2] + match[2] + match[3] + match[3] + 'ff', 16) >>> 0;\n }\n\n if (match = hex8.exec(color)) return parseInt(match[1], 16) >>> 0;\n\n if (match = hex4.exec(color)) {\n return parseInt(match[1] + match[1] + match[2] + match[2] + match[3] + match[3] + match[4] + match[4], 16) >>> 0;\n }\n\n if (match = hsl.exec(color)) {\n return (hslToRgb(parse360(match[1]), parsePercentage(match[2]), parsePercentage(match[3])) | 0x000000ff) >>> 0;\n }\n\n if (match = hsla.exec(color)) {\n return (hslToRgb(parse360(match[1]), parsePercentage(match[2]), parsePercentage(match[3])) | parse1(match[4])) >>> 0;\n }\n\n return null;\n}\n\nfunction hue2rgb(p, q, t) {\n if (t < 0) t += 1;\n if (t > 1) t -= 1;\n if (t < 1 / 6) return p + (q - p) * 6 * t;\n if (t < 1 / 2) return q;\n if (t < 2 / 3) return p + (q - p) * (2 / 3 - t) * 6;\n return p;\n}\n\nfunction hslToRgb(h, s, l) {\n const q = l < 0.5 ? 
l * (1 + s) : l + s - l * s;\n const p = 2 * l - q;\n const r = hue2rgb(p, q, h + 1 / 3);\n const g = hue2rgb(p, q, h);\n const b = hue2rgb(p, q, h - 1 / 3);\n return Math.round(r * 255) << 24 | Math.round(g * 255) << 16 | Math.round(b * 255) << 8;\n}\n\nfunction parse255(str) {\n const int = parseInt(str, 10);\n if (int < 0) return 0;\n if (int > 255) return 255;\n return int;\n}\n\nfunction parse360(str) {\n const int = parseFloat(str);\n return (int % 360 + 360) % 360 / 360;\n}\n\nfunction parse1(str) {\n const num = parseFloat(str);\n if (num < 0) return 0;\n if (num > 1) return 255;\n return Math.round(num * 255);\n}\n\nfunction parsePercentage(str) {\n const int = parseFloat(str);\n if (int < 0) return 0;\n if (int > 100) return 1;\n return int / 100;\n}\n\nfunction colorToRgba(input) {\n let int32Color = normalizeColor(input);\n if (int32Color === null) return input;\n int32Color = int32Color || 0;\n let r = (int32Color & 0xff000000) >>> 24;\n let g = (int32Color & 0x00ff0000) >>> 16;\n let b = (int32Color & 0x0000ff00) >>> 8;\n let a = (int32Color & 0x000000ff) / 255;\n return `rgba(${r}, ${g}, ${b}, ${a})`;\n}\n\nconst createInterpolator = (range, output, extrapolate) => {\n if (is.fun(range)) {\n return range;\n }\n\n if (is.arr(range)) {\n return createInterpolator({\n range,\n output: output,\n extrapolate\n });\n }\n\n if (is.str(range.output[0])) {\n return createStringInterpolator$1(range);\n }\n\n const config = range;\n const outputRange = config.output;\n const inputRange = config.range || [0, 1];\n const extrapolateLeft = config.extrapolateLeft || config.extrapolate || 'extend';\n const extrapolateRight = config.extrapolateRight || config.extrapolate || 'extend';\n\n const easing = config.easing || (t => t);\n\n return input => {\n const range = findRange(input, inputRange);\n return interpolate(input, inputRange[range], inputRange[range + 1], outputRange[range], outputRange[range + 1], easing, extrapolateLeft, extrapolateRight, config.map);\n };\n};\n\nfunction interpolate(input, inputMin, inputMax, outputMin, outputMax, easing, extrapolateLeft, extrapolateRight, map) {\n let result = map ? map(input) : input;\n\n if (result < inputMin) {\n if (extrapolateLeft === 'identity') return result;else if (extrapolateLeft === 'clamp') result = inputMin;\n }\n\n if (result > inputMax) {\n if (extrapolateRight === 'identity') return result;else if (extrapolateRight === 'clamp') result = inputMax;\n }\n\n if (outputMin === outputMax) return outputMin;\n if (inputMin === inputMax) return input <= inputMin ? 
outputMin : outputMax;\n if (inputMin === -Infinity) result = -result;else if (inputMax === Infinity) result = result - inputMin;else result = (result - inputMin) / (inputMax - inputMin);\n result = easing(result);\n if (outputMin === -Infinity) result = -result;else if (outputMax === Infinity) result = result + outputMin;else result = result * (outputMax - outputMin) + outputMin;\n return result;\n}\n\nfunction findRange(input, inputRange) {\n for (var i = 1; i < inputRange.length - 1; ++i) if (inputRange[i] >= input) break;\n\n return i - 1;\n}\n\nfunction _extends() {\n _extends = Object.assign || function (target) {\n for (var i = 1; i < arguments.length; i++) {\n var source = arguments[i];\n\n for (var key in source) {\n if (Object.prototype.hasOwnProperty.call(source, key)) {\n target[key] = source[key];\n }\n }\n }\n\n return target;\n };\n\n return _extends.apply(this, arguments);\n}\n\nconst $get = Symbol.for('FluidValue.get');\nconst $observers = Symbol.for('FluidValue.observers');\n\nconst hasFluidValue = arg => Boolean(arg && arg[$get]);\n\nconst getFluidValue = arg => arg && arg[$get] ? arg[$get]() : arg;\n\nconst getFluidObservers = target => target[$observers] || null;\n\nfunction callFluidObserver(observer, event) {\n if (observer.eventObserved) {\n observer.eventObserved(event);\n } else {\n observer(event);\n }\n}\n\nfunction callFluidObservers(target, event) {\n let observers = target[$observers];\n\n if (observers) {\n observers.forEach(observer => {\n callFluidObserver(observer, event);\n });\n }\n}\n\nclass FluidValue {\n constructor(get) {\n this[$get] = void 0;\n this[$observers] = void 0;\n\n if (!get && !(get = this.get)) {\n throw Error('Unknown getter');\n }\n\n setFluidGetter(this, get);\n }\n\n}\n\nconst setFluidGetter = (target, get) => setHidden(target, $get, get);\n\nfunction addFluidObserver(target, observer) {\n if (target[$get]) {\n let observers = target[$observers];\n\n if (!observers) {\n setHidden(target, $observers, observers = new Set());\n }\n\n if (!observers.has(observer)) {\n observers.add(observer);\n\n if (target.observerAdded) {\n target.observerAdded(observers.size, observer);\n }\n }\n }\n\n return observer;\n}\n\nfunction removeFluidObserver(target, observer) {\n let observers = target[$observers];\n\n if (observers && observers.has(observer)) {\n const count = observers.size - 1;\n\n if (count) {\n observers.delete(observer);\n } else {\n target[$observers] = null;\n }\n\n if (target.observerRemoved) {\n target.observerRemoved(count, observer);\n }\n }\n}\n\nconst setHidden = (target, key, value) => Object.defineProperty(target, key, {\n value,\n writable: true,\n configurable: true\n});\n\nconst numberRegex = /[+\\-]?(?:0|[1-9]\\d*)(?:\\.\\d*)?(?:[eE][+\\-]?\\d+)?/g;\nconst colorRegex = /(#(?:[0-9a-f]{2}){2,4}|(#[0-9a-f]{3})|(rgb|hsl)a?\\((-?\\d+%?[,\\s]+){2,3}\\s*[\\d\\.]+%?\\))/gi;\nconst unitRegex = new RegExp(`(${numberRegex.source})(%|[a-z]+)`, 'i');\nlet namedColorRegex;\nconst rgbaRegex = /rgba\\(([0-9\\.-]+), ([0-9\\.-]+), ([0-9\\.-]+), ([0-9\\.-]+)\\)/gi;\n\nconst rgbaRound = (_, p1, p2, p3, p4) => `rgba(${Math.round(p1)}, ${Math.round(p2)}, ${Math.round(p3)}, ${p4})`;\n\nconst createStringInterpolator = config => {\n if (!namedColorRegex) namedColorRegex = colors$1 ? 
new RegExp(`(${Object.keys(colors$1).join('|')})(?!\\\\w)`, 'g') : /^\\b$/;\n const output = config.output.map(value => getFluidValue(value).replace(colorRegex, colorToRgba).replace(namedColorRegex, colorToRgba));\n const keyframes = output.map(value => value.match(numberRegex).map(Number));\n const outputRanges = keyframes[0].map((_, i) => keyframes.map(values => {\n if (!(i in values)) {\n throw Error('The arity of each \"output\" value must be equal');\n }\n\n return values[i];\n }));\n const interpolators = outputRanges.map(output => createInterpolator(_extends({}, config, {\n output\n })));\n return input => {\n var _output$find;\n\n const missingUnit = !unitRegex.test(output[0]) && ((_output$find = output.find(value => unitRegex.test(value))) == null ? void 0 : _output$find.replace(numberRegex, ''));\n let i = 0;\n return output[0].replace(numberRegex, () => `${interpolators[i++](input)}${missingUnit || ''}`).replace(rgbaRegex, rgbaRound);\n };\n};\n\nconst prefix = 'react-spring: ';\n\nconst once = fn => {\n const func = fn;\n let called = false;\n\n if (typeof func != 'function') {\n throw new TypeError(`${prefix}once requires a function parameter`);\n }\n\n return (...args) => {\n if (!called) {\n func(...args);\n called = true;\n }\n };\n};\n\nconst warnInterpolate = once(console.warn);\nfunction deprecateInterpolate() {\n warnInterpolate(`${prefix}The \"interpolate\" function is deprecated in v9 (use \"to\" instead)`);\n}\nconst warnDirectCall = once(console.warn);\nfunction deprecateDirectCall() {\n warnDirectCall(`${prefix}Directly calling start instead of using the api object is deprecated in v9 (use \".start\" instead), this will be removed in later 0.X.0 versions`);\n}\n\nfunction isAnimatedString(value) {\n return is.str(value) && (value[0] == '#' || /\\d/.test(value) || value in (colors$1 || {}));\n}\n\nconst useOnce = effect => useEffect(effect, emptyDeps);\nconst emptyDeps = [];\n\nfunction useForceUpdate() {\n const update = useState()[1];\n const mounted = useState(makeMountedRef)[0];\n useOnce(mounted.unmount);\n return () => {\n if (mounted.current) {\n update({});\n }\n };\n}\n\nfunction makeMountedRef() {\n const mounted = {\n current: true,\n unmount: () => () => {\n mounted.current = false;\n }\n };\n return mounted;\n}\n\nfunction useMemoOne(getResult, inputs) {\n const [initial] = useState(() => ({\n inputs,\n result: getResult()\n }));\n const committed = useRef();\n const prevCache = committed.current;\n let cache = prevCache;\n\n if (cache) {\n const useCache = Boolean(inputs && cache.inputs && areInputsEqual(inputs, cache.inputs));\n\n if (!useCache) {\n cache = {\n inputs,\n result: getResult()\n };\n }\n } else {\n cache = initial;\n }\n\n useEffect(() => {\n committed.current = cache;\n\n if (prevCache == initial) {\n initial.inputs = initial.result = undefined;\n }\n }, [cache]);\n return cache.result;\n}\n\nfunction areInputsEqual(next, prev) {\n if (next.length !== prev.length) {\n return false;\n }\n\n for (let i = 0; i < next.length; i++) {\n if (next[i] !== prev[i]) {\n return false;\n }\n }\n\n return true;\n}\n\nfunction usePrev(value) {\n const prevRef = useRef();\n useEffect(() => {\n prevRef.current = value;\n });\n return prevRef.current;\n}\n\nconst useLayoutEffect = typeof window !== 'undefined' && window.document && window.document.createElement ? 
React.useLayoutEffect : React.useEffect;\n\nexport { FluidValue, globals as Globals, addFluidObserver, callFluidObserver, callFluidObservers, colorToRgba, colors, createInterpolator, createStringInterpolator, defineHidden, deprecateDirectCall, deprecateInterpolate, each, eachProp, flush, flushCalls, frameLoop, getFluidObservers, getFluidValue, hasFluidValue, hex3, hex4, hex6, hex8, hsl, hsla, is, isAnimatedString, isEqual, noop, removeFluidObserver, rgb, rgba, setFluidGetter, toArray, useForceUpdate, useLayoutEffect, useMemoOne, useOnce, usePrev };\n","import { defineHidden, is, createInterpolator, eachProp, hasFluidValue, getFluidValue, each, isAnimatedString, useForceUpdate, useLayoutEffect, addFluidObserver, removeFluidObserver, raf, useOnce } from '@react-spring/shared';\nimport * as React from 'react';\nimport { forwardRef, useRef, useCallback, useEffect } from 'react';\n\nconst $node = Symbol.for('Animated:node');\nconst isAnimated = value => !!value && value[$node] === value;\nconst getAnimated = owner => owner && owner[$node];\nconst setAnimated = (owner, node) => defineHidden(owner, $node, node);\nconst getPayload = owner => owner && owner[$node] && owner[$node].getPayload();\nclass Animated {\n constructor() {\n this.payload = void 0;\n setAnimated(this, this);\n }\n\n getPayload() {\n return this.payload || [];\n }\n\n}\n\nclass AnimatedValue extends Animated {\n constructor(_value) {\n super();\n this.done = true;\n this.elapsedTime = void 0;\n this.lastPosition = void 0;\n this.lastVelocity = void 0;\n this.v0 = void 0;\n this.durationProgress = 0;\n this._value = _value;\n\n if (is.num(this._value)) {\n this.lastPosition = this._value;\n }\n }\n\n static create(value) {\n return new AnimatedValue(value);\n }\n\n getPayload() {\n return [this];\n }\n\n getValue() {\n return this._value;\n }\n\n setValue(value, step) {\n if (is.num(value)) {\n this.lastPosition = value;\n\n if (step) {\n value = Math.round(value / step) * step;\n\n if (this.done) {\n this.lastPosition = value;\n }\n }\n }\n\n if (this._value === value) {\n return false;\n }\n\n this._value = value;\n return true;\n }\n\n reset() {\n const {\n done\n } = this;\n this.done = false;\n\n if (is.num(this._value)) {\n this.elapsedTime = 0;\n this.durationProgress = 0;\n this.lastPosition = this._value;\n if (done) this.lastVelocity = null;\n this.v0 = null;\n }\n }\n\n}\n\nclass AnimatedString extends AnimatedValue {\n constructor(value) {\n super(0);\n this._string = null;\n this._toString = void 0;\n this._toString = createInterpolator({\n output: [value, value]\n });\n }\n\n static create(value) {\n return new AnimatedString(value);\n }\n\n getValue() {\n let value = this._string;\n return value == null ? 
this._string = this._toString(this._value) : value;\n }\n\n setValue(value) {\n if (is.str(value)) {\n if (value == this._string) {\n return false;\n }\n\n this._string = value;\n this._value = 1;\n } else if (super.setValue(value)) {\n this._string = null;\n } else {\n return false;\n }\n\n return true;\n }\n\n reset(goal) {\n if (goal) {\n this._toString = createInterpolator({\n output: [this.getValue(), goal]\n });\n }\n\n this._value = 0;\n super.reset();\n }\n\n}\n\nconst TreeContext = {\n dependencies: null\n};\n\nclass AnimatedObject extends Animated {\n constructor(source) {\n super();\n this.source = source;\n this.setValue(source);\n }\n\n getValue(animated) {\n const values = {};\n eachProp(this.source, (source, key) => {\n if (isAnimated(source)) {\n values[key] = source.getValue(animated);\n } else if (hasFluidValue(source)) {\n values[key] = getFluidValue(source);\n } else if (!animated) {\n values[key] = source;\n }\n });\n return values;\n }\n\n setValue(source) {\n this.source = source;\n this.payload = this._makePayload(source);\n }\n\n reset() {\n if (this.payload) {\n each(this.payload, node => node.reset());\n }\n }\n\n _makePayload(source) {\n if (source) {\n const payload = new Set();\n eachProp(source, this._addToPayload, payload);\n return Array.from(payload);\n }\n }\n\n _addToPayload(source) {\n if (TreeContext.dependencies && hasFluidValue(source)) {\n TreeContext.dependencies.add(source);\n }\n\n const payload = getPayload(source);\n\n if (payload) {\n each(payload, node => this.add(node));\n }\n }\n\n}\n\nclass AnimatedArray extends AnimatedObject {\n constructor(source) {\n super(source);\n }\n\n static create(source) {\n return new AnimatedArray(source);\n }\n\n getValue() {\n return this.source.map(node => node.getValue());\n }\n\n setValue(source) {\n const payload = this.getPayload();\n\n if (source.length == payload.length) {\n return payload.map((node, i) => node.setValue(source[i])).some(Boolean);\n }\n\n super.setValue(source.map(makeAnimated));\n return true;\n }\n\n}\n\nfunction makeAnimated(value) {\n const nodeType = isAnimatedString(value) ? AnimatedString : AnimatedValue;\n return nodeType.create(value);\n}\n\nfunction getAnimatedType(value) {\n const parentNode = getAnimated(value);\n return parentNode ? parentNode.constructor : is.arr(value) ? AnimatedArray : isAnimatedString(value) ? AnimatedString : AnimatedValue;\n}\n\nfunction _extends() {\n _extends = Object.assign || function (target) {\n for (var i = 1; i < arguments.length; i++) {\n var source = arguments[i];\n\n for (var key in source) {\n if (Object.prototype.hasOwnProperty.call(source, key)) {\n target[key] = source[key];\n }\n }\n }\n\n return target;\n };\n\n return _extends.apply(this, arguments);\n}\n\nconst withAnimated = (Component, host) => {\n const hasInstance = !is.fun(Component) || Component.prototype && Component.prototype.isReactComponent;\n return forwardRef((givenProps, givenRef) => {\n const instanceRef = useRef(null);\n const ref = hasInstance && useCallback(value => {\n instanceRef.current = updateRef(givenRef, value);\n }, [givenRef]);\n const [props, deps] = getAnimatedState(givenProps, host);\n const forceUpdate = useForceUpdate();\n\n const callback = () => {\n const instance = instanceRef.current;\n\n if (hasInstance && !instance) {\n return;\n }\n\n const didUpdate = instance ? 
host.applyAnimatedValues(instance, props.getValue(true)) : false;\n\n if (didUpdate === false) {\n forceUpdate();\n }\n };\n\n const observer = new PropsObserver(callback, deps);\n const observerRef = useRef();\n useLayoutEffect(() => {\n const lastObserver = observerRef.current;\n observerRef.current = observer;\n each(deps, dep => addFluidObserver(dep, observer));\n\n if (lastObserver) {\n each(lastObserver.deps, dep => removeFluidObserver(dep, lastObserver));\n raf.cancel(lastObserver.update);\n }\n });\n useEffect(callback, []);\n useOnce(() => () => {\n const observer = observerRef.current;\n each(observer.deps, dep => removeFluidObserver(dep, observer));\n });\n const usedProps = host.getComponentProps(props.getValue());\n return React.createElement(Component, _extends({}, usedProps, {\n ref: ref\n }));\n });\n};\n\nclass PropsObserver {\n constructor(update, deps) {\n this.update = update;\n this.deps = deps;\n }\n\n eventObserved(event) {\n if (event.type == 'change') {\n raf.write(this.update);\n }\n }\n\n}\n\nfunction getAnimatedState(props, host) {\n const dependencies = new Set();\n TreeContext.dependencies = dependencies;\n if (props.style) props = _extends({}, props, {\n style: host.createAnimatedStyle(props.style)\n });\n props = new AnimatedObject(props);\n TreeContext.dependencies = null;\n return [props, dependencies];\n}\n\nfunction updateRef(ref, value) {\n if (ref) {\n if (is.fun(ref)) ref(value);else ref.current = value;\n }\n\n return value;\n}\n\nconst cacheKey = Symbol.for('AnimatedComponent');\nconst createHost = (components, {\n applyAnimatedValues: _applyAnimatedValues = () => false,\n createAnimatedStyle: _createAnimatedStyle = style => new AnimatedObject(style),\n getComponentProps: _getComponentProps = props => props\n} = {}) => {\n const hostConfig = {\n applyAnimatedValues: _applyAnimatedValues,\n createAnimatedStyle: _createAnimatedStyle,\n getComponentProps: _getComponentProps\n };\n\n const animated = Component => {\n const displayName = getDisplayName(Component) || 'Anonymous';\n\n if (is.str(Component)) {\n Component = animated[Component] || (animated[Component] = withAnimated(Component, hostConfig));\n } else {\n Component = Component[cacheKey] || (Component[cacheKey] = withAnimated(Component, hostConfig));\n }\n\n Component.displayName = `Animated(${displayName})`;\n return Component;\n };\n\n eachProp(components, (Component, key) => {\n if (is.arr(components)) {\n key = getDisplayName(Component);\n }\n\n animated[key] = animated(Component);\n });\n return {\n animated\n };\n};\n\nconst getDisplayName = arg => is.str(arg) ? arg : arg && is.str(arg.displayName) ? 
arg.displayName : is.fun(arg) && arg.name || null;\n\nexport { Animated, AnimatedArray, AnimatedObject, AnimatedString, AnimatedValue, createHost, getAnimated, getAnimatedType, getPayload, isAnimated, setAnimated };\n","import { eachProp, is, toArray, isAnimatedString, Globals, getFluidValue, useLayoutEffect, each, raf, flush, FluidValue, deprecateInterpolate, callFluidObservers, frameLoop, hasFluidValue, flushCalls, isEqual, getFluidObservers, addFluidObserver, removeFluidObserver, noop, useMemoOne, deprecateDirectCall, useForceUpdate, usePrev, useOnce, createInterpolator, createStringInterpolator } from '@react-spring/shared';\nexport { Globals, createInterpolator } from '@react-spring/shared';\nimport * as React from 'react';\nimport { useContext, useMemo, useRef, useState } from 'react';\nimport { getAnimated, AnimatedValue, getPayload, AnimatedString, getAnimatedType, setAnimated } from '@react-spring/animated';\nexport * from '@react-spring/types/animated';\nexport * from '@react-spring/types/interpolation';\n\nfunction _extends() {\n _extends = Object.assign || function (target) {\n for (var i = 1; i < arguments.length; i++) {\n var source = arguments[i];\n\n for (var key in source) {\n if (Object.prototype.hasOwnProperty.call(source, key)) {\n target[key] = source[key];\n }\n }\n }\n\n return target;\n };\n\n return _extends.apply(this, arguments);\n}\n\nfunction callProp(value, ...args) {\n return is.fun(value) ? value(...args) : value;\n}\nconst matchProp = (value, key) => value === true || !!(key && value && (is.fun(value) ? value(key) : toArray(value).includes(key)));\nconst resolveProp = (prop, key) => is.obj(prop) ? key && prop[key] : prop;\nconst getDefaultProp = (props, key) => props.default === true ? props[key] : props.default ? props.default[key] : undefined;\n\nconst noopTransform = value => value;\n\nconst getDefaultProps = (props, transform = noopTransform) => {\n let keys = DEFAULT_PROPS;\n\n if (props.default && props.default !== true) {\n props = props.default;\n keys = Object.keys(props);\n }\n\n const defaults = {};\n\n for (const key of keys) {\n const value = transform(props[key], key);\n\n if (!is.und(value)) {\n defaults[key] = value;\n }\n }\n\n return defaults;\n};\nconst DEFAULT_PROPS = ['config', 'onProps', 'onStart', 'onChange', 'onPause', 'onResume', 'onRest'];\nconst RESERVED_PROPS = {\n config: 1,\n from: 1,\n to: 1,\n ref: 1,\n loop: 1,\n reset: 1,\n pause: 1,\n cancel: 1,\n reverse: 1,\n immediate: 1,\n default: 1,\n delay: 1,\n onProps: 1,\n onStart: 1,\n onChange: 1,\n onPause: 1,\n onResume: 1,\n onRest: 1,\n onResolve: 1,\n items: 1,\n trail: 1,\n sort: 1,\n expires: 1,\n initial: 1,\n enter: 1,\n update: 1,\n leave: 1,\n children: 1,\n onDestroyed: 1,\n keys: 1,\n callId: 1,\n parentId: 1\n};\n\nfunction getForwardProps(props) {\n const forward = {};\n let count = 0;\n eachProp(props, (value, prop) => {\n if (!RESERVED_PROPS[prop]) {\n forward[prop] = value;\n count++;\n }\n });\n\n if (count) {\n return forward;\n }\n}\n\nfunction inferTo(props) {\n const to = getForwardProps(props);\n\n if (to) {\n const out = {\n to\n };\n eachProp(props, (val, key) => key in to || (out[key] = val));\n return out;\n }\n\n return _extends({}, props);\n}\nfunction computeGoal(value) {\n value = getFluidValue(value);\n return is.arr(value) ? value.map(computeGoal) : isAnimatedString(value) ? 
Globals.createStringInterpolator({\n range: [0, 1],\n output: [value, value]\n })(1) : value;\n}\nfunction hasProps(props) {\n for (const _ in props) return true;\n\n return false;\n}\nfunction isAsyncTo(to) {\n return is.fun(to) || is.arr(to) && is.obj(to[0]);\n}\nfunction detachRefs(ctrl, ref) {\n var _ctrl$ref;\n\n (_ctrl$ref = ctrl.ref) == null ? void 0 : _ctrl$ref.delete(ctrl);\n ref == null ? void 0 : ref.delete(ctrl);\n}\nfunction replaceRef(ctrl, ref) {\n if (ref && ctrl.ref !== ref) {\n var _ctrl$ref2;\n\n (_ctrl$ref2 = ctrl.ref) == null ? void 0 : _ctrl$ref2.delete(ctrl);\n ref.add(ctrl);\n ctrl.ref = ref;\n }\n}\n\nfunction useChain(refs, timeSteps, timeFrame = 1000) {\n useLayoutEffect(() => {\n if (timeSteps) {\n let prevDelay = 0;\n each(refs, (ref, i) => {\n const controllers = ref.current;\n\n if (controllers.length) {\n let delay = timeFrame * timeSteps[i];\n if (isNaN(delay)) delay = prevDelay;else prevDelay = delay;\n each(controllers, ctrl => {\n each(ctrl.queue, props => {\n const memoizedDelayProp = props.delay;\n\n props.delay = key => delay + callProp(memoizedDelayProp || 0, key);\n });\n ctrl.start();\n });\n }\n });\n } else {\n let p = Promise.resolve();\n each(refs, ref => {\n const controllers = ref.current;\n\n if (controllers.length) {\n const queues = controllers.map(ctrl => {\n const q = ctrl.queue;\n ctrl.queue = [];\n return q;\n });\n p = p.then(() => {\n each(controllers, (ctrl, i) => each(queues[i] || [], update => ctrl.queue.push(update)));\n return Promise.all(ref.start());\n });\n }\n });\n }\n });\n}\n\nconst config = {\n default: {\n tension: 170,\n friction: 26\n },\n gentle: {\n tension: 120,\n friction: 14\n },\n wobbly: {\n tension: 180,\n friction: 12\n },\n stiff: {\n tension: 210,\n friction: 20\n },\n slow: {\n tension: 280,\n friction: 60\n },\n molasses: {\n tension: 280,\n friction: 120\n }\n};\n\nconst linear = t => t;\n\nconst defaults = _extends({}, config.default, {\n mass: 1,\n damping: 1,\n easing: linear,\n clamp: false\n});\n\nclass AnimationConfig {\n constructor() {\n this.tension = void 0;\n this.friction = void 0;\n this.frequency = void 0;\n this.damping = void 0;\n this.mass = void 0;\n this.velocity = 0;\n this.restVelocity = void 0;\n this.precision = void 0;\n this.progress = void 0;\n this.duration = void 0;\n this.easing = void 0;\n this.clamp = void 0;\n this.bounce = void 0;\n this.decay = void 0;\n this.round = void 0;\n Object.assign(this, defaults);\n }\n\n}\nfunction mergeConfig(config, newConfig, defaultConfig) {\n if (defaultConfig) {\n defaultConfig = _extends({}, defaultConfig);\n sanitizeConfig(defaultConfig, newConfig);\n newConfig = _extends({}, defaultConfig, newConfig);\n }\n\n sanitizeConfig(config, newConfig);\n Object.assign(config, newConfig);\n\n for (const key in defaults) {\n if (config[key] == null) {\n config[key] = defaults[key];\n }\n }\n\n let {\n mass,\n frequency,\n damping\n } = config;\n\n if (!is.und(frequency)) {\n if (frequency < 0.01) frequency = 0.01;\n if (damping < 0) damping = 0;\n config.tension = Math.pow(2 * Math.PI / frequency, 2) * mass;\n config.friction = 4 * Math.PI * damping * mass / frequency;\n }\n\n return config;\n}\n\nfunction sanitizeConfig(config, props) {\n if (!is.und(props.decay)) {\n config.duration = undefined;\n } else {\n const isTensionConfig = !is.und(props.tension) || !is.und(props.friction);\n\n if (isTensionConfig || !is.und(props.frequency) || !is.und(props.damping) || !is.und(props.mass)) {\n config.duration = undefined;\n config.decay = undefined;\n 
}\n\n if (isTensionConfig) {\n config.frequency = undefined;\n }\n }\n}\n\nconst emptyArray = [];\nclass Animation {\n constructor() {\n this.changed = false;\n this.values = emptyArray;\n this.toValues = null;\n this.fromValues = emptyArray;\n this.to = void 0;\n this.from = void 0;\n this.config = new AnimationConfig();\n this.immediate = false;\n }\n\n}\n\nfunction scheduleProps(callId, {\n key,\n props,\n defaultProps,\n state,\n actions\n}) {\n return new Promise((resolve, reject) => {\n var _props$cancel;\n\n let delay;\n let timeout;\n let cancel = matchProp((_props$cancel = props.cancel) != null ? _props$cancel : defaultProps == null ? void 0 : defaultProps.cancel, key);\n\n if (cancel) {\n onStart();\n } else {\n if (!is.und(props.pause)) {\n state.paused = matchProp(props.pause, key);\n }\n\n let pause = defaultProps == null ? void 0 : defaultProps.pause;\n\n if (pause !== true) {\n pause = state.paused || matchProp(pause, key);\n }\n\n delay = callProp(props.delay || 0, key);\n\n if (pause) {\n state.resumeQueue.add(onResume);\n actions.pause();\n } else {\n actions.resume();\n onResume();\n }\n }\n\n function onPause() {\n state.resumeQueue.add(onResume);\n state.timeouts.delete(timeout);\n timeout.cancel();\n delay = timeout.time - raf.now();\n }\n\n function onResume() {\n if (delay > 0 && !Globals.skipAnimation) {\n timeout = raf.setTimeout(onStart, delay);\n state.pauseQueue.add(onPause);\n state.timeouts.add(timeout);\n } else {\n onStart();\n }\n }\n\n function onStart() {\n state.pauseQueue.delete(onPause);\n state.timeouts.delete(timeout);\n\n if (callId <= (state.cancelId || 0)) {\n cancel = true;\n }\n\n try {\n actions.start(_extends({}, props, {\n callId,\n cancel\n }), resolve);\n } catch (err) {\n reject(err);\n }\n }\n });\n}\n\nconst getCombinedResult = (target, results) => results.length == 1 ? results[0] : results.some(result => result.cancelled) ? getCancelledResult(target.get()) : results.every(result => result.noop) ? getNoopResult(target.get()) : getFinishedResult(target.get(), results.every(result => result.finished));\nconst getNoopResult = value => ({\n value,\n noop: true,\n finished: true,\n cancelled: false\n});\nconst getFinishedResult = (value, finished, cancelled = false) => ({\n value,\n finished,\n cancelled\n});\nconst getCancelledResult = value => ({\n value,\n cancelled: true,\n finished: false\n});\n\nfunction runAsync(to, props, state, target) {\n const {\n callId,\n parentId,\n onRest\n } = props;\n const {\n asyncTo: prevTo,\n promise: prevPromise\n } = state;\n\n if (!parentId && to === prevTo && !props.reset) {\n return prevPromise;\n }\n\n return state.promise = (async () => {\n state.asyncId = callId;\n state.asyncTo = to;\n const defaultProps = getDefaultProps(props, (value, key) => key === 'onRest' ? 
undefined : value);\n let preventBail;\n let bail;\n const bailPromise = new Promise((resolve, reject) => (preventBail = resolve, bail = reject));\n\n const bailIfEnded = bailSignal => {\n const bailResult = callId <= (state.cancelId || 0) && getCancelledResult(target) || callId !== state.asyncId && getFinishedResult(target, false);\n\n if (bailResult) {\n bailSignal.result = bailResult;\n bail(bailSignal);\n throw bailSignal;\n }\n };\n\n const animate = (arg1, arg2) => {\n const bailSignal = new BailSignal();\n const skipAnimationSignal = new SkipAniamtionSignal();\n return (async () => {\n if (Globals.skipAnimation) {\n stopAsync(state);\n skipAnimationSignal.result = getFinishedResult(target, false);\n bail(skipAnimationSignal);\n throw skipAnimationSignal;\n }\n\n bailIfEnded(bailSignal);\n const props = is.obj(arg1) ? _extends({}, arg1) : _extends({}, arg2, {\n to: arg1\n });\n props.parentId = callId;\n eachProp(defaultProps, (value, key) => {\n if (is.und(props[key])) {\n props[key] = value;\n }\n });\n const result = await target.start(props);\n bailIfEnded(bailSignal);\n\n if (state.paused) {\n await new Promise(resume => {\n state.resumeQueue.add(resume);\n });\n }\n\n return result;\n })();\n };\n\n let result;\n\n if (Globals.skipAnimation) {\n stopAsync(state);\n return getFinishedResult(target, false);\n }\n\n try {\n let animating;\n\n if (is.arr(to)) {\n animating = (async queue => {\n for (const props of queue) {\n await animate(props);\n }\n })(to);\n } else {\n animating = Promise.resolve(to(animate, target.stop.bind(target)));\n }\n\n await Promise.all([animating.then(preventBail), bailPromise]);\n result = getFinishedResult(target.get(), true, false);\n } catch (err) {\n if (err instanceof BailSignal) {\n result = err.result;\n } else if (err instanceof SkipAniamtionSignal) {\n result = err.result;\n } else {\n throw err;\n }\n } finally {\n if (callId == state.asyncId) {\n state.asyncId = parentId;\n state.asyncTo = parentId ? prevTo : undefined;\n state.promise = parentId ? prevPromise : undefined;\n }\n }\n\n if (is.fun(onRest)) {\n raf.batchedUpdates(() => {\n onRest(result, target, target.item);\n });\n }\n\n return result;\n })();\n}\nfunction stopAsync(state, cancelId) {\n flush(state.timeouts, t => t.cancel());\n state.pauseQueue.clear();\n state.resumeQueue.clear();\n state.asyncId = state.asyncTo = state.promise = undefined;\n if (cancelId) state.cancelId = cancelId;\n}\nclass BailSignal extends Error {\n constructor() {\n super('An async animation has been interrupted. 
You see this error because you ' + 'forgot to use `await` or `.catch(...)` on its returned promise.');\n this.result = void 0;\n }\n\n}\nclass SkipAniamtionSignal extends Error {\n constructor() {\n super('SkipAnimationSignal');\n this.result = void 0;\n }\n\n}\n\nconst isFrameValue = value => value instanceof FrameValue;\nlet nextId$1 = 1;\nclass FrameValue extends FluidValue {\n constructor(...args) {\n super(...args);\n this.id = nextId$1++;\n this.key = void 0;\n this._priority = 0;\n }\n\n get priority() {\n return this._priority;\n }\n\n set priority(priority) {\n if (this._priority != priority) {\n this._priority = priority;\n\n this._onPriorityChange(priority);\n }\n }\n\n get() {\n const node = getAnimated(this);\n return node && node.getValue();\n }\n\n to(...args) {\n return Globals.to(this, args);\n }\n\n interpolate(...args) {\n deprecateInterpolate();\n return Globals.to(this, args);\n }\n\n toJSON() {\n return this.get();\n }\n\n observerAdded(count) {\n if (count == 1) this._attach();\n }\n\n observerRemoved(count) {\n if (count == 0) this._detach();\n }\n\n _attach() {}\n\n _detach() {}\n\n _onChange(value, idle = false) {\n callFluidObservers(this, {\n type: 'change',\n parent: this,\n value,\n idle\n });\n }\n\n _onPriorityChange(priority) {\n if (!this.idle) {\n frameLoop.sort(this);\n }\n\n callFluidObservers(this, {\n type: 'priority',\n parent: this,\n priority\n });\n }\n\n}\n\nconst $P = Symbol.for('SpringPhase');\nconst HAS_ANIMATED = 1;\nconst IS_ANIMATING = 2;\nconst IS_PAUSED = 4;\nconst hasAnimated = target => (target[$P] & HAS_ANIMATED) > 0;\nconst isAnimating = target => (target[$P] & IS_ANIMATING) > 0;\nconst isPaused = target => (target[$P] & IS_PAUSED) > 0;\nconst setActiveBit = (target, active) => active ? target[$P] |= IS_ANIMATING | HAS_ANIMATED : target[$P] &= ~IS_ANIMATING;\nconst setPausedBit = (target, paused) => paused ? target[$P] |= IS_PAUSED : target[$P] &= ~IS_PAUSED;\n\nclass SpringValue extends FrameValue {\n constructor(arg1, arg2) {\n super();\n this.key = void 0;\n this.animation = new Animation();\n this.queue = void 0;\n this.defaultProps = {};\n this._state = {\n paused: false,\n pauseQueue: new Set(),\n resumeQueue: new Set(),\n timeouts: new Set()\n };\n this._pendingCalls = new Set();\n this._lastCallId = 0;\n this._lastToId = 0;\n this._memoizedDuration = 0;\n\n if (!is.und(arg1) || !is.und(arg2)) {\n const props = is.obj(arg1) ? _extends({}, arg1) : _extends({}, arg2, {\n from: arg1\n });\n\n if (is.und(props.default)) {\n props.default = true;\n }\n\n this.start(props);\n }\n }\n\n get idle() {\n return !(isAnimating(this) || this._state.asyncTo) || isPaused(this);\n }\n\n get goal() {\n return getFluidValue(this.animation.to);\n }\n\n get velocity() {\n const node = getAnimated(this);\n return node instanceof AnimatedValue ? node.lastVelocity || 0 : node.getPayload().map(node => node.lastVelocity || 0);\n }\n\n get hasAnimated() {\n return hasAnimated(this);\n }\n\n get isAnimating() {\n return isAnimating(this);\n }\n\n get isPaused() {\n return isPaused(this);\n }\n\n advance(dt) {\n let idle = true;\n let changed = false;\n const anim = this.animation;\n let {\n config,\n toValues\n } = anim;\n const payload = getPayload(anim.to);\n\n if (!payload && hasFluidValue(anim.to)) {\n toValues = toArray(getFluidValue(anim.to));\n }\n\n anim.values.forEach((node, i) => {\n if (node.done) return;\n const to = node.constructor == AnimatedString ? 1 : payload ? 
payload[i].lastPosition : toValues[i];\n let finished = anim.immediate;\n let position = to;\n\n if (!finished) {\n position = node.lastPosition;\n\n if (config.tension <= 0) {\n node.done = true;\n return;\n }\n\n let elapsed = node.elapsedTime += dt;\n const from = anim.fromValues[i];\n const v0 = node.v0 != null ? node.v0 : node.v0 = is.arr(config.velocity) ? config.velocity[i] : config.velocity;\n let velocity;\n\n if (!is.und(config.duration)) {\n let p = 1;\n\n if (config.duration > 0) {\n if (this._memoizedDuration !== config.duration) {\n this._memoizedDuration = config.duration;\n\n if (node.durationProgress > 0) {\n node.elapsedTime = config.duration * node.durationProgress;\n elapsed = node.elapsedTime += dt;\n }\n }\n\n p = (config.progress || 0) + elapsed / this._memoizedDuration;\n p = p > 1 ? 1 : p < 0 ? 0 : p;\n node.durationProgress = p;\n }\n\n position = from + config.easing(p) * (to - from);\n velocity = (position - node.lastPosition) / dt;\n finished = p == 1;\n } else if (config.decay) {\n const decay = config.decay === true ? 0.998 : config.decay;\n const e = Math.exp(-(1 - decay) * elapsed);\n position = from + v0 / (1 - decay) * (1 - e);\n finished = Math.abs(node.lastPosition - position) < 0.1;\n velocity = v0 * e;\n } else {\n velocity = node.lastVelocity == null ? v0 : node.lastVelocity;\n const precision = config.precision || (from == to ? 0.005 : Math.min(1, Math.abs(to - from) * 0.001));\n const restVelocity = config.restVelocity || precision / 10;\n const bounceFactor = config.clamp ? 0 : config.bounce;\n const canBounce = !is.und(bounceFactor);\n const isGrowing = from == to ? node.v0 > 0 : from < to;\n let isMoving;\n let isBouncing = false;\n const step = 1;\n const numSteps = Math.ceil(dt / step);\n\n for (let n = 0; n < numSteps; ++n) {\n isMoving = Math.abs(velocity) > restVelocity;\n\n if (!isMoving) {\n finished = Math.abs(to - position) <= precision;\n\n if (finished) {\n break;\n }\n }\n\n if (canBounce) {\n isBouncing = position == to || position > to == isGrowing;\n\n if (isBouncing) {\n velocity = -velocity * bounceFactor;\n position = to;\n }\n }\n\n const springForce = -config.tension * 0.000001 * (position - to);\n const dampingForce = -config.friction * 0.001 * velocity;\n const acceleration = (springForce + dampingForce) / config.mass;\n velocity = velocity + acceleration * step;\n position = position + velocity * step;\n }\n }\n\n node.lastVelocity = velocity;\n\n if (Number.isNaN(position)) {\n console.warn(`Got NaN while animating:`, this);\n finished = true;\n }\n }\n\n if (payload && !payload[i].done) {\n finished = false;\n }\n\n if (finished) {\n node.done = true;\n } else {\n idle = false;\n }\n\n if (node.setValue(position, config.round)) {\n changed = true;\n }\n });\n const node = getAnimated(this);\n const currVal = node.getValue();\n\n if (idle) {\n const finalVal = getFluidValue(anim.to);\n\n if ((currVal !== finalVal || changed) && !config.decay) {\n node.setValue(finalVal);\n\n this._onChange(finalVal);\n } else if (changed && config.decay) {\n this._onChange(currVal);\n }\n\n this._stop();\n } else if (changed) {\n this._onChange(currVal);\n }\n }\n\n set(value) {\n raf.batchedUpdates(() => {\n this._stop();\n\n this._focus(value);\n\n this._set(value);\n });\n return this;\n }\n\n pause() {\n this._update({\n pause: true\n });\n }\n\n resume() {\n this._update({\n pause: false\n });\n }\n\n finish() {\n if (isAnimating(this)) {\n const {\n to,\n config\n } = this.animation;\n raf.batchedUpdates(() => {\n 
this._onStart();\n\n if (!config.decay) {\n this._set(to, false);\n }\n\n this._stop();\n });\n }\n\n return this;\n }\n\n update(props) {\n const queue = this.queue || (this.queue = []);\n queue.push(props);\n return this;\n }\n\n start(to, arg2) {\n let queue;\n\n if (!is.und(to)) {\n queue = [is.obj(to) ? to : _extends({}, arg2, {\n to\n })];\n } else {\n queue = this.queue || [];\n this.queue = [];\n }\n\n return Promise.all(queue.map(props => this._update(props))).then(results => getCombinedResult(this, results));\n }\n\n stop(cancel) {\n const {\n to\n } = this.animation;\n\n this._focus(this.get());\n\n stopAsync(this._state, cancel && this._lastCallId);\n raf.batchedUpdates(() => this._stop(to, cancel));\n return this;\n }\n\n reset() {\n this._update({\n reset: true\n });\n }\n\n eventObserved(event) {\n if (event.type == 'change') {\n this._start();\n } else if (event.type == 'priority') {\n this.priority = event.priority + 1;\n }\n }\n\n _prepareNode(props) {\n const key = this.key || '';\n let {\n to,\n from\n } = props;\n to = is.obj(to) ? to[key] : to;\n\n if (to == null || isAsyncTo(to)) {\n to = undefined;\n }\n\n from = is.obj(from) ? from[key] : from;\n\n if (from == null) {\n from = undefined;\n }\n\n const range = {\n to,\n from\n };\n\n if (!hasAnimated(this)) {\n if (props.reverse) [to, from] = [from, to];\n from = getFluidValue(from);\n\n if (!is.und(from)) {\n this._set(from);\n } else if (!getAnimated(this)) {\n this._set(to);\n }\n }\n\n return range;\n }\n\n _update(_ref, isLoop) {\n let props = _extends({}, _ref);\n\n const {\n key,\n defaultProps\n } = this;\n if (props.default) Object.assign(defaultProps, getDefaultProps(props, (value, prop) => /^on/.test(prop) ? resolveProp(value, key) : value));\n mergeActiveFn(this, props, 'onProps');\n sendEvent(this, 'onProps', props, this);\n\n const range = this._prepareNode(props);\n\n if (Object.isFrozen(this)) {\n throw Error('Cannot animate a `SpringValue` object that is frozen. 
' + 'Did you forget to pass your component to `animated(...)` before animating its props?');\n }\n\n const state = this._state;\n return scheduleProps(++this._lastCallId, {\n key,\n props,\n defaultProps,\n state,\n actions: {\n pause: () => {\n if (!isPaused(this)) {\n setPausedBit(this, true);\n flushCalls(state.pauseQueue);\n sendEvent(this, 'onPause', getFinishedResult(this, checkFinished(this, this.animation.to)), this);\n }\n },\n resume: () => {\n if (isPaused(this)) {\n setPausedBit(this, false);\n\n if (isAnimating(this)) {\n this._resume();\n }\n\n flushCalls(state.resumeQueue);\n sendEvent(this, 'onResume', getFinishedResult(this, checkFinished(this, this.animation.to)), this);\n }\n },\n start: this._merge.bind(this, range)\n }\n }).then(result => {\n if (props.loop && result.finished && !(isLoop && result.noop)) {\n const nextProps = createLoopUpdate(props);\n\n if (nextProps) {\n return this._update(nextProps, true);\n }\n }\n\n return result;\n });\n }\n\n _merge(range, props, resolve) {\n if (props.cancel) {\n this.stop(true);\n return resolve(getCancelledResult(this));\n }\n\n const hasToProp = !is.und(range.to);\n const hasFromProp = !is.und(range.from);\n\n if (hasToProp || hasFromProp) {\n if (props.callId > this._lastToId) {\n this._lastToId = props.callId;\n } else {\n return resolve(getCancelledResult(this));\n }\n }\n\n const {\n key,\n defaultProps,\n animation: anim\n } = this;\n const {\n to: prevTo,\n from: prevFrom\n } = anim;\n let {\n to = prevTo,\n from = prevFrom\n } = range;\n\n if (hasFromProp && !hasToProp && (!props.default || is.und(to))) {\n to = from;\n }\n\n if (props.reverse) [to, from] = [from, to];\n const hasFromChanged = !isEqual(from, prevFrom);\n\n if (hasFromChanged) {\n anim.from = from;\n }\n\n from = getFluidValue(from);\n const hasToChanged = !isEqual(to, prevTo);\n\n if (hasToChanged) {\n this._focus(to);\n }\n\n const hasAsyncTo = isAsyncTo(props.to);\n const {\n config\n } = anim;\n const {\n decay,\n velocity\n } = config;\n\n if (hasToProp || hasFromProp) {\n config.velocity = 0;\n }\n\n if (props.config && !hasAsyncTo) {\n mergeConfig(config, callProp(props.config, key), props.config !== defaultProps.config ? callProp(defaultProps.config, key) : void 0);\n }\n\n let node = getAnimated(this);\n\n if (!node || is.und(to)) {\n return resolve(getFinishedResult(this, true));\n }\n\n const reset = is.und(props.reset) ? hasFromProp && !props.default : !is.und(from) && matchProp(props.reset, key);\n const value = reset ? 
from : this.get();\n const goal = computeGoal(to);\n const isAnimatable = is.num(goal) || is.arr(goal) || isAnimatedString(goal);\n const immediate = !hasAsyncTo && (!isAnimatable || matchProp(defaultProps.immediate || props.immediate, key));\n\n if (hasToChanged) {\n const nodeType = getAnimatedType(to);\n\n if (nodeType !== node.constructor) {\n if (immediate) {\n node = this._set(goal);\n } else throw Error(`Cannot animate between ${node.constructor.name} and ${nodeType.name}, as the \"to\" prop suggests`);\n }\n }\n\n const goalType = node.constructor;\n let started = hasFluidValue(to);\n let finished = false;\n\n if (!started) {\n const hasValueChanged = reset || !hasAnimated(this) && hasFromChanged;\n\n if (hasToChanged || hasValueChanged) {\n finished = isEqual(computeGoal(value), goal);\n started = !finished;\n }\n\n if (!isEqual(anim.immediate, immediate) && !immediate || !isEqual(config.decay, decay) || !isEqual(config.velocity, velocity)) {\n started = true;\n }\n }\n\n if (finished && isAnimating(this)) {\n if (anim.changed && !reset) {\n started = true;\n } else if (!started) {\n this._stop(prevTo);\n }\n }\n\n if (!hasAsyncTo) {\n if (started || hasFluidValue(prevTo)) {\n anim.values = node.getPayload();\n anim.toValues = hasFluidValue(to) ? null : goalType == AnimatedString ? [1] : toArray(goal);\n }\n\n if (anim.immediate != immediate) {\n anim.immediate = immediate;\n\n if (!immediate && !reset) {\n this._set(prevTo);\n }\n }\n\n if (started) {\n const {\n onRest\n } = anim;\n each(ACTIVE_EVENTS, type => mergeActiveFn(this, props, type));\n const result = getFinishedResult(this, checkFinished(this, prevTo));\n flushCalls(this._pendingCalls, result);\n\n this._pendingCalls.add(resolve);\n\n if (anim.changed) raf.batchedUpdates(() => {\n anim.changed = !reset;\n onRest == null ? void 0 : onRest(result, this);\n\n if (reset) {\n callProp(defaultProps.onRest, result);\n } else {\n anim.onStart == null ? 
void 0 : anim.onStart(result, this);\n }\n });\n }\n }\n\n if (reset) {\n this._set(value);\n }\n\n if (hasAsyncTo) {\n resolve(runAsync(props.to, props, this._state, this));\n } else if (started) {\n this._start();\n } else if (isAnimating(this) && !hasToChanged) {\n this._pendingCalls.add(resolve);\n } else {\n resolve(getNoopResult(value));\n }\n }\n\n _focus(value) {\n const anim = this.animation;\n\n if (value !== anim.to) {\n if (getFluidObservers(this)) {\n this._detach();\n }\n\n anim.to = value;\n\n if (getFluidObservers(this)) {\n this._attach();\n }\n }\n }\n\n _attach() {\n let priority = 0;\n const {\n to\n } = this.animation;\n\n if (hasFluidValue(to)) {\n addFluidObserver(to, this);\n\n if (isFrameValue(to)) {\n priority = to.priority + 1;\n }\n }\n\n this.priority = priority;\n }\n\n _detach() {\n const {\n to\n } = this.animation;\n\n if (hasFluidValue(to)) {\n removeFluidObserver(to, this);\n }\n }\n\n _set(arg, idle = true) {\n const value = getFluidValue(arg);\n\n if (!is.und(value)) {\n const oldNode = getAnimated(this);\n\n if (!oldNode || !isEqual(value, oldNode.getValue())) {\n const nodeType = getAnimatedType(value);\n\n if (!oldNode || oldNode.constructor != nodeType) {\n setAnimated(this, nodeType.create(value));\n } else {\n oldNode.setValue(value);\n }\n\n if (oldNode) {\n raf.batchedUpdates(() => {\n this._onChange(value, idle);\n });\n }\n }\n }\n\n return getAnimated(this);\n }\n\n _onStart() {\n const anim = this.animation;\n\n if (!anim.changed) {\n anim.changed = true;\n sendEvent(this, 'onStart', getFinishedResult(this, checkFinished(this, anim.to)), this);\n }\n }\n\n _onChange(value, idle) {\n if (!idle) {\n this._onStart();\n\n callProp(this.animation.onChange, value, this);\n }\n\n callProp(this.defaultProps.onChange, value, this);\n\n super._onChange(value, idle);\n }\n\n _start() {\n const anim = this.animation;\n getAnimated(this).reset(getFluidValue(anim.to));\n\n if (!anim.immediate) {\n anim.fromValues = anim.values.map(node => node.lastPosition);\n }\n\n if (!isAnimating(this)) {\n setActiveBit(this, true);\n\n if (!isPaused(this)) {\n this._resume();\n }\n }\n }\n\n _resume() {\n if (Globals.skipAnimation) {\n this.finish();\n } else {\n frameLoop.start(this);\n }\n }\n\n _stop(goal, cancel) {\n if (isAnimating(this)) {\n setActiveBit(this, false);\n const anim = this.animation;\n each(anim.values, node => {\n node.done = true;\n });\n\n if (anim.toValues) {\n anim.onChange = anim.onPause = anim.onResume = undefined;\n }\n\n callFluidObservers(this, {\n type: 'idle',\n parent: this\n });\n const result = cancel ? getCancelledResult(this.get()) : getFinishedResult(this.get(), checkFinished(this, goal != null ? goal : anim.to));\n flushCalls(this._pendingCalls, result);\n\n if (anim.changed) {\n anim.changed = false;\n sendEvent(this, 'onRest', result, this);\n }\n }\n }\n\n}\n\nfunction checkFinished(target, to) {\n const goal = computeGoal(to);\n const value = computeGoal(target.get());\n return isEqual(value, goal);\n}\n\nfunction createLoopUpdate(props, loop = props.loop, to = props.to) {\n let loopRet = callProp(loop);\n\n if (loopRet) {\n const overrides = loopRet !== true && inferTo(loopRet);\n const reverse = (overrides || props).reverse;\n const reset = !overrides || overrides.reset;\n return createUpdate(_extends({}, props, {\n loop,\n default: false,\n pause: undefined,\n to: !reverse || isAsyncTo(to) ? to : undefined,\n from: reset ? 
props.from : undefined,\n reset\n }, overrides));\n }\n}\nfunction createUpdate(props) {\n const {\n to,\n from\n } = props = inferTo(props);\n const keys = new Set();\n if (is.obj(to)) findDefined(to, keys);\n if (is.obj(from)) findDefined(from, keys);\n props.keys = keys.size ? Array.from(keys) : null;\n return props;\n}\nfunction declareUpdate(props) {\n const update = createUpdate(props);\n\n if (is.und(update.default)) {\n update.default = getDefaultProps(update);\n }\n\n return update;\n}\n\nfunction findDefined(values, keys) {\n eachProp(values, (value, key) => value != null && keys.add(key));\n}\n\nconst ACTIVE_EVENTS = ['onStart', 'onRest', 'onChange', 'onPause', 'onResume'];\n\nfunction mergeActiveFn(target, props, type) {\n target.animation[type] = props[type] !== getDefaultProp(props, type) ? resolveProp(props[type], target.key) : undefined;\n}\n\nfunction sendEvent(target, type, ...args) {\n var _target$animation$typ, _target$animation, _target$defaultProps$, _target$defaultProps;\n\n (_target$animation$typ = (_target$animation = target.animation)[type]) == null ? void 0 : _target$animation$typ.call(_target$animation, ...args);\n (_target$defaultProps$ = (_target$defaultProps = target.defaultProps)[type]) == null ? void 0 : _target$defaultProps$.call(_target$defaultProps, ...args);\n}\n\nconst BATCHED_EVENTS = ['onStart', 'onChange', 'onRest'];\nlet nextId = 1;\nclass Controller {\n constructor(props, flush) {\n this.id = nextId++;\n this.springs = {};\n this.queue = [];\n this.ref = void 0;\n this._flush = void 0;\n this._initialProps = void 0;\n this._lastAsyncId = 0;\n this._active = new Set();\n this._changed = new Set();\n this._started = false;\n this._item = void 0;\n this._state = {\n paused: false,\n pauseQueue: new Set(),\n resumeQueue: new Set(),\n timeouts: new Set()\n };\n this._events = {\n onStart: new Map(),\n onChange: new Map(),\n onRest: new Map()\n };\n this._onFrame = this._onFrame.bind(this);\n\n if (flush) {\n this._flush = flush;\n }\n\n if (props) {\n this.start(_extends({\n default: true\n }, props));\n }\n }\n\n get idle() {\n return !this._state.asyncTo && Object.values(this.springs).every(spring => spring.idle);\n }\n\n get item() {\n return this._item;\n }\n\n set item(item) {\n this._item = item;\n }\n\n get() {\n const values = {};\n this.each((spring, key) => values[key] = spring.get());\n return values;\n }\n\n set(values) {\n for (const key in values) {\n const value = values[key];\n\n if (!is.und(value)) {\n this.springs[key].set(value);\n }\n }\n }\n\n update(props) {\n if (props) {\n this.queue.push(createUpdate(props));\n }\n\n return this;\n }\n\n start(props) {\n let {\n queue\n } = this;\n\n if (props) {\n queue = toArray(props).map(createUpdate);\n } else {\n this.queue = [];\n }\n\n if (this._flush) {\n return this._flush(this, queue);\n }\n\n prepareKeys(this, queue);\n return flushUpdateQueue(this, queue);\n }\n\n stop(arg, keys) {\n if (arg !== !!arg) {\n keys = arg;\n }\n\n if (keys) {\n const springs = this.springs;\n each(toArray(keys), key => springs[key].stop(!!arg));\n } else {\n stopAsync(this._state, this._lastAsyncId);\n this.each(spring => spring.stop(!!arg));\n }\n\n return this;\n }\n\n pause(keys) {\n if (is.und(keys)) {\n this.start({\n pause: true\n });\n } else {\n const springs = this.springs;\n each(toArray(keys), key => springs[key].pause());\n }\n\n return this;\n }\n\n resume(keys) {\n if (is.und(keys)) {\n this.start({\n pause: false\n });\n } else {\n const springs = this.springs;\n each(toArray(keys), key 
=> springs[key].resume());\n }\n\n return this;\n }\n\n each(iterator) {\n eachProp(this.springs, iterator);\n }\n\n _onFrame() {\n const {\n onStart,\n onChange,\n onRest\n } = this._events;\n const active = this._active.size > 0;\n const changed = this._changed.size > 0;\n\n if (active && !this._started || changed && !this._started) {\n this._started = true;\n flush(onStart, ([onStart, result]) => {\n result.value = this.get();\n onStart(result, this, this._item);\n });\n }\n\n const idle = !active && this._started;\n const values = changed || idle && onRest.size ? this.get() : null;\n\n if (changed && onChange.size) {\n flush(onChange, ([onChange, result]) => {\n result.value = values;\n onChange(result, this, this._item);\n });\n }\n\n if (idle) {\n this._started = false;\n flush(onRest, ([onRest, result]) => {\n result.value = values;\n onRest(result, this, this._item);\n });\n }\n }\n\n eventObserved(event) {\n if (event.type == 'change') {\n this._changed.add(event.parent);\n\n if (!event.idle) {\n this._active.add(event.parent);\n }\n } else if (event.type == 'idle') {\n this._active.delete(event.parent);\n } else return;\n\n raf.onFrame(this._onFrame);\n }\n\n}\nfunction flushUpdateQueue(ctrl, queue) {\n return Promise.all(queue.map(props => flushUpdate(ctrl, props))).then(results => getCombinedResult(ctrl, results));\n}\nasync function flushUpdate(ctrl, props, isLoop) {\n const {\n keys,\n to,\n from,\n loop,\n onRest,\n onResolve\n } = props;\n const defaults = is.obj(props.default) && props.default;\n\n if (loop) {\n props.loop = false;\n }\n\n if (to === false) props.to = null;\n if (from === false) props.from = null;\n const asyncTo = is.arr(to) || is.fun(to) ? to : undefined;\n\n if (asyncTo) {\n props.to = undefined;\n props.onRest = undefined;\n\n if (defaults) {\n defaults.onRest = undefined;\n }\n } else {\n each(BATCHED_EVENTS, key => {\n const handler = props[key];\n\n if (is.fun(handler)) {\n const queue = ctrl['_events'][key];\n\n props[key] = ({\n finished,\n cancelled\n }) => {\n const result = queue.get(handler);\n\n if (result) {\n if (!finished) result.finished = false;\n if (cancelled) result.cancelled = true;\n } else {\n queue.set(handler, {\n value: null,\n finished: finished || false,\n cancelled: cancelled || false\n });\n }\n };\n\n if (defaults) {\n defaults[key] = props[key];\n }\n }\n });\n }\n\n const state = ctrl['_state'];\n\n if (props.pause === !state.paused) {\n state.paused = props.pause;\n flushCalls(props.pause ? 
state.pauseQueue : state.resumeQueue);\n } else if (state.paused) {\n props.pause = true;\n }\n\n const promises = (keys || Object.keys(ctrl.springs)).map(key => ctrl.springs[key].start(props));\n const cancel = props.cancel === true || getDefaultProp(props, 'cancel') === true;\n\n if (asyncTo || cancel && state.asyncId) {\n promises.push(scheduleProps(++ctrl['_lastAsyncId'], {\n props,\n state,\n actions: {\n pause: noop,\n resume: noop,\n\n start(props, resolve) {\n if (cancel) {\n stopAsync(state, ctrl['_lastAsyncId']);\n resolve(getCancelledResult(ctrl));\n } else {\n props.onRest = onRest;\n resolve(runAsync(asyncTo, props, state, ctrl));\n }\n }\n\n }\n }));\n }\n\n if (state.paused) {\n await new Promise(resume => {\n state.resumeQueue.add(resume);\n });\n }\n\n const result = getCombinedResult(ctrl, await Promise.all(promises));\n\n if (loop && result.finished && !(isLoop && result.noop)) {\n const nextProps = createLoopUpdate(props, loop, to);\n\n if (nextProps) {\n prepareKeys(ctrl, [nextProps]);\n return flushUpdate(ctrl, nextProps, true);\n }\n }\n\n if (onResolve) {\n raf.batchedUpdates(() => onResolve(result, ctrl, ctrl.item));\n }\n\n return result;\n}\nfunction getSprings(ctrl, props) {\n const springs = _extends({}, ctrl.springs);\n\n if (props) {\n each(toArray(props), props => {\n if (is.und(props.keys)) {\n props = createUpdate(props);\n }\n\n if (!is.obj(props.to)) {\n props = _extends({}, props, {\n to: undefined\n });\n }\n\n prepareSprings(springs, props, key => {\n return createSpring(key);\n });\n });\n }\n\n setSprings(ctrl, springs);\n return springs;\n}\nfunction setSprings(ctrl, springs) {\n eachProp(springs, (spring, key) => {\n if (!ctrl.springs[key]) {\n ctrl.springs[key] = spring;\n addFluidObserver(spring, ctrl);\n }\n });\n}\n\nfunction createSpring(key, observer) {\n const spring = new SpringValue();\n spring.key = key;\n\n if (observer) {\n addFluidObserver(spring, observer);\n }\n\n return spring;\n}\n\nfunction prepareSprings(springs, props, create) {\n if (props.keys) {\n each(props.keys, key => {\n const spring = springs[key] || (springs[key] = create(key));\n spring['_prepareNode'](props);\n });\n }\n}\n\nfunction prepareKeys(ctrl, queue) {\n each(queue, props => {\n prepareSprings(ctrl.springs, props, key => {\n return createSpring(key, ctrl);\n });\n });\n}\n\nfunction _objectWithoutPropertiesLoose(source, excluded) {\n if (source == null) return {};\n var target = {};\n var sourceKeys = Object.keys(source);\n var key, i;\n\n for (i = 0; i < sourceKeys.length; i++) {\n key = sourceKeys[i];\n if (excluded.indexOf(key) >= 0) continue;\n target[key] = source[key];\n }\n\n return target;\n}\n\nconst _excluded$3 = [\"children\"];\nconst SpringContext = _ref => {\n let {\n children\n } = _ref,\n props = _objectWithoutPropertiesLoose(_ref, _excluded$3);\n\n const inherited = useContext(ctx);\n const pause = props.pause || !!inherited.pause,\n immediate = props.immediate || !!inherited.immediate;\n props = useMemoOne(() => ({\n pause,\n immediate\n }), [pause, immediate]);\n const {\n Provider\n } = ctx;\n return React.createElement(Provider, {\n value: props\n }, children);\n};\nconst ctx = makeContext(SpringContext, {});\nSpringContext.Provider = ctx.Provider;\nSpringContext.Consumer = ctx.Consumer;\n\nfunction makeContext(target, init) {\n Object.assign(target, React.createContext(init));\n target.Provider._context = target;\n target.Consumer._context = target;\n return target;\n}\n\nconst SpringRef = () => {\n const current = [];\n\n const SpringRef 
= function SpringRef(props) {\n deprecateDirectCall();\n const results = [];\n each(current, (ctrl, i) => {\n if (is.und(props)) {\n results.push(ctrl.start());\n } else {\n const update = _getProps(props, ctrl, i);\n\n if (update) {\n results.push(ctrl.start(update));\n }\n }\n });\n return results;\n };\n\n SpringRef.current = current;\n\n SpringRef.add = function (ctrl) {\n if (!current.includes(ctrl)) {\n current.push(ctrl);\n }\n };\n\n SpringRef.delete = function (ctrl) {\n const i = current.indexOf(ctrl);\n if (~i) current.splice(i, 1);\n };\n\n SpringRef.pause = function () {\n each(current, ctrl => ctrl.pause(...arguments));\n return this;\n };\n\n SpringRef.resume = function () {\n each(current, ctrl => ctrl.resume(...arguments));\n return this;\n };\n\n SpringRef.set = function (values) {\n each(current, ctrl => ctrl.set(values));\n };\n\n SpringRef.start = function (props) {\n const results = [];\n each(current, (ctrl, i) => {\n if (is.und(props)) {\n results.push(ctrl.start());\n } else {\n const update = this._getProps(props, ctrl, i);\n\n if (update) {\n results.push(ctrl.start(update));\n }\n }\n });\n return results;\n };\n\n SpringRef.stop = function () {\n each(current, ctrl => ctrl.stop(...arguments));\n return this;\n };\n\n SpringRef.update = function (props) {\n each(current, (ctrl, i) => ctrl.update(this._getProps(props, ctrl, i)));\n return this;\n };\n\n const _getProps = function _getProps(arg, ctrl, index) {\n return is.fun(arg) ? arg(index, ctrl) : arg;\n };\n\n SpringRef._getProps = _getProps;\n return SpringRef;\n};\n\nfunction useSprings(length, props, deps) {\n const propsFn = is.fun(props) && props;\n if (propsFn && !deps) deps = [];\n const ref = useMemo(() => propsFn || arguments.length == 3 ? SpringRef() : void 0, []);\n const layoutId = useRef(0);\n const forceUpdate = useForceUpdate();\n const state = useMemo(() => ({\n ctrls: [],\n queue: [],\n\n flush(ctrl, updates) {\n const springs = getSprings(ctrl, updates);\n const canFlushSync = layoutId.current > 0 && !state.queue.length && !Object.keys(springs).some(key => !ctrl.springs[key]);\n return canFlushSync ? flushUpdateQueue(ctrl, updates) : new Promise(resolve => {\n setSprings(ctrl, springs);\n state.queue.push(() => {\n resolve(flushUpdateQueue(ctrl, updates));\n });\n forceUpdate();\n });\n }\n\n }), []);\n const ctrls = useRef([...state.ctrls]);\n const updates = [];\n const prevLength = usePrev(length) || 0;\n useMemo(() => {\n each(ctrls.current.slice(length, prevLength), ctrl => {\n detachRefs(ctrl, ref);\n ctrl.stop(true);\n });\n ctrls.current.length = length;\n declareUpdates(prevLength, length);\n }, [length]);\n useMemo(() => {\n declareUpdates(0, Math.min(prevLength, length));\n }, deps);\n\n function declareUpdates(startIndex, endIndex) {\n for (let i = startIndex; i < endIndex; i++) {\n const ctrl = ctrls.current[i] || (ctrls.current[i] = new Controller(null, state.flush));\n const update = propsFn ? propsFn(i, ctrl) : props[i];\n\n if (update) {\n updates[i] = declareUpdate(update);\n }\n }\n }\n\n const springs = ctrls.current.map((ctrl, i) => getSprings(ctrl, updates[i]));\n const context = useContext(SpringContext);\n const prevContext = usePrev(context);\n const hasContext = context !== prevContext && hasProps(context);\n useLayoutEffect(() => {\n layoutId.current++;\n state.ctrls = ctrls.current;\n const {\n queue\n } = state;\n\n if (queue.length) {\n state.queue = [];\n each(queue, cb => cb());\n }\n\n each(ctrls.current, (ctrl, i) => {\n ref == null ? 
void 0 : ref.add(ctrl);\n\n if (hasContext) {\n ctrl.start({\n default: context\n });\n }\n\n const update = updates[i];\n\n if (update) {\n replaceRef(ctrl, update.ref);\n\n if (ctrl.ref) {\n ctrl.queue.push(update);\n } else {\n ctrl.start(update);\n }\n }\n });\n });\n useOnce(() => () => {\n each(state.ctrls, ctrl => ctrl.stop(true));\n });\n const values = springs.map(x => _extends({}, x));\n return ref ? [values, ref] : values;\n}\n\nfunction useSpring(props, deps) {\n const isFn = is.fun(props);\n const [[values], ref] = useSprings(1, isFn ? props : [props], isFn ? deps || [] : deps);\n return isFn || arguments.length == 2 ? [values, ref] : values;\n}\n\nconst initSpringRef = () => SpringRef();\n\nconst useSpringRef = () => useState(initSpringRef)[0];\n\nfunction useTrail(length, propsArg, deps) {\n const propsFn = is.fun(propsArg) && propsArg;\n if (propsFn && !deps) deps = [];\n let reverse = true;\n const result = useSprings(length, (i, ctrl) => {\n const props = propsFn ? propsFn(i, ctrl) : propsArg;\n reverse = reverse && props.reverse;\n return props;\n }, deps || [{}]);\n const ref = result[1];\n useLayoutEffect(() => {\n each(ref.current, (ctrl, i) => {\n const parent = ref.current[i + (reverse ? 1 : -1)];\n if (parent) ctrl.start({\n to: parent.springs\n });\n });\n }, deps);\n\n if (propsFn || arguments.length == 3) {\n ref['_getProps'] = (propsArg, ctrl, i) => {\n const props = is.fun(propsArg) ? propsArg(i, ctrl) : propsArg;\n\n if (props) {\n const parent = ref.current[i + (props.reverse ? 1 : -1)];\n if (parent) props.to = parent.springs;\n return props;\n }\n };\n\n return result;\n }\n\n return result[0];\n}\n\nlet TransitionPhase;\n\n(function (TransitionPhase) {\n TransitionPhase[\"MOUNT\"] = \"mount\";\n TransitionPhase[\"ENTER\"] = \"enter\";\n TransitionPhase[\"UPDATE\"] = \"update\";\n TransitionPhase[\"LEAVE\"] = \"leave\";\n})(TransitionPhase || (TransitionPhase = {}));\n\nfunction useTransition(data, props, deps) {\n const propsFn = is.fun(props) && props;\n const {\n reset,\n sort,\n trail = 0,\n expires = true,\n onDestroyed,\n ref: propsRef,\n config: propsConfig\n } = propsFn ? propsFn() : props;\n const ref = useMemo(() => propsFn || arguments.length == 3 ? SpringRef() : void 0, []);\n const items = toArray(data);\n const transitions = [];\n const usedTransitions = useRef(null);\n const prevTransitions = reset ? null : usedTransitions.current;\n useLayoutEffect(() => {\n usedTransitions.current = transitions;\n });\n useOnce(() => () => each(usedTransitions.current, t => {\n if (t.expired) {\n clearTimeout(t.expirationId);\n }\n\n detachRefs(t.ctrl, ref);\n t.ctrl.stop(true);\n }));\n const keys = getKeys(items, propsFn ? propsFn() : props, prevTransitions);\n const expired = reset && usedTransitions.current || [];\n useLayoutEffect(() => each(expired, ({\n ctrl,\n item,\n key\n }) => {\n detachRefs(ctrl, ref);\n callProp(onDestroyed, item, key);\n }));\n const reused = [];\n if (prevTransitions) each(prevTransitions, (t, i) => {\n if (t.expired) {\n clearTimeout(t.expirationId);\n expired.push(t);\n } else {\n i = reused[i] = keys.indexOf(t.key);\n if (~i) transitions[i] = t;\n }\n });\n each(items, (item, i) => {\n if (!transitions[i]) {\n transitions[i] = {\n key: keys[i],\n item,\n phase: TransitionPhase.MOUNT,\n ctrl: new Controller()\n };\n transitions[i].ctrl.item = item;\n }\n });\n\n if (reused.length) {\n let i = -1;\n const {\n leave\n } = propsFn ? 
propsFn() : props;\n each(reused, (keyIndex, prevIndex) => {\n const t = prevTransitions[prevIndex];\n\n if (~keyIndex) {\n i = transitions.indexOf(t);\n transitions[i] = _extends({}, t, {\n item: items[keyIndex]\n });\n } else if (leave) {\n transitions.splice(++i, 0, t);\n }\n });\n }\n\n if (is.fun(sort)) {\n transitions.sort((a, b) => sort(a.item, b.item));\n }\n\n let delay = -trail;\n const forceUpdate = useForceUpdate();\n const defaultProps = getDefaultProps(props);\n const changes = new Map();\n each(transitions, (t, i) => {\n const key = t.key;\n const prevPhase = t.phase;\n const p = propsFn ? propsFn() : props;\n let to;\n let phase;\n let propsDelay = callProp(p.delay || 0, key);\n\n if (prevPhase == TransitionPhase.MOUNT) {\n to = p.enter;\n phase = TransitionPhase.ENTER;\n } else {\n const isLeave = keys.indexOf(key) < 0;\n\n if (prevPhase != TransitionPhase.LEAVE) {\n if (isLeave) {\n to = p.leave;\n phase = TransitionPhase.LEAVE;\n } else if (to = p.update) {\n phase = TransitionPhase.UPDATE;\n } else return;\n } else if (!isLeave) {\n to = p.enter;\n phase = TransitionPhase.ENTER;\n } else return;\n }\n\n to = callProp(to, t.item, i);\n to = is.obj(to) ? inferTo(to) : {\n to\n };\n\n if (!to.config) {\n const config = propsConfig || defaultProps.config;\n to.config = callProp(config, t.item, i, phase);\n }\n\n delay += trail;\n\n const payload = _extends({}, defaultProps, {\n delay: propsDelay + delay,\n ref: propsRef,\n immediate: p.immediate,\n reset: false\n }, to);\n\n if (phase == TransitionPhase.ENTER && is.und(payload.from)) {\n const _p = propsFn ? propsFn() : props;\n\n const from = is.und(_p.initial) || prevTransitions ? _p.from : _p.initial;\n payload.from = callProp(from, t.item, i);\n }\n\n const {\n onResolve\n } = payload;\n\n payload.onResolve = result => {\n callProp(onResolve, result);\n const transitions = usedTransitions.current;\n const t = transitions.find(t => t.key === key);\n if (!t) return;\n\n if (result.cancelled && t.phase != TransitionPhase.UPDATE) {\n return;\n }\n\n if (t.ctrl.idle) {\n const idle = transitions.every(t => t.ctrl.idle);\n\n if (t.phase == TransitionPhase.LEAVE) {\n const expiry = callProp(expires, t.item);\n\n if (expiry !== false) {\n const expiryMs = expiry === true ? 0 : expiry;\n t.expired = true;\n\n if (!idle && expiryMs > 0) {\n if (expiryMs <= 0x7fffffff) t.expirationId = setTimeout(forceUpdate, expiryMs);\n return;\n }\n }\n }\n\n if (idle && transitions.some(t => t.expired)) {\n forceUpdate();\n }\n }\n };\n\n const springs = getSprings(t.ctrl, payload);\n changes.set(t, {\n phase,\n springs,\n payload\n });\n });\n const context = useContext(SpringContext);\n const prevContext = usePrev(context);\n const hasContext = context !== prevContext && hasProps(context);\n useLayoutEffect(() => {\n if (hasContext) each(transitions, t => {\n t.ctrl.start({\n default: context\n });\n });\n }, [context]);\n useLayoutEffect(() => {\n each(changes, ({\n phase,\n payload\n }, t) => {\n const {\n ctrl\n } = t;\n t.phase = phase;\n ref == null ? void 0 : ref.add(ctrl);\n\n if (hasContext && phase == TransitionPhase.ENTER) {\n ctrl.start({\n default: context\n });\n }\n\n if (payload) {\n replaceRef(ctrl, payload.ref);\n\n if (ctrl.ref) {\n ctrl.update(payload);\n } else {\n ctrl.start(payload);\n }\n }\n });\n }, reset ? 
void 0 : deps);\n\n const renderTransitions = render => React.createElement(React.Fragment, null, transitions.map((t, i) => {\n const {\n springs\n } = changes.get(t) || t.ctrl;\n const elem = render(_extends({}, springs), t.item, t, i);\n return elem && elem.type ? React.createElement(elem.type, _extends({}, elem.props, {\n key: is.str(t.key) || is.num(t.key) ? t.key : t.ctrl.id,\n ref: elem.ref\n })) : elem;\n }));\n\n return ref ? [renderTransitions, ref] : renderTransitions;\n}\nlet nextKey = 1;\n\nfunction getKeys(items, {\n key,\n keys = key\n}, prevTransitions) {\n if (keys === null) {\n const reused = new Set();\n return items.map(item => {\n const t = prevTransitions && prevTransitions.find(t => t.item === item && t.phase !== TransitionPhase.LEAVE && !reused.has(t));\n\n if (t) {\n reused.add(t);\n return t.key;\n }\n\n return nextKey++;\n });\n }\n\n return is.und(keys) ? items : is.fun(keys) ? items.map(keys) : toArray(keys);\n}\n\nconst _excluded$2 = [\"children\"];\nfunction Spring(_ref) {\n let {\n children\n } = _ref,\n props = _objectWithoutPropertiesLoose(_ref, _excluded$2);\n\n return children(useSpring(props));\n}\n\nconst _excluded$1 = [\"items\", \"children\"];\nfunction Trail(_ref) {\n let {\n items,\n children\n } = _ref,\n props = _objectWithoutPropertiesLoose(_ref, _excluded$1);\n\n const trails = useTrail(items.length, props);\n return items.map((item, index) => {\n const result = children(item, index);\n return is.fun(result) ? result(trails[index]) : result;\n });\n}\n\nconst _excluded = [\"items\", \"children\"];\nfunction Transition(_ref) {\n let {\n items,\n children\n } = _ref,\n props = _objectWithoutPropertiesLoose(_ref, _excluded);\n\n return useTransition(items, props)(children);\n}\n\nclass Interpolation extends FrameValue {\n constructor(source, args) {\n super();\n this.key = void 0;\n this.idle = true;\n this.calc = void 0;\n this._active = new Set();\n this.source = source;\n this.calc = createInterpolator(...args);\n\n const value = this._get();\n\n const nodeType = getAnimatedType(value);\n setAnimated(this, nodeType.create(value));\n }\n\n advance(_dt) {\n const value = this._get();\n\n const oldValue = this.get();\n\n if (!isEqual(value, oldValue)) {\n getAnimated(this).setValue(value);\n\n this._onChange(value, this.idle);\n }\n\n if (!this.idle && checkIdle(this._active)) {\n becomeIdle(this);\n }\n }\n\n _get() {\n const inputs = is.arr(this.source) ? 
this.source.map(getFluidValue) : toArray(getFluidValue(this.source));\n return this.calc(...inputs);\n }\n\n _start() {\n if (this.idle && !checkIdle(this._active)) {\n this.idle = false;\n each(getPayload(this), node => {\n node.done = false;\n });\n\n if (Globals.skipAnimation) {\n raf.batchedUpdates(() => this.advance());\n becomeIdle(this);\n } else {\n frameLoop.start(this);\n }\n }\n }\n\n _attach() {\n let priority = 1;\n each(toArray(this.source), source => {\n if (hasFluidValue(source)) {\n addFluidObserver(source, this);\n }\n\n if (isFrameValue(source)) {\n if (!source.idle) {\n this._active.add(source);\n }\n\n priority = Math.max(priority, source.priority + 1);\n }\n });\n this.priority = priority;\n\n this._start();\n }\n\n _detach() {\n each(toArray(this.source), source => {\n if (hasFluidValue(source)) {\n removeFluidObserver(source, this);\n }\n });\n\n this._active.clear();\n\n becomeIdle(this);\n }\n\n eventObserved(event) {\n if (event.type == 'change') {\n if (event.idle) {\n this.advance();\n } else {\n this._active.add(event.parent);\n\n this._start();\n }\n } else if (event.type == 'idle') {\n this._active.delete(event.parent);\n } else if (event.type == 'priority') {\n this.priority = toArray(this.source).reduce((highest, parent) => Math.max(highest, (isFrameValue(parent) ? parent.priority : 0) + 1), 0);\n }\n }\n\n}\n\nfunction isIdle(source) {\n return source.idle !== false;\n}\n\nfunction checkIdle(active) {\n return !active.size || Array.from(active).every(isIdle);\n}\n\nfunction becomeIdle(self) {\n if (!self.idle) {\n self.idle = true;\n each(getPayload(self), node => {\n node.done = true;\n });\n callFluidObservers(self, {\n type: 'idle',\n parent: self\n });\n }\n}\n\nconst to = (source, ...args) => new Interpolation(source, args);\nconst interpolate = (source, ...args) => (deprecateInterpolate(), new Interpolation(source, args));\n\nGlobals.assign({\n createStringInterpolator,\n to: (source, args) => new Interpolation(source, args)\n});\nconst update = frameLoop.advance;\n\nexport { BailSignal, Controller, FrameValue, Interpolation, Spring, SpringContext, SpringRef, SpringValue, Trail, Transition, config, inferTo, interpolate, to, update, useChain, useSpring, useSpringRef, useSprings, useTrail, useTransition };\n","import { Globals } from '@react-spring/core';\nexport * from '@react-spring/core';\nimport { unstable_batchedUpdates } from 'react-dom';\nimport { eachProp, is, toArray, FluidValue, each, getFluidValue, hasFluidValue, addFluidObserver, removeFluidObserver, callFluidObservers, createStringInterpolator, colors } from '@react-spring/shared';\nimport { AnimatedObject, createHost } from '@react-spring/animated';\n\nfunction _objectWithoutPropertiesLoose(source, excluded) {\n if (source == null) return {};\n var target = {};\n var sourceKeys = Object.keys(source);\n var key, i;\n\n for (i = 0; i < sourceKeys.length; i++) {\n key = sourceKeys[i];\n if (excluded.indexOf(key) >= 0) continue;\n target[key] = source[key];\n }\n\n return target;\n}\n\nconst _excluded$2 = [\"style\", \"children\", \"scrollTop\", \"scrollLeft\"];\nconst isCustomPropRE = /^--/;\n\nfunction dangerousStyleValue(name, value) {\n if (value == null || typeof value === 'boolean' || value === '') return '';\n if (typeof value === 'number' && value !== 0 && !isCustomPropRE.test(name) && !(isUnitlessNumber.hasOwnProperty(name) && isUnitlessNumber[name])) return value + 'px';\n return ('' + value).trim();\n}\n\nconst attributeCache = {};\nfunction applyAnimatedValues(instance, props) 
{\n if (!instance.nodeType || !instance.setAttribute) {\n return false;\n }\n\n const isFilterElement = instance.nodeName === 'filter' || instance.parentNode && instance.parentNode.nodeName === 'filter';\n\n const _ref = props,\n {\n style,\n children,\n scrollTop,\n scrollLeft\n } = _ref,\n attributes = _objectWithoutPropertiesLoose(_ref, _excluded$2);\n\n const values = Object.values(attributes);\n const names = Object.keys(attributes).map(name => isFilterElement || instance.hasAttribute(name) ? name : attributeCache[name] || (attributeCache[name] = name.replace(/([A-Z])/g, n => '-' + n.toLowerCase())));\n\n if (children !== void 0) {\n instance.textContent = children;\n }\n\n for (let name in style) {\n if (style.hasOwnProperty(name)) {\n const value = dangerousStyleValue(name, style[name]);\n\n if (isCustomPropRE.test(name)) {\n instance.style.setProperty(name, value);\n } else {\n instance.style[name] = value;\n }\n }\n }\n\n names.forEach((name, i) => {\n instance.setAttribute(name, values[i]);\n });\n\n if (scrollTop !== void 0) {\n instance.scrollTop = scrollTop;\n }\n\n if (scrollLeft !== void 0) {\n instance.scrollLeft = scrollLeft;\n }\n}\nlet isUnitlessNumber = {\n animationIterationCount: true,\n borderImageOutset: true,\n borderImageSlice: true,\n borderImageWidth: true,\n boxFlex: true,\n boxFlexGroup: true,\n boxOrdinalGroup: true,\n columnCount: true,\n columns: true,\n flex: true,\n flexGrow: true,\n flexPositive: true,\n flexShrink: true,\n flexNegative: true,\n flexOrder: true,\n gridRow: true,\n gridRowEnd: true,\n gridRowSpan: true,\n gridRowStart: true,\n gridColumn: true,\n gridColumnEnd: true,\n gridColumnSpan: true,\n gridColumnStart: true,\n fontWeight: true,\n lineClamp: true,\n lineHeight: true,\n opacity: true,\n order: true,\n orphans: true,\n tabSize: true,\n widows: true,\n zIndex: true,\n zoom: true,\n fillOpacity: true,\n floodOpacity: true,\n stopOpacity: true,\n strokeDasharray: true,\n strokeDashoffset: true,\n strokeMiterlimit: true,\n strokeOpacity: true,\n strokeWidth: true\n};\n\nconst prefixKey = (prefix, key) => prefix + key.charAt(0).toUpperCase() + key.substring(1);\n\nconst prefixes = ['Webkit', 'Ms', 'Moz', 'O'];\nisUnitlessNumber = Object.keys(isUnitlessNumber).reduce((acc, prop) => {\n prefixes.forEach(prefix => acc[prefixKey(prefix, prop)] = acc[prop]);\n return acc;\n}, isUnitlessNumber);\n\nconst _excluded$1 = [\"x\", \"y\", \"z\"];\nconst domTransforms = /^(matrix|translate|scale|rotate|skew)/;\nconst pxTransforms = /^(translate)/;\nconst degTransforms = /^(rotate|skew)/;\n\nconst addUnit = (value, unit) => is.num(value) && value !== 0 ? value + unit : value;\n\nconst isValueIdentity = (value, id) => is.arr(value) ? value.every(v => isValueIdentity(v, id)) : is.num(value) ? value === id : parseFloat(value) === id;\n\nclass AnimatedStyle extends AnimatedObject {\n constructor(_ref) {\n let {\n x,\n y,\n z\n } = _ref,\n style = _objectWithoutPropertiesLoose(_ref, _excluded$1);\n\n const inputs = [];\n const transforms = [];\n\n if (x || y || z) {\n inputs.push([x || 0, y || 0, z || 0]);\n transforms.push(xyz => [`translate3d(${xyz.map(v => addUnit(v, 'px')).join(',')})`, isValueIdentity(xyz, 0)]);\n }\n\n eachProp(style, (value, key) => {\n if (key === 'transform') {\n inputs.push([value || '']);\n transforms.push(transform => [transform, transform === '']);\n } else if (domTransforms.test(key)) {\n delete style[key];\n if (is.und(value)) return;\n const unit = pxTransforms.test(key) ? 'px' : degTransforms.test(key) ? 
'deg' : '';\n inputs.push(toArray(value));\n transforms.push(key === 'rotate3d' ? ([x, y, z, deg]) => [`rotate3d(${x},${y},${z},${addUnit(deg, unit)})`, isValueIdentity(deg, 0)] : input => [`${key}(${input.map(v => addUnit(v, unit)).join(',')})`, isValueIdentity(input, key.startsWith('scale') ? 1 : 0)]);\n }\n });\n\n if (inputs.length) {\n style.transform = new FluidTransform(inputs, transforms);\n }\n\n super(style);\n }\n\n}\n\nclass FluidTransform extends FluidValue {\n constructor(inputs, transforms) {\n super();\n this._value = null;\n this.inputs = inputs;\n this.transforms = transforms;\n }\n\n get() {\n return this._value || (this._value = this._get());\n }\n\n _get() {\n let transform = '';\n let identity = true;\n each(this.inputs, (input, i) => {\n const arg1 = getFluidValue(input[0]);\n const [t, id] = this.transforms[i](is.arr(arg1) ? arg1 : input.map(getFluidValue));\n transform += ' ' + t;\n identity = identity && id;\n });\n return identity ? 'none' : transform;\n }\n\n observerAdded(count) {\n if (count == 1) each(this.inputs, input => each(input, value => hasFluidValue(value) && addFluidObserver(value, this)));\n }\n\n observerRemoved(count) {\n if (count == 0) each(this.inputs, input => each(input, value => hasFluidValue(value) && removeFluidObserver(value, this)));\n }\n\n eventObserved(event) {\n if (event.type == 'change') {\n this._value = null;\n }\n\n callFluidObservers(this, event);\n }\n\n}\n\nconst primitives = ['a', 'abbr', 'address', 'area', 'article', 'aside', 'audio', 'b', 'base', 'bdi', 'bdo', 'big', 'blockquote', 'body', 'br', 'button', 'canvas', 'caption', 'cite', 'code', 'col', 'colgroup', 'data', 'datalist', 'dd', 'del', 'details', 'dfn', 'dialog', 'div', 'dl', 'dt', 'em', 'embed', 'fieldset', 'figcaption', 'figure', 'footer', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'head', 'header', 'hgroup', 'hr', 'html', 'i', 'iframe', 'img', 'input', 'ins', 'kbd', 'keygen', 'label', 'legend', 'li', 'link', 'main', 'map', 'mark', 'menu', 'menuitem', 'meta', 'meter', 'nav', 'noscript', 'object', 'ol', 'optgroup', 'option', 'output', 'p', 'param', 'picture', 'pre', 'progress', 'q', 'rp', 'rt', 'ruby', 's', 'samp', 'script', 'section', 'select', 'small', 'source', 'span', 'strong', 'style', 'sub', 'summary', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'time', 'title', 'tr', 'track', 'u', 'ul', 'var', 'video', 'wbr', 'circle', 'clipPath', 'defs', 'ellipse', 'foreignObject', 'g', 'image', 'line', 'linearGradient', 'mask', 'path', 'pattern', 'polygon', 'polyline', 'radialGradient', 'rect', 'stop', 'svg', 'text', 'tspan'];\n\nconst _excluded = [\"scrollTop\", \"scrollLeft\"];\nGlobals.assign({\n batchedUpdates: unstable_batchedUpdates,\n createStringInterpolator,\n colors\n});\nconst host = createHost(primitives, {\n applyAnimatedValues,\n createAnimatedStyle: style => new AnimatedStyle(style),\n getComponentProps: _ref => {\n let props = _objectWithoutPropertiesLoose(_ref, _excluded);\n\n return props;\n }\n});\nconst animated = host.animated;\n\nexport { animated as a, animated };\n","import { memo, useRef, PropsWithChildren, CSSProperties } from 'react'\nimport { useSpring, animated } from '@react-spring/web'\nimport {\n useTheme,\n useMotionConfig,\n // @ts-ignore\n useMeasure,\n} from '@nivo/core'\nimport { TooltipStateContextDataVisible } from './context'\n\nconst TOOLTIP_OFFSET = 14\n\nconst tooltipStyle = {\n pointerEvents: 'none' as CSSProperties['pointerEvents'],\n position: 'absolute' as CSSProperties['position'],\n zIndex: 10,\n top: 
0,\n left: 0,\n}\n\nconst translate = (x: number, y: number) => `translate(${x}px, ${y}px)`\n\ninterface TooltipWrapperProps {\n position: TooltipStateContextDataVisible['position']\n anchor: TooltipStateContextDataVisible['anchor']\n}\n\nexport const TooltipWrapper = memo>(\n ({ position, anchor, children }) => {\n const theme = useTheme()\n const { animate, config: springConfig } = useMotionConfig()\n const [measureRef, bounds] = useMeasure()\n const previousPosition = useRef<[number, number] | false>(false)\n\n let to = undefined\n let immediate = false\n const hasDimension = bounds.width > 0 && bounds.height > 0\n\n let x = Math.round(position[0])\n let y = Math.round(position[1])\n\n if (hasDimension) {\n if (anchor === 'top') {\n x -= bounds.width / 2\n y -= bounds.height + TOOLTIP_OFFSET\n } else if (anchor === 'right') {\n x += TOOLTIP_OFFSET\n y -= bounds.height / 2\n } else if (anchor === 'bottom') {\n x -= bounds.width / 2\n y += TOOLTIP_OFFSET\n } else if (anchor === 'left') {\n x -= bounds.width + TOOLTIP_OFFSET\n y -= bounds.height / 2\n } else if (anchor === 'center') {\n x -= bounds.width / 2\n y -= bounds.height / 2\n }\n\n to = {\n transform: translate(x, y),\n }\n\n if (!previousPosition.current) {\n immediate = true\n }\n\n previousPosition.current = [x, y]\n }\n\n const animatedProps = useSpring<{\n transform: string\n }>({\n to,\n config: springConfig,\n immediate: !animate || immediate,\n })\n\n const style = {\n ...tooltipStyle,\n ...theme.tooltip,\n transform: animatedProps.transform ?? translate(x, y),\n }\n\n return (\n \n {children}\n \n )\n }\n)\n\nTooltipWrapper.displayName = 'TooltipWrapper'\n","import { CSSProperties, memo } from 'react'\n\ninterface ChipProps {\n size?: number\n color: string\n style?: CSSProperties\n}\n\nexport const Chip = memo(({ size = 12, color, style = {} }) => (\n \n))\n","import { memo, ReactNode } from 'react'\nimport { useTheme, ValueFormat, useValueFormatter } from '@nivo/core'\nimport { Chip } from './Chip'\n\nexport interface BasicTooltipProps {\n id: ReactNode\n value?: number | string | Date\n format?: ValueFormat\n color?: string\n enableChip?: boolean\n /**\n * @deprecated This should be replaced by custom tooltip components.\n */\n renderContent?: () => JSX.Element\n}\n\nexport const BasicTooltip = memo(\n ({ id, value: _value, format, enableChip = false, color, renderContent }) => {\n const theme = useTheme()\n const formatValue = useValueFormatter(format)\n\n let content: JSX.Element\n if (typeof renderContent === 'function') {\n content = renderContent()\n } else {\n let value = _value\n if (formatValue !== undefined && value !== undefined) {\n value = formatValue(value)\n }\n content = (\n
\n {enableChip && }\n {value !== undefined ? (\n \n {id}: {`${value}`}\n \n ) : (\n id\n )}\n
\n )\n }\n\n return
{content}
\n }\n)\n","import { CSSProperties, memo, ReactNode } from 'react'\nimport { useTheme } from '@nivo/core'\n\nconst tableStyle = {\n width: '100%',\n borderCollapse: 'collapse' as CSSProperties['borderCollapse'],\n}\n\ninterface TableTooltipProps {\n title?: ReactNode\n renderContent?: () => JSX.Element\n rows?: ReactNode[][]\n}\n\nexport const TableTooltip = memo(({ title, rows = [], renderContent }: TableTooltipProps) => {\n const theme = useTheme()\n\n if (!rows.length) return null\n\n let content\n if (typeof renderContent === 'function') {\n content = renderContent()\n } else {\n content = (\n
\n {title && title}\n \n \n {rows.map((row, i) => (\n \n {row.map((column, j) => (\n \n ))}\n \n ))}\n \n
\n {column}\n
\n
\n )\n }\n\n return
{content}
\n})\n\nTableTooltip.displayName = 'TableTooltip'\n","import { CSSProperties, memo, useMemo } from 'react'\nimport { useSpring, animated } from '@react-spring/web'\nimport { useTheme, useMotionConfig } from '@nivo/core'\n\ninterface CrosshairLineProps {\n x0: number\n x1: number\n y0: number\n y1: number\n}\n\nexport const CrosshairLine = memo(({ x0, x1, y0, y1 }: CrosshairLineProps) => {\n const theme = useTheme()\n const { animate, config: springConfig } = useMotionConfig()\n const style = useMemo(\n () => ({\n ...theme.crosshair.line,\n pointerEvents: 'none' as CSSProperties['pointerEvents'],\n }),\n [theme.crosshair.line]\n )\n\n const animatedProps = useSpring({\n x1: x0,\n x2: x1,\n y1: y0,\n y2: y1,\n config: springConfig,\n immediate: !animate,\n })\n\n return \n})\n\nCrosshairLine.displayName = 'CrosshairLine'\n","import { memo } from 'react'\nimport { CrosshairLine } from './CrosshairLine'\nimport { CrosshairType } from './types'\n\ninterface CrosshairProps {\n width: number\n height: number\n type: CrosshairType\n x: number\n y: number\n}\n\nexport const Crosshair = memo(({ width, height, type, x, y }: CrosshairProps) => {\n let xLine\n let yLine\n if (type === 'cross') {\n xLine = { x0: x, x1: x, y0: 0, y1: height }\n yLine = { x0: 0, x1: width, y0: y, y1: y }\n } else if (type === 'top-left') {\n xLine = { x0: x, x1: x, y0: 0, y1: y }\n yLine = { x0: 0, x1: x, y0: y, y1: y }\n } else if (type === 'top') {\n xLine = { x0: x, x1: x, y0: 0, y1: y }\n } else if (type === 'top-right') {\n xLine = { x0: x, x1: x, y0: 0, y1: y }\n yLine = { x0: x, x1: width, y0: y, y1: y }\n } else if (type === 'right') {\n yLine = { x0: x, x1: width, y0: y, y1: y }\n } else if (type === 'bottom-right') {\n xLine = { x0: x, x1: x, y0: y, y1: height }\n yLine = { x0: x, x1: width, y0: y, y1: y }\n } else if (type === 'bottom') {\n xLine = { x0: x, x1: x, y0: y, y1: height }\n } else if (type === 'bottom-left') {\n xLine = { x0: x, x1: x, y0: y, y1: height }\n yLine = { x0: 0, x1: x, y0: y, y1: y }\n } else if (type === 'left') {\n yLine = { x0: 0, x1: x, y0: y, y1: y }\n } else if (type === 'x') {\n xLine = { x0: x, x1: x, y0: 0, y1: height }\n } else if (type === 'y') {\n yLine = { x0: 0, x1: width, y0: y, y1: y }\n }\n\n return (\n <>\n {xLine && }\n {yLine && }\n \n )\n})\n\nCrosshair.displayName = 'Crosshair'\n","import { createContext, MouseEvent } from 'react'\nimport { TooltipAnchor } from './types'\n\nexport interface TooltipActionsContextData {\n showTooltipAt: (\n content: JSX.Element,\n position: [number, number],\n anchor?: TooltipAnchor\n ) => void\n showTooltipFromEvent: (content: JSX.Element, event: MouseEvent, anchor?: TooltipAnchor) => void\n hideTooltip: () => void\n}\n\nconst defaultActions: TooltipActionsContextData = {\n showTooltipAt: () => {},\n showTooltipFromEvent: () => {},\n hideTooltip: () => {},\n}\n\nexport const TooltipActionsContext = createContext(defaultActions)\n\nexport interface TooltipStateContextDataVisible {\n isVisible: true\n position: [number, number]\n content: JSX.Element\n anchor: TooltipAnchor\n}\n\nexport interface TooltipStateContextDataHidden {\n isVisible: false\n position: [null, null]\n content: null\n anchor: null\n}\n\nexport type TooltipStateContextData = TooltipStateContextDataVisible | TooltipStateContextDataHidden\n\nexport const hiddenTooltipState: TooltipStateContextDataHidden = {\n isVisible: false,\n position: [null, null],\n content: null,\n anchor: null,\n}\n\nexport const TooltipStateContext = 
createContext(hiddenTooltipState)\n","import { useState, useContext, useCallback, MutableRefObject, MouseEvent, useMemo } from 'react'\nimport {\n TooltipActionsContext,\n TooltipActionsContextData,\n TooltipStateContext,\n TooltipStateContextData,\n hiddenTooltipState,\n} from './context'\nimport { TooltipAnchor } from './types'\n\nexport const useTooltipHandlers = (container: MutableRefObject) => {\n const [state, setState] = useState(hiddenTooltipState)\n\n const showTooltipAt: TooltipActionsContextData['showTooltipAt'] = useCallback(\n (content: JSX.Element, [x, y]: [number, number], anchor: TooltipAnchor = 'top') => {\n setState({\n isVisible: true,\n position: [x, y],\n anchor,\n content,\n })\n },\n [setState]\n )\n\n const showTooltipFromEvent: TooltipActionsContextData['showTooltipFromEvent'] = useCallback(\n (content: JSX.Element, event: MouseEvent, anchor: TooltipAnchor = 'top') => {\n const bounds = container.current.getBoundingClientRect()\n const x = event.clientX - bounds.left\n const y = event.clientY - bounds.top\n\n if (anchor === 'left' || anchor === 'right') {\n if (x < bounds.width / 2) anchor = 'right'\n else anchor = 'left'\n }\n\n setState({\n isVisible: true,\n position: [x, y],\n anchor,\n content,\n })\n },\n [container, setState]\n )\n\n const hideTooltip = useCallback(() => {\n setState(hiddenTooltipState)\n }, [setState])\n\n const actions: TooltipActionsContextData = useMemo(() => {\n return {\n showTooltipAt,\n showTooltipFromEvent,\n hideTooltip,\n }\n }, [showTooltipAt, showTooltipFromEvent, hideTooltip])\n\n return {\n actions,\n state,\n }\n}\n\nexport const useTooltip = () => {\n const context = useContext(TooltipActionsContext)\n if (context === undefined) {\n throw new Error('useTooltip must be used within a TooltipProvider')\n }\n\n return context\n}\n\nexport const useTooltipState = () => {\n const context = useContext(TooltipStateContext)\n if (context === undefined) {\n throw new Error('useTooltipState must be used within a TooltipProvider')\n }\n\n return context\n}\n","import { useTooltipState } from './hooks'\nimport { TooltipWrapper } from './TooltipWrapper'\nimport { TooltipStateContextData, TooltipStateContextDataVisible } from './context'\n\nexport const isVisibleTooltipState = (\n state: TooltipStateContextData\n): state is TooltipStateContextDataVisible => state.isVisible\n\nexport const Tooltip = () => {\n const state = useTooltipState()\n\n if (!isVisibleTooltipState(state)) {\n return null\n }\n\n return (\n \n {state.content}\n \n )\n}\n","import { PropsWithChildren, MutableRefObject } from 'react'\nimport { TooltipActionsContext, TooltipStateContext } from './context'\nimport { useTooltipHandlers } from './hooks'\n\ninterface TooltipProviderProps {\n container: MutableRefObject\n}\n\nexport const TooltipProvider = ({\n container,\n children,\n}: PropsWithChildren) => {\n const { actions, state } = useTooltipHandlers(container)\n\n return (\n \n {children}\n \n )\n}\n","export default function(a, b) {\n return a = +a, b = +b, function(t) {\n return a * (1 - t) + b * t;\n };\n}\n","import number from \"./number.js\";\n\nvar reA = /[-+]?(?:\\d+\\.?\\d*|\\.?\\d+)(?:[eE][-+]?\\d+)?/g,\n reB = new RegExp(reA.source, \"g\");\n\nfunction zero(b) {\n return function() {\n return b;\n };\n}\n\nfunction one(b) {\n return function(t) {\n return b(t) + \"\";\n };\n}\n\nexport default function(a, b) {\n var bi = reA.lastIndex = reB.lastIndex = 0, // scan index for next number in b\n am, // current match in a\n bm, // current match in b\n bs, // 
string preceding current number in b, if any\n i = -1, // index in s\n s = [], // string constants and placeholders\n q = []; // number interpolators\n\n // Coerce inputs to strings.\n a = a + \"\", b = b + \"\";\n\n // Interpolate pairs of numbers in a & b.\n while ((am = reA.exec(a))\n && (bm = reB.exec(b))) {\n if ((bs = bm.index) > bi) { // a string precedes the next number in b\n bs = b.slice(bi, bs);\n if (s[i]) s[i] += bs; // coalesce with previous string\n else s[++i] = bs;\n }\n if ((am = am[0]) === (bm = bm[0])) { // numbers in a & b match\n if (s[i]) s[i] += bm; // coalesce with previous string\n else s[++i] = bm;\n } else { // interpolate non-matching numbers\n s[++i] = null;\n q.push({i: i, x: number(am, bm)});\n }\n bi = reB.lastIndex;\n }\n\n // Add remains of b.\n if (bi < b.length) {\n bs = b.slice(bi);\n if (s[i]) s[i] += bs; // coalesce with previous string\n else s[++i] = bs;\n }\n\n // Special optimization for only a single match.\n // Otherwise, interpolate each of the numbers and rejoin the string.\n return s.length < 2 ? (q[0]\n ? one(q[0].x)\n : zero(b))\n : (b = q.length, function(t) {\n for (var i = 0, o; i < b; ++i) s[(o = q[i]).i] = o.x(t);\n return s.join(\"\");\n });\n}\n","export function initRange(domain, range) {\n switch (arguments.length) {\n case 0: break;\n case 1: this.range(domain); break;\n default: this.range(range).domain(domain); break;\n }\n return this;\n}\n\nexport function initInterpolator(domain, interpolator) {\n switch (arguments.length) {\n case 0: break;\n case 1: {\n if (typeof domain === \"function\") this.interpolator(domain);\n else this.range(domain);\n break;\n }\n default: {\n this.domain(domain);\n if (typeof interpolator === \"function\") this.interpolator(interpolator);\n else this.range(interpolator);\n break;\n }\n }\n return this;\n}\n","import {initRange} from \"./init.js\";\n\nexport const implicit = Symbol(\"implicit\");\n\nexport default function ordinal() {\n var index = new Map(),\n domain = [],\n range = [],\n unknown = implicit;\n\n function scale(d) {\n var key = d + \"\", i = index.get(key);\n if (!i) {\n if (unknown !== implicit) return unknown;\n index.set(key, i = domain.push(d));\n }\n return range[(i - 1) % range.length];\n }\n\n scale.domain = function(_) {\n if (!arguments.length) return domain.slice();\n domain = [], index = new Map();\n for (const value of _) {\n const key = value + \"\";\n if (index.has(key)) continue;\n index.set(key, domain.push(value));\n }\n return scale;\n };\n\n scale.range = function(_) {\n return arguments.length ? (range = Array.from(_), scale) : range.slice();\n };\n\n scale.unknown = function(_) {\n return arguments.length ? 
(unknown = _, scale) : unknown;\n };\n\n scale.copy = function() {\n return ordinal(domain, range).unknown(unknown);\n };\n\n initRange.apply(scale, arguments);\n\n return scale;\n}\n","export default function(specifier) {\n var n = specifier.length / 6 | 0, colors = new Array(n), i = 0;\n while (i < n) colors[i] = \"#\" + specifier.slice(i * 6, ++i * 6);\n return colors;\n}\n","export default function(constructor, factory, prototype) {\n constructor.prototype = factory.prototype = prototype;\n prototype.constructor = constructor;\n}\n\nexport function extend(parent, definition) {\n var prototype = Object.create(parent.prototype);\n for (var key in definition) prototype[key] = definition[key];\n return prototype;\n}\n","import define, {extend} from \"./define.js\";\n\nexport function Color() {}\n\nexport var darker = 0.7;\nexport var brighter = 1 / darker;\n\nvar reI = \"\\\\s*([+-]?\\\\d+)\\\\s*\",\n reN = \"\\\\s*([+-]?\\\\d*\\\\.?\\\\d+(?:[eE][+-]?\\\\d+)?)\\\\s*\",\n reP = \"\\\\s*([+-]?\\\\d*\\\\.?\\\\d+(?:[eE][+-]?\\\\d+)?)%\\\\s*\",\n reHex = /^#([0-9a-f]{3,8})$/,\n reRgbInteger = new RegExp(\"^rgb\\\\(\" + [reI, reI, reI] + \"\\\\)$\"),\n reRgbPercent = new RegExp(\"^rgb\\\\(\" + [reP, reP, reP] + \"\\\\)$\"),\n reRgbaInteger = new RegExp(\"^rgba\\\\(\" + [reI, reI, reI, reN] + \"\\\\)$\"),\n reRgbaPercent = new RegExp(\"^rgba\\\\(\" + [reP, reP, reP, reN] + \"\\\\)$\"),\n reHslPercent = new RegExp(\"^hsl\\\\(\" + [reN, reP, reP] + \"\\\\)$\"),\n reHslaPercent = new RegExp(\"^hsla\\\\(\" + [reN, reP, reP, reN] + \"\\\\)$\");\n\nvar named = {\n aliceblue: 0xf0f8ff,\n antiquewhite: 0xfaebd7,\n aqua: 0x00ffff,\n aquamarine: 0x7fffd4,\n azure: 0xf0ffff,\n beige: 0xf5f5dc,\n bisque: 0xffe4c4,\n black: 0x000000,\n blanchedalmond: 0xffebcd,\n blue: 0x0000ff,\n blueviolet: 0x8a2be2,\n brown: 0xa52a2a,\n burlywood: 0xdeb887,\n cadetblue: 0x5f9ea0,\n chartreuse: 0x7fff00,\n chocolate: 0xd2691e,\n coral: 0xff7f50,\n cornflowerblue: 0x6495ed,\n cornsilk: 0xfff8dc,\n crimson: 0xdc143c,\n cyan: 0x00ffff,\n darkblue: 0x00008b,\n darkcyan: 0x008b8b,\n darkgoldenrod: 0xb8860b,\n darkgray: 0xa9a9a9,\n darkgreen: 0x006400,\n darkgrey: 0xa9a9a9,\n darkkhaki: 0xbdb76b,\n darkmagenta: 0x8b008b,\n darkolivegreen: 0x556b2f,\n darkorange: 0xff8c00,\n darkorchid: 0x9932cc,\n darkred: 0x8b0000,\n darksalmon: 0xe9967a,\n darkseagreen: 0x8fbc8f,\n darkslateblue: 0x483d8b,\n darkslategray: 0x2f4f4f,\n darkslategrey: 0x2f4f4f,\n darkturquoise: 0x00ced1,\n darkviolet: 0x9400d3,\n deeppink: 0xff1493,\n deepskyblue: 0x00bfff,\n dimgray: 0x696969,\n dimgrey: 0x696969,\n dodgerblue: 0x1e90ff,\n firebrick: 0xb22222,\n floralwhite: 0xfffaf0,\n forestgreen: 0x228b22,\n fuchsia: 0xff00ff,\n gainsboro: 0xdcdcdc,\n ghostwhite: 0xf8f8ff,\n gold: 0xffd700,\n goldenrod: 0xdaa520,\n gray: 0x808080,\n green: 0x008000,\n greenyellow: 0xadff2f,\n grey: 0x808080,\n honeydew: 0xf0fff0,\n hotpink: 0xff69b4,\n indianred: 0xcd5c5c,\n indigo: 0x4b0082,\n ivory: 0xfffff0,\n khaki: 0xf0e68c,\n lavender: 0xe6e6fa,\n lavenderblush: 0xfff0f5,\n lawngreen: 0x7cfc00,\n lemonchiffon: 0xfffacd,\n lightblue: 0xadd8e6,\n lightcoral: 0xf08080,\n lightcyan: 0xe0ffff,\n lightgoldenrodyellow: 0xfafad2,\n lightgray: 0xd3d3d3,\n lightgreen: 0x90ee90,\n lightgrey: 0xd3d3d3,\n lightpink: 0xffb6c1,\n lightsalmon: 0xffa07a,\n lightseagreen: 0x20b2aa,\n lightskyblue: 0x87cefa,\n lightslategray: 0x778899,\n lightslategrey: 0x778899,\n lightsteelblue: 0xb0c4de,\n lightyellow: 0xffffe0,\n lime: 0x00ff00,\n limegreen: 0x32cd32,\n linen: 0xfaf0e6,\n magenta: 
0xff00ff,\n maroon: 0x800000,\n mediumaquamarine: 0x66cdaa,\n mediumblue: 0x0000cd,\n mediumorchid: 0xba55d3,\n mediumpurple: 0x9370db,\n mediumseagreen: 0x3cb371,\n mediumslateblue: 0x7b68ee,\n mediumspringgreen: 0x00fa9a,\n mediumturquoise: 0x48d1cc,\n mediumvioletred: 0xc71585,\n midnightblue: 0x191970,\n mintcream: 0xf5fffa,\n mistyrose: 0xffe4e1,\n moccasin: 0xffe4b5,\n navajowhite: 0xffdead,\n navy: 0x000080,\n oldlace: 0xfdf5e6,\n olive: 0x808000,\n olivedrab: 0x6b8e23,\n orange: 0xffa500,\n orangered: 0xff4500,\n orchid: 0xda70d6,\n palegoldenrod: 0xeee8aa,\n palegreen: 0x98fb98,\n paleturquoise: 0xafeeee,\n palevioletred: 0xdb7093,\n papayawhip: 0xffefd5,\n peachpuff: 0xffdab9,\n peru: 0xcd853f,\n pink: 0xffc0cb,\n plum: 0xdda0dd,\n powderblue: 0xb0e0e6,\n purple: 0x800080,\n rebeccapurple: 0x663399,\n red: 0xff0000,\n rosybrown: 0xbc8f8f,\n royalblue: 0x4169e1,\n saddlebrown: 0x8b4513,\n salmon: 0xfa8072,\n sandybrown: 0xf4a460,\n seagreen: 0x2e8b57,\n seashell: 0xfff5ee,\n sienna: 0xa0522d,\n silver: 0xc0c0c0,\n skyblue: 0x87ceeb,\n slateblue: 0x6a5acd,\n slategray: 0x708090,\n slategrey: 0x708090,\n snow: 0xfffafa,\n springgreen: 0x00ff7f,\n steelblue: 0x4682b4,\n tan: 0xd2b48c,\n teal: 0x008080,\n thistle: 0xd8bfd8,\n tomato: 0xff6347,\n turquoise: 0x40e0d0,\n violet: 0xee82ee,\n wheat: 0xf5deb3,\n white: 0xffffff,\n whitesmoke: 0xf5f5f5,\n yellow: 0xffff00,\n yellowgreen: 0x9acd32\n};\n\ndefine(Color, color, {\n copy: function(channels) {\n return Object.assign(new this.constructor, this, channels);\n },\n displayable: function() {\n return this.rgb().displayable();\n },\n hex: color_formatHex, // Deprecated! Use color.formatHex.\n formatHex: color_formatHex,\n formatHsl: color_formatHsl,\n formatRgb: color_formatRgb,\n toString: color_formatRgb\n});\n\nfunction color_formatHex() {\n return this.rgb().formatHex();\n}\n\nfunction color_formatHsl() {\n return hslConvert(this).formatHsl();\n}\n\nfunction color_formatRgb() {\n return this.rgb().formatRgb();\n}\n\nexport default function color(format) {\n var m, l;\n format = (format + \"\").trim().toLowerCase();\n return (m = reHex.exec(format)) ? (l = m[1].length, m = parseInt(m[1], 16), l === 6 ? rgbn(m) // #ff0000\n : l === 3 ? new Rgb((m >> 8 & 0xf) | (m >> 4 & 0xf0), (m >> 4 & 0xf) | (m & 0xf0), ((m & 0xf) << 4) | (m & 0xf), 1) // #f00\n : l === 8 ? rgba(m >> 24 & 0xff, m >> 16 & 0xff, m >> 8 & 0xff, (m & 0xff) / 0xff) // #ff000000\n : l === 4 ? rgba((m >> 12 & 0xf) | (m >> 8 & 0xf0), (m >> 8 & 0xf) | (m >> 4 & 0xf0), (m >> 4 & 0xf) | (m & 0xf0), (((m & 0xf) << 4) | (m & 0xf)) / 0xff) // #f000\n : null) // invalid hex\n : (m = reRgbInteger.exec(format)) ? new Rgb(m[1], m[2], m[3], 1) // rgb(255, 0, 0)\n : (m = reRgbPercent.exec(format)) ? new Rgb(m[1] * 255 / 100, m[2] * 255 / 100, m[3] * 255 / 100, 1) // rgb(100%, 0%, 0%)\n : (m = reRgbaInteger.exec(format)) ? rgba(m[1], m[2], m[3], m[4]) // rgba(255, 0, 0, 1)\n : (m = reRgbaPercent.exec(format)) ? rgba(m[1] * 255 / 100, m[2] * 255 / 100, m[3] * 255 / 100, m[4]) // rgb(100%, 0%, 0%, 1)\n : (m = reHslPercent.exec(format)) ? hsla(m[1], m[2] / 100, m[3] / 100, 1) // hsl(120, 50%, 50%)\n : (m = reHslaPercent.exec(format)) ? hsla(m[1], m[2] / 100, m[3] / 100, m[4]) // hsla(120, 50%, 50%, 1)\n : named.hasOwnProperty(format) ? rgbn(named[format]) // eslint-disable-line no-prototype-builtins\n : format === \"transparent\" ? 
new Rgb(NaN, NaN, NaN, 0)\n : null;\n}\n\nfunction rgbn(n) {\n return new Rgb(n >> 16 & 0xff, n >> 8 & 0xff, n & 0xff, 1);\n}\n\nfunction rgba(r, g, b, a) {\n if (a <= 0) r = g = b = NaN;\n return new Rgb(r, g, b, a);\n}\n\nexport function rgbConvert(o) {\n if (!(o instanceof Color)) o = color(o);\n if (!o) return new Rgb;\n o = o.rgb();\n return new Rgb(o.r, o.g, o.b, o.opacity);\n}\n\nexport function rgb(r, g, b, opacity) {\n return arguments.length === 1 ? rgbConvert(r) : new Rgb(r, g, b, opacity == null ? 1 : opacity);\n}\n\nexport function Rgb(r, g, b, opacity) {\n this.r = +r;\n this.g = +g;\n this.b = +b;\n this.opacity = +opacity;\n}\n\ndefine(Rgb, rgb, extend(Color, {\n brighter: function(k) {\n k = k == null ? brighter : Math.pow(brighter, k);\n return new Rgb(this.r * k, this.g * k, this.b * k, this.opacity);\n },\n darker: function(k) {\n k = k == null ? darker : Math.pow(darker, k);\n return new Rgb(this.r * k, this.g * k, this.b * k, this.opacity);\n },\n rgb: function() {\n return this;\n },\n displayable: function() {\n return (-0.5 <= this.r && this.r < 255.5)\n && (-0.5 <= this.g && this.g < 255.5)\n && (-0.5 <= this.b && this.b < 255.5)\n && (0 <= this.opacity && this.opacity <= 1);\n },\n hex: rgb_formatHex, // Deprecated! Use color.formatHex.\n formatHex: rgb_formatHex,\n formatRgb: rgb_formatRgb,\n toString: rgb_formatRgb\n}));\n\nfunction rgb_formatHex() {\n return \"#\" + hex(this.r) + hex(this.g) + hex(this.b);\n}\n\nfunction rgb_formatRgb() {\n var a = this.opacity; a = isNaN(a) ? 1 : Math.max(0, Math.min(1, a));\n return (a === 1 ? \"rgb(\" : \"rgba(\")\n + Math.max(0, Math.min(255, Math.round(this.r) || 0)) + \", \"\n + Math.max(0, Math.min(255, Math.round(this.g) || 0)) + \", \"\n + Math.max(0, Math.min(255, Math.round(this.b) || 0))\n + (a === 1 ? \")\" : \", \" + a + \")\");\n}\n\nfunction hex(value) {\n value = Math.max(0, Math.min(255, Math.round(value) || 0));\n return (value < 16 ? \"0\" : \"\") + value.toString(16);\n}\n\nfunction hsla(h, s, l, a) {\n if (a <= 0) h = s = l = NaN;\n else if (l <= 0 || l >= 1) h = s = NaN;\n else if (s <= 0) h = NaN;\n return new Hsl(h, s, l, a);\n}\n\nexport function hslConvert(o) {\n if (o instanceof Hsl) return new Hsl(o.h, o.s, o.l, o.opacity);\n if (!(o instanceof Color)) o = color(o);\n if (!o) return new Hsl;\n if (o instanceof Hsl) return o;\n o = o.rgb();\n var r = o.r / 255,\n g = o.g / 255,\n b = o.b / 255,\n min = Math.min(r, g, b),\n max = Math.max(r, g, b),\n h = NaN,\n s = max - min,\n l = (max + min) / 2;\n if (s) {\n if (r === max) h = (g - b) / s + (g < b) * 6;\n else if (g === max) h = (b - r) / s + 2;\n else h = (r - g) / s + 4;\n s /= l < 0.5 ? max + min : 2 - max - min;\n h *= 60;\n } else {\n s = l > 0 && l < 1 ? 0 : h;\n }\n return new Hsl(h, s, l, o.opacity);\n}\n\nexport function hsl(h, s, l, opacity) {\n return arguments.length === 1 ? hslConvert(h) : new Hsl(h, s, l, opacity == null ? 1 : opacity);\n}\n\nfunction Hsl(h, s, l, opacity) {\n this.h = +h;\n this.s = +s;\n this.l = +l;\n this.opacity = +opacity;\n}\n\ndefine(Hsl, hsl, extend(Color, {\n brighter: function(k) {\n k = k == null ? brighter : Math.pow(brighter, k);\n return new Hsl(this.h, this.s, this.l * k, this.opacity);\n },\n darker: function(k) {\n k = k == null ? darker : Math.pow(darker, k);\n return new Hsl(this.h, this.s, this.l * k, this.opacity);\n },\n rgb: function() {\n var h = this.h % 360 + (this.h < 0) * 360,\n s = isNaN(h) || isNaN(this.s) ? 0 : this.s,\n l = this.l,\n m2 = l + (l < 0.5 ? 
l : 1 - l) * s,\n m1 = 2 * l - m2;\n return new Rgb(\n hsl2rgb(h >= 240 ? h - 240 : h + 120, m1, m2),\n hsl2rgb(h, m1, m2),\n hsl2rgb(h < 120 ? h + 240 : h - 120, m1, m2),\n this.opacity\n );\n },\n displayable: function() {\n return (0 <= this.s && this.s <= 1 || isNaN(this.s))\n && (0 <= this.l && this.l <= 1)\n && (0 <= this.opacity && this.opacity <= 1);\n },\n formatHsl: function() {\n var a = this.opacity; a = isNaN(a) ? 1 : Math.max(0, Math.min(1, a));\n return (a === 1 ? \"hsl(\" : \"hsla(\")\n + (this.h || 0) + \", \"\n + (this.s || 0) * 100 + \"%, \"\n + (this.l || 0) * 100 + \"%\"\n + (a === 1 ? \")\" : \", \" + a + \")\");\n }\n}));\n\n/* From FvD 13.37, CSS Color Module Level 3 */\nfunction hsl2rgb(h, m1, m2) {\n return (h < 60 ? m1 + (m2 - m1) * h / 60\n : h < 180 ? m2\n : h < 240 ? m1 + (m2 - m1) * (240 - h) / 60\n : m1) * 255;\n}\n","export function basis(t1, v0, v1, v2, v3) {\n var t2 = t1 * t1, t3 = t2 * t1;\n return ((1 - 3 * t1 + 3 * t2 - t3) * v0\n + (4 - 6 * t2 + 3 * t3) * v1\n + (1 + 3 * t1 + 3 * t2 - 3 * t3) * v2\n + t3 * v3) / 6;\n}\n\nexport default function(values) {\n var n = values.length - 1;\n return function(t) {\n var i = t <= 0 ? (t = 0) : t >= 1 ? (t = 1, n - 1) : Math.floor(t * n),\n v1 = values[i],\n v2 = values[i + 1],\n v0 = i > 0 ? values[i - 1] : 2 * v1 - v2,\n v3 = i < n - 1 ? values[i + 2] : 2 * v2 - v1;\n return basis((t - i / n) * n, v0, v1, v2, v3);\n };\n}\n","export default x => () => x;\n","import constant from \"./constant.js\";\n\nfunction linear(a, d) {\n return function(t) {\n return a + t * d;\n };\n}\n\nfunction exponential(a, b, y) {\n return a = Math.pow(a, y), b = Math.pow(b, y) - a, y = 1 / y, function(t) {\n return Math.pow(a + t * b, y);\n };\n}\n\nexport function hue(a, b) {\n var d = b - a;\n return d ? linear(a, d > 180 || d < -180 ? d - 360 * Math.round(d / 360) : d) : constant(isNaN(a) ? b : a);\n}\n\nexport function gamma(y) {\n return (y = +y) === 1 ? nogamma : function(a, b) {\n return b - a ? exponential(a, b, y) : constant(isNaN(a) ? b : a);\n };\n}\n\nexport default function nogamma(a, b) {\n var d = b - a;\n return d ? linear(a, d) : constant(isNaN(a) ? 
b : a);\n}\n","import {rgb as colorRgb} from \"d3-color\";\nimport basis from \"./basis.js\";\nimport basisClosed from \"./basisClosed.js\";\nimport nogamma, {gamma} from \"./color.js\";\n\nexport default (function rgbGamma(y) {\n var color = gamma(y);\n\n function rgb(start, end) {\n var r = color((start = colorRgb(start)).r, (end = colorRgb(end)).r),\n g = color(start.g, end.g),\n b = color(start.b, end.b),\n opacity = nogamma(start.opacity, end.opacity);\n return function(t) {\n start.r = r(t);\n start.g = g(t);\n start.b = b(t);\n start.opacity = opacity(t);\n return start + \"\";\n };\n }\n\n rgb.gamma = rgbGamma;\n\n return rgb;\n})(1);\n\nfunction rgbSpline(spline) {\n return function(colors) {\n var n = colors.length,\n r = new Array(n),\n g = new Array(n),\n b = new Array(n),\n i, color;\n for (i = 0; i < n; ++i) {\n color = colorRgb(colors[i]);\n r[i] = color.r || 0;\n g[i] = color.g || 0;\n b[i] = color.b || 0;\n }\n r = spline(r);\n g = spline(g);\n b = spline(b);\n color.opacity = 1;\n return function(t) {\n color.r = r(t);\n color.g = g(t);\n color.b = b(t);\n return color + \"\";\n };\n };\n}\n\nexport var rgbBasis = rgbSpline(basis);\nexport var rgbBasisClosed = rgbSpline(basisClosed);\n","import {interpolateRgbBasis} from \"d3-interpolate\";\n\nexport default scheme => interpolateRgbBasis(scheme[scheme.length - 1]);\n","import {basis} from \"./basis.js\";\n\nexport default function(values) {\n var n = values.length;\n return function(t) {\n var i = Math.floor(((t %= 1) < 0 ? ++t : t) * n),\n v0 = values[(i + n - 1) % n],\n v1 = values[i % n],\n v2 = values[(i + 1) % n],\n v3 = values[(i + 2) % n];\n return basis((t - i / n) * n, v0, v1, v2, v3);\n };\n}\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"d8b365f5f5f55ab4ac\",\n \"a6611adfc27d80cdc1018571\",\n \"a6611adfc27df5f5f580cdc1018571\",\n \"8c510ad8b365f6e8c3c7eae55ab4ac01665e\",\n \"8c510ad8b365f6e8c3f5f5f5c7eae55ab4ac01665e\",\n \"8c510abf812ddfc27df6e8c3c7eae580cdc135978f01665e\",\n \"8c510abf812ddfc27df6e8c3f5f5f5c7eae580cdc135978f01665e\",\n \"5430058c510abf812ddfc27df6e8c3c7eae580cdc135978f01665e003c30\",\n \"5430058c510abf812ddfc27df6e8c3f5f5f5c7eae580cdc135978f01665e003c30\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"af8dc3f7f7f77fbf7b\",\n \"7b3294c2a5cfa6dba0008837\",\n \"7b3294c2a5cff7f7f7a6dba0008837\",\n \"762a83af8dc3e7d4e8d9f0d37fbf7b1b7837\",\n \"762a83af8dc3e7d4e8f7f7f7d9f0d37fbf7b1b7837\",\n \"762a839970abc2a5cfe7d4e8d9f0d3a6dba05aae611b7837\",\n \"762a839970abc2a5cfe7d4e8f7f7f7d9f0d3a6dba05aae611b7837\",\n \"40004b762a839970abc2a5cfe7d4e8d9f0d3a6dba05aae611b783700441b\",\n \"40004b762a839970abc2a5cfe7d4e8f7f7f7d9f0d3a6dba05aae611b783700441b\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"e9a3c9f7f7f7a1d76a\",\n \"d01c8bf1b6dab8e1864dac26\",\n \"d01c8bf1b6daf7f7f7b8e1864dac26\",\n \"c51b7de9a3c9fde0efe6f5d0a1d76a4d9221\",\n \"c51b7de9a3c9fde0eff7f7f7e6f5d0a1d76a4d9221\",\n \"c51b7dde77aef1b6dafde0efe6f5d0b8e1867fbc414d9221\",\n \"c51b7dde77aef1b6dafde0eff7f7f7e6f5d0b8e1867fbc414d9221\",\n \"8e0152c51b7dde77aef1b6dafde0efe6f5d0b8e1867fbc414d9221276419\",\n \"8e0152c51b7dde77aef1b6dafde0eff7f7f7e6f5d0b8e1867fbc414d9221276419\"\n).map(colors);\n\nexport default 
ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"998ec3f7f7f7f1a340\",\n \"5e3c99b2abd2fdb863e66101\",\n \"5e3c99b2abd2f7f7f7fdb863e66101\",\n \"542788998ec3d8daebfee0b6f1a340b35806\",\n \"542788998ec3d8daebf7f7f7fee0b6f1a340b35806\",\n \"5427888073acb2abd2d8daebfee0b6fdb863e08214b35806\",\n \"5427888073acb2abd2d8daebf7f7f7fee0b6fdb863e08214b35806\",\n \"2d004b5427888073acb2abd2d8daebfee0b6fdb863e08214b358067f3b08\",\n \"2d004b5427888073acb2abd2d8daebf7f7f7fee0b6fdb863e08214b358067f3b08\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"ef8a62f7f7f767a9cf\",\n \"ca0020f4a58292c5de0571b0\",\n \"ca0020f4a582f7f7f792c5de0571b0\",\n \"b2182bef8a62fddbc7d1e5f067a9cf2166ac\",\n \"b2182bef8a62fddbc7f7f7f7d1e5f067a9cf2166ac\",\n \"b2182bd6604df4a582fddbc7d1e5f092c5de4393c32166ac\",\n \"b2182bd6604df4a582fddbc7f7f7f7d1e5f092c5de4393c32166ac\",\n \"67001fb2182bd6604df4a582fddbc7d1e5f092c5de4393c32166ac053061\",\n \"67001fb2182bd6604df4a582fddbc7f7f7f7d1e5f092c5de4393c32166ac053061\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"ef8a62ffffff999999\",\n \"ca0020f4a582bababa404040\",\n \"ca0020f4a582ffffffbababa404040\",\n \"b2182bef8a62fddbc7e0e0e09999994d4d4d\",\n \"b2182bef8a62fddbc7ffffffe0e0e09999994d4d4d\",\n \"b2182bd6604df4a582fddbc7e0e0e0bababa8787874d4d4d\",\n \"b2182bd6604df4a582fddbc7ffffffe0e0e0bababa8787874d4d4d\",\n \"67001fb2182bd6604df4a582fddbc7e0e0e0bababa8787874d4d4d1a1a1a\",\n \"67001fb2182bd6604df4a582fddbc7ffffffe0e0e0bababa8787874d4d4d1a1a1a\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"fc8d59ffffbf91bfdb\",\n \"d7191cfdae61abd9e92c7bb6\",\n \"d7191cfdae61ffffbfabd9e92c7bb6\",\n \"d73027fc8d59fee090e0f3f891bfdb4575b4\",\n \"d73027fc8d59fee090ffffbfe0f3f891bfdb4575b4\",\n \"d73027f46d43fdae61fee090e0f3f8abd9e974add14575b4\",\n \"d73027f46d43fdae61fee090ffffbfe0f3f8abd9e974add14575b4\",\n \"a50026d73027f46d43fdae61fee090e0f3f8abd9e974add14575b4313695\",\n \"a50026d73027f46d43fdae61fee090ffffbfe0f3f8abd9e974add14575b4313695\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"fc8d59ffffbf91cf60\",\n \"d7191cfdae61a6d96a1a9641\",\n \"d7191cfdae61ffffbfa6d96a1a9641\",\n \"d73027fc8d59fee08bd9ef8b91cf601a9850\",\n \"d73027fc8d59fee08bffffbfd9ef8b91cf601a9850\",\n \"d73027f46d43fdae61fee08bd9ef8ba6d96a66bd631a9850\",\n \"d73027f46d43fdae61fee08bffffbfd9ef8ba6d96a66bd631a9850\",\n \"a50026d73027f46d43fdae61fee08bd9ef8ba6d96a66bd631a9850006837\",\n \"a50026d73027f46d43fdae61fee08bffffbfd9ef8ba6d96a66bd631a9850006837\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"fc8d59ffffbf99d594\",\n \"d7191cfdae61abdda42b83ba\",\n \"d7191cfdae61ffffbfabdda42b83ba\",\n \"d53e4ffc8d59fee08be6f59899d5943288bd\",\n \"d53e4ffc8d59fee08bffffbfe6f59899d5943288bd\",\n \"d53e4ff46d43fdae61fee08be6f598abdda466c2a53288bd\",\n \"d53e4ff46d43fdae61fee08bffffbfe6f598abdda466c2a53288bd\",\n 
\"9e0142d53e4ff46d43fdae61fee08be6f598abdda466c2a53288bd5e4fa2\",\n \"9e0142d53e4ff46d43fdae61fee08bffffbfe6f598abdda466c2a53288bd5e4fa2\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"deebf79ecae13182bd\",\n \"eff3ffbdd7e76baed62171b5\",\n \"eff3ffbdd7e76baed63182bd08519c\",\n \"eff3ffc6dbef9ecae16baed63182bd08519c\",\n \"eff3ffc6dbef9ecae16baed64292c62171b5084594\",\n \"f7fbffdeebf7c6dbef9ecae16baed64292c62171b5084594\",\n \"f7fbffdeebf7c6dbef9ecae16baed64292c62171b508519c08306b\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"e5f5e0a1d99b31a354\",\n \"edf8e9bae4b374c476238b45\",\n \"edf8e9bae4b374c47631a354006d2c\",\n \"edf8e9c7e9c0a1d99b74c47631a354006d2c\",\n \"edf8e9c7e9c0a1d99b74c47641ab5d238b45005a32\",\n \"f7fcf5e5f5e0c7e9c0a1d99b74c47641ab5d238b45005a32\",\n \"f7fcf5e5f5e0c7e9c0a1d99b74c47641ab5d238b45006d2c00441b\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"f0f0f0bdbdbd636363\",\n \"f7f7f7cccccc969696525252\",\n \"f7f7f7cccccc969696636363252525\",\n \"f7f7f7d9d9d9bdbdbd969696636363252525\",\n \"f7f7f7d9d9d9bdbdbd969696737373525252252525\",\n \"fffffff0f0f0d9d9d9bdbdbd969696737373525252252525\",\n \"fffffff0f0f0d9d9d9bdbdbd969696737373525252252525000000\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"fee6cefdae6be6550d\",\n \"feeddefdbe85fd8d3cd94701\",\n \"feeddefdbe85fd8d3ce6550da63603\",\n \"feeddefdd0a2fdae6bfd8d3ce6550da63603\",\n \"feeddefdd0a2fdae6bfd8d3cf16913d948018c2d04\",\n \"fff5ebfee6cefdd0a2fdae6bfd8d3cf16913d948018c2d04\",\n \"fff5ebfee6cefdd0a2fdae6bfd8d3cf16913d94801a636037f2704\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"efedf5bcbddc756bb1\",\n \"f2f0f7cbc9e29e9ac86a51a3\",\n \"f2f0f7cbc9e29e9ac8756bb154278f\",\n \"f2f0f7dadaebbcbddc9e9ac8756bb154278f\",\n \"f2f0f7dadaebbcbddc9e9ac8807dba6a51a34a1486\",\n \"fcfbfdefedf5dadaebbcbddc9e9ac8807dba6a51a34a1486\",\n \"fcfbfdefedf5dadaebbcbddc9e9ac8807dba6a51a354278f3f007d\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"fee0d2fc9272de2d26\",\n \"fee5d9fcae91fb6a4acb181d\",\n \"fee5d9fcae91fb6a4ade2d26a50f15\",\n \"fee5d9fcbba1fc9272fb6a4ade2d26a50f15\",\n \"fee5d9fcbba1fc9272fb6a4aef3b2ccb181d99000d\",\n \"fff5f0fee0d2fcbba1fc9272fb6a4aef3b2ccb181d99000d\",\n \"fff5f0fee0d2fcbba1fc9272fb6a4aef3b2ccb181da50f1567000d\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"e5f5f999d8c92ca25f\",\n \"edf8fbb2e2e266c2a4238b45\",\n \"edf8fbb2e2e266c2a42ca25f006d2c\",\n \"edf8fbccece699d8c966c2a42ca25f006d2c\",\n \"edf8fbccece699d8c966c2a441ae76238b45005824\",\n \"f7fcfde5f5f9ccece699d8c966c2a441ae76238b45005824\",\n \"f7fcfde5f5f9ccece699d8c966c2a441ae76238b45006d2c00441b\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport 
ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"e0ecf49ebcda8856a7\",\n \"edf8fbb3cde38c96c688419d\",\n \"edf8fbb3cde38c96c68856a7810f7c\",\n \"edf8fbbfd3e69ebcda8c96c68856a7810f7c\",\n \"edf8fbbfd3e69ebcda8c96c68c6bb188419d6e016b\",\n \"f7fcfde0ecf4bfd3e69ebcda8c96c68c6bb188419d6e016b\",\n \"f7fcfde0ecf4bfd3e69ebcda8c96c68c6bb188419d810f7c4d004b\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"e0f3dba8ddb543a2ca\",\n \"f0f9e8bae4bc7bccc42b8cbe\",\n \"f0f9e8bae4bc7bccc443a2ca0868ac\",\n \"f0f9e8ccebc5a8ddb57bccc443a2ca0868ac\",\n \"f0f9e8ccebc5a8ddb57bccc44eb3d32b8cbe08589e\",\n \"f7fcf0e0f3dbccebc5a8ddb57bccc44eb3d32b8cbe08589e\",\n \"f7fcf0e0f3dbccebc5a8ddb57bccc44eb3d32b8cbe0868ac084081\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"fee8c8fdbb84e34a33\",\n \"fef0d9fdcc8afc8d59d7301f\",\n \"fef0d9fdcc8afc8d59e34a33b30000\",\n \"fef0d9fdd49efdbb84fc8d59e34a33b30000\",\n \"fef0d9fdd49efdbb84fc8d59ef6548d7301f990000\",\n \"fff7ecfee8c8fdd49efdbb84fc8d59ef6548d7301f990000\",\n \"fff7ecfee8c8fdd49efdbb84fc8d59ef6548d7301fb300007f0000\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"ece2f0a6bddb1c9099\",\n \"f6eff7bdc9e167a9cf02818a\",\n \"f6eff7bdc9e167a9cf1c9099016c59\",\n \"f6eff7d0d1e6a6bddb67a9cf1c9099016c59\",\n \"f6eff7d0d1e6a6bddb67a9cf3690c002818a016450\",\n \"fff7fbece2f0d0d1e6a6bddb67a9cf3690c002818a016450\",\n \"fff7fbece2f0d0d1e6a6bddb67a9cf3690c002818a016c59014636\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"ece7f2a6bddb2b8cbe\",\n \"f1eef6bdc9e174a9cf0570b0\",\n \"f1eef6bdc9e174a9cf2b8cbe045a8d\",\n \"f1eef6d0d1e6a6bddb74a9cf2b8cbe045a8d\",\n \"f1eef6d0d1e6a6bddb74a9cf3690c00570b0034e7b\",\n \"fff7fbece7f2d0d1e6a6bddb74a9cf3690c00570b0034e7b\",\n \"fff7fbece7f2d0d1e6a6bddb74a9cf3690c00570b0045a8d023858\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"e7e1efc994c7dd1c77\",\n \"f1eef6d7b5d8df65b0ce1256\",\n \"f1eef6d7b5d8df65b0dd1c77980043\",\n \"f1eef6d4b9dac994c7df65b0dd1c77980043\",\n \"f1eef6d4b9dac994c7df65b0e7298ace125691003f\",\n \"f7f4f9e7e1efd4b9dac994c7df65b0e7298ace125691003f\",\n \"f7f4f9e7e1efd4b9dac994c7df65b0e7298ace125698004367001f\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"fde0ddfa9fb5c51b8a\",\n \"feebe2fbb4b9f768a1ae017e\",\n \"feebe2fbb4b9f768a1c51b8a7a0177\",\n \"feebe2fcc5c0fa9fb5f768a1c51b8a7a0177\",\n \"feebe2fcc5c0fa9fb5f768a1dd3497ae017e7a0177\",\n \"fff7f3fde0ddfcc5c0fa9fb5f768a1dd3497ae017e7a0177\",\n \"fff7f3fde0ddfcc5c0fa9fb5f768a1dd3497ae017e7a017749006a\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"edf8b17fcdbb2c7fb8\",\n \"ffffcca1dab441b6c4225ea8\",\n \"ffffcca1dab441b6c42c7fb8253494\",\n \"ffffccc7e9b47fcdbb41b6c42c7fb8253494\",\n 
\"ffffccc7e9b47fcdbb41b6c41d91c0225ea80c2c84\",\n \"ffffd9edf8b1c7e9b47fcdbb41b6c41d91c0225ea80c2c84\",\n \"ffffd9edf8b1c7e9b47fcdbb41b6c41d91c0225ea8253494081d58\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"f7fcb9addd8e31a354\",\n \"ffffccc2e69978c679238443\",\n \"ffffccc2e69978c67931a354006837\",\n \"ffffccd9f0a3addd8e78c67931a354006837\",\n \"ffffccd9f0a3addd8e78c67941ab5d238443005a32\",\n \"ffffe5f7fcb9d9f0a3addd8e78c67941ab5d238443005a32\",\n \"ffffe5f7fcb9d9f0a3addd8e78c67941ab5d238443006837004529\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"fff7bcfec44fd95f0e\",\n \"ffffd4fed98efe9929cc4c02\",\n \"ffffd4fed98efe9929d95f0e993404\",\n \"ffffd4fee391fec44ffe9929d95f0e993404\",\n \"ffffd4fee391fec44ffe9929ec7014cc4c028c2d04\",\n \"ffffe5fff7bcfee391fec44ffe9929ec7014cc4c028c2d04\",\n \"ffffe5fff7bcfee391fec44ffe9929ec7014cc4c02993404662506\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\nimport ramp from \"../ramp.js\";\n\nexport var scheme = new Array(3).concat(\n \"ffeda0feb24cf03b20\",\n \"ffffb2fecc5cfd8d3ce31a1c\",\n \"ffffb2fecc5cfd8d3cf03b20bd0026\",\n \"ffffb2fed976feb24cfd8d3cf03b20bd0026\",\n \"ffffb2fed976feb24cfd8d3cfc4e2ae31a1cb10026\",\n \"ffffccffeda0fed976feb24cfd8d3cfc4e2ae31a1cb10026\",\n \"ffffccffeda0fed976feb24cfd8d3cfc4e2ae31a1cbd0026800026\"\n).map(colors);\n\nexport default ramp(scheme);\n","import colors from \"../colors.js\";\n\nexport default colors(\"1f77b4ff7f0e2ca02cd627289467bd8c564be377c27f7f7fbcbd2217becf\");\n","import colors from \"../colors.js\";\n\nexport default colors(\"7fc97fbeaed4fdc086ffff99386cb0f0027fbf5b17666666\");\n","import colors from \"../colors.js\";\n\nexport default colors(\"1b9e77d95f027570b3e7298a66a61ee6ab02a6761d666666\");\n","import colors from \"../colors.js\";\n\nexport default colors(\"a6cee31f78b4b2df8a33a02cfb9a99e31a1cfdbf6fff7f00cab2d66a3d9affff99b15928\");\n","import colors from \"../colors.js\";\n\nexport default colors(\"fbb4aeb3cde3ccebc5decbe4fed9a6ffffcce5d8bdfddaecf2f2f2\");\n","import colors from \"../colors.js\";\n\nexport default colors(\"b3e2cdfdcdaccbd5e8f4cae4e6f5c9fff2aef1e2cccccccc\");\n","import colors from \"../colors.js\";\n\nexport default colors(\"e41a1c377eb84daf4a984ea3ff7f00ffff33a65628f781bf999999\");\n","import colors from \"../colors.js\";\n\nexport default colors(\"66c2a5fc8d628da0cbe78ac3a6d854ffd92fe5c494b3b3b3\");\n","import colors from \"../colors.js\";\n\nexport default colors(\"8dd3c7ffffb3bebadafb807280b1d3fdb462b3de69fccde5d9d9d9bc80bdccebc5ffed6f\");\n","import colors from \"../colors.js\";\n\nfunction ramp(range) {\n var n = range.length;\n return function(t) {\n return range[Math.max(0, Math.min(n - 1, Math.floor(t * n)))];\n };\n}\n\nexport default 
ramp(colors(\"44015444025645045745055946075a46085c460a5d460b5e470d60470e6147106347116447136548146748166848176948186a481a6c481b6d481c6e481d6f481f70482071482173482374482475482576482677482878482979472a7a472c7a472d7b472e7c472f7d46307e46327e46337f463480453581453781453882443983443a83443b84433d84433e85423f854240864241864142874144874045884046883f47883f48893e49893e4a893e4c8a3d4d8a3d4e8a3c4f8a3c508b3b518b3b528b3a538b3a548c39558c39568c38588c38598c375a8c375b8d365c8d365d8d355e8d355f8d34608d34618d33628d33638d32648e32658e31668e31678e31688e30698e306a8e2f6b8e2f6c8e2e6d8e2e6e8e2e6f8e2d708e2d718e2c718e2c728e2c738e2b748e2b758e2a768e2a778e2a788e29798e297a8e297b8e287c8e287d8e277e8e277f8e27808e26818e26828e26828e25838e25848e25858e24868e24878e23888e23898e238a8d228b8d228c8d228d8d218e8d218f8d21908d21918c20928c20928c20938c1f948c1f958b1f968b1f978b1f988b1f998a1f9a8a1e9b8a1e9c891e9d891f9e891f9f881fa0881fa1881fa1871fa28720a38620a48621a58521a68522a78522a88423a98324aa8325ab8225ac8226ad8127ad8128ae8029af7f2ab07f2cb17e2db27d2eb37c2fb47c31b57b32b67a34b67935b77937b87838b9773aba763bbb753dbc743fbc7340bd7242be7144bf7046c06f48c16e4ac16d4cc26c4ec36b50c46a52c56954c56856c66758c7655ac8645cc8635ec96260ca6063cb5f65cb5e67cc5c69cd5b6ccd5a6ece5870cf5773d05675d05477d1537ad1517cd2507fd34e81d34d84d44b86d54989d5488bd6468ed64590d74393d74195d84098d83e9bd93c9dd93ba0da39a2da37a5db36a8db34aadc32addc30b0dd2fb2dd2db5de2bb8de29bade28bddf26c0df25c2df23c5e021c8e020cae11fcde11dd0e11cd2e21bd5e21ad8e219dae319dde318dfe318e2e418e5e419e7e419eae51aece51befe51cf1e51df4e61ef6e620f8e621fbe723fde725\"));\n\nexport var magma = ramp(colors(\"00000401000501010601010802010902020b02020d03030f03031204041405041606051806051a07061c08071e0907200a08220b09240c09260d0a290e0b2b100b2d110c2f120d31130d34140e36150e38160f3b180f3d19103f1a10421c10441d11471e114920114b21114e22115024125325125527125829115a2a115c2c115f2d11612f116331116533106734106936106b38106c390f6e3b0f703d0f713f0f72400f74420f75440f764510774710784910784a10794c117a4e117b4f127b51127c52137c54137d56147d57157e59157e5a167e5c167f5d177f5f187f601880621980641a80651a80671b80681c816a1c816b1d816d1d816e1e81701f81721f817320817521817621817822817922827b23827c23827e24828025828125818326818426818627818827818928818b29818c29818e2a81902a81912b81932b80942c80962c80982d80992d809b2e7f9c2e7f9e2f7fa02f7fa1307ea3307ea5317ea6317da8327daa337dab337cad347cae347bb0357bb2357bb3367ab5367ab73779b83779ba3878bc3978bd3977bf3a77c03a76c23b75c43c75c53c74c73d73c83e73ca3e72cc3f71cd4071cf4070d0416fd2426fd3436ed5446dd6456cd8456cd9466bdb476adc4869de4968df4a68e04c67e24d66e34e65e44f64e55064e75263e85362e95462ea5661eb5760ec5860ed5a5fee5b5eef5d5ef05f5ef1605df2625df2645cf3655cf4675cf4695cf56b5cf66c5cf66e5cf7705cf7725cf8745cf8765cf9785df9795df97b5dfa7d5efa7f5efa815ffb835ffb8560fb8761fc8961fc8a62fc8c63fc8e64fc9065fd9266fd9467fd9668fd9869fd9a6afd9b6bfe9d6cfe9f6dfea16efea36ffea571fea772fea973feaa74feac76feae77feb078feb27afeb47bfeb67cfeb77efeb97ffebb81febd82febf84fec185fec287fec488fec68afec88cfeca8dfecc8ffecd90fecf92fed194fed395fed597fed799fed89afdda9cfddc9efddea0fde0a1fde2a3fde3a5fde5a7fde7a9fde9aafdebacfcecaefceeb0fcf0b2fcf2b4fcf4b6fcf6b8fcf7b9fcf9bbfcfbbdfcfdbf\"));\n\nexport var inferno = 
ramp(colors(\"00000401000501010601010802010a02020c02020e03021004031204031405041706041907051b08051d09061f0a07220b07240c08260d08290e092b10092d110a30120a32140b34150b37160b39180c3c190c3e1b0c411c0c431e0c451f0c48210c4a230c4c240c4f260c51280b53290b552b0b572d0b592f0a5b310a5c320a5e340a5f3609613809623909633b09643d09653e0966400a67420a68440a68450a69470b6a490b6a4a0c6b4c0c6b4d0d6c4f0d6c510e6c520e6d540f6d550f6d57106e59106e5a116e5c126e5d126e5f136e61136e62146e64156e65156e67166e69166e6a176e6c186e6d186e6f196e71196e721a6e741a6e751b6e771c6d781c6d7a1d6d7c1d6d7d1e6d7f1e6c801f6c82206c84206b85216b87216b88226a8a226a8c23698d23698f24699025689225689326679526679727669827669a28659b29649d29649f2a63a02a63a22b62a32c61a52c60a62d60a82e5fa92e5eab2f5ead305dae305cb0315bb1325ab3325ab43359b63458b73557b93556ba3655bc3754bd3853bf3952c03a51c13a50c33b4fc43c4ec63d4dc73e4cc83f4bca404acb4149cc4248ce4347cf4446d04545d24644d34743d44842d54a41d74b3fd84c3ed94d3dda4e3cdb503bdd513ade5238df5337e05536e15635e25734e35933e45a31e55c30e65d2fe75e2ee8602de9612bea632aeb6429eb6628ec6726ed6925ee6a24ef6c23ef6e21f06f20f1711ff1731df2741cf3761bf37819f47918f57b17f57d15f67e14f68013f78212f78410f8850ff8870ef8890cf98b0bf98c0af98e09fa9008fa9207fa9407fb9606fb9706fb9906fb9b06fb9d07fc9f07fca108fca309fca50afca60cfca80dfcaa0ffcac11fcae12fcb014fcb216fcb418fbb61afbb81dfbba1ffbbc21fbbe23fac026fac228fac42afac62df9c72ff9c932f9cb35f8cd37f8cf3af7d13df7d340f6d543f6d746f5d949f5db4cf4dd4ff4df53f4e156f3e35af3e55df2e661f2e865f2ea69f1ec6df1ed71f1ef75f1f179f2f27df2f482f3f586f3f68af4f88ef5f992f6fa96f8fb9af9fc9dfafda1fcffa4\"));\n\nexport var plasma = ramp(colors(\"0d088710078813078916078a19068c1b068d1d068e20068f2206902406912605912805922a05932c05942e05952f059631059733059735049837049938049a3a049a3c049b3e049c3f049c41049d43039e44039e46039f48039f4903a04b03a14c02a14e02a25002a25102a35302a35502a45601a45801a45901a55b01a55c01a65e01a66001a66100a76300a76400a76600a76700a86900a86a00a86c00a86e00a86f00a87100a87201a87401a87501a87701a87801a87a02a87b02a87d03a87e03a88004a88104a78305a78405a78606a68707a68808a68a09a58b0aa58d0ba58e0ca48f0da4910ea3920fa39410a29511a19613a19814a099159f9a169f9c179e9d189d9e199da01a9ca11b9ba21d9aa31e9aa51f99a62098a72197a82296aa2395ab2494ac2694ad2793ae2892b02991b12a90b22b8fb32c8eb42e8db52f8cb6308bb7318ab83289ba3388bb3488bc3587bd3786be3885bf3984c03a83c13b82c23c81c33d80c43e7fc5407ec6417dc7427cc8437bc9447aca457acb4679cc4778cc4977cd4a76ce4b75cf4c74d04d73d14e72d24f71d35171d45270d5536fd5546ed6556dd7566cd8576bd9586ada5a6ada5b69db5c68dc5d67dd5e66de5f65de6164df6263e06363e16462e26561e26660e3685fe4695ee56a5de56b5de66c5ce76e5be76f5ae87059e97158e97257ea7457eb7556eb7655ec7754ed7953ed7a52ee7b51ef7c51ef7e50f07f4ff0804ef1814df1834cf2844bf3854bf3874af48849f48948f58b47f58c46f68d45f68f44f79044f79143f79342f89441f89540f9973ff9983ef99a3efa9b3dfa9c3cfa9e3bfb9f3afba139fba238fca338fca537fca636fca835fca934fdab33fdac33fdae32fdaf31fdb130fdb22ffdb42ffdb52efeb72dfeb82cfeba2cfebb2bfebd2afebe2afec029fdc229fdc328fdc527fdc627fdc827fdca26fdcb26fccd25fcce25fcd025fcd225fbd324fbd524fbd724fad824fada24f9dc24f9dd25f8df25f8e125f7e225f7e425f6e626f6e826f5e926f5eb27f4ed27f3ee27f3f027f2f227f1f426f1f525f0f724f0f921\"));\n","export const radians = Math.PI / 180;\nexport const degrees = 180 / Math.PI;\n","import define, {extend} from \"./define.js\";\nimport {Color, rgbConvert, Rgb, darker, brighter} from \"./color.js\";\nimport {degrees, radians} from \"./math.js\";\n\nvar A = -0.14861,\n B = +1.78277,\n C = -0.29227,\n D = -0.90649,\n E = +1.97294,\n ED = E * D,\n EB = E * B,\n BC_DA = B * C - D * A;\n\nfunction cubehelixConvert(o) 
{\n if (o instanceof Cubehelix) return new Cubehelix(o.h, o.s, o.l, o.opacity);\n if (!(o instanceof Rgb)) o = rgbConvert(o);\n var r = o.r / 255,\n g = o.g / 255,\n b = o.b / 255,\n l = (BC_DA * b + ED * r - EB * g) / (BC_DA + ED - EB),\n bl = b - l,\n k = (E * (g - l) - C * bl) / D,\n s = Math.sqrt(k * k + bl * bl) / (E * l * (1 - l)), // NaN if l=0 or l=1\n h = s ? Math.atan2(k, bl) * degrees - 120 : NaN;\n return new Cubehelix(h < 0 ? h + 360 : h, s, l, o.opacity);\n}\n\nexport default function cubehelix(h, s, l, opacity) {\n return arguments.length === 1 ? cubehelixConvert(h) : new Cubehelix(h, s, l, opacity == null ? 1 : opacity);\n}\n\nexport function Cubehelix(h, s, l, opacity) {\n this.h = +h;\n this.s = +s;\n this.l = +l;\n this.opacity = +opacity;\n}\n\ndefine(Cubehelix, cubehelix, extend(Color, {\n brighter: function(k) {\n k = k == null ? brighter : Math.pow(brighter, k);\n return new Cubehelix(this.h, this.s, this.l * k, this.opacity);\n },\n darker: function(k) {\n k = k == null ? darker : Math.pow(darker, k);\n return new Cubehelix(this.h, this.s, this.l * k, this.opacity);\n },\n rgb: function() {\n var h = isNaN(this.h) ? 0 : (this.h + 120) * radians,\n l = +this.l,\n a = isNaN(this.s) ? 0 : this.s * l * (1 - l),\n cosh = Math.cos(h),\n sinh = Math.sin(h);\n return new Rgb(\n 255 * (l + a * (A * cosh + B * sinh)),\n 255 * (l + a * (C * cosh + D * sinh)),\n 255 * (l + a * (E * cosh)),\n this.opacity\n );\n }\n}));\n","import {cubehelix as colorCubehelix} from \"d3-color\";\nimport color, {hue} from \"./color.js\";\n\nfunction cubehelix(hue) {\n return (function cubehelixGamma(y) {\n y = +y;\n\n function cubehelix(start, end) {\n var h = hue((start = colorCubehelix(start)).h, (end = colorCubehelix(end)).h),\n s = color(start.s, end.s),\n l = color(start.l, end.l),\n opacity = color(start.opacity, end.opacity);\n return function(t) {\n start.h = h(t);\n start.s = s(t);\n start.l = l(Math.pow(t, y));\n start.opacity = opacity(t);\n return start + \"\";\n };\n }\n\n cubehelix.gamma = cubehelixGamma;\n\n return cubehelix;\n })(1);\n}\n\nexport default cubehelix(hue);\nexport var cubehelixLong = cubehelix(color);\n","import {cubehelix} from \"d3-color\";\nimport {interpolateCubehelixLong} from \"d3-interpolate\";\n\nexport var warm = interpolateCubehelixLong(cubehelix(-100, 0.75, 0.35), cubehelix(80, 1.50, 0.8));\n\nexport var cool = interpolateCubehelixLong(cubehelix(260, 0.75, 0.35), cubehelix(80, 1.50, 0.8));\n\nvar c = cubehelix();\n\nexport default function(t) {\n if (t < 0 || t > 1) t -= Math.floor(t);\n var ts = Math.abs(t - 0.5);\n c.h = 360 * t - 100;\n c.s = 1.5 - 1.5 * ts;\n c.l = 0.8 - 0.9 * ts;\n return c + \"\";\n}\n","import {cubehelix} from \"d3-color\";\nimport {interpolateCubehelixLong} from \"d3-interpolate\";\n\nexport default interpolateCubehelixLong(cubehelix(300, 0.5, 0.0), cubehelix(-240, 0.5, 1.0));\n","import {rgb} from \"d3-color\";\n\nvar c = rgb(),\n pi_1_3 = Math.PI / 3,\n pi_2_3 = Math.PI * 2 / 3;\n\nexport default function(t) {\n var x;\n t = (0.5 - t) * Math.PI;\n c.r = 255 * (x = Math.sin(t)) * x;\n c.g = 255 * (x = Math.sin(t + pi_1_3)) * x;\n c.b = 255 * (x = Math.sin(t + pi_2_3)) * x;\n return c + \"\";\n}\n","export function point(that, x, y) {\n that._context.bezierCurveTo(\n (2 * that._x0 + that._x1) / 3,\n (2 * that._y0 + that._y1) / 3,\n (that._x0 + 2 * that._x1) / 3,\n (that._y0 + 2 * that._y1) / 3,\n (that._x0 + 4 * that._x1 + x) / 6,\n (that._y0 + 4 * that._y1 + y) / 6\n );\n}\n\nexport function Basis(context) {\n this._context = 
context;\n}\n\nBasis.prototype = {\n areaStart: function() {\n this._line = 0;\n },\n areaEnd: function() {\n this._line = NaN;\n },\n lineStart: function() {\n this._x0 = this._x1 =\n this._y0 = this._y1 = NaN;\n this._point = 0;\n },\n lineEnd: function() {\n switch (this._point) {\n case 3: point(this, this._x1, this._y1); // proceed\n case 2: this._context.lineTo(this._x1, this._y1); break;\n }\n if (this._line || (this._line !== 0 && this._point === 1)) this._context.closePath();\n this._line = 1 - this._line;\n },\n point: function(x, y) {\n x = +x, y = +y;\n switch (this._point) {\n case 0: this._point = 1; this._line ? this._context.lineTo(x, y) : this._context.moveTo(x, y); break;\n case 1: this._point = 2; break;\n case 2: this._point = 3; this._context.lineTo((5 * this._x0 + this._x1) / 6, (5 * this._y0 + this._y1) / 6); // proceed\n default: point(this, x, y); break;\n }\n this._x0 = this._x1, this._x1 = x;\n this._y0 = this._y1, this._y1 = y;\n }\n};\n\nexport default function(context) {\n return new Basis(context);\n}\n","export default function() {}\n","import noop from \"../noop.js\";\nimport {point} from \"./basis.js\";\n\nfunction BasisClosed(context) {\n this._context = context;\n}\n\nBasisClosed.prototype = {\n areaStart: noop,\n areaEnd: noop,\n lineStart: function() {\n this._x0 = this._x1 = this._x2 = this._x3 = this._x4 =\n this._y0 = this._y1 = this._y2 = this._y3 = this._y4 = NaN;\n this._point = 0;\n },\n lineEnd: function() {\n switch (this._point) {\n case 1: {\n this._context.moveTo(this._x2, this._y2);\n this._context.closePath();\n break;\n }\n case 2: {\n this._context.moveTo((this._x2 + 2 * this._x3) / 3, (this._y2 + 2 * this._y3) / 3);\n this._context.lineTo((this._x3 + 2 * this._x2) / 3, (this._y3 + 2 * this._y2) / 3);\n this._context.closePath();\n break;\n }\n case 3: {\n this.point(this._x2, this._y2);\n this.point(this._x3, this._y3);\n this.point(this._x4, this._y4);\n break;\n }\n }\n },\n point: function(x, y) {\n x = +x, y = +y;\n switch (this._point) {\n case 0: this._point = 1; this._x2 = x, this._y2 = y; break;\n case 1: this._point = 2; this._x3 = x, this._y3 = y; break;\n case 2: this._point = 3; this._x4 = x, this._y4 = y; this._context.moveTo((this._x0 + 4 * this._x1 + x) / 6, (this._y0 + 4 * this._y1 + y) / 6); break;\n default: point(this, x, y); break;\n }\n this._x0 = this._x1, this._x1 = x;\n this._y0 = this._y1, this._y1 = y;\n }\n};\n\nexport default function(context) {\n return new BasisClosed(context);\n}\n","import {point} from \"./basis.js\";\n\nfunction BasisOpen(context) {\n this._context = context;\n}\n\nBasisOpen.prototype = {\n areaStart: function() {\n this._line = 0;\n },\n areaEnd: function() {\n this._line = NaN;\n },\n lineStart: function() {\n this._x0 = this._x1 =\n this._y0 = this._y1 = NaN;\n this._point = 0;\n },\n lineEnd: function() {\n if (this._line || (this._line !== 0 && this._point === 3)) this._context.closePath();\n this._line = 1 - this._line;\n },\n point: function(x, y) {\n x = +x, y = +y;\n switch (this._point) {\n case 0: this._point = 1; break;\n case 1: this._point = 2; break;\n case 2: this._point = 3; var x0 = (this._x0 + 4 * this._x1 + x) / 6, y0 = (this._y0 + 4 * this._y1 + y) / 6; this._line ? 
this._context.lineTo(x0, y0) : this._context.moveTo(x0, y0); break;\n case 3: this._point = 4; // proceed\n default: point(this, x, y); break;\n }\n this._x0 = this._x1, this._x1 = x;\n this._y0 = this._y1, this._y1 = y;\n }\n};\n\nexport default function(context) {\n return new BasisOpen(context);\n}\n","import {Basis} from \"./basis.js\";\n\nfunction Bundle(context, beta) {\n this._basis = new Basis(context);\n this._beta = beta;\n}\n\nBundle.prototype = {\n lineStart: function() {\n this._x = [];\n this._y = [];\n this._basis.lineStart();\n },\n lineEnd: function() {\n var x = this._x,\n y = this._y,\n j = x.length - 1;\n\n if (j > 0) {\n var x0 = x[0],\n y0 = y[0],\n dx = x[j] - x0,\n dy = y[j] - y0,\n i = -1,\n t;\n\n while (++i <= j) {\n t = i / j;\n this._basis.point(\n this._beta * x[i] + (1 - this._beta) * (x0 + t * dx),\n this._beta * y[i] + (1 - this._beta) * (y0 + t * dy)\n );\n }\n }\n\n this._x = this._y = null;\n this._basis.lineEnd();\n },\n point: function(x, y) {\n this._x.push(+x);\n this._y.push(+y);\n }\n};\n\nexport default (function custom(beta) {\n\n function bundle(context) {\n return beta === 1 ? new Basis(context) : new Bundle(context, beta);\n }\n\n bundle.beta = function(beta) {\n return custom(+beta);\n };\n\n return bundle;\n})(0.85);\n","export function point(that, x, y) {\n that._context.bezierCurveTo(\n that._x1 + that._k * (that._x2 - that._x0),\n that._y1 + that._k * (that._y2 - that._y0),\n that._x2 + that._k * (that._x1 - x),\n that._y2 + that._k * (that._y1 - y),\n that._x2,\n that._y2\n );\n}\n\nexport function Cardinal(context, tension) {\n this._context = context;\n this._k = (1 - tension) / 6;\n}\n\nCardinal.prototype = {\n areaStart: function() {\n this._line = 0;\n },\n areaEnd: function() {\n this._line = NaN;\n },\n lineStart: function() {\n this._x0 = this._x1 = this._x2 =\n this._y0 = this._y1 = this._y2 = NaN;\n this._point = 0;\n },\n lineEnd: function() {\n switch (this._point) {\n case 2: this._context.lineTo(this._x2, this._y2); break;\n case 3: point(this, this._x1, this._y1); break;\n }\n if (this._line || (this._line !== 0 && this._point === 1)) this._context.closePath();\n this._line = 1 - this._line;\n },\n point: function(x, y) {\n x = +x, y = +y;\n switch (this._point) {\n case 0: this._point = 1; this._line ? 
this._context.lineTo(x, y) : this._context.moveTo(x, y); break;\n case 1: this._point = 2; this._x1 = x, this._y1 = y; break;\n case 2: this._point = 3; // proceed\n default: point(this, x, y); break;\n }\n this._x0 = this._x1, this._x1 = this._x2, this._x2 = x;\n this._y0 = this._y1, this._y1 = this._y2, this._y2 = y;\n }\n};\n\nexport default (function custom(tension) {\n\n function cardinal(context) {\n return new Cardinal(context, tension);\n }\n\n cardinal.tension = function(tension) {\n return custom(+tension);\n };\n\n return cardinal;\n})(0);\n","import noop from \"../noop.js\";\nimport {point} from \"./cardinal.js\";\n\nexport function CardinalClosed(context, tension) {\n this._context = context;\n this._k = (1 - tension) / 6;\n}\n\nCardinalClosed.prototype = {\n areaStart: noop,\n areaEnd: noop,\n lineStart: function() {\n this._x0 = this._x1 = this._x2 = this._x3 = this._x4 = this._x5 =\n this._y0 = this._y1 = this._y2 = this._y3 = this._y4 = this._y5 = NaN;\n this._point = 0;\n },\n lineEnd: function() {\n switch (this._point) {\n case 1: {\n this._context.moveTo(this._x3, this._y3);\n this._context.closePath();\n break;\n }\n case 2: {\n this._context.lineTo(this._x3, this._y3);\n this._context.closePath();\n break;\n }\n case 3: {\n this.point(this._x3, this._y3);\n this.point(this._x4, this._y4);\n this.point(this._x5, this._y5);\n break;\n }\n }\n },\n point: function(x, y) {\n x = +x, y = +y;\n switch (this._point) {\n case 0: this._point = 1; this._x3 = x, this._y3 = y; break;\n case 1: this._point = 2; this._context.moveTo(this._x4 = x, this._y4 = y); break;\n case 2: this._point = 3; this._x5 = x, this._y5 = y; break;\n default: point(this, x, y); break;\n }\n this._x0 = this._x1, this._x1 = this._x2, this._x2 = x;\n this._y0 = this._y1, this._y1 = this._y2, this._y2 = y;\n }\n};\n\nexport default (function custom(tension) {\n\n function cardinal(context) {\n return new CardinalClosed(context, tension);\n }\n\n cardinal.tension = function(tension) {\n return custom(+tension);\n };\n\n return cardinal;\n})(0);\n","import {point} from \"./cardinal.js\";\n\nexport function CardinalOpen(context, tension) {\n this._context = context;\n this._k = (1 - tension) / 6;\n}\n\nCardinalOpen.prototype = {\n areaStart: function() {\n this._line = 0;\n },\n areaEnd: function() {\n this._line = NaN;\n },\n lineStart: function() {\n this._x0 = this._x1 = this._x2 =\n this._y0 = this._y1 = this._y2 = NaN;\n this._point = 0;\n },\n lineEnd: function() {\n if (this._line || (this._line !== 0 && this._point === 3)) this._context.closePath();\n this._line = 1 - this._line;\n },\n point: function(x, y) {\n x = +x, y = +y;\n switch (this._point) {\n case 0: this._point = 1; break;\n case 1: this._point = 2; break;\n case 2: this._point = 3; this._line ? 
this._context.lineTo(this._x2, this._y2) : this._context.moveTo(this._x2, this._y2); break;\n case 3: this._point = 4; // proceed\n default: point(this, x, y); break;\n }\n this._x0 = this._x1, this._x1 = this._x2, this._x2 = x;\n this._y0 = this._y1, this._y1 = this._y2, this._y2 = y;\n }\n};\n\nexport default (function custom(tension) {\n\n function cardinal(context) {\n return new CardinalOpen(context, tension);\n }\n\n cardinal.tension = function(tension) {\n return custom(+tension);\n };\n\n return cardinal;\n})(0);\n","export var abs = Math.abs;\nexport var atan2 = Math.atan2;\nexport var cos = Math.cos;\nexport var max = Math.max;\nexport var min = Math.min;\nexport var sin = Math.sin;\nexport var sqrt = Math.sqrt;\n\nexport var epsilon = 1e-12;\nexport var pi = Math.PI;\nexport var halfPi = pi / 2;\nexport var tau = 2 * pi;\n\nexport function acos(x) {\n return x > 1 ? 0 : x < -1 ? pi : Math.acos(x);\n}\n\nexport function asin(x) {\n return x >= 1 ? halfPi : x <= -1 ? -halfPi : Math.asin(x);\n}\n","import {epsilon} from \"../math.js\";\nimport {Cardinal} from \"./cardinal.js\";\n\nexport function point(that, x, y) {\n var x1 = that._x1,\n y1 = that._y1,\n x2 = that._x2,\n y2 = that._y2;\n\n if (that._l01_a > epsilon) {\n var a = 2 * that._l01_2a + 3 * that._l01_a * that._l12_a + that._l12_2a,\n n = 3 * that._l01_a * (that._l01_a + that._l12_a);\n x1 = (x1 * a - that._x0 * that._l12_2a + that._x2 * that._l01_2a) / n;\n y1 = (y1 * a - that._y0 * that._l12_2a + that._y2 * that._l01_2a) / n;\n }\n\n if (that._l23_a > epsilon) {\n var b = 2 * that._l23_2a + 3 * that._l23_a * that._l12_a + that._l12_2a,\n m = 3 * that._l23_a * (that._l23_a + that._l12_a);\n x2 = (x2 * b + that._x1 * that._l23_2a - x * that._l12_2a) / m;\n y2 = (y2 * b + that._y1 * that._l23_2a - y * that._l12_2a) / m;\n }\n\n that._context.bezierCurveTo(x1, y1, x2, y2, that._x2, that._y2);\n}\n\nfunction CatmullRom(context, alpha) {\n this._context = context;\n this._alpha = alpha;\n}\n\nCatmullRom.prototype = {\n areaStart: function() {\n this._line = 0;\n },\n areaEnd: function() {\n this._line = NaN;\n },\n lineStart: function() {\n this._x0 = this._x1 = this._x2 =\n this._y0 = this._y1 = this._y2 = NaN;\n this._l01_a = this._l12_a = this._l23_a =\n this._l01_2a = this._l12_2a = this._l23_2a =\n this._point = 0;\n },\n lineEnd: function() {\n switch (this._point) {\n case 2: this._context.lineTo(this._x2, this._y2); break;\n case 3: this.point(this._x2, this._y2); break;\n }\n if (this._line || (this._line !== 0 && this._point === 1)) this._context.closePath();\n this._line = 1 - this._line;\n },\n point: function(x, y) {\n x = +x, y = +y;\n\n if (this._point) {\n var x23 = this._x2 - x,\n y23 = this._y2 - y;\n this._l23_a = Math.sqrt(this._l23_2a = Math.pow(x23 * x23 + y23 * y23, this._alpha));\n }\n\n switch (this._point) {\n case 0: this._point = 1; this._line ? this._context.lineTo(x, y) : this._context.moveTo(x, y); break;\n case 1: this._point = 2; break;\n case 2: this._point = 3; // proceed\n default: point(this, x, y); break;\n }\n\n this._l01_a = this._l12_a, this._l12_a = this._l23_a;\n this._l01_2a = this._l12_2a, this._l12_2a = this._l23_2a;\n this._x0 = this._x1, this._x1 = this._x2, this._x2 = x;\n this._y0 = this._y1, this._y1 = this._y2, this._y2 = y;\n }\n};\n\nexport default (function custom(alpha) {\n\n function catmullRom(context) {\n return alpha ? 
new CatmullRom(context, alpha) : new Cardinal(context, 0);\n }\n\n catmullRom.alpha = function(alpha) {\n return custom(+alpha);\n };\n\n return catmullRom;\n})(0.5);\n","import {CardinalClosed} from \"./cardinalClosed.js\";\nimport noop from \"../noop.js\";\nimport {point} from \"./catmullRom.js\";\n\nfunction CatmullRomClosed(context, alpha) {\n this._context = context;\n this._alpha = alpha;\n}\n\nCatmullRomClosed.prototype = {\n areaStart: noop,\n areaEnd: noop,\n lineStart: function() {\n this._x0 = this._x1 = this._x2 = this._x3 = this._x4 = this._x5 =\n this._y0 = this._y1 = this._y2 = this._y3 = this._y4 = this._y5 = NaN;\n this._l01_a = this._l12_a = this._l23_a =\n this._l01_2a = this._l12_2a = this._l23_2a =\n this._point = 0;\n },\n lineEnd: function() {\n switch (this._point) {\n case 1: {\n this._context.moveTo(this._x3, this._y3);\n this._context.closePath();\n break;\n }\n case 2: {\n this._context.lineTo(this._x3, this._y3);\n this._context.closePath();\n break;\n }\n case 3: {\n this.point(this._x3, this._y3);\n this.point(this._x4, this._y4);\n this.point(this._x5, this._y5);\n break;\n }\n }\n },\n point: function(x, y) {\n x = +x, y = +y;\n\n if (this._point) {\n var x23 = this._x2 - x,\n y23 = this._y2 - y;\n this._l23_a = Math.sqrt(this._l23_2a = Math.pow(x23 * x23 + y23 * y23, this._alpha));\n }\n\n switch (this._point) {\n case 0: this._point = 1; this._x3 = x, this._y3 = y; break;\n case 1: this._point = 2; this._context.moveTo(this._x4 = x, this._y4 = y); break;\n case 2: this._point = 3; this._x5 = x, this._y5 = y; break;\n default: point(this, x, y); break;\n }\n\n this._l01_a = this._l12_a, this._l12_a = this._l23_a;\n this._l01_2a = this._l12_2a, this._l12_2a = this._l23_2a;\n this._x0 = this._x1, this._x1 = this._x2, this._x2 = x;\n this._y0 = this._y1, this._y1 = this._y2, this._y2 = y;\n }\n};\n\nexport default (function custom(alpha) {\n\n function catmullRom(context) {\n return alpha ? new CatmullRomClosed(context, alpha) : new CardinalClosed(context, 0);\n }\n\n catmullRom.alpha = function(alpha) {\n return custom(+alpha);\n };\n\n return catmullRom;\n})(0.5);\n","import {CardinalOpen} from \"./cardinalOpen.js\";\nimport {point} from \"./catmullRom.js\";\n\nfunction CatmullRomOpen(context, alpha) {\n this._context = context;\n this._alpha = alpha;\n}\n\nCatmullRomOpen.prototype = {\n areaStart: function() {\n this._line = 0;\n },\n areaEnd: function() {\n this._line = NaN;\n },\n lineStart: function() {\n this._x0 = this._x1 = this._x2 =\n this._y0 = this._y1 = this._y2 = NaN;\n this._l01_a = this._l12_a = this._l23_a =\n this._l01_2a = this._l12_2a = this._l23_2a =\n this._point = 0;\n },\n lineEnd: function() {\n if (this._line || (this._line !== 0 && this._point === 3)) this._context.closePath();\n this._line = 1 - this._line;\n },\n point: function(x, y) {\n x = +x, y = +y;\n\n if (this._point) {\n var x23 = this._x2 - x,\n y23 = this._y2 - y;\n this._l23_a = Math.sqrt(this._l23_2a = Math.pow(x23 * x23 + y23 * y23, this._alpha));\n }\n\n switch (this._point) {\n case 0: this._point = 1; break;\n case 1: this._point = 2; break;\n case 2: this._point = 3; this._line ? 
this._context.lineTo(this._x2, this._y2) : this._context.moveTo(this._x2, this._y2); break;\n case 3: this._point = 4; // proceed\n default: point(this, x, y); break;\n }\n\n this._l01_a = this._l12_a, this._l12_a = this._l23_a;\n this._l01_2a = this._l12_2a, this._l12_2a = this._l23_2a;\n this._x0 = this._x1, this._x1 = this._x2, this._x2 = x;\n this._y0 = this._y1, this._y1 = this._y2, this._y2 = y;\n }\n};\n\nexport default (function custom(alpha) {\n\n function catmullRom(context) {\n return alpha ? new CatmullRomOpen(context, alpha) : new CardinalOpen(context, 0);\n }\n\n catmullRom.alpha = function(alpha) {\n return custom(+alpha);\n };\n\n return catmullRom;\n})(0.5);\n","function Linear(context) {\n this._context = context;\n}\n\nLinear.prototype = {\n areaStart: function() {\n this._line = 0;\n },\n areaEnd: function() {\n this._line = NaN;\n },\n lineStart: function() {\n this._point = 0;\n },\n lineEnd: function() {\n if (this._line || (this._line !== 0 && this._point === 1)) this._context.closePath();\n this._line = 1 - this._line;\n },\n point: function(x, y) {\n x = +x, y = +y;\n switch (this._point) {\n case 0: this._point = 1; this._line ? this._context.lineTo(x, y) : this._context.moveTo(x, y); break;\n case 1: this._point = 2; // proceed\n default: this._context.lineTo(x, y); break;\n }\n }\n};\n\nexport default function(context) {\n return new Linear(context);\n}\n","import noop from \"../noop.js\";\n\nfunction LinearClosed(context) {\n this._context = context;\n}\n\nLinearClosed.prototype = {\n areaStart: noop,\n areaEnd: noop,\n lineStart: function() {\n this._point = 0;\n },\n lineEnd: function() {\n if (this._point) this._context.closePath();\n },\n point: function(x, y) {\n x = +x, y = +y;\n if (this._point) this._context.lineTo(x, y);\n else this._point = 1, this._context.moveTo(x, y);\n }\n};\n\nexport default function(context) {\n return new LinearClosed(context);\n}\n","function sign(x) {\n return x < 0 ? -1 : 1;\n}\n\n// Calculate the slopes of the tangents (Hermite-type interpolation) based on\n// the following paper: Steffen, M. 1990. A Simple Method for Monotonic\n// Interpolation in One Dimension. Astronomy and Astrophysics, Vol. 239, NO.\n// NOV(II), P. 443, 1990.\nfunction slope3(that, x2, y2) {\n var h0 = that._x1 - that._x0,\n h1 = x2 - that._x1,\n s0 = (that._y1 - that._y0) / (h0 || h1 < 0 && -0),\n s1 = (y2 - that._y1) / (h1 || h0 < 0 && -0),\n p = (s0 * h1 + s1 * h0) / (h0 + h1);\n return (sign(s0) + sign(s1)) * Math.min(Math.abs(s0), Math.abs(s1), 0.5 * Math.abs(p)) || 0;\n}\n\n// Calculate a one-sided slope.\nfunction slope2(that, t) {\n var h = that._x1 - that._x0;\n return h ? 
(3 * (that._y1 - that._y0) / h - t) / 2 : t;\n}\n\n// According to https://en.wikipedia.org/wiki/Cubic_Hermite_spline#Representations\n// \"you can express cubic Hermite interpolation in terms of cubic Bézier curves\n// with respect to the four values p0, p0 + m0 / 3, p1 - m1 / 3, p1\".\nfunction point(that, t0, t1) {\n var x0 = that._x0,\n y0 = that._y0,\n x1 = that._x1,\n y1 = that._y1,\n dx = (x1 - x0) / 3;\n that._context.bezierCurveTo(x0 + dx, y0 + dx * t0, x1 - dx, y1 - dx * t1, x1, y1);\n}\n\nfunction MonotoneX(context) {\n this._context = context;\n}\n\nMonotoneX.prototype = {\n areaStart: function() {\n this._line = 0;\n },\n areaEnd: function() {\n this._line = NaN;\n },\n lineStart: function() {\n this._x0 = this._x1 =\n this._y0 = this._y1 =\n this._t0 = NaN;\n this._point = 0;\n },\n lineEnd: function() {\n switch (this._point) {\n case 2: this._context.lineTo(this._x1, this._y1); break;\n case 3: point(this, this._t0, slope2(this, this._t0)); break;\n }\n if (this._line || (this._line !== 0 && this._point === 1)) this._context.closePath();\n this._line = 1 - this._line;\n },\n point: function(x, y) {\n var t1 = NaN;\n\n x = +x, y = +y;\n if (x === this._x1 && y === this._y1) return; // Ignore coincident points.\n switch (this._point) {\n case 0: this._point = 1; this._line ? this._context.lineTo(x, y) : this._context.moveTo(x, y); break;\n case 1: this._point = 2; break;\n case 2: this._point = 3; point(this, slope2(this, t1 = slope3(this, x, y)), t1); break;\n default: point(this, this._t0, t1 = slope3(this, x, y)); break;\n }\n\n this._x0 = this._x1, this._x1 = x;\n this._y0 = this._y1, this._y1 = y;\n this._t0 = t1;\n }\n}\n\nfunction MonotoneY(context) {\n this._context = new ReflectContext(context);\n}\n\n(MonotoneY.prototype = Object.create(MonotoneX.prototype)).point = function(x, y) {\n MonotoneX.prototype.point.call(this, y, x);\n};\n\nfunction ReflectContext(context) {\n this._context = context;\n}\n\nReflectContext.prototype = {\n moveTo: function(x, y) { this._context.moveTo(y, x); },\n closePath: function() { this._context.closePath(); },\n lineTo: function(x, y) { this._context.lineTo(y, x); },\n bezierCurveTo: function(x1, y1, x2, y2, x, y) { this._context.bezierCurveTo(y1, x1, y2, x2, y, x); }\n};\n\nexport function monotoneX(context) {\n return new MonotoneX(context);\n}\n\nexport function monotoneY(context) {\n return new MonotoneY(context);\n}\n","function Natural(context) {\n this._context = context;\n}\n\nNatural.prototype = {\n areaStart: function() {\n this._line = 0;\n },\n areaEnd: function() {\n this._line = NaN;\n },\n lineStart: function() {\n this._x = [];\n this._y = [];\n },\n lineEnd: function() {\n var x = this._x,\n y = this._y,\n n = x.length;\n\n if (n) {\n this._line ? 
this._context.lineTo(x[0], y[0]) : this._context.moveTo(x[0], y[0]);\n if (n === 2) {\n this._context.lineTo(x[1], y[1]);\n } else {\n var px = controlPoints(x),\n py = controlPoints(y);\n for (var i0 = 0, i1 = 1; i1 < n; ++i0, ++i1) {\n this._context.bezierCurveTo(px[0][i0], py[0][i0], px[1][i0], py[1][i0], x[i1], y[i1]);\n }\n }\n }\n\n if (this._line || (this._line !== 0 && n === 1)) this._context.closePath();\n this._line = 1 - this._line;\n this._x = this._y = null;\n },\n point: function(x, y) {\n this._x.push(+x);\n this._y.push(+y);\n }\n};\n\n// See https://www.particleincell.com/2012/bezier-splines/ for derivation.\nfunction controlPoints(x) {\n var i,\n n = x.length - 1,\n m,\n a = new Array(n),\n b = new Array(n),\n r = new Array(n);\n a[0] = 0, b[0] = 2, r[0] = x[0] + 2 * x[1];\n for (i = 1; i < n - 1; ++i) a[i] = 1, b[i] = 4, r[i] = 4 * x[i] + 2 * x[i + 1];\n a[n - 1] = 2, b[n - 1] = 7, r[n - 1] = 8 * x[n - 1] + x[n];\n for (i = 1; i < n; ++i) m = a[i] / b[i - 1], b[i] -= m, r[i] -= m * r[i - 1];\n a[n - 1] = r[n - 1] / b[n - 1];\n for (i = n - 2; i >= 0; --i) a[i] = (r[i] - a[i + 1]) / b[i];\n b[n - 1] = (x[n] + a[n - 1]) / 2;\n for (i = 0; i < n - 1; ++i) b[i] = 2 * x[i + 1] - a[i + 1];\n return [a, b];\n}\n\nexport default function(context) {\n return new Natural(context);\n}\n","function Step(context, t) {\n this._context = context;\n this._t = t;\n}\n\nStep.prototype = {\n areaStart: function() {\n this._line = 0;\n },\n areaEnd: function() {\n this._line = NaN;\n },\n lineStart: function() {\n this._x = this._y = NaN;\n this._point = 0;\n },\n lineEnd: function() {\n if (0 < this._t && this._t < 1 && this._point === 2) this._context.lineTo(this._x, this._y);\n if (this._line || (this._line !== 0 && this._point === 1)) this._context.closePath();\n if (this._line >= 0) this._t = 1 - this._t, this._line = 1 - this._line;\n },\n point: function(x, y) {\n x = +x, y = +y;\n switch (this._point) {\n case 0: this._point = 1; this._line ? 
this._context.lineTo(x, y) : this._context.moveTo(x, y); break;\n case 1: this._point = 2; // proceed\n default: {\n if (this._t <= 0) {\n this._context.lineTo(this._x, y);\n this._context.lineTo(x, y);\n } else {\n var x1 = this._x * (1 - this._t) + x * this._t;\n this._context.lineTo(x1, this._y);\n this._context.lineTo(x1, y);\n }\n break;\n }\n }\n this._x = x, this._y = y;\n }\n};\n\nexport default function(context) {\n return new Step(context, 0.5);\n}\n\nexport function stepBefore(context) {\n return new Step(context, 0);\n}\n\nexport function stepAfter(context) {\n return new Step(context, 1);\n}\n","export default function(series) {\n var n = series.length, o = new Array(n);\n while (--n >= 0) o[n] = n;\n return o;\n}\n","import none from \"./none.js\";\n\nexport default function(series) {\n var sums = series.map(sum);\n return none(series).sort(function(a, b) { return sums[a] - sums[b]; });\n}\n\nexport function sum(series) {\n var s = 0, i = -1, n = series.length, v;\n while (++i < n) if (v = +series[i][1]) s += v;\n return s;\n}\n","import none from \"./none.js\";\n\nexport default function(series) {\n var peaks = series.map(peak);\n return none(series).sort(function(a, b) { return peaks[a] - peaks[b]; });\n}\n\nfunction peak(series) {\n var i = -1, j = 0, n = series.length, vi, vj = -Infinity;\n while (++i < n) if ((vi = +series[i][1]) > vj) vj = vi, j = i;\n return j;\n}\n","export default function(series, order) {\n if (!((n = series.length) > 1)) return;\n for (var i = 1, j, s0, s1 = series[order[0]], n, m = s1.length; i < n; ++i) {\n s0 = s1, s1 = series[order[i]];\n for (j = 0; j < m; ++j) {\n s1[j][1] += s1[j][0] = isNaN(s0[j][1]) ? s0[j][0] : s0[j][1];\n }\n }\n}\n","export default function(x) {\n return Math.abs(x = Math.round(x)) >= 1e21\n ? x.toLocaleString(\"en\").replace(/,/g, \"\")\n : x.toString(10);\n}\n\n// Computes the decimal coefficient and exponent of the specified number x with\n// significant digits p, where x is positive and p is in [1, 21] or undefined.\n// For example, formatDecimalParts(1.23) returns [\"123\", 0].\nexport function formatDecimalParts(x, p) {\n if ((i = (x = p ? x.toExponential(p - 1) : x.toExponential()).indexOf(\"e\")) < 0) return null; // NaN, ±Infinity\n var i, coefficient = x.slice(0, i);\n\n // The string returned by toExponential either has the form \\d\\.\\d+e[-+]\\d+\n // (e.g., 1.2e+3) or the form \\de[-+]\\d+ (e.g., 1e+3).\n return [\n coefficient.length > 1 ? coefficient[0] + coefficient.slice(2) : coefficient,\n +x.slice(i + 1)\n ];\n}\n","// [[fill]align][sign][symbol][0][width][,][.precision][~][type]\nvar re = /^(?:(.)?([<>=^]))?([+\\-( ])?([$#])?(0)?(\\d+)?(,)?(\\.\\d+)?(~)?([a-z%])?$/i;\n\nexport default function formatSpecifier(specifier) {\n if (!(match = re.exec(specifier))) throw new Error(\"invalid format: \" + specifier);\n var match;\n return new FormatSpecifier({\n fill: match[1],\n align: match[2],\n sign: match[3],\n symbol: match[4],\n zero: match[5],\n width: match[6],\n comma: match[7],\n precision: match[8] && match[8].slice(1),\n trim: match[9],\n type: match[10]\n });\n}\n\nformatSpecifier.prototype = FormatSpecifier.prototype; // instanceof\n\nexport function FormatSpecifier(specifier) {\n this.fill = specifier.fill === undefined ? \" \" : specifier.fill + \"\";\n this.align = specifier.align === undefined ? \">\" : specifier.align + \"\";\n this.sign = specifier.sign === undefined ? \"-\" : specifier.sign + \"\";\n this.symbol = specifier.symbol === undefined ? 
\"\" : specifier.symbol + \"\";\n this.zero = !!specifier.zero;\n this.width = specifier.width === undefined ? undefined : +specifier.width;\n this.comma = !!specifier.comma;\n this.precision = specifier.precision === undefined ? undefined : +specifier.precision;\n this.trim = !!specifier.trim;\n this.type = specifier.type === undefined ? \"\" : specifier.type + \"\";\n}\n\nFormatSpecifier.prototype.toString = function() {\n return this.fill\n + this.align\n + this.sign\n + this.symbol\n + (this.zero ? \"0\" : \"\")\n + (this.width === undefined ? \"\" : Math.max(1, this.width | 0))\n + (this.comma ? \",\" : \"\")\n + (this.precision === undefined ? \"\" : \".\" + Math.max(0, this.precision | 0))\n + (this.trim ? \"~\" : \"\")\n + this.type;\n};\n","import {formatDecimalParts} from \"./formatDecimal.js\";\n\nexport var prefixExponent;\n\nexport default function(x, p) {\n var d = formatDecimalParts(x, p);\n if (!d) return x + \"\";\n var coefficient = d[0],\n exponent = d[1],\n i = exponent - (prefixExponent = Math.max(-8, Math.min(8, Math.floor(exponent / 3))) * 3) + 1,\n n = coefficient.length;\n return i === n ? coefficient\n : i > n ? coefficient + new Array(i - n + 1).join(\"0\")\n : i > 0 ? coefficient.slice(0, i) + \".\" + coefficient.slice(i)\n : \"0.\" + new Array(1 - i).join(\"0\") + formatDecimalParts(x, Math.max(0, p + i - 1))[0]; // less than 1y!\n}\n","import {formatDecimalParts} from \"./formatDecimal.js\";\n\nexport default function(x, p) {\n var d = formatDecimalParts(x, p);\n if (!d) return x + \"\";\n var coefficient = d[0],\n exponent = d[1];\n return exponent < 0 ? \"0.\" + new Array(-exponent).join(\"0\") + coefficient\n : coefficient.length > exponent + 1 ? coefficient.slice(0, exponent + 1) + \".\" + coefficient.slice(exponent + 1)\n : coefficient + new Array(exponent - coefficient.length + 2).join(\"0\");\n}\n","import formatDecimal from \"./formatDecimal.js\";\nimport formatPrefixAuto from \"./formatPrefixAuto.js\";\nimport formatRounded from \"./formatRounded.js\";\n\nexport default {\n \"%\": function(x, p) { return (x * 100).toFixed(p); },\n \"b\": function(x) { return Math.round(x).toString(2); },\n \"c\": function(x) { return x + \"\"; },\n \"d\": formatDecimal,\n \"e\": function(x, p) { return x.toExponential(p); },\n \"f\": function(x, p) { return x.toFixed(p); },\n \"g\": function(x, p) { return x.toPrecision(p); },\n \"o\": function(x) { return Math.round(x).toString(8); },\n \"p\": function(x, p) { return formatRounded(x * 100, p); },\n \"r\": formatRounded,\n \"s\": formatPrefixAuto,\n \"X\": function(x) { return Math.round(x).toString(16).toUpperCase(); },\n \"x\": function(x) { return Math.round(x).toString(16); }\n};\n","export default function(x) {\n return x;\n}\n","import exponent from \"./exponent.js\";\nimport formatGroup from \"./formatGroup.js\";\nimport formatNumerals from \"./formatNumerals.js\";\nimport formatSpecifier from \"./formatSpecifier.js\";\nimport formatTrim from \"./formatTrim.js\";\nimport formatTypes from \"./formatTypes.js\";\nimport {prefixExponent} from \"./formatPrefixAuto.js\";\nimport identity from \"./identity.js\";\n\nvar map = Array.prototype.map,\n prefixes = [\"y\",\"z\",\"a\",\"f\",\"p\",\"n\",\"µ\",\"m\",\"\",\"k\",\"M\",\"G\",\"T\",\"P\",\"E\",\"Z\",\"Y\"];\n\nexport default function(locale) {\n var group = locale.grouping === undefined || locale.thousands === undefined ? identity : formatGroup(map.call(locale.grouping, Number), locale.thousands + \"\"),\n currencyPrefix = locale.currency === undefined ? 
\"\" : locale.currency[0] + \"\",\n currencySuffix = locale.currency === undefined ? \"\" : locale.currency[1] + \"\",\n decimal = locale.decimal === undefined ? \".\" : locale.decimal + \"\",\n numerals = locale.numerals === undefined ? identity : formatNumerals(map.call(locale.numerals, String)),\n percent = locale.percent === undefined ? \"%\" : locale.percent + \"\",\n minus = locale.minus === undefined ? \"-\" : locale.minus + \"\",\n nan = locale.nan === undefined ? \"NaN\" : locale.nan + \"\";\n\n function newFormat(specifier) {\n specifier = formatSpecifier(specifier);\n\n var fill = specifier.fill,\n align = specifier.align,\n sign = specifier.sign,\n symbol = specifier.symbol,\n zero = specifier.zero,\n width = specifier.width,\n comma = specifier.comma,\n precision = specifier.precision,\n trim = specifier.trim,\n type = specifier.type;\n\n // The \"n\" type is an alias for \",g\".\n if (type === \"n\") comma = true, type = \"g\";\n\n // The \"\" type, and any invalid type, is an alias for \".12~g\".\n else if (!formatTypes[type]) precision === undefined && (precision = 12), trim = true, type = \"g\";\n\n // If zero fill is specified, padding goes after sign and before digits.\n if (zero || (fill === \"0\" && align === \"=\")) zero = true, fill = \"0\", align = \"=\";\n\n // Compute the prefix and suffix.\n // For SI-prefix, the suffix is lazily computed.\n var prefix = symbol === \"$\" ? currencyPrefix : symbol === \"#\" && /[boxX]/.test(type) ? \"0\" + type.toLowerCase() : \"\",\n suffix = symbol === \"$\" ? currencySuffix : /[%p]/.test(type) ? percent : \"\";\n\n // What format function should we use?\n // Is this an integer type?\n // Can this type generate exponential notation?\n var formatType = formatTypes[type],\n maybeSuffix = /[defgprs%]/.test(type);\n\n // Set the default precision if not specified,\n // or clamp the specified precision to the supported range.\n // For significant precision, it must be in [1, 21].\n // For fixed precision, it must be in [0, 20].\n precision = precision === undefined ? 6\n : /[gprs]/.test(type) ? Math.max(1, Math.min(21, precision))\n : Math.max(0, Math.min(20, precision));\n\n function format(value) {\n var valuePrefix = prefix,\n valueSuffix = suffix,\n i, n, c;\n\n if (type === \"c\") {\n valueSuffix = formatType(value) + valueSuffix;\n value = \"\";\n } else {\n value = +value;\n\n // Determine the sign. -0 is not less than 0, but 1 / -0 is!\n var valueNegative = value < 0 || 1 / value < 0;\n\n // Perform the initial formatting.\n value = isNaN(value) ? nan : formatType(Math.abs(value), precision);\n\n // Trim insignificant zeros.\n if (trim) value = formatTrim(value);\n\n // If a negative value rounds to zero after formatting, and no explicit positive sign is requested, hide the sign.\n if (valueNegative && +value === 0 && sign !== \"+\") valueNegative = false;\n\n // Compute the prefix and suffix.\n valuePrefix = (valueNegative ? (sign === \"(\" ? sign : minus) : sign === \"-\" || sign === \"(\" ? \"\" : sign) + valuePrefix;\n valueSuffix = (type === \"s\" ? prefixes[8 + prefixExponent / 3] : \"\") + valueSuffix + (valueNegative && sign === \"(\" ? \")\" : \"\");\n\n // Break the formatted value into the integer “value” part that can be\n // grouped, and fractional or exponential “suffix” part that is not.\n if (maybeSuffix) {\n i = -1, n = value.length;\n while (++i < n) {\n if (c = value.charCodeAt(i), 48 > c || c > 57) {\n valueSuffix = (c === 46 ? 
decimal + value.slice(i + 1) : value.slice(i)) + valueSuffix;\n value = value.slice(0, i);\n break;\n }\n }\n }\n }\n\n // If the fill character is not \"0\", grouping is applied before padding.\n if (comma && !zero) value = group(value, Infinity);\n\n // Compute the padding.\n var length = valuePrefix.length + value.length + valueSuffix.length,\n padding = length < width ? new Array(width - length + 1).join(fill) : \"\";\n\n // If the fill character is \"0\", grouping is applied after padding.\n if (comma && zero) value = group(padding + value, padding.length ? width - valueSuffix.length : Infinity), padding = \"\";\n\n // Reconstruct the final output based on the desired alignment.\n switch (align) {\n case \"<\": value = valuePrefix + value + valueSuffix + padding; break;\n case \"=\": value = valuePrefix + padding + value + valueSuffix; break;\n case \"^\": value = padding.slice(0, length = padding.length >> 1) + valuePrefix + value + valueSuffix + padding.slice(length); break;\n default: value = padding + valuePrefix + value + valueSuffix; break;\n }\n\n return numerals(value);\n }\n\n format.toString = function() {\n return specifier + \"\";\n };\n\n return format;\n }\n\n function formatPrefix(specifier, value) {\n var f = newFormat((specifier = formatSpecifier(specifier), specifier.type = \"f\", specifier)),\n e = Math.max(-8, Math.min(8, Math.floor(exponent(value) / 3))) * 3,\n k = Math.pow(10, -e),\n prefix = prefixes[8 + e / 3];\n return function(value) {\n return f(k * value) + prefix;\n };\n }\n\n return {\n format: newFormat,\n formatPrefix: formatPrefix\n };\n}\n","import formatLocale from \"./locale.js\";\n\nvar locale;\nexport var format;\nexport var formatPrefix;\n\ndefaultLocale({\n decimal: \".\",\n thousands: \",\",\n grouping: [3],\n currency: [\"$\", \"\"],\n minus: \"-\"\n});\n\nexport default function defaultLocale(definition) {\n locale = formatLocale(definition);\n format = locale.format;\n formatPrefix = locale.formatPrefix;\n return locale;\n}\n","export default function(grouping, thousands) {\n return function(value, width) {\n var i = value.length,\n t = [],\n j = 0,\n g = grouping[0],\n length = 0;\n\n while (i > 0 && g > 0) {\n if (length + g + 1 > width) g = Math.max(1, width - length);\n t.push(value.substring(i -= g, i + g));\n if ((length += g + 1) > width) break;\n g = grouping[j = (j + 1) % grouping.length];\n }\n\n return t.reverse().join(thousands);\n };\n}\n","export default function(numerals) {\n return function(value) {\n return value.replace(/[0-9]/g, function(i) {\n return numerals[+i];\n });\n };\n}\n","// Trims insignificant zeros, e.g., replaces 1.2000k with 1.2k.\nexport default function(s) {\n out: for (var n = s.length, i = 1, i0 = -1, i1; i < n; ++i) {\n switch (s[i]) {\n case \".\": i0 = i1 = i; break;\n case \"0\": if (i0 === 0) i0 = i; i1 = i; break;\n default: if (!+s[i]) break out; if (i0 > 0) i0 = 0; break;\n }\n }\n return i0 > 0 ? s.slice(0, i0) + s.slice(i1 + 1) : s;\n}\n","import {formatDecimalParts} from \"./formatDecimal.js\";\n\nexport default function(x) {\n return x = formatDecimalParts(Math.abs(x)), x ? x[1] : NaN;\n}\n","var t0 = new Date,\n t1 = new Date;\n\nexport default function newInterval(floori, offseti, count, field) {\n\n function interval(date) {\n return floori(date = arguments.length === 0 ? 
new Date : new Date(+date)), date;\n }\n\n interval.floor = function(date) {\n return floori(date = new Date(+date)), date;\n };\n\n interval.ceil = function(date) {\n return floori(date = new Date(date - 1)), offseti(date, 1), floori(date), date;\n };\n\n interval.round = function(date) {\n var d0 = interval(date),\n d1 = interval.ceil(date);\n return date - d0 < d1 - date ? d0 : d1;\n };\n\n interval.offset = function(date, step) {\n return offseti(date = new Date(+date), step == null ? 1 : Math.floor(step)), date;\n };\n\n interval.range = function(start, stop, step) {\n var range = [], previous;\n start = interval.ceil(start);\n step = step == null ? 1 : Math.floor(step);\n if (!(start < stop) || !(step > 0)) return range; // also handles Invalid Date\n do range.push(previous = new Date(+start)), offseti(start, step), floori(start);\n while (previous < start && start < stop);\n return range;\n };\n\n interval.filter = function(test) {\n return newInterval(function(date) {\n if (date >= date) while (floori(date), !test(date)) date.setTime(date - 1);\n }, function(date, step) {\n if (date >= date) {\n if (step < 0) while (++step <= 0) {\n while (offseti(date, -1), !test(date)) {} // eslint-disable-line no-empty\n } else while (--step >= 0) {\n while (offseti(date, +1), !test(date)) {} // eslint-disable-line no-empty\n }\n }\n });\n };\n\n if (count) {\n interval.count = function(start, end) {\n t0.setTime(+start), t1.setTime(+end);\n floori(t0), floori(t1);\n return Math.floor(count(t0, t1));\n };\n\n interval.every = function(step) {\n step = Math.floor(step);\n return !isFinite(step) || !(step > 0) ? null\n : !(step > 1) ? interval\n : interval.filter(field\n ? function(d) { return field(d) % step === 0; }\n : function(d) { return interval.count(0, d) % step === 0; });\n };\n }\n\n return interval;\n}\n","export const durationSecond = 1000;\nexport const durationMinute = durationSecond * 60;\nexport const durationHour = durationMinute * 60;\nexport const durationDay = durationHour * 24;\nexport const durationWeek = durationDay * 7;\nexport const durationMonth = durationDay * 30;\nexport const durationYear = durationDay * 365;\n","import interval from \"./interval.js\";\nimport {durationWeek} from \"./duration.js\";\n\nfunction utcWeekday(i) {\n return interval(function(date) {\n date.setUTCDate(date.getUTCDate() - (date.getUTCDay() + 7 - i) % 7);\n date.setUTCHours(0, 0, 0, 0);\n }, function(date, step) {\n date.setUTCDate(date.getUTCDate() + step * 7);\n }, function(start, end) {\n return (end - start) / durationWeek;\n });\n}\n\nexport var utcSunday = utcWeekday(0);\nexport var utcMonday = utcWeekday(1);\nexport var utcTuesday = utcWeekday(2);\nexport var utcWednesday = utcWeekday(3);\nexport var utcThursday = utcWeekday(4);\nexport var utcFriday = utcWeekday(5);\nexport var utcSaturday = utcWeekday(6);\n\nexport var utcSundays = utcSunday.range;\nexport var utcMondays = utcMonday.range;\nexport var utcTuesdays = utcTuesday.range;\nexport var utcWednesdays = utcWednesday.range;\nexport var utcThursdays = utcThursday.range;\nexport var utcFridays = utcFriday.range;\nexport var utcSaturdays = utcSaturday.range;\n","import interval from \"./interval.js\";\nimport {durationDay} from \"./duration.js\";\n\nvar utcDay = interval(function(date) {\n date.setUTCHours(0, 0, 0, 0);\n}, function(date, step) {\n date.setUTCDate(date.getUTCDate() + step);\n}, function(start, end) {\n return (end - start) / durationDay;\n}, function(date) {\n return date.getUTCDate() - 1;\n});\n\nexport default 
utcDay;\nexport var utcDays = utcDay.range;\n","import interval from \"./interval.js\";\nimport {durationMinute, durationWeek} from \"./duration.js\";\n\nfunction weekday(i) {\n return interval(function(date) {\n date.setDate(date.getDate() - (date.getDay() + 7 - i) % 7);\n date.setHours(0, 0, 0, 0);\n }, function(date, step) {\n date.setDate(date.getDate() + step * 7);\n }, function(start, end) {\n return (end - start - (end.getTimezoneOffset() - start.getTimezoneOffset()) * durationMinute) / durationWeek;\n });\n}\n\nexport var sunday = weekday(0);\nexport var monday = weekday(1);\nexport var tuesday = weekday(2);\nexport var wednesday = weekday(3);\nexport var thursday = weekday(4);\nexport var friday = weekday(5);\nexport var saturday = weekday(6);\n\nexport var sundays = sunday.range;\nexport var mondays = monday.range;\nexport var tuesdays = tuesday.range;\nexport var wednesdays = wednesday.range;\nexport var thursdays = thursday.range;\nexport var fridays = friday.range;\nexport var saturdays = saturday.range;\n","import interval from \"./interval.js\";\nimport {durationDay, durationMinute} from \"./duration.js\";\n\nvar day = interval(\n date => date.setHours(0, 0, 0, 0),\n (date, step) => date.setDate(date.getDate() + step),\n (start, end) => (end - start - (end.getTimezoneOffset() - start.getTimezoneOffset()) * durationMinute) / durationDay,\n date => date.getDate() - 1\n);\n\nexport default day;\nexport var days = day.range;\n","import interval from \"./interval.js\";\n\nvar year = interval(function(date) {\n date.setMonth(0, 1);\n date.setHours(0, 0, 0, 0);\n}, function(date, step) {\n date.setFullYear(date.getFullYear() + step);\n}, function(start, end) {\n return end.getFullYear() - start.getFullYear();\n}, function(date) {\n return date.getFullYear();\n});\n\n// An optimized implementation for this simple case.\nyear.every = function(k) {\n return !isFinite(k = Math.floor(k)) || !(k > 0) ? null : interval(function(date) {\n date.setFullYear(Math.floor(date.getFullYear() / k) * k);\n date.setMonth(0, 1);\n date.setHours(0, 0, 0, 0);\n }, function(date, step) {\n date.setFullYear(date.getFullYear() + step * k);\n });\n};\n\nexport default year;\nexport var years = year.range;\n","import interval from \"./interval.js\";\n\nvar utcYear = interval(function(date) {\n date.setUTCMonth(0, 1);\n date.setUTCHours(0, 0, 0, 0);\n}, function(date, step) {\n date.setUTCFullYear(date.getUTCFullYear() + step);\n}, function(start, end) {\n return end.getUTCFullYear() - start.getUTCFullYear();\n}, function(date) {\n return date.getUTCFullYear();\n});\n\n// An optimized implementation for this simple case.\nutcYear.every = function(k) {\n return !isFinite(k = Math.floor(k)) || !(k > 0) ? 
null : interval(function(date) {\n date.setUTCFullYear(Math.floor(date.getUTCFullYear() / k) * k);\n date.setUTCMonth(0, 1);\n date.setUTCHours(0, 0, 0, 0);\n }, function(date, step) {\n date.setUTCFullYear(date.getUTCFullYear() + step * k);\n });\n};\n\nexport default utcYear;\nexport var utcYears = utcYear.range;\n","import {\n timeDay,\n timeSunday,\n timeMonday,\n timeThursday,\n timeYear,\n utcDay,\n utcSunday,\n utcMonday,\n utcThursday,\n utcYear\n} from \"d3-time\";\n\nfunction localDate(d) {\n if (0 <= d.y && d.y < 100) {\n var date = new Date(-1, d.m, d.d, d.H, d.M, d.S, d.L);\n date.setFullYear(d.y);\n return date;\n }\n return new Date(d.y, d.m, d.d, d.H, d.M, d.S, d.L);\n}\n\nfunction utcDate(d) {\n if (0 <= d.y && d.y < 100) {\n var date = new Date(Date.UTC(-1, d.m, d.d, d.H, d.M, d.S, d.L));\n date.setUTCFullYear(d.y);\n return date;\n }\n return new Date(Date.UTC(d.y, d.m, d.d, d.H, d.M, d.S, d.L));\n}\n\nfunction newDate(y, m, d) {\n return {y: y, m: m, d: d, H: 0, M: 0, S: 0, L: 0};\n}\n\nexport default function formatLocale(locale) {\n var locale_dateTime = locale.dateTime,\n locale_date = locale.date,\n locale_time = locale.time,\n locale_periods = locale.periods,\n locale_weekdays = locale.days,\n locale_shortWeekdays = locale.shortDays,\n locale_months = locale.months,\n locale_shortMonths = locale.shortMonths;\n\n var periodRe = formatRe(locale_periods),\n periodLookup = formatLookup(locale_periods),\n weekdayRe = formatRe(locale_weekdays),\n weekdayLookup = formatLookup(locale_weekdays),\n shortWeekdayRe = formatRe(locale_shortWeekdays),\n shortWeekdayLookup = formatLookup(locale_shortWeekdays),\n monthRe = formatRe(locale_months),\n monthLookup = formatLookup(locale_months),\n shortMonthRe = formatRe(locale_shortMonths),\n shortMonthLookup = formatLookup(locale_shortMonths);\n\n var formats = {\n \"a\": formatShortWeekday,\n \"A\": formatWeekday,\n \"b\": formatShortMonth,\n \"B\": formatMonth,\n \"c\": null,\n \"d\": formatDayOfMonth,\n \"e\": formatDayOfMonth,\n \"f\": formatMicroseconds,\n \"g\": formatYearISO,\n \"G\": formatFullYearISO,\n \"H\": formatHour24,\n \"I\": formatHour12,\n \"j\": formatDayOfYear,\n \"L\": formatMilliseconds,\n \"m\": formatMonthNumber,\n \"M\": formatMinutes,\n \"p\": formatPeriod,\n \"q\": formatQuarter,\n \"Q\": formatUnixTimestamp,\n \"s\": formatUnixTimestampSeconds,\n \"S\": formatSeconds,\n \"u\": formatWeekdayNumberMonday,\n \"U\": formatWeekNumberSunday,\n \"V\": formatWeekNumberISO,\n \"w\": formatWeekdayNumberSunday,\n \"W\": formatWeekNumberMonday,\n \"x\": null,\n \"X\": null,\n \"y\": formatYear,\n \"Y\": formatFullYear,\n \"Z\": formatZone,\n \"%\": formatLiteralPercent\n };\n\n var utcFormats = {\n \"a\": formatUTCShortWeekday,\n \"A\": formatUTCWeekday,\n \"b\": formatUTCShortMonth,\n \"B\": formatUTCMonth,\n \"c\": null,\n \"d\": formatUTCDayOfMonth,\n \"e\": formatUTCDayOfMonth,\n \"f\": formatUTCMicroseconds,\n \"g\": formatUTCYearISO,\n \"G\": formatUTCFullYearISO,\n \"H\": formatUTCHour24,\n \"I\": formatUTCHour12,\n \"j\": formatUTCDayOfYear,\n \"L\": formatUTCMilliseconds,\n \"m\": formatUTCMonthNumber,\n \"M\": formatUTCMinutes,\n \"p\": formatUTCPeriod,\n \"q\": formatUTCQuarter,\n \"Q\": formatUnixTimestamp,\n \"s\": formatUnixTimestampSeconds,\n \"S\": formatUTCSeconds,\n \"u\": formatUTCWeekdayNumberMonday,\n \"U\": formatUTCWeekNumberSunday,\n \"V\": formatUTCWeekNumberISO,\n \"w\": formatUTCWeekdayNumberSunday,\n \"W\": formatUTCWeekNumberMonday,\n \"x\": null,\n \"X\": null,\n \"y\": formatUTCYear,\n 
\"Y\": formatUTCFullYear,\n \"Z\": formatUTCZone,\n \"%\": formatLiteralPercent\n };\n\n var parses = {\n \"a\": parseShortWeekday,\n \"A\": parseWeekday,\n \"b\": parseShortMonth,\n \"B\": parseMonth,\n \"c\": parseLocaleDateTime,\n \"d\": parseDayOfMonth,\n \"e\": parseDayOfMonth,\n \"f\": parseMicroseconds,\n \"g\": parseYear,\n \"G\": parseFullYear,\n \"H\": parseHour24,\n \"I\": parseHour24,\n \"j\": parseDayOfYear,\n \"L\": parseMilliseconds,\n \"m\": parseMonthNumber,\n \"M\": parseMinutes,\n \"p\": parsePeriod,\n \"q\": parseQuarter,\n \"Q\": parseUnixTimestamp,\n \"s\": parseUnixTimestampSeconds,\n \"S\": parseSeconds,\n \"u\": parseWeekdayNumberMonday,\n \"U\": parseWeekNumberSunday,\n \"V\": parseWeekNumberISO,\n \"w\": parseWeekdayNumberSunday,\n \"W\": parseWeekNumberMonday,\n \"x\": parseLocaleDate,\n \"X\": parseLocaleTime,\n \"y\": parseYear,\n \"Y\": parseFullYear,\n \"Z\": parseZone,\n \"%\": parseLiteralPercent\n };\n\n // These recursive directive definitions must be deferred.\n formats.x = newFormat(locale_date, formats);\n formats.X = newFormat(locale_time, formats);\n formats.c = newFormat(locale_dateTime, formats);\n utcFormats.x = newFormat(locale_date, utcFormats);\n utcFormats.X = newFormat(locale_time, utcFormats);\n utcFormats.c = newFormat(locale_dateTime, utcFormats);\n\n function newFormat(specifier, formats) {\n return function(date) {\n var string = [],\n i = -1,\n j = 0,\n n = specifier.length,\n c,\n pad,\n format;\n\n if (!(date instanceof Date)) date = new Date(+date);\n\n while (++i < n) {\n if (specifier.charCodeAt(i) === 37) {\n string.push(specifier.slice(j, i));\n if ((pad = pads[c = specifier.charAt(++i)]) != null) c = specifier.charAt(++i);\n else pad = c === \"e\" ? \" \" : \"0\";\n if (format = formats[c]) c = format(date, pad);\n string.push(c);\n j = i + 1;\n }\n }\n\n string.push(specifier.slice(j, i));\n return string.join(\"\");\n };\n }\n\n function newParse(specifier, Z) {\n return function(string) {\n var d = newDate(1900, undefined, 1),\n i = parseSpecifier(d, specifier, string += \"\", 0),\n week, day;\n if (i != string.length) return null;\n\n // If a UNIX timestamp is specified, return it.\n if (\"Q\" in d) return new Date(d.Q);\n if (\"s\" in d) return new Date(d.s * 1000 + (\"L\" in d ? d.L : 0));\n\n // If this is utcParse, never use the local timezone.\n if (Z && !(\"Z\" in d)) d.Z = 0;\n\n // The am-pm flag is 0 for AM, and 1 for PM.\n if (\"p\" in d) d.H = d.H % 12 + d.p * 12;\n\n // If the month was not specified, inherit from the quarter.\n if (d.m === undefined) d.m = \"q\" in d ? d.q : 0;\n\n // Convert day-of-week and week-of-year to day-of-year.\n if (\"V\" in d) {\n if (d.V < 1 || d.V > 53) return null;\n if (!(\"w\" in d)) d.w = 1;\n if (\"Z\" in d) {\n week = utcDate(newDate(d.y, 0, 1)), day = week.getUTCDay();\n week = day > 4 || day === 0 ? utcMonday.ceil(week) : utcMonday(week);\n week = utcDay.offset(week, (d.V - 1) * 7);\n d.y = week.getUTCFullYear();\n d.m = week.getUTCMonth();\n d.d = week.getUTCDate() + (d.w + 6) % 7;\n } else {\n week = localDate(newDate(d.y, 0, 1)), day = week.getDay();\n week = day > 4 || day === 0 ? timeMonday.ceil(week) : timeMonday(week);\n week = timeDay.offset(week, (d.V - 1) * 7);\n d.y = week.getFullYear();\n d.m = week.getMonth();\n d.d = week.getDate() + (d.w + 6) % 7;\n }\n } else if (\"W\" in d || \"U\" in d) {\n if (!(\"w\" in d)) d.w = \"u\" in d ? d.u % 7 : \"W\" in d ? 1 : 0;\n day = \"Z\" in d ? 
utcDate(newDate(d.y, 0, 1)).getUTCDay() : localDate(newDate(d.y, 0, 1)).getDay();\n d.m = 0;\n d.d = \"W\" in d ? (d.w + 6) % 7 + d.W * 7 - (day + 5) % 7 : d.w + d.U * 7 - (day + 6) % 7;\n }\n\n // If a time zone is specified, all fields are interpreted as UTC and then\n // offset according to the specified time zone.\n if (\"Z\" in d) {\n d.H += d.Z / 100 | 0;\n d.M += d.Z % 100;\n return utcDate(d);\n }\n\n // Otherwise, all fields are in local time.\n return localDate(d);\n };\n }\n\n function parseSpecifier(d, specifier, string, j) {\n var i = 0,\n n = specifier.length,\n m = string.length,\n c,\n parse;\n\n while (i < n) {\n if (j >= m) return -1;\n c = specifier.charCodeAt(i++);\n if (c === 37) {\n c = specifier.charAt(i++);\n parse = parses[c in pads ? specifier.charAt(i++) : c];\n if (!parse || ((j = parse(d, string, j)) < 0)) return -1;\n } else if (c != string.charCodeAt(j++)) {\n return -1;\n }\n }\n\n return j;\n }\n\n function parsePeriod(d, string, i) {\n var n = periodRe.exec(string.slice(i));\n return n ? (d.p = periodLookup.get(n[0].toLowerCase()), i + n[0].length) : -1;\n }\n\n function parseShortWeekday(d, string, i) {\n var n = shortWeekdayRe.exec(string.slice(i));\n return n ? (d.w = shortWeekdayLookup.get(n[0].toLowerCase()), i + n[0].length) : -1;\n }\n\n function parseWeekday(d, string, i) {\n var n = weekdayRe.exec(string.slice(i));\n return n ? (d.w = weekdayLookup.get(n[0].toLowerCase()), i + n[0].length) : -1;\n }\n\n function parseShortMonth(d, string, i) {\n var n = shortMonthRe.exec(string.slice(i));\n return n ? (d.m = shortMonthLookup.get(n[0].toLowerCase()), i + n[0].length) : -1;\n }\n\n function parseMonth(d, string, i) {\n var n = monthRe.exec(string.slice(i));\n return n ? (d.m = monthLookup.get(n[0].toLowerCase()), i + n[0].length) : -1;\n }\n\n function parseLocaleDateTime(d, string, i) {\n return parseSpecifier(d, locale_dateTime, string, i);\n }\n\n function parseLocaleDate(d, string, i) {\n return parseSpecifier(d, locale_date, string, i);\n }\n\n function parseLocaleTime(d, string, i) {\n return parseSpecifier(d, locale_time, string, i);\n }\n\n function formatShortWeekday(d) {\n return locale_shortWeekdays[d.getDay()];\n }\n\n function formatWeekday(d) {\n return locale_weekdays[d.getDay()];\n }\n\n function formatShortMonth(d) {\n return locale_shortMonths[d.getMonth()];\n }\n\n function formatMonth(d) {\n return locale_months[d.getMonth()];\n }\n\n function formatPeriod(d) {\n return locale_periods[+(d.getHours() >= 12)];\n }\n\n function formatQuarter(d) {\n return 1 + ~~(d.getMonth() / 3);\n }\n\n function formatUTCShortWeekday(d) {\n return locale_shortWeekdays[d.getUTCDay()];\n }\n\n function formatUTCWeekday(d) {\n return locale_weekdays[d.getUTCDay()];\n }\n\n function formatUTCShortMonth(d) {\n return locale_shortMonths[d.getUTCMonth()];\n }\n\n function formatUTCMonth(d) {\n return locale_months[d.getUTCMonth()];\n }\n\n function formatUTCPeriod(d) {\n return locale_periods[+(d.getUTCHours() >= 12)];\n }\n\n function formatUTCQuarter(d) {\n return 1 + ~~(d.getUTCMonth() / 3);\n }\n\n return {\n format: function(specifier) {\n var f = newFormat(specifier += \"\", formats);\n f.toString = function() { return specifier; };\n return f;\n },\n parse: function(specifier) {\n var p = newParse(specifier += \"\", false);\n p.toString = function() { return specifier; };\n return p;\n },\n utcFormat: function(specifier) {\n var f = newFormat(specifier += \"\", utcFormats);\n f.toString = function() { return specifier; };\n return f;\n },\n 
utcParse: function(specifier) {\n var p = newParse(specifier += \"\", true);\n p.toString = function() { return specifier; };\n return p;\n }\n };\n}\n\nvar pads = {\"-\": \"\", \"_\": \" \", \"0\": \"0\"},\n numberRe = /^\\s*\\d+/, // note: ignores next directive\n percentRe = /^%/,\n requoteRe = /[\\\\^$*+?|[\\]().{}]/g;\n\nfunction pad(value, fill, width) {\n var sign = value < 0 ? \"-\" : \"\",\n string = (sign ? -value : value) + \"\",\n length = string.length;\n return sign + (length < width ? new Array(width - length + 1).join(fill) + string : string);\n}\n\nfunction requote(s) {\n return s.replace(requoteRe, \"\\\\$&\");\n}\n\nfunction formatRe(names) {\n return new RegExp(\"^(?:\" + names.map(requote).join(\"|\") + \")\", \"i\");\n}\n\nfunction formatLookup(names) {\n return new Map(names.map((name, i) => [name.toLowerCase(), i]));\n}\n\nfunction parseWeekdayNumberSunday(d, string, i) {\n var n = numberRe.exec(string.slice(i, i + 1));\n return n ? (d.w = +n[0], i + n[0].length) : -1;\n}\n\nfunction parseWeekdayNumberMonday(d, string, i) {\n var n = numberRe.exec(string.slice(i, i + 1));\n return n ? (d.u = +n[0], i + n[0].length) : -1;\n}\n\nfunction parseWeekNumberSunday(d, string, i) {\n var n = numberRe.exec(string.slice(i, i + 2));\n return n ? (d.U = +n[0], i + n[0].length) : -1;\n}\n\nfunction parseWeekNumberISO(d, string, i) {\n var n = numberRe.exec(string.slice(i, i + 2));\n return n ? (d.V = +n[0], i + n[0].length) : -1;\n}\n\nfunction parseWeekNumberMonday(d, string, i) {\n var n = numberRe.exec(string.slice(i, i + 2));\n return n ? (d.W = +n[0], i + n[0].length) : -1;\n}\n\nfunction parseFullYear(d, string, i) {\n var n = numberRe.exec(string.slice(i, i + 4));\n return n ? (d.y = +n[0], i + n[0].length) : -1;\n}\n\nfunction parseYear(d, string, i) {\n var n = numberRe.exec(string.slice(i, i + 2));\n return n ? (d.y = +n[0] + (+n[0] > 68 ? 1900 : 2000), i + n[0].length) : -1;\n}\n\nfunction parseZone(d, string, i) {\n var n = /^(Z)|([+-]\\d\\d)(?::?(\\d\\d))?/.exec(string.slice(i, i + 6));\n return n ? (d.Z = n[1] ? 0 : -(n[2] + (n[3] || \"00\")), i + n[0].length) : -1;\n}\n\nfunction parseQuarter(d, string, i) {\n var n = numberRe.exec(string.slice(i, i + 1));\n return n ? (d.q = n[0] * 3 - 3, i + n[0].length) : -1;\n}\n\nfunction parseMonthNumber(d, string, i) {\n var n = numberRe.exec(string.slice(i, i + 2));\n return n ? (d.m = n[0] - 1, i + n[0].length) : -1;\n}\n\nfunction parseDayOfMonth(d, string, i) {\n var n = numberRe.exec(string.slice(i, i + 2));\n return n ? (d.d = +n[0], i + n[0].length) : -1;\n}\n\nfunction parseDayOfYear(d, string, i) {\n var n = numberRe.exec(string.slice(i, i + 3));\n return n ? (d.m = 0, d.d = +n[0], i + n[0].length) : -1;\n}\n\nfunction parseHour24(d, string, i) {\n var n = numberRe.exec(string.slice(i, i + 2));\n return n ? (d.H = +n[0], i + n[0].length) : -1;\n}\n\nfunction parseMinutes(d, string, i) {\n var n = numberRe.exec(string.slice(i, i + 2));\n return n ? (d.M = +n[0], i + n[0].length) : -1;\n}\n\nfunction parseSeconds(d, string, i) {\n var n = numberRe.exec(string.slice(i, i + 2));\n return n ? (d.S = +n[0], i + n[0].length) : -1;\n}\n\nfunction parseMilliseconds(d, string, i) {\n var n = numberRe.exec(string.slice(i, i + 3));\n return n ? (d.L = +n[0], i + n[0].length) : -1;\n}\n\nfunction parseMicroseconds(d, string, i) {\n var n = numberRe.exec(string.slice(i, i + 6));\n return n ? 
(d.L = Math.floor(n[0] / 1000), i + n[0].length) : -1;\n}\n\nfunction parseLiteralPercent(d, string, i) {\n var n = percentRe.exec(string.slice(i, i + 1));\n return n ? i + n[0].length : -1;\n}\n\nfunction parseUnixTimestamp(d, string, i) {\n var n = numberRe.exec(string.slice(i));\n return n ? (d.Q = +n[0], i + n[0].length) : -1;\n}\n\nfunction parseUnixTimestampSeconds(d, string, i) {\n var n = numberRe.exec(string.slice(i));\n return n ? (d.s = +n[0], i + n[0].length) : -1;\n}\n\nfunction formatDayOfMonth(d, p) {\n return pad(d.getDate(), p, 2);\n}\n\nfunction formatHour24(d, p) {\n return pad(d.getHours(), p, 2);\n}\n\nfunction formatHour12(d, p) {\n return pad(d.getHours() % 12 || 12, p, 2);\n}\n\nfunction formatDayOfYear(d, p) {\n return pad(1 + timeDay.count(timeYear(d), d), p, 3);\n}\n\nfunction formatMilliseconds(d, p) {\n return pad(d.getMilliseconds(), p, 3);\n}\n\nfunction formatMicroseconds(d, p) {\n return formatMilliseconds(d, p) + \"000\";\n}\n\nfunction formatMonthNumber(d, p) {\n return pad(d.getMonth() + 1, p, 2);\n}\n\nfunction formatMinutes(d, p) {\n return pad(d.getMinutes(), p, 2);\n}\n\nfunction formatSeconds(d, p) {\n return pad(d.getSeconds(), p, 2);\n}\n\nfunction formatWeekdayNumberMonday(d) {\n var day = d.getDay();\n return day === 0 ? 7 : day;\n}\n\nfunction formatWeekNumberSunday(d, p) {\n return pad(timeSunday.count(timeYear(d) - 1, d), p, 2);\n}\n\nfunction dISO(d) {\n var day = d.getDay();\n return (day >= 4 || day === 0) ? timeThursday(d) : timeThursday.ceil(d);\n}\n\nfunction formatWeekNumberISO(d, p) {\n d = dISO(d);\n return pad(timeThursday.count(timeYear(d), d) + (timeYear(d).getDay() === 4), p, 2);\n}\n\nfunction formatWeekdayNumberSunday(d) {\n return d.getDay();\n}\n\nfunction formatWeekNumberMonday(d, p) {\n return pad(timeMonday.count(timeYear(d) - 1, d), p, 2);\n}\n\nfunction formatYear(d, p) {\n return pad(d.getFullYear() % 100, p, 2);\n}\n\nfunction formatYearISO(d, p) {\n d = dISO(d);\n return pad(d.getFullYear() % 100, p, 2);\n}\n\nfunction formatFullYear(d, p) {\n return pad(d.getFullYear() % 10000, p, 4);\n}\n\nfunction formatFullYearISO(d, p) {\n var day = d.getDay();\n d = (day >= 4 || day === 0) ? timeThursday(d) : timeThursday.ceil(d);\n return pad(d.getFullYear() % 10000, p, 4);\n}\n\nfunction formatZone(d) {\n var z = d.getTimezoneOffset();\n return (z > 0 ? \"-\" : (z *= -1, \"+\"))\n + pad(z / 60 | 0, \"0\", 2)\n + pad(z % 60, \"0\", 2);\n}\n\nfunction formatUTCDayOfMonth(d, p) {\n return pad(d.getUTCDate(), p, 2);\n}\n\nfunction formatUTCHour24(d, p) {\n return pad(d.getUTCHours(), p, 2);\n}\n\nfunction formatUTCHour12(d, p) {\n return pad(d.getUTCHours() % 12 || 12, p, 2);\n}\n\nfunction formatUTCDayOfYear(d, p) {\n return pad(1 + utcDay.count(utcYear(d), d), p, 3);\n}\n\nfunction formatUTCMilliseconds(d, p) {\n return pad(d.getUTCMilliseconds(), p, 3);\n}\n\nfunction formatUTCMicroseconds(d, p) {\n return formatUTCMilliseconds(d, p) + \"000\";\n}\n\nfunction formatUTCMonthNumber(d, p) {\n return pad(d.getUTCMonth() + 1, p, 2);\n}\n\nfunction formatUTCMinutes(d, p) {\n return pad(d.getUTCMinutes(), p, 2);\n}\n\nfunction formatUTCSeconds(d, p) {\n return pad(d.getUTCSeconds(), p, 2);\n}\n\nfunction formatUTCWeekdayNumberMonday(d) {\n var dow = d.getUTCDay();\n return dow === 0 ? 7 : dow;\n}\n\nfunction formatUTCWeekNumberSunday(d, p) {\n return pad(utcSunday.count(utcYear(d) - 1, d), p, 2);\n}\n\nfunction UTCdISO(d) {\n var day = d.getUTCDay();\n return (day >= 4 || day === 0) ? 
utcThursday(d) : utcThursday.ceil(d);\n}\n\nfunction formatUTCWeekNumberISO(d, p) {\n d = UTCdISO(d);\n return pad(utcThursday.count(utcYear(d), d) + (utcYear(d).getUTCDay() === 4), p, 2);\n}\n\nfunction formatUTCWeekdayNumberSunday(d) {\n return d.getUTCDay();\n}\n\nfunction formatUTCWeekNumberMonday(d, p) {\n return pad(utcMonday.count(utcYear(d) - 1, d), p, 2);\n}\n\nfunction formatUTCYear(d, p) {\n return pad(d.getUTCFullYear() % 100, p, 2);\n}\n\nfunction formatUTCYearISO(d, p) {\n d = UTCdISO(d);\n return pad(d.getUTCFullYear() % 100, p, 2);\n}\n\nfunction formatUTCFullYear(d, p) {\n return pad(d.getUTCFullYear() % 10000, p, 4);\n}\n\nfunction formatUTCFullYearISO(d, p) {\n var day = d.getUTCDay();\n d = (day >= 4 || day === 0) ? utcThursday(d) : utcThursday.ceil(d);\n return pad(d.getUTCFullYear() % 10000, p, 4);\n}\n\nfunction formatUTCZone() {\n return \"+0000\";\n}\n\nfunction formatLiteralPercent() {\n return \"%\";\n}\n\nfunction formatUnixTimestamp(d) {\n return +d;\n}\n\nfunction formatUnixTimestampSeconds(d) {\n return Math.floor(+d / 1000);\n}\n","import formatLocale from \"./locale.js\";\n\nvar locale;\nexport var timeFormat;\nexport var timeParse;\nexport var utcFormat;\nexport var utcParse;\n\ndefaultLocale({\n dateTime: \"%x, %X\",\n date: \"%-m/%-d/%Y\",\n time: \"%-I:%M:%S %p\",\n periods: [\"AM\", \"PM\"],\n days: [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"],\n shortDays: [\"Sun\", \"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\"],\n months: [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"],\n shortMonths: [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"]\n});\n\nexport default function defaultLocale(definition) {\n locale = formatLocale(definition);\n timeFormat = locale.format;\n timeParse = locale.parse;\n utcFormat = locale.utcFormat;\n utcParse = locale.utcParse;\n return locale;\n}\n","/**\n * Copyright (c) 2013-present, Facebook, Inc.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\nfunction componentWillMount() {\n // Call this.constructor.gDSFP to support sub-classes.\n var state = this.constructor.getDerivedStateFromProps(this.props, this.state);\n if (state !== null && state !== undefined) {\n this.setState(state);\n }\n}\n\nfunction componentWillReceiveProps(nextProps) {\n // Call this.constructor.gDSFP to support sub-classes.\n // Use the setState() updater to ensure state isn't stale in certain edge cases.\n function updater(prevState) {\n var state = this.constructor.getDerivedStateFromProps(nextProps, prevState);\n return state !== null && state !== undefined ? 
state : null;\n }\n // Binding \"this\" is important for shallow renderer support.\n this.setState(updater.bind(this));\n}\n\nfunction componentWillUpdate(nextProps, nextState) {\n try {\n var prevProps = this.props;\n var prevState = this.state;\n this.props = nextProps;\n this.state = nextState;\n this.__reactInternalSnapshotFlag = true;\n this.__reactInternalSnapshot = this.getSnapshotBeforeUpdate(\n prevProps,\n prevState\n );\n } finally {\n this.props = prevProps;\n this.state = prevState;\n }\n}\n\n// React may warn about cWM/cWRP/cWU methods being deprecated.\n// Add a flag to suppress these warnings for this special case.\ncomponentWillMount.__suppressDeprecationWarning = true;\ncomponentWillReceiveProps.__suppressDeprecationWarning = true;\ncomponentWillUpdate.__suppressDeprecationWarning = true;\n\nfunction polyfill(Component) {\n var prototype = Component.prototype;\n\n if (!prototype || !prototype.isReactComponent) {\n throw new Error('Can only polyfill class components');\n }\n\n if (\n typeof Component.getDerivedStateFromProps !== 'function' &&\n typeof prototype.getSnapshotBeforeUpdate !== 'function'\n ) {\n return Component;\n }\n\n // If new component APIs are defined, \"unsafe\" lifecycles won't be called.\n // Error if any of these lifecycles are present,\n // Because they would work differently between older and newer (16.3+) versions of React.\n var foundWillMountName = null;\n var foundWillReceivePropsName = null;\n var foundWillUpdateName = null;\n if (typeof prototype.componentWillMount === 'function') {\n foundWillMountName = 'componentWillMount';\n } else if (typeof prototype.UNSAFE_componentWillMount === 'function') {\n foundWillMountName = 'UNSAFE_componentWillMount';\n }\n if (typeof prototype.componentWillReceiveProps === 'function') {\n foundWillReceivePropsName = 'componentWillReceiveProps';\n } else if (typeof prototype.UNSAFE_componentWillReceiveProps === 'function') {\n foundWillReceivePropsName = 'UNSAFE_componentWillReceiveProps';\n }\n if (typeof prototype.componentWillUpdate === 'function') {\n foundWillUpdateName = 'componentWillUpdate';\n } else if (typeof prototype.UNSAFE_componentWillUpdate === 'function') {\n foundWillUpdateName = 'UNSAFE_componentWillUpdate';\n }\n if (\n foundWillMountName !== null ||\n foundWillReceivePropsName !== null ||\n foundWillUpdateName !== null\n ) {\n var componentName = Component.displayName || Component.name;\n var newApiName =\n typeof Component.getDerivedStateFromProps === 'function'\n ? 'getDerivedStateFromProps()'\n : 'getSnapshotBeforeUpdate()';\n\n throw Error(\n 'Unsafe legacy lifecycles will not be called for components using new component APIs.\\n\\n' +\n componentName +\n ' uses ' +\n newApiName +\n ' but also contains the following legacy lifecycles:' +\n (foundWillMountName !== null ? '\\n ' + foundWillMountName : '') +\n (foundWillReceivePropsName !== null\n ? '\\n ' + foundWillReceivePropsName\n : '') +\n (foundWillUpdateName !== null ? '\\n ' + foundWillUpdateName : '') +\n '\\n\\nThe above lifecycles should be removed. 
Learn more about this warning here:\\n' +\n 'https://fb.me/react-async-component-lifecycle-hooks'\n );\n }\n\n // React <= 16.2 does not support static getDerivedStateFromProps.\n // As a workaround, use cWM and cWRP to invoke the new static lifecycle.\n // Newer versions of React will ignore these lifecycles if gDSFP exists.\n if (typeof Component.getDerivedStateFromProps === 'function') {\n prototype.componentWillMount = componentWillMount;\n prototype.componentWillReceiveProps = componentWillReceiveProps;\n }\n\n // React <= 16.2 does not support getSnapshotBeforeUpdate.\n // As a workaround, use cWU to invoke the new lifecycle.\n // Newer versions of React will ignore that lifecycle if gSBU exists.\n if (typeof prototype.getSnapshotBeforeUpdate === 'function') {\n if (typeof prototype.componentDidUpdate !== 'function') {\n throw new Error(\n 'Cannot polyfill getSnapshotBeforeUpdate() for components that do not define componentDidUpdate() on the prototype'\n );\n }\n\n prototype.componentWillUpdate = componentWillUpdate;\n\n var componentDidUpdate = prototype.componentDidUpdate;\n\n prototype.componentDidUpdate = function componentDidUpdatePolyfill(\n prevProps,\n prevState,\n maybeSnapshot\n ) {\n // 16.3+ will not execute our will-update method;\n // It will pass a snapshot value to did-update though.\n // Older versions will require our polyfilled will-update value.\n // We need to handle both cases, but can't just check for the presence of \"maybeSnapshot\",\n // Because for <= 15.x versions this might be a \"prevContext\" object.\n // We also can't just check \"__reactInternalSnapshot\",\n // Because get-snapshot might return a falsy value.\n // So check for the explicit __reactInternalSnapshotFlag flag to determine behavior.\n var snapshot = this.__reactInternalSnapshotFlag\n ? 
this.__reactInternalSnapshot\n : maybeSnapshot;\n\n componentDidUpdate.call(this, prevProps, prevState, snapshot);\n };\n }\n\n return Component;\n}\n\nexport { polyfill };\n","/**\n * Copyright (c) 2013-present, Facebook, Inc.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n *\n * @providesModule shallowEqual\n * @typechecks\n */\n\n/* eslint-disable no-self-compare */\n\nconst hasOwnProperty = Object.prototype.hasOwnProperty\n\n/**\n * inlined Object.is polyfill to avoid requiring consumers ship their own\n * https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/is\n */\nfunction is(x: unknown, y: unknown): boolean {\n // SameValue algorithm\n if (x === y) {\n // Steps 1-5, 7-10\n // Steps 6.b-6.e: +0 != -0\n // Added the nonzero y check to make Flow happy, but it is redundant\n return x !== 0 || y !== 0 || 1 / x === 1 / y\n }\n // Step 6.a: NaN == NaN\n return x !== x && y !== y\n}\n\n/**\n * Performs equality by iterating through keys on an object and returning false\n * when any key has values which are not strictly equal between the arguments.\n * Returns true when the values of all keys are strictly equal.\n */\nexport function shallowEqual(\n objA: Record,\n objB: Record\n): boolean {\n if (is(objA, objB)) {\n return true\n }\n\n if (typeof objA !== 'object' || objA === null || typeof objB !== 'object' || objB === null) {\n return false\n }\n\n const keysA = Object.keys(objA)\n const keysB = Object.keys(objB)\n\n if (keysA.length !== keysB.length) {\n return false\n }\n\n // Test for A's keys different from B.\n for (let i = 0; i < keysA.length; i++) {\n if (!hasOwnProperty.call(objB, keysA[i]) || !is(objA[keysA[i]], objB[keysA[i]])) {\n return false\n }\n }\n\n return true\n}\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nimport PropTypes from 'prop-types'\n\nconst textProps = {\n fill: PropTypes.string,\n fontSize: PropTypes.number,\n fontFamily: PropTypes.string,\n}\n\nexport const axisThemePropType = PropTypes.shape({\n domain: PropTypes.shape({\n line: PropTypes.shape({\n stroke: PropTypes.string.isRequired,\n strokeWidth: PropTypes.number.isRequired,\n strokeDasharray: PropTypes.string,\n }).isRequired,\n }).isRequired,\n ticks: PropTypes.shape({\n line: PropTypes.shape({\n stroke: PropTypes.string.isRequired,\n strokeWidth: PropTypes.number.isRequired,\n strokeDasharray: PropTypes.string,\n }).isRequired,\n text: PropTypes.shape({ ...textProps }).isRequired,\n }).isRequired,\n legend: PropTypes.shape({\n text: PropTypes.shape({ ...textProps }).isRequired,\n }).isRequired,\n})\n\nexport const gridThemePropType = PropTypes.shape({\n line: PropTypes.shape({\n stroke: PropTypes.string.isRequired,\n strokeWidth: PropTypes.number.isRequired,\n strokeDasharray: PropTypes.string,\n }).isRequired,\n})\n\nexport const legendsThemePropType = PropTypes.shape({\n hidden: PropTypes.shape({\n symbol: PropTypes.shape({\n fill: PropTypes.string.isRequired,\n opacity: PropTypes.number,\n }).isRequired,\n text: PropTypes.shape({ ...textProps, opacity: PropTypes.number }).isRequired,\n }).isRequired,\n text: PropTypes.shape({ ...textProps }).isRequired,\n})\n\nexport const labelsThemePropType = PropTypes.shape({\n text: PropTypes.shape({ ...textProps }).isRequired,\n})\n\nexport const 
dotsThemePropType = PropTypes.shape({\n text: PropTypes.shape({ ...textProps }).isRequired,\n})\n\nexport const markersThemePropType = PropTypes.shape({\n text: PropTypes.shape({ ...textProps }).isRequired,\n})\n\nexport const crosshairPropType = PropTypes.shape({\n line: PropTypes.shape({\n stroke: PropTypes.string.isRequired,\n strokeWidth: PropTypes.number.isRequired,\n strokeDasharray: PropTypes.string,\n }).isRequired,\n})\n\nexport const annotationsPropType = PropTypes.shape({\n text: PropTypes.shape({\n ...textProps,\n outlineWidth: PropTypes.number.isRequired,\n outlineColor: PropTypes.string.isRequired,\n }).isRequired,\n link: PropTypes.shape({\n stroke: PropTypes.string.isRequired,\n strokeWidth: PropTypes.number.isRequired,\n outlineWidth: PropTypes.number.isRequired,\n outlineColor: PropTypes.string.isRequired,\n }).isRequired,\n outline: PropTypes.shape({\n stroke: PropTypes.string.isRequired,\n strokeWidth: PropTypes.number.isRequired,\n outlineWidth: PropTypes.number.isRequired,\n outlineColor: PropTypes.string.isRequired,\n }).isRequired,\n symbol: PropTypes.shape({\n fill: PropTypes.string.isRequired,\n outlineWidth: PropTypes.number.isRequired,\n outlineColor: PropTypes.string.isRequired,\n }).isRequired,\n})\n\nexport const themePropType = PropTypes.shape({\n background: PropTypes.string.isRequired,\n fontFamily: PropTypes.string.isRequired,\n fontSize: PropTypes.number.isRequired,\n textColor: PropTypes.string.isRequired,\n axis: axisThemePropType.isRequired,\n grid: gridThemePropType.isRequired,\n legends: legendsThemePropType.isRequired,\n labels: labelsThemePropType.isRequired,\n dots: dotsThemePropType.isRequired,\n markers: markersThemePropType,\n crosshair: crosshairPropType.isRequired,\n annotations: annotationsPropType.isRequired,\n})\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nexport const defaultTheme = {\n background: 'transparent',\n fontFamily: 'sans-serif',\n fontSize: 11,\n textColor: '#333333',\n axis: {\n domain: {\n line: {\n stroke: 'transparent',\n strokeWidth: 1,\n },\n },\n ticks: {\n line: {\n stroke: '#777777',\n strokeWidth: 1,\n },\n text: {},\n },\n legend: {\n text: {\n fontSize: 12,\n },\n },\n },\n grid: {\n line: {\n stroke: '#dddddd',\n strokeWidth: 1,\n },\n },\n legends: {\n hidden: {\n symbol: {\n fill: '#333333',\n opacity: 0.6,\n },\n text: {\n fill: '#333333',\n opacity: 0.6,\n },\n },\n text: {},\n ticks: {\n line: {\n stroke: '#777777',\n strokeWidth: 1,\n },\n text: {\n fontSize: 10,\n },\n },\n title: {\n text: {},\n },\n },\n labels: {\n text: {},\n },\n markers: {\n lineColor: '#000000',\n lineStrokeWidth: 1,\n text: {},\n },\n dots: {\n text: {},\n },\n tooltip: {\n container: {\n background: 'white',\n color: 'inherit',\n fontSize: 'inherit',\n borderRadius: '2px',\n boxShadow: '0 1px 2px rgba(0, 0, 0, 0.25)',\n padding: '5px 9px',\n },\n basic: {\n whiteSpace: 'pre',\n display: 'flex',\n alignItems: 'center',\n },\n chip: {\n marginRight: 7,\n },\n table: {},\n tableCell: {\n padding: '3px 5px',\n },\n tableCellValue: {\n fontWeight: 'bold',\n },\n },\n crosshair: {\n line: {\n stroke: '#000000',\n strokeWidth: 1,\n strokeOpacity: 0.75,\n strokeDasharray: '6 6',\n },\n },\n annotations: {\n text: {\n fontSize: 13,\n outlineWidth: 2,\n outlineColor: '#ffffff',\n outlineOpacity: 1,\n },\n link: {\n stroke: '#000000',\n strokeWidth: 
1,\n outlineWidth: 2,\n outlineColor: '#ffffff',\n outlineOpacity: 1,\n },\n outline: {\n fill: 'none',\n stroke: '#000000',\n strokeWidth: 2,\n outlineWidth: 2,\n outlineColor: '#ffffff',\n outlineOpacity: 1,\n },\n symbol: {\n fill: '#000000',\n outlineWidth: 2,\n outlineColor: '#ffffff',\n outlineOpacity: 1,\n },\n },\n}\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nimport merge from 'lodash/merge'\nimport get from 'lodash/get'\nimport set from 'lodash/set'\n\nconst fontProps = [\n 'axis.ticks.text',\n 'axis.legend.text',\n 'legends.title.text',\n 'legends.text',\n 'legends.ticks.text',\n 'legends.title.text',\n 'labels.text',\n 'dots.text',\n 'markers.text',\n 'annotations.text',\n]\n\nexport const extendDefaultTheme = (defaultTheme, customTheme) => {\n const theme = merge({}, defaultTheme, customTheme)\n\n fontProps.forEach(prop => {\n if (get(theme, `${prop}.fontFamily`) === undefined) {\n set(theme, `${prop}.fontFamily`, theme.fontFamily)\n }\n if (get(theme, `${prop}.fontSize`) === undefined) {\n set(theme, `${prop}.fontSize`, theme.fontSize)\n }\n if (get(theme, `${prop}.fill`) === undefined) {\n set(theme, `${prop}.fill`, theme.textColor)\n }\n })\n\n return theme\n}\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nimport { createContext, useMemo } from 'react'\nimport { isString } from 'lodash'\nimport PropTypes from 'prop-types'\nimport { config as presets } from '@react-spring/web'\n\nexport const motionConfigContext = createContext()\n\n/**\n * For now we're supporting both react-motion and react-spring,\n * however, react-motion will be gradually replaced by react-spring.\n */\nexport const MotionConfigProvider = ({ children, animate, stiffness, damping, config }) => {\n const value = useMemo(() => {\n const reactSpringConfig = isString(config) ? 
presets[config] : config\n\n return {\n animate,\n springConfig: { stiffness, damping },\n config: reactSpringConfig,\n }\n }, [animate, stiffness, damping, config])\n\n return {children}\n}\n\nexport const motionPropTypes = {\n animate: PropTypes.bool,\n motionStiffness: PropTypes.number,\n motionDamping: PropTypes.number,\n motionConfig: PropTypes.oneOfType([\n PropTypes.oneOf(Object.keys(presets)),\n PropTypes.shape({\n mass: PropTypes.number,\n tension: PropTypes.number,\n friction: PropTypes.number,\n clamp: PropTypes.bool,\n precision: PropTypes.number,\n velocity: PropTypes.number,\n duration: PropTypes.number,\n easing: PropTypes.func,\n }),\n ]),\n}\n\nMotionConfigProvider.propTypes = {\n children: PropTypes.node.isRequired,\n animate: motionPropTypes.animate,\n stiffness: motionPropTypes.motionStiffness,\n damping: motionPropTypes.motionDamping,\n config: motionPropTypes.motionConfig,\n}\n\nexport const motionDefaultProps = {\n animate: true,\n stiffness: 90,\n damping: 15,\n config: 'default',\n}\n\nMotionConfigProvider.defaultProps = motionDefaultProps\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nimport { useContext } from 'react'\nimport { motionConfigContext } from './context'\n\nexport const useMotionConfig = () => useContext(motionConfigContext)\n","import { interpolateString } from 'd3-interpolate'\nimport { useEffect, useMemo, useRef } from 'react'\nimport { useSpring, to } from '@react-spring/web'\nimport { useMotionConfig } from '../motion'\n\nconst usePrevious = value => {\n const ref = useRef()\n\n useEffect(() => {\n ref.current = value\n }, [value])\n\n return ref.current\n}\n\nexport const useAnimatedPath = path => {\n const { animate, config: springConfig } = useMotionConfig()\n\n const previousPath = usePrevious(path)\n const interpolator = useMemo(() => interpolateString(previousPath, path), [previousPath, path])\n\n const { value } = useSpring({\n from: { value: 0 },\n to: { value: 1 },\n reset: true,\n config: springConfig,\n immediate: !animate,\n })\n\n return to(value, interpolator)\n}\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nimport last from 'lodash/last'\nimport isArray from 'lodash/isArray'\nimport isFunction from 'lodash/isFunction'\nimport { scaleQuantize } from 'd3-scale'\nimport {\n // Diverging\n schemeBrBG,\n schemePRGn,\n schemePiYG,\n schemePuOr,\n schemeRdBu,\n schemeRdGy,\n schemeRdYlBu,\n schemeRdYlGn,\n schemeSpectral,\n\n // Sequential (Single Hue)\n schemeBlues,\n schemeGreens,\n schemeGreys,\n schemeOranges,\n schemePurples,\n schemeReds,\n\n // Sequential (Multi-Hue)\n schemeBuGn,\n schemeBuPu,\n schemeGnBu,\n schemeOrRd,\n schemePuBuGn,\n schemePuBu,\n schemePuRd,\n schemeRdPu,\n schemeYlGnBu,\n schemeYlGn,\n schemeYlOrBr,\n schemeYlOrRd,\n} from 'd3-scale-chromatic'\n\nexport const quantizeColorScales = {\n nivo: ['#d76445', '#f47560', '#e8c1a0', '#97e3d5', '#61cdbb', '#00b0a7'],\n\n // Diverging\n BrBG: last(schemeBrBG),\n PRGn: last(schemePRGn),\n PiYG: last(schemePiYG),\n PuOr: last(schemePuOr),\n RdBu: last(schemeRdBu),\n RdGy: last(schemeRdGy),\n RdYlBu: last(schemeRdYlBu),\n RdYlGn: last(schemeRdYlGn),\n spectral: last(schemeSpectral),\n\n // 
Sequential (Single Hue)\n blues: last(schemeBlues),\n greens: last(schemeGreens),\n greys: last(schemeGreys),\n oranges: last(schemeOranges),\n purples: last(schemePurples),\n reds: last(schemeReds),\n\n // Sequential (Multi-Hue)\n BuGn: last(schemeBuGn),\n BuPu: last(schemeBuPu),\n GnBu: last(schemeGnBu),\n OrRd: last(schemeOrRd),\n PuBuGn: last(schemePuBuGn),\n PuBu: last(schemePuBu),\n PuRd: last(schemePuRd),\n RdPu: last(schemeRdPu),\n YlGnBu: last(schemeYlGnBu),\n YlGn: last(schemeYlGn),\n YlOrBr: last(schemeYlOrBr),\n YlOrRd: last(schemeYlOrRd),\n}\n\nexport const quantizeColorScalesKeys = Object.keys(quantizeColorScales)\n\nexport const guessQuantizeColorScale = colors => {\n // colors is already a valid scale\n if (isFunction(colors)) {\n if (!isFunction(colors.domain)) {\n throw new Error(\n `Provided colors should be a valid quantize scale providing a 'domain()' function`\n )\n }\n\n return colors\n }\n\n if (quantizeColorScales[colors]) {\n // use predefined d3 quantize color scale\n return scaleQuantize().range(quantizeColorScales[colors])\n }\n\n // user defined colors\n if (isArray(colors)) return scaleQuantize().range(colors)\n\n throw new Error(\n `Unable to guess quantize color scale from '${colors}',\\nmust be a function or one of:\\n'${quantizeColorScalesKeys.join(\n `', '`\n )}'`\n )\n}\n","import PropTypes from 'prop-types'\nimport without from 'lodash/without'\nimport {\n curveBasis,\n curveBasisClosed,\n curveBasisOpen,\n curveBundle,\n curveCardinal,\n curveCardinalClosed,\n curveCardinalOpen,\n curveCatmullRom,\n curveCatmullRomClosed,\n curveCatmullRomOpen,\n curveLinear,\n curveLinearClosed,\n curveMonotoneX,\n curveMonotoneY,\n curveNatural,\n curveStep,\n curveStepAfter,\n curveStepBefore,\n} from 'd3-shape'\n\nexport const curvePropMapping = {\n basis: curveBasis,\n basisClosed: curveBasisClosed,\n basisOpen: curveBasisOpen,\n bundle: curveBundle,\n cardinal: curveCardinal,\n cardinalClosed: curveCardinalClosed,\n cardinalOpen: curveCardinalOpen,\n catmullRom: curveCatmullRom,\n catmullRomClosed: curveCatmullRomClosed,\n catmullRomOpen: curveCatmullRomOpen,\n linear: curveLinear,\n linearClosed: curveLinearClosed,\n monotoneX: curveMonotoneX,\n monotoneY: curveMonotoneY,\n natural: curveNatural,\n step: curveStep,\n stepAfter: curveStepAfter,\n stepBefore: curveStepBefore,\n}\n\nexport const curvePropKeys = Object.keys(curvePropMapping)\n\nexport const curvePropType = PropTypes.oneOf(curvePropKeys)\n\nexport const closedCurvePropKeys = curvePropKeys.filter(c => c.endsWith('Closed'))\n\n// Safe curves to be used with d3 area shape generator\nexport const areaCurvePropKeys = without(\n curvePropKeys,\n 'bundle',\n 'basisClosed',\n 'basisOpen',\n 'cardinalClosed',\n 'cardinalOpen',\n 'catmullRomClosed',\n 'catmullRomOpen',\n 'linearClosed'\n)\n\n// Safe curves to be used with d3 line shape generator\nexport const lineCurvePropKeys = without(\n curvePropKeys,\n 'bundle',\n 'basisClosed',\n 'basisOpen',\n 'cardinalClosed',\n 'cardinalOpen',\n 'catmullRomClosed',\n 'catmullRomOpen',\n 'linearClosed'\n)\n\nexport const lineCurvePropType = PropTypes.oneOf(lineCurvePropKeys)\n\n/**\n * Returns curve interpolator from given identifier.\n *\n * @param {string} id - Curve interpolator identifier\n * @return {Function}\n */\nexport const curveFromProp = id => {\n const curveInterpolator = curvePropMapping[id]\n if (!curveInterpolator) {\n throw new TypeError(`'${id}', is not a valid curve interpolator identifier.`)\n }\n\n return curvePropMapping[id]\n}\n","/*\n * This 
file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nimport last from 'lodash/last'\nimport isArray from 'lodash/isArray'\nimport isString from 'lodash/isString'\nimport { scaleOrdinal, scaleSequential } from 'd3-scale'\nimport {\n // categorical\n schemeCategory10,\n schemeAccent,\n schemeDark2,\n schemePaired,\n schemePastel1,\n schemePastel2,\n schemeSet1,\n schemeSet2,\n schemeSet3,\n // diverging\n interpolateBrBG,\n schemeBrBG,\n interpolatePRGn,\n schemePRGn,\n interpolatePiYG,\n schemePiYG,\n interpolatePuOr,\n schemePuOr,\n interpolateRdBu,\n schemeRdBu,\n interpolateRdGy,\n schemeRdGy,\n interpolateRdYlBu,\n schemeRdYlBu,\n interpolateRdYlGn,\n schemeRdYlGn,\n interpolateSpectral,\n schemeSpectral,\n // sequential single hue\n interpolateBlues,\n schemeBlues,\n interpolateGreens,\n schemeGreens,\n interpolateGreys,\n schemeGreys,\n interpolateOranges,\n schemeOranges,\n interpolatePurples,\n schemePurples,\n interpolateReds,\n schemeReds,\n // sequential multi hue\n interpolateViridis,\n interpolateInferno,\n interpolateMagma,\n interpolatePlasma,\n interpolateWarm,\n interpolateCool,\n interpolateCubehelixDefault,\n interpolateBuGn,\n schemeBuGn,\n interpolateBuPu,\n schemeBuPu,\n interpolateGnBu,\n schemeGnBu,\n interpolateOrRd,\n schemeOrRd,\n interpolatePuBuGn,\n schemePuBuGn,\n interpolatePuBu,\n schemePuBu,\n interpolatePuRd,\n schemePuRd,\n interpolateRdPu,\n schemeRdPu,\n interpolateYlGnBu,\n schemeYlGnBu,\n interpolateYlGn,\n schemeYlGn,\n interpolateYlOrBr,\n schemeYlOrBr,\n interpolateYlOrRd,\n schemeYlOrRd,\n // cyclical\n interpolateRainbow,\n interpolateSinebow,\n} from 'd3-scale-chromatic'\n\n// used for ordinal color scales\nconst colorSchemes = {\n nivo: ['#e8c1a0', '#f47560', '#f1e15b', '#e8a838', '#61cdbb', '#97e3d5'],\n // categorical\n category10: schemeCategory10,\n accent: schemeAccent,\n dark2: schemeDark2,\n paired: schemePaired,\n pastel1: schemePastel1,\n pastel2: schemePastel2,\n set1: schemeSet1,\n set2: schemeSet2,\n set3: schemeSet3,\n // diverging\n brown_blueGreen: last(schemeBrBG),\n purpleRed_green: last(schemePRGn),\n pink_yellowGreen: last(schemePiYG),\n purple_orange: last(schemePuOr),\n red_blue: last(schemeRdBu),\n red_grey: last(schemeRdGy),\n red_yellow_blue: last(schemeRdYlBu),\n red_yellow_green: last(schemeRdYlGn),\n spectral: last(schemeSpectral),\n // sequential single hue\n blues: last(schemeBlues),\n greens: last(schemeGreens),\n greys: last(schemeGreys),\n oranges: last(schemeOranges),\n purples: last(schemePurples),\n reds: last(schemeReds),\n // sequential multi hue\n blue_green: last(schemeBuGn),\n blue_purple: last(schemeBuPu),\n green_blue: last(schemeGnBu),\n orange_red: last(schemeOrRd),\n purple_blue_green: last(schemePuBuGn),\n purple_blue: last(schemePuBu),\n purple_red: last(schemePuRd),\n red_purple: last(schemeRdPu),\n yellow_green_blue: last(schemeYlGnBu),\n yellow_green: last(schemeYlGn),\n yellow_orange_brown: last(schemeYlOrBr),\n yellow_orange_red: last(schemeYlOrRd),\n}\n\nexport const colorSchemeIds = [\n 'nivo',\n // categorical\n 'category10',\n 'accent',\n 'dark2',\n 'paired',\n 'pastel1',\n 'pastel2',\n 'set1',\n 'set2',\n 'set3',\n // diverging\n 'brown_blueGreen',\n 'purpleRed_green',\n 'pink_yellowGreen',\n 'purple_orange',\n 'red_blue',\n 'red_grey',\n 'red_yellow_blue',\n 'red_yellow_green',\n 'spectral',\n // sequential single hue\n 
'blues',\n 'greens',\n 'greys',\n 'oranges',\n 'purples',\n 'reds',\n // sequential multi hue\n 'blue_green',\n 'blue_purple',\n 'green_blue',\n 'orange_red',\n 'purple_blue_green',\n 'purple_blue',\n 'purple_red',\n 'red_purple',\n 'yellow_green_blue',\n 'yellow_green',\n 'yellow_orange_brown',\n 'yellow_orange_red',\n]\n\n// used for sequential color scales\nexport const colorInterpolators = {\n // diverging\n brown_blueGreen: interpolateBrBG,\n purpleRed_green: interpolatePRGn,\n pink_yellowGreen: interpolatePiYG,\n purple_orange: interpolatePuOr,\n red_blue: interpolateRdBu,\n red_grey: interpolateRdGy,\n red_yellow_blue: interpolateRdYlBu,\n red_yellow_green: interpolateRdYlGn,\n spectral: interpolateSpectral,\n // sequential single hue\n blues: interpolateBlues,\n greens: interpolateGreens,\n greys: interpolateGreys,\n oranges: interpolateOranges,\n purples: interpolatePurples,\n reds: interpolateReds,\n // sequential multi hue\n viridis: interpolateViridis,\n inferno: interpolateInferno,\n magma: interpolateMagma,\n plasma: interpolatePlasma,\n warm: interpolateWarm,\n cool: interpolateCool,\n cubehelixDefault: interpolateCubehelixDefault,\n blue_green: interpolateBuGn,\n blue_purple: interpolateBuPu,\n green_blue: interpolateGnBu,\n orange_red: interpolateOrRd,\n purple_blue_green: interpolatePuBuGn,\n purple_blue: interpolatePuBu,\n purple_red: interpolatePuRd,\n red_purple: interpolateRdPu,\n yellow_green_blue: interpolateYlGnBu,\n yellow_green: interpolateYlGn,\n yellow_orange_brown: interpolateYlOrBr,\n yellow_orange_red: interpolateYlOrRd,\n // cyclical\n rainbow: interpolateRainbow,\n sinebow: interpolateSinebow,\n}\n\nexport const colorInterpolatorIds = [\n // diverging\n 'brown_blueGreen',\n 'purpleRed_green',\n 'pink_yellowGreen',\n 'purple_orange',\n 'red_blue',\n 'red_grey',\n 'red_yellow_blue',\n 'red_yellow_green',\n 'spectral',\n // sequential single hue\n 'blues',\n 'greens',\n 'greys',\n 'oranges',\n 'purples',\n 'reds',\n // sequential multi hue\n 'viridis',\n 'inferno',\n 'magma',\n 'plasma',\n 'warm',\n 'cool',\n 'cubehelixDefault',\n 'blue_green',\n 'blue_purple',\n 'green_blue',\n 'orange_red',\n 'purple_blue_green',\n 'purple_blue',\n 'purple_red',\n 'red_purple',\n 'yellow_green_blue',\n 'yellow_green',\n 'yellow_orange_brown',\n 'yellow_orange_red',\n // cyclical\n 'rainbow',\n 'sinebow',\n]\n\nexport const nivoCategoricalColors = () =>\n scaleOrdinal(['#e8c1a0', '#f47560', '#f1e15b', '#e8a838', '#61cdbb', '#97e3d5'])\n\nexport const getColorScale = (colors, dataScale) => {\n if (isString(colors)) {\n const scheme = colorSchemes[colors]\n if (scheme !== undefined) {\n const scale = scaleOrdinal(scheme)\n scale.type = 'ordinal'\n\n return scale\n }\n\n if (dataScale !== undefined && colors.indexOf('seq:') === 0) {\n const interpolator = colorInterpolators[colors.slice(4)]\n if (interpolator !== undefined) {\n const scale = scaleSequential(interpolator).domain(dataScale.domain())\n scale.type = 'sequential'\n\n return scale\n }\n }\n }\n\n if (isArray(colors)) {\n const scale = scaleOrdinal(colors)\n scale.type = 'ordinal'\n\n return scale\n }\n\n // just use provided value,\n // all elements will have identical color\n return () => colors\n}\n\nexport * from './quantize'\n","import PropTypes from 'prop-types'\nimport { quantizeColorScalesKeys } from '../lib/colors'\n\nexport const quantizeColorScalePropType = PropTypes.oneOfType([\n PropTypes.oneOf(quantizeColorScalesKeys),\n PropTypes.func,\n PropTypes.arrayOf(PropTypes.string),\n])\n","import PropTypes from 
'prop-types'\n\nexport const defsPropTypes = {\n defs: PropTypes.arrayOf(\n PropTypes.shape({\n id: PropTypes.string.isRequired,\n })\n ).isRequired,\n fill: PropTypes.arrayOf(\n PropTypes.shape({\n id: PropTypes.string.isRequired,\n match: PropTypes.oneOfType([PropTypes.oneOf(['*']), PropTypes.object, PropTypes.func])\n .isRequired,\n })\n ).isRequired,\n}\n","import PropTypes from 'prop-types'\nimport {\n // order\n stackOrderAscending,\n stackOrderDescending,\n stackOrderInsideOut,\n stackOrderNone,\n stackOrderReverse,\n // offset\n stackOffsetExpand,\n stackOffsetDiverging,\n stackOffsetNone,\n stackOffsetSilhouette,\n stackOffsetWiggle,\n} from 'd3-shape'\n\nexport const stackOrderPropMapping = {\n ascending: stackOrderAscending,\n descending: stackOrderDescending,\n insideOut: stackOrderInsideOut,\n none: stackOrderNone,\n reverse: stackOrderReverse,\n}\n\nexport const stackOrderPropKeys = Object.keys(stackOrderPropMapping)\n\nexport const stackOrderPropType = PropTypes.oneOf(stackOrderPropKeys)\n\nexport const stackOrderFromProp = prop => stackOrderPropMapping[prop]\n\nexport const stackOffsetPropMapping = {\n expand: stackOffsetExpand,\n diverging: stackOffsetDiverging,\n none: stackOffsetNone,\n silhouette: stackOffsetSilhouette,\n wiggle: stackOffsetWiggle,\n}\n\nexport const stackOffsetPropKeys = Object.keys(stackOffsetPropMapping)\n\nexport const stackOffsetPropType = PropTypes.oneOf(stackOffsetPropKeys)\n\nexport const stackOffsetFromProp = prop => stackOffsetPropMapping[prop]\n","import ascending from \"./ascending.js\";\n\nexport default function(series) {\n return ascending(series).reverse();\n}\n","import appearance from \"./appearance.js\";\nimport {sum} from \"./ascending.js\";\n\nexport default function(series) {\n var n = series.length,\n i,\n j,\n sums = series.map(sum),\n order = appearance(series),\n top = 0,\n bottom = 0,\n tops = [],\n bottoms = [];\n\n for (i = 0; i < n; ++i) {\n j = order[i];\n if (top < bottom) {\n top += sums[j];\n tops.push(j);\n } else {\n bottom += sums[j];\n bottoms.push(j);\n }\n }\n\n return bottoms.reverse().concat(tops);\n}\n","import none from \"./none.js\";\n\nexport default function(series) {\n return none(series).reverse();\n}\n","import none from \"./none.js\";\n\nexport default function(series, order) {\n if (!((n = series.length) > 0)) return;\n for (var i, n, j = 0, m = series[0].length, y; j < m; ++j) {\n for (y = i = 0; i < n; ++i) y += series[i][j][1] || 0;\n if (y) for (i = 0; i < n; ++i) series[i][j][1] /= y;\n }\n none(series, order);\n}\n","export default function(series, order) {\n if (!((n = series.length) > 0)) return;\n for (var i, j = 0, d, dy, yp, yn, n, m = series[order[0]].length; j < m; ++j) {\n for (yp = yn = 0, i = 0; i < n; ++i) {\n if ((dy = (d = series[order[i]][j])[1] - d[0]) > 0) {\n d[0] = yp, d[1] = yp += dy;\n } else if (dy < 0) {\n d[1] = yn, d[0] = yn += dy;\n } else {\n d[0] = 0, d[1] = dy;\n }\n }\n }\n}\n","import none from \"./none.js\";\n\nexport default function(series, order) {\n if (!((n = series.length) > 0)) return;\n for (var j = 0, s0 = series[order[0]], n, m = s0.length; j < m; ++j) {\n for (var i = 0, y = 0; i < n; ++i) y += series[i][j][1] || 0;\n s0[j][1] += s0[j][0] = -y / 2;\n }\n none(series, order);\n}\n","import none from \"./none.js\";\n\nexport default function(series, order) {\n if (!((n = series.length) > 0) || !((m = (s0 = series[order[0]]).length) > 0)) return;\n for (var y = 0, j = 1, s0, m, n; j < m; ++j) {\n for (var i = 0, s1 = 0, s2 = 0; i < n; ++i) {\n var si = 
series[order[i]],\n sij0 = si[j][1] || 0,\n sij1 = si[j - 1][1] || 0,\n s3 = (sij0 - sij1) / 2;\n for (var k = 0; k < i; ++k) {\n var sk = series[order[k]],\n skj0 = sk[j][1] || 0,\n skj1 = sk[j - 1][1] || 0;\n s3 += skj0 - skj1;\n }\n s1 += sij0, s2 += s3 * sij0;\n }\n s0[j - 1][1] += s0[j - 1][0] = y;\n if (s1) y -= s2 / s1;\n }\n s0[j - 1][1] += s0[j - 1][0] = y;\n none(series, order);\n}\n","import PropTypes from 'prop-types'\n\nexport const marginPropType = PropTypes.shape({\n top: PropTypes.number,\n right: PropTypes.number,\n bottom: PropTypes.number,\n left: PropTypes.number,\n}).isRequired\n\nexport const blendModes = [\n 'normal',\n 'multiply',\n 'screen',\n 'overlay',\n 'darken',\n 'lighten',\n 'color-dodge',\n 'color-burn',\n 'hard-light',\n 'soft-light',\n 'difference',\n 'exclusion',\n 'hue',\n 'saturation',\n 'color',\n 'luminosity',\n]\n\nexport const blendModePropType = PropTypes.oneOf(blendModes)\n\nexport * from './colors'\nexport * from './curve'\nexport * from './defs'\nexport * from './stack'\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nimport { scaleOrdinal } from 'd3-scale'\nimport { schemeSet3 } from 'd3-scale-chromatic'\nimport { nivoCategoricalColors } from '../lib/colors'\n\n// motion\nexport const defaultAnimate = true\nexport const defaultMotionStiffness = 90\nexport const defaultMotionDamping = 15\n\n// colors\nexport const defaultCategoricalColors = nivoCategoricalColors\nexport const defaultColorRange = scaleOrdinal(schemeSet3)\n\n// margin\nexport const defaultMargin = {\n top: 0,\n right: 0,\n bottom: 0,\n left: 0,\n}\n","import { useMemo } from 'react'\nimport { defaultMargin } from '../defaults'\n\nexport const useDimensions = (width, height, partialMargin = {}) =>\n useMemo(() => {\n const margin = {\n ...defaultMargin,\n ...partialMargin,\n }\n\n return {\n margin,\n innerWidth: width - margin.left - margin.right,\n innerHeight: height - margin.top - margin.bottom,\n outerWidth: width,\n outerHeight: height,\n }\n }, [\n width,\n height,\n partialMargin.top,\n partialMargin.right,\n partialMargin.bottom,\n partialMargin.left,\n ])\n","import { useRef, useState, useEffect } from 'react'\n\nexport const useMeasure = () => {\n const measureRef = useRef(null)\n const [bounds, setBounds] = useState({\n left: 0,\n top: 0,\n width: 0,\n height: 0,\n })\n const [observer] = useState(() => new ResizeObserver(([entry]) => setBounds(entry.contentRect)))\n\n useEffect(() => {\n if (measureRef.current) {\n observer.observe(measureRef.current)\n }\n\n return () => observer.disconnect()\n }, [])\n\n return [measureRef, bounds]\n}\n","import { useMemo } from 'react'\nimport { format as d3Format } from 'd3-format'\nimport { timeFormat as d3TimeFormat } from 'd3-time-format'\n\nexport const getValueFormatter = format => {\n // user defined function\n if (typeof format === 'function') return format\n\n if (typeof format === 'string') {\n // time format specifier\n if (format.indexOf('time:') === 0) {\n return d3TimeFormat(format.slice('5'))\n }\n\n // standard format specifier\n return d3Format(format)\n }\n\n // no formatting\n return value => `${value}`\n}\n\nexport const useValueFormatter = format => useMemo(() => getValueFormatter(format), [format])\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright 
and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nimport { createContext, useContext } from 'react'\nimport PropTypes from 'prop-types'\nimport { usePartialTheme } from '../hooks'\n\nexport const themeContext = createContext()\n\n// required to preserve equality\nconst defaultPartialTheme = {}\n\nexport const ThemeProvider = ({ theme: partialTheme = defaultPartialTheme, children }) => {\n const theme = usePartialTheme(partialTheme)\n\n return {children}\n}\n\nThemeProvider.propTypes = {\n children: PropTypes.node.isRequired,\n theme: PropTypes.object,\n}\n\nexport const useTheme = () => useContext(themeContext)\n","import { useMemo } from 'react'\nimport { defaultTheme, extendDefaultTheme } from '../theming'\n\nexport const usePartialTheme = partialTheme =>\n useMemo(() => extendDefaultTheme(defaultTheme, partialTheme), [partialTheme])\n","import { cloneElement } from 'react'\nimport PropTypes from 'prop-types'\n\n// type ConditionalWrapperProps = {\n// children: JSX.Element\n// condition: boolean\n// wrapper: (children: JSX.Element) => JSX.Element\n// }\n\nexport const ConditionalWrapper = ({ children, condition, wrapper }) => {\n if (!condition) return children\n\n return cloneElement(wrapper, {}, children)\n}\n\nConditionalWrapper.propTypes = {\n children: PropTypes.node.isRequired,\n condition: PropTypes.bool.isRequired,\n wrapper: PropTypes.element.isRequired,\n}\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nimport { useRef } from 'react'\nimport PropTypes from 'prop-types'\nimport { TooltipProvider, Tooltip } from '@nivo/tooltip'\nimport { ThemeProvider } from '../theming'\nimport { MotionConfigProvider } from '../motion'\nimport { ConditionalWrapper } from './ConditionalWrapper'\n\nconst containerStyle = {\n position: 'relative',\n}\n\nexport const Container = ({\n children,\n theme,\n renderWrapper = true,\n isInteractive = true,\n animate,\n motionStiffness,\n motionDamping,\n motionConfig,\n}) => {\n const container = useRef(null)\n\n return (\n \n \n \n {/* we should not render the div element if using the HTTP API */}\n }\n >\n {children}\n {isInteractive && }\n \n \n \n \n )\n}\n\nContainer.propTypes = {\n children: PropTypes.element.isRequired,\n isInteractive: PropTypes.bool,\n renderWrapper: PropTypes.bool,\n theme: PropTypes.object,\n animate: PropTypes.bool,\n motionStiffness: PropTypes.number,\n motionDamping: PropTypes.number,\n motionConfig: PropTypes.string,\n}\n\nexport default Container\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nimport { useRef, useMemo, useCallback } from 'react'\nimport PropTypes from 'prop-types'\nimport {\n TooltipActionsContext,\n TooltipStateContext,\n useTooltipHandlers,\n Tooltip,\n} from '@nivo/tooltip'\nimport noop from '../lib/noop'\nimport { ThemeProvider } from '../theming'\nimport { MotionConfigProvider } from '../motion'\nimport { ConditionalWrapper } from './ConditionalWrapper'\n\nconst containerStyle = {\n position: 'relative',\n}\n\n/**\n * This component should only be used when relying on render props,\n * passing `showTooltip`, `hideTooltip`, but you should use the regular\n * `Container` 
component.\n *\n * @deprecated\n */\nexport const LegacyContainer = ({\n children,\n theme,\n isInteractive = true,\n renderWrapper = true,\n animate,\n motionStiffness,\n motionDamping,\n motionConfig,\n}) => {\n const container = useRef(null)\n const { actions: tooltipActions, state: tooltipState } = useTooltipHandlers(container)\n\n const showTooltip = useCallback(\n (content, event) => tooltipActions.showTooltipFromEvent(content, event),\n [tooltipActions.showTooltipFromEvent]\n )\n\n const handlers = useMemo(\n () => ({\n showTooltip: isInteractive ? showTooltip : noop,\n hideTooltip: isInteractive ? tooltipActions.hideTooltip : noop,\n }),\n [tooltipActions.hideTooltip, isInteractive, showTooltip]\n )\n\n return (\n \n \n \n \n {/* we should not render the div element if using the HTTP API */}\n }\n >\n {children(handlers)}\n {isInteractive && }\n \n \n \n \n \n )\n}\n\nLegacyContainer.propTypes = {\n children: PropTypes.func.isRequired,\n isInteractive: PropTypes.bool,\n renderWrapper: PropTypes.bool,\n theme: PropTypes.object.isRequired,\n animate: PropTypes.bool.isRequired,\n motionStiffness: PropTypes.number,\n motionDamping: PropTypes.number,\n motionConfig: PropTypes.string,\n}\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nimport PropTypes from 'prop-types'\nimport { useMeasure } from '../hooks'\n\nconst ResponsiveWrapper = ({ children }) => {\n const [measureRef, bounds] = useMeasure()\n const shouldRender = bounds.width > 0 && bounds.height > 0\n\n return (\n
\n {shouldRender && children({ width: bounds.width, height: bounds.height })}\n
\n )\n}\n\nResponsiveWrapper.propTypes = {\n children: PropTypes.func.isRequired,\n}\n\nexport default ResponsiveWrapper\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nimport PropTypes from 'prop-types'\n\nexport const LinearGradient = ({ id, colors, ...rest }) => (\n \n {colors.map(({ offset, color, opacity }) => (\n \n ))}\n \n)\n\nLinearGradient.propTypes = {\n id: PropTypes.string.isRequired,\n colors: PropTypes.arrayOf(\n PropTypes.shape({\n offset: PropTypes.number.isRequired,\n color: PropTypes.string.isRequired,\n opacity: PropTypes.number,\n })\n ).isRequired,\n gradientTransform: PropTypes.string,\n}\n\nexport const linearGradientDef = (id, colors, options = {}) => ({\n id,\n type: 'linearGradient',\n colors,\n ...options,\n})\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nimport { LinearGradient } from './LinearGradient'\n\nexport const gradientTypes = {\n linearGradient: LinearGradient,\n}\n\nexport * from './LinearGradient'\n","import { memo } from 'react'\nimport PropTypes from 'prop-types'\n\nexport const PatternDots = memo(({ id, background, color, size, padding, stagger }) => {\n let fullSize = size + padding\n const radius = size / 2\n const halfPadding = padding / 2\n if (stagger === true) {\n fullSize = size * 2 + padding * 2\n }\n\n return (\n \n \n \n {stagger && (\n \n )}\n \n )\n})\n\nPatternDots.displayName = 'PatternDots'\nPatternDots.propTypes = {\n id: PropTypes.string.isRequired,\n color: PropTypes.string.isRequired,\n background: PropTypes.string.isRequired,\n size: PropTypes.number.isRequired,\n padding: PropTypes.number.isRequired,\n stagger: PropTypes.bool.isRequired,\n}\n\nPatternDots.defaultProps = {\n color: '#000000',\n background: '#ffffff',\n size: 4,\n padding: 4,\n stagger: false,\n}\n\nexport const patternDotsDef = (id, options = {}) => ({\n id,\n type: 'patternDots',\n ...options,\n})\n","export const TWO_PI = Math.PI * 2\n\nexport const degreesToRadians = degrees => (degrees * Math.PI) / 180\n\nexport const radiansToDegrees = radians => (180 * radians) / Math.PI\n\nexport const midAngle = arc => arc.startAngle + (arc.endAngle - arc.startAngle) / 2\n\nexport const positionFromAngle = (angle, distance) => ({\n x: Math.cos(angle) * distance,\n y: Math.sin(angle) * distance,\n})\n\n/**\n * Normalize given angle (degrees) in the 0~360 range.\n *\n * @param {number} angle\n *\n * @return {number}\n */\nexport const absoluteAngleDegrees = angle => {\n let absAngle = angle % 360\n if (absAngle < 0) {\n absAngle += 360\n }\n\n return absAngle\n}\n\nexport const absoluteAngleRadians = angle => angle - TWO_PI * Math.floor((angle + Math.PI) / TWO_PI)\n\n/**\n * Ensure angle is always between 0~360.\n *\n * @param {number} rawAngle - in degrees\n *\n * @returns {number}\n */\nexport const normalizeAngle = rawAngle => {\n if (rawAngle < 0) {\n return 360 - (-rawAngle % 360)\n }\n\n return rawAngle % 360\n}\n\n/**\n * Ensure the absolute difference between start and end angles\n * is at most given length.\n *\n * @param startAngle - in degrees\n * @param endAngle - in degrees\n * @param length - in degrees\n *\n * @returns {[number, number]}\n */\nexport const clampArc = (startAngle, 
endAngle, length = 360) => {\n let clampedEndAngle = endAngle\n if (Math.abs(endAngle - startAngle) > length) {\n clampedEndAngle = startAngle + (endAngle > startAngle ? length : -length)\n }\n\n return [startAngle, clampedEndAngle]\n}\n","export const textPropsByEngine = {\n svg: {\n align: {\n left: 'start',\n center: 'middle',\n right: 'end',\n start: 'start',\n middle: 'middle',\n end: 'end',\n },\n baseline: {\n top: 'text-before-edge',\n center: 'central',\n bottom: 'alphabetic',\n },\n },\n canvas: {\n align: {\n left: 'left',\n center: 'center',\n right: 'right',\n start: 'left',\n middle: 'center',\n end: 'right',\n },\n baseline: {\n top: 'top',\n center: 'middle',\n bottom: 'bottom',\n },\n },\n}\n","import { memo } from 'react'\nimport PropTypes from 'prop-types'\nimport { degreesToRadians } from '../../../lib/polar'\n\nexport const PatternLines = memo(\n ({ id, spacing: _spacing, rotation: _rotation, background, color, lineWidth }) => {\n let rotation = Math.round(_rotation) % 360\n const spacing = Math.abs(_spacing)\n\n if (rotation > 180) rotation = rotation - 360\n else if (rotation > 90) rotation = rotation - 180\n else if (rotation < -180) rotation = rotation + 360\n else if (rotation < -90) rotation = rotation + 180\n\n let width = spacing\n let height = spacing\n let path\n\n if (rotation === 0) {\n path = `\n M 0 0 L ${width} 0\n M 0 ${height} L ${width} ${height}\n `\n } else if (rotation === 90) {\n path = `\n M 0 0 L 0 ${height}\n M ${width} 0 L ${width} ${height}\n `\n } else {\n width = Math.abs(spacing / Math.sin(degreesToRadians(rotation)))\n height = spacing / Math.sin(degreesToRadians(90 - rotation))\n\n if (rotation > 0) {\n path = `\n M 0 ${-height} L ${width * 2} ${height}\n M ${-width} ${-height} L ${width} ${height}\n M ${-width} 0 L ${width} ${height * 2}\n `\n } else {\n path = `\n M ${-width} ${height} L ${width} ${-height}\n M ${-width} ${height * 2} L ${width * 2} ${-height}\n M 0 ${height * 2} L ${width * 2} 0\n `\n }\n }\n\n return (\n \n \n \n \n )\n }\n)\n\nPatternLines.displayName = 'PatternLines'\nPatternLines.propTypes = {\n id: PropTypes.string.isRequired,\n spacing: PropTypes.number.isRequired,\n rotation: PropTypes.number.isRequired,\n background: PropTypes.string.isRequired,\n color: PropTypes.string.isRequired,\n lineWidth: PropTypes.number.isRequired,\n}\nPatternLines.defaultProps = {\n spacing: 5,\n rotation: 0,\n color: '#000000',\n background: '#ffffff',\n lineWidth: 2,\n}\n\nexport const patternLinesDef = (id, options = {}) => ({\n id,\n type: 'patternLines',\n ...options,\n})\n","import { memo } from 'react'\nimport PropTypes from 'prop-types'\n\nexport const PatternSquares = memo(({ id, background, color, size, padding, stagger }) => {\n let fullSize = size + padding\n const halfPadding = padding / 2\n if (stagger === true) {\n fullSize = size * 2 + padding * 2\n }\n\n return (\n \n \n \n {stagger && (\n \n )}\n \n )\n})\n\nPatternSquares.displayName = 'PatternSquares'\nPatternSquares.propTypes = {\n id: PropTypes.string.isRequired,\n color: PropTypes.string.isRequired,\n background: PropTypes.string.isRequired,\n size: PropTypes.number.isRequired,\n padding: PropTypes.number.isRequired,\n stagger: PropTypes.bool.isRequired,\n}\nPatternSquares.defaultProps = {\n color: '#000000',\n background: '#ffffff',\n size: 4,\n padding: 4,\n stagger: false,\n}\n\nexport const patternSquaresDef = (id, options = {}) => ({\n id,\n type: 'patternSquares',\n ...options,\n})\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 
2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nimport { PatternDots } from './PatternDots'\nimport { PatternLines } from './PatternLines'\nimport { PatternSquares } from './PatternSquares'\n\nexport const patternTypes = {\n patternDots: PatternDots,\n patternLines: PatternLines,\n patternSquares: PatternSquares,\n}\n\nexport * from './PatternDots'\nexport * from './PatternLines'\nexport * from './PatternSquares'\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nimport { createElement, memo } from 'react'\nimport PropTypes from 'prop-types'\nimport { gradientTypes } from './gradients'\nimport { patternTypes } from './patterns'\n\nexport const defsMapping = {\n ...gradientTypes,\n ...patternTypes,\n}\n\nconst Defs = ({ defs: definitions }) => {\n if (!definitions || definitions.length < 1) return null\n\n return (\n \n {definitions.map(({ type, ...def }) => {\n if (defsMapping[type])\n return createElement(defsMapping[type], { key: def.id, ...def })\n\n return null\n })}\n \n )\n}\n\nDefs.propTypes = {\n defs: PropTypes.arrayOf(\n PropTypes.shape({\n type: PropTypes.oneOf(Object.keys(defsMapping)).isRequired,\n id: PropTypes.string.isRequired,\n })\n ),\n}\n\nexport default memo(Defs)\n","import PropTypes from 'prop-types'\nimport { Defs } from './defs'\nimport { useTheme } from '../theming'\n\nconst SvgWrapper = ({\n width,\n height,\n margin,\n defs,\n children,\n role,\n ariaLabel,\n ariaLabelledBy,\n ariaDescribedBy,\n isFocusable,\n}) => {\n const theme = useTheme()\n\n return (\n \n \n \n {children}\n \n )\n}\n\nSvgWrapper.propTypes = {\n width: PropTypes.number.isRequired,\n height: PropTypes.number.isRequired,\n margin: PropTypes.shape({\n top: PropTypes.number.isRequired,\n left: PropTypes.number.isRequired,\n }).isRequired,\n defs: PropTypes.array,\n children: PropTypes.oneOfType([PropTypes.arrayOf(PropTypes.node), PropTypes.node]).isRequired,\n role: PropTypes.string,\n isFocusable: PropTypes.bool,\n ariaLabel: PropTypes.string,\n ariaLabelledBy: PropTypes.string,\n ariaDescribedBy: PropTypes.string,\n}\n\nexport default SvgWrapper\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nimport { memo } from 'react'\nimport PropTypes from 'prop-types'\n\nconst DotsItemSymbol = ({ size, color, borderWidth, borderColor }) => (\n \n)\n\nDotsItemSymbol.propTypes = {\n size: PropTypes.number.isRequired,\n color: PropTypes.string.isRequired,\n borderWidth: PropTypes.number.isRequired,\n borderColor: PropTypes.string.isRequired,\n}\n\nexport default memo(DotsItemSymbol)\n","import { createElement, memo } from 'react'\nimport PropTypes from 'prop-types'\nimport { useSpring, animated } from '@react-spring/web'\nimport { useTheme } from '../../theming'\nimport { useMotionConfig } from '../../motion'\nimport DotsItemSymbol from './DotsItemSymbol'\n\nconst DotsItem = ({\n x,\n y,\n symbol = DotsItemSymbol,\n size,\n datum,\n color,\n borderWidth,\n borderColor,\n label,\n labelTextAnchor = 'middle',\n labelYOffset = -12,\n}) => {\n const theme = useTheme()\n\n const { animate, config: springConfig } = 
useMotionConfig()\n const animatedProps = useSpring({\n transform: `translate(${x}, ${y})`,\n config: springConfig,\n immediate: !animate,\n })\n\n return (\n \n {createElement(symbol, {\n size,\n color,\n datum,\n borderWidth,\n borderColor,\n })}\n {label && (\n \n {label}\n \n )}\n \n )\n}\n\nDotsItem.propTypes = {\n x: PropTypes.number.isRequired,\n y: PropTypes.number.isRequired,\n datum: PropTypes.object.isRequired,\n\n size: PropTypes.number.isRequired,\n color: PropTypes.string.isRequired,\n borderWidth: PropTypes.number.isRequired,\n borderColor: PropTypes.string.isRequired,\n\n symbol: PropTypes.oneOfType([PropTypes.func, PropTypes.object]),\n\n label: PropTypes.oneOfType([PropTypes.string, PropTypes.number]),\n labelTextAnchor: PropTypes.oneOf(['start', 'middle', 'end']),\n labelYOffset: PropTypes.number.isRequired,\n}\n\nexport default memo(DotsItem)\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nimport { memo } from 'react'\nimport PropTypes from 'prop-types'\nimport { useTheme } from '../../../theming'\n\n/**\n *\n * @param {string} axis\n * @param {number} width\n * @param {number} height\n * @param {string} position\n * @param {number} offsetX\n * @param {number} offsetY\n * @param {string} orientation\n * @return {{ x: number, y: number, textAnchor: string }}\n */\nconst computeLabel = ({ axis, width, height, position, offsetX, offsetY, orientation }) => {\n let x = 0\n let y = 0\n const rotation = orientation === 'vertical' ? -90 : 0\n let textAnchor = 'start'\n\n if (axis === 'x') {\n switch (position) {\n case 'top-left':\n x = -offsetX\n y = offsetY\n textAnchor = 'end'\n break\n case 'top':\n y = -offsetY\n if (orientation === 'horizontal') {\n textAnchor = 'middle'\n } else {\n textAnchor = 'start'\n }\n break\n case 'top-right':\n x = offsetX\n y = offsetY\n if (orientation === 'horizontal') {\n textAnchor = 'start'\n } else {\n textAnchor = 'end'\n }\n break\n case 'right':\n x = offsetX\n y = height / 2\n if (orientation === 'horizontal') {\n textAnchor = 'start'\n } else {\n textAnchor = 'middle'\n }\n break\n case 'bottom-right':\n x = offsetX\n y = height - offsetY\n textAnchor = 'start'\n break\n case 'bottom':\n y = height + offsetY\n if (orientation === 'horizontal') {\n textAnchor = 'middle'\n } else {\n textAnchor = 'end'\n }\n break\n case 'bottom-left':\n y = height - offsetY\n x = -offsetX\n if (orientation === 'horizontal') {\n textAnchor = 'end'\n } else {\n textAnchor = 'start'\n }\n break\n case 'left':\n x = -offsetX\n y = height / 2\n if (orientation === 'horizontal') {\n textAnchor = 'end'\n } else {\n textAnchor = 'middle'\n }\n break\n }\n } else {\n switch (position) {\n case 'top-left':\n x = offsetX\n y = -offsetY\n textAnchor = 'start'\n break\n case 'top':\n x = width / 2\n y = -offsetY\n if (orientation === 'horizontal') {\n textAnchor = 'middle'\n } else {\n textAnchor = 'start'\n }\n break\n case 'top-right':\n x = width - offsetX\n y = -offsetY\n if (orientation === 'horizontal') {\n textAnchor = 'end'\n } else {\n textAnchor = 'start'\n }\n break\n case 'right':\n x = width + offsetX\n if (orientation === 'horizontal') {\n textAnchor = 'start'\n } else {\n textAnchor = 'middle'\n }\n break\n case 'bottom-right':\n x = width - offsetX\n y = offsetY\n textAnchor = 'end'\n break\n case 'bottom':\n x = width / 2\n y = offsetY\n if (orientation === 
'horizontal') {\n textAnchor = 'middle'\n } else {\n textAnchor = 'end'\n }\n break\n case 'bottom-left':\n x = offsetX\n y = offsetY\n if (orientation === 'horizontal') {\n textAnchor = 'start'\n } else {\n textAnchor = 'end'\n }\n break\n case 'left':\n x = -offsetX\n if (orientation === 'horizontal') {\n textAnchor = 'end'\n } else {\n textAnchor = 'middle'\n }\n break\n }\n }\n\n return { x, y, rotation, textAnchor }\n}\n\nconst CartesianMarkersItem = ({\n width,\n height,\n axis,\n scale,\n value,\n lineStyle,\n textStyle,\n legend,\n legendPosition,\n legendOffsetX,\n legendOffsetY,\n legendOrientation,\n}) => {\n const theme = useTheme()\n\n let x = 0\n let x2 = 0\n let y = 0\n let y2 = 0\n\n if (axis === 'y') {\n y = scale(value)\n x2 = width\n } else {\n x = scale(value)\n y2 = height\n }\n\n let legendNode = null\n if (legend) {\n const legendProps = computeLabel({\n axis,\n width,\n height,\n position: legendPosition,\n offsetX: legendOffsetX,\n offsetY: legendOffsetY,\n orientation: legendOrientation,\n })\n legendNode = (\n \n {legend}\n \n )\n }\n\n return (\n \n \n {legendNode}\n \n )\n}\n\nCartesianMarkersItem.propTypes = {\n width: PropTypes.number.isRequired,\n height: PropTypes.number.isRequired,\n\n axis: PropTypes.oneOf(['x', 'y']).isRequired,\n scale: PropTypes.func.isRequired,\n value: PropTypes.oneOfType([PropTypes.number, PropTypes.string, PropTypes.instanceOf(Date)])\n .isRequired,\n lineStyle: PropTypes.object,\n textStyle: PropTypes.object,\n\n legend: PropTypes.string,\n legendPosition: PropTypes.oneOf([\n 'top-left',\n 'top',\n 'top-right',\n 'right',\n 'bottom-right',\n 'bottom',\n 'bottom-left',\n 'left',\n ]),\n legendOffsetX: PropTypes.number.isRequired,\n legendOffsetY: PropTypes.number.isRequired,\n legendOrientation: PropTypes.oneOf(['horizontal', 'vertical']).isRequired,\n}\nCartesianMarkersItem.defaultProps = {\n legendPosition: 'top-right',\n legendOffsetX: 14,\n legendOffsetY: 14,\n legendOrientation: 'horizontal',\n}\n\nexport default memo(CartesianMarkersItem)\n","import { memo } from 'react'\nimport PropTypes from 'prop-types'\nimport CartesianMarkersItem from './CartesianMarkersItem'\n\nconst CartesianMarkers = ({ markers, width, height, xScale, yScale }) => {\n if (!markers || markers.length === 0) return null\n\n return markers.map((marker, i) => (\n \n ))\n}\n\nCartesianMarkers.propTypes = {\n width: PropTypes.number.isRequired,\n height: PropTypes.number.isRequired,\n\n xScale: PropTypes.func.isRequired,\n yScale: PropTypes.func.isRequired,\n\n markers: PropTypes.arrayOf(\n PropTypes.shape({\n axis: PropTypes.oneOf(['x', 'y']).isRequired,\n value: PropTypes.oneOfType([\n PropTypes.number,\n PropTypes.string,\n PropTypes.instanceOf(Date),\n ]).isRequired,\n lineStyle: PropTypes.object,\n textStyle: PropTypes.object,\n })\n ),\n}\n\nexport default memo(CartesianMarkers)\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nimport { compose, setPropTypes, withPropsOnChange } from '@nivo/recompose'\nimport PropTypes from 'prop-types'\nimport { defaultTheme, extendDefaultTheme } from '../theming'\n\n/**\n * This HOC watch theme prop change\n * and returns it deeply merged with default theme.\n * Using it prevent from having a new ref each time\n * we pass through the component, useful for shallow comparison.\n */\nexport default ({ srcKey = 'theme', destKey = 
'theme' } = {}) =>\n compose(\n setPropTypes({\n [srcKey]: PropTypes.object,\n }),\n withPropsOnChange([srcKey], props => ({\n [destKey]: extendDefaultTheme(defaultTheme, props[srcKey]),\n }))\n )\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\nimport { Component } from 'react'\nimport { Container } from '../components/Container'\n\nexport const withContainer = WrappedComponent => {\n // eslint-disable-next-line react/display-name\n return class extends Component {\n render() {\n // eslint-disable-next-line react/prop-types\n const {\n theme,\n renderWrapper,\n animate,\n motionStiffness,\n motionDamping,\n motionConfig,\n ...childProps\n } = this.props\n\n return (\n \n \n \n )\n }\n }\n}\n","import isFunction from 'lodash/isFunction'\nimport get from 'lodash/get'\nimport { format } from 'd3-format'\nimport { useMemo } from 'react'\n\nexport const getLabelGenerator = (_label, labelFormat) => {\n const getRawLabel = isFunction(_label) ? _label : d => get(d, _label)\n let formatter\n if (labelFormat) {\n formatter = isFunction(labelFormat) ? labelFormat : format(labelFormat)\n }\n\n if (formatter) return d => formatter(getRawLabel(d))\n return getRawLabel\n}\n\nexport const getPropertyAccessor = accessor =>\n isFunction(accessor) ? accessor : d => get(d, accessor)\n\nexport const usePropertyAccessor = accessor =>\n useMemo(() => getPropertyAccessor(accessor), [accessor])\n","/*\n * This file is part of the nivo project.\n *\n * Copyright 2016-present, Raphaël Benitte.\n *\n * For the full copyright and license information, please view the LICENSE\n * file that was distributed with this source code.\n */\n\nexport * from './detect'\n\nexport const getRelativeCursor = (el, event) => {\n const { clientX, clientY } = event\n const bounds = el.getBoundingClientRect()\n\n return [clientX - bounds.left, clientY - bounds.top]\n}\n","import isFunction from 'lodash/isFunction'\nimport isPlainObject from 'lodash/isPlainObject'\nimport pick from 'lodash/pick'\nimport isEqual from 'lodash/isEqual'\nimport get from 'lodash/get'\nimport set from 'lodash/set'\nimport { gradientTypes, patternTypes } from '../components/defs'\n\nconst gradientKeys = Object.keys(gradientTypes)\nconst patternKeys = Object.keys(patternTypes)\n\n/**\n * Check a node matches given def predicate.\n *\n * @param {string|Function|Object} predicate\n * @param {Object} node\n * @param {string} [dataKey] - Optional path to access node data\n * @returns {boolean}\n */\nexport const isMatchingDef = (predicate, node, dataKey) => {\n if (predicate === '*') {\n return true\n } else if (isFunction(predicate)) {\n return predicate(node)\n } else if (isPlainObject(predicate)) {\n const data = dataKey ? 
get(node, dataKey) : node\n return isEqual(pick(data, Object.keys(predicate)), predicate)\n }\n\n return false\n}\n\n/**\n * Compute SVG defs.\n *\n * @param {Array.} defs - Base SVG defs configs\n * @param {Array.} nodes - Data nodes to apply defs on\n * @param {Array.} rules - Rules used to conditionally apply defs on data nodes\n * @param {string} [dataKey] - Path to node data, used for rule object query based predicate\n * @param {string} [colorKey='color'] - Node color path, required when inheritance is involved\n * @param {string} [targetKey='fill'] - Node target property to apply def ID on\n * @returns {Array}\n */\nexport const bindDefs = (\n defs,\n nodes,\n rules,\n { dataKey, colorKey = 'color', targetKey = 'fill' } = {}\n) => {\n let boundDefs = []\n\n // will hold generated variation ids,\n // to avoid generating multiple identical defs\n const generatedIds = {}\n\n if (defs.length && nodes.length) {\n // first, add base defs\n boundDefs = [...defs]\n\n nodes.forEach(node => {\n for (let i = 0; i < rules.length; i++) {\n const { id, match } = rules[i]\n if (isMatchingDef(match, node, dataKey)) {\n const def = defs.find(({ id: defId }) => defId === id)\n if (def) {\n if (patternKeys.includes(def.type)) {\n if (def.background === 'inherit' || def.color === 'inherit') {\n const nodeColor = get(node, colorKey)\n let background = def.background\n let color = def.color\n\n let inheritedId = id\n if (def.background === 'inherit') {\n inheritedId = `${inheritedId}.bg.${nodeColor}`\n background = nodeColor\n }\n if (def.color === 'inherit') {\n inheritedId = `${inheritedId}.fg.${nodeColor}`\n color = nodeColor\n }\n\n set(node, targetKey, `url(#${inheritedId})`)\n if (!generatedIds[inheritedId]) {\n boundDefs.push({\n ...def,\n id: inheritedId,\n background,\n color,\n })\n generatedIds[inheritedId] = 1\n }\n } else {\n // do not generate new def as there's no inheritance involved\n set(node, targetKey, `url(#${id})`)\n }\n } else if (gradientKeys.includes(def.type)) {\n const allColors = def.colors.map(({ color }) => color)\n\n if (allColors.includes('inherit')) {\n const nodeColor = get(node, colorKey)\n\n let inheritedId = id\n const inheritedDef = {\n ...def,\n colors: def.colors.map((colorStop, i) => {\n if (colorStop.color !== 'inherit') return colorStop\n\n inheritedId = `${inheritedId}.${i}.${nodeColor}`\n\n return {\n ...colorStop,\n color:\n colorStop.color === 'inherit'\n ? 
nodeColor\n : colorStop.color,\n }\n }),\n }\n inheritedDef.id = inheritedId\n\n set(node, targetKey, `url(#${inheritedId})`)\n if (!generatedIds[inheritedId]) {\n boundDefs.push(inheritedDef)\n generatedIds[inheritedId] = 1\n }\n } else {\n // do not generate new def as there's no inheritance involved\n set(node, targetKey, `url(#${id})`)\n }\n }\n }\n\n // break loop on first match\n break\n }\n }\n })\n }\n\n return boundDefs\n}\n","var e10 = Math.sqrt(50),\n e5 = Math.sqrt(10),\n e2 = Math.sqrt(2);\n\nexport default function(start, stop, count) {\n var reverse,\n i = -1,\n n,\n ticks,\n step;\n\n stop = +stop, start = +start, count = +count;\n if (start === stop && count > 0) return [start];\n if (reverse = stop < start) n = start, start = stop, stop = n;\n if ((step = tickIncrement(start, stop, count)) === 0 || !isFinite(step)) return [];\n\n if (step > 0) {\n let r0 = Math.round(start / step), r1 = Math.round(stop / step);\n if (r0 * step < start) ++r0;\n if (r1 * step > stop) --r1;\n ticks = new Array(n = r1 - r0 + 1);\n while (++i < n) ticks[i] = (r0 + i) * step;\n } else {\n step = -step;\n let r0 = Math.round(start * step), r1 = Math.round(stop * step);\n if (r0 / step < start) ++r0;\n if (r1 / step > stop) --r1;\n ticks = new Array(n = r1 - r0 + 1);\n while (++i < n) ticks[i] = (r0 + i) / step;\n }\n\n if (reverse) ticks.reverse();\n\n return ticks;\n}\n\nexport function tickIncrement(start, stop, count) {\n var step = (stop - start) / Math.max(0, count),\n power = Math.floor(Math.log(step) / Math.LN10),\n error = step / Math.pow(10, power);\n return power >= 0\n ? (error >= e10 ? 10 : error >= e5 ? 5 : error >= e2 ? 2 : 1) * Math.pow(10, power)\n : -Math.pow(10, -power) / (error >= e10 ? 10 : error >= e5 ? 5 : error >= e2 ? 2 : 1);\n}\n\nexport function tickStep(start, stop, count) {\n var step0 = Math.abs(stop - start) / Math.max(0, count),\n step1 = Math.pow(10, Math.floor(Math.log(step0) / Math.LN10)),\n error = step0 / step1;\n if (error >= e10) step1 *= 10;\n else if (error >= e5) step1 *= 5;\n else if (error >= e2) step1 *= 2;\n return stop < start ? -step1 : step1;\n}\n","export default function(a, b) {\n return a < b ? -1 : a > b ? 1 : a >= b ? 0 : NaN;\n}\n","import ascending from \"./ascending.js\";\n\nexport default function(f) {\n let delta = f;\n let compare = f;\n\n if (f.length === 1) {\n delta = (d, x) => f(d) - x;\n compare = ascendingComparator(f);\n }\n\n function left(a, x, lo, hi) {\n if (lo == null) lo = 0;\n if (hi == null) hi = a.length;\n while (lo < hi) {\n const mid = (lo + hi) >>> 1;\n if (compare(a[mid], x) < 0) lo = mid + 1;\n else hi = mid;\n }\n return lo;\n }\n\n function right(a, x, lo, hi) {\n if (lo == null) lo = 0;\n if (hi == null) hi = a.length;\n while (lo < hi) {\n const mid = (lo + hi) >>> 1;\n if (compare(a[mid], x) > 0) hi = mid;\n else lo = mid + 1;\n }\n return lo;\n }\n\n function center(a, x, lo, hi) {\n if (lo == null) lo = 0;\n if (hi == null) hi = a.length;\n const i = left(a, x, lo, hi - 1);\n return i > lo && delta(a[i - 1], x) > -delta(a[i], x) ? 
i - 1 : i;\n }\n\n return {left, center, right};\n}\n\nfunction ascendingComparator(f) {\n return (d, x) => ascending(f(d), x);\n}\n","import ascending from \"./ascending.js\";\nimport bisector from \"./bisector.js\";\nimport number from \"./number.js\";\n\nconst ascendingBisect = bisector(ascending);\nexport const bisectRight = ascendingBisect.right;\nexport const bisectLeft = ascendingBisect.left;\nexport const bisectCenter = bisector(number).center;\nexport default bisectRight;\n","export default function(x) {\n return x === null ? NaN : +x;\n}\n\nexport function* numbers(values, valueof) {\n if (valueof === undefined) {\n for (let value of values) {\n if (value != null && (value = +value) >= value) {\n yield value;\n }\n }\n } else {\n let index = -1;\n for (let value of values) {\n if ((value = valueof(value, ++index, values)) != null && (value = +value) >= value) {\n yield value;\n }\n }\n }\n}\n","import value from \"./value.js\";\nimport numberArray, {isNumberArray} from \"./numberArray.js\";\n\nexport default function(a, b) {\n return (isNumberArray(b) ? numberArray : genericArray)(a, b);\n}\n\nexport function genericArray(a, b) {\n var nb = b ? b.length : 0,\n na = a ? Math.min(nb, a.length) : 0,\n x = new Array(na),\n c = new Array(nb),\n i;\n\n for (i = 0; i < na; ++i) x[i] = value(a[i], b[i]);\n for (; i < nb; ++i) c[i] = b[i];\n\n return function(t) {\n for (i = 0; i < na; ++i) c[i] = x[i](t);\n return c;\n };\n}\n","export default function(a, b) {\n var d = new Date;\n return a = +a, b = +b, function(t) {\n return d.setTime(a * (1 - t) + b * t), d;\n };\n}\n","import value from \"./value.js\";\n\nexport default function(a, b) {\n var i = {},\n c = {},\n k;\n\n if (a === null || typeof a !== \"object\") a = {};\n if (b === null || typeof b !== \"object\") b = {};\n\n for (k in b) {\n if (k in a) {\n i[k] = value(a[k], b[k]);\n } else {\n c[k] = b[k];\n }\n }\n\n return function(t) {\n for (k in i) c[k] = i[k](t);\n return c;\n };\n}\n","export default function(a, b) {\n if (!b) b = [];\n var n = a ? Math.min(b.length, a.length) : 0,\n c = b.slice(),\n i;\n return function(t) {\n for (i = 0; i < n; ++i) c[i] = a[i] * (1 - t) + b[i] * t;\n return c;\n };\n}\n\nexport function isNumberArray(x) {\n return ArrayBuffer.isView(x) && !(x instanceof DataView);\n}\n","import {color} from \"d3-color\";\nimport rgb from \"./rgb.js\";\nimport {genericArray} from \"./array.js\";\nimport date from \"./date.js\";\nimport number from \"./number.js\";\nimport object from \"./object.js\";\nimport string from \"./string.js\";\nimport constant from \"./constant.js\";\nimport numberArray, {isNumberArray} from \"./numberArray.js\";\n\nexport default function(a, b) {\n var t = typeof b, c;\n return b == null || t === \"boolean\" ? constant(b)\n : (t === \"number\" ? number\n : t === \"string\" ? ((c = color(b)) ? (b = c, rgb) : string)\n : b instanceof color ? rgb\n : b instanceof Date ? date\n : isNumberArray(b) ? numberArray\n : Array.isArray(b) ? genericArray\n : typeof b.valueOf !== \"function\" && typeof b.toString !== \"function\" || isNaN(b) ? 
object\n : number)(a, b);\n}\n","export default function(a, b) {\n return a = +a, b = +b, function(t) {\n return Math.round(a * (1 - t) + b * t);\n };\n}\n","export default function number(x) {\n return +x;\n}\n","import {bisect} from \"d3-array\";\nimport {interpolate as interpolateValue, interpolateNumber, interpolateRound} from \"d3-interpolate\";\nimport constant from \"./constant.js\";\nimport number from \"./number.js\";\n\nvar unit = [0, 1];\n\nexport function identity(x) {\n return x;\n}\n\nfunction normalize(a, b) {\n return (b -= (a = +a))\n ? function(x) { return (x - a) / b; }\n : constant(isNaN(b) ? NaN : 0.5);\n}\n\nfunction clamper(a, b) {\n var t;\n if (a > b) t = a, a = b, b = t;\n return function(x) { return Math.max(a, Math.min(b, x)); };\n}\n\n// normalize(a, b)(x) takes a domain value x in [a,b] and returns the corresponding parameter t in [0,1].\n// interpolate(a, b)(t) takes a parameter t in [0,1] and returns the corresponding range value x in [a,b].\nfunction bimap(domain, range, interpolate) {\n var d0 = domain[0], d1 = domain[1], r0 = range[0], r1 = range[1];\n if (d1 < d0) d0 = normalize(d1, d0), r0 = interpolate(r1, r0);\n else d0 = normalize(d0, d1), r0 = interpolate(r0, r1);\n return function(x) { return r0(d0(x)); };\n}\n\nfunction polymap(domain, range, interpolate) {\n var j = Math.min(domain.length, range.length) - 1,\n d = new Array(j),\n r = new Array(j),\n i = -1;\n\n // Reverse descending domains.\n if (domain[j] < domain[0]) {\n domain = domain.slice().reverse();\n range = range.slice().reverse();\n }\n\n while (++i < j) {\n d[i] = normalize(domain[i], domain[i + 1]);\n r[i] = interpolate(range[i], range[i + 1]);\n }\n\n return function(x) {\n var i = bisect(domain, x, 1, j) - 1;\n return r[i](d[i](x));\n };\n}\n\nexport function copy(source, target) {\n return target\n .domain(source.domain())\n .range(source.range())\n .interpolate(source.interpolate())\n .clamp(source.clamp())\n .unknown(source.unknown());\n}\n\nexport function transformer() {\n var domain = unit,\n range = unit,\n interpolate = interpolateValue,\n transform,\n untransform,\n unknown,\n clamp = identity,\n piecewise,\n output,\n input;\n\n function rescale() {\n var n = Math.min(domain.length, range.length);\n if (clamp !== identity) clamp = clamper(domain[0], domain[n - 1]);\n piecewise = n > 2 ? polymap : bimap;\n output = input = null;\n return scale;\n }\n\n function scale(x) {\n return x == null || isNaN(x = +x) ? unknown : (output || (output = piecewise(domain.map(transform), range, interpolate)))(transform(clamp(x)));\n }\n\n scale.invert = function(y) {\n return clamp(untransform((input || (input = piecewise(range, domain.map(transform), interpolateNumber)))(y)));\n };\n\n scale.domain = function(_) {\n return arguments.length ? (domain = Array.from(_, number), rescale()) : domain.slice();\n };\n\n scale.range = function(_) {\n return arguments.length ? (range = Array.from(_), rescale()) : range.slice();\n };\n\n scale.rangeRound = function(_) {\n return range = Array.from(_), interpolate = interpolateRound, rescale();\n };\n\n scale.clamp = function(_) {\n return arguments.length ? (clamp = _ ? true : identity, rescale()) : clamp !== identity;\n };\n\n scale.interpolate = function(_) {\n return arguments.length ? (interpolate = _, rescale()) : interpolate;\n };\n\n scale.unknown = function(_) {\n return arguments.length ? 
(unknown = _, scale) : unknown;\n };\n\n return function(t, u) {\n transform = t, untransform = u;\n return rescale();\n };\n}\n\nexport default function continuous() {\n return transformer()(identity, identity);\n}\n","export default function constants(x) {\n return function() {\n return x;\n };\n}\n","// [[fill]align][sign][symbol][0][width][,][.precision][~][type]\nvar re = /^(?:(.)?([<>=^]))?([+\\-( ])?([$#])?(0)?(\\d+)?(,)?(\\.\\d+)?(~)?([a-z%])?$/i;\n\nexport default function formatSpecifier(specifier) {\n if (!(match = re.exec(specifier))) throw new Error(\"invalid format: \" + specifier);\n var match;\n return new FormatSpecifier({\n fill: match[1],\n align: match[2],\n sign: match[3],\n symbol: match[4],\n zero: match[5],\n width: match[6],\n comma: match[7],\n precision: match[8] && match[8].slice(1),\n trim: match[9],\n type: match[10]\n });\n}\n\nformatSpecifier.prototype = FormatSpecifier.prototype; // instanceof\n\nexport function FormatSpecifier(specifier) {\n this.fill = specifier.fill === undefined ? \" \" : specifier.fill + \"\";\n this.align = specifier.align === undefined ? \">\" : specifier.align + \"\";\n this.sign = specifier.sign === undefined ? \"-\" : specifier.sign + \"\";\n this.symbol = specifier.symbol === undefined ? \"\" : specifier.symbol + \"\";\n this.zero = !!specifier.zero;\n this.width = specifier.width === undefined ? undefined : +specifier.width;\n this.comma = !!specifier.comma;\n this.precision = specifier.precision === undefined ? undefined : +specifier.precision;\n this.trim = !!specifier.trim;\n this.type = specifier.type === undefined ? \"\" : specifier.type + \"\";\n}\n\nFormatSpecifier.prototype.toString = function() {\n return this.fill\n + this.align\n + this.sign\n + this.symbol\n + (this.zero ? \"0\" : \"\")\n + (this.width === undefined ? \"\" : Math.max(1, this.width | 0))\n + (this.comma ? \",\" : \"\")\n + (this.precision === undefined ? \"\" : \".\" + Math.max(0, this.precision | 0))\n + (this.trim ? \"~\" : \"\")\n + this.type;\n};\n","import {formatDecimalParts} from \"./formatDecimal.js\";\n\nexport var prefixExponent;\n\nexport default function(x, p) {\n var d = formatDecimalParts(x, p);\n if (!d) return x + \"\";\n var coefficient = d[0],\n exponent = d[1],\n i = exponent - (prefixExponent = Math.max(-8, Math.min(8, Math.floor(exponent / 3))) * 3) + 1,\n n = coefficient.length;\n return i === n ? coefficient\n : i > n ? coefficient + new Array(i - n + 1).join(\"0\")\n : i > 0 ? coefficient.slice(0, i) + \".\" + coefficient.slice(i)\n : \"0.\" + new Array(1 - i).join(\"0\") + formatDecimalParts(x, Math.max(0, p + i - 1))[0]; // less than 1y!\n}\n","export default function(x) {\n return Math.abs(x = Math.round(x)) >= 1e21\n ? x.toLocaleString(\"en\").replace(/,/g, \"\")\n : x.toString(10);\n}\n\n// Computes the decimal coefficient and exponent of the specified number x with\n// significant digits p, where x is positive and p is in [1, 21] or undefined.\n// For example, formatDecimalParts(1.23) returns [\"123\", 0].\nexport function formatDecimalParts(x, p) {\n if ((i = (x = p ? x.toExponential(p - 1) : x.toExponential()).indexOf(\"e\")) < 0) return null; // NaN, ±Infinity\n var i, coefficient = x.slice(0, i);\n\n // The string returned by toExponential either has the form \\d\\.\\d+e[-+]\\d+\n // (e.g., 1.2e+3) or the form \\de[-+]\\d+ (e.g., 1e+3).\n return [\n coefficient.length > 1 ? 
coefficient[0] + coefficient.slice(2) : coefficient,\n +x.slice(i + 1)\n ];\n}\n","import {formatDecimalParts} from \"./formatDecimal.js\";\n\nexport default function(x) {\n return x = formatDecimalParts(Math.abs(x)), x ? x[1] : NaN;\n}\n","import {formatDecimalParts} from \"./formatDecimal.js\";\n\nexport default function(x, p) {\n var d = formatDecimalParts(x, p);\n if (!d) return x + \"\";\n var coefficient = d[0],\n exponent = d[1];\n return exponent < 0 ? \"0.\" + new Array(-exponent).join(\"0\") + coefficient\n : coefficient.length > exponent + 1 ? coefficient.slice(0, exponent + 1) + \".\" + coefficient.slice(exponent + 1)\n : coefficient + new Array(exponent - coefficient.length + 2).join(\"0\");\n}\n","import formatDecimal from \"./formatDecimal.js\";\nimport formatPrefixAuto from \"./formatPrefixAuto.js\";\nimport formatRounded from \"./formatRounded.js\";\n\nexport default {\n \"%\": (x, p) => (x * 100).toFixed(p),\n \"b\": (x) => Math.round(x).toString(2),\n \"c\": (x) => x + \"\",\n \"d\": formatDecimal,\n \"e\": (x, p) => x.toExponential(p),\n \"f\": (x, p) => x.toFixed(p),\n \"g\": (x, p) => x.toPrecision(p),\n \"o\": (x) => Math.round(x).toString(8),\n \"p\": (x, p) => formatRounded(x * 100, p),\n \"r\": formatRounded,\n \"s\": formatPrefixAuto,\n \"X\": (x) => Math.round(x).toString(16).toUpperCase(),\n \"x\": (x) => Math.round(x).toString(16)\n};\n","export default function(x) {\n return x;\n}\n","import exponent from \"./exponent.js\";\nimport formatGroup from \"./formatGroup.js\";\nimport formatNumerals from \"./formatNumerals.js\";\nimport formatSpecifier from \"./formatSpecifier.js\";\nimport formatTrim from \"./formatTrim.js\";\nimport formatTypes from \"./formatTypes.js\";\nimport {prefixExponent} from \"./formatPrefixAuto.js\";\nimport identity from \"./identity.js\";\n\nvar map = Array.prototype.map,\n prefixes = [\"y\",\"z\",\"a\",\"f\",\"p\",\"n\",\"µ\",\"m\",\"\",\"k\",\"M\",\"G\",\"T\",\"P\",\"E\",\"Z\",\"Y\"];\n\nexport default function(locale) {\n var group = locale.grouping === undefined || locale.thousands === undefined ? identity : formatGroup(map.call(locale.grouping, Number), locale.thousands + \"\"),\n currencyPrefix = locale.currency === undefined ? \"\" : locale.currency[0] + \"\",\n currencySuffix = locale.currency === undefined ? \"\" : locale.currency[1] + \"\",\n decimal = locale.decimal === undefined ? \".\" : locale.decimal + \"\",\n numerals = locale.numerals === undefined ? identity : formatNumerals(map.call(locale.numerals, String)),\n percent = locale.percent === undefined ? \"%\" : locale.percent + \"\",\n minus = locale.minus === undefined ? \"−\" : locale.minus + \"\",\n nan = locale.nan === undefined ? 
\"NaN\" : locale.nan + \"\";\n\n function newFormat(specifier) {\n specifier = formatSpecifier(specifier);\n\n var fill = specifier.fill,\n align = specifier.align,\n sign = specifier.sign,\n symbol = specifier.symbol,\n zero = specifier.zero,\n width = specifier.width,\n comma = specifier.comma,\n precision = specifier.precision,\n trim = specifier.trim,\n type = specifier.type;\n\n // The \"n\" type is an alias for \",g\".\n if (type === \"n\") comma = true, type = \"g\";\n\n // The \"\" type, and any invalid type, is an alias for \".12~g\".\n else if (!formatTypes[type]) precision === undefined && (precision = 12), trim = true, type = \"g\";\n\n // If zero fill is specified, padding goes after sign and before digits.\n if (zero || (fill === \"0\" && align === \"=\")) zero = true, fill = \"0\", align = \"=\";\n\n // Compute the prefix and suffix.\n // For SI-prefix, the suffix is lazily computed.\n var prefix = symbol === \"$\" ? currencyPrefix : symbol === \"#\" && /[boxX]/.test(type) ? \"0\" + type.toLowerCase() : \"\",\n suffix = symbol === \"$\" ? currencySuffix : /[%p]/.test(type) ? percent : \"\";\n\n // What format function should we use?\n // Is this an integer type?\n // Can this type generate exponential notation?\n var formatType = formatTypes[type],\n maybeSuffix = /[defgprs%]/.test(type);\n\n // Set the default precision if not specified,\n // or clamp the specified precision to the supported range.\n // For significant precision, it must be in [1, 21].\n // For fixed precision, it must be in [0, 20].\n precision = precision === undefined ? 6\n : /[gprs]/.test(type) ? Math.max(1, Math.min(21, precision))\n : Math.max(0, Math.min(20, precision));\n\n function format(value) {\n var valuePrefix = prefix,\n valueSuffix = suffix,\n i, n, c;\n\n if (type === \"c\") {\n valueSuffix = formatType(value) + valueSuffix;\n value = \"\";\n } else {\n value = +value;\n\n // Determine the sign. -0 is not less than 0, but 1 / -0 is!\n var valueNegative = value < 0 || 1 / value < 0;\n\n // Perform the initial formatting.\n value = isNaN(value) ? nan : formatType(Math.abs(value), precision);\n\n // Trim insignificant zeros.\n if (trim) value = formatTrim(value);\n\n // If a negative value rounds to zero after formatting, and no explicit positive sign is requested, hide the sign.\n if (valueNegative && +value === 0 && sign !== \"+\") valueNegative = false;\n\n // Compute the prefix and suffix.\n valuePrefix = (valueNegative ? (sign === \"(\" ? sign : minus) : sign === \"-\" || sign === \"(\" ? \"\" : sign) + valuePrefix;\n valueSuffix = (type === \"s\" ? prefixes[8 + prefixExponent / 3] : \"\") + valueSuffix + (valueNegative && sign === \"(\" ? \")\" : \"\");\n\n // Break the formatted value into the integer “value” part that can be\n // grouped, and fractional or exponential “suffix” part that is not.\n if (maybeSuffix) {\n i = -1, n = value.length;\n while (++i < n) {\n if (c = value.charCodeAt(i), 48 > c || c > 57) {\n valueSuffix = (c === 46 ? decimal + value.slice(i + 1) : value.slice(i)) + valueSuffix;\n value = value.slice(0, i);\n break;\n }\n }\n }\n }\n\n // If the fill character is not \"0\", grouping is applied before padding.\n if (comma && !zero) value = group(value, Infinity);\n\n // Compute the padding.\n var length = valuePrefix.length + value.length + valueSuffix.length,\n padding = length < width ? 
new Array(width - length + 1).join(fill) : \"\";\n\n // If the fill character is \"0\", grouping is applied after padding.\n if (comma && zero) value = group(padding + value, padding.length ? width - valueSuffix.length : Infinity), padding = \"\";\n\n // Reconstruct the final output based on the desired alignment.\n switch (align) {\n case \"<\": value = valuePrefix + value + valueSuffix + padding; break;\n case \"=\": value = valuePrefix + padding + value + valueSuffix; break;\n case \"^\": value = padding.slice(0, length = padding.length >> 1) + valuePrefix + value + valueSuffix + padding.slice(length); break;\n default: value = padding + valuePrefix + value + valueSuffix; break;\n }\n\n return numerals(value);\n }\n\n format.toString = function() {\n return specifier + \"\";\n };\n\n return format;\n }\n\n function formatPrefix(specifier, value) {\n var f = newFormat((specifier = formatSpecifier(specifier), specifier.type = \"f\", specifier)),\n e = Math.max(-8, Math.min(8, Math.floor(exponent(value) / 3))) * 3,\n k = Math.pow(10, -e),\n prefix = prefixes[8 + e / 3];\n return function(value) {\n return f(k * value) + prefix;\n };\n }\n\n return {\n format: newFormat,\n formatPrefix: formatPrefix\n };\n}\n","import formatLocale from \"./locale.js\";\n\nvar locale;\nexport var format;\nexport var formatPrefix;\n\ndefaultLocale({\n thousands: \",\",\n grouping: [3],\n currency: [\"$\", \"\"]\n});\n\nexport default function defaultLocale(definition) {\n locale = formatLocale(definition);\n format = locale.format;\n formatPrefix = locale.formatPrefix;\n return locale;\n}\n","export default function(grouping, thousands) {\n return function(value, width) {\n var i = value.length,\n t = [],\n j = 0,\n g = grouping[0],\n length = 0;\n\n while (i > 0 && g > 0) {\n if (length + g + 1 > width) g = Math.max(1, width - length);\n t.push(value.substring(i -= g, i + g));\n if ((length += g + 1) > width) break;\n g = grouping[j = (j + 1) % grouping.length];\n }\n\n return t.reverse().join(thousands);\n };\n}\n","export default function(numerals) {\n return function(value) {\n return value.replace(/[0-9]/g, function(i) {\n return numerals[+i];\n });\n };\n}\n","// Trims insignificant zeros, e.g., replaces 1.2000k with 1.2k.\nexport default function(s) {\n out: for (var n = s.length, i = 1, i0 = -1, i1; i < n; ++i) {\n switch (s[i]) {\n case \".\": i0 = i1 = i; break;\n case \"0\": if (i0 === 0) i0 = i; i1 = i; break;\n default: if (!+s[i]) break out; if (i0 > 0) i0 = 0; break;\n }\n }\n return i0 > 0 ? s.slice(0, i0) + s.slice(i1 + 1) : s;\n}\n","import {tickStep} from \"d3-array\";\nimport {format, formatPrefix, formatSpecifier, precisionFixed, precisionPrefix, precisionRound} from \"d3-format\";\n\nexport default function tickFormat(start, stop, count, specifier) {\n var step = tickStep(start, stop, count),\n precision;\n specifier = formatSpecifier(specifier == null ? 
\",f\" : specifier);\n switch (specifier.type) {\n case \"s\": {\n var value = Math.max(Math.abs(start), Math.abs(stop));\n if (specifier.precision == null && !isNaN(precision = precisionPrefix(step, value))) specifier.precision = precision;\n return formatPrefix(specifier, value);\n }\n case \"\":\n case \"e\":\n case \"g\":\n case \"p\":\n case \"r\": {\n if (specifier.precision == null && !isNaN(precision = precisionRound(step, Math.max(Math.abs(start), Math.abs(stop))))) specifier.precision = precision - (specifier.type === \"e\");\n break;\n }\n case \"f\":\n case \"%\": {\n if (specifier.precision == null && !isNaN(precision = precisionFixed(step))) specifier.precision = precision - (specifier.type === \"%\") * 2;\n break;\n }\n }\n return format(specifier);\n}\n","import exponent from \"./exponent.js\";\n\nexport default function(step, value) {\n return Math.max(0, Math.max(-8, Math.min(8, Math.floor(exponent(value) / 3))) * 3 - exponent(Math.abs(step)));\n}\n","import exponent from \"./exponent.js\";\n\nexport default function(step, max) {\n step = Math.abs(step), max = Math.abs(max) - step;\n return Math.max(0, exponent(max) - exponent(step)) + 1;\n}\n","import exponent from \"./exponent.js\";\n\nexport default function(step) {\n return Math.max(0, -exponent(Math.abs(step)));\n}\n","import {ticks, tickIncrement} from \"d3-array\";\nimport continuous, {copy} from \"./continuous.js\";\nimport {initRange} from \"./init.js\";\nimport tickFormat from \"./tickFormat.js\";\n\nexport function linearish(scale) {\n var domain = scale.domain;\n\n scale.ticks = function(count) {\n var d = domain();\n return ticks(d[0], d[d.length - 1], count == null ? 10 : count);\n };\n\n scale.tickFormat = function(count, specifier) {\n var d = domain();\n return tickFormat(d[0], d[d.length - 1], count == null ? 10 : count, specifier);\n };\n\n scale.nice = function(count) {\n if (count == null) count = 10;\n\n var d = domain();\n var i0 = 0;\n var i1 = d.length - 1;\n var start = d[i0];\n var stop = d[i1];\n var prestep;\n var step;\n var maxIter = 10;\n\n if (stop < start) {\n step = start, start = stop, stop = step;\n step = i0, i0 = i1, i1 = step;\n }\n \n while (maxIter-- > 0) {\n step = tickIncrement(start, stop, count);\n if (step === prestep) {\n d[i0] = start\n d[i1] = stop\n return domain(d);\n } else if (step > 0) {\n start = Math.floor(start / step) * step;\n stop = Math.ceil(stop / step) * step;\n } else if (step < 0) {\n start = Math.ceil(start * step) / step;\n stop = Math.floor(stop * step) / step;\n } else {\n break;\n }\n prestep = step;\n }\n\n return scale;\n };\n\n return scale;\n}\n\nexport default function linear() {\n var scale = continuous();\n\n scale.copy = function() {\n return copy(scale, linear());\n };\n\n initRange.apply(scale, arguments);\n\n return linearish(scale);\n}\n","import {range as sequence} from \"d3-array\";\nimport {initRange} from \"./init.js\";\nimport ordinal from \"./ordinal.js\";\n\nexport default function band() {\n var scale = ordinal().unknown(undefined),\n domain = scale.domain,\n ordinalRange = scale.range,\n r0 = 0,\n r1 = 1,\n step,\n bandwidth,\n round = false,\n paddingInner = 0,\n paddingOuter = 0,\n align = 0.5;\n\n delete scale.unknown;\n\n function rescale() {\n var n = domain().length,\n reverse = r1 < r0,\n start = reverse ? r1 : r0,\n stop = reverse ? 
r0 : r1;\n step = (stop - start) / Math.max(1, n - paddingInner + paddingOuter * 2);\n if (round) step = Math.floor(step);\n start += (stop - start - step * (n - paddingInner)) * align;\n bandwidth = step * (1 - paddingInner);\n if (round) start = Math.round(start), bandwidth = Math.round(bandwidth);\n var values = sequence(n).map(function(i) { return start + step * i; });\n return ordinalRange(reverse ? values.reverse() : values);\n }\n\n scale.domain = function(_) {\n return arguments.length ? (domain(_), rescale()) : domain();\n };\n\n scale.range = function(_) {\n return arguments.length ? ([r0, r1] = _, r0 = +r0, r1 = +r1, rescale()) : [r0, r1];\n };\n\n scale.rangeRound = function(_) {\n return [r0, r1] = _, r0 = +r0, r1 = +r1, round = true, rescale();\n };\n\n scale.bandwidth = function() {\n return bandwidth;\n };\n\n scale.step = function() {\n return step;\n };\n\n scale.round = function(_) {\n return arguments.length ? (round = !!_, rescale()) : round;\n };\n\n scale.padding = function(_) {\n return arguments.length ? (paddingInner = Math.min(1, paddingOuter = +_), rescale()) : paddingInner;\n };\n\n scale.paddingInner = function(_) {\n return arguments.length ? (paddingInner = Math.min(1, _), rescale()) : paddingInner;\n };\n\n scale.paddingOuter = function(_) {\n return arguments.length ? (paddingOuter = +_, rescale()) : paddingOuter;\n };\n\n scale.align = function(_) {\n return arguments.length ? (align = Math.max(0, Math.min(1, _)), rescale()) : align;\n };\n\n scale.copy = function() {\n return band(domain(), [r0, r1])\n .round(round)\n .paddingInner(paddingInner)\n .paddingOuter(paddingOuter)\n .align(align);\n };\n\n return initRange.apply(rescale(), arguments);\n}\n\nfunction pointish(scale) {\n var copy = scale.copy;\n\n scale.padding = scale.paddingOuter;\n delete scale.paddingInner;\n delete scale.paddingOuter;\n\n scale.copy = function() {\n return pointish(copy());\n };\n\n return scale;\n}\n\nexport function point() {\n return pointish(band.apply(null, arguments).paddingInner(1));\n}\n","export default function(start, stop, step) {\n start = +start, stop = +stop, step = (n = arguments.length) < 2 ? (stop = start, start = 0, 1) : n < 3 ? 
1 : +step;\n\n var i = -1,\n n = Math.max(0, Math.ceil((stop - start) / step)) | 0,\n range = new Array(n);\n\n while (++i < n) {\n range[i] = start + i * step;\n }\n\n return range;\n}\n","import interval from \"./interval.js\";\n\nvar millisecond = interval(function() {\n // noop\n}, function(date, step) {\n date.setTime(+date + step);\n}, function(start, end) {\n return end - start;\n});\n\n// An optimized implementation for this simple case.\nmillisecond.every = function(k) {\n k = Math.floor(k);\n if (!isFinite(k) || !(k > 0)) return null;\n if (!(k > 1)) return millisecond;\n return interval(function(date) {\n date.setTime(Math.floor(date / k) * k);\n }, function(date, step) {\n date.setTime(+date + step * k);\n }, function(start, end) {\n return (end - start) / k;\n });\n};\n\nexport default millisecond;\nexport var milliseconds = millisecond.range;\n","import interval from \"./interval.js\";\nimport {durationSecond} from \"./duration.js\";\n\nvar second = interval(function(date) {\n date.setTime(date - date.getMilliseconds());\n}, function(date, step) {\n date.setTime(+date + step * durationSecond);\n}, function(start, end) {\n return (end - start) / durationSecond;\n}, function(date) {\n return date.getUTCSeconds();\n});\n\nexport default second;\nexport var seconds = second.range;\n","import interval from \"./interval.js\";\nimport {durationMinute, durationSecond} from \"./duration.js\";\n\nvar minute = interval(function(date) {\n date.setTime(date - date.getMilliseconds() - date.getSeconds() * durationSecond);\n}, function(date, step) {\n date.setTime(+date + step * durationMinute);\n}, function(start, end) {\n return (end - start) / durationMinute;\n}, function(date) {\n return date.getMinutes();\n});\n\nexport default minute;\nexport var minutes = minute.range;\n","import interval from \"./interval.js\";\nimport {durationHour, durationMinute, durationSecond} from \"./duration.js\";\n\nvar hour = interval(function(date) {\n date.setTime(date - date.getMilliseconds() - date.getSeconds() * durationSecond - date.getMinutes() * durationMinute);\n}, function(date, step) {\n date.setTime(+date + step * durationHour);\n}, function(start, end) {\n return (end - start) / durationHour;\n}, function(date) {\n return date.getHours();\n});\n\nexport default hour;\nexport var hours = hour.range;\n","import interval from \"./interval.js\";\n\nvar month = interval(function(date) {\n date.setDate(1);\n date.setHours(0, 0, 0, 0);\n}, function(date, step) {\n date.setMonth(date.getMonth() + step);\n}, function(start, end) {\n return end.getMonth() - start.getMonth() + (end.getFullYear() - start.getFullYear()) * 12;\n}, function(date) {\n return date.getMonth();\n});\n\nexport default month;\nexport var months = month.range;\n","import interval from \"./interval.js\";\nimport {durationMinute} from \"./duration.js\";\n\nvar utcMinute = interval(function(date) {\n date.setUTCSeconds(0, 0);\n}, function(date, step) {\n date.setTime(+date + step * durationMinute);\n}, function(start, end) {\n return (end - start) / durationMinute;\n}, function(date) {\n return date.getUTCMinutes();\n});\n\nexport default utcMinute;\nexport var utcMinutes = utcMinute.range;\n","import interval from \"./interval.js\";\nimport {durationHour} from \"./duration.js\";\n\nvar utcHour = interval(function(date) {\n date.setUTCMinutes(0, 0, 0);\n}, function(date, step) {\n date.setTime(+date + step * durationHour);\n}, function(start, end) {\n return (end - start) / durationHour;\n}, function(date) {\n return 
date.getUTCHours();\n});\n\nexport default utcHour;\nexport var utcHours = utcHour.range;\n","import interval from \"./interval.js\";\n\nvar utcMonth = interval(function(date) {\n date.setUTCDate(1);\n date.setUTCHours(0, 0, 0, 0);\n}, function(date, step) {\n date.setUTCMonth(date.getUTCMonth() + step);\n}, function(start, end) {\n return end.getUTCMonth() - start.getUTCMonth() + (end.getUTCFullYear() - start.getUTCFullYear()) * 12;\n}, function(date) {\n return date.getUTCMonth();\n});\n\nexport default utcMonth;\nexport var utcMonths = utcMonth.range;\n","import {bisector, tickStep} from \"d3-array\";\nimport {durationDay, durationHour, durationMinute, durationMonth, durationSecond, durationWeek, durationYear} from \"./duration.js\";\nimport millisecond from \"./millisecond.js\";\nimport second from \"./second.js\";\nimport minute from \"./minute.js\";\nimport hour from \"./hour.js\";\nimport day from \"./day.js\";\nimport {sunday as week} from \"./week.js\";\nimport month from \"./month.js\";\nimport year from \"./year.js\";\nimport utcMinute from \"./utcMinute.js\";\nimport utcHour from \"./utcHour.js\";\nimport utcDay from \"./utcDay.js\";\nimport {utcSunday as utcWeek} from \"./utcWeek.js\";\nimport utcMonth from \"./utcMonth.js\";\nimport utcYear from \"./utcYear.js\";\n\nfunction ticker(year, month, week, day, hour, minute) {\n\n const tickIntervals = [\n [second, 1, durationSecond],\n [second, 5, 5 * durationSecond],\n [second, 15, 15 * durationSecond],\n [second, 30, 30 * durationSecond],\n [minute, 1, durationMinute],\n [minute, 5, 5 * durationMinute],\n [minute, 15, 15 * durationMinute],\n [minute, 30, 30 * durationMinute],\n [ hour, 1, durationHour ],\n [ hour, 3, 3 * durationHour ],\n [ hour, 6, 6 * durationHour ],\n [ hour, 12, 12 * durationHour ],\n [ day, 1, durationDay ],\n [ day, 2, 2 * durationDay ],\n [ week, 1, durationWeek ],\n [ month, 1, durationMonth ],\n [ month, 3, 3 * durationMonth ],\n [ year, 1, durationYear ]\n ];\n\n function ticks(start, stop, count) {\n const reverse = stop < start;\n if (reverse) [start, stop] = [stop, start];\n const interval = count && typeof count.range === \"function\" ? count : tickInterval(start, stop, count);\n const ticks = interval ? interval.range(start, +stop + 1) : []; // inclusive stop\n return reverse ? ticks.reverse() : ticks;\n }\n\n function tickInterval(start, stop, count) {\n const target = Math.abs(stop - start) / count;\n const i = bisector(([,, step]) => step).right(tickIntervals, target);\n if (i === tickIntervals.length) return year.every(tickStep(start / durationYear, stop / durationYear, count));\n if (i === 0) return millisecond.every(Math.max(tickStep(start, stop, count), 1));\n const [t, step] = tickIntervals[target / tickIntervals[i - 1][2] < tickIntervals[i][2] / target ? 
i - 1 : i];\n return t.every(step);\n }\n\n return [ticks, tickInterval];\n}\n\nconst [utcTicks, utcTickInterval] = ticker(utcYear, utcMonth, utcWeek, utcDay, utcHour, utcMinute);\nconst [timeTicks, timeTickInterval] = ticker(year, month, week, day, hour, minute);\n\nexport {utcTicks, utcTickInterval, timeTicks, timeTickInterval};\n","export default function nice(domain, interval) {\n domain = domain.slice();\n\n var i0 = 0,\n i1 = domain.length - 1,\n x0 = domain[i0],\n x1 = domain[i1],\n t;\n\n if (x1 < x0) {\n t = i0, i0 = i1, i1 = t;\n t = x0, x0 = x1, x1 = t;\n }\n\n domain[i0] = interval.floor(x0);\n domain[i1] = interval.ceil(x1);\n return domain;\n}\n","import {timeYear, timeMonth, timeWeek, timeDay, timeHour, timeMinute, timeSecond, timeTicks, timeTickInterval} from \"d3-time\";\nimport {timeFormat} from \"d3-time-format\";\nimport continuous, {copy} from \"./continuous.js\";\nimport {initRange} from \"./init.js\";\nimport nice from \"./nice.js\";\n\nfunction date(t) {\n return new Date(t);\n}\n\nfunction number(t) {\n return t instanceof Date ? +t : +new Date(+t);\n}\n\nexport function calendar(ticks, tickInterval, year, month, week, day, hour, minute, second, format) {\n var scale = continuous(),\n invert = scale.invert,\n domain = scale.domain;\n\n var formatMillisecond = format(\".%L\"),\n formatSecond = format(\":%S\"),\n formatMinute = format(\"%I:%M\"),\n formatHour = format(\"%I %p\"),\n formatDay = format(\"%a %d\"),\n formatWeek = format(\"%b %d\"),\n formatMonth = format(\"%B\"),\n formatYear = format(\"%Y\");\n\n function tickFormat(date) {\n return (second(date) < date ? formatMillisecond\n : minute(date) < date ? formatSecond\n : hour(date) < date ? formatMinute\n : day(date) < date ? formatHour\n : month(date) < date ? (week(date) < date ? formatDay : formatWeek)\n : year(date) < date ? formatMonth\n : formatYear)(date);\n }\n\n scale.invert = function(y) {\n return new Date(invert(y));\n };\n\n scale.domain = function(_) {\n return arguments.length ? domain(Array.from(_, number)) : domain().map(date);\n };\n\n scale.ticks = function(interval) {\n var d = domain();\n return ticks(d[0], d[d.length - 1], interval == null ? 10 : interval);\n };\n\n scale.tickFormat = function(count, specifier) {\n return specifier == null ? tickFormat : format(specifier);\n };\n\n scale.nice = function(interval) {\n var d = domain();\n if (!interval || typeof interval.range !== \"function\") interval = tickInterval(d[0], d[d.length - 1], interval == null ? 10 : interval);\n return interval ? domain(nice(d, interval)) : scale;\n };\n\n scale.copy = function() {\n return copy(scale, calendar(ticks, tickInterval, year, month, week, day, hour, minute, second, format));\n };\n\n return scale;\n}\n\nexport default function time() {\n return initRange.apply(calendar(timeTicks, timeTickInterval, timeYear, timeMonth, timeWeek, timeDay, timeHour, timeMinute, timeSecond, timeFormat).domain([new Date(2000, 0, 1), new Date(2000, 0, 2)]), arguments);\n}\n","import {ticks} from \"d3-array\";\nimport {format} from \"d3-format\";\nimport nice from \"./nice.js\";\nimport {copy, transformer} from \"./continuous.js\";\nimport {initRange} from \"./init.js\";\n\nfunction transformLog(x) {\n return Math.log(x);\n}\n\nfunction transformExp(x) {\n return Math.exp(x);\n}\n\nfunction transformLogn(x) {\n return -Math.log(-x);\n}\n\nfunction transformExpn(x) {\n return -Math.exp(-x);\n}\n\nfunction pow10(x) {\n return isFinite(x) ? +(\"1e\" + x) : x < 0 ? 0 : x;\n}\n\nfunction powp(base) {\n return base === 10 ? 
pow10\n : base === Math.E ? Math.exp\n : function(x) { return Math.pow(base, x); };\n}\n\nfunction logp(base) {\n return base === Math.E ? Math.log\n : base === 10 && Math.log10\n || base === 2 && Math.log2\n || (base = Math.log(base), function(x) { return Math.log(x) / base; });\n}\n\nfunction reflect(f) {\n return function(x) {\n return -f(-x);\n };\n}\n\nexport function loggish(transform) {\n var scale = transform(transformLog, transformExp),\n domain = scale.domain,\n base = 10,\n logs,\n pows;\n\n function rescale() {\n logs = logp(base), pows = powp(base);\n if (domain()[0] < 0) {\n logs = reflect(logs), pows = reflect(pows);\n transform(transformLogn, transformExpn);\n } else {\n transform(transformLog, transformExp);\n }\n return scale;\n }\n\n scale.base = function(_) {\n return arguments.length ? (base = +_, rescale()) : base;\n };\n\n scale.domain = function(_) {\n return arguments.length ? (domain(_), rescale()) : domain();\n };\n\n scale.ticks = function(count) {\n var d = domain(),\n u = d[0],\n v = d[d.length - 1],\n r;\n\n if (r = v < u) i = u, u = v, v = i;\n\n var i = logs(u),\n j = logs(v),\n p,\n k,\n t,\n n = count == null ? 10 : +count,\n z = [];\n\n if (!(base % 1) && j - i < n) {\n i = Math.floor(i), j = Math.ceil(j);\n if (u > 0) for (; i <= j; ++i) {\n for (k = 1, p = pows(i); k < base; ++k) {\n t = p * k;\n if (t < u) continue;\n if (t > v) break;\n z.push(t);\n }\n } else for (; i <= j; ++i) {\n for (k = base - 1, p = pows(i); k >= 1; --k) {\n t = p * k;\n if (t < u) continue;\n if (t > v) break;\n z.push(t);\n }\n }\n if (z.length * 2 < n) z = ticks(u, v, n);\n } else {\n z = ticks(i, j, Math.min(j - i, n)).map(pows);\n }\n\n return r ? z.reverse() : z;\n };\n\n scale.tickFormat = function(count, specifier) {\n if (specifier == null) specifier = base === 10 ? \".0e\" : \",\";\n if (typeof specifier !== \"function\") specifier = format(specifier);\n if (count === Infinity) return specifier;\n if (count == null) count = 10;\n var k = Math.max(1, base * count / scale.ticks().length); // TODO fast estimate?\n return function(d) {\n var i = d / pows(Math.round(logs(d)));\n if (i * base < base - 0.5) i *= base;\n return i <= k ? specifier(d) : \"\";\n };\n };\n\n scale.nice = function() {\n return domain(nice(domain(), {\n floor: function(x) { return pows(Math.floor(logs(x))); },\n ceil: function(x) { return pows(Math.ceil(logs(x))); }\n }));\n };\n\n return scale;\n}\n\nexport default function log() {\n var scale = loggish(transformer()).domain([1, 10]);\n\n scale.copy = function() {\n return copy(scale, log()).base(scale.base());\n };\n\n initRange.apply(scale, arguments);\n\n return scale;\n}\n","import {linearish} from \"./linear.js\";\nimport {copy, transformer} from \"./continuous.js\";\nimport {initRange} from \"./init.js\";\n\nfunction transformSymlog(c) {\n return function(x) {\n return Math.sign(x) * Math.log1p(Math.abs(x / c));\n };\n}\n\nfunction transformSymexp(c) {\n return function(x) {\n return Math.sign(x) * Math.expm1(Math.abs(x)) * c;\n };\n}\n\nexport function symlogish(transform) {\n var c = 1, scale = transform(transformSymlog(c), transformSymexp(c));\n\n scale.constant = function(_) {\n return arguments.length ? 
transform(transformSymlog(c = +_), transformSymexp(c)) : c;\n };\n\n return linearish(scale);\n}\n\nexport default function symlog() {\n var scale = symlogish(transformer());\n\n scale.copy = function() {\n return copy(scale, symlog()).constant(scale.constant());\n };\n\n return initRange.apply(scale, arguments);\n}\n","var t0 = new Date,\n t1 = new Date;\n\nexport default function newInterval(floori, offseti, count, field) {\n\n function interval(date) {\n return floori(date = arguments.length === 0 ? new Date : new Date(+date)), date;\n }\n\n interval.floor = function(date) {\n return floori(date = new Date(+date)), date;\n };\n\n interval.ceil = function(date) {\n return floori(date = new Date(date - 1)), offseti(date, 1), floori(date), date;\n };\n\n interval.round = function(date) {\n var d0 = interval(date),\n d1 = interval.ceil(date);\n return date - d0 < d1 - date ? d0 : d1;\n };\n\n interval.offset = function(date, step) {\n return offseti(date = new Date(+date), step == null ? 1 : Math.floor(step)), date;\n };\n\n interval.range = function(start, stop, step) {\n var range = [], previous;\n start = interval.ceil(start);\n step = step == null ? 1 : Math.floor(step);\n if (!(start < stop) || !(step > 0)) return range; // also handles Invalid Date\n do range.push(previous = new Date(+start)), offseti(start, step), floori(start);\n while (previous < start && start < stop);\n return range;\n };\n\n interval.filter = function(test) {\n return newInterval(function(date) {\n if (date >= date) while (floori(date), !test(date)) date.setTime(date - 1);\n }, function(date, step) {\n if (date >= date) {\n if (step < 0) while (++step <= 0) {\n while (offseti(date, -1), !test(date)) {} // eslint-disable-line no-empty\n } else while (--step >= 0) {\n while (offseti(date, +1), !test(date)) {} // eslint-disable-line no-empty\n }\n }\n });\n };\n\n if (count) {\n interval.count = function(start, end) {\n t0.setTime(+start), t1.setTime(+end);\n floori(t0), floori(t1);\n return Math.floor(count(t0, t1));\n };\n\n interval.every = function(step) {\n step = Math.floor(step);\n return !isFinite(step) || !(step > 0) ? null\n : !(step > 1) ? interval\n : interval.filter(field\n ? 
function(d) { return field(d) % step === 0; }\n : function(d) { return interval.count(0, d) % step === 0; });\n };\n }\n\n return interval;\n}\n","import interval from \"./interval.js\";\n\nvar millisecond = interval(function() {\n // noop\n}, function(date, step) {\n date.setTime(+date + step);\n}, function(start, end) {\n return end - start;\n});\n\n// An optimized implementation for this simple case.\nmillisecond.every = function(k) {\n k = Math.floor(k);\n if (!isFinite(k) || !(k > 0)) return null;\n if (!(k > 1)) return millisecond;\n return interval(function(date) {\n date.setTime(Math.floor(date / k) * k);\n }, function(date, step) {\n date.setTime(+date + step * k);\n }, function(start, end) {\n return (end - start) / k;\n });\n};\n\nexport default millisecond;\nexport var milliseconds = millisecond.range;\n","export var durationSecond = 1e3;\nexport var durationMinute = 6e4;\nexport var durationHour = 36e5;\nexport var durationDay = 864e5;\nexport var durationWeek = 6048e5;\n","import interval from \"./interval.js\";\nimport {durationSecond} from \"./duration.js\";\n\nvar second = interval(function(date) {\n date.setTime(date - date.getMilliseconds());\n}, function(date, step) {\n date.setTime(+date + step * durationSecond);\n}, function(start, end) {\n return (end - start) / durationSecond;\n}, function(date) {\n return date.getUTCSeconds();\n});\n\nexport default second;\nexport var seconds = second.range;\n","import interval from \"./interval.js\";\nimport {durationMinute, durationSecond} from \"./duration.js\";\n\nvar minute = interval(function(date) {\n date.setTime(date - date.getMilliseconds() - date.getSeconds() * durationSecond);\n}, function(date, step) {\n date.setTime(+date + step * durationMinute);\n}, function(start, end) {\n return (end - start) / durationMinute;\n}, function(date) {\n return date.getMinutes();\n});\n\nexport default minute;\nexport var minutes = minute.range;\n","import interval from \"./interval.js\";\nimport {durationMinute} from \"./duration.js\";\n\nvar utcMinute = interval(function(date) {\n date.setUTCSeconds(0, 0);\n}, function(date, step) {\n date.setTime(+date + step * durationMinute);\n}, function(start, end) {\n return (end - start) / durationMinute;\n}, function(date) {\n return date.getUTCMinutes();\n});\n\nexport default utcMinute;\nexport var utcMinutes = utcMinute.range;\n","import interval from \"./interval.js\";\nimport {durationHour, durationMinute, durationSecond} from \"./duration.js\";\n\nvar hour = interval(function(date) {\n date.setTime(date - date.getMilliseconds() - date.getSeconds() * durationSecond - date.getMinutes() * durationMinute);\n}, function(date, step) {\n date.setTime(+date + step * durationHour);\n}, function(start, end) {\n return (end - start) / durationHour;\n}, function(date) {\n return date.getHours();\n});\n\nexport default hour;\nexport var hours = hour.range;\n","import interval from \"./interval.js\";\nimport {durationHour} from \"./duration.js\";\n\nvar utcHour = interval(function(date) {\n date.setUTCMinutes(0, 0, 0);\n}, function(date, step) {\n date.setTime(+date + step * durationHour);\n}, function(start, end) {\n return (end - start) / durationHour;\n}, function(date) {\n return date.getUTCHours();\n});\n\nexport default utcHour;\nexport var utcHours = utcHour.range;\n","import interval from \"./interval.js\";\nimport {durationMinute, durationWeek} from \"./duration.js\";\n\nfunction weekday(i) {\n return interval(function(date) {\n date.setDate(date.getDate() - (date.getDay() + 7 - i) % 7);\n 
date.setHours(0, 0, 0, 0);\n }, function(date, step) {\n date.setDate(date.getDate() + step * 7);\n }, function(start, end) {\n return (end - start - (end.getTimezoneOffset() - start.getTimezoneOffset()) * durationMinute) / durationWeek;\n });\n}\n\nexport var sunday = weekday(0);\nexport var monday = weekday(1);\nexport var tuesday = weekday(2);\nexport var wednesday = weekday(3);\nexport var thursday = weekday(4);\nexport var friday = weekday(5);\nexport var saturday = weekday(6);\n\nexport var sundays = sunday.range;\nexport var mondays = monday.range;\nexport var tuesdays = tuesday.range;\nexport var wednesdays = wednesday.range;\nexport var thursdays = thursday.range;\nexport var fridays = friday.range;\nexport var saturdays = saturday.range;\n","import interval from \"./interval.js\";\nimport {durationWeek} from \"./duration.js\";\n\nfunction utcWeekday(i) {\n return interval(function(date) {\n date.setUTCDate(date.getUTCDate() - (date.getUTCDay() + 7 - i) % 7);\n date.setUTCHours(0, 0, 0, 0);\n }, function(date, step) {\n date.setUTCDate(date.getUTCDate() + step * 7);\n }, function(start, end) {\n return (end - start) / durationWeek;\n });\n}\n\nexport var utcSunday = utcWeekday(0);\nexport var utcMonday = utcWeekday(1);\nexport var utcTuesday = utcWeekday(2);\nexport var utcWednesday = utcWeekday(3);\nexport var utcThursday = utcWeekday(4);\nexport var utcFriday = utcWeekday(5);\nexport var utcSaturday = utcWeekday(6);\n\nexport var utcSundays = utcSunday.range;\nexport var utcMondays = utcMonday.range;\nexport var utcTuesdays = utcTuesday.range;\nexport var utcWednesdays = utcWednesday.range;\nexport var utcThursdays = utcThursday.range;\nexport var utcFridays = utcFriday.range;\nexport var utcSaturdays = utcSaturday.range;\n","import interval from \"./interval.js\";\n\nvar month = interval(function(date) {\n date.setDate(1);\n date.setHours(0, 0, 0, 0);\n}, function(date, step) {\n date.setMonth(date.getMonth() + step);\n}, function(start, end) {\n return end.getMonth() - start.getMonth() + (end.getFullYear() - start.getFullYear()) * 12;\n}, function(date) {\n return date.getMonth();\n});\n\nexport default month;\nexport var months = month.range;\n","import interval from \"./interval.js\";\n\nvar utcMonth = interval(function(date) {\n date.setUTCDate(1);\n date.setUTCHours(0, 0, 0, 0);\n}, function(date, step) {\n date.setUTCMonth(date.getUTCMonth() + step);\n}, function(start, end) {\n return end.getUTCMonth() - start.getUTCMonth() + (end.getUTCFullYear() - start.getUTCFullYear()) * 12;\n}, function(date) {\n return date.getUTCMonth();\n});\n\nexport default utcMonth;\nexport var utcMonths = utcMonth.range;\n","import interval from \"./interval.js\";\n\nvar year = interval(function(date) {\n date.setMonth(0, 1);\n date.setHours(0, 0, 0, 0);\n}, function(date, step) {\n date.setFullYear(date.getFullYear() + step);\n}, function(start, end) {\n return end.getFullYear() - start.getFullYear();\n}, function(date) {\n return date.getFullYear();\n});\n\n// An optimized implementation for this simple case.\nyear.every = function(k) {\n return !isFinite(k = Math.floor(k)) || !(k > 0) ? 
null : interval(function(date) {\n date.setFullYear(Math.floor(date.getFullYear() / k) * k);\n date.setMonth(0, 1);\n date.setHours(0, 0, 0, 0);\n }, function(date, step) {\n date.setFullYear(date.getFullYear() + step * k);\n });\n};\n\nexport default year;\nexport var years = year.range;\n","import interval from \"./interval.js\";\n\nvar utcYear = interval(function(date) {\n date.setUTCMonth(0, 1);\n date.setUTCHours(0, 0, 0, 0);\n}, function(date, step) {\n date.setUTCFullYear(date.getUTCFullYear() + step);\n}, function(start, end) {\n return end.getUTCFullYear() - start.getUTCFullYear();\n}, function(date) {\n return date.getUTCFullYear();\n});\n\n// An optimized implementation for this simple case.\nutcYear.every = function(k) {\n return !isFinite(k = Math.floor(k)) || !(k > 0) ? null : interval(function(date) {\n date.setUTCFullYear(Math.floor(date.getUTCFullYear() / k) * k);\n date.setUTCMonth(0, 1);\n date.setUTCHours(0, 0, 0, 0);\n }, function(date, step) {\n date.setUTCFullYear(date.getUTCFullYear() + step * k);\n });\n};\n\nexport default utcYear;\nexport var utcYears = utcYear.range;\n","import { timeParse, utcParse } from 'd3-time-format'\n\nexport const timePrecisions = [\n 'millisecond',\n 'second',\n 'minute',\n 'hour',\n 'day',\n 'month',\n 'year',\n] as const\n\nexport type TIME_PRECISION = typeof timePrecisions[number]\n\nexport const precisionCutOffs: ((date: Date) => void)[] = [\n date => date.setMilliseconds(0),\n date => date.setSeconds(0),\n date => date.setMinutes(0),\n date => date.setHours(0),\n date => date.setDate(1),\n date => date.setMonth(0),\n]\n\nexport const precisionCutOffsByType: Record void)[]> = {\n millisecond: [],\n second: precisionCutOffs.slice(0, 1),\n minute: precisionCutOffs.slice(0, 2),\n hour: precisionCutOffs.slice(0, 3),\n day: precisionCutOffs.slice(0, 4),\n month: precisionCutOffs.slice(0, 5),\n year: precisionCutOffs.slice(0, 6),\n}\n\nexport const createPrecisionMethod = (precision: TIME_PRECISION) => (date: Date) => {\n precisionCutOffsByType[precision].forEach(cutOff => {\n cutOff(date)\n })\n\n return date\n}\n\nexport const createDateNormalizer = ({\n format = 'native',\n precision = 'millisecond',\n useUTC = true,\n}: {\n format?: 'native' | string\n precision?: TIME_PRECISION\n useUTC?: boolean\n}) => {\n const precisionFn = createPrecisionMethod(precision)\n\n return (value: Date | string | undefined) => {\n if (value === undefined) {\n return value\n }\n\n if (format === 'native' || value instanceof Date) {\n return precisionFn(value as Date)\n }\n\n const parseTime = useUTC ? utcParse(format) : timeParse(format)\n return precisionFn(parseTime(value as string) as Date)\n }\n}\n","import { NumberValue, scaleLinear, ScaleLinear as D3ScaleLinear } from 'd3-scale'\nimport { ScaleLinearSpec, ScaleLinear, ComputedSerieAxis, ScaleAxis } from './types'\n\nexport const createLinearScale = (\n {\n min = 0,\n max = 'auto',\n stacked = false,\n reverse = false,\n clamp = false,\n nice = false,\n }: ScaleLinearSpec,\n data: ComputedSerieAxis,\n size: number,\n axis: ScaleAxis\n) => {\n let minValue: NumberValue\n if (min === 'auto') {\n minValue = stacked === true ? data.minStacked ?? 0 : data.min\n } else {\n minValue = min\n }\n\n let maxValue: NumberValue\n if (max === 'auto') {\n maxValue = stacked === true ? data.maxStacked ?? 0 : data.max\n } else {\n maxValue = max\n }\n\n const scale = scaleLinear()\n .rangeRound(axis === 'x' ? [0, size] : [size, 0])\n .domain(reverse ? 
[maxValue, minValue] : [minValue, maxValue])\n .clamp(clamp)\n\n if (nice === true) scale.nice()\n else if (typeof nice === 'number') scale.nice(nice)\n\n return castLinearScale(scale, stacked)\n}\n\nexport const castLinearScale = (\n scale: D3ScaleLinear,\n stacked = false\n) => {\n const typedScale = scale as unknown as ScaleLinear\n typedScale.type = 'linear'\n typedScale.stacked = stacked\n\n return typedScale\n}\n","import { scaleBand, ScaleBand as D3ScaleBand } from 'd3-scale'\nimport { ComputedSerieAxis, ScaleBand, ScaleBandSpec, StringValue, ScaleAxis } from './types'\n\nexport const createBandScale = (\n { round = true }: ScaleBandSpec,\n data: ComputedSerieAxis,\n size: number,\n axis: ScaleAxis\n) => {\n const scale = scaleBand()\n .range(axis === 'x' ? [0, size] : [size, 0])\n .domain(data.all)\n .round(round)\n\n return castBandScale(scale)\n}\n\nexport const castBandScale = (scale: D3ScaleBand) => {\n const typedScale = scale as ScaleBand\n typedScale.type = 'band'\n\n return typedScale\n}\n","import { NumberValue, scaleTime, scaleUtc } from 'd3-scale'\nimport { createDateNormalizer } from './timeHelpers'\nimport { ComputedSerieAxis, ScaleTime, ScaleTimeSpec } from './types'\n\nexport const createTimeScale = (\n {\n format = 'native',\n precision = 'millisecond',\n min = 'auto',\n max = 'auto',\n useUTC = true,\n nice = false,\n }: ScaleTimeSpec,\n data: ComputedSerieAxis,\n size: number\n) => {\n const normalize = createDateNormalizer({ format, precision, useUTC })\n\n let minValue: Date | undefined\n if (min === 'auto') {\n minValue = normalize(data.min)\n } else if (format !== 'native') {\n minValue = normalize(min)\n } else {\n minValue = min as Date\n }\n\n let maxValue: Date | undefined\n if (max === 'auto') {\n maxValue = normalize(data.max)\n } else if (format !== 'native') {\n maxValue = normalize(max)\n } else {\n maxValue = max as Date\n }\n\n const scale = useUTC ? 
scaleUtc() : scaleTime()\n\n scale.range([0, size])\n\n if (minValue && maxValue) scale.domain([minValue, maxValue])\n\n if (nice === true) scale.nice()\n else if (typeof nice === 'object' || typeof nice === 'number') scale.nice(nice)\n\n const typedScale = scale as unknown as ScaleTime\n\n typedScale.type = 'time'\n typedScale.useUTC = useUTC\n\n return typedScale\n}\n","import {utcYear, utcMonth, utcWeek, utcDay, utcHour, utcMinute, utcSecond, utcTicks, utcTickInterval} from \"d3-time\";\nimport {utcFormat} from \"d3-time-format\";\nimport {calendar} from \"./time.js\";\nimport {initRange} from \"./init.js\";\n\nexport default function utcTime() {\n return initRange.apply(calendar(utcTicks, utcTickInterval, utcYear, utcMonth, utcWeek, utcDay, utcHour, utcMinute, utcSecond, utcFormat).domain([Date.UTC(2000, 0, 1), Date.UTC(2000, 0, 2)]), arguments);\n}\n","import { scaleLog } from 'd3-scale'\nimport { ComputedSerieAxis, ScaleAxis, ScaleLog, ScaleLogSpec } from './types'\n\nexport const createLogScale = (\n { base = 10, min = 'auto', max = 'auto' }: ScaleLogSpec,\n data: ComputedSerieAxis,\n size: number,\n axis: ScaleAxis\n) => {\n const hasZero = data.all.some(v => v === 0)\n if (hasZero) {\n throw new Error(`a log scale domain must not include or cross zero`)\n }\n\n let sign: number\n let hasMixedSign = false\n data.all\n .filter(v => v != null)\n .forEach(v => {\n if (hasMixedSign) return\n if (sign === undefined) {\n sign = Math.sign(v)\n } else if (Math.sign(v) !== sign) {\n hasMixedSign = true\n }\n })\n\n if (hasMixedSign) {\n throw new Error(`a log scale domain must be strictly-positive or strictly-negative`)\n }\n\n let minValue: number\n if (min === 'auto') {\n minValue = data.min\n } else {\n minValue = min\n }\n\n let maxValue: number\n if (max === 'auto') {\n maxValue = data.max\n } else {\n maxValue = max\n }\n\n const scale = scaleLog()\n .domain([minValue, maxValue])\n .rangeRound(axis === 'x' ? [0, size] : [size, 0])\n .base(base)\n .nice()\n\n const typedScale = scale as ScaleLog\n typedScale.type = 'log'\n\n return scale\n}\n","import uniq from 'lodash/uniq'\nimport uniqBy from 'lodash/uniqBy'\nimport sortBy from 'lodash/sortBy'\nimport last from 'lodash/last'\nimport isDate from 'lodash/isDate'\nimport { createDateNormalizer } from './timeHelpers'\nimport { ScaleAxis, ScaleSpec, ScaleValue, SerieAxis, ComputedSerieAxis } from './types'\nimport { createLinearScale } from './linearScale'\nimport { createPointScale } from './pointScale'\nimport { createBandScale } from './bandScale'\nimport { createTimeScale } from './timeScale'\nimport { createLogScale } from './logScale'\nimport { createSymlogScale } from './symlogScale'\n\ntype XY = ReturnType\n\ntype StackedXY = {\n [K in keyof XY]: XY[K] & {\n maxStacked: number\n minStacked: number\n }\n}\n\ninterface SerieDatum {\n x: number | string | Date\n // only numbers can be stacked\n xStacked?: number | null\n y: number | string | Date\n // only numbers can be stacked\n yStacked?: number | null\n}\n\ntype Serie = S & {\n data: D[]\n}\n\ntype NestedSerie = S & {\n data: {\n data: D\n }[]\n}\n\nexport type ComputedSerie = S & {\n data: {\n data: D\n position: {\n x: number | null\n y: number | null\n }\n }[]\n}\n\ntype Compare = (a: T, b: T) => boolean\n\nexport const getOtherAxis = (axis: ScaleAxis): ScaleAxis => (axis === 'x' ? 
'y' : 'x')\n\nexport const compareValues = (a: string | number, b: string | number) => a === b\nexport const compareDateValues = (a: Date, b: Date) => a.getTime() === b.getTime()\n\nexport function computeScale(\n spec: ScaleSpec,\n data: ComputedSerieAxis,\n size: number,\n axis: ScaleAxis\n) {\n switch (spec.type) {\n case 'linear':\n return createLinearScale(spec, data, size, axis)\n case 'point':\n return createPointScale(spec, data, size)\n case 'band':\n return createBandScale(spec, data, size, axis)\n case 'time':\n return createTimeScale(spec, data, size)\n case 'log':\n return createLogScale(spec, data, size, axis)\n case 'symlog':\n return createSymlogScale(spec, data, size, axis)\n default:\n throw new Error('invalid scale spec')\n }\n}\n\n/**\n * Convert serie data to have the original data stored in a nested prop.\n *\n * We do this in order to avoid conflicts between raw & computed properties.\n * <- { data: { x: 1, y: 3 }[] }\n * -> { data: { data: { x: 1, y: 3 } }[] }\n */\nconst nestSerieData = (\n serie: Serie\n): NestedSerie => ({\n ...serie,\n data: serie.data.map(d => ({ data: { ...d } })),\n})\n\nconst getDatumAxisPosition = (\n datum: { data: D },\n axis: ScaleAxis,\n scale: any\n): number | null => {\n if ('stacked' in scale && scale.stacked) {\n const stackedValue = datum.data[axis === 'x' ? 'xStacked' : 'yStacked']\n if (stackedValue === null || stackedValue === undefined) {\n return null\n }\n\n return scale(stackedValue)\n }\n\n return scale(datum.data[axis]) ?? null\n}\n\n/**\n * Compute x/y d3 scales from an array of data series, and scale specifications.\n *\n * We use generics as it's not uncommon to have extra properties such as an id\n * added to the series, or extra props on data, in such case, you should override\n * the default types.\n */\nexport const computeXYScalesForSeries = (\n series: Serie[],\n xScaleSpec: ScaleSpec,\n yScaleSpec: ScaleSpec,\n width: number,\n height: number\n) => {\n // first nest series to avoid property conflicts\n const nestedSeries = series.map(serie => nestSerieData(serie))\n\n // then compute data for each axis: all, min, max values\n const xy = generateSeriesXY(nestedSeries, xScaleSpec, yScaleSpec)\n\n // stack x values depending on xScale\n if ('stacked' in xScaleSpec && xScaleSpec.stacked === true) {\n stackX(xy as StackedXY, nestedSeries)\n }\n\n // stack y values depending on yScale\n if ('stacked' in yScaleSpec && yScaleSpec.stacked === true) {\n stackY(xy as StackedXY, nestedSeries)\n }\n\n // computes scales\n const xScale = computeScale(xScaleSpec, xy.x, width, 'x')\n const yScale = computeScale(yScaleSpec, xy.y, height, 'y')\n\n // assign position to each datum in every scale\n const computedSeries: ComputedSerie[] = nestedSeries.map(serie => ({\n ...serie,\n data: serie.data.map(datum => ({\n ...datum,\n position: {\n x: getDatumAxisPosition(datum, 'x', xScale),\n y: getDatumAxisPosition(datum, 'y', yScale),\n },\n })),\n }))\n\n return {\n ...xy,\n series: computedSeries,\n xScale,\n yScale,\n }\n}\n\nexport const generateSeriesXY = (\n series: NestedSerie[],\n xScaleSpec: ScaleSpec,\n yScaleSpec: ScaleSpec\n) => ({\n x: generateSeriesAxis<'x', D['x']>(series, 'x', xScaleSpec),\n y: generateSeriesAxis<'y', D['y']>(series, 'y', yScaleSpec),\n})\n\n/**\n * Normalize data according to scale type, (time => Date, linear => Number)\n * compute sorted unique values and min/max.\n */\nexport const generateSeriesAxis = (\n series: SerieAxis,\n axis: Axis,\n scaleSpec: ScaleSpec,\n {\n getValue = d => d.data[axis],\n 
setValue = (d, v) => {\n d.data[axis] = v\n },\n }: {\n getValue?: (d: { data: Record }) => Value | null\n setValue?: (d: { data: Record }, v: Value) => void\n } = {}\n) => {\n if (scaleSpec.type === 'linear') {\n series.forEach(serie => {\n serie.data.forEach(d => {\n const value = getValue(d)\n\n if (value) {\n setValue(d, parseFloat(String(value)) as unknown as Value)\n }\n })\n })\n } else if (scaleSpec.type === 'time' && scaleSpec.format !== 'native') {\n // `native` means we already have Date instances,\n // otherwise we have to convert the values to Date.\n const parseTime = createDateNormalizer(scaleSpec)\n\n series.forEach(serie => {\n serie.data.forEach(d => {\n const value = getValue(d)\n\n if (value) {\n setValue(d, parseTime(value as Date) as unknown as Value)\n }\n })\n })\n }\n\n const values: unknown[] = []\n\n series.forEach(serie => {\n serie.data.forEach(d => {\n values.push(getValue(d))\n })\n })\n\n switch (scaleSpec.type) {\n case 'linear': {\n const all = sortBy(\n // filer null values to deal with holes in linechart\n uniq(values as number[]).filter(v => v !== null),\n v => v\n )\n\n return { all, min: Math.min(...all), max: Math.max(...all) }\n }\n case 'time': {\n const all = uniqBy(values as Date[], v => v.getTime())\n .slice(0)\n .sort((a, b) => b.getTime() - a.getTime())\n .reverse()\n\n return { all, min: all[0], max: last(all) }\n }\n default: {\n const all = uniq(values)\n\n return { all, min: all[0], max: last(all) }\n }\n }\n}\n\nexport const stackAxis = (\n axis: ScaleAxis,\n xy: StackedXY,\n series: NestedSerie[]\n) => {\n const otherAxis = getOtherAxis(axis)\n const all: number[] = []\n\n xy[otherAxis].all.forEach(v => {\n const compare = (isDate(v) ? compareDateValues : compareValues) as Compare\n const stack: Array = []\n\n series.forEach(serie => {\n const datum = serie.data.find(d => compare(d.data[otherAxis], v))\n let value = null\n let stackValue = null\n\n if (datum !== undefined) {\n // stacked values only support numbers\n value = datum.data[axis] as number\n if (value !== null) {\n const head = last(stack)\n if (head === undefined) {\n stackValue = value\n } else if (head !== null) {\n stackValue = head + value\n }\n }\n\n datum.data[axis === 'x' ? 
'xStacked' : 'yStacked'] = stackValue\n }\n\n stack.push(stackValue)\n\n if (stackValue !== null) {\n all.push(stackValue)\n }\n })\n })\n\n xy[axis].minStacked = Math.min(...all)\n xy[axis].maxStacked = Math.max(...all)\n}\n\nconst stackX = (\n xy: StackedXY,\n series: NestedSerie[]\n) => stackAxis('x', xy, series)\n\nconst stackY = (\n xy: StackedXY,\n series: NestedSerie[]\n) => stackAxis('y', xy, series)\n","import { scalePoint, ScalePoint as D3ScalePoint } from 'd3-scale'\nimport { ComputedSerieAxis, ScalePoint, ScalePointSpec, StringValue } from './types'\n\nexport const createPointScale = (\n _spec: ScalePointSpec,\n data: ComputedSerieAxis,\n size: number\n) => {\n const scale = scalePoint().range([0, size]).domain(data.all)\n\n const typedScale = scale as ScalePoint\n typedScale.type = 'point'\n\n return typedScale\n}\n\nexport const castPointScale = (scale: D3ScalePoint) => {\n const typedScale = scale as ScalePoint\n typedScale.type = 'point'\n\n return typedScale\n}\n","import { scaleSymlog } from 'd3-scale'\nimport { ComputedSerieAxis, ScaleAxis, ScaleSymlog, ScaleSymlogSpec } from './types'\n\nexport const createSymlogScale = (\n { constant = 1, min = 'auto', max = 'auto', reverse = false }: ScaleSymlogSpec,\n data: ComputedSerieAxis,\n size: number,\n axis: ScaleAxis\n) => {\n let minValue: number\n if (min === 'auto') {\n minValue = data.min\n } else {\n minValue = min\n }\n\n let maxValue: number\n if (max === 'auto') {\n maxValue = data.max\n } else {\n maxValue = max\n }\n\n const scale = scaleSymlog()\n .constant(constant)\n .rangeRound(axis === 'x' ? [0, size] : [size, 0])\n .nice()\n\n if (reverse === true) scale.domain([maxValue, minValue])\n else scale.domain([minValue, maxValue])\n\n const typedScale = scale as ScaleSymlog\n typedScale.type = 'symlog'\n\n return typedScale\n}\n","import {\n CountableTimeInterval,\n timeMillisecond,\n utcMillisecond,\n timeSecond,\n utcSecond,\n timeMinute,\n utcMinute,\n timeHour,\n utcHour,\n timeWeek,\n utcWeek,\n timeSunday,\n utcSunday,\n timeMonday,\n utcMonday,\n timeTuesday,\n utcTuesday,\n timeWednesday,\n utcWednesday,\n timeThursday,\n utcThursday,\n timeFriday,\n utcFriday,\n timeSaturday,\n utcSaturday,\n timeMonth,\n utcMonth,\n timeYear,\n utcYear,\n timeInterval,\n} from 'd3-time'\nimport { ScaleValue, TicksSpec, AnyScale, ScaleWithBandwidth } from './types'\n\nexport const centerScale = (scale: ScaleWithBandwidth) => {\n const bandwidth = scale.bandwidth()\n\n if (bandwidth === 0) return scale\n\n let offset = bandwidth / 2\n if (scale.round()) {\n offset = Math.round(offset)\n }\n\n return (d: T) => (scale(d) ?? 
0) + offset\n}\n\nconst timeDay = timeInterval(\n date => date.setHours(0, 0, 0, 0),\n (date, step) => date.setDate(date.getDate() + step),\n (start, end) => (end.getTime() - start.getTime()) / 864e5,\n date => Math.floor(date.getTime() / 864e5)\n)\n\nconst utcDay = timeInterval(\n date => date.setUTCHours(0, 0, 0, 0),\n (date, step) => date.setUTCDate(date.getUTCDate() + step),\n (start, end) => (end.getTime() - start.getTime()) / 864e5,\n date => Math.floor(date.getTime() / 864e5)\n)\n\nconst timeByType: Record = {\n millisecond: [timeMillisecond, utcMillisecond],\n second: [timeSecond, utcSecond],\n minute: [timeMinute, utcMinute],\n hour: [timeHour, utcHour],\n day: [timeDay, utcDay],\n week: [timeWeek, utcWeek],\n sunday: [timeSunday, utcSunday],\n monday: [timeMonday, utcMonday],\n tuesday: [timeTuesday, utcTuesday],\n wednesday: [timeWednesday, utcWednesday],\n thursday: [timeThursday, utcThursday],\n friday: [timeFriday, utcFriday],\n saturday: [timeSaturday, utcSaturday],\n month: [timeMonth, utcMonth],\n year: [timeYear, utcYear],\n}\n\nconst timeTypes = Object.keys(timeByType)\nconst timeIntervalRegexp = new RegExp(`^every\\\\s*(\\\\d+)?\\\\s*(${timeTypes.join('|')})s?$`, 'i')\n\nconst isInteger = (value: unknown): value is number =>\n typeof value === 'number' && isFinite(value) && Math.floor(value) === value\n\nexport const getScaleTicks = (\n scale: AnyScale,\n spec?: TicksSpec\n) => {\n // specific values\n if (Array.isArray(spec)) {\n return spec\n }\n\n if (typeof spec === 'string' && 'useUTC' in scale) {\n // time interval\n const matches = spec.match(timeIntervalRegexp)\n\n if (matches) {\n const [, amount, type] = matches\n // UTC is used as it's more predictable\n // however local time could be used too\n // let's see how it fits users' requirements\n const timeType = timeByType[type][scale.useUTC ? 1 : 0]\n\n if (type === 'day') {\n const [start, originalStop] = scale.domain()\n const stop = new Date(originalStop)\n\n // Set range to include last day in the domain since `interval.range` function is exclusive stop\n stop.setDate(stop.getDate() + 1)\n\n return timeType.every(Number(amount ?? 1))?.range(start, stop) ?? 
[]\n }\n\n if (amount === undefined) {\n return scale.ticks(timeType)\n }\n\n const interval = timeType.every(Number(amount))\n\n if (interval) {\n return scale.ticks(interval)\n }\n }\n\n throw new Error(`Invalid tickValues: ${spec}`)\n }\n\n // continuous scales\n if ('ticks' in scale) {\n // default behaviour\n if (spec === undefined) {\n return scale.ticks()\n }\n\n // specific tick count\n if (isInteger(spec)) {\n return scale.ticks(spec)\n }\n }\n\n // non linear scale default\n return scale.domain()\n}\n","import { timeFormat } from 'd3-time-format'\nimport { format as d3Format } from 'd3-format'\n// @ts-ignore\nimport { textPropsByEngine } from '@nivo/core'\nimport { ScaleValue, AnyScale, TicksSpec, getScaleTicks, centerScale } from '@nivo/scales'\nimport { Point, ValueFormatter, Line } from './types'\n\nconst isArray = (value: unknown): value is T[] => Array.isArray(value)\n\nexport const computeCartesianTicks = ({\n axis,\n scale,\n ticksPosition,\n tickValues,\n tickSize,\n tickPadding,\n tickRotation,\n engine = 'svg',\n}: {\n axis: 'x' | 'y'\n scale: AnyScale\n ticksPosition?: 'after' | 'before'\n tickValues?: TicksSpec\n tickSize: number\n tickPadding: number\n tickRotation: number\n engine?: 'svg' | 'canvas'\n}) => {\n const values = getScaleTicks(scale, tickValues)\n\n const textProps = textPropsByEngine[engine]\n\n const position = 'bandwidth' in scale ? centerScale(scale) : scale\n const line = { lineX: 0, lineY: 0 }\n const text = { textX: 0, textY: 0 }\n\n const isRTL = typeof document === 'object' ? document.dir === 'rtl' : false\n let translate: (value: Value) => Point\n let textAlign: CanvasTextAlign = textProps.align.center\n let textBaseline: CanvasTextBaseline = textProps.baseline.center\n\n if (axis === 'x') {\n translate = d => ({ x: position(d) ?? 0, y: 0 })\n\n line.lineY = tickSize * (ticksPosition === 'after' ? 1 : -1)\n text.textY = (tickSize + tickPadding) * (ticksPosition === 'after' ? 1 : -1)\n\n if (ticksPosition === 'after') {\n textBaseline = textProps.baseline.top\n } else {\n textBaseline = textProps.baseline.bottom\n }\n\n if (tickRotation === 0) {\n textAlign = textProps.align.center\n } else if (\n (ticksPosition === 'after' && tickRotation < 0) ||\n (ticksPosition === 'before' && tickRotation > 0)\n ) {\n textAlign = textProps.align[isRTL ? 'left' : 'right']\n textBaseline = textProps.baseline.center\n } else if (\n (ticksPosition === 'after' && tickRotation > 0) ||\n (ticksPosition === 'before' && tickRotation < 0)\n ) {\n textAlign = textProps.align[isRTL ? 'right' : 'left']\n textBaseline = textProps.baseline.center\n }\n } else {\n translate = d => ({ x: 0, y: position(d) ?? 0 })\n\n line.lineX = tickSize * (ticksPosition === 'after' ? 1 : -1)\n text.textX = (tickSize + tickPadding) * (ticksPosition === 'after' ? 1 : -1)\n\n if (ticksPosition === 'after') {\n textAlign = textProps.align.left\n } else {\n textAlign = textProps.align.right\n }\n }\n\n const ticks = values.map(value => ({\n key: typeof value === 'number' || typeof value === 'string' ? value : `${value}`,\n value,\n ...translate(value),\n ...line,\n ...text,\n }))\n\n return {\n ticks,\n textAlign,\n textBaseline,\n }\n}\n\nexport const getFormatter = (\n format: string | ValueFormatter | undefined,\n scale: AnyScale\n): ValueFormatter | undefined => {\n if (typeof format === 'undefined' || typeof format === 'function') return format\n\n if (scale.type === 'time') {\n const formatter = timeFormat(format)\n\n return ((d: any) => formatter(d instanceof Date ? 
d : new Date(d))) as ValueFormatter\n }\n\n return d3Format(format) as unknown as ValueFormatter\n}\n\nexport const computeGridLines = ({\n width,\n height,\n scale,\n axis,\n values: _values,\n}: {\n width: number\n height: number\n scale: AnyScale\n axis: 'x' | 'y'\n values?: TicksSpec\n}) => {\n const lineValues = isArray(_values) ? _values : undefined\n const values = lineValues || getScaleTicks(scale, _values)\n const position = 'bandwidth' in scale ? centerScale(scale) : scale\n\n const lines: Line[] =\n axis === 'x'\n ? values.map(value => ({\n key: `${value}`,\n x1: position(value) ?? 0,\n x2: position(value) ?? 0,\n y1: 0,\n y2: height,\n }))\n : values.map(value => ({\n key: `${value}`,\n x1: 0,\n x2: width,\n y1: position(value) ?? 0,\n y2: position(value) ?? 0,\n }))\n\n return lines\n}\n","import { useMemo, memo } from 'react'\nimport * as React from 'react'\nimport { animated } from '@react-spring/web'\nimport { useTheme } from '@nivo/core'\nimport { ScaleValue } from '@nivo/scales'\nimport { AxisTickProps } from '../types'\n\nconst AxisTick = ({\n value: _value,\n format,\n lineX,\n lineY,\n onClick,\n textBaseline,\n textAnchor,\n animatedProps,\n}: AxisTickProps) => {\n const theme = useTheme()\n\n const value = format?.(_value) ?? _value\n\n const props = useMemo(() => {\n const style = { opacity: animatedProps.opacity }\n\n if (!onClick) {\n return { style }\n }\n\n return {\n style: { ...style, cursor: 'pointer' },\n onClick: (event: React.MouseEvent) => onClick(event, value),\n }\n }, [animatedProps.opacity, onClick, value])\n\n return (\n \n \n \n {value}\n \n \n )\n}\n\nconst memoizedAxisTick = memo(AxisTick) as typeof AxisTick\n\nexport { memoizedAxisTick as AxisTick }\n","import { useMemo, memo } from 'react'\nimport * as React from 'react'\nimport { useSpring, useTransition, animated } from '@react-spring/web'\nimport { useTheme, useMotionConfig } from '@nivo/core'\nimport { ScaleValue, AnyScale } from '@nivo/scales'\nimport { computeCartesianTicks, getFormatter } from '../compute'\nimport { AxisTick } from './AxisTick'\nimport { AxisProps } from '../types'\n\nconst Axis = ({\n axis,\n scale,\n x = 0,\n y = 0,\n length,\n ticksPosition,\n tickValues,\n tickSize = 5,\n tickPadding = 5,\n tickRotation = 0,\n format,\n renderTick = AxisTick,\n legend,\n legendPosition = 'end',\n legendOffset = 0,\n onClick,\n ariaHidden,\n}: AxisProps & {\n axis: 'x' | 'y'\n scale: AnyScale\n x?: number\n y?: number\n length: number\n onClick?: (event: React.MouseEvent, value: Value | string) => void\n}) => {\n const theme = useTheme()\n\n const formatValue = useMemo(() => getFormatter(format, scale), [format, scale])\n\n const { ticks, textAlign, textBaseline } = computeCartesianTicks({\n axis,\n scale,\n ticksPosition,\n tickValues,\n tickSize,\n tickPadding,\n tickRotation,\n })\n\n let legendNode = null\n if (legend !== undefined) {\n let legendX = 0\n let legendY = 0\n let legendRotation = 0\n let textAnchor\n\n if (axis === 'y') {\n legendRotation = -90\n legendX = legendOffset\n if (legendPosition === 'start') {\n textAnchor = 'start'\n legendY = length\n } else if (legendPosition === 'middle') {\n textAnchor = 'middle'\n legendY = length / 2\n } else if (legendPosition === 'end') {\n textAnchor = 'end'\n }\n } else {\n legendY = legendOffset\n if (legendPosition === 'start') {\n textAnchor = 'start'\n } else if (legendPosition === 'middle') {\n textAnchor = 'middle'\n legendX = length / 2\n } else if (legendPosition === 'end') {\n textAnchor = 'end'\n legendX = length\n }\n 
}\n\n legendNode = (\n \n {legend}\n \n )\n }\n\n const { animate, config: springConfig } = useMotionConfig()\n\n const animatedProps = useSpring({\n transform: `translate(${x},${y})`,\n lineX2: axis === 'x' ? length : 0,\n lineY2: axis === 'x' ? 0 : length,\n config: springConfig,\n immediate: !animate,\n })\n\n const transition = useTransition<\n typeof ticks[0],\n { opacity: number; transform: string; textTransform: string }\n >(ticks, {\n keys: tick => tick.key,\n initial: tick => ({\n opacity: 1,\n transform: `translate(${tick.x},${tick.y})`,\n textTransform: `translate(${tick.textX},${tick.textY}) rotate(${tickRotation})`,\n }),\n from: tick => ({\n opacity: 0,\n transform: `translate(${tick.x},${tick.y})`,\n textTransform: `translate(${tick.textX},${tick.textY}) rotate(${tickRotation})`,\n }),\n enter: tick => ({\n opacity: 1,\n transform: `translate(${tick.x},${tick.y})`,\n textTransform: `translate(${tick.textX},${tick.textY}) rotate(${tickRotation})`,\n }),\n update: tick => ({\n opacity: 1,\n transform: `translate(${tick.x},${tick.y})`,\n textTransform: `translate(${tick.textX},${tick.textY}) rotate(${tickRotation})`,\n }),\n leave: {\n opacity: 0,\n },\n config: springConfig,\n immediate: !animate,\n })\n\n return (\n \n {transition((transitionProps, tick, _state, tickIndex) => {\n return React.createElement(renderTick, {\n tickIndex,\n format: formatValue,\n rotate: tickRotation,\n textBaseline,\n textAnchor: textAlign,\n animatedProps: transitionProps,\n ...tick,\n ...(onClick ? { onClick } : {}),\n })\n })}\n \n {legendNode}\n \n )\n}\n\nconst memoizedAxis = memo(Axis) as typeof Axis\n\nexport { memoizedAxis as Axis }\n","import PropTypes from 'prop-types'\n\nexport const axisPropTypes = {\n ticksPosition: PropTypes.oneOf(['before', 'after']),\n tickValues: PropTypes.oneOfType([\n PropTypes.number,\n PropTypes.arrayOf(\n PropTypes.oneOfType([PropTypes.number, PropTypes.string, PropTypes.instanceOf(Date)])\n ),\n PropTypes.string,\n ]),\n tickSize: PropTypes.number,\n tickPadding: PropTypes.number,\n tickRotation: PropTypes.number,\n format: PropTypes.oneOfType([PropTypes.func, PropTypes.string]),\n renderTick: PropTypes.func,\n legend: PropTypes.node,\n legendPosition: PropTypes.oneOf(['start', 'middle', 'end']),\n legendOffset: PropTypes.number,\n ariaHidden: PropTypes.bool,\n}\n\nexport const axisPropType = PropTypes.shape(axisPropTypes)\n\nexport const positions = ['top', 'right', 'bottom', 'left'] as const\n","import { memo } from 'react'\nimport { ScaleValue, AnyScale } from '@nivo/scales'\nimport { Axis } from './Axis'\nimport { positions } from '../props'\nimport { AxisProps } from '../types'\n\nexport const Axes = memo(\n ({\n xScale,\n yScale,\n width,\n height,\n top,\n right,\n bottom,\n left,\n }: {\n xScale: AnyScale\n yScale: AnyScale\n width: number\n height: number\n top?: AxisProps | null\n right?: AxisProps | null\n bottom?: AxisProps | null\n left?: AxisProps | null\n }) => {\n const axes = { top, right, bottom, left }\n\n return (\n <>\n {positions.map(position => {\n const axis = axes[position] as typeof position extends 'bottom' | 'top'\n ? AxisProps | undefined\n : AxisProps | undefined\n\n if (!axis) return null\n\n const isXAxis = position === 'top' || position === 'bottom'\n const ticksPosition =\n position === 'top' || position === 'left' ? 
'before' : 'after'\n\n return (\n \n )\n })}\n \n )\n }\n)\n","import { memo } from 'react'\nimport { SpringValues, animated } from '@react-spring/web'\nimport { useTheme } from '@nivo/core'\n\nexport const GridLine = memo(\n ({\n animatedProps,\n }: {\n animatedProps: SpringValues<{\n opacity: number\n x1: number\n x2: number\n y1: number\n y2: number\n }>\n }) => {\n const theme = useTheme()\n\n return \n }\n)\n","import { memo } from 'react'\nimport { useTransition } from '@react-spring/web'\nimport { useMotionConfig } from '@nivo/core'\nimport { GridLine } from './GridLine'\nimport { Line } from '../types'\n\nexport const GridLines = memo(({ lines }: { lines: Line[] }) => {\n const { animate, config: springConfig } = useMotionConfig()\n\n const transition = useTransition>(\n lines,\n {\n keys: line => line.key,\n initial: line => ({\n opacity: 1,\n x1: line.x1,\n x2: line.x2,\n y1: line.y1,\n y2: line.y2,\n }),\n from: line => ({\n opacity: 0,\n x1: line.x1,\n x2: line.x2,\n y1: line.y1,\n y2: line.y2,\n }),\n enter: line => ({\n opacity: 1,\n x1: line.x1,\n x2: line.x2,\n y1: line.y1,\n y2: line.y2,\n }),\n update: line => ({\n opacity: 1,\n x1: line.x1,\n x2: line.x2,\n y1: line.y1,\n y2: line.y2,\n }),\n leave: {\n opacity: 0,\n },\n config: springConfig,\n immediate: !animate,\n }\n )\n\n return (\n \n {transition((animatedProps, line) => (\n \n ))}\n \n )\n})\n","import { useMemo, memo } from 'react'\nimport { ScaleValue, AnyScale, TicksSpec } from '@nivo/scales'\nimport { GridLines } from './GridLines'\nimport { computeGridLines } from '../compute'\n\nexport const Grid = memo(\n ({\n width,\n height,\n xScale,\n yScale,\n xValues,\n yValues,\n }: {\n width: number\n height: number\n xScale?: AnyScale | null\n xValues?: TicksSpec\n yScale?: AnyScale | null\n yValues?: TicksSpec\n }) => {\n const xLines = useMemo(() => {\n if (!xScale) return false\n\n return computeGridLines({\n width,\n height,\n scale: xScale,\n axis: 'x',\n values: xValues,\n })\n }, [xScale, xValues, width, height])\n\n const yLines = useMemo(() => {\n if (!yScale) return false\n\n return computeGridLines({\n width,\n height,\n scale: yScale,\n axis: 'y',\n values: yValues,\n })\n }, [height, width, yScale, yValues])\n\n return (\n <>\n {xLines && }\n {yLines && }\n \n )\n }\n)\n","import { degreesToRadians, CompleteTheme } from '@nivo/core'\nimport { ScaleValue, AnyScale, TicksSpec } from '@nivo/scales'\nimport { computeCartesianTicks, getFormatter, computeGridLines } from './compute'\nimport { positions } from './props'\nimport { AxisLegendPosition, CanvasAxisProps, ValueFormatter } from './types'\n\nexport const renderAxisToCanvas = (\n ctx: CanvasRenderingContext2D,\n {\n axis,\n scale,\n x = 0,\n y = 0,\n length,\n\n ticksPosition,\n tickValues,\n tickSize = 5,\n tickPadding = 5,\n tickRotation = 0,\n format: _format,\n\n legend,\n legendPosition = 'end',\n legendOffset = 0,\n\n theme,\n }: {\n axis: 'x' | 'y'\n scale: AnyScale\n x?: number\n y?: number\n length: number\n ticksPosition: 'before' | 'after'\n tickValues?: TicksSpec\n tickSize?: number\n tickPadding?: number\n tickRotation?: number\n format?: string | ValueFormatter\n legend?: string\n legendPosition?: AxisLegendPosition\n legendOffset?: number\n theme: CompleteTheme\n }\n) => {\n const { ticks, textAlign, textBaseline } = computeCartesianTicks({\n axis,\n scale,\n ticksPosition,\n tickValues,\n tickSize,\n tickPadding,\n tickRotation,\n engine: 'canvas',\n })\n\n ctx.save()\n ctx.translate(x, y)\n\n ctx.textAlign = textAlign\n 
ctx.textBaseline = textBaseline\n ctx.font = `${theme.axis.ticks.text.fontWeight ? `${theme.axis.ticks.text.fontWeight} ` : ''}${\n theme.axis.ticks.text.fontSize\n }px ${theme.axis.ticks.text.fontFamily}`\n\n if ((theme.axis.domain.line.strokeWidth ?? 0) > 0) {\n ctx.lineWidth = Number(theme.axis.domain.line.strokeWidth)\n ctx.lineCap = 'square'\n\n if (theme.axis.domain.line.stroke) {\n ctx.strokeStyle = theme.axis.domain.line.stroke\n }\n\n ctx.beginPath()\n ctx.moveTo(0, 0)\n ctx.lineTo(axis === 'x' ? length : 0, axis === 'x' ? 0 : length)\n ctx.stroke()\n }\n\n const format = typeof _format === 'function' ? _format : (value: unknown) => `${value}`\n\n ticks.forEach(tick => {\n if ((theme.axis.ticks.line.strokeWidth ?? 0) > 0) {\n ctx.lineWidth = Number(theme.axis.ticks.line.strokeWidth)\n ctx.lineCap = 'square'\n\n if (theme.axis.ticks.line.stroke) {\n ctx.strokeStyle = theme.axis.ticks.line.stroke\n }\n\n ctx.beginPath()\n ctx.moveTo(tick.x, tick.y)\n ctx.lineTo(tick.x + tick.lineX, tick.y + tick.lineY)\n ctx.stroke()\n }\n\n const value = format(tick.value)\n\n ctx.save()\n ctx.translate(tick.x + tick.textX, tick.y + tick.textY)\n ctx.rotate(degreesToRadians(tickRotation))\n\n if (theme.axis.ticks.text.fill) {\n ctx.fillStyle = theme.axis.ticks.text.fill\n }\n\n ctx.fillText(String(value), 0, 0)\n ctx.restore()\n })\n\n if (legend !== undefined) {\n let legendX = 0\n let legendY = 0\n let legendRotation = 0\n let textAlign: CanvasTextAlign = 'center'\n\n if (axis === 'y') {\n legendRotation = -90\n legendX = legendOffset\n if (legendPosition === 'start') {\n textAlign = 'start'\n legendY = length\n } else if (legendPosition === 'middle') {\n textAlign = 'center'\n legendY = length / 2\n } else if (legendPosition === 'end') {\n textAlign = 'end'\n }\n } else {\n legendY = legendOffset\n if (legendPosition === 'start') {\n textAlign = 'start'\n } else if (legendPosition === 'middle') {\n textAlign = 'center'\n legendX = length / 2\n } else if (legendPosition === 'end') {\n textAlign = 'end'\n legendX = length\n }\n }\n\n ctx.translate(legendX, legendY)\n ctx.rotate(degreesToRadians(legendRotation))\n ctx.font = `${\n theme.axis.legend.text.fontWeight ? `${theme.axis.legend.text.fontWeight} ` : ''\n }${theme.axis.legend.text.fontSize}px ${theme.axis.legend.text.fontFamily}`\n\n if (theme.axis.legend.text.fill) {\n ctx.fillStyle = theme.axis.legend.text.fill\n }\n\n ctx.textAlign = textAlign\n ctx.textBaseline = 'middle'\n ctx.fillText(legend, 0, 0)\n }\n\n ctx.restore()\n}\n\nexport const renderAxesToCanvas = (\n ctx: CanvasRenderingContext2D,\n {\n xScale,\n yScale,\n width,\n height,\n\n top,\n right,\n bottom,\n left,\n\n theme,\n }: {\n xScale: AnyScale\n yScale: AnyScale\n width: number\n height: number\n top?: CanvasAxisProps | null\n right?: CanvasAxisProps | null\n bottom?: CanvasAxisProps | null\n left?: CanvasAxisProps | null\n theme: CompleteTheme\n }\n) => {\n const axes = { top, right, bottom, left }\n\n positions.forEach(position => {\n const axis = axes[position] as typeof position extends 'bottom' | 'top'\n ? CanvasAxisProps | undefined\n : CanvasAxisProps | undefined\n\n if (!axis) return null\n\n const isXAxis = position === 'top' || position === 'bottom'\n const ticksPosition = position === 'top' || position === 'left' ? 'before' : 'after'\n const scale = isXAxis ? xScale : yScale\n const format = getFormatter(axis.format, scale)\n\n renderAxisToCanvas(ctx, {\n ...axis,\n axis: isXAxis ? 'x' : 'y',\n x: position === 'right' ? width : 0,\n y: position === 'bottom' ? 
height : 0,\n scale,\n format,\n length: isXAxis ? width : height,\n ticksPosition,\n theme,\n })\n })\n}\n\nexport const renderGridLinesToCanvas = (\n ctx: CanvasRenderingContext2D,\n {\n width,\n height,\n scale,\n axis,\n values,\n }: {\n width: number\n height: number\n scale: AnyScale\n axis: 'x' | 'y'\n values?: TicksSpec\n }\n) => {\n const lines = computeGridLines({ width, height, scale, axis, values })\n\n lines.forEach(line => {\n ctx.beginPath()\n ctx.moveTo(line.x1, line.y1)\n ctx.lineTo(line.x2, line.y2)\n ctx.stroke()\n })\n}\n","import {interpolate, interpolateRound} from \"d3-interpolate\";\nimport {identity} from \"./continuous.js\";\nimport {initInterpolator} from \"./init.js\";\nimport {linearish} from \"./linear.js\";\nimport {loggish} from \"./log.js\";\nimport {symlogish} from \"./symlog.js\";\nimport {powish} from \"./pow.js\";\n\nfunction transformer() {\n var x0 = 0,\n x1 = 1,\n t0,\n t1,\n k10,\n transform,\n interpolator = identity,\n clamp = false,\n unknown;\n\n function scale(x) {\n return x == null || isNaN(x = +x) ? unknown : interpolator(k10 === 0 ? 0.5 : (x = (transform(x) - t0) * k10, clamp ? Math.max(0, Math.min(1, x)) : x));\n }\n\n scale.domain = function(_) {\n return arguments.length ? ([x0, x1] = _, t0 = transform(x0 = +x0), t1 = transform(x1 = +x1), k10 = t0 === t1 ? 0 : 1 / (t1 - t0), scale) : [x0, x1];\n };\n\n scale.clamp = function(_) {\n return arguments.length ? (clamp = !!_, scale) : clamp;\n };\n\n scale.interpolator = function(_) {\n return arguments.length ? (interpolator = _, scale) : interpolator;\n };\n\n function range(interpolate) {\n return function(_) {\n var r0, r1;\n return arguments.length ? ([r0, r1] = _, interpolator = interpolate(r0, r1), scale) : [interpolator(0), interpolator(1)];\n };\n }\n\n scale.range = range(interpolate);\n\n scale.rangeRound = range(interpolateRound);\n\n scale.unknown = function(_) {\n return arguments.length ? (unknown = _, scale) : unknown;\n };\n\n return function(t) {\n transform = t, t0 = t(x0), t1 = t(x1), k10 = t0 === t1 ? 
0 : 1 / (t1 - t0);\n return scale;\n };\n}\n\nexport function copy(source, target) {\n return target\n .domain(source.domain())\n .interpolator(source.interpolator())\n .clamp(source.clamp())\n .unknown(source.unknown());\n}\n\nexport default function sequential() {\n var scale = linearish(transformer()(identity));\n\n scale.copy = function() {\n return copy(scale, sequential());\n };\n\n return initInterpolator.apply(scale, arguments);\n}\n\nexport function sequentialLog() {\n var scale = loggish(transformer()).domain([1, 10]);\n\n scale.copy = function() {\n return copy(scale, sequentialLog()).base(scale.base());\n };\n\n return initInterpolator.apply(scale, arguments);\n}\n\nexport function sequentialSymlog() {\n var scale = symlogish(transformer());\n\n scale.copy = function() {\n return copy(scale, sequentialSymlog()).constant(scale.constant());\n };\n\n return initInterpolator.apply(scale, arguments);\n}\n\nexport function sequentialPow() {\n var scale = powish(transformer());\n\n scale.copy = function() {\n return copy(scale, sequentialPow()).exponent(scale.exponent());\n };\n\n return initInterpolator.apply(scale, arguments);\n}\n\nexport function sequentialSqrt() {\n return sequentialPow.apply(null, arguments).exponent(0.5);\n}\n","import {interpolate, interpolateRound, piecewise} from \"d3-interpolate\";\nimport {identity} from \"./continuous.js\";\nimport {initInterpolator} from \"./init.js\";\nimport {linearish} from \"./linear.js\";\nimport {loggish} from \"./log.js\";\nimport {copy} from \"./sequential.js\";\nimport {symlogish} from \"./symlog.js\";\nimport {powish} from \"./pow.js\";\n\nfunction transformer() {\n var x0 = 0,\n x1 = 0.5,\n x2 = 1,\n s = 1,\n t0,\n t1,\n t2,\n k10,\n k21,\n interpolator = identity,\n transform,\n clamp = false,\n unknown;\n\n function scale(x) {\n return isNaN(x = +x) ? unknown : (x = 0.5 + ((x = +transform(x)) - t1) * (s * x < s * t1 ? k10 : k21), interpolator(clamp ? Math.max(0, Math.min(1, x)) : x));\n }\n\n scale.domain = function(_) {\n return arguments.length ? ([x0, x1, x2] = _, t0 = transform(x0 = +x0), t1 = transform(x1 = +x1), t2 = transform(x2 = +x2), k10 = t0 === t1 ? 0 : 0.5 / (t1 - t0), k21 = t1 === t2 ? 0 : 0.5 / (t2 - t1), s = t1 < t0 ? -1 : 1, scale) : [x0, x1, x2];\n };\n\n scale.clamp = function(_) {\n return arguments.length ? (clamp = !!_, scale) : clamp;\n };\n\n scale.interpolator = function(_) {\n return arguments.length ? (interpolator = _, scale) : interpolator;\n };\n\n function range(interpolate) {\n return function(_) {\n var r0, r1, r2;\n return arguments.length ? ([r0, r1, r2] = _, interpolator = piecewise(interpolate, [r0, r1, r2]), scale) : [interpolator(0), interpolator(0.5), interpolator(1)];\n };\n }\n\n scale.range = range(interpolate);\n\n scale.rangeRound = range(interpolateRound);\n\n scale.unknown = function(_) {\n return arguments.length ? (unknown = _, scale) : unknown;\n };\n\n return function(t) {\n transform = t, t0 = t(x0), t1 = t(x1), t2 = t(x2), k10 = t0 === t1 ? 0 : 0.5 / (t1 - t0), k21 = t1 === t2 ? 0 : 0.5 / (t2 - t1), s = t1 < t0 ? 
-1 : 1;\n return scale;\n };\n}\n\nexport default function diverging() {\n var scale = linearish(transformer()(identity));\n\n scale.copy = function() {\n return copy(scale, diverging());\n };\n\n return initInterpolator.apply(scale, arguments);\n}\n\nexport function divergingLog() {\n var scale = loggish(transformer()).domain([0.1, 1, 10]);\n\n scale.copy = function() {\n return copy(scale, divergingLog()).base(scale.base());\n };\n\n return initInterpolator.apply(scale, arguments);\n}\n\nexport function divergingSymlog() {\n var scale = symlogish(transformer());\n\n scale.copy = function() {\n return copy(scale, divergingSymlog()).constant(scale.constant());\n };\n\n return initInterpolator.apply(scale, arguments);\n}\n\nexport function divergingPow() {\n var scale = powish(transformer());\n\n scale.copy = function() {\n return copy(scale, divergingPow()).exponent(scale.exponent());\n };\n\n return initInterpolator.apply(scale, arguments);\n}\n\nexport function divergingSqrt() {\n return divergingPow.apply(null, arguments).exponent(0.5);\n}\n","import {default as value} from \"./value.js\";\n\nexport default function piecewise(interpolate, values) {\n if (values === undefined) values = interpolate, interpolate = value;\n var i = 0, n = values.length - 1, v = values[0], I = new Array(n < 0 ? 0 : n);\n while (i < n) I[i] = interpolate(v, v = values[++i]);\n return function(t) {\n var i = Math.max(0, Math.min(n - 1, Math.floor(t *= n)));\n return I[i](t - i);\n };\n}\n","import {bisect} from \"d3-array\";\nimport {linearish} from \"./linear.js\";\nimport {initRange} from \"./init.js\";\n\nexport default function quantize() {\n var x0 = 0,\n x1 = 1,\n n = 1,\n domain = [0.5],\n range = [0, 1],\n unknown;\n\n function scale(x) {\n return x != null && x <= x ? range[bisect(domain, x, 0, n)] : unknown;\n }\n\n function rescale() {\n var i = -1;\n domain = new Array(n);\n while (++i < n) domain[i] = ((i + 1) * x1 - (i - n) * x0) / (n + 1);\n return scale;\n }\n\n scale.domain = function(_) {\n return arguments.length ? ([x0, x1] = _, x0 = +x0, x1 = +x1, rescale()) : [x0, x1];\n };\n\n scale.range = function(_) {\n return arguments.length ? (n = (range = Array.from(_)).length - 1, rescale()) : range.slice();\n };\n\n scale.invertExtent = function(y) {\n var i = range.indexOf(y);\n return i < 0 ? [NaN, NaN]\n : i < 1 ? [x0, domain[0]]\n : i >= n ? [domain[n - 1], x1]\n : [domain[i - 1], domain[i]];\n };\n\n scale.unknown = function(_) {\n return arguments.length ? 
(unknown = _, scale) : scale;\n };\n\n scale.thresholds = function() {\n return domain.slice();\n };\n\n scale.copy = function() {\n return quantize()\n .domain([x0, x1])\n .range(range)\n .unknown(unknown);\n };\n\n return initRange.apply(linearish(scale), arguments);\n}\n","import {\n schemeCategory10,\n schemeAccent,\n schemeDark2,\n schemePaired,\n schemePastel1,\n schemePastel2,\n schemeSet1,\n schemeSet2,\n schemeSet3,\n} from 'd3-scale-chromatic'\n\nexport const categoricalColorSchemes = {\n nivo: ['#e8c1a0', '#f47560', '#f1e15b', '#e8a838', '#61cdbb', '#97e3d5'],\n category10: schemeCategory10,\n accent: schemeAccent,\n dark2: schemeDark2,\n paired: schemePaired,\n pastel1: schemePastel1,\n pastel2: schemePastel2,\n set1: schemeSet1,\n set2: schemeSet2,\n set3: schemeSet3,\n}\n\nexport type CategoricalColorSchemeId = keyof typeof categoricalColorSchemes\n\nexport const categoricalColorSchemeIds = Object.keys(\n categoricalColorSchemes\n) as CategoricalColorSchemeId[]\n","import {\n schemeBrBG,\n interpolateBrBG,\n schemePRGn,\n interpolatePRGn,\n schemePiYG,\n interpolatePiYG,\n schemePuOr,\n interpolatePuOr,\n schemeRdBu,\n interpolateRdBu,\n schemeRdGy,\n interpolateRdGy,\n schemeRdYlBu,\n interpolateRdYlBu,\n schemeRdYlGn,\n interpolateRdYlGn,\n schemeSpectral,\n interpolateSpectral,\n} from 'd3-scale-chromatic'\n\n// Diverging color schemes support a size k ranging from 3 to 11\nexport const divergingColorSchemes = {\n brown_blueGreen: schemeBrBG,\n purpleRed_green: schemePRGn,\n pink_yellowGreen: schemePiYG,\n purple_orange: schemePuOr,\n red_blue: schemeRdBu,\n red_grey: schemeRdGy,\n red_yellow_blue: schemeRdYlBu,\n red_yellow_green: schemeRdYlGn,\n spectral: schemeSpectral,\n}\n\nexport type DivergingColorSchemeId = keyof typeof divergingColorSchemes\n\nexport const divergingColorSchemeIds = Object.keys(\n divergingColorSchemes\n) as DivergingColorSchemeId[]\n\nexport const divergingColorInterpolators = {\n brown_blueGreen: interpolateBrBG,\n purpleRed_green: interpolatePRGn,\n pink_yellowGreen: interpolatePiYG,\n purple_orange: interpolatePuOr,\n red_blue: interpolateRdBu,\n red_grey: interpolateRdGy,\n red_yellow_blue: interpolateRdYlBu,\n red_yellow_green: interpolateRdYlGn,\n spectral: interpolateSpectral,\n}\n\nexport type DivergingColorInterpolatorId = keyof typeof divergingColorInterpolators\n","import {\n // single hue\n schemeBlues,\n interpolateBlues,\n schemeGreens,\n interpolateGreens,\n schemeGreys,\n interpolateGreys,\n schemeOranges,\n interpolateOranges,\n schemePurples,\n interpolatePurples,\n schemeReds,\n interpolateReds,\n // multi hue\n schemeBuGn,\n interpolateBuGn,\n schemeBuPu,\n interpolateBuPu,\n schemeGnBu,\n interpolateGnBu,\n schemeOrRd,\n interpolateOrRd,\n schemePuBuGn,\n interpolatePuBuGn,\n schemePuBu,\n interpolatePuBu,\n schemePuRd,\n interpolatePuRd,\n schemeRdPu,\n interpolateRdPu,\n schemeYlGnBu,\n interpolateYlGnBu,\n schemeYlGn,\n interpolateYlGn,\n schemeYlOrBr,\n interpolateYlOrBr,\n schemeYlOrRd,\n interpolateYlOrRd,\n interpolateTurbo,\n interpolateViridis,\n interpolateInferno,\n interpolateMagma,\n interpolatePlasma,\n interpolateCividis,\n interpolateWarm,\n interpolateCool,\n interpolateCubehelixDefault,\n} from 'd3-scale-chromatic'\n\n// Sequential, single-hue color schemes support a size k ranging from 3 to 9\n// Sequential, multi-hue color schemes support a size k ranging from 3 to 9\nexport const sequentialColorSchemes = {\n // single hue\n blues: schemeBlues,\n greens: schemeGreens,\n greys: schemeGreys,\n oranges: 
schemeOranges,\n purples: schemePurples,\n reds: schemeReds,\n // multi hue\n blue_green: schemeBuGn,\n blue_purple: schemeBuPu,\n green_blue: schemeGnBu,\n orange_red: schemeOrRd,\n purple_blue_green: schemePuBuGn,\n purple_blue: schemePuBu,\n purple_red: schemePuRd,\n red_purple: schemeRdPu,\n yellow_green_blue: schemeYlGnBu,\n yellow_green: schemeYlGn,\n yellow_orange_brown: schemeYlOrBr,\n yellow_orange_red: schemeYlOrRd,\n}\n\nexport type SequentialColorSchemeId = keyof typeof sequentialColorSchemes\n\nexport const sequentialColorSchemeIds = Object.keys(\n sequentialColorSchemes\n) as SequentialColorSchemeId[]\n\nexport const sequentialColorInterpolators = {\n // single hue\n blues: interpolateBlues,\n greens: interpolateGreens,\n greys: interpolateGreys,\n oranges: interpolateOranges,\n purples: interpolatePurples,\n reds: interpolateReds,\n // multi hue\n turbo: interpolateTurbo,\n viridis: interpolateViridis,\n inferno: interpolateInferno,\n magma: interpolateMagma,\n plasma: interpolatePlasma,\n cividis: interpolateCividis,\n warm: interpolateWarm,\n cool: interpolateCool,\n cubehelixDefault: interpolateCubehelixDefault,\n blue_green: interpolateBuGn,\n blue_purple: interpolateBuPu,\n green_blue: interpolateGnBu,\n orange_red: interpolateOrRd,\n purple_blue_green: interpolatePuBuGn,\n purple_blue: interpolatePuBu,\n purple_red: interpolatePuRd,\n red_purple: interpolateRdPu,\n yellow_green_blue: interpolateYlGnBu,\n yellow_green: interpolateYlGn,\n yellow_orange_brown: interpolateYlOrBr,\n yellow_orange_red: interpolateYlOrRd,\n}\n\nexport type SequentialColorInterpolatorId = keyof typeof sequentialColorInterpolators\n","export default function(t) {\n t = Math.max(0, Math.min(1, t));\n return \"rgb(\"\n + Math.max(0, Math.min(255, Math.round(34.61 + t * (1172.33 - t * (10793.56 - t * (33300.12 - t * (38394.49 - t * 14825.05))))))) + \", \"\n + Math.max(0, Math.min(255, Math.round(23.31 + t * (557.33 + t * (1225.33 - t * (3574.96 - t * (1073.77 + t * 707.56))))))) + \", \"\n + Math.max(0, Math.min(255, Math.round(27.2 + t * (3211.1 - t * (15327.97 - t * (27814 - t * (22569.18 - t * 6838.66)))))))\n + \")\";\n}\n","export default function(t) {\n t = Math.max(0, Math.min(1, t));\n return \"rgb(\"\n + Math.max(0, Math.min(255, Math.round(-4.54 - t * (35.34 - t * (2381.73 - t * (6402.7 - t * (7024.72 - t * 2710.57))))))) + \", \"\n + Math.max(0, Math.min(255, Math.round(32.49 + t * (170.73 + t * (52.82 - t * (131.46 - t * (176.58 - t * 67.37))))))) + \", \"\n + Math.max(0, Math.min(255, Math.round(81.24 + t * (442.36 - t * (2482.43 - t * (6167.24 - t * (6614.94 - t * 2475.67)))))))\n + \")\";\n}\n","import {\n categoricalColorSchemes,\n categoricalColorSchemeIds,\n CategoricalColorSchemeId,\n} from './categorical'\nimport { divergingColorSchemes, divergingColorSchemeIds, DivergingColorSchemeId } from './diverging'\nimport {\n sequentialColorSchemes,\n sequentialColorSchemeIds,\n SequentialColorSchemeId,\n} from './sequential'\n\nexport const colorSchemes = {\n ...categoricalColorSchemes,\n ...divergingColorSchemes,\n ...sequentialColorSchemes,\n}\n\nexport type ColorSchemeId =\n | CategoricalColorSchemeId\n | DivergingColorSchemeId\n | SequentialColorSchemeId\n\nexport const colorSchemeIds = Object.keys(colorSchemes) as ColorSchemeId[]\n\nexport const isCategoricalColorScheme = (\n scheme: ColorSchemeId\n): scheme is CategoricalColorSchemeId =>\n categoricalColorSchemeIds.includes(scheme as CategoricalColorSchemeId)\n\nexport const isDivergingColorScheme = (scheme: ColorSchemeId): 
scheme is DivergingColorSchemeId =>\n divergingColorSchemeIds.includes(scheme as DivergingColorSchemeId)\n\nexport const isSequentialColorScheme = (scheme: ColorSchemeId): scheme is SequentialColorSchemeId =>\n sequentialColorSchemeIds.includes(scheme as SequentialColorSchemeId)\n","import { divergingColorInterpolators, DivergingColorInterpolatorId } from './diverging'\nimport { sequentialColorInterpolators, SequentialColorInterpolatorId } from './sequential'\nimport { cyclicalColorInterpolators, CyclicalColorInterpolatorId } from './cyclical'\n\nexport const colorInterpolators = {\n ...divergingColorInterpolators,\n ...sequentialColorInterpolators,\n ...cyclicalColorInterpolators,\n}\n\nexport type ColorInterpolatorId =\n | DivergingColorInterpolatorId\n | SequentialColorInterpolatorId\n | CyclicalColorInterpolatorId\n\nexport const colorInterpolatorIds = Object.keys(colorInterpolators) as ColorInterpolatorId[]\n","import { interpolateRainbow, interpolateSinebow } from 'd3-scale-chromatic'\n\nexport const cyclicalColorInterpolators = {\n rainbow: interpolateRainbow,\n sinebow: interpolateSinebow,\n}\n\nexport type CyclicalColorInterpolatorId = keyof typeof cyclicalColorInterpolators\n","import { useMemo } from 'react'\nimport { get, isPlainObject } from 'lodash'\nimport { rgb, RGBColor } from 'd3-color'\nimport { Theme } from '@nivo/core'\n\nexport type ColorModifierBrightness = ['brighter', number]\n\nexport type ColorModifierDarkness = ['darker', number]\n\nexport type ColorModifierOpacity = ['opacity', number]\n\nexport type ColorModifier = ColorModifierBrightness | ColorModifierDarkness | ColorModifierOpacity\n\nexport type ColorModifierFunction = (color: RGBColor) => RGBColor\n\nexport type InheritedColorConfigStaticColor = string\n\nexport type InheritedColorConfigCustomFunction = (d: Datum, ...drest: Datum[]) => string\n\nexport interface InheritedColorConfigFromTheme {\n theme: string\n}\n\nexport interface InheritedColorConfigFromContext {\n from: string\n modifiers?: ColorModifier[]\n}\n\nexport type InheritedColorConfig =\n | InheritedColorConfigStaticColor\n | InheritedColorConfigCustomFunction\n | InheritedColorConfigFromTheme\n | InheritedColorConfigFromContext\n\nconst isInheritedColorConfigFromTheme = (\n config: InheritedColorConfig\n): config is InheritedColorConfigFromTheme => {\n return (config as InheritedColorConfigFromTheme).theme !== undefined\n}\n\nconst isInheritedColorConfigFromContext = (\n config: InheritedColorConfig\n): config is InheritedColorConfigFromContext => {\n return (config as InheritedColorConfigFromContext).from !== undefined\n}\n\n/**\n * Create a color generator for items which\n * might inherit from parent context,\n * for example labels, outlines…\n *\n * Support the following strategies:\n * - custom function\n * - color from theme\n * - color from parent, with optional color modifiers\n * - static color\n */\nexport const getInheritedColorGenerator = (\n config: InheritedColorConfig,\n theme?: Theme\n) => {\n // user provided function\n if (typeof config === 'function') {\n return config\n }\n\n if (isPlainObject(config)) {\n // use color from theme\n if (isInheritedColorConfigFromTheme(config)) {\n if (theme === undefined) {\n throw new Error(`Unable to use color from theme as no theme was provided`)\n }\n\n const themeColor = get(theme, config.theme)\n if (themeColor === undefined) {\n throw new Error(`Color from theme is undefined at path: '${config.theme}'`)\n }\n\n return () => themeColor\n }\n\n // use color from parent with optional 
color modifiers\n if (isInheritedColorConfigFromContext(config)) {\n const getColor = (d: Datum) => get(d, config.from)\n\n if (Array.isArray(config.modifiers)) {\n const modifiers: ColorModifierFunction[] = []\n for (const modifier of config.modifiers) {\n const [modifierType, amount] = modifier\n if (modifierType === 'brighter') {\n modifiers.push(color => color.brighter(amount))\n } else if (modifierType === 'darker') {\n modifiers.push(color => color.darker(amount))\n } else if (modifierType === 'opacity') {\n modifiers.push(color => {\n color.opacity = amount\n\n return color\n })\n } else {\n throw new Error(\n `Invalid color modifier: '${modifierType}', must be one of: 'brighter', 'darker', 'opacity'`\n )\n }\n }\n\n if (modifiers.length === 0) return getColor\n\n return (datum: Datum) =>\n modifiers\n .reduce((color, modify) => modify(color), rgb(getColor(datum)))\n .toString()\n }\n\n // no modifier\n return getColor\n }\n\n throw new Error(\n `Invalid color spec, you should either specify 'theme' or 'from' when using a config object`\n )\n }\n\n // use provided color statically\n return () => config as string\n}\n\nexport const useInheritedColor = (\n config: InheritedColorConfig,\n theme?: Theme\n) => useMemo(() => getInheritedColorGenerator(config, theme), [config, theme])\n","import PropTypes from 'prop-types'\nimport { colorSchemeIds } from './schemes'\n\nexport const ordinalColorsPropType = PropTypes.oneOfType([\n PropTypes.func,\n PropTypes.arrayOf(PropTypes.string),\n PropTypes.shape({\n scheme: PropTypes.oneOf(colorSchemeIds).isRequired,\n size: PropTypes.number,\n }),\n PropTypes.shape({\n datum: PropTypes.string.isRequired,\n }),\n PropTypes.string,\n])\n\nexport const inheritedColorPropType = PropTypes.oneOfType([\n PropTypes.string,\n PropTypes.func,\n PropTypes.shape({\n theme: PropTypes.string.isRequired,\n }),\n PropTypes.shape({\n from: PropTypes.string.isRequired,\n modifiers: PropTypes.arrayOf(PropTypes.array),\n }),\n])\n","import { useMemo } from 'react'\nimport { scaleSequential } from 'd3-scale'\nimport { colorInterpolators, ColorInterpolatorId } from '../schemes'\n\nexport interface SequentialColorScaleConfig {\n type: 'sequential'\n scheme?: ColorInterpolatorId\n minValue?: number\n maxValue?: number\n}\n\nexport interface SequentialColorScaleValues {\n min: number\n max: number\n}\n\nexport const sequentialColorScaleDefaults: {\n scheme: ColorInterpolatorId\n} = {\n scheme: 'turbo',\n}\n\nexport const getSequentialColorScale = (\n {\n scheme = sequentialColorScaleDefaults.scheme,\n minValue,\n maxValue,\n }: SequentialColorScaleConfig,\n values: SequentialColorScaleValues\n) => {\n const min = minValue !== undefined ? minValue : values.min\n const max = maxValue !== undefined ? 
maxValue : values.max\n\n return scaleSequential().domain([min, max]).interpolator(colorInterpolators[scheme])\n}\n\nexport const useSequentialColorScale = (\n config: SequentialColorScaleConfig,\n values: SequentialColorScaleValues\n) => useMemo(() => getSequentialColorScale(config, values), [config, values])\n","import { useMemo } from 'react'\nimport { scaleDiverging } from 'd3-scale'\nimport { colorInterpolators, ColorInterpolatorId } from '../schemes'\n\nexport interface DivergingColorScaleConfig {\n type: 'diverging'\n scheme?: ColorInterpolatorId\n minValue?: number\n maxValue?: number\n divergeAt?: number\n}\n\nexport interface DivergingColorScaleValues {\n min: number\n max: number\n}\n\nexport const divergingColorScaleDefaults: {\n scheme: ColorInterpolatorId\n divergeAt: number\n} = {\n scheme: 'red_yellow_blue',\n divergeAt: 0.5,\n}\n\nexport const getDivergingColorScale = (\n {\n scheme = divergingColorScaleDefaults.scheme,\n divergeAt = divergingColorScaleDefaults.divergeAt,\n minValue,\n maxValue,\n }: DivergingColorScaleConfig,\n values: DivergingColorScaleValues\n) => {\n const min = minValue !== undefined ? minValue : values.min\n const max = maxValue !== undefined ? maxValue : values.max\n const domain = [min, min + (max - min) / 2, max]\n\n const interpolator = colorInterpolators[scheme]\n const offset = 0.5 - divergeAt\n const offsetInterpolator = (t: number) => interpolator(t + offset)\n\n return scaleDiverging(offsetInterpolator).domain(domain).clamp(true)\n}\n\nexport const useDivergingColorScale = (\n config: DivergingColorScaleConfig,\n values: DivergingColorScaleValues\n) => useMemo(() => getDivergingColorScale(config, values), [config, values])\n","import { useMemo } from 'react'\nimport { scaleQuantize } from 'd3-scale'\nimport { colorInterpolators, ColorInterpolatorId } from '../schemes'\n\n// colors from a scheme\nexport interface QuantizeColorScaleSchemeConfig {\n type: 'quantize'\n domain?: [number, number]\n scheme?: ColorInterpolatorId\n steps?: number\n}\n\n// explicit colors\nexport interface QuantizeColorScaleColorsConfig {\n type: 'quantize'\n domain?: [number, number]\n colors: string[]\n}\n\nexport type QuantizeColorScaleConfig =\n | QuantizeColorScaleSchemeConfig\n | QuantizeColorScaleColorsConfig\n\nexport interface QuantizeColorScaleValues {\n min: number\n max: number\n}\n\nexport const quantizeColorScaleDefaults: {\n scheme: ColorInterpolatorId\n steps: NonNullable\n} = {\n scheme: 'turbo',\n steps: 7,\n}\n\nexport const getQuantizeColorScale = (\n config: QuantizeColorScaleConfig,\n values: QuantizeColorScaleValues\n) => {\n const colorScale = scaleQuantize()\n .domain(config.domain || [values.min, values.max])\n .nice()\n\n if ('colors' in config) {\n colorScale.range(config.colors)\n } else {\n const scheme = config.scheme || quantizeColorScaleDefaults.scheme\n const steps = config.steps === undefined ? 
quantizeColorScaleDefaults.steps : config.steps\n const interpolator = colorInterpolators[scheme]\n const colors = Array.from({ length: steps }).map((_, step) =>\n interpolator(step * (1 / (steps - 1)))\n )\n\n colorScale.range(colors)\n }\n\n return colorScale\n}\n\nexport const useQuantizeColorScale = (\n config: QuantizeColorScaleConfig,\n values: QuantizeColorScaleValues\n) => useMemo(() => getQuantizeColorScale(config, values), [config, values])\n","import { useMemo } from 'react'\nimport { ScaleDiverging, ScaleQuantize, ScaleSequential, scaleLinear } from 'd3-scale'\nimport {\n SequentialColorScaleConfig,\n SequentialColorScaleValues,\n getSequentialColorScale,\n} from './sequentialColorScale'\nimport {\n DivergingColorScaleConfig,\n DivergingColorScaleValues,\n getDivergingColorScale,\n} from './divergingColorScale'\nimport {\n QuantizeColorScaleConfig,\n QuantizeColorScaleValues,\n getQuantizeColorScale,\n} from './quantizeColorScale'\n\nexport type ContinuousColorScaleConfig =\n | SequentialColorScaleConfig\n | DivergingColorScaleConfig\n | QuantizeColorScaleConfig\n\nexport type ContinuousColorScaleValues =\n | SequentialColorScaleValues\n | DivergingColorScaleValues\n | QuantizeColorScaleValues\n\nconst isSequentialColorScaleConfig = (\n config: ContinuousColorScaleConfig\n): config is SequentialColorScaleConfig => config.type === 'sequential'\n\nconst isDivergingColorScaleConfig = (\n config: ContinuousColorScaleConfig\n): config is DivergingColorScaleConfig => config.type === 'diverging'\n\nconst isQuantizeColorScaleConfig = (\n config: ContinuousColorScaleConfig\n): config is QuantizeColorScaleConfig => config.type === 'quantize'\n\nexport const getContinuousColorScale = (\n config: Config,\n values: ContinuousColorScaleValues\n) => {\n if (isSequentialColorScaleConfig(config)) {\n return getSequentialColorScale(config, values)\n }\n\n if (isDivergingColorScaleConfig(config)) {\n return getDivergingColorScale(config, values)\n }\n\n if (isQuantizeColorScaleConfig(config)) {\n return getQuantizeColorScale(config, values)\n }\n\n throw new Error('Invalid continuous color scale config')\n}\n\nexport const useContinuousColorScale = (\n config: ContinuousColorScaleConfig,\n values: ContinuousColorScaleValues\n) => useMemo(() => getContinuousColorScale(config, values), [config, values])\n\nexport const computeContinuousColorScaleColorStops = (\n scale: ScaleSequential | ScaleDiverging | ScaleQuantize,\n steps = 16\n) => {\n const domain = scale.domain()\n\n // quantize\n if ('thresholds' in scale) {\n const stops: {\n key: string\n offset: number\n stopColor: string\n }[] = []\n\n const normalizedScale = scaleLinear().domain(domain).range([0, 1])\n scale.range().forEach((color, index) => {\n const [start, end] = scale.invertExtent(color)\n\n stops.push({\n key: `${index}.0`,\n offset: normalizedScale(start),\n stopColor: color,\n })\n stops.push({\n key: `${index}.1`,\n offset: normalizedScale(end),\n stopColor: color,\n })\n })\n\n return stops\n }\n\n const colorStopsScale = scale.copy()\n if (domain.length === 2) {\n // sequential\n colorStopsScale.domain([0, 1])\n } else if (domain.length === 3) {\n // diverging\n colorStopsScale.domain([0, 0.5, 1])\n }\n\n return ((colorStopsScale as any).ticks(steps) as number[]).map((value: number) => ({\n key: `${value}`,\n offset: value,\n stopColor: `${colorStopsScale(value)}`,\n }))\n}\n","import { useMemo } from 'react'\nimport { get, isPlainObject } from 'lodash'\nimport { scaleOrdinal } from 'd3-scale'\nimport {\n ColorSchemeId,\n 
colorSchemes,\n isCategoricalColorScheme,\n isSequentialColorScheme,\n isDivergingColorScheme,\n} from '../schemes'\n\n/**\n * Static color.\n */\nexport type OrdinalColorScaleConfigStaticColor = string\n\n/**\n * User defined function, receiving the current datum.\n */\nexport type OrdinalColorScaleConfigCustomFunction = (d: Datum) => string\n\n/**\n * Pre-defined color scheme.\n */\nexport interface OrdinalColorScaleConfigScheme {\n scheme: ColorSchemeId\n // size is useful for diverging & sequential colors,\n // as they are array of array, whereas categorical colors\n // are simple arrays, if the size isn't specified,\n // the bigger array will be selected, this means the 11th\n // for diverging colors and 9th for sequential ones.\n size?: number\n}\n\n/**\n * User defined colors.\n */\nexport type OrdinalColorScaleConfigCustomColors = string[]\n\n/**\n * Get color from datum.\n */\nexport interface OrdinalColorScaleConfigDatumProperty {\n // path to the color property\n datum: string\n}\n\nexport type OrdinalColorScaleConfig =\n | OrdinalColorScaleConfigStaticColor\n | OrdinalColorScaleConfigCustomFunction\n | OrdinalColorScaleConfigScheme\n | OrdinalColorScaleConfigCustomColors\n | OrdinalColorScaleConfigDatumProperty\n\nconst isOrdinalColorScaleConfigScheme = (\n config: OrdinalColorScaleConfig\n): config is OrdinalColorScaleConfigScheme => {\n return (config as OrdinalColorScaleConfigScheme).scheme !== undefined\n}\n\nconst isOrdinalColorScaleConfigDatumProperty = (\n config: OrdinalColorScaleConfig\n): config is OrdinalColorScaleConfigDatumProperty => {\n return (config as OrdinalColorScaleConfigDatumProperty).datum !== undefined\n}\n\nexport type DatumIdentityAccessor = (datum: Datum) => string | number\n\nexport type OrdinalColorScale = (d: Datum) => string\n\n/**\n * Compute an ordinal color scale\n */\nexport const getOrdinalColorScale = (\n config: OrdinalColorScaleConfig,\n identity?: string | DatumIdentityAccessor\n): OrdinalColorScale => {\n // user defined function\n if (typeof config === 'function') {\n return config\n }\n\n // compute accessor to the datum identity\n const getIdentity =\n typeof identity === 'function' ? 
identity : (datum: Datum) => get(datum, identity as string)\n\n // user defined color array\n if (Array.isArray(config)) {\n const scale = scaleOrdinal(config)\n const generator = (datum: Datum) => scale(getIdentity(datum))\n generator.scale = scale\n\n return generator as OrdinalColorScale\n }\n\n if (isPlainObject(config)) {\n // use color from current datum\n if (isOrdinalColorScaleConfigDatumProperty(config)) {\n return (datum: Datum) => get(datum, config.datum)\n }\n\n // ordinal scale from predefined scheme\n if (isOrdinalColorScaleConfigScheme(config)) {\n // categorical color scheme\n if (isCategoricalColorScheme(config.scheme)) {\n const scale = scaleOrdinal(colorSchemes[config.scheme])\n const generator = (datum: Datum) => scale(getIdentity(datum))\n generator.scale = scale\n\n return generator as OrdinalColorScale\n }\n\n // Diverging color schemes support a size k ranging from 3 to 11\n if (isDivergingColorScheme(config.scheme)) {\n if (config.size !== undefined && (config.size < 3 || config.size > 11)) {\n throw new Error(\n `Invalid size '${config.size}' for diverging color scheme '${config.scheme}', must be between 3~11`\n )\n }\n\n const scale = scaleOrdinal(colorSchemes[config.scheme][config.size || 11])\n const generator = (d: Datum) => scale(getIdentity(d))\n generator.scale = scale\n\n return generator as OrdinalColorScale\n }\n\n // Sequential, single-hue color schemes support a size k ranging from 3 to 9.\n // Sequential, multi-hue color schemes support a size k ranging from 3 to 9.\n if (isSequentialColorScheme(config.scheme)) {\n if (config.size !== undefined && (config.size < 3 || config.size > 9)) {\n throw new Error(\n `Invalid size '${config.size}' for sequential color scheme '${config.scheme}', must be between 3~9`\n )\n }\n\n const scale = scaleOrdinal(colorSchemes[config.scheme][config.size || 9])\n const generator = (d: Datum) => scale(getIdentity(d))\n generator.scale = scale\n\n return generator as OrdinalColorScale\n }\n }\n\n throw new Error(\n `Invalid colors, when using an object, you should either pass a 'datum' or a 'scheme' property`\n )\n }\n\n // static color\n return () => config as string\n}\n\nexport const useOrdinalColorScale = (\n config: OrdinalColorScaleConfig,\n identity: string | DatumIdentityAccessor\n) => useMemo(() => getOrdinalColorScale(config, identity), [config, identity])\n","import { SymbolProps } from './types'\n\nexport const SymbolTriangle = ({\n x,\n y,\n size,\n fill,\n opacity = 1,\n borderWidth = 0,\n borderColor = 'transparent',\n}: SymbolProps) => {\n return (\n \n \n \n )\n}\n","import { ContinuousColorsLegendProps } from './types'\n\nexport const continuousColorsLegendDefaults: {\n length: NonNullable\n thickness: NonNullable\n direction: NonNullable\n tickPosition: NonNullable\n tickSize: NonNullable\n tickSpacing: NonNullable\n tickOverlap: NonNullable\n tickFormat: NonNullable\n titleAlign: NonNullable\n titleOffset: NonNullable\n} = {\n length: 200,\n thickness: 16,\n direction: 'row',\n tickPosition: 'after',\n tickSize: 4,\n tickSpacing: 3,\n tickOverlap: false,\n tickFormat: (value: number) => `${value}`,\n titleAlign: 'start',\n titleOffset: 4,\n}\n","import { scaleLinear } from 'd3-scale'\nimport { getValueFormatter } from '@nivo/core'\nimport { computeContinuousColorScaleColorStops } from '@nivo/colors'\nimport {\n BoxLegendSvgProps,\n ContinuousColorsLegendProps,\n LegendAnchor,\n LegendItemDirection,\n} from './types'\nimport { continuousColorsLegendDefaults } from './defaults'\n\nconst isObject = (item: 
unknown): item is T =>\n typeof item === 'object' && !Array.isArray(item) && item !== null\n\nconst zeroPadding = {\n top: 0,\n right: 0,\n bottom: 0,\n left: 0,\n}\n\nexport const computeDimensions = ({\n direction,\n itemsSpacing,\n padding: _padding,\n itemCount,\n itemWidth,\n itemHeight,\n}: Pick &\n Record<'itemsSpacing' | 'itemCount' | 'itemWidth' | 'itemHeight', number>) => {\n if (typeof _padding !== 'number' && !isObject(_padding)) {\n throw new Error('Invalid property padding, must be one of: number, object')\n }\n\n const padding =\n typeof _padding === 'number'\n ? {\n top: _padding,\n right: _padding,\n bottom: _padding,\n left: _padding,\n }\n : {\n ...zeroPadding,\n ..._padding,\n }\n\n const horizontalPadding = padding.left + padding.right\n const verticalPadding = padding.top + padding.bottom\n let width = itemWidth + horizontalPadding\n let height = itemHeight + verticalPadding\n const spacing = (itemCount - 1) * itemsSpacing\n if (direction === 'row') {\n width = itemWidth * itemCount + spacing + horizontalPadding\n } else if (direction === 'column') {\n height = itemHeight * itemCount + spacing + verticalPadding\n }\n\n return { width, height, padding }\n}\n\nexport const computePositionFromAnchor = ({\n anchor,\n translateX,\n translateY,\n containerWidth,\n containerHeight,\n width,\n height,\n}: { anchor: LegendAnchor } & Record<\n 'translateX' | 'translateY' | 'containerWidth' | 'containerHeight' | 'width' | 'height',\n number\n>) => {\n let x = translateX\n let y = translateY\n\n switch (anchor) {\n case 'top':\n x += (containerWidth - width) / 2\n break\n\n case 'top-right':\n x += containerWidth - width\n break\n\n case 'right':\n x += containerWidth - width\n y += (containerHeight - height) / 2\n break\n\n case 'bottom-right':\n x += containerWidth - width\n y += containerHeight - height\n break\n\n case 'bottom':\n x += (containerWidth - width) / 2\n y += containerHeight - height\n break\n\n case 'bottom-left':\n y += containerHeight - height\n break\n\n case 'left':\n y += (containerHeight - height) / 2\n break\n\n case 'center':\n x += (containerWidth - width) / 2\n y += (containerHeight - height) / 2\n break\n }\n\n return { x, y }\n}\n\nexport const computeItemLayout = ({\n direction,\n justify,\n symbolSize,\n symbolSpacing,\n width,\n height,\n}: {\n direction: LegendItemDirection\n justify: boolean\n} & Record<'symbolSize' | 'symbolSpacing' | 'width' | 'height', number>) => {\n let symbolX\n let symbolY\n\n let labelX\n let labelY\n let labelAnchor: 'start' | 'middle' | 'end'\n let labelAlignment: 'alphabetic' | 'central' | 'text-before-edge'\n\n switch (direction) {\n case 'left-to-right':\n symbolX = 0\n symbolY = (height - symbolSize) / 2\n\n labelY = height / 2\n labelAlignment = 'central'\n if (justify) {\n labelX = width\n labelAnchor = 'end'\n } else {\n labelX = symbolSize + symbolSpacing\n labelAnchor = 'start'\n }\n break\n\n case 'right-to-left':\n symbolX = width - symbolSize\n symbolY = (height - symbolSize) / 2\n\n labelY = height / 2\n labelAlignment = 'central'\n if (justify) {\n labelX = 0\n labelAnchor = 'start'\n } else {\n labelX = width - symbolSize - symbolSpacing\n labelAnchor = 'end'\n }\n break\n\n case 'top-to-bottom':\n symbolX = (width - symbolSize) / 2\n symbolY = 0\n\n labelX = width / 2\n\n labelAnchor = 'middle'\n if (justify) {\n labelY = height\n labelAlignment = 'alphabetic'\n } else {\n labelY = symbolSize + symbolSpacing\n labelAlignment = 'text-before-edge'\n }\n break\n\n case 'bottom-to-top':\n symbolX = (width - 
symbolSize) / 2\n symbolY = height - symbolSize\n\n labelX = width / 2\n labelAnchor = 'middle'\n if (justify) {\n labelY = 0\n labelAlignment = 'text-before-edge'\n } else {\n labelY = height - symbolSize - symbolSpacing\n labelAlignment = 'alphabetic'\n }\n break\n }\n\n return {\n symbolX,\n symbolY,\n labelX,\n labelY,\n labelAnchor,\n labelAlignment,\n }\n}\n\nexport const computeContinuousColorsLegend = ({\n scale,\n ticks,\n length = continuousColorsLegendDefaults.length,\n thickness = continuousColorsLegendDefaults.thickness,\n direction = continuousColorsLegendDefaults.direction,\n tickPosition = continuousColorsLegendDefaults.tickPosition,\n tickSize = continuousColorsLegendDefaults.tickSize,\n tickSpacing = continuousColorsLegendDefaults.tickSpacing,\n tickOverlap = continuousColorsLegendDefaults.tickOverlap,\n tickFormat = continuousColorsLegendDefaults.tickFormat,\n title,\n titleAlign = continuousColorsLegendDefaults.titleAlign,\n titleOffset = continuousColorsLegendDefaults.titleOffset,\n}: ContinuousColorsLegendProps) => {\n // left to right for `row`, bottom to top for `column`\n const domain = direction === 'column' ? [...scale.domain()].reverse() : scale.domain()\n\n const positionScale = scaleLinear().domain(domain)\n if (domain.length === 2) {\n // sequential, quantize\n positionScale.range([0, length])\n } else if (domain.length === 3) {\n // diverging\n positionScale.range([0, length / 2, length])\n }\n\n let values: number[]\n if ('thresholds' in scale) {\n // quantize\n values = [domain[0], ...scale.thresholds(), domain[1]]\n } else {\n // sequential, diverging\n values = Array.isArray(ticks) ? ticks : (scale as any).ticks(ticks)\n }\n\n const colorStops = computeContinuousColorScaleColorStops(scale, 32)\n\n const formatValue = getValueFormatter(tickFormat)\n\n const computedTicks: {\n x1: number\n y1: number\n x2: number\n y2: number\n text: string\n textX: number\n textY: number\n textHorizontalAlign: 'start' | 'middle' | 'end'\n textVerticalAlign: 'alphabetic' | 'central' | 'hanging'\n }[] = []\n\n let width: number\n let height: number\n\n const gradientX1 = 0\n let gradientY1 = 0\n let gradientX2 = 0\n const gradientY2 = 0\n\n let titleX: number\n let titleY: number\n let titleRotation: number\n let titleVerticalAlign: 'alphabetic' | 'hanging'\n\n if (direction === 'row') {\n width = length\n height = thickness\n\n gradientX2 = 1\n\n let y1: number\n let y2: number\n\n let textY: number\n const textHorizontalAlign = 'middle'\n let textVerticalAlign: 'alphabetic' | 'hanging'\n\n titleRotation = 0\n if (titleAlign === 'start') {\n titleX = 0\n } else if (titleAlign === 'middle') {\n titleX = length / 2\n } else {\n titleX = length\n }\n\n if (tickPosition === 'before') {\n y1 = -tickSize\n y2 = tickOverlap ? thickness : 0\n\n textY = -tickSize - tickSpacing\n textVerticalAlign = 'alphabetic'\n\n titleY = thickness + titleOffset\n titleVerticalAlign = 'hanging'\n } else {\n y1 = tickOverlap ? 
0 : thickness\n y2 = thickness + tickSize\n\n textY = y2 + tickSpacing\n textVerticalAlign = 'hanging'\n\n titleY = -titleOffset\n titleVerticalAlign = 'alphabetic'\n }\n\n values.forEach(value => {\n const x = positionScale(value)\n\n computedTicks.push({\n x1: x,\n y1,\n x2: x,\n y2,\n text: formatValue(value),\n textX: x,\n textY,\n textHorizontalAlign,\n textVerticalAlign,\n })\n })\n } else {\n width = thickness\n height = length\n\n gradientY1 = 1\n\n let x1: number\n let x2: number\n\n let textX: number\n let textHorizontalAlign: 'start' | 'end'\n const textVerticalAlign = 'central'\n\n titleRotation = -90\n if (titleAlign === 'start') {\n titleY = length\n } else if (titleAlign === 'middle') {\n titleY = length / 2\n } else {\n titleY = 0\n }\n\n if (tickPosition === 'before') {\n x1 = -tickSize\n x2 = tickOverlap ? thickness : 0\n\n textX = x1 - tickSpacing\n textHorizontalAlign = 'end'\n\n titleX = thickness + titleOffset\n titleVerticalAlign = 'hanging'\n } else {\n x1 = tickOverlap ? 0 : thickness\n x2 = thickness + tickSize\n\n textX = x2 + tickSpacing\n textHorizontalAlign = 'start'\n\n titleX = -titleOffset\n titleVerticalAlign = 'alphabetic'\n }\n\n values.forEach(value => {\n const y = positionScale(value)\n\n computedTicks.push({\n x1,\n y1: y,\n x2,\n y2: y,\n text: formatValue(value),\n textX,\n textY: y,\n textHorizontalAlign,\n textVerticalAlign,\n })\n })\n }\n\n return {\n width,\n height,\n gradientX1,\n gradientY1,\n gradientX2,\n gradientY2,\n colorStops,\n ticks: computedTicks,\n titleText: title,\n titleX,\n titleY,\n titleRotation,\n titleHorizontalAlign: titleAlign,\n titleVerticalAlign,\n }\n}\n","import { Fragment } from 'react'\nimport { useTheme } from '@nivo/core'\nimport { computeContinuousColorsLegend } from '../compute'\nimport { ContinuousColorsLegendProps } from '../types'\nimport { continuousColorsLegendDefaults } from '../defaults'\n\nexport const ContinuousColorsLegendSvg = ({\n scale,\n ticks,\n length = continuousColorsLegendDefaults.length,\n thickness = continuousColorsLegendDefaults.thickness,\n direction = continuousColorsLegendDefaults.direction,\n tickPosition = continuousColorsLegendDefaults.tickPosition,\n tickSize = continuousColorsLegendDefaults.tickSize,\n tickSpacing = continuousColorsLegendDefaults.tickSpacing,\n tickOverlap = continuousColorsLegendDefaults.tickOverlap,\n tickFormat = continuousColorsLegendDefaults.tickFormat,\n title,\n titleAlign = continuousColorsLegendDefaults.titleAlign,\n titleOffset = continuousColorsLegendDefaults.titleOffset,\n}: ContinuousColorsLegendProps) => {\n const {\n width,\n height,\n gradientX1,\n gradientY1,\n gradientX2,\n gradientY2,\n ticks: computedTicks,\n colorStops,\n titleText,\n titleX,\n titleY,\n titleRotation,\n titleVerticalAlign,\n titleHorizontalAlign,\n } = computeContinuousColorsLegend({\n scale,\n ticks,\n length,\n thickness,\n direction,\n tickPosition,\n tickSize,\n tickSpacing,\n tickOverlap,\n tickFormat,\n title,\n titleAlign,\n titleOffset,\n })\n\n const theme = useTheme()\n\n const id = `ContinuousColorsLegendSvgGradient.${direction}.${colorStops\n .map(stop => stop.offset)\n .join('_')}`\n\n return (\n \n \n \n {colorStops.map(colorStop => (\n \n ))}\n \n \n {titleText && (\n \n {titleText}\n \n )}\n \n {computedTicks.map((tick, index) => (\n \n \n \n {tick.text}\n \n \n ))}\n \n )\n}\n","import { AnchoredContinuousColorsLegendProps } from '../types'\nimport { computePositionFromAnchor } from '../compute'\nimport { continuousColorsLegendDefaults } from 
'../defaults'\nimport { ContinuousColorsLegendSvg } from './ContinuousColorsLegendSvg'\n\nexport const AnchoredContinuousColorsLegendSvg = ({\n containerWidth,\n containerHeight,\n anchor,\n translateX = 0,\n translateY = 0,\n length = continuousColorsLegendDefaults.length,\n thickness = continuousColorsLegendDefaults.thickness,\n direction = continuousColorsLegendDefaults.direction,\n ...legendProps\n}: AnchoredContinuousColorsLegendProps) => {\n let width: number\n let height: number\n if (direction === 'row') {\n width = length\n height = thickness\n } else {\n width = thickness\n height = length\n }\n\n const { x, y } = computePositionFromAnchor({\n anchor,\n translateX,\n translateY,\n containerWidth,\n containerHeight,\n width,\n height,\n })\n\n return (\n \n \n \n )\n}\n","import { useState, useCallback } from 'react'\nimport * as React from 'react'\nimport { useTheme } from '@nivo/core'\nimport { LegendSvgItemProps } from '../types'\nimport { computeItemLayout } from '../compute'\nimport { SymbolCircle, SymbolDiamond, SymbolSquare, SymbolTriangle } from './symbols'\n\ntype Style = Partial<{\n itemBackground: string\n itemOpacity: number\n itemTextColor: string\n symbolBorderColor: string\n symbolBorderWidth: number\n symbolSize: number\n}>\n\nconst symbolByShape = {\n circle: SymbolCircle,\n diamond: SymbolDiamond,\n square: SymbolSquare,\n triangle: SymbolTriangle,\n}\n\nexport const LegendSvgItem = ({\n x,\n y,\n width,\n height,\n data,\n direction = 'left-to-right',\n justify = false,\n textColor,\n background = 'transparent',\n opacity = 1,\n\n symbolShape = 'square',\n symbolSize = 16,\n symbolSpacing = 8,\n symbolBorderWidth = 0,\n symbolBorderColor = 'transparent',\n\n onClick,\n onMouseEnter,\n onMouseLeave,\n toggleSerie,\n\n effects,\n}: LegendSvgItemProps) => {\n const [style, setStyle] = useState

(\r\n type: T\r\n): PayloadActionCreator\r\n\r\n/**\r\n * A utility function to create an action creator for the given action type\r\n * string. The action creator accepts a single argument, which will be included\r\n * in the action object as a field called payload. The action creator function\r\n * will also have its toString() overridden so that it returns the action type,\r\n * allowing it to be used in reducer logic that is looking for that action type.\r\n *\r\n * @param type The action type to use for created actions.\r\n * @param prepare (optional) a method that takes any number of arguments and returns { payload } or { payload, meta }.\r\n * If this is given, the resulting action creator will pass its arguments to this method to calculate payload & meta.\r\n *\r\n * @public\r\n */\r\nexport function createAction<\r\n PA extends PrepareAction,\r\n T extends string = string\r\n>(\r\n type: T,\r\n prepareAction: PA\r\n): PayloadActionCreator['payload'], T, PA>\r\n\r\nexport function createAction(type: string, prepareAction?: Function): any {\r\n function actionCreator(...args: any[]) {\r\n if (prepareAction) {\r\n let prepared = prepareAction(...args)\r\n if (!prepared) {\r\n throw new Error('prepareAction did not return an object')\r\n }\r\n\r\n return {\r\n type,\r\n payload: prepared.payload,\r\n ...('meta' in prepared && { meta: prepared.meta }),\r\n ...('error' in prepared && { error: prepared.error }),\r\n }\r\n }\r\n return { type, payload: args[0] }\r\n }\r\n\r\n actionCreator.toString = () => `${type}`\r\n\r\n actionCreator.type = type\r\n\r\n actionCreator.match = (action: Action): action is PayloadAction =>\r\n action.type === type\r\n\r\n return actionCreator\r\n}\r\n\r\n/**\r\n * Returns true if value is a plain object with a `type` property.\r\n */\r\nexport function isAction(action: unknown): action is Action {\r\n return isPlainObject(action) && 'type' in action\r\n}\r\n\r\n/**\r\n * Returns true if value is an action with a string type and valid Flux Standard Action keys.\r\n */\r\nexport function isFSA(action: unknown): action is {\r\n type: string\r\n payload?: unknown\r\n error?: unknown\r\n meta?: unknown\r\n} {\r\n return (\r\n isAction(action) &&\r\n typeof action.type === 'string' &&\r\n Object.keys(action).every(isValidKey)\r\n )\r\n}\r\n\r\nfunction isValidKey(key: string) {\r\n return ['type', 'payload', 'error', 'meta'].indexOf(key) > -1\r\n}\r\n\r\n/**\r\n * Returns the action type of the actions created by the passed\r\n * `createAction()`-generated action creator (arbitrary action creators\r\n * are not supported).\r\n *\r\n * @param action The action creator whose action type to get.\r\n * @returns The action type used by the action creator.\r\n *\r\n * @public\r\n */\r\nexport function getType(\r\n actionCreator: PayloadActionCreator\r\n): T {\r\n return `${actionCreator}` as T\r\n}\r\n\r\n// helper types for more readable typings\r\n\r\ntype IfPrepareActionMethodProvided<\r\n PA extends PrepareAction | void,\r\n True,\r\n False\r\n> = PA extends (...args: any[]) => any ? 
True : False\r\n","import type { Action, AnyAction } from 'redux'\r\nimport type {\r\n CaseReducer,\r\n CaseReducers,\r\n ActionMatcherDescriptionCollection,\r\n} from './createReducer'\r\nimport type { TypeGuard } from './tsHelpers'\r\n\r\nexport interface TypedActionCreator {\r\n (...args: any[]): Action\r\n type: Type\r\n}\r\n\r\n/**\r\n * A builder for an action <-> reducer map.\r\n *\r\n * @public\r\n */\r\nexport interface ActionReducerMapBuilder {\r\n /**\r\n * Adds a case reducer to handle a single exact action type.\r\n * @remarks\r\n * All calls to `builder.addCase` must come before any calls to `builder.addMatcher` or `builder.addDefaultCase`.\r\n * @param actionCreator - Either a plain action type string, or an action creator generated by [`createAction`](./createAction) that can be used to determine the action type.\r\n * @param reducer - The actual case reducer function.\r\n */\r\n addCase>(\r\n actionCreator: ActionCreator,\r\n reducer: CaseReducer>\r\n ): ActionReducerMapBuilder\r\n /**\r\n * Adds a case reducer to handle a single exact action type.\r\n * @remarks\r\n * All calls to `builder.addCase` must come before any calls to `builder.addMatcher` or `builder.addDefaultCase`.\r\n * @param actionCreator - Either a plain action type string, or an action creator generated by [`createAction`](./createAction) that can be used to determine the action type.\r\n * @param reducer - The actual case reducer function.\r\n */\r\n addCase>(\r\n type: Type,\r\n reducer: CaseReducer\r\n ): ActionReducerMapBuilder\r\n\r\n /**\r\n * Allows you to match your incoming actions against your own filter function instead of only the `action.type` property.\r\n * @remarks\r\n * If multiple matcher reducers match, all of them will be executed in the order\r\n * they were defined in - even if a case reducer already matched.\r\n * All calls to `builder.addMatcher` must come after any calls to `builder.addCase` and before any calls to `builder.addDefaultCase`.\r\n * @param matcher - A matcher function. 
In TypeScript, this should be a [type predicate](https://www.typescriptlang.org/docs/handbook/advanced-types.html#using-type-predicates)\r\n * function\r\n * @param reducer - The actual case reducer function.\r\n *\r\n * @example\r\n```ts\r\nimport {\r\n createAction,\r\n createReducer,\r\n AsyncThunk,\r\n AnyAction,\r\n} from \"@reduxjs/toolkit\";\r\n\r\ntype GenericAsyncThunk = AsyncThunk;\r\n\r\ntype PendingAction = ReturnType;\r\ntype RejectedAction = ReturnType;\r\ntype FulfilledAction = ReturnType;\r\n\r\nconst initialState: Record = {};\r\nconst resetAction = createAction(\"reset-tracked-loading-state\");\r\n\r\nfunction isPendingAction(action: AnyAction): action is PendingAction {\r\n return action.type.endsWith(\"/pending\");\r\n}\r\n\r\nconst reducer = createReducer(initialState, (builder) => {\r\n builder\r\n .addCase(resetAction, () => initialState)\r\n // matcher can be defined outside as a type predicate function\r\n .addMatcher(isPendingAction, (state, action) => {\r\n state[action.meta.requestId] = \"pending\";\r\n })\r\n .addMatcher(\r\n // matcher can be defined inline as a type predicate function\r\n (action): action is RejectedAction => action.type.endsWith(\"/rejected\"),\r\n (state, action) => {\r\n state[action.meta.requestId] = \"rejected\";\r\n }\r\n )\r\n // matcher can just return boolean and the matcher can receive a generic argument\r\n .addMatcher(\r\n (action) => action.type.endsWith(\"/fulfilled\"),\r\n (state, action) => {\r\n state[action.meta.requestId] = \"fulfilled\";\r\n }\r\n );\r\n});\r\n```\r\n */\r\n addMatcher(\r\n matcher: TypeGuard | ((action: any) => boolean),\r\n reducer: CaseReducer\r\n ): Omit, 'addCase'>\r\n\r\n /**\r\n * Adds a \"default case\" reducer that is executed if no case reducer and no matcher\r\n * reducer was executed for this action.\r\n * @param reducer - The fallback \"default case\" reducer function.\r\n *\r\n * @example\r\n```ts\r\nimport { createReducer } from '@reduxjs/toolkit'\r\nconst initialState = { otherActions: 0 }\r\nconst reducer = createReducer(initialState, builder => {\r\n builder\r\n // .addCase(...)\r\n // .addMatcher(...)\r\n .addDefaultCase((state, action) => {\r\n state.otherActions++\r\n })\r\n})\r\n```\r\n */\r\n addDefaultCase(reducer: CaseReducer): {}\r\n}\r\n\r\nexport function executeReducerBuilderCallback(\r\n builderCallback: (builder: ActionReducerMapBuilder) => void\r\n): [\r\n CaseReducers,\r\n ActionMatcherDescriptionCollection,\r\n CaseReducer | undefined\r\n] {\r\n const actionsMap: CaseReducers = {}\r\n const actionMatchers: ActionMatcherDescriptionCollection = []\r\n let defaultCaseReducer: CaseReducer | undefined\r\n const builder = {\r\n addCase(\r\n typeOrActionCreator: string | TypedActionCreator,\r\n reducer: CaseReducer\r\n ) {\r\n if (process.env.NODE_ENV !== 'production') {\r\n /*\r\n to keep the definition by the user in line with actual behavior, \r\n we enforce `addCase` to always be called before calling `addMatcher`\r\n as matching cases take precedence over matchers\r\n */\r\n if (actionMatchers.length > 0) {\r\n throw new Error(\r\n '`builder.addCase` should only be called before calling `builder.addMatcher`'\r\n )\r\n }\r\n if (defaultCaseReducer) {\r\n throw new Error(\r\n '`builder.addCase` should only be called before calling `builder.addDefaultCase`'\r\n )\r\n }\r\n }\r\n const type =\r\n typeof typeOrActionCreator === 'string'\r\n ? 
typeOrActionCreator\r\n : typeOrActionCreator.type\r\n if (type in actionsMap) {\r\n throw new Error(\r\n 'addCase cannot be called with two reducers for the same action type'\r\n )\r\n }\r\n actionsMap[type] = reducer\r\n return builder\r\n },\r\n addMatcher(\r\n matcher: TypeGuard,\r\n reducer: CaseReducer\r\n ) {\r\n if (process.env.NODE_ENV !== 'production') {\r\n if (defaultCaseReducer) {\r\n throw new Error(\r\n '`builder.addMatcher` should only be called before calling `builder.addDefaultCase`'\r\n )\r\n }\r\n }\r\n actionMatchers.push({ matcher, reducer })\r\n return builder\r\n },\r\n addDefaultCase(reducer: CaseReducer) {\r\n if (process.env.NODE_ENV !== 'production') {\r\n if (defaultCaseReducer) {\r\n throw new Error('`builder.addDefaultCase` can only be called once')\r\n }\r\n }\r\n defaultCaseReducer = reducer\r\n return builder\r\n },\r\n }\r\n builderCallback(builder)\r\n return [actionsMap, actionMatchers, defaultCaseReducer]\r\n}\r\n","import type { AnyAction, Reducer } from 'redux'\r\nimport { createNextState } from '.'\r\nimport type {\r\n ActionCreatorWithoutPayload,\r\n PayloadAction,\r\n PayloadActionCreator,\r\n PrepareAction,\r\n _ActionCreatorWithPreparedPayload,\r\n} from './createAction'\r\nimport { createAction } from './createAction'\r\nimport type {\r\n CaseReducer,\r\n CaseReducers,\r\n ReducerWithInitialState,\r\n} from './createReducer'\r\nimport { createReducer, NotFunction } from './createReducer'\r\nimport type { ActionReducerMapBuilder } from './mapBuilders'\r\nimport { executeReducerBuilderCallback } from './mapBuilders'\r\nimport type { NoInfer } from './tsHelpers'\r\nimport { freezeDraftable } from './utils'\r\n\r\nlet hasWarnedAboutObjectNotation = false\r\n\r\n/**\r\n * An action creator attached to a slice.\r\n *\r\n * @deprecated please use PayloadActionCreator directly\r\n *\r\n * @public\r\n */\r\nexport type SliceActionCreator