0?(n=!1,t.classList.add("active")):(i<0&&(r=t),t.classList.remove("active"))})),n&&(null==r||r.classList.add("active"));const i=null===(e=t.find((t=>t.a$.classList.contains("active"))))||void 0===e?void 0:e.a$;if(i){const t=i.parentElement;t.scrollHeight!=t.offsetHeight&&(t.scrollTop=i.offsetTop-t.offsetHeight/3)}}e(),window.addEventListener("navigation",e),t.length>0&&(r(),document.addEventListener("scroll",(()=>setTimeout(r,1))),window.addEventListener("navigation",r))})),Yo((()=>{const t=()=>{setTimeout((()=>{document.querySelectorAll("iframe[deferred-src]").forEach((t=>{t.src=t.getAttribute("deferred-src")||""}))}),100)};t(),window.addEventListener("navigation",t)})),na(),Yo((()=>{if(ia=location.pathname,!window.__smooth_loading_plugged){window.__smooth_loading_plugged=!0,document.addEventListener("click",(t=>{var e;let r=t.target;for(;r&&!r.href;)r=r.parentNode;if(r&&(null===(e=r.getAttribute("href"))||void 0===e?void 0:e.startsWith("/"))&&"_blank"!==r.getAttribute("target")){const e=r.getAttribute("href")||"";return t.preventDefault(),void oa(e)}}));const t=/^((?!chrome|android).)*safari/i.test(navigator.userAgent);window.addEventListener("popstate",(e=>{location.pathname!==ia&&(ia=location.pathname,t?window.location.href=e.state||window.location.href:oa(e.state||window.location.href,!1))}))}})),function(){let t;function e(e){const r=document.getElementById("-codedoc-toc");if(r){let n;r.querySelectorAll("a").forEach((t=>{t.getAttribute("href")===e?(n||(n=t),t.classList.add("current")):t.classList.remove("current")})),n&&(n!==t&&(null==t||t.dispatchEvent(new CustomEvent("collapse-close",{bubbles:!0}))),n.dispatchEvent(new CustomEvent("collapse-open",{bubbles:!0})),t=n)}}na(),Yo((()=>setTimeout((()=>e(location.pathname)),200))),window.addEventListener("navigation-start",(t=>e(t.detail.url)))}(),function(){let t,e;window.addEventListener("on-navigation-search",(e=>{t=e.detail.query})),window.addEventListener("navigation",(()=>{t&&setTimeout((()=>{n(t||""),t=void 0}),300)})),window.addEventListener("same-page-navigation",(()=>{t&&n(t),t=void 0}));const r=Zn();function n(t){const n=t.toLowerCase(),i=[],o=document.getElementById("-codedoc-container");if(o){const t=(e,n)=>{if(e instanceof Text){const o=e.textContent,s=n.exec((null==o?void 0:o.toLowerCase())||"");if(o&&s){const a=s[0],c=o.substr(0,s.index),u=o.substr(s.index,a.length),l=o.substr(s.index+a.length);let h=Vo()(window.getComputedStyle(e.parentElement).color);h=h.saturationv()<.2?h.isLight()?"teal":"yellow":h.rotate(90).alpha(.35),e.textContent=l;const d=document.createTextNode(c),f=r.create("span",{"data-no-search":!0,style:`\n background: ${h.toString()}; \n display: inline-block; \n vertical-align: middle;\n transform-origin: center;\n transition: transform .15s\n `},u);i.push(f),r.render(r.create("fragment",null,d,f)).before(e),t(d,n),t(e,n)}}else{if(e instanceof HTMLElement&&(e.hasAttribute("data-no-search")||e.classList.contains("icon-font")))return;e.childNodes.forEach((e=>t(e,n)))}};if(t(o,new RegExp(n)),0==i.length){const e=n.split(" ");e.length>0&&(t(o,new RegExp(e.join("\\s+.*\\s+"))),0==i.length&&e.forEach((e=>t(o,new RegExp(e)))))}}e&&e.remove(),e=r.create(aa,{elements:i,query:t}),r.render(e).on(document.body)}window._find=n}(),function(){const t=Zn();Yo((()=>{const e=()=>{document.querySelectorAll("pre>code>.-codedoc-code-line").forEach((e=>{const r=e.querySelector(".-codedoc-line-counter");null==r||r.addEventListener("click",(r=>{r.stopPropagation();const n=function(t){var 
e,r;if(t.getAttribute("id")){if(t.classList.contains("selected")){const n=[];let i;return null===(e=t.parentElement)||void 0===e||e.querySelectorAll(".-codedoc-code-line").forEach(((t,e)=>{t.classList.contains("selected")?i?i[1]=e:i=[e,e]:i&&(n.push(i),i=void 0)})),i&&(n.push(i),i=void 0),window.location.toString().split("#")[0]+"#"+(null===(r=t.getAttribute("id"))||void 0===r?void 0:r.split("-")[0])+"-"+n.map((t=>t[0]===t[1]?`l${t[0]+1}`:`l${t[0]+1}:l${t[1]+1}`)).join("-")}return window.location.toString().split("#")[0]+"#"+t.getAttribute("id")}}(e);Ko(n,(()=>t.render(t.create(ns,null,"Link Copied to Clipboard!",t.create("br",null),t.create("a",{href:n,style:"font-size: 12px; color: white"},n))).on(document.body)))})),null==r||r.addEventListener("mousedown",(t=>t.stopPropagation())),null==r||r.addEventListener("mouseup",(t=>t.stopPropagation()))}))},r=()=>{var t;const e=function(){const t=window.location.toString().split("#")[1];if(t&&t.startsWith("code")){const e=t.split("-"),r=[];return e.slice(1).forEach((t=>{const n=t.split(":").map((t=>parseInt(t.substr(1))));if(2===n.length)for(let t=n[0];t<=n[1];t++){const n=document.querySelector(`#${e[0]}-l${t}`);n&&r.push(n)}else{const t=document.querySelector(`#${e[0]}-l${n[0]}`);t&&r.push(t)}})),r}return[]}();e.length>0&&(null===(t=e[0].parentElement)||void 0===t||t.querySelectorAll(".selected").forEach((t=>t.classList.remove("selected")))),e.forEach((t=>{var e;null==t||t.classList.add("selected"),null===(e=null==t?void 0:t.parentElement)||void 0===e||e.classList.add("has-selection")})),e.length>0&&setTimeout((()=>{var t;return null===(t=e[0])||void 0===t?void 0:t.scrollIntoView({block:"center"})}),300)};e(),r(),window.addEventListener("navigation",(()=>{e(),r()})),window.addEventListener("hashchange",r)}))}(),function(){const t=Zn();Yo((()=>{const e=()=>{let e=1,r=[];const n={};document.querySelectorAll("[data-footnote], [data-footnotes]").forEach((i=>{if(i.hasAttribute("data-footnote")){const o=`--codedoc-footnote-${i.getAttribute("data-footnote-id")||e}`,s=o in n?n[o]:n[o]=e++;i.childNodes.length>0&&(i.setAttribute("id",o),i.setAttribute("data-footnote-index",`${s}`),r.push({index:s,$:i})),i.hasAttribute("data-footnote-block")||t.render(t.create("sup",null,t.create("a",{href:`#${o}`,style:"text-decoration: none"},t.create("b",null,s)))).before(i),i.remove()}else r.sort(((t,e)=>t.index-e.index)).forEach((e=>{t.render(t.create("div",null,t.create("span",null,t.create("a",null,t.create("b",null,e.index)))," ",e.$)).on(i)})),r=[]}))};e(),window.addEventListener("navigation",e)}))}();const Xa={"eEn4kdbhsrFbIhF5rFNzng==":function(t,e){const r=this.theme.classes(ca),n=m();return this.track({bind(){setTimeout((()=>{const i=document.getElementById("-codedoc-toc");if(i){let o,s,a;i.querySelectorAll("a").forEach((t=>{const e=t.getAttribute("href")||"";e!==location.pathname||s?s&&e.startsWith("/")&&!a?a=t:!s&&e.startsWith("/")&&(o=t):s=t})),o&&"false"!==t.prev&&e.render(e.create("a",{class:`${r.button} prev`,href:o.getAttribute("href")||""},e.create("div",null,e.create("span",{class:r.label},t["prev-label"]||"Previous"),e.create("span",{class:r.title},o.textContent)),e.create("span",{class:"icon-font"},t["prev-icon"]||"arrow_back_ios"))).on(n.$),a&&"false"!==t.next&&e.render(e.create("a",{class:`${r.button} 
next`,href:a.getAttribute("href")||""},e.create("div",null,e.create("span",{class:r.label},t["next-label"]||"Next"),e.create("span",{class:r.title},a.textContent)),e.create("span",{class:"icon-font"},t["next-icon"]||"arrow_forward_ios"))).on(n.$)}}),10)}}),e.create("div",{class:r.prevnext,_ref:n})},"BR5Z0MA6Aj4P2zER2ZLlUg==":function(t,e){const r=m();return this.track({bind(){const t=r.$.parentElement;t&&(t.addEventListener("collapse-open",(()=>t.classList.add("open"))),t.addEventListener("collapse-close",(()=>t.classList.remove("open"))),t.addEventListener("collapse-toggle",(()=>t.classList.toggle("open"))))}}),e.create("span",{hidden:!0,_ref:r})},"CEp7LAl0nnWrqHIN8Qnt6g==":function(t,e){const r=new at,n=new RegExp(t.pick),i=new RegExp(t.drop),o={},s=r.pipe(Ns((e=>{return e in o?sr({result:o[e]}):ja.getJSON(`https://api.github.com/search/code?q=${encodeURIComponent(e)}+in:file+path:${t.root}+extension:md+repo:${t.user}/${t.repo}`).pipe((r=()=>sr(void 0),function(t){var e=new Aa(r),n=t.lift(e);return e.caught=n}));var r})),ut((e=>e?function(t){return void 0!==t.result}(e)?e.result:e.items.map((t=>t.path)).filter((t=>n.test(t))).filter((t=>!i.test(t))).map((t=>t.substr(0,t.length-3))).map((e=>e.substr(t.root.length))).map((t=>"/index"===t?"/":t)):[])),Ve());return ar(r,s).pipe(kt((([t,e])=>{e.length>0&&(o[t]=e)}))).subscribe(),e.create(Da,{label:t.label,query:r,results:s})},"KKHOIeoEcuIIR8G+qI09PQ==":function(t,e){const r=this.theme.classes(Va),n=m(),i=er(!1);return this.track({bind(){const t=document.getElementById("-codedoc-toc");t&&(n.resolve(t),"true"===localStorage.getItem("-codedoc-toc-active")&&(i.value=!0),setTimeout((()=>t.classList.add("animated")),1)),window.codedocToggleToC=t=>{i.value=void 0!==t?t:!i.value}}}),this.track(i.to(ce((t=>{n.resolved&&(t?n.$.classList.add("active"):n.$.classList.remove("active")),localStorage.setItem("-codedoc-toc-active",!0===t?"true":"false")})))),e.create("div",{class:ks`${r.tocToggle} ${Es({active:i})}`,onclick:()=>i.value=!i.value},e.create("div",{class:r.bar}),e.create("div",{class:r.bar}),e.create("div",{class:r.bar}))},"Xodqq8f8LP13F67p+cusew==":function(t,e){const r=this.theme.classes(Ua),n=function(){const t=er(Ba.Light).bind();if(window.matchMedia){const e=window.matchMedia("(prefers-color-scheme: dark)");e.matches&&(t.value=Ba.Dark),e.addListener((()=>{e.matches?t.value=Ba.Dark:t.value=Ba.Light}))}return t}(),i=er();let o=!1;return n.to(i),this.track(i.to(ce((t=>{t===Ba.Light?document.body.classList.remove("dark"):document.body.classList.add("dark"),o&&(t!==n.value?localStorage.setItem("dark-mode",t===Ba.Light?"false":"true"):localStorage.removeItem("dark-mode"))})))),this.track({bind(){localStorage.getItem("dark-mode")&&(i.value="true"===localStorage.getItem("dark-mode")?Ba.Dark:Ba.Light),document.body.classList.add("dark-mode-animate"),o=!0}}),e.create("div",{class:r.dmSwitch,onclick:()=>i.value=i.value===Ba.Light?Ba.Dark:Ba.Light},e.create("div",{class:"arc"}),e.create("div",{class:"darc"}),e.create("div",{class:"ray one"}),e.create("div",{class:"ray two"}),e.create("div",{class:"ray three"}),e.create("div",{class:"ray four"}),e.create("div",{class:"ray five"}),e.create("div",{class:"ray six"}),e.create("div",{class:"ray seven"}),e.create("div",{class:"ray eight"}))},"3GUK3xGbIE9fCSzaoTX0bA==":function(t,e){return window.__codedoc_conf=t,e.create("fragment",null)},"U3mNxP3yuRq+EtG14oT75g==":function(t,e){const r=er([]),n=er(),i=m();return this.track({bind(){var t;n.bind();const e=[];null===(t=i.$.parentElement)||void 
0===t||t.querySelectorAll(".tab").forEach((t=>{e.push({title:t.getAttribute("data-tab-title")||"",id:t.getAttribute("data-tab-id")||"",el$:t,icon:t.getAttribute("data-tab-icon")||void 0}),t.classList.contains("selected")&&(n.value=t.getAttribute("data-tab-title"))})),r.bind(),r.value=e}}),e.create("div",{class:"selector",_ref:i},e.create(Na,{of:r,each:t=>e.create("button",{class:Es({selected:n.to(nr((e=>e===t.value.id)))}),"data-tab-title":t.value.title,"data-tab-id":t.value.id,onclick:()=>{r.value.forEach((t=>{t.el$.classList.remove("selected")})),t.value.el$.classList.add("selected"),n.value=t.value.id}},t.value.title,t.value.icon?e.create("span",{class:"icon-font"},t.value.icon):"")}))}},Ya=Zn(),Ga=window.__sdh_transport;window.__sdh_transport=function(t,e,r){if(e in Xa){const n=document.getElementById(t);Ya.render(Ya.create(Xa[e],r)).after(n),n.remove()}else Ga&&Ga(t,e,r)}})()})();
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/assets/codedoc-bundle.js.LICENSE.txt b/support/plugins/mtllm/docs/docs/assets/codedoc-bundle.js.LICENSE.txt
new file mode 100644
index 000000000..c18ab1d93
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/assets/codedoc-bundle.js.LICENSE.txt
@@ -0,0 +1,14 @@
+/*! *****************************************************************************
+Copyright (c) Microsoft Corporation.
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+***************************************************************************** */
diff --git a/support/plugins/mtllm/docs/docs/assets/codedoc-bundle.meta.json b/support/plugins/mtllm/docs/docs/assets/codedoc-bundle.meta.json
new file mode 100644
index 000000000..2147fe0b2
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/assets/codedoc-bundle.meta.json
@@ -0,0 +1,121 @@
+{
+ "init": [
+ {
+ "name": "initJssCs",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/transport/setup-jss.js",
+ "hash": "DaZGb1e3/pQ92YQ1/ipEkg=="
+ },
+ {
+ "name": "installTheme",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/content/theme.ts",
+ "hash": "cpvvFgp6G4m73V2WnXNUkg=="
+ },
+ {
+ "name": "codeSelection",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/components/code/selection.js",
+ "hash": "u6XqveUOG2QFQAoRyDUJfA=="
+ },
+ {
+ "name": "sameLineLengthInCodes",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/components/code/same-line-length.js",
+ "hash": "cfQsooKYclC3SMcGweltjQ=="
+ },
+ {
+ "name": "initHintBox",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/components/code/line-hint/index.js",
+ "hash": "mBz7nf1rrUBdJ/5gNkFJOw=="
+ },
+ {
+ "name": "initCodeLineRef",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/components/code/line-ref/index.js",
+ "hash": "/bci3Hlnk/rvVVTSi7ZGqw=="
+ },
+ {
+ "name": "initSmartCopy",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/components/code/smart-copy.js",
+ "hash": "XANhijDQTXbi82Hw7V2Cbg=="
+ },
+ {
+ "name": "copyHeadings",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/components/heading/copy-headings.js",
+ "hash": "2I0adMFW/ZBAosgDXOWiqw=="
+ },
+ {
+ "name": "contentNavHighlight",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/components/page/contentnav/highlight.js",
+ "hash": "pkSPtTfW/EvOoaZKtpukrA=="
+ },
+ {
+ "name": "loadDeferredIFrames",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/transport/deferred-iframe.js",
+ "hash": "2kFw5/pW6uDfBqYE3JsjzA=="
+ },
+ {
+ "name": "smoothLoading",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/transport/smooth-loading.js",
+ "hash": "964KUgrnj4PNS+t3WDbQKw=="
+ },
+ {
+ "name": "tocHighlight",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/components/page/toc/toc-highlight.js",
+ "hash": "8iLDauHZfBl4iOKRy3ehFQ=="
+ },
+ {
+ "name": "postNavSearch",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/components/page/toc/search/post-nav/index.js",
+ "hash": "bKAJiuvFBuLzrbHQ2PvTRA=="
+ },
+ {
+ "name": "copyLineLinks",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/components/code/line-links/copy-line-link.js",
+ "hash": "diyK2F3bkb+jp/bk7/fX1g=="
+ },
+ {
+ "name": "gatherFootnotes",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/components/footnote/gather-footnotes.js",
+ "hash": "xQ6x4bVhgWv141WiBZFUqA=="
+ }
+ ],
+ "components": [
+ {
+ "name": "ToCPrevNext",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/components/page/toc/prevnext/index.js",
+ "hash": "eEn4kdbhsrFbIhF5rFNzng=="
+ },
+ {
+ "name": "CollapseControl",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/components/collapse/collapse-control.js",
+ "hash": "BR5Z0MA6Aj4P2zER2ZLlUg=="
+ },
+ {
+ "name": "GithubSearch",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/components/misc/github/search.js",
+ "hash": "CEp7LAl0nnWrqHIN8Qnt6g=="
+ },
+ {
+ "name": "ToCToggle",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/components/page/toc/toggle/index.js",
+ "hash": "KKHOIeoEcuIIR8G+qI09PQ=="
+ },
+ {
+ "name": "DarkModeSwitch",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/components/darkmode/index.js",
+ "hash": "Xodqq8f8LP13F67p+cusew=="
+ },
+ {
+ "name": "ConfigTransport",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/transport/config.js",
+ "hash": "3GUK3xGbIE9fCSzaoTX0bA=="
+ },
+ {
+ "name": "TabSelector",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/components/tabs/selector.js",
+ "hash": "U3mNxP3yuRq+EtG14oT75g=="
+ }
+ ],
+ "renderer": {
+ "name": "getRenderer",
+ "filename": "/Users/chandralegend/Desktop/Jaseci/mtllm/docs/.codedoc/node_modules/@codedoc/core/dist/es6/transport/renderer.js",
+ "hash": "cZDb4jXUy1/NvEqBmnP6Pg=="
+ }
+}
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/assets/codedoc-styles.css b/support/plugins/mtllm/docs/docs/assets/codedoc-styles.css
new file mode 100644
index 000000000..31ba981b5
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/assets/codedoc-styles.css
@@ -0,0 +1,1206 @@
+.darklight-0-0-1 {
+ overflow: hidden;
+ position: relative;
+}
+body.dark-mode-animate .darklight-0-0-1>.light, .darklight-0-0-1>.dark {
+ transition: opacity .3s, z-index .3s;
+}
+.darklight-0-0-1>.dark {
+ top: 0;
+ left: 0;
+ right: 0;
+ opacity: 0;
+ z-index: -1;
+ position: absolute;
+}
+body.dark .darklight-0-0-1>.dark {
+ opacity: 1;
+ z-index: 1;
+}
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) .darklight-0-0-1>.dark {
+ opacity: 1;
+ z-index: 1;
+ }
+}
+ body.dark .darklight-0-0-1>.light {
+ opacity: 0;
+ }
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) .darklight-0-0-1>.light {
+ opacity: 0;
+ }
+}
+ .code-0-0-2 {
+ color: #e0e0e0;
+ display: block;
+ outline: none;
+ padding: 24px 0;
+ position: relative;
+ font-size: 13px;
+ background: #212121;
+ box-shadow: 0 6px 12px rgba(0, 0, 0, .25);
+ overflow-x: auto;
+ user-select: none;
+ border-radius: 3px;
+ -webkit-user-select: none;
+ }
+ pre.with-bar .code-0-0-2 {
+ padding-top: 0;
+ }
+ .code-0-0-2 .error, .code-0-0-2 .warning {
+ display: inline-block;
+ position: relative;
+ }
+ .code-0-0-2 .token.keyword {
+ color: #7187ff;
+ }
+ .code-0-0-2 .token.string {
+ color: #69f0ae;
+ }
+ .code-0-0-2 .token.number {
+ color: #ffc400;
+ }
+ .code-0-0-2 .token.boolean {
+ color: #ffc400;
+ }
+ .code-0-0-2 .token.operator {
+ color: #18ffff;
+ }
+ .code-0-0-2 .token.function {
+ color: #e0e0e0;
+ }
+ .code-0-0-2 .token.parameter {
+ color: #e0e0e0;
+ }
+ .code-0-0-2 .token.comment {
+ color: #757575;
+ }
+ .code-0-0-2 .token.tag {
+ color: #ffa372;
+ }
+ .code-0-0-2 .token.builtin {
+ color: #e0e0e0;
+ }
+ .code-0-0-2 .token.punctuation {
+ color: #fcf7bb;
+ }
+ .code-0-0-2 .token.class-name {
+ color: #e0e0e0;
+ }
+ .code-0-0-2 .token.attr-name {
+ color: #f6d186;
+ }
+ .code-0-0-2 .token.attr-value {
+ color: #69f0ae;
+ }
+ .code-0-0-2 .token.plain-text {
+ color: #bdbdbd;
+ }
+ .code-0-0-2 .token.script {
+ color: #e0e0e0;
+ }
+ .code-0-0-2 .token.placeholder {
+ color: #18ffff;
+ }
+ .code-0-0-2 .token.selector {
+ color: #ffa372;
+ }
+ .code-0-0-2 .token.property {
+ color: #f6d186;
+ }
+ .code-0-0-2 .token.important {
+ color: #be79df;
+ }
+ .code-0-0-2.scss .token.function, .code-0-0-2.css .token.function, .code-0-0-2.sass .token.function {
+ color: #9aceff;
+ }
+ .code-0-0-2 .token.key {
+ color: #f6d186;
+ }
+ .code-0-0-2 .error .wave {
+ color: #e8505b;
+ }
+ .code-0-0-2 .warning .wave {
+ color: #ffa931ee;
+ }
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) .code-0-0-2 {
+ color: #e0e0e0;
+ background: #000000;
+ box-shadow: 0 6px 12px #121212;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.keyword {
+ color: #7187ff;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.string {
+ color: #69f0ae;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.number {
+ color: #ffc400;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.boolean {
+ color: #ffc400;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.operator {
+ color: #18ffff;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.function {
+ color: #e0e0e0;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.parameter {
+ color: #e0e0e0;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.comment {
+ color: #757575;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.tag {
+ color: #ffa372;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.builtin {
+ color: #e0e0e0;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.punctuation {
+ color: #fcf7bb;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.class-name {
+ color: #e0e0e0;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.attr-name {
+ color: #f6d186;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.attr-value {
+ color: #69f0ae;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.plain-text {
+ color: #bdbdbd;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.script {
+ color: #e0e0e0;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.placeholder {
+ color: #18ffff;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.selector {
+ color: #ffa372;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.property {
+ color: #f6d186;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.important {
+ color: #be79df;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2.scss .token.function, body:not(.dark-mode-animate) .code-0-0-2.css .token.function, body:not(.dark-mode-animate) .code-0-0-2.sass .token.function {
+ color: #9aceff;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .token.key {
+ color: #f6d186;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .error .wave {
+ color: #e8505b;
+ }
+ body:not(.dark-mode-animate) .code-0-0-2 .warning .wave {
+ color: #ffa931ee;
+ }
+}
+ body.dark .code-0-0-2 {
+ color: #e0e0e0;
+ background: #000000;
+ box-shadow: 0 6px 12px #121212;
+ }
+ body.dark .code-0-0-2 .token.keyword {
+ color: #7187ff;
+ }
+ body.dark .code-0-0-2 .token.string {
+ color: #69f0ae;
+ }
+ body.dark .code-0-0-2 .token.number {
+ color: #ffc400;
+ }
+ body.dark .code-0-0-2 .token.boolean {
+ color: #ffc400;
+ }
+ body.dark .code-0-0-2 .token.operator {
+ color: #18ffff;
+ }
+ body.dark .code-0-0-2 .token.function {
+ color: #e0e0e0;
+ }
+ body.dark .code-0-0-2 .token.parameter {
+ color: #e0e0e0;
+ }
+ body.dark .code-0-0-2 .token.comment {
+ color: #757575;
+ }
+ body.dark .code-0-0-2 .token.tag {
+ color: #ffa372;
+ }
+ body.dark .code-0-0-2 .token.builtin {
+ color: #e0e0e0;
+ }
+ body.dark .code-0-0-2 .token.punctuation {
+ color: #fcf7bb;
+ }
+ body.dark .code-0-0-2 .token.class-name {
+ color: #e0e0e0;
+ }
+ body.dark .code-0-0-2 .token.attr-name {
+ color: #f6d186;
+ }
+ body.dark .code-0-0-2 .token.attr-value {
+ color: #69f0ae;
+ }
+ body.dark .code-0-0-2 .token.plain-text {
+ color: #bdbdbd;
+ }
+ body.dark .code-0-0-2 .token.script {
+ color: #e0e0e0;
+ }
+ body.dark .code-0-0-2 .token.placeholder {
+ color: #18ffff;
+ }
+ body.dark .code-0-0-2 .token.selector {
+ color: #ffa372;
+ }
+ body.dark .code-0-0-2 .token.property {
+ color: #f6d186;
+ }
+ body.dark .code-0-0-2 .token.important {
+ color: #be79df;
+ }
+ body.dark .code-0-0-2.scss .token.function, body.dark .code-0-0-2.css .token.function, body.dark .code-0-0-2.sass .token.function {
+ color: #9aceff;
+ }
+ body.dark .code-0-0-2 .token.key {
+ color: #f6d186;
+ }
+ body.dark .code-0-0-2 .error .wave {
+ color: #e8505b;
+ }
+ body.dark .code-0-0-2 .warning .wave {
+ color: #ffa931ee;
+ }
+ .code-0-0-2 .error .wave, .code-0-0-2 .warning .wave {
+ left: 0;
+ right: 0;
+ bottom: -1rem;
+ position: absolute;
+ font-size: 1.5rem;
+ font-weight: 100;
+ letter-spacing: -.43rem;
+ }
+ .lineCounter-0-0-3 {
+ left: 0;
+ color: transparent;
+ width: 24px;
+ height: 1.25rem;
+ display: inline-flex;
+ position: sticky;
+ font-size: 10px;
+ background: #212121;
+ transition: color .3s, background .3s;
+ align-items: center;
+ border-right: 2px solid rgba(255, 255, 255, .015);
+ margin-right: 12px;
+ padding-right: 12px;
+ flex-direction: row-reverse;
+ vertical-align: top;
+ }
+ .lineCounter-0-0-3.prim {
+ color: #616161;
+ }
+ .lineCounter-0-0-3 .-codedoc-line-link {
+ top: -2px;
+ left: 0;
+ color: #e0e0e0;
+ right: 0;
+ bottom: 0;
+ opacity: 0;
+ position: absolute;
+ font-size: 12px;
+ text-align: center;
+ transition: opacity .15s;
+ }
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) .lineCounter-0-0-3 {
+ background: #000000;
+ border-color: rgba(255, 255, 255, .015);
+ }
+ body:not(.dark-mode-animate) .lineCounter-0-0-3.prim {
+ color: #616161;
+ }
+ body:not(.dark-mode-animate) .lineCounter-0-0-3 .-codedoc-line-link {
+ color: #e0e0e0;
+ }
+}
+ body.dark .lineCounter-0-0-3 {
+ background: #000000;
+ border-color: rgba(255, 255, 255, .015);
+ }
+ body.dark .lineCounter-0-0-3.prim {
+ color: #616161;
+ }
+ body.dark .lineCounter-0-0-3 .-codedoc-line-link {
+ color: #e0e0e0;
+ }
+ .lineCounter-0-0-3 .-codedoc-line-link .icon-font {
+ transform: scale(.75);
+ }
+ .termPrefix-0-0-4 {
+ color: #616161;
+ transition: color .3s;
+ font-weight: bold;
+ margin-right: 8px;
+ }
+ body.dark .termPrefix-0-0-4 {
+ color: #616161;
+ }
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) .termPrefix-0-0-4 {
+ color: #616161;
+ }
+}
+ .termOutput-0-0-5 {
+ color: #757575;
+ display: block;
+ padding: 8px;
+ background: rgba(255, 255, 255, .06);
+ transition: color .3s, background .3s;
+ padding-left: 48px;
+ }
+ body.dark .termOutput-0-0-5 {
+ color: #757575;
+ background: rgba(255, 255, 255, .06);
+ }
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) .termOutput-0-0-5 {
+ color: #757575;
+ background: rgba(255, 255, 255, .06);
+ }
+}
+ .termOutput-0-0-5:last-child {
+ margin-bottom: -24px;
+ padding-bottom: 12px;
+ }
+ .line-0-0-6 {
+ cursor: pointer;
+ height: 1.25rem;
+ display: inline-block;
+ min-width: 100%;
+ background: transparent;
+ transition: opacity .15s, color .3s, background .3s;
+ }
+ .has-selection .line-0-0-6:not(.selected) {
+ opacity: 0.35;
+ transition: opacity 3s;
+ }
+ .line-0-0-6.highlight {
+ color: #ffffff;
+ background: rgb(40, 46, 73);
+ }
+ .line-0-0-6.added {
+ color: #ffffff;
+ position: relative;
+ background: #002d2d;
+ }
+ .line-0-0-6.removed {
+ color: #ffffff;
+ position: relative;
+ background: #3e0c1b;
+ }
+ .line-0-0-6.selected .lineCounter-0-0-3 {
+ border-color: #7187ff !important;
+ }
+ .line-0-0-6:hover, .line-0-0-6.selected {
+ background: #3b3b3b;
+ }
+ .line-0-0-6:hover .lineCounter-0-0-3 {
+ border-color: rgba(255, 255, 255, .1);
+ }
+ body.dark .line-0-0-6:hover .lineCounter-0-0-3 {
+ border-color: rgba(255, 255, 255, .1);
+ }
+ .line-0-0-6:hover .lineCounter-0-0-3, .line-0-0-6.selected .lineCounter-0-0-3 {
+ color: #7187ff;
+ background: #3b3b3b !important;
+ }
+ body.dark .line-0-0-6:hover, body.dark .line-0-0-6.selected {
+ background: #1a1a1a !important;
+ }
+ .line-0-0-6:hover .lineCounter-0-0-3:hover, .line-0-0-6.selected .lineCounter-0-0-3:hover {
+ color: transparent !important;
+ }
+ .line-0-0-6:hover .lineCounter-0-0-3:hover .-codedoc-line-link, .line-0-0-6.selected .lineCounter-0-0-3:hover .-codedoc-line-link {
+ opacity: 1;
+ }
+ body.dark .line-0-0-6:hover .lineCounter-0-0-3, body.dark .line-0-0-6.selected .lineCounter-0-0-3 {
+ color: #7187ff;
+ background: #1a1a1a !important;
+ }
+ body.dark .line-0-0-6.selected .lineCounter-0-0-3 {
+ border-color: #7187ff !important;
+ }
+ .line-0-0-6.removed:before {
+ top: -.05rem;
+ left: 2.5rem;
+ color: #ff0000;
+ content: "-";
+ position: absolute;
+ font-size: 1rem;
+ font-weight: bold;
+ }
+ .line-0-0-6.removed .lineCounter-0-0-3 {
+ background: #3e0c1b;
+ }
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) .line-0-0-6.removed {
+ color: #ffffff;
+ background: #3e0c1b;
+ }
+ body:not(.dark-mode-animate) .line-0-0-6.removed:before {
+ color: #ff0000;
+ }
+ body:not(.dark-mode-animate) .line-0-0-6.removed .lineCounter-0-0-3 {
+ background: #3e0c1b;
+ }
+}
+ body.dark .line-0-0-6.removed {
+ color: #ffffff;
+ background: #3e0c1b;
+ }
+ body.dark .line-0-0-6.removed:before {
+ color: #ff0000;
+ }
+ body.dark .line-0-0-6.removed .lineCounter-0-0-3 {
+ background: #3e0c1b;
+ }
+ .line-0-0-6.added:before {
+ top: -.05rem;
+ left: 2.5rem;
+ color: #44e08a;
+ content: "+";
+ position: absolute;
+ font-size: 1rem;
+ transition: color .3s;
+ font-weight: bold;
+ }
+ .line-0-0-6.added .lineCounter-0-0-3 {
+ background: #002d2d;
+ }
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) .line-0-0-6.added {
+ color: #ffffff;
+ background: #002d2d;
+ }
+ body:not(.dark-mode-animate) .line-0-0-6.added:before {
+ color: #44e08a;
+ }
+ body:not(.dark-mode-animate) .line-0-0-6.added .lineCounter-0-0-3 {
+ background: #002d2d;
+ }
+}
+ body.dark .line-0-0-6.added {
+ color: #ffffff;
+ background: #002d2d;
+ }
+ body.dark .line-0-0-6.added:before {
+ color: #44e08a;
+ }
+ body.dark .line-0-0-6.added .lineCounter-0-0-3 {
+ background: #002d2d;
+ }
+ .line-0-0-6.highlight .lineCounter-0-0-3 {
+ background: rgb(40, 46, 73);
+ }
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) .line-0-0-6.highlight {
+ color: #ffffff;
+ background: rgb(28, 29, 48);
+ }
+ body:not(.dark-mode-animate) .line-0-0-6.highlight .lineCounter-0-0-3 {
+ background: rgb(28, 29, 48);
+ }
+}
+ body.dark .line-0-0-6.highlight {
+ color: #ffffff;
+ background: rgb(28, 29, 48);
+ }
+ body.dark .line-0-0-6.highlight .lineCounter-0-0-3 {
+ background: rgb(28, 29, 48);
+ }
+ .wmbar-0-0-7 {
+ left: 0;
+ display: none;
+ padding: 16px;
+ position: sticky;
+ }
+ .wmbar-0-0-7>span {
+ display: block;
+ opacity: 0.5;
+ flex-grow: 1;
+ font-size: 12px;
+ text-align: center;
+ font-family: sans-serif;
+ margin-right: 64px;
+ }
+ .wmbar-0-0-7>span:first-child, .wmbar-0-0-7>span:nth-child(2), .wmbar-0-0-7>span:nth-child(3) {
+ width: 8px;
+ height: 8px;
+ opacity: 1;
+ flex-grow: 0;
+ margin-right: 8px;
+ border-radius: 8px;
+ }
+ pre.with-bar .wmbar-0-0-7 {
+ display: flex;
+ }
+ .wmbar-0-0-7>span:first-child:first-child, .wmbar-0-0-7>span:nth-child(2):first-child, .wmbar-0-0-7>span:nth-child(3):first-child {
+ background: rgb(255, 95, 86);
+ }
+ .wmbar-0-0-7>span:first-child:nth-child(2), .wmbar-0-0-7>span:nth-child(2):nth-child(2), .wmbar-0-0-7>span:nth-child(3):nth-child(2) {
+ background: rgb(255, 189, 46);
+ }
+ .wmbar-0-0-7>span:first-child:nth-child(3), .wmbar-0-0-7>span:nth-child(2):nth-child(3), .wmbar-0-0-7>span:nth-child(3):nth-child(3) {
+ background: rgb(39, 201, 63);
+ }
+ .collapse-0-0-8>.label {
+ cursor: pointer;
+ margin: 8px 0;
+ display: flex;
+ align-items: center;
+ user-select: none;
+ }
+ .collapse-0-0-8>.content {
+ opacity: 0;
+ max-height: 0;
+ transition: opacity .3s;
+ visibility: hidden;
+ border-left: 2px solid rgba(224, 224, 224, 0.5);
+ padding-left: 16px;
+ }
+ .collapse-0-0-8.open>.content {
+ opacity: 1;
+ max-height: none;
+ visibility: visible;
+ }
+ .collapse-0-0-8.open>.label .icon-font {
+ transform: rotate(90deg);
+ }
+ body.dark-mode-animate .collapse-0-0-8>.content {
+ transition: transform .15s, opacity .15s, border-color .3s;
+ }
+ body.dark .collapse-0-0-8>.content {
+ border-color: rgba(49, 49, 49, 0.5);
+ }
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) .collapse-0-0-8>.content {
+ border-color: rgba(49, 49, 49, 0.5);
+ }
+}
+ .collapse-0-0-8>.label .text {
+ flex-grow: 1;
+ }
+ .collapse-0-0-8>.label .icon-font {
+ margin-right: 32px;
+ }
+ .collapse-0-0-8>.label:hover {
+ color: #DC5F00;
+ transition: color .15s;
+ }
+ body.dark .collapse-0-0-8>.label:hover {
+ color: #DC5F00;
+ }
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) .collapse-0-0-8>.label:hover {
+ color: #DC5F00;
+ }
+}
+ body.dark-mode-animate .collapse-0-0-8>.label .icon-font {
+ transition: transform .15s;
+ }
+ .header-0-0-9 {
+ top: 0;
+ right: 0;
+ padding: 32px;
+ z-index: 100;
+ position: fixed;
+ text-align: right;
+ }
+ .footer-0-0-10 {
+ left: 0;
+ right: 0;
+ bottom: 0;
+ height: 64px;
+ display: flex;
+ z-index: 102;
+ position: fixed;
+ background: rgba(245, 245, 245, 0.85);
+ box-shadow: 0 -2px 6px rgba(0, 0, 0, .03);
+ align-items: center;
+ backdrop-filter: blur(12px);
+ justify-content: center;
+ -webkit-backdrop-filter: blur(12px);
+ }
+ body.dark-mode-animate .footer-0-0-10 {
+ transition: background .3s;
+ }
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) .footer-0-0-10 {
+ background: rgba(33, 33, 33, 0.85);
+ }
+}
+ body.dark .footer-0-0-10 {
+ background: rgba(33, 33, 33, 0.85);
+ }
+ .footer-0-0-10 .main {
+ overflow: hidden;
+ flex-grow: 1;
+ text-align: center;
+ }
+ .footer-0-0-10 .left {
+ padding-left: 32px;
+ }
+ .footer-0-0-10 .right {
+ padding-right: 32px;
+ }
+@media screen and (max-width: 800px) {
+ .footer-0-0-10 .left {
+ padding-left: 16px;
+ }
+ .footer-0-0-10 .right {
+ padding-right: 16px;
+ }
+}
+ .footer-0-0-10 .main>.inside {
+ display: inline-flex;
+ overflow: auto;
+ max-width: 100%;
+ align-items: center;
+ }
+ .footer-0-0-10 .main>.inside hr {
+ width: 2px;
+ border: none;
+ height: 16px;
+ margin: 16px;
+ background: #e0e0e0;
+ }
+ .footer-0-0-10 .main>.inside a {
+ text-decoration: none;
+ }
+ .footer-0-0-10 .main>.inside a:hover {
+ text-decoration: underline ;
+ }
+ body.dark-mode-animate .footer-0-0-10 .main>.inside hr {
+ transition: background .3s;
+ }
+ body.dark .footer-0-0-10 .main>.inside hr {
+ background: #313131;
+ }
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) .footer-0-0-10 .main>.inside hr {
+ background: #313131;
+ }
+}
+ .toc-0-0-11 {
+ top: 0;
+ left: 0;
+ width: calc(50vw - 464px);
+ bottom: 0;
+ display: flex;
+ z-index: 101;
+ position: fixed;
+ transform: translateX(-50vw);
+ background: #f1f1f1;
+ border-right: 1px solid #e7e7e7;
+ flex-direction: column;
+ padding-bottom: 64px;
+ }
+ body.dark-mode-animate .toc-0-0-11 {
+ transition: background .3s, border-color .3s;
+ }
+ body.dark .toc-0-0-11 {
+ background: #1f1f1f;
+ border-color: #282828;
+ }
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) .toc-0-0-11 {
+ background: #1f1f1f;
+ border-color: #282828;
+ }
+}
+@media screen and (max-width: 1200px) {
+ .toc-0-0-11 {
+ width: 100vw;
+ transform: translateX(-110vw);
+ }
+}
+ .toc-0-0-11.animated {
+ transition: transform .3s;
+ }
+ .toc-0-0-11.active {
+ transform: translateX(0);
+ }
+ .toc-0-0-11 p {
+ margin: 0;
+ }
+ .toc-0-0-11 a {
+ border: 1px solid transparent;
+ display: block;
+ padding: 8px;
+ margin-left: -8px;
+ border-right: none;
+ margin-right: 1px;
+ border-radius: 3px;
+ text-decoration: none;
+ }
+ body.dark-mode-animate .toc-0-0-11 a {
+ transition: border-color .3s, background .3s;
+ }
+ .toc-0-0-11 a:hover {
+ background: #f5f5f5;
+ text-decoration: none;
+ }
+ .toc-0-0-11 a.current {
+ background: #f5f5f5;
+ border-color: #e7e7e7;
+ margin-right: 0;
+ border-top-right-radius: 0;
+ border-bottom-right-radius: 0;
+ }
+ body.dark .toc-0-0-11 a.current {
+ background: hsl(0, 0%, 13.2%);
+ border-color: #282828;
+ }
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) .toc-0-0-11 a.current {
+ background: #212121;
+ border-color: #282828;
+ }
+}
+@media screen and (max-width: 1200px) {
+ .toc-0-0-11 a.current {
+ border-right: 1px solid;
+ margin-right: -8px;
+ border-radius: 3px;
+ }
+}
+ body.dark .toc-0-0-11 a:hover {
+ background: hsl(0, 0%, 13.2%);
+ }
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) .toc-0-0-11 a:hover {
+ background: hsl(0, 0%, 13.2%);
+ }
+}
+ body.dark-mode-animate .toc-0-0-11.animated {
+ transition: transform .3s, background .3s, border-color .3s;
+ }
+ .content-0-0-12 {
+ padding: 32px;
+ overflow: auto;
+ flex-grow: 1;
+ margin-right: -1px;
+ padding-right: 0;
+ }
+@media screen and (max-width: 1200px) {
+ .content-0-0-12 {
+ margin-right: 0;
+ padding-right: 32px;
+ }
+}
+ .contentnav-0-0-14 {
+ right: 0;
+ width: calc(50vw - 496px);
+ bottom: 96px;
+ overflow: auto;
+ position: fixed;
+ font-size: 12px;
+ max-height: 45vh;
+ border-left: 1px dashed #e0e0e0;
+ margin-left: 64px;
+ padding-left: 48px;
+ scroll-behavior: initial;
+ }
+@media screen and (max-width: 1200px) {
+ .contentnav-0-0-14 {
+ display: none;
+ }
+}
+ .contentnav-0-0-14 a {
+ color: #424242;
+ display: block;
+ opacity: 0.2;
+ text-decoration: none;
+ }
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) .contentnav-0-0-14 {
+ border-color: #313131;
+ }
+ body:not(.dark-mode-animate) .contentnav-0-0-14 a {
+ color: #eeeeee;
+ }
+ body:not(.dark-mode-animate) .contentnav-0-0-14 a:hover, body:not(.dark-mode-animate) .contentnav-0-0-14 a.active {
+ color: #DC5F00;
+ }
+}
+ body.dark .contentnav-0-0-14 {
+ border-color: #313131;
+ }
+ body.dark .contentnav-0-0-14 a {
+ color: #eeeeee;
+ }
+ body.dark .contentnav-0-0-14 a:hover, body.dark .contentnav-0-0-14 a.active {
+ color: #DC5F00;
+ }
+ body.dark-mode-animate .contentnav-0-0-14 a {
+ transition: color .3s, opacity .3s;
+ }
+ .contentnav-0-0-14 a:hover, .contentnav-0-0-14 a.active {
+ color: #DC5F00;
+ opacity: 1;
+ }
+ .contentnav-0-0-14 a.h2 {
+ margin-left: 12px;
+ }
+ .contentnav-0-0-14 a.h3 {
+ margin-left: 24px;
+ }
+ .contentnav-0-0-14 a.h4 {
+ margin-left: 36px;
+ }
+ .contentnav-0-0-14 a.h5 {
+ margin-left: 48px;
+ }
+ .contentnav-0-0-14 a.h6 {
+ margin-left: 60px;
+ }
+* {
+ touch-action: manipulation;
+ scroll-behavior: smooth;
+ -webkit-tap-highlight-color: transparent;
+}
+body {
+ color: #424242;
+ width: 100vw;
+ margin: 0;
+ padding: 0;
+ background: #f5f5f5;
+ overflow-x: hidden;
+ backface-visibility: hidden;
+ -webkit-backface-visibility: hidden;
+}
+body.dark-mode-animate {
+ transition: color .3s, background .3s;
+}
+a {
+ color: #DC5F00;
+}
+a:hover {
+ text-decoration: underline;
+ text-decoration-thickness: 2px;
+}
+body.dark-mode-animate a {
+ transition: color .3s;
+}
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) a {
+ color: #DC5F00;
+ }
+}
+body.dark a {
+ color: #DC5F00;
+}
+.container {
+ margin: 0 auto;
+ padding: 96px 16px;
+ max-width: 768px;
+ transition: opacity .15s;
+}
+table {
+ margin: 0 auto;
+ overflow: auto;
+ max-width: 100%;
+ min-width: 400px;
+ table-layout: fixed;
+ border-collapse: collapse;
+}
+table th, table td {
+ padding: 8px 16px;
+ text-align: left;
+}
+table th {
+ border-bottom: 1px solid #C8C8C8;
+}
+table td {
+ border-bottom: 1px solid #e0e0e0;
+}
+table tr:nth-child(even) {
+ background: #eeeeee;
+ border-radius: 3px;
+}
+table tr:last-child > td {
+ border-bottom: none;
+}
+body.dark table tr:nth-child(even) {
+ background: #282828;
+}
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) table tr:nth-child(even) {
+ background: #282828;
+ }
+}
+body.dark-mode-animate table tr:nth-child(even) {
+ transition: background .3s;
+}
+body.dark table td {
+ border-color: #313131;
+}
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) table td {
+ border-color: #313131;
+ }
+}
+body.dark table th {
+ border-color: #4D4D4D;
+}
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) table th {
+ border-color: #4D4D4D;
+ }
+}
+body.dark-mode-animate table th, body.dark-mode-animate table td {
+ transition: border-color .3s;
+}
+hr {
+ border: none;
+ margin: 64px;
+ background: none;
+ border-top: 1px solid #e0e0e0;
+}
+body.dark-mode-animate hr {
+ transition: border-color .3s;
+}
+body.dark hr {
+ border-color: #313131;
+}
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) hr {
+ border-color: #313131;
+ }
+}
+#-codedoc-toc hr {
+ margin: 16px 0;
+ margin-right: 32px;
+}
+blockquote {
+ color: #757575;
+ margin: 0;
+ padding: 16px 40px;
+ position: relative;
+ background: #eeeeee;
+ border-radius: 3px;
+}
+body.dark-mode-animate blockquote {
+ transition: color .3s, background .3s;
+}
+blockquote:after {
+ top: 16px;
+ left: 16px;
+ width: 8px;
+ bottom: 16px;
+ content: '';
+ display: block;
+ position: absolute;
+ background: radial-gradient(circle at center, #e0e0e0 50%, transparent 52%),transparent;
+ background-size: 4px 4px;
+}
+body.dark-mode-animate blockquote:after {
+ transition: color .3s, background .3s;
+}
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) {
+ color: #eeeeee;
+ background: #212121;
+ }
+ body:not(.dark-mode-animate) blockquote {
+ color: #cacaca;
+ background: #282828;
+ }
+ body:not(.dark-mode-animate) blockquote:after {
+ background: radial-gradient(circle at center, #363636 50%, transparent 52%),transparent;
+ background-size: 4px 4px;
+ }
+}
+body.dark {
+ color: #eeeeee;
+ background: #212121;
+}
+body.dark blockquote {
+ color: #cacaca;
+ background: #282828;
+}
+body.dark blockquote:after {
+ background: radial-gradient(circle at center, #363636 50%, transparent 52%),transparent;
+ background-size: 4px 4px;
+}
+img {
+ max-width: 100%;
+}
+iframe {
+ width: 100%;
+ border: none;
+ background: white;
+ border-radius: 3px;
+}
+code {
+ color: #616161;
+ padding: 4px;
+ font-size: .85em;
+ background: #eeeeee;
+ border-radius: 3px;
+}
+body.dark-mode-animate code {
+ transition: color .3s, background .3s;
+}
+body.dark code {
+ color: #e0e0e0;
+ background: #282828;
+}
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) code {
+ color: #e0e0e0;
+ background: #282828;
+ }
+}
+ .heading-0-0-15 {
+ cursor: pointer;
+ position: relative;
+ }
+ .anchor-0-0-16 {
+ top: 0;
+ left: -32px;
+ bottom: 0;
+ display: flex;
+ opacity: 0;
+ position: absolute;
+ transform: translateX(-8px);
+ transition: opacity .1s, transform .1s;
+ align-items: center;
+ padding-right: 8px;
+ }
+ .heading-0-0-15:hover .anchor-0-0-16 {
+ opacity: 0.5;
+ transform: none;
+ }
+@media screen and (max-width: 1200px) {
+ .anchor-0-0-16 {
+ display: none;
+ }
+}
+ .heading-0-0-15:hover .anchor-0-0-16:hover {
+ opacity: 1;
+ }
+ .tabs-0-0-17 .selector {
+ overflow: auto;
+ white-space: nowrap;
+ margin-bottom: -8px;
+ padding-right: 24px;
+ padding-bottom: 8px;
+ }
+ .tabs-0-0-17 .tab {
+ border: 1px solid #e0e0e0;
+ padding: 8px;
+ border-radius: 3px;
+ }
+ .tabs-0-0-17 .tab.first {
+ border-top-left-radius: 0;
+ }
+ body.dark-mode-animate .tabs-0-0-17 .tab {
+ transition: border-color .3s;
+ }
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) .tabs-0-0-17 .tab {
+ border-color: #313131;
+ }
+}
+ body.dark .tabs-0-0-17 .tab {
+ border-color: #313131;
+ }
+ .tabs-0-0-17 .tab:not(.selected) {
+ display: none;
+ }
+ .tabs-0-0-17 .tab>pre:first-child {
+ margin-top: 0;
+ }
+ .tabs-0-0-17 .tab>pre:last-child {
+ margin-bottom: 0;
+ }
+ .tabs-0-0-17 .selector button {
+ color: #424242;
+ border: 1px solid #e0e0e0;
+ cursor: pointer;
+ margin: 0;
+ opacity: 0.35;
+ outline: none;
+ padding: 4px 8px;
+ position: relative;
+ font-size: inherit;
+ min-width: 96px;
+ transform: scale(.9);
+ background: #f5f5f5;
+ font-family: inherit;
+ border-radius: 8px;
+ transform-origin: bottom center;
+ border-bottom-left-radius: 0;
+ border-bottom-right-radius: 0;
+ }
+ .tabs-0-0-17 .selector button:after {
+ left: 0;
+ right: 0;
+ bottom: -1px;
+ height: 2px;
+ content: ' ';
+ position: absolute;
+ background: #f5f5f5;
+ }
+ .tabs-0-0-17 .selector button:hover {
+ opacity: 1;
+ }
+ .tabs-0-0-17 .selector button.selected {
+ opacity: 1;
+ transform: scale(1);
+ }
+ body.dark-mode-animate .tabs-0-0-17 .selector button {
+ transition: background .3s, border-color .3s, color .3s, opacity .1s, transform .1s;
+ }
+@media (prefers-color-scheme: dark) {
+ body:not(.dark-mode-animate) .tabs-0-0-17 .selector button {
+ color: #eeeeee;
+ background: #212121;
+ border-color: #313131;
+ }
+ body:not(.dark-mode-animate) .tabs-0-0-17 .selector button:after {
+ background: #212121;
+ }
+}
+ body.dark .tabs-0-0-17 .selector button {
+ color: #eeeeee;
+ background: #212121;
+ border-color: #313131;
+ }
+ .tabs-0-0-17 .selector button .icon-font {
+ opacity: 0.5;
+ font-size: 18px;
+ margin-left: 16px;
+ vertical-align: middle;
+ }
+ body.dark .tabs-0-0-17 .selector button:after {
+ background: #212121;
+ }
+ body.dark-mode-animate .tabs-0-0-17 .selector button:after {
+ transition: height .1s, bottom .1s, background .3s;
+ }
+ .tabs-0-0-17 .selector button.selected:after {
+ bottom: -4px;
+ height: 8px;
+ }
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/assets/dark.svg b/support/plugins/mtllm/docs/docs/assets/dark.svg
new file mode 100644
index 000000000..24a9080ba
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/assets/dark.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/assets/light.svg b/support/plugins/mtllm/docs/docs/assets/light.svg
new file mode 100644
index 000000000..60134b36a
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/assets/light.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/building-blocks/functions_methods.html b/support/plugins/mtllm/docs/docs/building-blocks/functions_methods.html
new file mode 100644
index 000000000..e89527733
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/building-blocks/functions_methods.html
@@ -0,0 +1,54 @@
+MTLLM API Documentation | Functions and Methods

Functions and Methods

Functions and methods play a crucial role in implementing various functionalities in a traditional GenAI application. In jaclang, we have designed these functions and methods to be highly flexible and powerful. Surprisingly, they don't even require a function or method body, thanks to MTLLM's `by <your_llm>` syntax. This section will guide you on how to effectively utilize functions and methods in jaclang using MTLLM.

Functions

Functions/Abilities in jaclang are defined using the `can` keyword. They can be used to define a set of actions. A normal function looks like this in jaclang:

```jac
can <function_name>(<parameter: parameter_type>, ...) -> <return_type> {
    <function_body>;
}
```

In a traditional GenAI application, you would make API calls inside the function body to perform the desired action. In jaclang, however, you can define the function using the `by <your_llm>` syntax. This way, you can define the function without a body and let the MTLLM model handle the implementation. Here is an example:

```jac
can greet(name: str) -> str by <your_llm>();
```
In the above example, the `greet` function takes a `name` parameter of type `str` and returns a `str`. The function is defined using the `by <your_llm>` syntax, which means the implementation of the function is handled by the MTLLM.

Below is an example where we define a function `get_expert` that takes a question as input and returns the best expert to answer it as a string, using mtllm with an OpenAI model and the `Reason` method. The `get_answer` function takes a question and an expert as input and returns the answer, using mtllm with an OpenAI model without any method. We can then call these functions like normal functions.
```jac
import:py from mtllm.llms, OpenAI;

glob llm = OpenAI(model_name="gpt-4o");

can get_expert(question: str) -> 'Best Expert to Answer the Question': str by llm(method='Reason');
can get_answer(question: str, expert: str) -> str by llm();

with entry {
    question = "What are Large Language Models?";
    expert = get_expert(question);
    answer = get_answer(question, expert);
    print(f"{expert} says: '{answer}'");
}
```
Here's another example:

```jac
import:py from mtllm.llms, OpenAI;

glob llm = OpenAI(model_name="gpt-4o");

can 'Get a Joke with a Punchline'
get_joke() -> tuple[str, str] by llm();

with entry {
    (joke, punchline) = get_joke();
    print(f"{joke}: {punchline}");
}
```
In the above example, the `get_joke` function returns a tuple of two strings: the joke and its punchline. The function is defined using the `by <your_llm>` syntax, which means the implementation is handled by the MTLLM. You can add a semstr to the function to make it more specific.
Methods

Methods in jaclang are also defined using the `can` keyword. They can be used to define a set of actions that are specific to a class. A normal method looks like this in jaclang:

```jac
obj ClassName {
    has parameter: parameter_type;

    can <method_name>(<parameter: parameter_type>, ...) -> <return_type> {
        <method_body>;
    }
}
```
In a traditional GenAI application, you would make API calls inside the method body to perform the desired action, using the `self` keyword to get the necessary information. In jaclang, however, you can define the method using the `by <your_llm>` syntax. This way, you can define the method without a body and let the MTLLM model handle the implementation. Here is an example:

```jac
obj Person {
    has name: str;

    can greet() -> str by <your_llm>(incl_info=(self));
}
```
In the above example, the `greet` method returns a `str`. The method is defined using the `by <your_llm>` syntax, which means the implementation of the method is handled by the MTLLM. The `incl_info=(self)` parameter includes the attributes of the `Person` object (such as `name`) as an information source for the MTLLM.
In the example below, we define a class `Essay` with a method `get_essay_judgement` that takes a criterion as input and returns a judgement for the essay based on that criterion, using mtllm with an OpenAI model after a reasoning step. The `get_reviewer_summary` method takes a dictionary of judgements as input and returns a reviewer summary based on them. The `give_grade` method takes the summary as input and returns a grade for the essay. We can call these methods like normal methods.
```jac
import:py from mtllm.llms, OpenAI;

glob llm = OpenAI(model_name="gpt-4o");

obj Essay {
    has essay: str;

    can get_essay_judgement(criteria: str) -> str by llm(incl_info=(self.essay));
    can get_reviewer_summary(judgements: dict) -> str by llm(incl_info=(self.essay));
    can give_grade(summary: str) -> 'A to D': str by llm();
}

with entry {
    essay = "With a population of approximately 45 million Spaniards and 3.5 million immigrants,"
        "Spain is a country of contrasts where the richness of its culture blends it up with"
        "the variety of languages and dialects used. Being one of the largest economies worldwide,"
        "and the second largest country in Europe, Spain is a very appealing destination for tourists"
        "as well as for immigrants from around the globe. Almost all Spaniards are used to speaking at"
        "least two different languages, but protecting and preserving that right has not been"
        "easy for them.Spaniards have had to struggle with war, ignorance, criticism and the governments,"
        "in order to preserve and defend what identifies them, and deal with the consequences.";
    essay = Essay(essay);
    criterias = ["Clarity", "Originality", "Evidence"];
    judgements = {};
    for criteria in criterias {
        judgement = essay.get_essay_judgement(criteria);
        judgements[criteria] = judgement;
    }
    summary = essay.get_reviewer_summary(judgements);
    grade = essay.give_grade(summary);
    print("Reviewer Notes: ", summary);
    print("Grade: ", grade);
}
```
MTLLM is able to represent typed inputs in a way that is understandable to the model. At the same time, this lets the model generate outputs of the expected output type without any additional information. Here is an example:
```jac
import:py from mtllm.llms, OpenAI;

glob llm = OpenAI(model_name="gpt-4o");

enum 'Personality of the Person'
Personality {
    INTROVERT: 'Person who is shy and reticent' = "Introvert",
    EXTROVERT: 'Person who is outgoing and socially confident' = "Extrovert"
}

obj 'Person'
Person {
    has full_name: 'Fullname of the Person': str,
        yod: 'Year of Death': int,
        personality: 'Personality of the Person': Personality;
}

can 'Get Person Information use common knowledge'
get_person_info(name: 'Name of the Person': str) -> 'Person': Person by llm();

with entry {
    person_obj = get_person_info('Martin Luther King Jr.');
    print(person_obj);
}
```

Output:

```
Person(full_name='Martin Luther King Jr.', yod=1968, personality=Personality.INTROVERT)
```
In the above example, the `get_person_info` function takes a `name` parameter of type `str` and returns a `Person` object. The `Person` object has three attributes: `full_name` of type `str`, `yod` of type `int`, and `personality` of type `Personality`. The `Personality` enum has two values: `INTROVERT` and `EXTROVERT`. The function is defined using the `by <your_llm>` syntax, which means the implementation is handled by the MTLLM. The model is able to understand the typed inputs and outputs and generate the output in the expected type.
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/building-blocks/language_models.html b/support/plugins/mtllm/docs/docs/building-blocks/language_models.html
new file mode 100644
index 000000000..e3fdeb6e8
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/building-blocks/language_models.html
@@ -0,0 +1,54 @@
+MTLLM API Documentation | Language Models

Language Models

Language models are the most important building block of MTLLM; without them we can't achieve neuro-symbolic programming. Let's first make sure you can set up your language model. MTLLM supports clients for many remote and local LMs, and you can even create your own very easily if you want to.
Setting up an LM client

In this section, we will go through the process of setting up an OpenAI `GPT-4o` language model client. First, make sure that you have installed the necessary dependencies by running `pip install mtllm[openai]`.

```jac
import:py from mtllm.llms.openai, OpenAI;

my_llm = OpenAI(model_name="gpt-4o");
```

Make sure to set the `OPENAI_API_KEY` environment variable with your OpenAI API key.
Directly calling the LM

You can directly call the LM by giving it raw prompts as well:

```jac
my_llm("What is the capital of France?");
```

You can also pass `max_tokens`, `temperature`, and other parameters to the LM:

```jac
my_llm("What is the capital of France?", max_tokens=10, temperature=0.5);
```
Using the LM with MTLLM

The intended use of MTLLM's LMs is with jaclang's `BY_LLM` feature.

With Abilities and Methods:

```jac
can function(arg1: str, arg2: str) -> str by llm();
```

With Classes:

```jac
new_object = MyClass(arg1: str by llm());
```
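To make the class usage concrete, here is a minimal sketch that extrapolates directly from the one-liner above: a hypothetical `Country` object whose `capital` attribute is filled by the LM at construction time. The object, its fields, and the surrounding setup are illustrative assumptions, not part of the documented API.

```jac
import:py from mtllm.llms, OpenAI;

glob llm = OpenAI(model_name="gpt-4o");

obj Country {
    has name: str;
    has capital: str;
}

with entry {
    # hypothetical: the LM infers `capital` from the rest of the object
    france = Country(name="France", capital: str by llm());
    print(france.capital);
}
```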
You can pass the following attributes to the `by llm()` feature:

- `method` (default: `Normal`): Reasoning method to use. Can be `Normal`, `Reason`, or `Chain-of-Thoughts`.
- `tools` (default: `None`): Tools to use. This is a list of abilities to use with the ReAct prompting method (see the sketch after this list).
- Model-specific parameters: You can also pass model-specific parameters, for example `max_tokens`, `temperature`, etc.
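As a rough sketch of the `tools` parameter, assuming `'ReAct'` is the corresponding method name and that ordinary abilities can be passed as tools (the `get_weather` ability below is made up for illustration):

```jac
import:py from mtllm.llms, OpenAI;

glob llm = OpenAI(model_name="gpt-4o");

# an ordinary ability exposed to the LM as a tool (stub implementation)
can get_weather(city: 'Name of the City': str) -> 'Current Weather': str {
    return "Sunny, 24C";  # a real tool would query a weather API here
}

can answer(question: str) -> str by llm(method='ReAct', tools=[get_weather]);

with entry {
    print(answer("Should I take an umbrella to Paris today?"));
}
```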
Enabling Verbose Mode

You can enable verbose mode to see the internal workings of the LM:
```jac
import:py from mtllm.llms, OpenAI;

my_llm = OpenAI(model_name="gpt-4o", verbose=True);
```
Remote LMs

These language models are provided as managed services. To access them, simply sign up and obtain an API key.

NOTICE: Before calling any of the remote language models listed below, make sure to set the corresponding environment variable with your API key. Use Chat models for better performance.

```jac
llm = mtllm.llms.{provider_listed_below}(model_name="your model", verbose=True/False);
```

- OpenAI - OpenAI's gpt-3.5-turbo, gpt-4, gpt-4-turbo, gpt-4o (model zoo)
- Anthropic - Anthropic's Claude 3 & Claude 3.5 - Haiku, Sonnet, Opus (model zoo)
- Groq - Groq's Fast Inference Models (model zoo)
- Together - Together's hosted OpenSource Models (model zoo)
- Together's hosted OpenSource Models model zoo link Local LMslink OllamaInitiate a ollama server by following this tutorial here . Then you can use it as follows:
1link import : py from mtllm. llms. ollama, Ollama;
2link
3link llm = Ollama( host= "ip:port of the ollama server" , model_name= "llama3" , verbose= True / False ) ;
link HuggingFaceYou can use any of the HuggingFace's language models as well. models
1link import : py from mtllm. llms. huggingface, HuggingFace;
2link
3link llm = HuggingFace( model_name= "microsoft/Phi-3-mini-4k-instruct" , verbose= True / False ) ;
NOTICE
We are constantly adding new LMs to the library. If you want to add a new LM, please open an issue here .
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/building-blocks/multimodality.html b/support/plugins/mtllm/docs/docs/building-blocks/multimodality.html
new file mode 100644
index 000000000..ce12b2415
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/building-blocks/multimodality.html
@@ -0,0 +1,56 @@
+MTLLM API Documentation | Multimodality

# Multimodality

For MTLLM to have actual neurosymbolic powers, it needs to be able to handle multimodal inputs and outputs. This means that it should be able to understand text, images, and videos. In this section, we will discuss how MTLLM can handle multimodal inputs.

## Image

MTLLM can handle images as inputs. You can provide an image as input to the MTLLM Function or Method using the `Image` format of mtllm. Here is an example of how you can provide an image as input to the MTLLM Function or Method:

```python
import:py from mtllm.llms, OpenAI;
import:py from mtllm, Image;

glob llm = OpenAI(model_name="gpt-4o");

enum Personality {
    INTROVERT: 'Person who is shy and reticent' = "Introvert",
    EXTROVERT: 'Person who is outgoing and socially confident' = "Extrovert"
}

obj 'Person'
Person {
    has full_name: str,
        yod: 'Year of Death': int,
        personality: 'Personality of the Person': Personality;
}

can get_person_info(img: 'Image of Person': Image) -> Person
by llm();

with entry {
    person_obj = get_person_info(Image("person.png"));
    print(person_obj);
}
```

Input Image (person.png):

```python
# Output
Person(full_name='Albert Einstein', yod=1955, personality=Personality.INTROVERT)
```

In the above example, we have provided an image of a person ("Albert Einstein") as input to the `get_person_info` method. The method returns the information of the person in the image. The output of the method is a `Person` object with the name, year of death, and personality of the person in the image.

## Video

Similarly, MTLLM can handle videos as inputs. You can provide a video as input to the MTLLM Function or Method using the `Video` format of mtllm. Here is an example of how you can provide a video as input to the MTLLM Function or Method:

```python
import:py from mtllm.llms, OpenAI;
import:py from mtllm, Video;

glob llm = OpenAI(model_name="gpt-4o");

can is_aligned(video: Video, text: str) -> bool
by llm(method="Chain-of-Thoughts", context="Mugen is the moving character");

with entry {
    video = Video("mugen.mp4", 1);
    text = "Mugen jumps off and collects few coins.";
    print(is_aligned(video, text));
}
```

Input Video (mugen.mp4): mugen.mp4

```python
# Output
True
```

In the above example, we have provided a video of a character ("Mugen") as input to the `is_aligned` method. The method checks if the text is aligned with the video. The output of the method is a boolean value indicating whether the text is aligned with the video.

## Audio

We are working on adding support for audio inputs to MTLLM. Stay tuned for updates on this feature.
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/building-blocks/object_init.html b/support/plugins/mtllm/docs/docs/building-blocks/object_init.html
new file mode 100644
index 000000000..e81228fd3
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/building-blocks/object_init.html
@@ -0,0 +1,54 @@
+MTLLM API Documentation | Object Initialization

# Object Initialization

As MTLLM is really great at handling typed outputs, we have added the ability to initialize a new object by providing only a few of the required fields. MTLLM will automatically fill the rest of the fields based on the given context.

This behavior is very hard to achieve in other languages, but with MTLLM, it is as simple as providing the required fields and letting the MTLLM do the rest.

In the following example, we are initializing a new object of type `Task` by providing only the `description` field. The `time_in_min` and `priority_out_of_10` fields are automatically filled by the MTLLM based on the given context after a step of reasoning.

```python
import:py from mtllm.llms, OpenAI, Ollama;

glob llm = OpenAI(model_name="gpt-4o");

obj Task {
    has description: str;
    has time_in_min: int,
        priority_out_of_10: int;
}

with entry {
    task_contents = [
        "Have some sleep",
        "Enjoy a better weekend with my girlfriend",
        "Work on Jaseci Project",
        "Teach EECS 281 Students",
        "Enjoy family time with my parents"
    ];
    tasks = [];
    for task_content in task_contents {
        task_info = Task(description = task_content by llm(method="Reason"));
        tasks.append(task_info);
    }
    print(tasks);
}
```

```python
# Output
[
    Task(description='Have some sleep', time_in_min=30, priority_out_of_10=5),
    Task(description='Enjoy a better weekend with my girlfriend', time_in_min=60, priority_out_of_10=7),
    Task(description='Work on Jaseci Project', time_in_min=120, priority_out_of_10=8),
    Task(description='Teach EECS 281 Students', time_in_min=90, priority_out_of_10=9),
    Task(description='Enjoy family time with my parents', time_in_min=60, priority_out_of_10=7)
]
```

Here is another example with nested custom types,

```python
import:py from jaclang.core.llms, OpenAI;

glob llm = OpenAI(model_name="gpt-4o");

obj Employer {
    has name: 'Employer Name': str,
        location: str;
}

obj 'Person'
Person {
    has name: str,
        age: int,
        employer: Employer,
        job: str;
}

with entry {
    info: "Person's Information": str = "Alice is a 21 years old and works as an engineer at LMQL Inc in Zurich, Switzerland.";
    person = Person(by llm(incl_info=(info)));
    print(person);
}
```

```python
# Output
Person(name='Alice', age=21, employer=Employer(name='LMQL Inc', location='Zurich, Switzerland'), job='engineer')
```

In the above example, we have initialized a new object of type `Person` by providing only `info` as additional context. The `name`, `age`, `employer`, and `job` fields are automatically filled by the MTLLM based on the given context.
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/building-blocks/semstrings.html b/support/plugins/mtllm/docs/docs/building-blocks/semstrings.html
new file mode 100644
index 000000000..c4ea57ef9
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/building-blocks/semstrings.html
@@ -0,0 +1,54 @@
+MTLLM API Documentation | Semstrings

# Semstrings

The core idea behind MT-LLM is that if the program has been written in a readable manner, with type-safety, an LLM would be able to understand the task required to be performed using meaning embedded within the code.

However, there are instances where this is not the case. Hence, a new meaning insertion code abstraction called "semstrings" has been introduced in MT-LLM.

## Where code is not meaningful enough!

Let's look into an instance where the existing code constructs are not sufficient to describe the meaning of the code for an LLM.

```python | apple.jac
import:py from mtllm.llms, OpenAI;

glob llm = OpenAI();

obj item {
    has name : str,
        category : str = '';
}

obj shop {
    has item_dir: dict[str, item];

    can categorize(name: str) -> str by llm();
}

with entry {
    shop_inv = shop();
    apple = item(name='apple');
    apple.category = categorize(apple.name);
    shop_inv.item_dir[apple.name] = apple.category;
}
```

This is a partial code that can be used as a shopkeeping app where each item name is tagged with its category. However, in this example, you can observe in line 18 that the item name is passed in as 'apple', which can be ambiguous for an LLM, as apple can mean the fruit as well as a tech product. To resolve this problem we can use much more descriptive variable names. For instance, instead of `item` we can use `tech_item`. However, adding more descriptive names for objects, variables and functions will hinder the reusability of object fields as the reference names become too long.

## Semstrings to uplift 'meaning'

As the existing code abstractions do not fully allow the programmer to express their meaning, we have added an extra feature you can use to embed meaning directly as text into your code. We call these text annotations semstrings.

Let's see how we can add semstrings to the existing program above.

```python | apple.jac
import:py from mtllm.llms, OpenAI;

glob llm = OpenAI();

obj 'An edible product'
item {
    has name : str,
        category : str = '';
}

obj 'Food store inventory'
shop {
    has item_dir: 'Inventory of shop': dict[str, item];

    can 'categorize the edible as fruit, vegetables, sweets etc'
    categorize(name: str) -> 'Item category': str by llm();
}

with entry {
    shop_inv = shop();
    apple = item(name='apple');
    apple.category = categorize(apple.name);
    shop_inv.item_dir["ID_876837"] = apple;
}
```

In this example we add semstrings that attach semantic meaning to existing code constructs such as variables, objects and functions. The semstring of each item is linked with its signature and is included when generating the prompt for the LLM. These small descriptions add more context, helping the LLM give a much more accurate response.

## How to add semstrings?

The below examples show different instances where semstrings can be inserted.

### Variables / Object Fields Declaration

```python
glob name: 'semstring': str = 'sample value'
```

### Function / Method Declaration

```python
can 'semstring'
function_name(arg_1: 'semstring': type ...) {
    # function body
}
```

### Object / Class Declaration

```python
obj 'semstring' object_name {
    # Object fields
    # Object methods
}
```

### With `by llm()`

```python
can 'semstring_for_action'
function_name (arg_1: 'semstring_input': type ...)
-> 'semstring_output': type
by llm();
```
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/design-impl/inference_engine.html b/support/plugins/mtllm/docs/docs/design-impl/inference_engine.html
new file mode 100644
index 000000000..caf73f67b
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/design-impl/inference_engine.html
@@ -0,0 +1,144 @@
+MTLLM API Documentation | MTLLM Inference Engine

# MTLLM Inference Engine

## 1. Overview

The MTLLM (Meaning-Typed Large Language Model) Inference Engine is a core component of the MTLLM framework. It is responsible for managing the interaction between the application, the semantic registry, and the underlying Large Language Model (LLM). The Inference Engine handles the process of constructing prompts, managing LLM interactions, processing outputs, and implementing error handling and self-correction mechanisms.
graph TD
+ A[Jaclang Application] --> B[Compilation]
+ B --> C[SemRegistry]
+ C --> D[Pickle File]
+ A --> E[Runtime]
+ E --> F[MTLLM Inference Engine]
+ F --> G[LLM Model]
+ F --> H[Tool Integration]
+ D -.-> F
+ G --> I[Output Processing]
+ I --> J[Error Handling]
+ J -->|Error| F
+ J -->|Success| K[Final Output]
## 2. Key Components

The MTLLM Inference Engine consists of several key components:

1. Prompt Constructor
2. LLM Interface
3. Output Processor
4. Error Handler
5. Tool Integrator

### 2.1 Prompt Constructor

The Prompt Constructor is responsible for building the input prompt for the LLM. It incorporates semantic information from the SemRegistry, user inputs, and contextual data to create a comprehensive and meaningful prompt.

Key features:

- Semantic enrichment using SemRegistry data
- Dynamic prompt structure based on the chosen method (ReAct, Reason, CoT)
- Integration of type information and constraints
- Inclusion of available tools and their usage instructions

Files involved:

- `aott.py` # aott_raise, get_all_type_explanations
- `plugin.py` # with_llm method
- `types.py` # Information, InputInformation, OutputHint, Tool classes

### 2.2 LLM Interface

The LLM Interface manages the communication between the MTLLM framework and the underlying Large Language Model. It handles sending prompts to the LLM and receiving raw outputs.
Key features:

- Abstraction layer for different LLM providers
- Handling of API communication and error management
- Handling Multi-Modal Inputs if applicable

Files involved:

- `aott.py` # aott_raise
- `llms/base.py` # BaseLLM class, __call__, __infer__

### 2.3 Output Processor

The Output Processor is responsible for parsing and validating the raw output from the LLM. It ensures that the output meets the expected format and type constraints.

Key features:

- Extraction of relevant information from LLM output
- Type checking and format validation
- Conversion of string representations to Python objects (when applicable)

sequenceDiagram
+ participant A as Application
+ participant M as MTLLM Engine
+ participant S as SemRegistry
+ participant L as LLM Model
+ participant T as Tools
+ participant E as Evaluator
+ A->>M: Call by_llm()
+ M->>S: Fetch Semantic Info
+ M->>M: Construct Prompt
+ M->>L: Send Prompt
+ L->>M: Return Raw Output
+ M->>E: Evaluate Output
+ alt Evaluation Successful
+ E->>M: Return Result
+ M->>A: Return Final Output
+ else Evaluation Failed
+ E->>M: Return Error
+ M->>M: Construct Error Prompt
+ M->>L: Send Error Prompt
+ L->>M: Return Corrected Output
+ M->>E: Re-evaluate Output
+ end
+ opt Tool Usage Required
+ M->>T: Execute Tool
+ T->>M: Return Tool Result
+ M->>L: Include Tool Result in Prompt
+ end
Files involved:

- `aott.py` # aott_raise
- `llms/base.py` # BaseLLM class, BaseLLM.resolve_output, BaseLLM._extract_output, BaseLLM.to_object, BaseLLM._fix_output

### 2.4 Error Handler

The Error Handler manages error detection, classification, and the self-correction process. It works closely with the Output Processor to identify issues and initiate corrective actions.

Key features:

- Error detection and classification
- Generation of targeted feedback for the LLM
- Management of the self-correction loop
- Implementation of fallback strategies

graph TD
+ A[LLM Output] --> B{Validate Output}
+ B -->|Valid| C[Return Result]
+ B -->|Invalid| D[Classify Error]
+ D --> E[Generate Error Feedback]
+ E --> F[Create Self-Correction Prompt]
+ F --> G[Submit to LLM]
+ G --> H{Check Retry Count}
+ H -->|Max Retries Reached| I[Return Error to Application]
+ H -->|Retries Available| B
Files involved:

- `llms/base.py` # BaseLLM._check_output, BaseLLM._extract_output, BaseLLM.to_object, BaseLLM._fix_output

### 2.5 Tool Integrator

The Tool Integrator manages the integration and execution of external tools within the inference process. It allows the LLM to leverage additional capabilities when needed.

Key features:

- Integration of tool results into the LLM prompt
- Error handling for tool execution in ReAct mode

sequenceDiagram
+ participant A as Application
+ participant M as MTLLM Engine
+ participant L as LLM Model
+ participant T as Tools
+ A->>M: Call by_llm()
+ M->>L: Send Prompt
+ L->>M: Return Tool Usage Request
+ M->>T: Execute Tool
+ T->>M: Return Tool Result
+ M->>L: Include Tool Result in Prompt
+ L->>M: Return Final Output
+ M->>A: Return Final Output
Files involved:

- `plugin.py` # callable_to_tool
- `types.py` # Tool class
- `tools/base.py` # Tool class
- `tools/<math_utils.py/serper.py/wikipedia.py>` (mtllm/tools) # Predefined tools

## 3. Inference Process

The MTLLM Inference Engine follows a structured process for each inference request:
1. Initialization: The inference process begins when the with_llm function is called from the application.
2. Semantic Information Retrieval: The engine queries the SemRegistry to retrieve relevant semantic information based on the current context and input parameters.
3. Prompt Construction: The Prompt Constructor builds the initial prompt, incorporating semantic information, input data, and any relevant type constraints or tool descriptions.
4. LLM Interaction: The constructed prompt is sent to the LLM via the LLM Interface. The raw output is received and passed to the Output Processor.
5. Output Processing: The Output Processor parses the LLM's response, extracting the relevant information and performing initial validation.
6. Error Checking: The processed output is checked for errors or inconsistencies. If issues are detected, the Error Handler is invoked to manage the self-correction process.
7. Tool Execution (if required): If the LLM's response indicates the need for tool usage, the Tool Integrator manages the execution of the required tool and integration of its results.
8. Iteration (if necessary): Steps 4-7 may be repeated if error correction or additional tool usage is required.
9. Final Output: Once a valid output is obtained, it is returned to the calling application.
sequenceDiagram
+ participant App as Application
+ participant IE as Inference Engine
+ participant PC as Prompt Constructor
+ participant SR as Semantic Registry
+ participant LLM as LLM Interface
+ participant OP as Output Processor
+ participant EH as Error Handler
+ participant TI as Tool Integrator
+ App->>IE: Call by_llm()
+ IE->>SR: Retrieve semantic info
+ SR-->>IE: Return semantic info
+ IE->>PC: Construct prompt
+ PC-->>IE: Return initial prompt
+ loop Until valid output or max iterations
+ IE->>LLM: Send prompt
+ LLM-->>IE: Return raw output
+ IE->>OP: Process output
+ OP-->>IE: Return processed output
+ IE->>EH: Check for errors
+ alt Error detected
+ EH-->>IE: Return correction prompt
+ IE->>PC: Update prompt
+ else Tool required
+ IE->>TI: Execute tool
+ TI-->>IE: Return tool result
+ IE->>PC: Add tool result to prompt
+ else Valid output
+ IE->>App: Return final output
+ end
+ end
## 4. Implementation Details

### 4.1 with_llm Function

The with_llm function serves as the main entry point for the MTLLM Inference Engine. It orchestrates the entire inference process, initializing the necessary components, managing the flow of information, and handling the iterative process of obtaining a valid output from the LLM.
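
As an illustration, here is a minimal, self-contained sketch of such an orchestration loop. All names (`construct_prompt`, `process_output`, and so on) are hypothetical stand-ins, not mtllm's actual API:

```python
# Hypothetical sketch of a with_llm-style orchestration loop (not mtllm's actual code).
def construct_prompt(sem_info: str, inputs: dict, hint: str) -> str:
    parts = [f"- {k}: {v}" for k, v in inputs.items()]
    return f"{sem_info}\n" + "\n".join(parts) + f"\nReturn output as: {hint}"

def process_output(raw: str, hint: str):
    # Stand-in validation: require a non-empty answer.
    if raw.strip():
        return True, raw.strip()
    return False, "empty output"

def with_llm(llm, sem_info: str, inputs: dict, hint: str, max_retries: int = 3):
    prompt = construct_prompt(sem_info, inputs, hint)
    for _ in range(max_retries):
        raw = llm(prompt)                       # LLM Interface
        ok, result = process_output(raw, hint)  # Output Processor
        if ok:
            return result                       # valid, typed output
        # Error Handler: feed the failure back for self-correction
        prompt += f"\nPrevious attempt failed ({result}); please try again."
    raise RuntimeError("LLM failed to produce a valid output")
```

Called with any callable `llm`, this mirrors the flow described above: construct, send, validate, and retry with targeted feedback until success or the retry budget runs out.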
### 4.2 Error Handling and Self-Correction

The Error Handler implements a sophisticated mechanism for detecting and correcting errors in the LLM's output. It maintains a count of correction attempts, generates targeted prompts for error correction, and determines when to terminate the correction process.

### 4.3 Tool Integration

The Tool Integrator manages the execution of external tools and the integration of their results into the inference process. It converts normal functions to tools and executes them in the context of the inference engine.

## 5. Extensibility and Customization

The MTLLM Inference Engine is designed with extensibility in mind. Key areas for customization include:

- Prompting Strategies: New prompting methods can be added by extending the Model class or changing the MTLLM_PROMPTs.
- LLM Providers: Support for new LLM providers can be added by implementing the BaseLLM interface.
- Tool Integration: New tools can be easily registered and integrated into the inference process.
- Error Handling: Custom error detection and correction strategies can be implemented by simple prompting changes.

## 6. Performance Considerations

The MTLLM Inference Engine is designed to balance performance and flexibility. Key performance considerations include:

- Caching: Implement caching mechanisms for frequently used prompts or intermediate results.
- Asynchronous Processing: Utilize asynchronous programming techniques for non-blocking I/O operations, especially in LLM interactions.
- Batching: Implement batching strategies for processing multiple inference requests efficiently.
- Resource Management: Carefully manage memory usage/token usage, especially when dealing with large prompts or outputs.

## 7. Security Considerations

Security is a critical aspect of the MTLLM Inference Engine design:

- Input Sanitization: Implement robust input sanitization to prevent injection attacks.
- Tool Execution Sandboxing: Execute external tools in a controlled environment to prevent unauthorized actions.
- Output Validation: Implement thorough output validation to ensure the LLM's responses don't contain harmful content.
- API Key Management: Securely manage and rotate API keys for LLM providers.

## 8. Future Improvements

Potential areas for future improvement of the MTLLM Inference Engine include:

- Advanced Caching Strategies: Implement more sophisticated caching mechanisms to improve performance.
- Multi-Model Support: Enable the use of multiple LLMs within a single inference process for enhanced capabilities.
- Federated Learning Integration: Explore the integration of federated learning techniques for privacy-preserving model updates.
- Explainability Features: Develop features to provide explanations for the LLM's decision-making process.
- Adaptive Prompting: Implement adaptive prompting strategies that evolve based on the success rates of different prompt structures.

This documentation provides a comprehensive overview of the MTLLM Inference Engine's design and implementation. It covers the key components, the inference process, implementation details, extensibility options, and important considerations for performance and security.
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/design-impl/sem_registry.html b/support/plugins/mtllm/docs/docs/design-impl/sem_registry.html
new file mode 100644
index 000000000..a681922dc
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/design-impl/sem_registry.html
@@ -0,0 +1,71 @@
+MTLLM API Documentation | SemRegistry, SemInfo, and SemScope
+
# SemRegistry, SemInfo, and SemScope

## Overview

The semantic registry system in the MTLLM framework consists of three main classes: SemInfo, SemScope, and SemRegistry. These classes work together to provide a structured way of storing and retrieving semantic information about various elements in a program. This document outlines the design and implementation details of each class.
sequenceDiagram
+ participant C as Compiler
+ participant R as SemRegistry
+ participant S as SemScope
+ participant I as SemInfo
+ participant F as File System
+ C->>R: Initialize SemRegistry
+ loop For each AST node
+ C->>R: Get or Create SemScope
+ R->>S: Create if not exists
+ C->>I: Create SemInfo
+ C->>S: Add SemInfo to SemScope
+ S->>I: Store SemInfo
+ end
+ C->>R: Finalize Registry
+ R->>F: Save to Pickle File
## SemInfo

### Design

SemInfo is designed to encapsulate semantic information for individual elements in a program. It stores three key pieces of information:

- name: The identifier of the element
- type: The type of the element (optional)
- semstr: A semantic string describing the element

### Implementation

```python
class SemInfo:
    def __init__(self, name: str, type: Optional[str] = None, semstr: str = "") -> None:
        self.name = name
        self.type = type
        self.semstr = semstr

    def __repr__(self) -> str:
        return f"{self.semstr} ({self.type}) ({self.name})"
```
### Key Features:

- Simple initialization with optional type and semstr
- String representation includes all three attributes for easy debugging and display

## SemScope

### Design

SemScope represents a scope in the program, which can be nested. It includes:

- scope: The name of the scope
- type: The type of the scope (e.g., "class", "function")
- parent: A reference to the parent scope (optional)

Additional features include:

- String representation that shows the full scope hierarchy
- Static method to create a SemScope from a string representation
- Property to get the scope as a type string

### Implementation

```python
class SemScope:
    def __init__(self, scope: str, type: str, parent: Optional[SemScope] = None) -> None:
        self.parent = parent
        self.type = type
        self.scope = scope

    def __str__(self) -> str:
        if self.parent:
            return f"{self.parent}.{self.scope}({self.type})"
        return f"{self.scope}({self.type})"

    def __repr__(self) -> str:
        return self.__str__()

    @staticmethod
    def get_scope_from_str(scope_str: str) -> Optional[SemScope]:
        scope_list = scope_str.split(".")
        parent = None
        for scope in scope_list:
            scope_name, scope_type = scope.split("(")
            scope_type = scope_type[:-1]
            parent = SemScope(scope_name, scope_type, parent)
        return parent

    @property
    def as_type_str(self) -> Optional[str]:
        if self.type not in ["class", "node", "obj"]:
            return None
        type_str = self.scope
        node = self.parent
        while node and node.parent:
            if node.type not in ["class", "node", "obj"]:
                return type_str
            type_str = f"{node.scope}.{type_str}"
            node = node.parent
        return type_str
```
### Key Features:

- Nested structure representation through the parent attribute
- String representation shows the full scope hierarchy
- get_scope_from_str allows reconstruction of a SemScope hierarchy from a string
- as_type_str property provides a string representation of the scope as a type, useful for type checking and inference

## SemRegistry

### Design

SemRegistry serves as the main container and manager for semantic information. It stores SemInfo objects organized by SemScope. Key features include:

- Storage of semantic information in a nested dictionary structure
- Methods for adding new semantic information
- Flexible lookup functionality
- Utility methods for accessing and displaying the registry contents

### Implementation

```python
class SemRegistry:
    def __init__(self) -> None:
        self.registry: dict[SemScope, list[SemInfo]] = {}

    def add(self, scope: SemScope, seminfo: SemInfo) -> None:
        for k in self.registry.keys():
            if str(k) == str(scope):
                scope = k
                break
        else:
            self.registry[scope] = []
        self.registry[scope].append(seminfo)

    def lookup(
        self,
        scope: Optional[SemScope] = None,
        name: Optional[str] = None,
        type: Optional[str] = None,
    ) -> tuple[Optional[SemScope], Optional[SemInfo | list[SemInfo]]]:
        if scope:
            for k, v in self.registry.items():
                if str(k) == str(scope):
                    if name:
                        for i in v:
                            if i.name == name:
                                return k, i
                    elif type:
                        for i in v:
                            if i.type == type:
                                return k, i
                    else:
                        return k, v
        else:
            for k, v in self.registry.items():
                if name:
                    for i in v:
                        if i.name == name:
                            return k, i
                elif type:
                    for i in v:
                        if i.type == type:
                            return k, i
        return None, None

    @property
    def module_scope(self) -> SemScope:
        for i in self.registry.keys():
            if not i.parent:
                break
        return i

    def pp(self) -> None:
        for k, v in self.registry.items():
            print(k)
            for i in v:
                print(f"  {i.name} {i.type} {i.semstr}")
```
### Key Features:

- Efficient storage using a dictionary with SemScope as keys and lists of SemInfo as values
- add method handles the case of existing scopes and adds new SemInfo objects to the appropriate list
- Flexible lookup method allows searching by scope, name, or type, with various combinations
- module_scope property provides quick access to the top-level scope
- pp (pretty print) method for easy debugging and inspection of the registry contents

## Usage and Interaction

These classes work together to provide a comprehensive system for managing semantic information:

1. SemInfo objects are created to represent individual program elements.
2. SemScope objects are created to represent the hierarchical structure of the program.
3. SemRegistry is used to store and organize SemInfo objects within their respective SemScopes.
4. The lookup method of SemRegistry allows for flexible querying of semantic information based on various criteria.

This system enables efficient storage and retrieval of semantic information, which is crucial for the MTLLM framework's ability to understand and reason about program structure and meaning during compilation and inference processes.
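
To see how these pieces fit together, here is a small usage sketch based on the class definitions above (it assumes `from __future__ import annotations` and `from typing import Optional` are in effect, as they would be in the source module):

```python
# Build a tiny registry: a module scope containing an object scope.
module = SemScope("app", "module")
person = SemScope("Person", "obj", parent=module)

reg = SemRegistry()
reg.add(person, SemInfo("full_name", "str", "Fullname of the Person"))
reg.add(person, SemInfo("yod", "int", "Year of Death"))

scope, info = reg.lookup(name="yod")
print(scope)               # app(module).Person(obj)
print(info)                # Year of Death (int) (yod)
print(person.as_type_str)  # Person
reg.pp()                   # dump the registry contents
```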
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/docker-compose.yml b/support/plugins/mtllm/docs/docs/docker-compose.yml
new file mode 100644
index 000000000..0ba485378
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/docker-compose.yml
@@ -0,0 +1,24 @@
+version: "2.0"
+services:
+ docs:
+ #
+ # Build ./Dockerfile
+ #
+ build: .
+
+ #
+ # Expose port 3000 for local development.
+ # This might be re-configured via `.codedoc/config.ts`, in which case
+ # you should change it here as well (I would recommend using an environment variable
+ # for keeping them in sync if you want to deviate from the standard port 3000)
+ #
+ ports:
+ - 3000:3000
+
+ #
+ # Volume root folder on `/home/docs`,
+ # but exclude `/home/docs/.codedoc/node_modules` (since the container env should install its own modules).
+ #
+ volumes:
+ - "..:/home/docs"
+ - "/home/docs/.codedoc/node_modules/"
diff --git a/support/plugins/mtllm/docs/docs/faqs.html b/support/plugins/mtllm/docs/docs/faqs.html
new file mode 100644
index 000000000..22f3f062d
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/faqs.html
@@ -0,0 +1,54 @@
+MTLLM API Documentation | Frequently Asked Questions

# Frequently Asked Questions
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/md/_toc.md b/support/plugins/mtllm/docs/docs/md/_toc.md
new file mode 100644
index 000000000..e5647df48
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/md/_toc.md
@@ -0,0 +1,37 @@
+[Home](/)
+
+> :Collapse label=Quick Start
+>
+> [Installation](/docs/quickstart/installation)
+> [Minimal Working Example](/docs/quickstart/minimal-working-example)
+
+> :Collapse label=Design and Implementation
+>
+> [SemRegistry](/docs/design-impl/sem_registry)
+> [Inference Engine](/docs/design-impl/inference_engine)
+
+
+> :Collapse label=Building Blocks
+>
+> [Language Models](/docs/building-blocks/language_models)
+> [Semstrings](/docs/building-blocks/semstrings)
+> [Functions and Methods](/docs/building-blocks/functions_methods)
+> [Object Initialization](/docs/building-blocks/object_init)
+> [Multimodality](/docs/building-blocks/multimodality)
+
+> :Collapse label=Tutorials
+>
+> [RPG Game Level Generation](/docs/tutorials/rpg_game)
+
+> :Collapse label=API Reference
+>
+> [MTLLM](/docs/api/mtllm)
+
+
+> :Collapse label=Tips and Tricks
+>
+> [Using MTLLM in your existing Application](/docs/tips-n-tricks/existing_application)
+> [When to use Semstrings](/docs/tips-n-tricks/when_to_use_semstrings)
+> [Create your own Language Model](/docs/tips-n-tricks/create_own_lm)
+
+[FAQs](/docs/faqs)
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/md/docs/api/mtllm.md b/support/plugins/mtllm/docs/docs/md/docs/api/mtllm.md
new file mode 100644
index 000000000..e69de29bb
diff --git a/support/plugins/mtllm/docs/docs/md/docs/building-blocks/Multimodality.md b/support/plugins/mtllm/docs/docs/md/docs/building-blocks/Multimodality.md
new file mode 100644
index 000000000..65b07f8c2
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/md/docs/building-blocks/Multimodality.md
@@ -0,0 +1,78 @@
+# Multimodality
+
+For MTLLM to have actual neurosymbolic powers, it needs to be able to handle multimodal inputs and outputs. This means that it should be able to understand text, images, and videos. In this section, we will discuss how MTLLM can handle multimodal inputs.
+
+## Image
+
+MTLLM can handle images as inputs. You can provide an image as input to the MTLLM Function or Method using the `Image` format of mtllm. Here is an example of how you can provide an image as input to the MTLLM Function or Method:
+
+```python
+import:py from mtllm.llms, OpenAI;
+import:py from mtllm, Image;
+
+glob llm = OpenAI(model_name="gpt-4o");
+
+enum Personality {
+ INTROVERT: 'Person who is shy and reticent' = "Introvert",
+ EXTROVERT: 'Person who is outgoing and socially confident' = "Extrovert"
+}
+
+obj 'Person'
+Person {
+ has full_name: str,
+ yod: 'Year of Death': int,
+ personality: 'Personality of the Person': Personality;
+}
+
+can get_person_info(img: 'Image of Person': Image) -> Person
+by llm();
+
+with entry {
+ person_obj = get_person_info(Image("person.png"));
+ print(person_obj);
+}
+```
+
+Input Image (person.png):
+![person.png](https://preview.redd.it/g39au73fdir01.jpg?auto=webp&s=cef8394b639af82ba92d6ab084935f7adc8e841d)
+
+```python
+# Output
+Person(full_name='Albert Einstein', yod=1955, personality=Personality.INTROVERT)
+```
+
+In the above example, we have provided an image of a person ("Albert Einstein") as input to the `get_person_info` method. The method returns the information of the person in the image. The output of the method is a `Person` object with the name, year of death, and personality of the person in the image.
+
+## Video
+
+Similarly, MTLLM can handle videos as inputs. You can provide a video as input to the MTLLM Function or Method using the `Video` format of mtllm. Here is an example of how you can provide a video as input to the MTLLM Function or Method:
+
+```python
+import:py from mtllm.llms, OpenAI;
+import:py from mtllm, Video;
+
+glob llm = OpenAI(model_name="gpt-4o");
+
+can is_aligned(video: Video, text: str) -> bool
+by llm(method="Chain-of-Thoughts", context="Mugen is the moving character");
+
+with entry {
+ video = Video("mugen.mp4", 1);
+ text = "Mugen jumps off and collects few coins.";
+ print(is_aligned(video, text));
+}
+```
+
+Input Video (mugen.mp4):
+[mugen.mp4](https://github.com/Jaseci-Labs/mtllm/blob/main/examples/vision/mugen.mp4)
+
+```python
+# Output
+True
+```
+
+In the above example, we have provided a video of a character ("Mugen") as input to the `is_aligned` method. The method checks if the text is aligned with the video. The output of the method is a boolean value indicating whether the text is aligned with the video.
+
+## Audio
+
+We are working on adding support for audio inputs to MTLLM. Stay tuned for updates on this feature.
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/md/docs/building-blocks/functions_methods.md b/support/plugins/mtllm/docs/docs/md/docs/building-blocks/functions_methods.md
new file mode 100644
index 000000000..c8b7bf11c
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/md/docs/building-blocks/functions_methods.md
@@ -0,0 +1,162 @@
+# Functions and Methods
+
+Functions and methods play a crucial role in implementing various functionalities in a traditional GenAI application. In jaclang, we have designed these functions and methods to be highly flexible and powerful. Surprisingly, they don't even require a function or method body thanks to the MTLLM `by <your_llm>` syntax. This section will guide you on how to effectively utilize functions and methods in jaclang using MTLLM.
+
+## Functions
+
+Functions/Abilities in jaclang are defined using the `can` keyword. They can be used to define a set of actions. A normal function looks like this in jaclang:
+
+```python
+can <function_name>(<parameter>: <parameter_type>, ..) -> <return_type> {
+ <function_body>;
+}
+```
+
+In a traditional GenAI application, you would make API calls inside the function body to perform the desired action. However, in jaclang, you can define the function using the `by <your_llm>` syntax. This way, you can define the function without a body and let the MTLLM model handle the implementation. Here is an example:
+
+```python
+can greet(name: str) -> str by <your_llm>();
+```
+
+In the above example, the `greet` function takes a `name` parameter of type `str` and returns a `str`. The function is defined using the `by <your_llm>` syntax, which means the implementation of the function is handled by the MTLLM.
+
+Below is an example where we define a function `get_expert` that takes a question as input and, using mtllm with an OpenAI model and the `Reason` method, returns the best expert to answer the question as a string. The `get_answer` function takes a question and an expert as input and returns the answer using mtllm with an OpenAI model without any method. We can then call these functions like normal functions.
+
+```python
+import:py from mtllm.llms, OpenAI;
+
+glob llm = OpenAI(model_name="gpt-4o");
+
+can get_expert(question: str) -> 'Best Expert to Answer the Question': str by llm(method='Reason');
+can get_answer(question: str, expert: str) -> str by llm();
+
+with entry {
+ question = "What are Large Language Models?";
+ expert = get_expert(question);
+ answer = get_answer(question, expert);
+ print(f"{expert} says: '{answer}' ");
+}
+```
+
+Here's another example,
+
+```python
+import:py from mtllm.llms, OpenAI;
+
+glob llm = OpenAI(model_name="gpt-4o");
+
+can 'Get a Joke with a Punchline'
+get_joke() -> tuple[str, str] by llm();
+
+with entry {
+ (joke, punchline) = get_joke();
+ print(f"{joke}: {punchline}");
+}
+```
+
+In the above example, the `get_joke` function returns a tuple of two strings, which are the joke and its punchline. The function is defined using the `by <your_llm>` syntax, which means the implementation is handled by the MTLLM. You can add a semstring to the function to make it more specific.
+
+
+## Methods
+
+Methods in jaclang are also defined using the `can` keyword. They can be used to define a set of actions that are specific to a class. A normal method looks like this in jaclang:
+
+```python
+obj ClassName {
+ has parameter: parameter_type;
+ can <method_name>(<parameter>: <parameter_type>, ..) -> <return_type> {
+ <method_body>;
+ }
+}
+```
+
+In a traditional GenAI application, you would make API calls inside the method body to perform the desired action while using the `self` keyword to get necessary information. However, in jaclang, you can define the method using the `by <your_llm>` syntax. This way, you can define the method without a body and let the MTLLM model handle the implementation. Here is an example:
+
+```python
+obj Person {
+ has name: str;
+ can greet() -> str by <your_llm>(incl_info=(self));
+}
+```
+
+In the above example, the `greet` method returns a `str`. The method is defined using the `by <your_llm>` syntax, which means the implementation of the method is handled by the MTLLM. The `incl_info=(self)` parameter is used to include the `Person` object's attributes (such as `name`) as an information source for the MTLLM.
+
+In the below example, we define a class `Essay` with a method `get_essay_judgement` that takes a criteria as input and returns the judgement for the essay based on that criteria, using mtllm with an OpenAI model. The `get_reviewer_summary` method takes a dictionary of judgements as input and returns the reviewer's summary based on those judgements. The `give_grade` method takes the summary as input and returns a grade for the essay. We can then call these methods like normal methods.
+
+```python
+import:py from mtllm.llms, OpenAI;
+
+glob llm = OpenAI(model_name="gpt-4o");
+
+obj Essay {
+ has essay: str;
+
+ can get_essay_judgement(criteria: str) -> str by llm(incl_info=(self.essay));
+ can get_reviewer_summary(judgements: dict) -> str by llm(incl_info=(self.essay));
+ can give_grade(summary: str) -> 'A to D': str by llm();
+}
+
+with entry {
+ essay = "With a population of approximately 45 million Spaniards and 3.5 million immigrants,"
+ "Spain is a country of contrasts where the richness of its culture blends it up with"
+ "the variety of languages and dialects used. Being one of the largest economies worldwide,"
+ "and the second largest country in Europe, Spain is a very appealing destination for tourists"
+ "as well as for immigrants from around the globe. Almost all Spaniards are used to speaking at"
+ "least two different languages, but protecting and preserving that right has not been"
+ "easy for them.Spaniards have had to struggle with war, ignorance, criticism and the governments,"
+ "in order to preserve and defend what identifies them, and deal with the consequences.";
+ essay = Essay(essay);
+ criterias = ["Clarity", "Originality", "Evidence"];
+ judgements = {};
+ for criteria in criterias {
+ judgement = essay.get_essay_judgement(criteria);
+ judgements[criteria] = judgement;
+ }
+ summary = essay.get_reviewer_summary(judgements);
+ grade = essay.give_grade(summary);
+ print("Reviewer Notes: ", summary);
+ print("Grade: ", grade);
+}
+```
+
+## Ability to Understand Typed Inputs and Outputs
+
+MTLLM is able to represent typed inputs in a way that is understandable to the model. At the same time, this enables the model to generate outputs in the expected output type without any additional information. Here is an example:
+
+```python
+import:py from mtllm.llms, OpenAI;
+
+glob llm = OpenAI(model_name="gpt-4o");
+
+
+enum 'Personality of the Person'
+Personality {
+ INTROVERT: 'Person who is shy and reticent' = "Introvert",
+ EXTROVERT: 'Person who is outgoing and socially confident' = "Extrovert"
+}
+
+obj 'Person'
+Person {
+ has full_name: 'Fullname of the Person': str,
+ yod: 'Year of Death': int,
+ personality: 'Personality of the Person': Personality;
+}
+
+can 'Get Person Information use common knowledge'
+get_person_info(name: 'Name of the Person': str) -> 'Person': Person by llm();
+
+with entry {
+ person_obj = get_person_info('Martin Luther King Jr.');
+ print(person_obj);
+}
+```
+
+```python
+# Output
+Person(full_name='Martin Luther King Jr.', yod=1968, personality=Personality.INTROVERT)
+```
+
+In the above example, the `get_person_info` function takes a `name` parameter of type `str` and returns a `Person` object. The `Person` object has three attributes: `full_name` of type `str`, `yod` of type `int`, and `personality` of type `Personality`. The `Personality` enum has two values: `INTROVERT` and `EXTROVERT`. The function is defined using the `by <your_llm>` syntax, which means the implementation is handled by the MTLLM. The model is able to understand the typed inputs and outputs and generate the output in the expected type.
+
+![magic](https://media1.tenor.com/m/IOEsG9ldvhAAAAAd/mr-bean.gif)
+
diff --git a/support/plugins/mtllm/docs/docs/md/docs/building-blocks/language_models.md b/support/plugins/mtllm/docs/docs/md/docs/building-blocks/language_models.md
new file mode 100644
index 000000000..8533ce3ef
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/md/docs/building-blocks/language_models.md
@@ -0,0 +1,106 @@
+# Language Models
+
+Language models are the most important building block of MTLLM. Without them we can't achieve neuro-symbolic programming.
+
+Let's first make sure you can set up your language model. MTLLM supports clients for many remote and local LMs. You can even create your own very easily if you want to.
+
+## Setting up a LM client
+
+In this section, we will go through the process of setting up an OpenAI `GPT-4o` language model client. First, make sure that you have installed the necessary dependencies by running `pip install mtllm[openai]`.
+
+```python
+import:py from mtllm.llms.openai, OpenAI;
+
+my_llm = OpenAI(model_name="gpt-4o");
+```
+
+Make sure to set the `OPENAI_API_KEY` environment variable with your OpenAI API key.
+
+## Directly calling the LM
+
+You can directly call the LM by giving the raw prompts as well.
+
+```python
+my_llm("What is the capital of France?");
+```
+
+You can also pass the `max_tokens`, `temperature` and other parameters to the LM.
+
+```python
+my_llm("What is the capital of France?", max_tokens=10, temperature=0.5);
+```
+
+## Using the LM with MTLLM
+
+The intended use of MTLLM's LMs is with `jaclang`'s `BY_LLM` feature.
+
+### With Abilities and Methods
+
+```python
+can function(arg1: str, arg2: str) -> str by llm();
+```
+
+### With Classes
+
+```python
+new_object = MyClass(arg1: str by llm());
+```
+
+### You can pass the following attributes to the `by llm()` feature:
+
+- `method` (default: `Normal`): Reasoning method to use. Can be `Normal`, `Reason` or `Chain-of-Thoughts`.
+- `tools` (default: `None`): Tools to use. This is a list of abilities to use with ReAct Prompting method.
+- `model specific parameters`: You can pass model-specific parameters as well, for example `max_tokens`, `temperature`, etc.
+
+## Enabling Verbose Mode
+
+You can enable the verbose mode to see the internal workings of the LM.
+
+```python
+import:py from mtllm.llms, OpenAI;
+
+my_llm = OpenAI(model_name="gpt-4o", verbose=True);
+```
+
+## Remote LMs
+
+These language models are provided as managed services. To access them, simply sign up and obtain an API key.
+
+> **NOTICE**
+>
+> Before calling any of the remote language models listed below, make sure to set the corresponding environment variable with your API key. Use Chat models for better performance.
+
+```python
+llm = mtllm.llms.{provider_listed_below}(model_name="your model", verbose=True/False);
+```
+
+1. `OpenAI` - OpenAI's gpt-3.5-turbo, gpt-4, gpt-4-turbo, gpt-4o [model zoo](https://platform.openai.com/docs/models)
+2. `Anthropic` - Anthropic's Claude 3 & Claude 3.5 - Haiku, Sonnet, Opus [model zoo](https://docs.anthropic.com/en/docs/about-claude/models)
+3. `Groq` - Groq's Fast Inference Models [model zoo](https://console.groq.com/docs/models)
+4. `Together` - Together's hosted OpenSource Models [model zoo](https://docs.together.ai/docs/inference-models)
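+
+For instance, instantiating one of the clients above follows the generic pattern shown earlier; a concrete sketch looks like this (the model name is just an example -- check the provider's model zoo for current names, and set the matching API key environment variable first):
+
+```python
+import:py from mtllm.llms, Anthropic;
+
+# Assumes the ANTHROPIC_API_KEY environment variable is set.
+glob llm = Anthropic(model_name="claude-3-5-sonnet-20240620", verbose=False);
+```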
+
+## Local LMs
+
+### Ollama
+
+Start an Ollama server by following the tutorial [here](https://github.com/ollama/ollama). Then you can use it as follows:
+
+```python
+import:py from mtllm.llms.ollama, Ollama;
+
+llm = Ollama(host="ip:port of the ollama server", model_name="llama3", verbose=True/False);
+```
+
+### HuggingFace
+
+You can use any of HuggingFace's language models as well: [models](https://huggingface.co/models?pipeline_tag=text-generation)
+
+```python
+import:py from mtllm.llms.huggingface, HuggingFace;
+
+llm = HuggingFace(model_name="microsoft/Phi-3-mini-4k-instruct", verbose=True/False);
+```
+
+> **NOTICE**
+>
+> We are constantly adding new LMs to the library. If you want to add a new LM, please open an issue [here](https://github.com/Jaseci-Labs/mtllm/issues).
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/md/docs/building-blocks/object_init.md b/support/plugins/mtllm/docs/docs/md/docs/building-blocks/object_init.md
new file mode 100644
index 000000000..a28e68592
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/md/docs/building-blocks/object_init.md
@@ -0,0 +1,78 @@
+# Object Initialization
+
+As MTLLM is really great at handling typed outputs, we have added the ability to initialize a new object by providing only a few of the required fields. MTLLM will automatically fill the rest of the fields based on the given context.
+
+This behavior is very hard to achieve in other languages, but with MTLLM, it is as simple as providing the required fields and letting the MTLLM do the rest.
+
+In the following example, we are initializing a new object of type `Task` by providing only the `description` field. The `time_in_min` and `priority_out_of_10` fields are automatically filled by the MTLLM based on the given context after a step of reasoning.
+
+```python
+import:py from mtllm.llms, OpenAI, Ollama;
+
+glob llm = OpenAI(model_name="gpt-4o");
+
+obj Task {
+ has description: str;
+ has time_in_min: int,
+ priority_out_of_10: int;
+}
+
+with entry {
+ task_contents = [
+ "Have some sleep",
+ "Enjoy a better weekend with my girlfriend",
+ "Work on Jaseci Project",
+ "Teach EECS 281 Students",
+ "Enjoy family time with my parents"
+ ];
+ tasks = [];
+ for task_content in task_contents {
+ task_info = Task(description = task_content by llm(method="Reason"));
+ tasks.append(task_info);
+ }
+ print(tasks);
+}
+```
+```python
+# Output
+[
+ Task(description='Have some sleep', time_in_min=30, priority_out_of_10=5),
+ Task(description='Enjoy a better weekend with my girlfriend', time_in_min=60, priority_out_of_10=7),
+ Task(description='Work on Jaseci Project', time_in_min=120, priority_out_of_10=8),
+ Task(description='Teach EECS 281 Students', time_in_min=90, priority_out_of_10=9),
+ Task(description='Enjoy family time with my parents', time_in_min=60, priority_out_of_10=7)
+]
+```
+
+Here is another example with nested custom types,
+
+```python
+import:py from jaclang.core.llms, OpenAI;
+
+glob llm = OpenAI(model_name="gpt-4o");
+
+obj Employer {
+ has name: 'Employer Name': str,
+ location: str;
+}
+
+obj 'Person'
+Person {
+ has name: str,
+ age: int,
+ employer: Employer,
+ job: str;
+}
+
+with entry {
+ info: "Person's Information": str = "Alice is a 21 years old and works as an engineer at LMQL Inc in Zurich, Switzerland.";
+ person = Person(by llm(incl_info=(info)));
+ print(person);
+}
+```
+```python
+# Output
+Person(name='Alice', age=21, employer=Employer(name='LMQL Inc', location='Zurich, Switzerland'), job='engineer')
+```
+
+In the above example, we have initialized a new object of type `Person` by providing only `info` as additional context. The `name`, `age`, `employer`, and `job` fields are automatically filled by the MTLLM based on the given context.
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/md/docs/building-blocks/semstrings.md b/support/plugins/mtllm/docs/docs/md/docs/building-blocks/semstrings.md
new file mode 100644
index 000000000..2cffd7d42
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/md/docs/building-blocks/semstrings.md
@@ -0,0 +1,106 @@
+# Semstrings
+
+The core idea behind MT-LLM is that if the program has been written in a readable manner, with type-safety, an LLM would be able to understand the task required to be performed using _meaning_ embedded within the code.
+
+However, there are instances where this is not the case. Hence, a new _meaning_ insertion code abstraction called "**semstrings**" has been introduced in MT-LLM.
+
+## Where code is not meaningful enough!
+
+Let's look into an instance where the existing code constructs are not sufficient to describe the meaning of the code for an LLM.
+
+```python | apple.jac
+import:py from mtllm.llms, OpenAI;
+
+glob llm = OpenAI();
+
+obj item {
+ has name : str,
+ category : str = '';
+}
+
+obj shop {
+ has item_dir:dict[str,item];
+
+ can categorize(name:str) -> str by llm();
+}
+
+with entry {
+ shop_inv = shop();
+ apple = item(name='apple');
+ apple.category = categorize(apple.name);
+ shop_inv.item_dir[apple.name] = apple.category;
+}
+```
+
+This is a partial code that can be used as a shopkeeping app where each item name is tagged with its category. However, in this example, you can observe in line 18 that the item name is passed in as 'apple', which can be ambiguous for an LLM, as apple can mean the fruit as well as a tech product. To resolve this problem we can use much more descriptive variable names. For instance, instead of ```item``` we can use ```tech_item```. However, adding more descriptive names for objects, variables and functions will hinder the reusability of object fields as the reference names become too long.
+
+## Semstrings to uplift 'meaning'
+
+
+As the existing code abstractions do not fully allow the programmer to express their meaning, we have added an extra feature you can use to embed meaning directly as text into your code. We call these text annotations **semstrings**.
+
+Let's see how we can add semstrings to the existing program above.
+
+```python | apple.jac
+import:py from mtllm.llms, OpenAI;
+
+glob llm = OpenAI();
+
+obj 'An edible product'
+item {
+ has name : str,
+ category : str = '';
+}
+
+obj 'Food store inventory'
+shop {
+ has item_dir:'Inventory of shop':dict[str,item];
+
+ can 'categorize the edible as fruit, vegetables, sweets etc'
+ categorize(name: str) -> 'Item category': str by llm();
+}
+
+with entry {
+ shop_inv = shop();
+ apple = item(name='apple');
+ apple.category = categorize(apple.name);
+ shop_inv.item_dir["ID_876837"] = apple;
+}
+```
+In this example we add semstrings that attach semantic meaning to existing code constructs such as variables, objects and functions. The semstring of each item is linked with its signature and is included when generating the prompt for the LLM. These small descriptions add more context, helping the LLM give a much more accurate response.
+
+## How to add semstrings?
+
+The below examples show different instances where semstrings can be inserted.
+
+### Variables / Object Fields Declaration
+
+```python
+glob name: 'semstring': str = 'sample value'
+```
+### Function / Method Declaration
+
+```python
+can 'semstring'
+function_name(arg_1: 'semstring': type ...) {
+ #function body
+}
+```
+
+### Object / Class Declaration
+
+```python
+obj 'semstring' object_name {
+ # Object fields
+ # Object methods
+}
+```
+
+### With ```by llm()```
+
+```python
+can 'semstring_for_action'
+function_name (arg_1:'semstring_input': type ...)
+-> 'semstring_output': type
+by llm();
+```
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/md/docs/design-impl/inference_engine.md b/support/plugins/mtllm/docs/docs/md/docs/design-impl/inference_engine.md
new file mode 100644
index 000000000..ae05513c9
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/md/docs/design-impl/inference_engine.md
@@ -0,0 +1,272 @@
+# MTLLM Inference Engine
+
+## 1. Overview
+
+The MTLLM (Meaning-Typed Large Language Model) Inference Engine is a core component of the MTLLM framework. It is responsible for managing the interaction between the application, the semantic registry, and the underlying Large Language Model (LLM). The Inference Engine handles the process of constructing prompts, managing LLM interactions, processing outputs, and implementing error handling and self-correction mechanisms.
+
+
+> :Mermaid
+>
+> graph TD
+> A[Jaclang Application] --> B[Compilation]
+> B --> C[SemRegistry]
+> C --> D[Pickle File]
+> A --> E[Runtime]
+> E --> F[MTLLM Inference Engine]
+> F --> G[LLM Model]
+> F --> H[Tool Integration]
+> D -.-> F
+> G --> I[Output Processing]
+> I --> J[Error Handling]
+> J -->|Error| F
+> J -->|Success| K[Final Output]
+
+
+## 2. Key Components
+
+The MTLLM Inference Engine consists of several key components:
+
+1. Prompt Constructor
+2. LLM Interface
+3. Output Processor
+4. Error Handler
+5. Tool Integrator
+
+### 2.1 Prompt Constructor
+
+The Prompt Constructor is responsible for building the input prompt for the LLM. It incorporates semantic information from the SemRegistry, user inputs, and contextual data to create a comprehensive and meaningful prompt.
+
+Key features:
+- Semantic enrichment using SemRegistry data
+- Dynamic prompt structure based on the chosen method (ReAct, Reason, CoT)
+- Integration of type information and constraints
+- Inclusion of available tools and their usage instructions
+
+Files involved:
+- [`aott.py`](mtllm/aott.py) # aott_raise, get_all_type_explanations
+- [`plugin.py`](mtllm/plugin.py) # with_llm method
+- [`types.py`](mtllm/types.py) # Information, InputInformation, OutputHint, Tool classes
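+
+For intuition, a constructed prompt roughly concatenates the action's semstring, the semantically annotated inputs, and the expected output hint. The sketch below is illustrative only; the function and field names are ours, not the MTLLM API:
+
+```python
+# Hypothetical sketch: assemble a prompt from semantic annotations.
+def build_prompt(action: str, inputs: dict, output_hint: str, method: str) -> str:
+    input_lines = "\n".join(
+        f"{name} ({meta['type']}): {meta['semstr']} = {meta['value']!r}"
+        for name, meta in inputs.items()
+    )
+    return (
+        f"[Action] {action}\n"
+        f"[Inputs]\n{input_lines}\n"
+        f"[Output] {output_hint}\n"
+        f"[Method] {method}"
+    )
+
+print(build_prompt(
+    "categorize the edible as fruit, vegetables, sweets etc",
+    {"name": {"type": "str", "semstr": "Name of the item", "value": "apple"}},
+    "Item category (str)",
+    "Reason",
+))
+```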
+
+### 2.2 LLM Interface
+
+The LLM Interface manages the communication between the MTLLM framework and the underlying Large Language Model. It handles sending prompts to the LLM and receiving raw outputs.
+
+Key features:
+- Abstraction layer for different LLM providers
+- Handling of API communication and error management
+- Handling Multi-Modal Inputs if applicable
+
+Files involved:
+- [`aott.py`](mtllm/aott.py) # aott_raise
+- [`llms/base.py`](mtllm/llms/base.py) # BaseLLM class, __call__, __infer__
+
+### 2.3 Output Processor
+
+The Output Processor is responsible for parsing and validating the raw output from the LLM. It ensures that the output meets the expected format and type constraints.
+
+Key features:
+- Extraction of relevant information from LLM output
+- Type checking and format validation
+- Conversion of string representations to Python objects (when applicable)
+
+> :Mermaid
+>
+> sequenceDiagram
+> participant A as Application
+> participant M as MTLLM Engine
+> participant S as SemRegistry
+> participant L as LLM Model
+> participant T as Tools
+> participant E as Evaluator
+> A->>M: Call by_llm()
+> M->>S: Fetch Semantic Info
+> M->>M: Construct Prompt
+> M->>L: Send Prompt
+> L->>M: Return Raw Output
+> M->>E: Evaluate Output
+> alt Evaluation Successful
+> E->>M: Return Result
+> M->>A: Return Final Output
+> else Evaluation Failed
+> E->>M: Return Error
+> M->>M: Construct Error Prompt
+> M->>L: Send Error Prompt
+> L->>M: Return Corrected Output
+> M->>E: Re-evaluate Output
+> end
+> opt Tool Usage Required
+> M->>T: Execute Tool
+> T->>M: Return Tool Result
+> M->>L: Include Tool Result in Prompt
+> end
+
+Files involved:
+- [`aott.py`](mtllm/aott.py) # aott_raise
+- [`llms/base.py`](mtllm/llms/base.py) # BaseLLM class, BaseLLM.resolve_output, BaseLLM._extract_output, BaseLLM.to_object, BaseLLM._fix_output
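+
+As a rough illustration of the extract, parse, and validate shape (the real logic lives in `BaseLLM.resolve_output` and its helpers; the `[Output]` marker and `process_output` function below are assumptions for the sketch):
+
+```python
+import ast
+
+def process_output(raw: str, expected_type: type) -> object:
+    # Assume the answer follows an "[Output]" marker in the raw LLM text.
+    value_str = raw.rsplit("[Output]", 1)[-1].strip()
+    value = ast.literal_eval(value_str)  # string representation -> Python object
+    if not isinstance(value, expected_type):
+        raise TypeError(f"expected {expected_type.__name__}, got {type(value).__name__}")
+    return value
+
+print(process_output("...reasoning...\n[Output] ['B', '.', 'E']", list))  # ['B', '.', 'E']
+```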
+
+### 2.4 Error Handler
+
+The Error Handler manages error detection, classification, and the self-correction process. It works closely with the Output Processor to identify issues and initiate corrective actions.
+
+Key features:
+- Error detection and classification
+- Generation of targeted feedback for the LLM
+- Management of the self-correction loop
+- Implementation of fallback strategies
+
+> :Mermaid
+>
+> graph TD
+> A[LLM Output] --> B{Validate Output}
+> B -->|Valid| C[Return Result]
+> B -->|Invalid| D[Classify Error]
+> D --> E[Generate Error Feedback]
+> E --> F[Create Self-Correction Prompt]
+> F --> G[Submit to LLM]
+> G --> H{Check Retry Count}
+> H -->|Max Retries Reached| I[Return Error to Application]
+> H -->|Retries Available| B
+
+Files involved:
+- [`llms/base.py`](mtllm/llms/base.py) # BaseLLM._check_output , BaseLLM._extract_output, BaseLLM.to_object, BaseLLM._fix_output
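+
+The self-correction loop can be pictured as the following minimal sketch, where `llm_call` and `validate` are caller-supplied stand-ins for the real model call and output check:
+
+```python
+from typing import Callable, Optional
+
+def run_with_correction(
+    prompt: str,
+    llm_call: Callable[[str], str],
+    validate: Callable[[str], Optional[str]],  # returns an error message, or None if valid
+    max_tries: int = 10,
+) -> str:
+    for _ in range(max_tries):
+        raw = llm_call(prompt)
+        error = validate(raw)
+        if error is None:
+            return raw
+        # Feed targeted feedback back to the model and retry.
+        prompt = f"{prompt}\n\nPrevious output:\n{raw}\nError: {error}\nFix the error and answer again."
+    raise RuntimeError("maximum correction retries reached")
+```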
+
+
+### 2.5 Tool Integrator
+
+The Tool Integrator manages the integration and execution of external tools within the inference process. It allows the LLM to leverage additional capabilities when needed.
+
+Key features:
+- Integration of tool results into the LLM prompt
+- Error handling for tool execution in ReAct mode
+
+> :Mermaid
+>
+> sequenceDiagram
+> participant A as Application
+> participant M as MTLLM Engine
+> participant L as LLM Model
+> participant T as Tools
+> A->>M: Call by_llm()
+> M->>L: Send Prompt
+> L->>M: Return Tool Usage Request
+> M->>T: Execute Tool
+> T->>M: Return Tool Result
+> M->>L: Include Tool Result in Prompt
+> L->>M: Return Final Output
+> M->>A: Return Final Output
+
+Files involved:
+- [`plugin.py`](mtllm/plugin.py) # callable_to_tool
+- [`types.py`](mtllm/types.py) # Tool class
+- [`tools/base.py`](mtllm/tools/base.py) # Tool class
+- [`tools/`](mtllm/tools) # Predefined tools
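+
+Conceptually, a plain callable becomes a tool by pairing its signature with a description that can be advertised in the prompt. The real conversion is done by `callable_to_tool`; the sketch below is a simplification with a local stand-in function:
+
+```python
+import inspect
+from typing import Callable
+
+def describe_tool(fn: Callable) -> str:
+    # Pair the function's signature with its docstring for the prompt.
+    doc = (fn.__doc__ or "").strip()
+    return f"{fn.__name__}{inspect.signature(fn)}: {doc}"
+
+def wikipedia_summary(title: str) -> str:
+    """Return a short summary of a Wikipedia article."""  # local stand-in
+    return f"Summary of {title}"
+
+print(describe_tool(wikipedia_summary))
+# wikipedia_summary(title: str) -> str: Return a short summary of a Wikipedia article.
+```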
+
+## 3. Inference Process
+
+The MTLLM Inference Engine follows a structured process for each inference request:
+
+1. **Initialization**: The inference process begins when the `with_llm` function is called from the application.
+
+2. **Semantic Information Retrieval**: The engine queries the SemRegistry to retrieve relevant semantic information based on the current context and input parameters.
+
+3. **Prompt Construction**: The Prompt Constructor builds the initial prompt, incorporating semantic information, input data, and any relevant type constraints or tool descriptions.
+
+4. **LLM Interaction**: The constructed prompt is sent to the LLM via the LLM Interface. The raw output is received and passed to the Output Processor.
+
+5. **Output Processing**: The Output Processor parses the LLM's response, extracting the relevant information and performing initial validation.
+
+6. **Error Checking**: The processed output is checked for errors or inconsistencies. If issues are detected, the Error Handler is invoked to manage the self-correction process.
+
+7. **Tool Execution (if required)**: If the LLM's response indicates the need for tool usage, the Tool Integrator manages the execution of the required tool and integration of its results.
+
+8. **Iteration (if necessary)**: Steps 4-7 may be repeated if error correction or additional tool usage is required.
+
+9. **Final Output**: Once a valid output is obtained, it is returned to the calling application.
+
+> :Mermaid
+>
+> sequenceDiagram
+> participant App as Application
+> participant IE as Inference Engine
+> participant PC as Prompt Constructor
+> participant SR as Semantic Registry
+> participant LLM as LLM Interface
+> participant OP as Output Processor
+> participant EH as Error Handler
+> participant TI as Tool Integrator
+> App->>IE: Call by_llm()
+> IE->>SR: Retrieve semantic info
+> SR-->>IE: Return semantic info
+> IE->>PC: Construct prompt
+> PC-->>IE: Return initial prompt
+> loop Until valid output or max iterations
+> IE->>LLM: Send prompt
+> LLM-->>IE: Return raw output
+> IE->>OP: Process output
+> OP-->>IE: Return processed output
+> IE->>EH: Check for errors
+> alt Error detected
+> EH-->>IE: Return correction prompt
+> IE->>PC: Update prompt
+> else Tool required
+> IE->>TI: Execute tool
+> TI-->>IE: Return tool result
+> IE->>PC: Add tool result to prompt
+> else Valid output
+> IE->>App: Return final output
+> end
+> end
+
+
+## 4. Implementation Details
+
+### 4.1 `with_llm` Function
+
+The `with_llm` function serves as the main entry point for the MTLLM Inference Engine. It orchestrates the entire inference process, initializing the necessary components, managing the flow of information, and handling the iterative process of obtaining a valid output from the LLM.
+
+### 4.2 Error Handling and Self-Correction
+
+The Error Handler implements a sophisticated mechanism for detecting and correcting errors in the LLM's output. It maintains a count of correction attempts, generates targeted prompts for error correction, and determines when to terminate the correction process.
+
+### 4.3 Tool Integration
+
+The Tool Integrator manages the execution of external tools and the integration of their results into the inference process. It converts normal functions to tools and executes them in the context of the inference engine.
+
+## 5. Extensibility and Customization
+
+The MTLLM Inference Engine is designed with extensibility in mind. Key areas for customization include:
+
+1. **Prompting Strategies**: New prompting methods can be added by extending the model class or changing the MTLLM prompt constants (`MTLLM_SYSTEM_PROMPT`, `MTLLM_METHOD_PROMPTS`, etc.).
+2. **LLM Providers**: Support for new LLM providers can be added by implementing the BaseLLM interface.
+3. **Tool Integration**: New tools can be easily registered and integrated into the inference process.
+4. **Error Handling**: Custom error detection and correction strategies can be implemented by simple prompting changes.
+
+## 6. Performance Considerations
+
+The MTLLM Inference Engine is designed to balance performance and flexibility. Key performance considerations include:
+
+1. **Caching**: Implement caching mechanisms for frequently used prompts or intermediate results (see the sketch after this list).
+2. **Asynchronous Processing**: Utilize asynchronous programming techniques for non-blocking I/O operations, especially in LLM interactions.
+3. **Batching**: Implement batching strategies for processing multiple inference requests efficiently.
+4. **Resource Management**: Carefully manage memory and token usage, especially when dealing with large prompts or outputs.
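+
+As one illustration of the caching point above, a minimal in-process cache over fully constructed prompts could look like the sketch below; `call_llm` is a stand-in, and MTLLM does not necessarily ship such a cache:
+
+```python
+from functools import lru_cache
+
+def call_llm(prompt: str) -> str:
+    # Stand-in for a real provider round-trip.
+    return f"response to: {prompt}"
+
+@lru_cache(maxsize=256)
+def cached_call(prompt: str) -> str:
+    # Identical prompts are answered from the cache instead of re-querying the model.
+    return call_llm(prompt)
+```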
+
+## 7. Security Considerations
+
+Security is a critical aspect of the MTLLM Inference Engine design:
+
+1. **Input Sanitization**: Implement robust input sanitization to prevent injection attacks.
+2. **Tool Execution Sandboxing**: Execute external tools in a controlled environment to prevent unauthorized actions.
+3. **Output Validation**: Implement thorough output validation to ensure the LLM's responses don't contain harmful content.
+4. **API Key Management**: Securely manage and rotate API keys for LLM providers.
+
+## 8. Future Improvements
+
+Potential areas for future improvement of the MTLLM Inference Engine include:
+
+1. **Advanced Caching Strategies**: Implement more sophisticated caching mechanisms to improve performance.
+2. **Multi-Model Support**: Enable the use of multiple LLMs within a single inference process for enhanced capabilities.
+3. **Federated Learning Integration**: Explore the integration of federated learning techniques for privacy-preserving model updates.
+4. **Explainability Features**: Develop features to provide explanations for the LLM's decision-making process.
+5. **Adaptive Prompting**: Implement adaptive prompting strategies that evolve based on the success rates of different prompt structures.
+
+This documentation provides a comprehensive overview of the MTLLM Inference Engine's design and implementation. It covers the key components, the inference process, implementation details, extensibility options, and important considerations for performance and security.
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/md/docs/design-impl/sem_registry.md b/support/plugins/mtllm/docs/docs/md/docs/design-impl/sem_registry.md
new file mode 100644
index 000000000..e03a89c36
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/md/docs/design-impl/sem_registry.md
@@ -0,0 +1,205 @@
+
+
+# SemRegistry, SemInfo, and SemScope
+
+## Overview
+
+The semantic registry system in the MTLLM framework consists of three main classes: `SemInfo`, `SemScope`, and `SemRegistry`. These classes work together to provide a structured way of storing and retrieving semantic information about various elements in a program. This document outlines the design and implementation details of each class.
+
+> :Mermaid
+>
+> sequenceDiagram
+> participant C as Compiler
+> participant R as SemRegistry
+> participant S as SemScope
+> participant I as SemInfo
+> participant F as File System
+> C->>R: Initialize SemRegistry
+> loop For each AST node
+> C->>R: Get or Create SemScope
+> R->>S: Create if not exists
+> C->>I: Create SemInfo
+> C->>S: Add SemInfo to SemScope
+> S->>I: Store SemInfo
+> end
+> C->>R: Finalize Registry
+> R->>F: Save to Pickle File
+
+## SemInfo
+
+### Design
+
+`SemInfo` is designed to encapsulate semantic information for individual elements in a program. It stores three key pieces of information:
+
+1. `name`: The identifier of the element
+2. `type`: The type of the element (optional)
+3. `semstr`: A semantic string describing the element
+
+### Implementation
+
+```python
+from typing import Optional
+
+class SemInfo:
+    def __init__(self, name: str, type: Optional[str] = None, semstr: str = "") -> None:
+        self.name = name
+        self.type = type
+        self.semstr = semstr
+
+    def __repr__(self) -> str:
+        return f"{self.semstr} ({self.type}) ({self.name})"
+```
+
+#### Key Features:
+- Simple initialization with optional `type` and `semstr`
+- String representation includes all three attributes for easy debugging and display
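+
+For example, assuming the class above (expected output shown in the comment):
+
+```python
+info = SemInfo("price", "float", "price of the item in USD")
+print(repr(info))  # price of the item in USD (float) (price)
+```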
+
+## SemScope
+
+### Design
+
+`SemScope` represents a scope in the program, which can be nested. It includes:
+
+1. `scope`: The name of the scope
+2. `type`: The type of the scope (e.g., "class", "function")
+3. `parent`: A reference to the parent scope (optional)
+
+Additional features include:
+- String representation that shows the full scope hierarchy
+- Static method to create a `SemScope` from a string representation
+- Property to get the scope as a type string
+
+### Implementation
+
+```python
+from __future__ import annotations
+
+from typing import Optional
+
+class SemScope:
+    def __init__(self, scope: str, type: str, parent: Optional[SemScope] = None) -> None:
+        self.parent = parent
+        self.type = type
+        self.scope = scope
+
+    def __str__(self) -> str:
+        if self.parent:
+            return f"{self.parent}.{self.scope}({self.type})"
+        return f"{self.scope}({self.type})"
+
+    def __repr__(self) -> str:
+        return self.__str__()
+
+    @staticmethod
+    def get_scope_from_str(scope_str: str) -> Optional[SemScope]:
+        scope_list = scope_str.split(".")
+        parent = None
+        for scope in scope_list:
+            scope_name, scope_type = scope.split("(")
+            scope_type = scope_type[:-1]
+            parent = SemScope(scope_name, scope_type, parent)
+        return parent
+
+    @property
+    def as_type_str(self) -> Optional[str]:
+        if self.type not in ["class", "node", "obj"]:
+            return None
+        type_str = self.scope
+        node = self.parent
+        while node and node.parent:
+            if node.type not in ["class", "node", "obj"]:
+                return type_str
+            type_str = f"{node.scope}.{type_str}"
+            node = node.parent
+        return type_str
+```
+
+#### Key Features:
+- Nested structure representation through the `parent` attribute
+- String representation shows the full scope hierarchy
+- `get_scope_from_str` allows reconstruction of a `SemScope` hierarchy from a string
+- `as_type_str` property provides a string representation of the scope as a type, useful for type checking and inference
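+
+A small round-trip example, assuming the class above (expected output shown in the comments):
+
+```python
+scope = SemScope.get_scope_from_str("translator(module).shop(obj)")
+print(scope)              # translator(module).shop(obj)
+print(scope.as_type_str)  # shop
+```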
+
+## SemRegistry
+
+### Design
+
+`SemRegistry` serves as the main container and manager for semantic information. It stores `SemInfo` objects organized by `SemScope`. Key features include:
+
+1. Storage of semantic information in a nested dictionary structure
+2. Methods for adding new semantic information
+3. Flexible lookup functionality
+4. Utility methods for accessing and displaying the registry contents
+
+### Implementation
+
+```python
+from __future__ import annotations
+
+from typing import Optional
+
+class SemRegistry:
+    def __init__(self) -> None:
+        self.registry: dict[SemScope, list[SemInfo]] = {}
+
+    def add(self, scope: SemScope, seminfo: SemInfo) -> None:
+        # Reuse an existing key if an equivalent scope is already registered.
+        for k in self.registry.keys():
+            if str(k) == str(scope):
+                scope = k
+                break
+        else:
+            self.registry[scope] = []
+        self.registry[scope].append(seminfo)
+
+    def lookup(
+        self,
+        scope: Optional[SemScope] = None,
+        name: Optional[str] = None,
+        type: Optional[str] = None,
+    ) -> tuple[Optional[SemScope], Optional[SemInfo | list[SemInfo]]]:
+        if scope:
+            for k, v in self.registry.items():
+                if str(k) == str(scope):
+                    if name:
+                        for i in v:
+                            if i.name == name:
+                                return k, i
+                    elif type:
+                        for i in v:
+                            if i.type == type:
+                                return k, i
+                    else:
+                        return k, v
+        else:
+            for k, v in self.registry.items():
+                if name:
+                    for i in v:
+                        if i.name == name:
+                            return k, i
+                elif type:
+                    for i in v:
+                        if i.type == type:
+                            return k, i
+        return None, None
+
+    @property
+    def module_scope(self) -> SemScope:
+        for i in self.registry.keys():
+            if not i.parent:
+                break
+        return i
+
+    def pp(self) -> None:
+        for k, v in self.registry.items():
+            print(k)
+            for i in v:
+                print(f"  {i.name} {i.type} {i.semstr}")
+```
+
+#### Key Features:
+- Efficient storage using a dictionary with `SemScope` as keys and lists of `SemInfo` as values
+- `add` method handles the case of existing scopes and adds new `SemInfo` objects to the appropriate list
+- Flexible `lookup` method allows searching by scope, name, or type, with various combinations
+- `module_scope` property provides quick access to the top-level scope
+- `pp` (pretty print) method for easy debugging and inspection of the registry contents
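+
+Putting the three classes together, assuming the definitions above (expected output shown in the comments):
+
+```python
+registry = SemRegistry()
+module = SemScope("translator", "module")
+shop = SemScope("shop", "obj", parent=module)
+registry.add(shop, SemInfo("item_dir", "dict[str,item]", "Inventory of shop"))
+
+scope, info = registry.lookup(name="item_dir")
+print(scope)  # translator(module).shop(obj)
+print(info)   # Inventory of shop (dict[str,item]) (item_dir)
+```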
+
+## Usage and Interaction
+
+These classes work together to provide a comprehensive system for managing semantic information:
+
+1. `SemInfo` objects are created to represent individual program elements.
+2. `SemScope` objects are created to represent the hierarchical structure of the program.
+3. `SemRegistry` is used to store and organize `SemInfo` objects within their respective `SemScope`s.
+4. The `lookup` method of `SemRegistry` allows for flexible querying of semantic information based on various criteria.
+
+This system enables efficient storage and retrieval of semantic information, which is crucial for the MTLLM framework's ability to understand and reason about program structure and meaning during compilation and inference processes.
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/md/docs/faqs.md b/support/plugins/mtllm/docs/docs/md/docs/faqs.md
new file mode 100644
index 000000000..87b993045
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/md/docs/faqs.md
@@ -0,0 +1 @@
+# Frequently Asked Questions
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/md/docs/quickstart/installation.md b/support/plugins/mtllm/docs/docs/md/docs/quickstart/installation.md
new file mode 100644
index 000000000..c82748b18
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/md/docs/quickstart/installation.md
@@ -0,0 +1,64 @@
+# Installation
+
+To install MTLLM, run:
+
+```bash
+pip install mtllm
+```
+
+By default, MTLLM does not install any LLM integrations. To install the available integrations, include the relevant extra(s) below:
+
+> :Tabs
+> > :Tab title=OpenAI
+> >
+> > ```bash
+> > pip install mtllm[openai]
+> > ```
+>
+> > :Tab title=Anthropic
+> >
+> > ```bash
+> > pip install mtllm[anthropic]
+> > ```
+>
+> > :Tab title=Together
+> >
+> > ```bash
+> > pip install mtllm[together]
+> > ```
+>
+> > :Tab title=Ollama
+> >
+> > ```bash
+> > pip install mtllm[ollama]
+> > ```
+>
+> > :Tab title=Huggingface
+> >
+> > ```bash
+> > pip install mtllm[huggingface]
+> > ```
+>
+> > :Tab title=Groq
+> >
+> > ```bash
+> > pip install mtllm[groq]
+> > ```
+
+MTLLM supports multimodal LLMs. To support images and videos, you need to install the following extra(s):
+
+> :Tabs
+> > :Tab title=Image Support
+> >
+> > ```bash
+> > pip install mtllm[image]
+> > ```
+>
+> > :Tab title=Video Support
+> >
+> > ```bash
+> > pip install mtllm[video]
+> > ```
+> >
+
+Currently, only multimodal LLMs from OpenAI and Anthropic are supported. In the future, we plan to support multimodal LLMs from other providers as well.
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/md/docs/quickstart/minimal-working-example.md b/support/plugins/mtllm/docs/docs/md/docs/quickstart/minimal-working-example.md
new file mode 100644
index 000000000..5984ef7e6
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/md/docs/quickstart/minimal-working-example.md
@@ -0,0 +1,82 @@
+# Minimal Working Example
+
+Here we will walk you through a minimal working example of using MTLLM to translate a sentence from English to a target language.
+
+## Setup
+
+Before we start, make sure you have installed MTLLM & Jaclang. If not, follow the instructions [here](/docs/quickstart/installation).
+The following code snippet will be our starting point:
+
+```python | translator.jac
+can translate(eng_sentence: str, target_lang: str) -> str {
+    """Normally this would include the translation logic such as calling an API.
+    For the sake of this example, we will return a dummy translated sentence."""
+
+    return "Hola Mundo";
+}
+
+with entry {
+ print(translate("Hello World", "es"));
+}
+```
+
+Assuming we went with API-based translation, `target_lang` would be the language code of the target language, for example `es` for Spanish, `fr` for French, etc. But assume that you don't know the language code for the target language, or that you would like to give `target_lang` a description instead of a language code, for example `Spanish` instead of `es`, or `Language spoken in Somalia`. This is where you need the help of LLMs.
+
+## Using MTLLM
+
+### Import the LLM You Want to Use
+
+For this example, we will use OpenAI's GPT-3.5-turbo (default).
+
+```python | translator.jac
+import:py from mtllm.llms, OpenAI;
+
+glob llm = OpenAI();
+
+# Rest of the code
+```
+
+### Remove the Ability Body and Add the `by llm` Keyword
+
+```python | translator.jac
+import:py from mtllm.llms, OpenAI;
+
+glob llm = OpenAI();
+
+can translate(eng_sentence: str, target_lang: str) -> str by llm;
+
+with entry {
+ print(translate("Hello World", "Language spoken in Somalia"));
+}
+```
+
+That's it! 🎊
+
+Make sure to export your OpenAI API key as the environment variable `OPENAI_API_KEY` before running the code. Then run the following command to see the translated sentence:
+
+```bash
+jac run translator.jac
+```
+
+## Adding Additional Support to the LLMs
+
+In this example, we don't need to add any additional support for the LLM. But if you want to, you can add semstrings to variables, output type hints, and abilities, as in the following code snippet:
+
+```python | translator.jac
+import:py from mtllm.llms, OpenAI;
+
+glob llm = OpenAI();
+
+can 'Translate the given english sentence to the target language'
+translate(eng_sentence: str, target_lang: str) -> 'Translation': str by llm;
+
+with entry {
+ print(translate("Hello World", "Language spoken in Somalia"));
+}
+```
+
+You've successfully created a working example using Jaclang and MTLLM.
+
+Feel free to adapt and expand upon this example to suit your specific use case while exploring the extensive capabilities of MTLLM.
+
diff --git a/support/plugins/mtllm/docs/docs/md/docs/tips-n-tricks/create_own_lm.md b/support/plugins/mtllm/docs/docs/md/docs/tips-n-tricks/create_own_lm.md
new file mode 100644
index 000000000..9d7d5376b
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/md/docs/tips-n-tricks/create_own_lm.md
@@ -0,0 +1,87 @@
+# Create Your Own Language Model
+
+This guide will help you bring your own language model to be used with MTLLM. This is helpful if you have a self-hosted language model or are using a service that is not currently supported by MTLLM.
+
+> **IMPORTANT**
+>
+> This assumes that you have a proper understanding of how to run inference with your language model. If you are not sure about this, please refer to the documentation of your language model.
+
+## Steps
+
+1. Create a new class that inherits from `BaseLLM` class.
+
+In Python,
+```python | my_llm.py
+from mtllm.llms.base import BaseLLM
+
+class MyLLM(BaseLLM):
+    def __init__(self, verbose: bool = False, max_tries: int = 10, **kwargs):
+        self.verbose = verbose
+        self.max_tries = max_tries
+        # Your model initialization code here
+
+    def __infer__(self, meaning_in: str | list[dict], **kwargs) -> str:
+        # Your inference code here
+        # If you are using a Multimodal (VLLM) model, use the list of dict -> openai format input with encoded images
+        # kwargs are the model specific parameters
+        return 'Your response'
+```
+
+In Jaclang,
+```python | my_llm.jac
+import:py from mtllm.llms.base, BaseLLM;
+
+class MyLLM:BaseLLM: {
+    can init(verbose: bool=false, max_tries: int=10, **kwargs: dict) -> None {
+        self.verbose = verbose;
+        self.max_tries = max_tries;
+        # Your model initialization code here
+    }
+
+    can __infer__(meaning_in: str|list[dict], **kwargs: dict) -> str {
+        # Your inference code here
+        # If you are using a Multimodal (VLLM) model, use the list of dict -> openai format input with encoded images
+        # kwargs are the model specific parameters
+        return 'Your response';
+    }
+}
+```
+
+2. Initialize your model with the required parameters.
+
+```python | app.jac
+import:jac from my_llm, MyLLM; # For Jaclang
+import:py from my_llm, MyLLM; # For Python
+
+llm = MyLLM();
+```
+
+## Changing the Prompting Techniques
+
+You can change the prompting techniques by overriding the following parameters in your class.
+
+```python | my_llm.py
+from mtllm.llms.base import BaseLLM
+
+class MyLLM(BaseLLM):
+    MTLLM_SYSTEM_PROMPT = 'Your System Prompt'
+    MTLLM_PROMPT = 'Your Prompt' # Not Recommended to change this
+    MTLLM_METHOD_PROMPTS = {
+        "Normal": 'Your Normal Prompt',
+        "Reason": 'Your Reason Prompt',
+        "Chain-of-Thoughts": 'Your Chain-of-Thought Prompt',
+        "ReAct": 'Your ReAct Prompt',
+    }
+    OUTPUT_FIX_PROMPT = 'Your Output Fix Prompt'
+    OUTPUT_CHECK_PROMPT = 'Your Output Check Prompt'
+
+    # Rest of the code
+```
+
+Check the [API Reference](/docs/api/mtllm) for more information on prompting techniques.
+
+That's it! You have successfully created your own language model to be used with MTLLM.
+
+> **NOTICE**
+>
+> We are constantly adding new LMs to the library. If you want to add a new LM, please open an issue [here](https://github.com/Jaseci-Labs/mtllm/issues).
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/md/docs/tips-n-tricks/existing_application.md b/support/plugins/mtllm/docs/docs/md/docs/tips-n-tricks/existing_application.md
new file mode 100644
index 000000000..55a9907f9
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/md/docs/tips-n-tricks/existing_application.md
@@ -0,0 +1,103 @@
+# Using MTLLM in your existing Application
+
+As Jaclang is a language that supersets Python, you can easily integrate it into your existing Python application. This guide shows you how, by integrating an AI feature into a simple Task Manager application built using Python.
+
+## Python Task Manager Application
+
+Let's start by creating a simple Task Manager application using Python. The application will have the following features:
+1. Add a task
+2. View all tasks
+3. Delete a task
+
+```python | task_manager.py
+tasks: list[str] = []
+
+def add_task(task: str) -> None:
+    tasks.append(task)
+
+def view_tasks() -> None:
+    for i, task in enumerate(tasks):
+        print(f"{i+1}. {task}")
+
+def delete_task(index: int) -> None:
+    del tasks[index]
+
+def main() -> None:
+    while True:
+        print("1. Add Task")
+        print("2. View Tasks")
+        print("3. Delete Task")
+        print("4. Exit")
+        choice = int(input("Enter your choice: "))
+        if choice == 1:
+            task = input("Enter task: ")
+            add_task(task)
+        elif choice == 2:
+            view_tasks()
+        elif choice == 3:
+            index = int(input("Enter task number to delete: ")) - 1
+            delete_task(index)
+        elif choice == 4:
+            break
+        else:
+            print("Invalid choice")
+
+if __name__ == "__main__":
+    main()
+```
+
+You can run the application using the following command:
+
+```bash
+python task_manager.py
+```
+
+## Integrating Jaclang
+
+Currently, the tasks in the Task Manager are just strings. Let's add a feature where, when the user adds a task, the application decides the priority of the task and the estimated time to complete it based on the previous tasks.
+
+### Creating the Jac Module
+
+```python | taskman.jac
+import:py from mtllm.llms, OpenAI;
+
+glob llm = OpenAI();
+
+obj Task {
+    has description: str,
+        priority: 'Priority of the Task (0-10)': int,
+        time: 'Estimated Time Required to Finish (min)': int;
+}
+
+can create_task(description: str, prev_tasks: list[Task]) -> Task
+by llm(method="Reason");
+```
+
+Just like that, with a few lines of code, you have an AI-powered Task Manager. The `create_task` function will take the description of the task and the previous tasks, and return a Task object with the priority and estimated time to complete the task.
+
+### Integrating the Jac Module
+
+```python | task_manager.py
+from jaclang import jac_import
+
+# Importing the create_task function
+create_task = jac_import("taskman.jac").create_task
+
+tasks: list = []
+
+def add_task(task):
+    task = create_task(task, tasks)
+    tasks.append(task)
+
+# Rest of the code remains the same
+```
+
+Now when the user adds a task, the application will use MTLLM to decide the priority and estimated time to complete the task based on the previous tasks.
+
+You can run the application using the same command:
+
+```bash
+python task_manager.py
+```
+
+This is just a simple example of how you can integrate Jaclang into your existing Python application. You can use Jaclang to add AI features to your application without having to write complex AI code.
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/md/docs/tips-n-tricks/when_to_use_semstrings.md b/support/plugins/mtllm/docs/docs/md/docs/tips-n-tricks/when_to_use_semstrings.md
new file mode 100644
index 000000000..d5c63313e
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/md/docs/tips-n-tricks/when_to_use_semstrings.md
@@ -0,0 +1 @@
+# When to use Semstrings
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/md/docs/tutorials/rpg_game.md b/support/plugins/mtllm/docs/docs/md/docs/tutorials/rpg_game.md
new file mode 100644
index 000000000..95ae63ff9
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/md/docs/tutorials/rpg_game.md
@@ -0,0 +1,204 @@
+# Creating a Level Generator for an RPG Game
+
+Procedurally generated maps in video games have become a hot topic in recent years among the gaming community. The algorithms for generating these maps are extremely complicated and require months of development. Instead of symbolically written programs, what if we could use generative models to generate these maps?
+
+In this tutorial we will show you how to generate game maps for a simple game where the map can be expressed using a list of strings.
+
+## What is a level?
+
+A level in this hypothetical game can be represented using a list of strings, as shown below.
+
+```python
+glob level = ['BBBBBBBBBB',
+              'B....E...B',
+              'B.P......B',
+              'B.....B..B',
+              'B.....BE.B',
+              'B....BBBBB',
+              'B....B...B',
+              'B.....E..B',
+              'BBBBBBBBBB'];
+```
+
+In this level each character represents a different element in the map:
+
+- 'B': Walls
+- '.': Floor
+- 'E': Enemy
+- 'P': Player
+
+## Building a level map generator
+
+The straightforward approach to building a map generator is to ask the LLM to directly generate such a map as a list of strings. MTLLM allows you to do this by defining a function or a method. However, here we will discuss a more object-oriented way of programming with LLMs, which allows the model to 'think' using objects.
+
+### Level Configuration
+
+```python | level_manager.jac
+obj Level {
+    has name: str,
+        difficulty: int;
+    has width: int,
+        height: int,
+        num_wall: int,
+        num_enemies: int;
+    has time_countdown: int,
+        n_retries_allowed: int;
+}
+```
+
+Each level should have a basic configuration which describes the level in an abstract format. This level object embeds the difficulty of the level, the number of enemies and obstacles, and other level configuration parameters.
+
+However, filling in the values for these fields requires cognitive capacity, for which we will use an LLM later on.
+
+### Building the Map Elements
+
+- **A coordinate system**
+
+```python | level_manager.jac
+obj Position {
+    has x: int,
+        y: int;
+}
+```
+
+As the map we are aiming to generate is a 2D map, the position of each object on the map can be designated using the ```Position``` custom type. It simply represents a Cartesian 2D coordinate system.
+
+- **Object to Represent a Wall**
+
+```python | level_manager.jac
+obj Wall {
+    has start_pos: Position,
+        end_pos: Position;
+}
+```
+
+The wall object represents a straight wall, as all obstacles in the 2D map can be represented by a collection of intersecting wall objects. Each wall object has a start position as well as an end position.
+
+- **Map represented using objects**
+
+```python | level_manager.jac
+obj Map {
+    has level: Level;
+    has walls: list[Wall],
+        small_obstacles: list[Position];
+    has enemies: list[Position];
+    has player_pos: Position;
+}
+```
+
+This Map object will hold the exact positions of all objects in the map. This is the object that we will generate using MT-LLM. Each field of this object is either one of the custom types described above or a derivative of them.
+
+### The Level Manager
+
+To manage all the generations, we can define a level manager object which holds a record of previous level configurations and maps; these can be fed to the LLM to give it context about the play style of the player. We will be using OpenAI's GPT-4o as the LLM in this tutorial.
+
+```python | level_manager.jac
+import:py from mtllm.llms, OpenAI;
+glob llm = OpenAI(model_name="gpt-4o");
+
+obj LevelManager {
+    has current_level: int = 0,
+        current_difficulty: int = 1,
+        prev_levels: list[Level] = [],
+        prev_level_maps: list[Map] = [];
+
+    '''Generates the Next level configuration based upon previous playthroughs'''
+    can create_next_level(last_levels: list[Level],
+                          difficulty: int,
+                          level_width: int,
+                          level_height: int) -> Level by llm(temperature=1.0);
+
+    '''Get the Next Level'''
+    can get_next_level -> tuple(Level, Map);
+
+    '''Generate the Map as a List of Strings'''
+    can get_map(map: Map) -> list[str];
+}
+```
+
+We have three methods defined under the level manager. Each will handle a separate set of tasks.
+
+- ```create_next_level``` : Takes in configuration data from previously played levels and, using the LLM, generates the new level configuration parameters, outputting a ```Level``` object which describes the new map.
+
+- ```get_next_level``` : Uses ```create_next_level``` to generate the ```Level``` config object, which is then used to fill in the rest of a newly initiated ```Map``` object using an LLM. This is where the actual map generation happens. However, the generated map cannot be visualized yet.
+
+- ```get_map``` : This method generates the actual list of strings that can be used in a game, from the ```Map``` object produced by the ```get_next_level``` method. This does not require an LLM, as all objects of the map are included in the ```Map``` object with their exact positions.
+
+The implementations of the above methods are as follows.
+
+```python | level_manager.jac
+:obj:LevelManager:can:get_next_level {
+    self.current_level += 1;
+    # Keeping Only the Last 3 Levels
+    if len(self.prev_levels) > 3 {
+        self.prev_levels.pop(0);
+        self.prev_level_maps.pop(0);
+    }
+    # Generating the New Level
+    new_level = self.create_next_level(
+        self.prev_levels,
+        self.current_difficulty,
+        20, 20
+    );
+    self.prev_levels.append(new_level);
+    # Generating the Map of the New Level
+    new_level_map = Map(level=new_level by llm());
+    self.prev_level_maps.append(new_level_map);
+    # Increasing the Difficulty at the end of every 2 Levels
+    if self.current_level % 2 == 0 {
+        self.current_difficulty += 1;
+    }
+    return (new_level, new_level_map);
+}
+```
+
+In the ```get_next_level``` method there are two LLM calls, which we will discuss in this tutorial; the other parts relate to the functionality of the game.
+
+- ```Line 9-13``` : Here the saved data from previous levels, along with the basic level config parameters of the new level, are given as inputs. As the output type of this method was specified above to be a ```Level``` object, the LLM will initialize the object and fill in its values. As the LLM hyperparameter temperature is set to 1.0 in the method declaration, the LLM is encouraged to be more creative.
+
+- ```Line 16``` : Here the programmer initiates a Map object, passing in only the level parameter with the newly generated ```level``` object, and asks the LLM to fill in the rest of the fields by generating the relevant types. This nested type approach ensures the output is formatted exactly how you expect it to be.
+
+```python | level_manager.jac
+:obj:LevelManager:can:get_map {
+    map_tiles = [['.' for _ in range(map.level.width)] for _ in range(map.level.height)];
+    for wall in map.walls {
+        for x in range(wall.start_pos.x, wall.end_pos.x + 1) {
+            for y in range(wall.start_pos.y, wall.end_pos.y + 1) {
+                map_tiles[y-1][x-1] = 'B';
+            }
+        }
+    }
+    for obs in map.small_obstacles {
+        map_tiles[obs.y-1][obs.x-1] = 'B';
+    }
+
+    for enemy in map.enemies {
+        map_tiles[enemy.y-1][enemy.x-1] = 'E';
+    }
+    map_tiles[map.player_pos.y-1][map.player_pos.x-1] = 'P';
+    map_tiles = [['B'] + row + ['B'] for row in map_tiles];
+    map_tiles = [['B' for _ in range(map.level.width + 2)]] + map_tiles + [['B' for _ in range(map.level.width + 2)]];
+    return [''.join(row) for row in map_tiles];
+}
+```
+
+### Running the Program
+
+```python
+with entry {
+    level_manager = LevelManager();
+    for _ in range(2) {
+        (new_level, new_level_map) = level_manager.get_next_level();
+        print(new_level);
+        print('\n'.join(LevelManager.get_map(new_level_map)));
+    }
+}
+```
+
+This program will now generate two consecutive maps and print them on the terminal. By running this Jac file with ```jac run level_manager.jac```, you can simply test your program.
+
+## A full scale game demo
+
+For the sake of this tutorial we have not included the entire development of an actual game. The full game is available on our [jac-lang repo](https://github.com/Jaseci-Labs/jaclang/tree/main/examples/rpg_game). A sample demonstration of the game can be viewed below.
+
+[Demo Video](https://drive.google.com/file/d/1JXyWbmI6vJsjpNUnscRxdnK5vmo8312r/view?usp=sharing)
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/md/index.md b/support/plugins/mtllm/docs/docs/md/index.md
new file mode 100644
index 000000000..000efda10
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/md/index.md
@@ -0,0 +1,24 @@
+> :DarkLight
+> > :InLight
+> >
+> > ![Banner](docs/assets/light.svg)
+>
+> > :InDark
+> >
+> > ![Banner](docs/assets/dark.svg)
+
+
+[![PyPI version](https://img.shields.io/pypi/v/mtllm.svg)](https://pypi.org/project/mtllm/)
+
+The **MTLLM** Python library provides convenient access to a large number of easy-to-use and customizable APIs for the `by llm` feature of Jaseci's [Jaclang](https://github.com/Jaseci-Labs/jaclang).
+The library provides automatic output fixing, output type validation, different prompting techniques, and more.
+
+> [warning](:Icon) **IMPORTANT**
+>
+> Though this can be used with Python projects, it is primarily intended to be used with Jaseci's Jaclang.
+
+```bash | --no-wmbar
+pip install mtllm
+```
+
+> :ToCPrevNext
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/quickstart/installation.html b/support/plugins/mtllm/docs/docs/quickstart/installation.html
new file mode 100644
index 000000000..8a88b1064
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/quickstart/installation.html
@@ -0,0 +1,54 @@
+MTLLM API Documentation | Installation link InstallationTo install MTLLM run,
1link $ pip install mtllm
By default, MTLLM will not install any llm integrations, to install the available integrations, include the extra(s) below:
1link $ pip install mtllm[ openai]
1link $ pip install mtllm[ anthropic]
1link $ pip install mtllm[ together]
1link $ pip install mtllm[ ollama]
1link $ pip install mtllm[ huggingface]
1link $ pip install mtllm[ groq]
MTLLM Supports MultiModal LLMs. To Support Images and Videos, you need to install the following extra(s):
1link $ pip install mtllm[ image]
1link $ pip install mtllm[ video]
Currently, only multimodal LLMs from OpenAI and Anthropic are supported. In the future, we plan to support multimodal LLMs from other providers as well
Home
Design and Implementation chevron_right
Building Blocks chevron_right
API Reference chevron_right
Tips and Tricks chevron_right
FAQs
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/quickstart/minimal-working-example.html b/support/plugins/mtllm/docs/docs/quickstart/minimal-working-example.html
new file mode 100644
index 000000000..c3d2138c0
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/quickstart/minimal-working-example.html
@@ -0,0 +1,56 @@
+MTLLM API Documentation | Minimal Working Example link Minimal Working ExampleHere we will walk you through a minimal working example of using MTLLM to generate translate a sentence from English to a target language.
link SetupBefore we start, make sure you have installed MTLLM & Jaclang. If not, follow the instructions here .
+Following code snippet will be our starting point:
translator.jac 1link can translate( eng_sentence: str , target_lang: str ) - > str {
2link """Normally this would include the translation logic such as calling an API.
3link For the sake of this example, we will return a dummy translated sentence."""
4link
5link return "Hola Mundo" ;
6link }
7link
8link with entry {
9link print ( translate( "Hello World" , "es" ) ) ;
10link }
Assuming we went with API based translation, target_lang
would be the language code of the target language. For example, es
for Spanish, fr
for French, etc. But Assume that you don't know the language code for the target language, or you would like to provide a context to target_lang
instead of a language code. for example Spanish
instead of es
or Language spoken in Somalia
. This is where you need the help of LLMs.
link Using MTLLMlink Import the LLM You Want to UseFor this example, we will use OpenAI's GPT-3.5-turbo (default).
translator.jac 1link import : py from mtllm. llms, OpenAI;
2link
3link llm = OpenAI( ) ;
4link
5link
link Remove the Ability Body and Add by LLM
keywordtranslator.jac 1link import : py from mtllm. llms, OpenAI;
2link
3link llm = OpenAI( ) ;
4link
5link can translate( eng_sentence: str , target_lang: str ) - > str by llm;
6link
7link with entry {
8link print ( translate( "Hello World" , "Language spoken in Somalia" ) ) ;
9link }
Thats it! 🎊
Now you can run the code and see the translated sentence by running the following command:
+Makesure to export your OpenAI API key as an environment variable OPENAI_API_KEY
before running the code.
1link $ jac run translator.jac
link Adding Additional Support to the LLMsIn this example, we dont need to add any additional support to the LLMs. But if you want to add additional support, you can do so by adding SemStrings
to variables, output type hint and abilities the following code snippet:
translator.jac 1link import : py from mtllm. llms, OpenAI;
2link
3link llm = OpenAI( ) ;
4link
5link can 'Translate the given english sentence to the target language'
6link translate( eng_sentence: str , target_lang: str ) - > 'Translation' : str by llm;
7link
8link with entry {
9link print ( translate( "Hello World" , "Language spoken in Somalia" ) ) ;
10link }
You've successfully created a working example using the Jaclang and MTLLM.
Feel free to adapt and expand upon this example to suit your specific use case while exploring the extensive capabilities of MTLLM.
Home
Design and Implementation chevron_right
Building Blocks chevron_right
API Reference chevron_right
Tips and Tricks chevron_right
FAQs
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/tips-n-tricks/create_own_lm.html b/support/plugins/mtllm/docs/docs/tips-n-tricks/create_own_lm.html
new file mode 100644
index 000000000..91245add1
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/tips-n-tricks/create_own_lm.html
@@ -0,0 +1,54 @@
+MTLLM API Documentation | Create Your Own Language Model link Create Your Own Language ModelThis guide will help you to bring your own language model to be used with MTLLM. This is helpful if you have a self-hosted Language Model or you are using a different service that is not currently supported by MTLLM.
IMPORTANT
This assumes that you have a proper understanding on how to inference with your language model. If you are not sure about this, please refer to the documentation of your language model.
link StepsCreate a new class that inherits from BaseLLM
class. In Python,
my_llm.py 1link from mtllm. llms. base import BaseLLM
2link
3link class MyLLM ( BaseLLM) :
4link def __init__ ( self, verbose: bool = False , max_tries: int = 10 , ** kwargs) :
5link self. verbose = verbose
6link self. max_tries = max_tries
7link
8link
9link def __infer__ ( self, meaning_in: str | list [ dict ] , ** kwargs) - > str :
10link
11link
12link
13link return 'Your response'
In Jaclang,
my_llm.jac 1link import : py from mtlm. llms. base, BaseLLM;
2link
3link class MyLLM : BaseLLM: {
4link can init( verbose: bool = false, max_tries: int = 10 , ** kwargs: dict ) - > None {
5link self. verbose = verbose;
6link self. max_tries = max_tries;
7link
8link }
9link
10link can __infer__( meaning_in: str | list [ dict ] , ** kwargs: dict ) - > str {
11link
12link
13link
14link return 'Your response' ;
15link }
16link }
Initialize your model with the required parameters. app.jac 1link import : jac from my_llm, MyLLM;
2link import : py from my_llm, MyLLM;
3link
4link llm = MyLLM( ) ;
link Changing the Prompting TechniquesYou can change the prompting techniques overriding the the following parameters in your class.
my_llm.py 1link from mtllm. llms. base import BaseLLM
2link
3link class MyLLM ( BaseLLM) :
4link MTLLM_SYSTEM_PROMPT = 'Your System Prompt'
5link MTLLM_PROMPT = 'Your Prompt'
6link MTLLM_METHOD_PROMPTS = {
7link "Normal" : 'Your Normal Prompt' ,
8link "Reason" : 'Your Reason Prompt' ,
9link "Chain-of-Thoughts" : 'Your Chain-of-Thought Prompt' ,
10link "ReAct" : 'Your ReAct Prompt' ,
11link }
12link OUTPUT_FIX_PROMPT = 'Your Output Fix Prompt'
13link OUTPUT_CHECK_PROMPT = 'Your Output Check Prompt'
14link
15link
Check the API Reference for more information on prompting techniques.
Thats it! You have successfully created your own Language Model to be used with MTLLM.
NOTICE
We are constantly adding new LMs to the library. If you want to add a new LM, please open an issue here .
Home
Design and Implementation chevron_right
Building Blocks chevron_right
API Reference chevron_right
Tips and Tricks chevron_right
FAQs
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/tips-n-tricks/existing_application.html b/support/plugins/mtllm/docs/docs/tips-n-tricks/existing_application.html
new file mode 100644
index 000000000..171c419f8
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/tips-n-tricks/existing_application.html
@@ -0,0 +1,54 @@
+MTLLM API Documentation | Using MTLLM in your existing Application link Using MTLLM in your existing ApplicationAs Jaclang is a language that supersets Python, you can easily integrate it into your existing Python application. This guide will show you how to do that by integrating a AI feature into a simple Task Manager application build using Python.
link Python Task Manager ApplicationLet's start by creating a simple Task Manager application using Python. The application will have the following features:
Add a task View all tasks Delete a task task_manager.py 1link tasks: list [ str ] = [ ]
2link
3link def add_task ( task: str ) - > None :
4link tasks. append( task)
5link
6link def view_tasks ( ) - > None :
7link for i, task in enumerate ( tasks) :
8link print ( f" { i+ 1 } . { task} " )
9link
10link def delete_task ( index: int ) - > None :
11link del tasks[ index]
12link
13link def main ( ) - > None :
14link while True :
15link print ( "1. Add Task" )
16link print ( "2. View Tasks" )
17link print ( "3. Delete Task" )
18link print ( "4. Exit" )
19link choice = int ( input ( "Enter your choice: " ) )
20link if choice == 1 :
21link task = input ( "Enter task: " )
22link add_task( task)
23link elif choice == 2 :
24link view_tasks( )
25link elif choice == 3 :
26link index = int ( input ( "Enter task number to delete: " ) ) - 1
27link delete_task( index)
28link elif choice == 4 :
29link break
30link else :
31link print ( "Invalid choice" )
32link
33link if __name__ == "__main__" :
34link main( )
You can run the application using the following command:
1link $ python task_manager.py
link Integrating JaclangCurrently the Tasks in the Task Manager are just strings. Let's add a feature where when the user adds a task, the application will decide the priority of the task and the estimated time to complete the task based on the previous tasks.
link Creating the Jac Moduletaskman.jac 1link import : py from mtllm. llms, OpenAI;
2link
3link glob llm = OpenAI( ) ;
4link
5link obj Task {
6link has description: str ,
7link priority: 'Priority of the Task (0-10)' : int ,
8link time: 'Estimated Time Required to Finish (min)' : int ;
9link }
10link
11link can create_task( description: str , prev_tasks: list [ Task] ) - > Task
12link by llm( method= "Reason" ) ;
Just like that with a few lines of code, you have a AI powered Task Manager. The create_task
function will take the description of the task and the previous tasks and return a Task object with the priority and estimated time to complete the task.
link Integrating the Jac Moduletask_manager.py 1link from jaclang import jac_import
2link
3link
4link create_task = jac_import( "taskman.jac" ) . create_task
5link
6link tasks: list = [ ]
7link
8link def add_task ( task) :
9link task = create_task( task, tasks)
10link tasks. append( task)
11link
12link
Now when the user adds a task, the application will use the MTLLM to decide the priority and estimated time to complete the task based on the previous tasks.
You can run the application using the same command:
1link $ python task_manager.py
This is just a simple example of how you can integrate Jaclang into your existing Python application. You can use Jaclang to add AI features to your application without having to write complex AI code.
Home
Design and Implementation chevron_right
Building Blocks chevron_right
API Reference chevron_right
Tips and Tricks chevron_right
FAQs
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/tips-n-tricks/when_to_use_semstrings.html b/support/plugins/mtllm/docs/docs/tips-n-tricks/when_to_use_semstrings.html
new file mode 100644
index 000000000..142cd79d2
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/tips-n-tricks/when_to_use_semstrings.html
@@ -0,0 +1,54 @@
+MTLLM API Documentation | When to use Semstrings link When to use SemstringsHome
Design and Implementation chevron_right
Building Blocks chevron_right
API Reference chevron_right
Tips and Tricks chevron_right
FAQs
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/docs/tutorials/rpg_game.html b/support/plugins/mtllm/docs/docs/tutorials/rpg_game.html
new file mode 100644
index 000000000..b3ccc5cfc
--- /dev/null
+++ b/support/plugins/mtllm/docs/docs/tutorials/rpg_game.html
@@ -0,0 +1,54 @@
+MTLLM API Documentation | Creating a Level Generator for an RPG Game link Creating a Level Generator for an RPG GameProcedurally generated maps in video games has become a hot topic in the recent years among the gaming community. the algorithms for generating these maps are extremely complected, and requires months of development to build such algorithms. Instead of symbolically written programs, what if we can use generative models to generate these maps?
In this Tutorial we will show you how you can generate game maps for a simple game where the map can be expressed using a list of strings.
link What is a level?A level can be represented in this hypothetical game using a list of strings shown below.
1link glob level = [ 'BBBBBBBBBB' ,
2link 'B....E...B' ,
3link 'B.P......B' ,
4link 'B.....B..B' ,
5link 'B.....BE.B' ,
6link 'B....BBBBB' ,
7link 'B....B...B' ,
8link 'B.....E..B' ,
9link 'BBBBBBBBBB' ] ;
In this level each character represent a different element in the map,
'B': Walls '.': Floor 'E': Enemy 'P': Player link Building a level map generator?The straightforward approach to build a map generator is to ask from the LLM to directly generate such a map as a list of strings. MTLLM allows you to do this by defining a function or a method. However, here we would discuss a more object oriented way of programming with LLMs which allow the model to 'think' using objects.
link Level Configurationlevel_manager.jac 1link obj Level {
2link has name: str ,
3link difficulty: int ;
4link has width: int ,
5link height: int ,
6link num_wall: int ,
7link num_enemies: int ;
8link has time_countdown: int ,
9link n_retries_allowed: int ;
10link }
Each level should have a basic configuration which describes the level in an abstract format. This level object embeds the difficulty of the level and the number of enemies and obstacles including other level configuration parameters.
However, filling in the values for fields requires a cognitive capacity, for which will use an LLM later on.
link Building the Map Elementslevel_manager.jac 1link obj Position {
2link has x: int ,
3link y: int ;
4link }
As the map we are aiming to generate is a 2D map the position of each object on the map can be designated using the Position
custom type. It is simply representing a cartesian 2D coordinate system.
Object to Represent a Wall level_manager.jac 1link obj Wall {
2link has start_pos: Position,
3link end_pos: Position;
4link }
The wall object represents a straight wall, as all obstacles in the 2D map can be represented by a collection of intersecting wall objects. Here each wall object will have a start position as well as a stop position
Map represented using objects level_manager.jac 1link obj Map {
2link has level: Level;
3link has walls: list [ Wall] ,
4link small_obstacles: list [ Position] ;
5link has enemies: list [ Position] ;
6link has player_pos: Position;
7link }
This Map object will hold the exact positions of all objects in the map. This is the object that we will generate using MT-LLM. Each field of this object is one of or a derivative of the custom types which we described above.
link The Level ManagerTo manage all the generations we can define a Level manager object which can hold a directory of previous levels configurations and maps, which can be used to feed the LLM to give context about the play style of the player. We will be using the OpenAI GPT-4o as the LLM in this tutorial.
level_manager.jac 1link import : py from mtllm. llms, OpenAI;
2link glob llm = OpenAI( model_name= "gpt-4o" ) ;
3link
4link obj LevelManager {
5link has current_level: int = 0 ,
6link current_difficulty: int = 1 ,
7link prev_levels: list [ Level] = [ ] ,
8link prev_level_maps: list [ Map] = [ ] ;
9link
10link '''Generates the Next level configuration based upon previous playthroughs'''
11link can create_next_level( last_levels: list [ Level] ,
12link difficulty: int ,
13link level_width: int ,
14link level_height: int ) - > Level by llm( temperature= 1.0 ) ;
15link
16link '''Get the Next Level'''
17link can get_next_level - > tuple ( Level, Map) ;
18link
19link '''Generate the Map as a List of Strings'''
20link can get_map( map : Map) - > list [ str ] ;
21link }
We have three methods defined under the level manager. Each will handle a separate set of tasks.
create_next_level
: Takes in previous level configuration data from previously played levels and generate the new level configuration parameters and output a Level
object which describes the new map, using the LLM.
get_next_level
: Uses the create_next_level
to generate the Level
config. object which is then used to fill in the rest of a newly initiated Map
object using an LLM. This is where the actual map generation happens. Still the generated map cannot be visualize.
get_map
: This method will generate the actual list of strings which can be used with an actual game using the Map
object generated by get_next_level
method. This does not require any LLM as all objects of the map are included in the Map
object with their exact positions.
The implementation of the above methods are as follows.
level_manager.jac 1link : obj: LevelManager: can: get_next_level {
2link self. current_level += 1 ;
3link
4link if len ( self. prev_levels) > 3 {
5link self. prev_levels. pop( 0 ) ;
6link self. prev_level_maps. pop( 0 ) ;
7link }
8link
9link new_level = self. create_next_level(
10link self. prev_levels,
11link self. current_difficulty,
12link 20 , 20
13link ) ;
14link self. prev_levels. append( new_level) ;
15link
16link new_level_map = Map( level= new_level by llm( ) ) ;
17link self. prev_level_maps. append( new_level_map) ;
18link
19link if self. current_level % 2 == 0 {
20link self. current_difficulty += 1 ;
21link }
22link return ( new_level, new_level_map) ;
23link }
In the get_next_level method there are two LLM calls, which we discuss below; the remaining parts relate to the game's own functionality.

Lines 9-13: The saved data from previous levels, defined earlier, is given as input along with the basic level config parameters of the new level. Since the output type of this method was declared above to be a Level object, the LLM will instantiate one and fill in its values. Because the temperature hyperparameter is set to 1.0 at the method declaration, the LLM is pushed to be more creative.

Line 16: Here the programmer initiates a Map object, passing in only the level parameter with the newly generated Level object, and asks the LLM to fill in the remaining fields by generating values of the relevant types. This nested-type approach ensures the output is formatted exactly the way you expect.
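The same nested-type pattern applies beyond map generation. Below is a minimal sketch, borrowed from the text_to_type example shipped with mtllm (the Person and Employer types are illustrative): initializing the outer object with by llm() makes the model generate the nested objects as well.

import:py from mtllm.llms { OpenAI }

glob llm = OpenAI();

obj 'Employer'
Employer {
    has employer_name: 'Employer Name': str,
        location: 'Location': str;
}

obj 'Person'
Person {
    has name: 'Name': str,
        age: 'Age': int,
        employer: 'Employer': Employer;
}

with entry {
    info: str = "Chandra is 28 years old and works as an ML engineer at Jaseci Labs in Sri Lanka.";
    person = Person(by llm(incl_info=(info)));
    print(f"{person.name} works at {person.employer.employer_name} in {person.employer.location}.");
}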
level_manager.jac

:obj:LevelManager:can:get_map {
    map_tiles = [['.' for _ in range(map.level.width)] for _ in range(map.level.height)];
    for wall in map.walls {
        for x in range(wall.start_pos.x, wall.end_pos.x + 1) {
            for y in range(wall.start_pos.y, wall.end_pos.y + 1) {
                map_tiles[y-1][x-1] = 'B';
            }
        }
    }
    for obs in map.small_obstacles {
        map_tiles[obs.y-1][obs.x-1] = 'B';
    }

    for enemy in map.enemies {
        map_tiles[enemy.y-1][enemy.x-1] = 'E';
    }
    map_tiles[map.player_pos.y-1][map.player_pos.x-1] = 'P';
    map_tiles = [['B'] + row + ['B'] for row in map_tiles];
    map_tiles = [['B' for _ in range(map.level.width + 2)]] + map_tiles + [['B' for _ in range(map.level.width + 2)]];
    return [''.join(row) for row in map_tiles];
}
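For intuition, the returned list of strings for a tiny 8x3 level might look like the following (illustrative only; the actual layout depends on what the LLM generated):

BBBBBBBBBB
B...E....B
B.P..B...B
B......E.B
BBBBBBBBBB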
Running the Program

with entry {
    level_manager = LevelManager();
    for _ in range(2) {
        (new_level, new_level_map) = level_manager.get_next_level();
        print(new_level);
        print('\n'.join(LevelManager.get_map(new_level_map)));
    }
}
This program will generate two consecutive maps and print them to the terminal. You can test your program simply by running this Jac file with jac run level_manager.jac.
A Full-Scale Game Demo

For the sake of this tutorial we have not included the development of an entire game. The full game is available on our jac-lang repo. A sample demonstration of the game can be viewed below.
Demo Video
\ No newline at end of file
diff --git a/support/plugins/mtllm/docs/index.html b/support/plugins/mtllm/docs/index.html
new file mode 100644
index 000000000..e1b118751
--- /dev/null
+++ b/support/plugins/mtllm/docs/index.html
@@ -0,0 +1,55 @@
+MTLLM API Documentation
The MTLLM Python library provides convenient access to a large number of easy-to-use and customizable LLM APIs for Jaseci's Jaclang by llm feature.
+The library provides automatic output fixing, output type validation, different prompting techniques, and more.
warning IMPORTANT
Though this can be used with Python projects, it is primarily intended to be used with Jaseci's Jaclang.
$ pip install mtllm
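As a quick taste of the by llm feature, here is a minimal sketch (adapted from the translator example in this repository; it assumes a valid OPENAI_API_KEY in your environment):

import:py from mtllm.llms { OpenAI }

glob llm = OpenAI();

can 'Translate English text to the given language'
translate(input: str, lang: str = "French") -> 'Translation': str by llm();

with entry {
    print(translate("I am a student", "French"));
}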
\ No newline at end of file
diff --git a/support/plugins/mtllm/examples/debate_agent.jac b/support/plugins/mtllm/examples/debate_agent.jac
new file mode 100644
index 000000000..081a06bd5
--- /dev/null
+++ b/support/plugins/mtllm/examples/debate_agent.jac
@@ -0,0 +1,31 @@
+import:py from mtllm.llms { OpenAI }
+import:py from mtllm.tools.wikipedia_utils { wikipedia_summary }
+
+glob llm = OpenAI(model_name="gpt-4o-mini");
+
+can 'Ask Opponent for Input'
+ask_opponent(statement: 'Your Statement': str) -> str {
+ user_input = input(f"AI -> {statement} ");
+    return f"Opponent's Answer -> {user_input}";
+}
+
+can 'States the Facts to the Opponent'
+state_facts(information: str) -> None {
+ print(f"AI -> {information}");
+}
+
+can 'Perform a debate session with an opponent on the given topic; for information, use the given tools'
+debate_agent(topic: 'Debate Topic': str) -> 'Summary of the Conversation': str by llm(
+ method='ReAct',
+ tools=[wikipedia_summary, ask_opponent, state_facts],
+ context=[
+ "You have to defend the given topic while the opponent is defending the counter topic",
+        "If you don't know about the topic or want to verify the opponent's claims, use the given tools",
+ "You can ask opponent counter questions",
+ "You are a humorous, cunning, very arrogant debater."
+ ]
+);
+
+with entry {
+ debate_agent('Merlin the Wizard is still alive.');
+}
diff --git a/support/plugins/mtllm/examples/essay_review.jac b/support/plugins/mtllm/examples/essay_review.jac
new file mode 100644
index 000000000..736926fe6
--- /dev/null
+++ b/support/plugins/mtllm/examples/essay_review.jac
@@ -0,0 +1,34 @@
+import:py from mtllm.llms { OpenAI }
+
+glob llm = OpenAI(model_name="gpt-4o");
+
+obj Essay {
+ has essay: str;
+
+ can 'Evaluate the given essay based on the given criteria. Provide Detailed Judgement.'
+ essay_judge(criteria: str) -> str by llm(incl_info=(self.essay));
+ can generate_summary(judgements: dict) -> str by llm(incl_info=(self.essay));
+ can give_grade(summary: str) -> 'A to D': str by llm();
+}
+
+with entry {
+    essay = "With a population of approximately 45 million Spaniards and 3.5 million immigrants, "
+        "Spain is a country of contrasts where the richness of its culture blends it up with "
+        "the variety of languages and dialects used. Being one of the largest economies worldwide, "
+        "and the second largest country in Europe, Spain is a very appealing destination for tourists "
+        "as well as for immigrants from around the globe. Almost all Spaniards are used to speaking at "
+        "least two different languages, but protecting and preserving that right has not been "
+        "easy for them. Spaniards have had to struggle with war, ignorance, criticism and the governments, "
+        "in order to preserve and defend what identifies them, and deal with the consequences.";
+ essay = Essay(essay);
+ criterias = ["Clarity", "Originality", "Evidence"];
+ judgements = {};
+ for criteria in criterias {
+ judgement = essay.essay_judge(criteria);
+ judgements[criteria] = judgement;
+ }
+ summary = essay.generate_summary(judgements);
+ grade = essay.give_grade(summary);
+ print("Reviewer Notes: ", summary);
+ print("Grade: ", grade);
+}
diff --git a/support/plugins/mtllm/examples/expert_answer.jac b/support/plugins/mtllm/examples/expert_answer.jac
new file mode 100644
index 000000000..ff9f111c6
--- /dev/null
+++ b/support/plugins/mtllm/examples/expert_answer.jac
@@ -0,0 +1,13 @@
+import:py from mtllm.llms { OpenAI }
+
+glob llm = OpenAI();
+
+can get_expert(question: str) -> 'Best Expert to Answer the Question': str by llm(method='Reason');
+can get_answer(question: str, expert: str) -> str by llm();
+
+with entry {
+ question = "What are Large Language Models?";
+ expert = get_expert(question);
+ answer = get_answer(question, expert);
+ print(f"{expert} says: '{answer}' ");
+}
diff --git a/support/plugins/mtllm/examples/grammar_checker.jac b/support/plugins/mtllm/examples/grammar_checker.jac
new file mode 100644
index 000000000..e1b32f058
--- /dev/null
+++ b/support/plugins/mtllm/examples/grammar_checker.jac
@@ -0,0 +1,15 @@
+import:py from mtllm.llms { OpenAI }
+
+glob llm = OpenAI();
+
+can correct_grammar(text: str) -> 'Grammar Corrected Text': str by llm(temperature=0.9);
+
+with entry {
+ files_path = input("Enter the file path to the text file: ");
+ with open(files_path, 'r') as file {
+ text = file.read();
+ }
+ print("Original text:", text);
+ corrected_text = correct_grammar(text);
+ print("Corrected text:", corrected_text);
+}
diff --git a/support/plugins/mtllm/examples/joke_gen.jac b/support/plugins/mtllm/examples/joke_gen.jac
new file mode 100644
index 000000000..e56e37279
--- /dev/null
+++ b/support/plugins/mtllm/examples/joke_gen.jac
@@ -0,0 +1,30 @@
+import:py from mtllm.llms { OpenAI }
+
+glob llm = OpenAI();
+
+obj PunchlineJokes {
+ has jokes: 'Jokes with Punchlines': list[dict] = [
+ {
+ "joke": "How does a penguin build its house?",
+ "punchline": "Igloos it together."
+ },
+ {
+ "joke": "Which knight invented King Arthur's Round Table?",
+ "punchline": "Sir Cumference."
+ }
+ ];
+
+ can generate_joke -> 'NEW JOKE WITH A PUNCHLINE': dict by llm(incl_info=(self.jokes), temperature=0.0);
+ can generate {
+ joke_punchline = self.generate_joke();
+ self.jokes.append(joke_punchline);
+ }
+}
+
+with entry {
+    joke_gen = PunchlineJokes();
+ for i in range(5) {
+ joke_gen.generate();
+ }
+ print(joke_gen.jokes);
+}
diff --git a/support/plugins/mtllm/examples/marketing_agency.jac b/support/plugins/mtllm/examples/marketing_agency.jac
new file mode 100644
index 000000000..8aeae68fc
--- /dev/null
+++ b/support/plugins/mtllm/examples/marketing_agency.jac
@@ -0,0 +1,65 @@
+import:py from mtllm.llms { OpenAI }
+import:py from mtllm.tools.wikipedia_utils { wikipedia_summary }
+import:py from mtllm.tools.serper { search, scrape }
+
+glob llm = OpenAI(verbose=True, model_name="gpt-4o-mini");
+
+can save_output_as_md(output: str, filename: str) -> None {
+ with open(filename, 'w') as f {
+ f.write(output);
+ }
+}
+
+can 'Expert Persona Generation Agent, Generate detailed personas for a product'
+persona_expert(requirements: 'Managers Requirements': str) -> 'Markdown Formatted Detailed Report with citations': str
+by llm(
+ method="ReAct",
+ tools=[wikipedia_summary, search, scrape],
+ max_prev_react_outputs=10,
+ max_react_iterations=10,
+ context=["DONOT SUMMARIZE. MAKE IT DETAILED"]
+);
+can 'Expert Demographic Analysis Agent, Perform a detailed analysis of the demographics for a product'
+demographic_expert(requirements: 'Managers Requirements': str) -> 'Markdown Formatted Detailed Report with citations': str
+by llm(
+ method="ReAct",
+ tools=[wikipedia_summary, search, scrape],
+ max_prev_react_outputs=10,
+ max_react_iterations=10,
+ context=["DONOT SUMMARIZE. MAKE IT DETAILED"]
+);
+can 'Expert Market Analysis Agent, Perform a detailed analysis of the market for a product'
+market_expert(requirements: 'Managers Requirements': str) -> 'Markdown Formatted Detailed Report with citations': str
+by llm(
+ method="ReAct",
+ tools=[wikipedia_summary, scrape, search],
+ max_prev_react_outputs=10,
+ max_react_iterations=10,
+ context=["DONOT SUMMARIZE. MAKE IT DETAILED"]
+);
+can 'Expert in demand analysis, Perform a detailed analysis of the demand for a product'
+demand_expert(requirements: 'Managers Requirements': str) -> 'Markdown Formatted Detailed Report with citations': str
+by llm(
+ method="ReAct",
+ tools=[wikipedia_summary, search, scrape],
+ max_prev_react_outputs=10,
+ max_react_iterations=10,
+ context=["DONOT SUMMARIZE. MAKE IT DETAILED"]
+);
+can 'Manager agent come up with a marketing strategy and delegate tasks with detailed requirements to the team'
+manager(query: 'Customers Inquiry': str) -> "Markdown Formatted Highly Detailed Report with an Executive Summary": str
+by llm(
+ method="ReAct",
+ tools=[persona_expert, demographic_expert, market_expert, search, scrape],
+ max_react_iterations=10,
+ max_prev_react_outputs=10,
+ context=["DONOT SUMMARIZE. MAKE IT DETAILED"]
+);
+
+with entry {
+    query = "Jaclang is a native superset of Python with a focus on AI and ML. Jaclang allows developers to "
+        "prototype AI applications with ease by providing high-level abstractions for AI/ML use cases. "
+        "Perform a detailed analysis of the market for Jaclang and provide a detailed report on the market, "
+        "demand, demographics and personas for the product.";
+ save_output_as_md(manager(query), "marketing_report.md");
+}
diff --git a/support/plugins/mtllm/examples/odd_word_out.jac b/support/plugins/mtllm/examples/odd_word_out.jac
new file mode 100644
index 000000000..8468279ca
--- /dev/null
+++ b/support/plugins/mtllm/examples/odd_word_out.jac
@@ -0,0 +1,19 @@
+import:py from mtllm.llms { OpenAI }
+
+glob llm = OpenAI(model_name="gpt-4o");
+
+glob examples: 'Examples for Picking Odd Word out (Options, Reasoning, Result)': list[tuple[list[str], str, str]] = [
+ (["skirt", "dress", "pen", "jacket"], "skirt is clothing, dress is clothing, pen is an object, jacket is clothing.", "pen"),
+    (["Spain", "France", "German", "England", "Singapore"], "Spain, France, England, and Singapore are countries; German is a language.", "German")
+];
+
+can 'Pick the Odd word out'
+odd_word_out(options: 'Options to pick from': list[str]) -> 'REASONING & RESULT': tuple[str, str] by llm(incl_info=(examples));
+
+with entry {
+ print(
+ odd_word_out(
+ ["Bentley", "Ferrari", "Lamborghini", "Casio", "Toyota"]
+ )
+ );
+}
diff --git a/support/plugins/mtllm/examples/personality_finder.jac b/support/plugins/mtllm/examples/personality_finder.jac
new file mode 100644
index 000000000..1d995e2ab
--- /dev/null
+++ b/support/plugins/mtllm/examples/personality_finder.jac
@@ -0,0 +1,35 @@
+import:py from mtllm.llms { OpenAI }
+
+glob llm = OpenAI(verbose=True, model_name="gpt-4o-mini");
+
+enum 'Personality of the Person'
+Personality {
+ INTROVERT: 'Person who is shy and reticent' = "Introvert",
+ EXTROVERT: 'Person who is outgoing and socially confident' = "Extrovert"
+}
+
+obj 'Person'
+Person {
+ has full_name: 'Fullname of the Person': str,
+ yod: 'Year of Death': int,
+ personality: 'Personality of the Person': Personality;
+}
+
+glob personality_examples: 'Personality Information of Famous People': dict[str, Personality] = {
+ 'Albert Einstein': Personality.INTROVERT,
+ 'Barack Obama': Personality.EXTROVERT
+};
+
+can 'Get Person Information use common knowledge'
+get_person_info(name: 'Name of the Person': str) -> 'Person': Person by llm(
+ reason=True,
+ temperature=0.0,
+ incl_info=(personality_examples)
+);
+
+with entry {
+ person_obj = get_person_info('Martin Luther King Jr.');
+ print(
+ f"{person_obj.full_name} was a {person_obj.personality.value} person who died in {person_obj.yod}"
+ );
+}
diff --git a/support/plugins/mtllm/examples/text_to_type.jac b/support/plugins/mtllm/examples/text_to_type.jac
new file mode 100644
index 000000000..92cb9cac0
--- /dev/null
+++ b/support/plugins/mtllm/examples/text_to_type.jac
@@ -0,0 +1,25 @@
+import:py from mtllm.llms { OpenAI }
+
+glob llm = OpenAI();
+
+obj 'Employer'
+Employer {
+ has employer_name: 'Employer Name': str,
+ location: 'Location': str;
+}
+
+obj 'Person'
+Person {
+ has name: 'Name': str,
+ age: 'Age': int,
+ employer: 'Employer': Employer,
+ job: 'Job': str;
+}
+
+with entry {
+    info: "Person's Information": str = "Chandra is 28 years old and works as an ML engineer at Jaseci Labs in Sri Lanka.";
+ person = Person(by llm(incl_info=(info)));
+ print(
+ f"Person's name is {person.name} and works at {person.employer.employer_name} which is located in {person.employer.location}."
+ );
+}
diff --git a/support/plugins/mtllm/examples/translator.jac b/support/plugins/mtllm/examples/translator.jac
new file mode 100644
index 000000000..f4962e59d
--- /dev/null
+++ b/support/plugins/mtllm/examples/translator.jac
@@ -0,0 +1,13 @@
+import:py from mtllm.llms { OpenAI }
+
+glob llm = OpenAI();
+
+can 'Translate English Representation to the given language'
+translate(input: 'English Representation': str, lang: 'Desired Language': str="French") -> 'Translation': str by llm();
+
+with entry {
+ print(translate("I am a student", "French"));
+ print(
+ translate("I am a student", "Language used in Somalia")
+ );
+}
diff --git a/support/plugins/mtllm/examples/vision/math_question.jpg b/support/plugins/mtllm/examples/vision/math_question.jpg
new file mode 100644
index 000000000..901de8aef
Binary files /dev/null and b/support/plugins/mtllm/examples/vision/math_question.jpg differ
diff --git a/support/plugins/mtllm/examples/vision/math_solver.jac b/support/plugins/mtllm/examples/vision/math_solver.jac
new file mode 100644
index 000000000..0e43a967f
--- /dev/null
+++ b/support/plugins/mtllm/examples/vision/math_solver.jac
@@ -0,0 +1,13 @@
+import:py from mtllm.llms { OpenAI }
+import:py from PIL { Image }
+
+glob llm = OpenAI(verbose=True, model_name="gpt-4o");
+
+can 'Solve the Given Math Question'
+solve_math_question(question_img: 'Image of the Question': Image) -> 'Answer to the Question': str by llm(method="Chain-of-Thoughts");
+
+with entry {
+ print(
+ solve_math_question(Image.open('math_question.jpg'))
+ );
+}
diff --git a/support/plugins/mtllm/examples/vision/mugen.jac b/support/plugins/mtllm/examples/vision/mugen.jac
new file mode 100644
index 000000000..1ce222b60
--- /dev/null
+++ b/support/plugins/mtllm/examples/vision/mugen.jac
@@ -0,0 +1,15 @@
+import:py from mtllm.llms { OpenAI }
+import:py from mtllm { Video }
+
+glob llm = OpenAI(model_name="gpt-4o");
+
+can is_aligned(video: Video, text: str) -> bool by llm(
+ method="Chain-of-Thoughts",
+ context="Mugen is the moving character"
+);
+
+with entry {
+ video = Video("mugen.mp4", 1);
+ text = "Mugen jumps off and collects few coins.";
+ print(is_aligned(video, text));
+}
diff --git a/support/plugins/mtllm/examples/vision/mugen.mp4 b/support/plugins/mtllm/examples/vision/mugen.mp4
new file mode 100644
index 000000000..2de1a4e56
Binary files /dev/null and b/support/plugins/mtllm/examples/vision/mugen.mp4 differ
diff --git a/support/plugins/mtllm/examples/vision/person.png b/support/plugins/mtllm/examples/vision/person.png
new file mode 100644
index 000000000..05f2611b7
Binary files /dev/null and b/support/plugins/mtllm/examples/vision/person.png differ
diff --git a/support/plugins/mtllm/examples/vision/personality_finder.jac b/support/plugins/mtllm/examples/vision/personality_finder.jac
new file mode 100644
index 000000000..76e424862
--- /dev/null
+++ b/support/plugins/mtllm/examples/vision/personality_finder.jac
@@ -0,0 +1,27 @@
+import:py from mtllm.llms { OpenAI }
+import:py from mtllm { Image }
+
+glob llm = OpenAI(model_name="gpt-4o");
+
+enum 'Personality of the Person'
+Personality {
+ INTROVERT: 'Person who is shy and reticent' = "Introvert",
+ EXTROVERT: 'Person who is outgoing and socially confident' = "Extrovert"
+}
+
+obj 'Person'
+Person {
+ has full_name: 'Fullname of the Person': str,
+ yod: 'Year of Death': int,
+ personality: 'Personality of the Person': Personality;
+}
+
+can 'Get Person Information use common knowledge'
+get_person_info(img_of_person: 'Image of Person': Image) -> 'Person': Person by llm();
+
+with entry {
+ person_obj = get_person_info(Image("person.png"));
+ print(
+ f"{person_obj.full_name} was a {person_obj.personality.value} person who died in {person_obj.yod}"
+ );
+}
diff --git a/support/plugins/mtllm/examples/vision/receipt.jpg b/support/plugins/mtllm/examples/vision/receipt.jpg
new file mode 100644
index 000000000..07d3c6781
Binary files /dev/null and b/support/plugins/mtllm/examples/vision/receipt.jpg differ
diff --git a/support/plugins/mtllm/examples/vision/receipt_analyzer.jac b/support/plugins/mtllm/examples/vision/receipt_analyzer.jac
new file mode 100644
index 000000000..798280754
--- /dev/null
+++ b/support/plugins/mtllm/examples/vision/receipt_analyzer.jac
@@ -0,0 +1,40 @@
+import:py from mtllm.llms { OpenAI }
+import:py from mtllm.tools.math_utils { math_tool }
+import:py from PIL { Image }
+
+glob llm = OpenAI(verbose=True, model_name="gpt-4o");
+
+obj 'Purchased Item'
+PurchasedItem {
+ has name: 'Item Name': str,
+ price: 'Item Price': float,
+ quantity: 'Item Quantity': int;
+}
+
+obj 'Receipt'
+Receipt {
+ has store: 'Store Name': str,
+ date: 'Purchase Date': str,
+ items: 'Items': list[PurchasedItem],
+ total: 'Total': float;
+
+ can pp() -> None {
+ print(f"Store: {self.store}");
+ print(f"Date: {self.date}");
+ for item in self.items {
+ print(f"{item.name} - {item.price} x {item.quantity}");
+ }
+ print(f"Total: {self.total}");
+ }
+}
+
+can 'Get Receipt Object'
+get_receipt(receipt_img: 'Image of the Receipt': Image) -> 'Receipt Object': Receipt by llm();
+can verify_total(receipt: Receipt) -> bool by llm(method="ReAct", tools=[math_tool]);
+
+with entry {
+    receipt_img = Image.open("receipt.jpg");
+    receipt = get_receipt(receipt_img);
+    receipt.pp();
+    print(f"Total is correct: {verify_total(receipt)}");
+}
diff --git a/support/plugins/mtllm/examples/wikipedia.jac b/support/plugins/mtllm/examples/wikipedia.jac
new file mode 100644
index 000000000..3689450db
--- /dev/null
+++ b/support/plugins/mtllm/examples/wikipedia.jac
@@ -0,0 +1,12 @@
+import:py from mtllm.llms { OpenAI }
+import:py from mtllm.tools.wikipedia_utils { wikipedia_summary }
+
+glob llm = OpenAI(verbose=True, model_name="gpt-4o-mini");
+
+can get_answer(question: str) -> str by llm(method="ReAct", tools=[wikipedia_summary]);
+
+with entry {
+ question = "Who is Jason Mars?";
+ answer = get_answer(question);
+ print(answer);
+}
diff --git a/support/plugins/mtllm/mtllm/__init__.py b/support/plugins/mtllm/mtllm/__init__.py
new file mode 100644
index 000000000..5ff54e0fb
--- /dev/null
+++ b/support/plugins/mtllm/mtllm/__init__.py
@@ -0,0 +1,5 @@
+"""MTLLM Package."""
+
+from mtllm.types import Image, Video
+
+__all__ = ["Image", "Video"]
diff --git a/support/plugins/mtllm/mtllm/aott.py b/support/plugins/mtllm/mtllm/aott.py
new file mode 100644
index 000000000..20f179706
--- /dev/null
+++ b/support/plugins/mtllm/mtllm/aott.py
@@ -0,0 +1,226 @@
+"""
+AOTT: Automated Operational Type Transformation.
+
+This has all the necessary functions to perform the AOTT operations.
+"""
+
+from typing import Mapping
+
+from jaclang.compiler.semtable import SemRegistry
+
+from loguru import logger
+
+from mtllm.llms.base import BaseLLM
+from mtllm.tools import finish_tool
+from mtllm.types import (
+ Image,
+ Information,
+ InputInformation,
+ OutputHint,
+ ReActOutput,
+ Tool,
+ TypeExplanation,
+ Video,
+)
+
+
+def aott_raise(
+ model: BaseLLM,
+ informations: list[Information],
+ inputs_information: list[InputInformation],
+ output_hint: OutputHint,
+ type_explanations: list[TypeExplanation],
+ action: str,
+ context: str,
+ method: str,
+ tools: list[Tool],
+ model_params: dict,
+ _globals: dict,
+ _locals: Mapping,
+) -> str:
+    """AOTT Raise uses the provided information (meanings, types, values) to generate a prompt (meaning in)."""
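+    # Make finish_tool available in the eval scope so a ReAct run can terminate itself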
+ _globals["finish_tool"] = finish_tool
+ contains_media: bool = any(
+ isinstance(x.value, (Image, Video)) for x in inputs_information
+ )
+ informations_str = "\n".join([str(x) for x in informations])
+ inputs_information_repr: list[dict] | str
+ if contains_media:
+ inputs_information_repr = []
+ for x in inputs_information:
+ inputs_information_repr.extend(x.to_list_dict())
+ else:
+ inputs_information_repr = "\n".join([str(x) for x in inputs_information])
+
+ type_explanations_str = "\n".join([str(x) for x in type_explanations])
+
+ system_prompt = model.MTLLM_SYSTEM_PROMPT
+ meaning_typed_input_list: list[str] | list[dict]
+ is_react = method == "ReAct"
+ tools.append(finish_tool)
+ method_prompt = model.MTLLM_METHOD_PROMPTS[method]
+ if isinstance(inputs_information_repr, str):
+ mtllm_prompt = model.MTLLM_PROMPT.format(
+ information=informations_str,
+ inputs_information=inputs_information_repr,
+ output_information=str(output_hint),
+ type_explanations=type_explanations_str,
+ action=action,
+ context=context,
+ ).strip()
+ if not is_react:
+ meaning_typed_input_list = [system_prompt, mtllm_prompt, method_prompt]
+ else:
+ tool_prompt = "\n[Tools]\n" + "\n".join([str(tool) for tool in tools])
+ meaning_typed_input_list = [
+ system_prompt,
+ mtllm_prompt,
+ tool_prompt,
+ method_prompt,
+ ]
+ else:
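+        # Media path: the prompt template is split at {inputs_information} so image/video
+        # payloads can be interleaved with the text as separate message parts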
+ upper_half = model.MTLLM_PROMPT.split("{inputs_information}")[0]
+ lower_half = model.MTLLM_PROMPT.split("{inputs_information}")[1]
+ upper_half = upper_half.format(
+ information=informations_str,
+ context=context,
+ )
+ lower_half = lower_half.format(
+ output_information=str(output_hint),
+ type_explanations=type_explanations_str,
+ action=action,
+ )
+ meaning_typed_input_list = [
+ {"type": "text", "text": system_prompt},
+ {"type": "text", "text": upper_half},
+ ]
+ meaning_typed_input_list.extend(inputs_information_repr)
+ if is_react:
+            tool_prompt = "[Tools]\n" + "\n".join([str(tool) for tool in tools])
+ meaning_typed_input_list.append({"type": "text", "text": tool_prompt})
+ meaning_typed_input_list.extend(
+ [
+ {"type": "text", "text": lower_half},
+ {"type": "text", "text": method_prompt},
+ ]
+ )
+ if is_react:
+ result = execute_react(
+ model,
+ meaning_typed_input_list,
+ contains_media,
+ model_params,
+ _globals,
+ _locals,
+ tool_prompt,
+ type_explanations_str,
+ )
+ return f"[Output] {result}"
+ meaning_typed_input = (
+ "\n".join(meaning_typed_input_list) # type: ignore
+ if not contains_media
+ else meaning_typed_input_list
+ )
+ return model(meaning_typed_input, **model_params) # type: ignore
+
+
+def execute_react(
+ model: BaseLLM,
+ meaning_typed_input_list: list[dict] | list[str],
+ contains_media: bool,
+ model_params: dict,
+ _globals: dict,
+ _locals: Mapping,
+ tool_prompt: str,
+ type_explanations_str: str,
+) -> str:
+ """Execute the ReAct method."""
+ max_react_iterations = model_params.pop("max_react_iterations", 10)
+ max_prev_react_outputs = model_params.pop("max_prev_react_outputs", 3)
+ prev_react_outputs: list[ReActOutput] = []
+ added_prev_react_input = False
+ reached_max_iterations = False
+ while True:
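+        # One ReAct iteration: re-prompt the model with a sliding window of previous
+        # thoughts, tool calls, and observations attached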
+ if len(prev_react_outputs) >= max_react_iterations:
+ reached_max_iterations = True
+ prev_react_input = process_prev_react(
+ prev_react_outputs[-max_prev_react_outputs:]
+ if len(prev_react_outputs) > max_prev_react_outputs
+ else prev_react_outputs
+ )
+ if prev_react_input:
+ if added_prev_react_input:
+ meaning_typed_input_list.pop(-2)
+ meaning_typed_input_list.insert(
+ -1,
+ (
+ prev_react_input # type: ignore
+ if not contains_media
+ else {"type": "text", "text": prev_react_input}
+ ),
+ )
+ added_prev_react_input = True
+ if reached_max_iterations:
+ meaning_typed_input_list.insert(
+ -1,
+ (
+ "[Reached Max Iterations] PLEASE FINALIZE using the finish tool." # type: ignore
+ if not contains_media
+ else {
+ "type": "text",
+ "text": "[Reached Max Iterations] PLEASE FINALIZE using the finish tool.",
+ }
+ ),
+ )
+ meaning_typed_input = (
+ "\n".join(meaning_typed_input_list) # type: ignore
+ if not contains_media
+ else meaning_typed_input_list
+ )
+ meaning_out = model(meaning_typed_input, **model_params) # type: ignore
+ react_output: ReActOutput = model.resolve_react_output(
+ meaning_out, _globals, _locals, tool_prompt, type_explanations_str
+ )
+ if model.verbose:
+ logger.info(f"React Output\n{react_output}")
+ if "finish_tool" in react_output.action:
+ return react_output.observation
+ if reached_max_iterations:
+ raise Exception("Reached max iterations.")
+ prev_react_outputs.append(react_output)
+
+
+def process_prev_react(prev_react_outputs: list[ReActOutput]) -> str:
+ """Process the previous ReAct outputs."""
+ prev_react_input = ""
+ for i, prev_react_output in enumerate(prev_react_outputs):
+ prev_react_input += f"{i + 1}.\n"
+ prev_react_input += f"[Thought] {prev_react_output.thought}\n"
+ prev_react_input += f"[Tool Usage] {prev_react_output.action}\n"
+ prev_react_input += f"[Observation] {prev_react_output.observation}\n\n"
+ if prev_react_input:
+ prev_react_input = (
+ f"\n[Previous Thoughts, Actions & Observations]\n{prev_react_input}"
+ )
+ return prev_react_input
+
+
+def get_all_type_explanations(
+ type_list: list, mod_registry: SemRegistry
+) -> list[TypeExplanation]:
+ """Get all type explanations from the input type list."""
+ collected_type_explanations = {}
+ for type_item in type_list:
+ type_explanation = TypeExplanation(type_item, mod_registry)
+ if str(type_explanation) and type_item not in collected_type_explanations:
+ collected_type_explanations[type_item] = type_explanation
+ if type_explanation.nested_types:
+ nested_collected_type_explanations = get_all_type_explanations(
+ list(type_explanation.nested_types), mod_registry
+ )
+ for nested_type_explanation in nested_collected_type_explanations:
+ if nested_type_explanation.type_item not in collected_type_explanations:
+ collected_type_explanations[nested_type_explanation.type_item] = (
+ nested_type_explanation
+ )
+ return list(collected_type_explanations.values())
diff --git a/support/plugins/mtllm/mtllm/llms/__init__.py b/support/plugins/mtllm/mtllm/llms/__init__.py
new file mode 100644
index 000000000..75dabef68
--- /dev/null
+++ b/support/plugins/mtllm/mtllm/llms/__init__.py
@@ -0,0 +1,20 @@
+"""LLM implementations for MTLLM."""
+
+from .anthropic import Anthropic
+from .base import BaseLLM
+from .groq import Groq
+from .huggingface import Huggingface
+from .ollama import Ollama
+from .openai import OpenAI
+from .togetherai import TogetherAI
+
+
+__all__ = [
+ "Anthropic",
+ "Ollama",
+ "Huggingface",
+ "Groq",
+ "OpenAI",
+ "TogetherAI",
+ "BaseLLM",
+]
diff --git a/support/plugins/mtllm/mtllm/llms/anthropic.py b/support/plugins/mtllm/mtllm/llms/anthropic.py
new file mode 100644
index 000000000..8a89b10a0
--- /dev/null
+++ b/support/plugins/mtllm/mtllm/llms/anthropic.py
@@ -0,0 +1,98 @@
+"""Anthropic API client for MTLLM."""
+
+from mtllm.llms.base import BaseLLM
+
+
+REASON_SUFFIX = """
+Reason and return the output result(s) only, adhering to the provided Type in the following format
+
+[Reasoning]
+[Output]
+"""
+
+NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
+
+[Output]
+""" # noqa E501
+
+CHAIN_OF_THOUGHT_SUFFIX = """
+Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
+
+[Chain of Thoughts]
+[Output]
+""" # noqa E501
+
+REACT_SUFFIX = """
+You are given a list of tools you can use to do different things. To achieve the given [Action], incrementally think and provide the tool_usage necessary to achieve what is thought.
+Provide your answer adhering to the following format. tool_usage is a function call with the necessary arguments. Only provide one [THOUGHT] and [TOOL USAGE] at a time.
+
+[Thought]
+[Tool Usage]
+""" # noqa E501
+
+
+class Anthropic(BaseLLM):
+ """Anthropic API client for MTLLM."""
+
+ MTLLM_METHOD_PROMPTS: dict[str, str] = {
+ "Normal": NORMAL_SUFFIX,
+ "Reason": REASON_SUFFIX,
+ "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
+ "ReAct": REACT_SUFFIX,
+ }
+
+ def __init__(
+ self,
+ verbose: bool = False,
+ max_tries: int = 10,
+ type_check: bool = False,
+ **kwargs: dict,
+ ) -> None:
+ """Initialize the Anthropic API client."""
+ import anthropic # type: ignore
+
+ super().__init__(verbose, max_tries, type_check)
+ self.client = anthropic.Anthropic()
+ self.model_name = str(kwargs.get("model_name", "claude-3-sonnet-20240229"))
+ self.temperature = kwargs.get("temperature", 0.7)
+ self.max_tokens = kwargs.get("max_tokens", 1024)
+
+ def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
+ """Infer a response from the input meaning."""
+ if not isinstance(meaning_in, str):
+ assert self.model_name.startswith(
+ ("claude-3-opus", "claude-3-sonnet", "claude-3-haiku")
+ ), f"Model {self.model_name} is not multimodal, use a multimodal model instead."
+
+ import re
+
+ formatted_meaning_in = []
+ for item in meaning_in:
+ if item["type"] == "image_url":
+ # item["source"] = "data:image/jpeg;base64,base64_string"
+ img_match = re.match(
+ r"data:(image/[a-zA-Z]*);base64,(.*)", item["source"]
+ )
+ if img_match:
+ media_type, base64_string = img_match.groups()
+ formatted_meaning_in.append(
+ {
+ "type": "image",
+ "source": {
+ "type": "base64",
+ "media_type": media_type,
+ "data": base64_string,
+ },
+ }
+ )
+ continue
+ formatted_meaning_in.append(item)
+ meaning_in = formatted_meaning_in
+ messages = [{"role": "user", "content": meaning_in}]
+ output = self.client.messages.create(
+ model=kwargs.get("model_name", self.model_name),
+ temperature=kwargs.get("temperature", self.temperature),
+ max_tokens=kwargs.get("max_tokens", self.max_tokens),
+ messages=messages,
+ )
+ return output.content[0].text
diff --git a/support/plugins/mtllm/mtllm/llms/base.py b/support/plugins/mtllm/mtllm/llms/base.py
new file mode 100644
index 000000000..b7cbafc0e
--- /dev/null
+++ b/support/plugins/mtllm/mtllm/llms/base.py
@@ -0,0 +1,387 @@
+"""Base Large Language Model (LLM) class."""
+
+import logging
+import re
+from typing import Any, Mapping, Optional
+
+from loguru import logger
+
+from mtllm.types import OutputHint, ReActOutput, TypeExplanation
+
+
+httpx_logger = logging.getLogger("httpx")
+httpx_logger.setLevel(logging.WARNING)
+
+SYSTEM_PROMPT = """
+[System Prompt]
+This is an operation you must perform and return the output values. Neither the methodology, extra sentences, nor the code is needed.
+Input/Type formatting: Explanation of the Input (variable_name) (type) = value
+""" # noqa E501
+
+PROMPT_TEMPLATE = """
+[Information]
+{information}
+
+[Context]
+{context}
+
+[Inputs Information]
+{inputs_information}
+
+[Output Information]
+{output_information}
+
+[Type Explanations]
+{type_explanations}
+
+[Action]
+{action}
+""" # noqa E501
+
+NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
+
+[Output]
+""" # noqa E501
+
+REASON_SUFFIX = """
+Reason and return the output result(s) only, adhering to the provided Type in the following format
+
+[Reasoning]
+[Output]
+"""
+
+CHAIN_OF_THOUGHT_SUFFIX = """
+Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
+
+[Chain of Thoughts]
+[Output]
+""" # noqa E501
+
+REACT_SUFFIX = """
+You are given a list of tools you can use to do different things. To achieve the given [Action], incrementally think and provide the tool_usage necessary to achieve what is thought.
+Provide your answer adhering to the following format. tool_usage is a function call with the necessary arguments. Only provide one [THOUGHT] and [TOOL USAGE] at a time.
+
+[Thought]
+[Tool Usage]
+""" # noqa E501
+
+MTLLM_OUTPUT_EXTRACT_PROMPT = """
+[Output]
+{model_output}
+
+[Previous Result You Provided]
+{previous_output}
+
+[Desired Output Type]
+{output_info}
+
+[Type Explanations]
+{output_type_info}
+
+Above output is not in the desired Output Format/Type. Please provide the output in the desired type. Do not repeat the previously provided output.
+Important: Do not provide the code or the methodology. Only provide the output in the desired format.
+""" # noqa E501
+
+OUTPUT_CHECK_PROMPT = """
+[Output]
+{model_output}
+
+[Desired Output Type]
+{output_type}
+
+[Type Explanations]
+{output_type_info}
+
+Check if the output is exactly in the desired Output Type. Important: Just say 'Yes' or 'No'.
+""" # noqa E501
+
+OUTPUT_FIX_PROMPT = """
+[Previous Output]
+{model_output}
+
+[Desired Output Type]
+{output_type}
+
+[Type Explanations]
+{output_type_info}
+
+[Error]
+{error}
+
+Above output is not in the desired Output Format/Type. Please provide the output in the desired type. Do not repeat the previously provided output.
+Important: Do not provide the code or the methodology. Only provide the output in the desired format.
+""" # noqa E501
+
+REACT_OUTPUT_FIX_PROMPT = """
+[Previous Output]
+{model_output}
+
+[Error]
+{error}
+
+[Tool Explanations]
+{tool_explanations}
+
+[Type Explanations]
+{type_explanations}
+
+Above output is not in the desired Output Format/Type. Please provide the output in the desired type. Do not repeat the previously provided output.
+Provide the output in the format below, where tool_usage is a function call with the necessary arguments. Only provide one [THOUGHT] and [TOOL USAGE] at a time.
+
+[Thought]
+[Tool Usage]
+""" # noqa E501
+
+
+class BaseLLM:
+ """Base Large Language Model (LLM) class."""
+
+ MTLLM_SYSTEM_PROMPT: str = SYSTEM_PROMPT
+ MTLLM_PROMPT: str = PROMPT_TEMPLATE
+ MTLLM_METHOD_PROMPTS: dict[str, str] = {
+ "Normal": NORMAL_SUFFIX,
+ "Reason": REASON_SUFFIX,
+ "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
+ "ReAct": REACT_SUFFIX,
+ }
+ OUTPUT_EXTRACT_PROMPT: str = MTLLM_OUTPUT_EXTRACT_PROMPT
+ OUTPUT_CHECK_PROMPT: str = OUTPUT_CHECK_PROMPT
+ OUTPUT_FIX_PROMPT: str = OUTPUT_FIX_PROMPT
+ REACT_OUTPUT_FIX_PROMPT: str = REACT_OUTPUT_FIX_PROMPT
+
+ def __init__(
+ self, verbose: bool = False, max_tries: int = 10, type_check: bool = False
+ ) -> None:
+ """Initialize the Large Language Model (LLM) client."""
+ self.verbose = verbose
+ self.max_tries = max_tries
+ self.type_check = type_check
+
+ def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
+ """Infer a response from the input meaning."""
+ raise NotImplementedError
+
+ def __call__(self, input_text: str | list[dict], **kwargs: dict) -> str:
+ """Infer a response from the input text."""
+ if self.verbose:
+ logger.info(f"Meaning In\n{input_text}")
+ return self.__infer__(input_text, **kwargs)
+
+ def resolve_output(
+ self,
+ meaning_out: str,
+ output_hint: OutputHint,
+ output_type_explanations: list[TypeExplanation],
+ _globals: dict,
+ _locals: Mapping,
+ ) -> Any: # noqa: ANN401
+ """Resolve the output string to return the reasoning and output."""
+ if self.verbose:
+ logger.info(f"Meaning Out\n{meaning_out}")
+ output_match = re.search(r"\[Output\](.*)", meaning_out, re.DOTALL)
+ if not output_match:
+ output = self._extract_output(
+ meaning_out,
+ output_hint,
+ output_type_explanations,
+ self.max_tries,
+ )
+ else:
+ output = output_match.group(1).strip()
+ if self.type_check:
+ is_in_desired_format = self._check_output(
+ output, output_hint.type, output_type_explanations
+ )
+ if not is_in_desired_format:
+ output = self._extract_output(
+ meaning_out,
+ output_hint,
+ output_type_explanations,
+ self.max_tries,
+ output,
+ )
+
+ return self.to_object(
+ output, output_hint, output_type_explanations, _globals, _locals
+ )
+
+ def resolve_react_output(
+ self,
+ meaning_out: str,
+ _globals: dict,
+ _locals: Mapping,
+ tool_explanations: str,
+ type_explanations: str,
+ ) -> ReActOutput:
+ """Resolve the output string to return the reasoning and output."""
+ if self.verbose:
+ logger.info(f"Meaning Out\n{meaning_out}")
+ try:
+ thought_match = re.search(
+ r"\[Thought\](.*)\[Tool Usage\]", meaning_out, re.DOTALL
+ )
+ tool_usage_match = re.search(r"\[Tool Usage\](.*)", meaning_out, re.DOTALL)
+ if not thought_match or not tool_usage_match:
+ raise ValueError("Failed to find Thought or Tool Usage in the output.")
+ thought = thought_match.group(1).strip()
+ tool_usage = tool_usage_match.group(1).strip()
+ try:
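+                # Execute the model-proposed tool call; any exception becomes the
+                # observation that is fed back to the model on the next iteration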
+ output = eval(tool_usage, _globals, _locals)
+ except Exception as e:
+ return ReActOutput(
+ thought=thought, action=tool_usage, observation=str(e)
+ )
+ return ReActOutput(thought=thought, action=tool_usage, observation=output)
+ except Exception as e:
+ print(e)
+ new_meaning_out = self._fix_react_output(
+ meaning_out, e, tool_explanations, type_explanations
+ )
+ return self.resolve_react_output(
+ new_meaning_out, _globals, _locals, tool_explanations, type_explanations
+ )
+
+ def _fix_react_output(
+ self,
+ meaning_out: str,
+ error: Exception,
+ tool_explanations: str,
+ type_explanations: str,
+ ) -> str:
+ """Fix the output string."""
+ if self.verbose:
+ logger.info(f"Error: {error}, Fixing the output.")
+ react_output_fix_prompt = self.REACT_OUTPUT_FIX_PROMPT.format(
+ model_output=meaning_out,
+ error=str(error),
+ tool_explanations=tool_explanations,
+ type_explanations=type_explanations,
+ )
+ return self.__infer__(react_output_fix_prompt)
+
+ def _check_output(
+ self,
+ output: str,
+ output_type: str,
+ output_type_explanations: list[TypeExplanation],
+ ) -> bool:
+ """Check if the output is in the desired format."""
+ output_check_prompt = self.OUTPUT_CHECK_PROMPT.format(
+ model_output=output,
+ output_type=output_type,
+ output_type_info="\n".join(
+ [str(info) for info in output_type_explanations]
+ ),
+ )
+ llm_output = self.__infer__(output_check_prompt)
+ return "yes" in llm_output.lower()
+
+ def _extract_output(
+ self,
+ meaning_out: str,
+ output_hint: OutputHint,
+ output_type_explanations: list[TypeExplanation],
+ max_tries: int,
+ previous_output: str = "None",
+ ) -> str:
+ """Extract the output from the meaning out string."""
+ if max_tries == 0:
+ logger.error("Failed to extract output. Max tries reached.")
+ raise ValueError(
+                "Failed to extract output. Try changing the semstrings, providing examples, or changing the method."
+ )
+
+ if self.verbose:
+ if max_tries < self.max_tries:
+ logger.info(
+ f"Failed to extract output. Trying to extract output again. Max tries left: {max_tries}"
+ )
+ else:
+ logger.info("Extracting output from the meaning out string.")
+
+ output_extract_prompt = self.OUTPUT_EXTRACT_PROMPT.format(
+ model_output=meaning_out,
+ previous_output=previous_output,
+ output_info=str(output_hint),
+ output_type_info="\n".join(
+ [str(info) for info in output_type_explanations]
+ ),
+ )
+ llm_output = self.__infer__(output_extract_prompt)
+ is_in_desired_format = self._check_output(
+ llm_output, output_hint.type, output_type_explanations
+ )
+ if self.verbose:
+ logger.info(
+ f"Extracted Output: {llm_output}. Is in Desired Format: {is_in_desired_format}"
+ )
+ if is_in_desired_format:
+ return llm_output
+ return self._extract_output(
+ meaning_out,
+ output_hint,
+ output_type_explanations,
+ max_tries - 1,
+ llm_output,
+ )
+
+ def to_object(
+ self,
+ output: str,
+ output_hint: OutputHint,
+ output_type_explanations: list[TypeExplanation],
+ _globals: dict,
+ _locals: Mapping,
+ error: Optional[Exception] = None,
+ num_retries: int = 0,
+ ) -> Any: # noqa: ANN401
+ """Convert the output string to an object."""
+ if num_retries >= self.max_tries:
+ raise ValueError("Failed to convert output to object. Max tries reached.")
+ if output_hint.type == "str":
+ return output
+ if error:
+ fixed_output = self._fix_output(
+ output, output_hint, output_type_explanations, error
+ )
+ return self.to_object(
+ fixed_output,
+ output_hint,
+ output_type_explanations,
+ _globals,
+ _locals,
+ num_retries=num_retries + 1,
+ )
+
+ try:
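+            # The output string is expected to be an eval()-able literal or constructor
+            # expression; failures are routed through _fix_output and retried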
+ return eval(output, _globals, _locals)
+ except Exception as e:
+ return self.to_object(
+ output,
+ output_hint,
+ output_type_explanations,
+ _globals,
+ _locals,
+ error=e,
+ num_retries=num_retries + 1,
+ )
+
+ def _fix_output(
+ self,
+ output: str,
+ output_hint: OutputHint,
+ output_type_explanations: list[TypeExplanation],
+ error: Exception,
+ ) -> str:
+ """Fix the output string."""
+ if self.verbose:
+ logger.info(f"Error: {error}, Fixing the output.")
+ output_fix_prompt = self.OUTPUT_FIX_PROMPT.format(
+ model_output=output,
+ output_type=output_hint.type,
+ output_type_info="\n".join(
+ [str(info) for info in output_type_explanations]
+ ),
+ error=error,
+ )
+ return self.__infer__(output_fix_prompt)
diff --git a/support/plugins/mtllm/mtllm/llms/groq.py b/support/plugins/mtllm/mtllm/llms/groq.py
new file mode 100644
index 000000000..8c3fe6c3d
--- /dev/null
+++ b/support/plugins/mtllm/mtllm/llms/groq.py
@@ -0,0 +1,78 @@
+"""Groq API client for MTLLM."""
+
+from mtllm.llms.base import BaseLLM
+
+
+REASON_SUFFIX = """
+Reason and return the output result(s) only, adhering to the provided Type in the following format
+
+[Reasoning]
+[Output]
+"""
+
+NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
+
+[Output]
+""" # noqa E501
+
+CHAIN_OF_THOUGHT_SUFFIX = """
+Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
+
+[Chain of Thoughts]
+[Output]
+""" # noqa E501
+
+REACT_SUFFIX = """
+You are given a list of tools you can use to do different things. To achieve the given [Action], incrementally think and provide the tool_usage necessary to achieve what is thought.
+Provide your answer adhering to the following format. tool_usage is a function call with the necessary arguments. Only provide one [THOUGHT] and [TOOL USAGE] at a time.
+
+[Thought]
+[Tool Usage]
+""" # noqa E501
+
+
+class Groq(BaseLLM):
+ """Groq API client for MTLLM."""
+
+ MTLLM_METHOD_PROMPTS: dict[str, str] = {
+ "Normal": NORMAL_SUFFIX,
+ "Reason": REASON_SUFFIX,
+ "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
+ "ReAct": REACT_SUFFIX,
+ }
+
+ def __init__(
+ self,
+ verbose: bool = False,
+ max_tries: int = 10,
+ type_check: bool = False,
+ **kwargs: dict
+ ) -> None:
+ """Initialize the Groq API client."""
+ import groq # type: ignore
+
+ super().__init__(verbose, max_tries, type_check)
+ self.client = groq.Groq()
+ self.model_name = kwargs.get("model_name", "mixtral-8x7b-32768")
+ self.temperature = kwargs.get("temperature", 0.7)
+ self.max_tokens = kwargs.get("max_tokens", 1024)
+
+ def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
+ """Infer a response from the input meaning."""
+ assert isinstance(
+ meaning_in, str
+ ), "Currently Multimodal models are not supported. Please provide a string input."
+ messages = [{"role": "user", "content": meaning_in}]
+ model_params = {
+ k: v
+ for k, v in kwargs.items()
+ if k not in ["model_name", "temperature", "max_tokens"]
+ }
+ output = self.client.chat.completions.create(
+ model=kwargs.get("model_name", self.model_name),
+ temperature=kwargs.get("temperature", self.temperature),
+ max_tokens=kwargs.get("max_tokens", self.max_tokens),
+ messages=messages,
+ **model_params,
+ )
+ return output.choices[0].message.content
diff --git a/support/plugins/mtllm/mtllm/llms/huggingface.py b/support/plugins/mtllm/mtllm/llms/huggingface.py
new file mode 100644
index 000000000..e16ae0ba5
--- /dev/null
+++ b/support/plugins/mtllm/mtllm/llms/huggingface.py
@@ -0,0 +1,84 @@
+"""Huggingface client for MTLLM."""
+
+from mtllm.llms.base import BaseLLM
+
+
+REASON_SUFFIX = """
+Reason and return the output result(s) only, such that they are eval()-compatible and reflect the
+expected output type. Follow the format below to provide the reasoning for the output result(s).
+
+[Reasoning]
+[Output]
+"""
+
+NORMAL_SUFFIX = """Return the output result(s) only, such that they are eval()-compatible and
+reflect the expected output type. Follow the format below to provide the output result(s).
+
+[Output]
+""" # noqa E501
+
+CHAIN_OF_THOUGHT_SUFFIX = """
+Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
+
+[Chain of Thoughts]
+[Output]
+""" # noqa E501
+
+REACT_SUFFIX = """
+You are given a list of tools you can use to do different things. To achieve the given [Action], incrementally think and provide the tool_usage necessary to achieve what is thought.
+Provide your answer adhering to the following format. tool_usage is a function call with the necessary arguments. Only provide one [THOUGHT] and [TOOL USAGE] at a time.
+
+[Thought]
+[Tool Usage]
+""" # noqa E501
+
+
+class Huggingface(BaseLLM):
+ """Huggingface API client for Large Language Models (LLMs)."""
+
+ MTLLM_METHOD_PROMPTS: dict[str, str] = {
+ "Normal": NORMAL_SUFFIX,
+ "Reason": REASON_SUFFIX,
+ "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
+ "ReAct": REACT_SUFFIX,
+ }
+
+ def __init__(
+ self,
+ verbose: bool = False,
+ max_tries: int = 10,
+ type_check: bool = False,
+ **kwargs: dict
+ ) -> None:
+ """Initialize the Huggingface API client."""
+ import torch # type: ignore
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline # type: ignore
+
+ super().__init__(verbose, max_tries, type_check)
+ torch.random.manual_seed(0)
+ model = AutoModelForCausalLM.from_pretrained(
+ kwargs.get("model_name", "microsoft/Phi-3-mini-128k-instruct"),
+ device_map=kwargs.get("device_map", "cuda"),
+ torch_dtype="auto",
+ trust_remote_code=True,
+ )
+ tokenizer = AutoTokenizer.from_pretrained(
+ kwargs.get("model_name", "microsoft/Phi-3-mini-128k-instruct")
+ )
+ self.pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
+ self.temperature = kwargs.get("temperature", 0.7)
+ self.max_tokens = kwargs.get("max_new_tokens", 1024)
+
+ def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
+ """Infer a response from the input meaning."""
+ assert isinstance(
+ meaning_in, str
+ ), "Currently Multimodal models are not supported. Please provide a string input."
+ messages = [{"role": "user", "content": meaning_in}]
+ output = self.pipe(
+ messages,
+ temperature=kwargs.get("temperature", self.temperature),
+ max_length=kwargs.get("max_new_tokens", self.max_tokens),
+ **kwargs,
+ )
+ return output[0]["generated_text"][-1]["content"]
diff --git a/support/plugins/mtllm/mtllm/llms/ollama.py b/support/plugins/mtllm/mtllm/llms/ollama.py
new file mode 100644
index 000000000..29937b4e1
--- /dev/null
+++ b/support/plugins/mtllm/mtllm/llms/ollama.py
@@ -0,0 +1,87 @@
+"""Ollama client for MTLLM."""
+
+from mtllm.llms.base import BaseLLM
+
+REASON_SUFFIX = """
+Reason and return the output result(s) only, adhering to the provided Type in the following format
+
+[Reasoning]
+[Output]
+"""
+
+NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
+
+[Output]
+""" # noqa E501
+
+CHAIN_OF_THOUGHT_SUFFIX = """
+Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
+
+[Chain of Thoughts]
+[Output]
+""" # noqa E501
+
+REACT_SUFFIX = """
+You are given a list of tools you can use to do different things. To achieve the given [Action], incrementally think and provide the tool_usage necessary to achieve what is thought.
+Provide your answer adhering to the following format. tool_usage is a function call with the necessary arguments. Only provide one [THOUGHT] and [TOOL USAGE] at a time.
+
+[Thought]
+[Tool Usage]
+""" # noqa E501
+
+
+class Ollama(BaseLLM):
+ """Ollama API client for Large Language Models (LLMs)."""
+
+ MTLLM_METHOD_PROMPTS: dict[str, str] = {
+ "Normal": NORMAL_SUFFIX,
+ "Reason": REASON_SUFFIX,
+ "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
+ "ReAct": REACT_SUFFIX,
+ }
+
+ def __init__(
+ self,
+ verbose: bool = False,
+ max_tries: int = 10,
+ type_check: bool = False,
+ **kwargs: dict
+ ) -> None:
+ """Initialize the Ollama API client."""
+ import ollama # type: ignore
+
+ super().__init__(verbose, max_tries, type_check)
+ self.client = ollama.Client(host=kwargs.get("host", "http://localhost:11434"))
+ self.model_name = kwargs.get("model_name", "phi3")
+ self.default_model_params = {
+ k: v for k, v in kwargs.items() if k not in ["model_name", "host"]
+ }
+
+ def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
+ """Infer a response from the input meaning."""
+ assert isinstance(
+ meaning_in, str
+ ), "Currently Multimodal models are not supported. Please provide a string input."
+ model = str(kwargs.get("model_name", self.model_name))
+ if not self.check_model(model):
+ self.download_model(model)
+ model_params = {k: v for k, v in kwargs.items() if k not in ["model_name"]}
+ messages = [{"role": "user", "content": meaning_in}]
+ output = self.client.chat(
+ model=model,
+ messages=messages,
+ options={**self.default_model_params, **model_params},
+ )
+ return output["message"]["content"]
+
+ def check_model(self, model_name: str) -> bool:
+ """Check if the model is available."""
+ try:
+ self.client.show(model_name)
+ return True
+ except Exception:
+ return False
+
+ def download_model(self, model_name: str) -> None:
+ """Download the model."""
+ self.client.pull(model_name)
diff --git a/support/plugins/mtllm/mtllm/llms/openai.py b/support/plugins/mtllm/mtllm/llms/openai.py
new file mode 100644
index 000000000..872ab8ee5
--- /dev/null
+++ b/support/plugins/mtllm/mtllm/llms/openai.py
@@ -0,0 +1,143 @@
+"""OpenAI API client for MTLLM."""
+
+from mtllm.llms.base import BaseLLM
+
+
+REASON_SUFFIX = """
+Reason and return the output result(s) only, adhering to the provided Type in the following format
+
+[Reasoning]
+[Output]
+"""
+
+NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
+
+[Output]
+""" # noqa E501
+
+CHAIN_OF_THOUGHT_SUFFIX = """
+Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
+
+[Chain of Thoughts]
+[Output]
+""" # noqa E501
+
+REACT_SUFFIX = """
+You are given a list of tools you can use to do different things. To achieve the given [Action], incrementally think and provide the tool_usage necessary to achieve what is thought.
+Provide your answer adhering to the following format. tool_usage is a function call with the necessary arguments. Only provide one [THOUGHT] and [TOOL USAGE] at a time.
+
+[Thought]
+[Tool Usage]
+""" # noqa E501
+
+
+class OpenAI(BaseLLM):
+    """OpenAI API client for MTLLM."""
+
+ MTLLM_METHOD_PROMPTS: dict[str, str] = {
+ "Normal": NORMAL_SUFFIX,
+ "Reason": REASON_SUFFIX,
+ "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
+ "ReAct": REACT_SUFFIX,
+ }
+
+ def __init__(
+ self,
+ verbose: bool = False,
+ max_tries: int = 10,
+ type_check: bool = False,
+ **kwargs: dict,
+ ) -> None:
+        """Initialize the OpenAI API client."""
+ import openai # type: ignore
+
+ super().__init__(verbose, max_tries, type_check)
+ self.client = openai.OpenAI()
+ self.model_name = str(kwargs.get("model_name", "gpt-4o-mini"))
+ self.temperature = kwargs.get("temperature", 0.7)
+ self.max_tokens = kwargs.get("max_tokens", 1024)
+
+ def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
+ """Infer a response from the input meaning."""
+ if not isinstance(meaning_in, str):
+ assert self.model_name.startswith(
+ ("gpt-4o", "gpt-4-turbo")
+ ), f"Model {self.model_name} is not multimodal, use a multimodal model instead."
+ messages = [{"role": "user", "content": meaning_in}]
+ output = self.client.chat.completions.create(
+ model=kwargs.get("model_name", self.model_name),
+ temperature=kwargs.get("temperature", self.temperature),
+ max_tokens=kwargs.get("max_tokens", self.max_tokens),
+ messages=messages,
+ )
+ return output.choices[0].message.content
+
+
+COMPLETION_REASON_SUFFIX = """
+Reason and return the output result(s) only, adhering to the provided Type in the following format
+
+[Reasoning]
+[Output]
+
+---
+
+[Reasoning] """
+
+COMPLETION_NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
+
+[Output]
+
+---
+
+[Output] """ # noqa E501
+
+COMPLETION_CHAIN_OF_THOUGHT_SUFFIX = """
+Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
+
+[Chain of Thoughts]
+[Output]
+
+---
+
+[Chain of Thoughts] Lets Think Step by Step,
+1. """ # noqa E501
+
+COMPLETION_REACT_SUFFIX = """
+""" # noqa E501
+
+
+class OpenAICompletion(OpenAI):
+ """OpenAI Completion API client for MTLLM."""
+
+ MTLLM_METHOD_PROMPTS: dict[str, str] = {
+ "Normal": COMPLETION_NORMAL_SUFFIX,
+ "Reason": COMPLETION_REASON_SUFFIX,
+ "Chain-of-Thoughts": COMPLETION_CHAIN_OF_THOUGHT_SUFFIX,
+ "ReAct": COMPLETION_REACT_SUFFIX,
+ }
+
+ def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
+ """Infer a response from the input meaning."""
+ assert "instruct" in self.model_name or self.model_name in [
+ "babbage-002",
+ "davinci-002",
+    ], f"Model {self.model_name} is not an instruction model. Please use an instruction model."
+ assert isinstance(
+ meaning_in, str
+ ), "Completion models are not supported with multimodal inputs. Please provide a string input."
+
+ model_params = {
+ k: v
+ for k, v in kwargs.items()
+ if k not in ["model_name", "temperature", "max_tokens"]
+ }
+ model_output = self.client.completions.create(
+ model=kwargs.get("model_name", self.model_name),
+ prompt=meaning_in,
+ temperature=kwargs.get("temperature", self.temperature),
+ max_tokens=kwargs.get("max_tokens", self.max_tokens),
+ **model_params,
+ )
+ output = model_output.choices[0].text.strip()
+ output = f"[Output] {output}" if "[Output]" not in output else output
+ return output
diff --git a/support/plugins/mtllm/mtllm/llms/togetherai.py b/support/plugins/mtllm/mtllm/llms/togetherai.py
new file mode 100644
index 000000000..5b3ee2245
--- /dev/null
+++ b/support/plugins/mtllm/mtllm/llms/togetherai.py
@@ -0,0 +1,71 @@
+"""TogetherAI API client for MTLLM."""
+
+from mtllm.llms.base import BaseLLM
+
+REASON_SUFFIX = """
+Reason and return the output result(s) only, adhering to the provided Type in the following format
+
+[Reasoning]
+[Output]
+"""
+
+NORMAL_SUFFIX = """Generate and return the output result(s) only, adhering to the provided Type in the following format
+
+[Output]
+""" # noqa E501
+
+CHAIN_OF_THOUGHT_SUFFIX = """
+Generate and return the output result(s) only, adhering to the provided Type in the following format. Perform the operation in a chain of thoughts.(Think Step by Step)
+
+[Chain of Thoughts]
+[Output]
+""" # noqa E501
+
+REACT_SUFFIX = """
+You are given a list of tools you can use to do different things. To achieve the given [Action], incrementally think and provide the tool_usage necessary to achieve what is thought.
+Provide your answer adhering to the following format. tool_usage is a function call with the necessary arguments. Only provide one [THOUGHT] and [TOOL USAGE] at a time.
+
+[Thought]
+[Tool Usage]
+""" # noqa E501
+
+
+class TogetherAI(BaseLLM):
+ """Anthropic API client for MTLLM."""
+
+ MTLLM_METHOD_PROMPTS: dict[str, str] = {
+ "Normal": NORMAL_SUFFIX,
+ "Reason": REASON_SUFFIX,
+ "Chain-of-Thoughts": CHAIN_OF_THOUGHT_SUFFIX,
+ "ReAct": REACT_SUFFIX,
+ }
+
+ def __init__(
+ self,
+ verbose: bool = False,
+ max_tries: int = 10,
+ type_check: bool = False,
+ **kwargs: dict
+ ) -> None:
+ """Initialize the Anthropic API client."""
+ import together # type: ignore
+
+ super().__init__(verbose, max_tries, type_check)
+ self.client = together.Together()
+ self.model_name = kwargs.get("model_name", "mistralai/Mistral-7B-Instruct-v0.3")
+ self.temperature = kwargs.get("temperature", 0.7)
+ self.max_tokens = kwargs.get("max_tokens", 1024)
+
+ def __infer__(self, meaning_in: str | list[dict], **kwargs: dict) -> str:
+ """Infer a response from the input meaning."""
+ assert isinstance(
+ meaning_in, str
+ ), "Currently Multimodal models are not supported. Please provide a string input."
+ messages = [{"role": "user", "content": meaning_in}]
+ output = self.client.chat.completions.create(
+ model=kwargs.get("model_name", self.model_name),
+ temperature=kwargs.get("temperature", self.temperature),
+ max_tokens=kwargs.get("max_tokens", self.max_tokens),
+ messages=messages,
+ )
+ return output.choices[0].message.content
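
A hypothetical usage sketch for the client above (assumes the together package is installed and TOGETHER_API_KEY is set in the environment; the prompt call is commented out since it hits the live API):

    from mtllm.llms.togetherai import TogetherAI

    llm = TogetherAI(verbose=True, max_tries=3)
    # __infer__ expects a plain string prompt; multimodal inputs are rejected.
    # print(llm.__infer__("What is the capital of France?"))
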
diff --git a/support/plugins/mtllm/mtllm/plugin.py b/support/plugins/mtllm/mtllm/plugin.py
new file mode 100644
index 000000000..781a1d9e8
--- /dev/null
+++ b/support/plugins/mtllm/mtllm/plugin.py
@@ -0,0 +1,585 @@
+"""Plugin for Jac's with_llm feature."""
+
+import ast as ast3
+import os
+import pickle
+from typing import Any, Callable, Mapping, Optional, Sequence
+
+import jaclang.compiler.absyntree as ast
+from jaclang.compiler.constant import Constants as Con
+from jaclang.compiler.passes.main.pyast_gen_pass import PyastGenPass
+from jaclang.compiler.semtable import SemInfo, SemRegistry, SemScope
+from jaclang.plugin.default import hookimpl
+from jaclang.runtimelib.utils import extract_params, extract_type, get_sem_scope
+
+from mtllm.aott import (
+ aott_raise,
+ get_all_type_explanations,
+)
+from mtllm.llms.base import BaseLLM
+from mtllm.types import Information, InputInformation, OutputHint, Tool
+from mtllm.utils import get_filtered_registry
+
+
+def callable_to_tool(tool: Callable, mod_registry: SemRegistry) -> Tool:
+ """Convert a callable to a Tool."""
+ assert callable(tool), f"{tool} cannot be used as a tool"
+ tool_name = tool.__name__
+ _, tool_info = mod_registry.lookup(name=tool_name)
+ assert tool_info and isinstance(
+ tool_info, SemInfo
+ ), f"Tool {tool_name} not found in the registry"
+ return Tool(tool, tool_info, tool_info.get_children(mod_registry, ast.ParamVar))
+
+
+class JacFeature:
+ """Jac's with_llm feature."""
+
+ @staticmethod
+ @hookimpl
+ def with_llm(
+ file_loc: str,
+ model: BaseLLM,
+ model_params: dict[str, Any],
+ scope: str,
+ incl_info: list[tuple[str, Any]], # noqa: ANN401
+ excl_info: list[tuple[str, str]],
+ inputs: list[
+ tuple[str, str, str, Any]
+ ], # TODO: Need to change this in the jaclang pyast_build_pass
+ outputs: tuple,
+ action: str,
+ _globals: dict,
+ _locals: Mapping,
+ ) -> Any: # noqa: ANN401
+ """Jac's with_llm feature."""
+ with open(
+ os.path.join(
+ os.path.dirname(file_loc),
+ "__jac_gen__",
+ os.path.basename(file_loc).replace(".jac", ".registry.pkl"),
+ ),
+ "rb",
+ ) as f:
+ mod_registry = pickle.load(f)
+
+ _scope = SemScope.get_scope_from_str(scope)
+ assert _scope is not None, f"Invalid scope: {scope}"
+
+ method = model_params.pop("method") if "method" in model_params else "Normal"
+ available_methods = model.MTLLM_METHOD_PROMPTS.keys()
+ assert (
+ method in available_methods
+ ), f"Invalid method: {method}. Select from {available_methods}"
+
+ context = (
+ "\n".join(model_params.pop("context")) if "context" in model_params else ""
+ )
+
+ type_collector: list = []
+
+ filtered_registry = get_filtered_registry(mod_registry, _scope)
+ incl_info = [x for x in incl_info if not isinstance(x[1], type)]
+ informations = [Information(filtered_registry, x[0], x[1]) for x in incl_info]
+ type_collector.extend([x.get_types() for x in informations])
+
+ inputs_information = []
+ for input_item in inputs:
+ _input = InputInformation(input_item[0], input_item[2], input_item[3])
+ type_collector.extend(_input.get_types())
+ inputs_information.append(_input)
+
+ output = outputs[0] if isinstance(outputs, list) else outputs
+ output_hint = OutputHint(output[0], output[1])
+ type_collector.extend(output_hint.get_types())
+ output_type_explanations = get_all_type_explanations(
+ output_hint.get_types(), mod_registry
+ )
+
+ type_explanations = get_all_type_explanations(type_collector, mod_registry)
+
+ tools = model_params.pop("tools") if "tools" in model_params else None
+ if method == "ReAct":
+ assert tools, "Tools must be provided for the ReAct method."
+ _tools = [
+ tool if isinstance(tool, Tool) else callable_to_tool(tool, mod_registry)
+ for tool in tools
+ ]
+ else:
+ _tools = []
+
+ meaning_out = aott_raise(
+ model,
+ informations,
+ inputs_information,
+ output_hint,
+ type_explanations,
+ action,
+ context,
+ method,
+ _tools,
+ model_params,
+ _globals,
+ _locals,
+ )
+ _output = model.resolve_output(
+ meaning_out, output_hint, output_type_explanations, _globals, _locals
+ )
+ return _output
+
+ @staticmethod
+ @hookimpl
+ def gen_llm_body(_pass: PyastGenPass, node: ast.Ability) -> list[ast3.AST]:
+ """Generate the by LLM body."""
+ _pass.needs_jac_feature()
+ if isinstance(node.body, ast.FuncCall):
+ model = node.body.target.gen.py_ast[0]
+ extracted_type = (
+ "".join(extract_type(node.signature.return_type))
+ if isinstance(node.signature, ast.FuncSignature)
+ and node.signature.return_type
+ else None
+ )
+ scope = _pass.sync(ast3.Constant(value=str(get_sem_scope(node))))
+ model_params, include_info, exclude_info = extract_params(node.body)
+ inputs = (
+ [
+ _pass.sync(
+ ast3.Tuple(
+ elts=[
+ (
+ _pass.sync(
+ ast3.Constant(
+ value=(
+ param.semstr.lit_value
+ if param.semstr
+ else None
+ )
+ )
+ )
+ ),
+ (
+ param.type_tag.tag.gen.py_ast[0]
+ if param.type_tag
+ else None
+ ),
+ _pass.sync(ast3.Constant(value=param.name.value)),
+ _pass.sync(
+ ast3.Name(
+ id=param.name.value,
+ ctx=ast3.Load(),
+ )
+ ),
+ ],
+ ctx=ast3.Load(),
+ )
+ )
+ for param in node.signature.params.items
+ ]
+ if isinstance(node.signature, ast.FuncSignature)
+ and node.signature.params
+ else []
+ )
+ outputs = (
+ [
+ (
+ _pass.sync(
+ ast3.Constant(
+ value=(
+ node.signature.semstr.lit_value
+ if node.signature.semstr
+ else ""
+ )
+ )
+ )
+ ),
+ (_pass.sync(ast3.Constant(value=(extracted_type)))),
+ ]
+ if isinstance(node.signature, ast.FuncSignature)
+ else []
+ )
+ action = (
+ node.semstr.gen.py_ast[0]
+ if node.semstr
+ else _pass.sync(ast3.Constant(value=node.name_ref.sym_name))
+ )
+ return [
+ _pass.sync(
+ ast3.Return(
+ value=_pass.sync(
+ _pass.by_llm_call(
+ model,
+ model_params,
+ scope,
+ inputs,
+ outputs,
+ action,
+ include_info,
+ exclude_info,
+ )
+ )
+ )
+ )
+ ]
+ else:
+ return []
+
+ @staticmethod
+ @hookimpl
+ def by_llm_call(
+ _pass: PyastGenPass,
+ model: ast3.AST,
+ model_params: dict[str, ast.Expr],
+ scope: ast3.AST,
+ inputs: Sequence[Optional[ast3.AST]],
+ outputs: Sequence[Optional[ast3.AST]] | ast3.Call,
+ action: Optional[ast3.AST],
+ include_info: list[tuple[str, ast3.AST]],
+ exclude_info: list[tuple[str, ast3.AST]],
+ ) -> ast3.Call:
+ """Return the LLM Call, e.g. _Jac.with_llm()."""
+ return _pass.sync(
+ ast3.Call(
+ func=_pass.sync(
+ ast3.Attribute(
+ value=_pass.sync(
+ ast3.Name(
+ id=Con.JAC_FEATURE.value,
+ ctx=ast3.Load(),
+ )
+ ),
+ attr="with_llm",
+ ctx=ast3.Load(),
+ )
+ ),
+ args=[],
+ keywords=[
+ _pass.sync(
+ ast3.keyword(
+ arg="file_loc",
+ value=_pass.sync(ast3.Name(id="__file__", ctx=ast3.Load())),
+ )
+ ),
+ _pass.sync(
+ ast3.keyword(
+ arg="model",
+ value=model,
+ )
+ ),
+ _pass.sync(
+ ast3.keyword(
+ arg="model_params",
+ value=_pass.sync(
+ ast3.Dict(
+ keys=[
+ _pass.sync(ast3.Constant(value=key))
+ for key in model_params.keys()
+ ],
+ values=[
+ value.gen.py_ast[0]
+ for value in model_params.values()
+ ],
+ )
+ ),
+ )
+ ),
+ _pass.sync(
+ ast3.keyword(
+ arg="scope",
+ value=scope,
+ )
+ ),
+ _pass.sync(
+ ast3.keyword(
+ arg="incl_info",
+ value=_pass.sync(
+ ast3.List(
+ elts=[
+ _pass.sync(
+ ast3.Tuple(
+ elts=[
+ _pass.sync(
+ ast3.Constant(value=key)
+ ),
+ value,
+ ],
+ ctx=ast3.Load(),
+ )
+ )
+ for key, value in include_info
+ ],
+ ctx=ast3.Load(),
+ )
+ ),
+ )
+ ),
+ _pass.sync(
+ ast3.keyword(
+ arg="excl_info",
+ value=_pass.sync(
+ ast3.List(
+ elts=[
+ _pass.sync(
+ ast3.Tuple(
+ elts=[
+ _pass.sync(
+ ast3.Constant(value=key)
+ ),
+ value,
+ ],
+ ctx=ast3.Load(),
+ )
+ )
+ for key, value in exclude_info
+ ],
+ ctx=ast3.Load(),
+ )
+ ),
+ ),
+ ),
+ _pass.sync(
+ ast3.keyword(
+ arg="inputs",
+ value=_pass.sync(
+ ast3.List(
+ elts=inputs,
+ ctx=ast3.Load(),
+ )
+ ),
+ )
+ ),
+ _pass.sync(
+ ast3.keyword(
+ arg="outputs",
+ value=(
+ _pass.sync(
+ ast3.Tuple(
+ elts=outputs,
+ ctx=ast3.Load(),
+ )
+ )
+ if not isinstance(outputs, ast3.Call)
+ else outputs
+ ),
+ )
+ ),
+ _pass.sync(
+ ast3.keyword(
+ arg="action",
+ value=action,
+ )
+ ),
+ _pass.sync(
+ ast3.keyword(
+ arg="_globals",
+ value=_pass.sync(
+ ast3.Call(
+ func=_pass.sync(
+ ast3.Name(
+ id="globals",
+ ctx=ast3.Load(),
+ )
+ ),
+ args=[],
+ keywords=[],
+ )
+ ),
+ )
+ ),
+ _pass.sync(
+ ast3.keyword(
+ arg="_locals",
+ value=_pass.sync(
+ ast3.Call(
+ func=_pass.sync(
+ ast3.Name(
+ id="locals",
+ ctx=ast3.Load(),
+ )
+ ),
+ args=[],
+ keywords=[],
+ )
+ ),
+ )
+ ),
+ ],
+ )
+ )
+
+ @staticmethod
+ @hookimpl
+ def get_by_llm_call_args(_pass: PyastGenPass, node: ast.FuncCall) -> dict:
+ """Get the by LLM call args."""
+ if node.genai_call is None:
+ raise _pass.ice("No genai_call")
+
+ model = node.genai_call.target.gen.py_ast[0]
+ model_params, include_info, exclude_info = extract_params(node.genai_call)
+ action = _pass.sync(
+ ast3.Constant(
+ value="Create an object of the specified type, using the specifically "
+ " provided input value(s) and look up any missing attributes from reliable"
+ " online sources to fill them in accurately."
+ )
+ )
+ _output_ = "".join(extract_type(node.target))
+ include_info.append(
+ (
+ _output_.split(".")[0],
+ _pass.sync(ast3.Name(id=_output_.split(".")[0], ctx=ast3.Load())),
+ )
+ )
+ scope = _pass.sync(
+ ast3.Call(
+ func=_pass.sync(
+ ast3.Attribute(
+ value=_pass.sync(
+ ast3.Name(
+ id=Con.JAC_FEATURE.value,
+ ctx=ast3.Load(),
+ )
+ ),
+ attr="obj_scope",
+ ctx=ast3.Load(),
+ )
+ ),
+ args=[
+ _pass.sync(
+ ast3.Name(
+ id="__file__",
+ ctx=ast3.Load(),
+ )
+ ),
+ _pass.sync(ast3.Constant(value=_output_)),
+ ],
+ keywords=[],
+ )
+ )
+ outputs = _pass.sync(
+ ast3.Call(
+ func=_pass.sync(
+ ast3.Attribute(
+ value=_pass.sync(
+ ast3.Name(
+ id=Con.JAC_FEATURE.value,
+ ctx=ast3.Load(),
+ )
+ ),
+ attr="get_sem_type",
+ ctx=ast3.Load(),
+ )
+ ),
+ args=[
+ _pass.sync(
+ ast3.Name(
+ id="__file__",
+ ctx=ast3.Load(),
+ )
+ ),
+ _pass.sync(ast3.Constant(value=str(_output_))),
+ ],
+ keywords=[],
+ )
+ )
+ if node.params and node.params.items:
+ inputs = [
+ _pass.sync(
+ ast3.Tuple(
+ elts=[
+ _pass.sync(
+ ast3.Call(
+ func=_pass.sync(
+ ast3.Attribute(
+ value=_pass.sync(
+ ast3.Name(
+ id=Con.JAC_FEATURE.value,
+ ctx=ast3.Load(),
+ )
+ ),
+ attr="get_semstr_type",
+ ctx=ast3.Load(),
+ )
+ ),
+ args=[
+ _pass.sync(
+ ast3.Name(id="__file__", ctx=ast3.Load())
+ ),
+ scope,
+ _pass.sync(
+ ast3.Constant(
+ value=(
+ kw_pair.key.value
+ if isinstance(kw_pair.key, ast.Name)
+ else None
+ )
+ )
+ ),
+ _pass.sync(ast3.Constant(value=True)),
+ ],
+ keywords=[],
+ )
+ ),
+ _pass.sync(
+ ast3.Call(
+ func=_pass.sync(
+ ast3.Attribute(
+ value=_pass.sync(
+ ast3.Name(
+ id=Con.JAC_FEATURE.value,
+ ctx=ast3.Load(),
+ )
+ ),
+ attr="get_semstr_type",
+ ctx=ast3.Load(),
+ )
+ ),
+ args=[
+ _pass.sync(
+ ast3.Name(id="__file__", ctx=ast3.Load())
+ ),
+ scope,
+ _pass.sync(
+ ast3.Constant(
+ value=(
+ kw_pair.key.value
+ if isinstance(kw_pair.key, ast.Name)
+ else None
+ )
+ )
+ ),
+ _pass.sync(ast3.Constant(value=False)),
+ ],
+ keywords=[],
+ )
+ ),
+ _pass.sync(
+ ast3.Constant(
+ value=(
+ kw_pair.key.value
+ if isinstance(kw_pair.key, ast.Name)
+ else None
+ )
+ )
+ ),
+ kw_pair.value.gen.py_ast[0],
+ ],
+ ctx=ast3.Load(),
+ )
+ )
+ for kw_pair in node.params.items
+ if isinstance(kw_pair, ast.KWPair)
+ ]
+ else:
+ inputs = []
+
+ return {
+ "model": model,
+ "model_params": model_params,
+ "scope": scope,
+ "inputs": inputs,
+ "outputs": outputs,
+ "action": action,
+ "include_info": include_info,
+ "exclude_info": exclude_info,
+ }
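
The builders above assemble a _Jac.with_llm(...) call as Python AST nodes. An illustrative stand-alone sketch of the same pattern using only the stdlib ast module (the _Jac name and scope value here are placeholders, not the plugin's actual output):

    import ast

    call = ast.Call(
        func=ast.Attribute(
            value=ast.Name(id="_Jac", ctx=ast.Load()),
            attr="with_llm",
            ctx=ast.Load(),
        ),
        args=[],
        keywords=[
            ast.keyword(arg="file_loc", value=ast.Name(id="__file__", ctx=ast.Load())),
            ast.keyword(arg="scope", value=ast.Constant(value="module.my_ability")),
        ],
    )
    # Requires Python 3.9+ for ast.unparse.
    print(ast.unparse(call))  # -> _Jac.with_llm(file_loc=__file__, scope='module.my_ability')
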
diff --git a/support/plugins/mtllm/mtllm/tools/__init__.py b/support/plugins/mtllm/mtllm/tools/__init__.py
new file mode 100644
index 000000000..4dfdf0508
--- /dev/null
+++ b/support/plugins/mtllm/mtllm/tools/__init__.py
@@ -0,0 +1,22 @@
+"""Pre-made tools for the mtllm package."""
+
+from jaclang.compiler.semtable import SemInfo
+
+from mtllm.types import Tool
+
+
+def finish(output: str) -> str:
+ """Finishes the prompt with the given output."""
+ return output
+
+
+finish_tool = Tool(
+ finish,
+ SemInfo(
+ None,
+ "finish_tool",
+ "ability",
+ "Finishes the Thought process by providing the output",
+ ),
+ [SemInfo(None, "output", "Any", "Final Output")],
+)
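
Following the finish_tool pattern above, an ordinary function can be wired into a Tool by hand. A minimal sketch (the add function and its SemInfo descriptions are hypothetical):

    from jaclang.compiler.semtable import SemInfo

    from mtllm.types import Tool


    def add(a: int, b: int) -> int:
        """Adds two integers."""
        return a + b


    add_tool = Tool(
        add,
        SemInfo(None, "add_tool", "ability", "Adds two integers"),
        [
            SemInfo(None, "a", "int", "First addend"),
            SemInfo(None, "b", "int", "Second addend"),
        ],
    )
    # Tool.__call__ forwards to the wrapped function:
    assert add_tool(2, 3) == 5
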
diff --git a/support/plugins/mtllm/mtllm/tools/math_utils.py b/support/plugins/mtllm/mtllm/tools/math_utils.py
new file mode 100644
index 000000000..7f24c9735
--- /dev/null
+++ b/support/plugins/mtllm/mtllm/tools/math_utils.py
@@ -0,0 +1,22 @@
+"""Math tools for the MTLLM project."""
+
+from jaclang.compiler.semtable import SemInfo
+
+from mtllm.types import Tool
+
+
+def solve_math_expression(expr: str) -> float:
+ """Solves the given math expression."""
+ # NOTE: eval() executes arbitrary Python; only pass trusted arithmetic expressions.
+ return eval(expr)
+
+
+math_tool = Tool(
+ solve_math_expression,
+ SemInfo(
+ None,
+ "math_tool",
+ "ability",
+ "Solves the given math expression",
+ ),
+ [SemInfo(None, "expr", "str", "Math expression to solve eg- 2+2")],
+)
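
Usage sketch for the tool above; since solve_math_expression eval()s the string, only trusted arithmetic should be passed:

    from mtllm.tools.math_utils import math_tool

    assert math_tool("2 + 2 * 3") == 8
    print(math_tool.get_usage_example())
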
diff --git a/support/plugins/mtllm/mtllm/tools/serper.py b/support/plugins/mtllm/mtllm/tools/serper.py
new file mode 100644
index 000000000..6067e70f1
--- /dev/null
+++ b/support/plugins/mtllm/mtllm/tools/serper.py
@@ -0,0 +1,66 @@
+"""Serper API Integration."""
+
+import json
+import os
+
+from jaclang.compiler.semtable import SemInfo
+
+from mtllm.types import Tool
+
+import requests
+
+API_HEADERS = {
+ "X-API-KEY": os.getenv("SERPER_API_KEY"),
+ "Content-Type": "application/json",
+}
+assert API_HEADERS["X-API-KEY"], "Please set the SERPER_API_KEY environment variable."
+
+
+def serper_search_tool(query: str) -> str:
+ """Searches the Serper API."""
+ payload = json.dumps(
+ {
+ "q": query,
+ }
+ )
+ response = requests.request(
+ "POST", "https://google.serper.dev/search", headers=API_HEADERS, data=payload
+ )
+ return response.text
+
+
+search = Tool(
+ serper_search_tool,
+ SemInfo(
+ None,
+ "search",
+ "ability",
+ "Perform a Web Search",
+ ),
+ [SemInfo(None, "query", "str", "Query to search")],
+)
+
+
+def serper_scrape_webpage(url: str) -> str:
+ """Scrapes the Serper API."""
+ payload = json.dumps(
+ {
+ "url": url,
+ }
+ )
+ response = requests.request(
+ "POST", "https://scrape.serper.dev", headers=API_HEADERS, data=payload
+ )
+ return response.text
+
+
+scrape = Tool(
+ serper_scrape_webpage,
+ SemInfo(
+ None,
+ "scrape",
+ "ability",
+ "Scrape Information from a Webpage",
+ ),
+ [SemInfo(None, "url", "str", "URL to scrape")],
+)
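
A hypothetical usage sketch for the two tools above. Note that the module asserts SERPER_API_KEY at import time, and the live calls need network access, so they are left commented:

    from mtllm.tools.serper import scrape, search

    # raw_json = search("Jaclang programming language")  # Serper results as JSON text
    # page_text = scrape("https://example.com")          # scraped page contents
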
diff --git a/support/plugins/mtllm/mtllm/tools/wikipedia_utils.py b/support/plugins/mtllm/mtllm/tools/wikipedia_utils.py
new file mode 100644
index 000000000..dd6a1abac
--- /dev/null
+++ b/support/plugins/mtllm/mtllm/tools/wikipedia_utils.py
@@ -0,0 +1,65 @@
+"""Wikipedia Tools for the MTLLM framework."""
+
+from jaclang.compiler.semtable import SemInfo
+
+from mtllm.types import Tool
+
+import wikipedia as wikipedia_lib
+
+
+def get_wikipedia_summary(title: str) -> str:
+ """Gets the summary of the related article from Wikipedia."""
+ try:
+ return wikipedia_lib.summary(title)
+ except Exception:
+ options = wikipedia_lib.search(title, results=5, suggestion=True)
+ raise Exception(f"Could not get summary for {title}. Similar titles: {options}")
+
+
+wikipedia_summary = Tool(
+ get_wikipedia_summary,
+ SemInfo(
+ None,
+ "wikipedia_summary",
+ "ability",
+ "Gets the Summary of the related article from Wikipedia",
+ ),
+ [SemInfo(None, "title", "str", "Title to search")],
+)
+
+wikipedia_get_related_titles = Tool(
+ wikipedia_lib.search,
+ SemInfo(
+ None,
+ "wikipedia_get_related_titles",
+ "ability",
+ "Gets the related titles from Wikipedia",
+ ),
+ [SemInfo(None, "title", "str", "Title to search")],
+)
+
+
+def wikipedia_get_page(title: str) -> dict:
+ """Gets the page from Wikipedia."""
+ try:
+ pg = wikipedia_lib.page(title)
+ return {
+ "title": pg.title,
+ "content": pg.content,
+ "url": pg.url,
+ "summary": pg.summary,
+ }
+ except wikipedia_lib.DisambiguationError as e:
+ raise Exception(f"Could not get page for {title}. Similar titles: {e.options}")
+
+
+wikipedia_get_whole_page = Tool(
+ wikipedia_get_page,
+ SemInfo(
+ None,
+ "wikipedia_get_whole_page",
+ "ability",
+ "Gets the whole page from Wikipedia",
+ ),
+ [SemInfo(None, "title", "str", "Title to search")],
+)
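
Usage sketch for the Wikipedia tools above (these hit the live Wikipedia API, so the calls are commented):

    from mtllm.tools.wikipedia_utils import (
        wikipedia_get_whole_page,
        wikipedia_summary,
    )

    # summary = wikipedia_summary("Python (programming language)")
    # page = wikipedia_get_whole_page("Python (programming language)")
    # page is a dict with "title", "content", "url" and "summary" keys.
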
diff --git a/support/plugins/mtllm/mtllm/types.py b/support/plugins/mtllm/mtllm/types.py
new file mode 100644
index 000000000..903f5dd41
--- /dev/null
+++ b/support/plugins/mtllm/mtllm/types.py
@@ -0,0 +1,300 @@
+"""Type classes for the mtllm package."""
+
+import base64
+import importlib
+import importlib.util
+from io import BytesIO
+from typing import Any, Callable
+
+from jaclang.compiler.semtable import SemInfo, SemRegistry, SemScope
+
+from mtllm.utils import extract_non_primary_type, get_object_string, get_type_annotation
+
+cv2 = importlib.import_module("cv2") if importlib.util.find_spec("cv2") else None
+PILImage = (
+ importlib.import_module("PIL.Image") if importlib.util.find_spec("PIL") else None
+)
+
+
+class Video:
+ """Class to represent a video."""
+
+ def __init__(self, file_path: str, seconds_per_frame: int = 2) -> None:
+ """Initializes the Video class."""
+ assert (
+ cv2 is not None
+ ), "Please install the required dependencies by running `pip install mtllm[video]`."
+ self.file_path = file_path
+ self.seconds_per_frame = seconds_per_frame
+
+ def process(
+ self,
+ ) -> list:
+ """Processes the video and returns a list of base64 encoded frames."""
+ assert (
+ cv2 is not None
+ ), "Please install the required dependencies by running `pip install mtllm[video]`."
+
+ assert self.seconds_per_frame > 0, "Seconds per frame must be greater than 0"
+
+ base64_frames = []
+
+ video = cv2.VideoCapture(self.file_path)
+ total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
+ fps = video.get(cv2.CAP_PROP_FPS)
+ video_total_seconds = total_frames / fps
+ assert (
+ video_total_seconds > self.seconds_per_frame
+ ), "Video is too short for the specified seconds per frame"
+ assert (
+ video_total_seconds < 4
+ ), "Video is too long. Please use a video less than 4 seconds long."
+
+ frames_to_skip = int(fps * self.seconds_per_frame)
+ curr_frame = 0
+ while curr_frame < total_frames - 1:
+ video.set(cv2.CAP_PROP_POS_FRAMES, curr_frame)
+ success, frame = video.read()
+ if not success:
+ break
+ _, buffer = cv2.imencode(".jpg", frame)
+ base64_frames.append(base64.b64encode(buffer).decode("utf-8"))
+ curr_frame += frames_to_skip
+ video.release()
+ return base64_frames
+
+
+class Image:
+ """Class to represent an image."""
+
+ def __init__(self, file_path: str) -> None:
+ """Initializes the Image class."""
+ assert (
+ PILImage is not None
+ ), "Please install the required dependencies by running `pip install mtllm[image]`."
+ self.file_path = file_path
+
+ def process(self) -> tuple[str, str]:
+ """Processes the image and returns a base64 encoded image and its format."""
+ assert (
+ PILImage is not None
+ ), "Please install the required dependencies by running `pip install mtllm[image]`."
+ image = PILImage.open(self.file_path)
+ img_format = image.format
+ with BytesIO() as buffer:
+ image.save(buffer, format=img_format, quality=100)
+ return (
+ base64.b64encode(buffer.getvalue()).decode("utf-8"),
+ img_format.lower(),
+ )
+
+
+class TypeExplanation:
+ """Class to represent a type explanation."""
+
+ def __init__(self, type_item: str, mod_registry: SemRegistry) -> None:
+ """Initializes the TypeExplanation class."""
+ self.type_item = type_item
+ self.explanation, self._nested_types = self.get_type_explanation(mod_registry)
+
+ def get_type_explanation(self, mod_registry: SemRegistry) -> tuple[str, set[str]]:
+ """Get the type explanation of the input type string."""
+ scope, sem_info = mod_registry.lookup(name=self.type_item)
+ if isinstance(sem_info, SemInfo) and sem_info.type:
+ sem_info_scope = SemScope(sem_info.name, sem_info.type, scope)
+ _, type_info = mod_registry.lookup(scope=sem_info_scope)
+ type_info_types = []
+ type_example_list = []
+ type_example: str = ""  # fallback so unhandled types don't raise UnboundLocalError
+ if sem_info.type == "Enum" and isinstance(type_info, list):
+ for enum_item in type_info:
+ (
+ type_example_list.append(
+ f'{sem_info.name}.{enum_item.name} : "{enum_item.semstr}"'
+ if enum_item.semstr
+ else f"{sem_info.name}.{enum_item.name}"
+ )
+ )
+ type_example = ", ".join(type_example_list)
+ elif sem_info.type in ["obj", "class", "node", "edge"] and isinstance(
+ type_info, list
+ ):
+ for arch_item in type_info:
+ if arch_item.type in ["obj", "class", "node", "edge"]:
+ continue
+ type_example_list.append(
+ f'{arch_item.name}="{arch_item.semstr}":{arch_item.type}'
+ if arch_item.semstr
+ else f"{arch_item.name}={arch_item.type}"
+ )
+ if arch_item.type and extract_non_primary_type(arch_item.type):
+ type_info_types.extend(extract_non_primary_type(arch_item.type))
+ type_example = f"{sem_info.name}({', '.join(type_example_list)})"
+ return (
+ f"{sem_info.semstr} ({sem_info.name}) ({sem_info.type}) eg:- {type_example}".strip(), # noqa: E501
+ set(type_info_types),
+ )
+ return "", set()
+
+ def __str__(self) -> str:
+ """Returns the string representation of the TypeExplanation class."""
+ return self.explanation
+
+ @property
+ def nested_types(self) -> set[str]:
+ """Get the nested types of the type."""
+ return self._nested_types
+
+
+class InputInformation:
+ """Class to represent the input information."""
+
+ def __init__(self, semstr: str, name: str, value: Any) -> None: # noqa: ANN401
+ """Initializes the InputInformation class."""
+ self.semstr = semstr
+ self.name = name
+ self.value = value
+
+ def __str__(self) -> str:
+ """Returns the string representation of the InputInformation class."""
+ type_anno = get_type_annotation(self.value)
+ return f"{self.semstr if self.semstr else ''} ({self.name}) ({type_anno}) = {get_object_string(self.value)}".strip() # noqa: E501
+
+ def to_list_dict(self) -> list[dict]:
+ """Returns the list of dictionaries representation of the InputInformation class."""
+ input_type = get_type_annotation(self.value)
+ if input_type == "Image":
+ img_base64, img_type = self.value.process()
+ return [
+ {
+ "type": "text",
+ "text": f"{self.semstr if self.semstr else ''} ({self.name}) (Image) = ".strip(),
+ },
+ {
+ "type": "image_url",
+ "image_url": {"url": f"data:image/{img_type};base64,{img_base64}"},
+ },
+ ]
+ elif input_type == "Video":
+ video_frames = self.value.process()
+ return [
+ {
+ "type": "text",
+ "text": f"{self.semstr if self.semstr else ''} ({self.name}) (Video) = ".strip(),
+ },
+ *(
+ {
+ "type": "image_url",
+ "image_url": {
+ "url": f"data:image/jpeg;base64,{frame}",
+ "detail": "low",
+ },
+ }
+ for frame in video_frames
+ ),
+ ]
+ return [
+ {
+ "type": "text",
+ "text": str(self),
+ }
+ ]
+
+ def get_types(self) -> list:
+ """Get the types of the input."""
+ return extract_non_primary_type(get_type_annotation(self.value))
+
+
+class OutputHint:
+ """Class to represent the output hint."""
+
+ def __init__(self, semstr: str, type: str) -> None: # noqa: ANN401
+ """Initializes the OutputHint class."""
+ self.semstr = semstr
+ self.type = type
+
+ def __str__(self) -> str:
+ """Returns the string representation of the OutputHint class."""
+ return f"{self.semstr if self.semstr else ''} ({self.type})".strip()
+
+ def get_types(self) -> list:
+ """Get the types of the output."""
+ return extract_non_primary_type(self.type)
+
+
+class Information:
+ """Class to represent the information."""
+
+ def __init__(
+ self, filtered_registry: SemRegistry, name: str, value: Any # noqa: ANN401
+ ) -> None:
+ """Initializes the Information class."""
+ self.name = name
+ self.value = value
+ self.registry = filtered_registry
+
+ @property
+ def semstr(self) -> str:
+ """Get the semantic string of the information."""
+ _, sem_info = self.registry.lookup(name=self.name)
+ return sem_info.semstr if sem_info and isinstance(sem_info, SemInfo) else ""
+
+ @property
+ def type(self) -> str:
+ """Get the type of the information."""
+ _, sem_info = self.registry.lookup(name=self.name)
+ return (
+ sem_info.type
+ if sem_info and isinstance(sem_info, SemInfo)
+ else get_type_annotation(self.value)
+ )
+
+ def __str__(self) -> str:
+ """Returns the string representation of the Information class."""
+ type_anno = get_type_annotation(self.value)
+ return f"{self.semstr} ({self.name}) ({type_anno}) = {get_object_string(self.value)}".strip()
+
+ def get_types(self) -> list:
+ """Get the types of the information."""
+ return extract_non_primary_type(self.type)
+
+
+class Tool:
+ """Base class for tools."""
+
+ def __init__(
+ self, func: Callable, sem_info: SemInfo, params: list[SemInfo]
+ ) -> None:
+ """Initialize the tool."""
+ self.sem_info = sem_info
+ self.func = func
+ self.params = params
+
+ def __call__(self, *args, **kwargs) -> str: # noqa
+ """Forward function of the tool."""
+ return self.func(*args, **kwargs)
+
+ def get_usage_example(self) -> str:
+ """Get the usage example of the tool."""
+ get_param_str = lambda x: ( # noqa E731
+ f'{x.name}="{x.semstr}":{x.type}' if x.semstr else f"{x.name}={x.type}"
+ )
+ return f"{self.sem_info.name}({', '.join([get_param_str(x) for x in self.params])})"
+
+ def __str__(self) -> str:
+ """String representation of the tool."""
+ return f"{self.sem_info.semstr} ({self.sem_info.name}) usage eg. {self.get_usage_example()}"
+
+
+class ReActOutput:
+ """Class to represent the ReAct output."""
+
+ def __init__(self, thought: str, action: str, observation: str) -> None:
+ """Initializes the ReActOutput class."""
+ self.thought = thought
+ self.action = action
+ self.observation = observation
+
+ def __repr__(self) -> str:
+ """Returns the string representation of the ReActOutput class."""
+ return f"ReActOutput(thought={self.thought}, action={self.action}, observation={self.observation})"
diff --git a/support/plugins/mtllm/mtllm/utils.py b/support/plugins/mtllm/mtllm/utils.py
new file mode 100644
index 000000000..9e05e524a
--- /dev/null
+++ b/support/plugins/mtllm/mtllm/utils.py
@@ -0,0 +1,99 @@
+"""Utility Functions for the MTLLM."""
+
+import re
+from enum import Enum
+from typing import Any
+
+from jaclang.compiler.semtable import SemRegistry, SemScope
+
+
+def get_object_string(obj: Any) -> str: # noqa: ANN401
+ """Get the string representation of the input object."""
+ if isinstance(obj, str):
+ return f'"{obj}"'
+ elif isinstance(obj, (int, float, bool)):
+ return str(obj)
+ elif isinstance(obj, list):
+ return "[" + ", ".join(get_object_string(item) for item in obj) + "]"
+ elif isinstance(obj, tuple):
+ return "(" + ", ".join(get_object_string(item) for item in obj) + ")"
+ elif isinstance(obj, dict):
+ return (
+ "{"
+ + ", ".join(
+ f"{get_object_string(key)}: {get_object_string(value)}"
+ for key, value in obj.items()
+ )
+ + "}"
+ )
+ elif isinstance(obj, Enum):
+ return f"{obj.__class__.__name__}.{obj.name}"
+ elif hasattr(obj, "__dict__"):
+ args = ", ".join(
+ f"{key}={get_object_string(value)}"
+ for key, value in vars(obj).items()
+ if key != "_jac_"
+ )
+ return f"{obj.__class__.__name__}({args})"
+ else:
+ return str(obj)
+
+
+def extract_non_primary_type(type_str: str) -> list:
+ """Extract non-primary types from the type string."""
+ if not type_str:
+ return []
+ pattern = r"(?:\[|,\s*|\|)([a-zA-Z_][a-zA-Z0-9_]*)|([a-zA-Z_][a-zA-Z0-9_]*)"
+ matches = re.findall(pattern, type_str)
+ primary_types = [
+ "str",
+ "int",
+ "float",
+ "bool",
+ "list",
+ "dict",
+ "tuple",
+ "set",
+ "Any",
+ "None",
+ ]
+ non_primary_types = [m for t in matches for m in t if m and m not in primary_types]
+ return non_primary_types
+
+
+def get_type_annotation(data: Any) -> str: # noqa: ANN401
+ """Get the type annotation of the input data."""
+ if isinstance(data, dict):
+ class_name = next(
+ (value.__class__.__name__ for value in data.values() if value is not None),
+ None,
+ )
+ if class_name:
+ return f"dict[str, {class_name}]"
+ else:
+ return "dict[str, Any]"
+ elif isinstance(data, list):
+ if data:
+ class_name = data[0].__class__.__name__
+ return f"list[{class_name}]"
+ else:
+ return "list"
+ else:
+ return str(type(data).__name__)
+
+
+def get_filtered_registry(mod_registry: SemRegistry, scope: SemScope) -> SemRegistry:
+ """Get the filtered registry based on the scope."""
+ avail_scopes = []
+ while True:
+ avail_scopes.append(str(scope))
+ if not scope.parent:
+ break
+ scope = scope.parent
+
+ filtered_registry = SemRegistry()
+ for _scope, sem_info_list in mod_registry.registry.items():
+ if str(_scope) in avail_scopes:
+ filtered_registry.registry[_scope] = sem_info_list
+
+ return filtered_registry
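
A quick sketch of the helpers above on plain values:

    from mtllm.utils import (
        extract_non_primary_type,
        get_object_string,
        get_type_annotation,
    )

    print(get_object_string({"name": "Ada"}))        # {"name": "Ada"}
    print(get_type_annotation([1, 2, 3]))            # list[int]
    print(extract_non_primary_type("list[Person]"))  # ['Person']
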
diff --git a/support/plugins/mtllm/mypy.ini b/support/plugins/mtllm/mypy.ini
new file mode 100644
index 000000000..01341aa5c
--- /dev/null
+++ b/support/plugins/mtllm/mypy.ini
@@ -0,0 +1,2 @@
+[mypy]
+exclude = __jac_gen__
\ No newline at end of file
diff --git a/support/plugins/mtllm/poetry.lock b/support/plugins/mtllm/poetry.lock
new file mode 100644
index 000000000..932155020
--- /dev/null
+++ b/support/plugins/mtllm/poetry.lock
@@ -0,0 +1,2050 @@
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+
+[[package]]
+name = "aiohappyeyeballs"
+version = "2.3.5"
+description = "Happy Eyeballs for asyncio"
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "aiohappyeyeballs-2.3.5-py3-none-any.whl", hash = "sha256:4d6dea59215537dbc746e93e779caea8178c866856a721c9c660d7a5a7b8be03"},
+ {file = "aiohappyeyeballs-2.3.5.tar.gz", hash = "sha256:6fa48b9f1317254f122a07a131a86b71ca6946ca989ce6326fff54a99a920105"},
+]
+
+[[package]]
+name = "aiohttp"
+version = "3.10.3"
+description = "Async http client/server framework (asyncio)"
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "aiohttp-3.10.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cc36cbdedf6f259371dbbbcaae5bb0e95b879bc501668ab6306af867577eb5db"},
+ {file = "aiohttp-3.10.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:85466b5a695c2a7db13eb2c200af552d13e6a9313d7fa92e4ffe04a2c0ea74c1"},
+ {file = "aiohttp-3.10.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:71bb1d97bfe7e6726267cea169fdf5df7658831bb68ec02c9c6b9f3511e108bb"},
+ {file = "aiohttp-3.10.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baec1eb274f78b2de54471fc4c69ecbea4275965eab4b556ef7a7698dee18bf2"},
+ {file = "aiohttp-3.10.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:13031e7ec1188274bad243255c328cc3019e36a5a907978501256000d57a7201"},
+ {file = "aiohttp-3.10.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2bbc55a964b8eecb341e492ae91c3bd0848324d313e1e71a27e3d96e6ee7e8e8"},
+ {file = "aiohttp-3.10.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8cc0564b286b625e673a2615ede60a1704d0cbbf1b24604e28c31ed37dc62aa"},
+ {file = "aiohttp-3.10.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f817a54059a4cfbc385a7f51696359c642088710e731e8df80d0607193ed2b73"},
+ {file = "aiohttp-3.10.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8542c9e5bcb2bd3115acdf5adc41cda394e7360916197805e7e32b93d821ef93"},
+ {file = "aiohttp-3.10.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:671efce3a4a0281060edf9a07a2f7e6230dca3a1cbc61d110eee7753d28405f7"},
+ {file = "aiohttp-3.10.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:0974f3b5b0132edcec92c3306f858ad4356a63d26b18021d859c9927616ebf27"},
+ {file = "aiohttp-3.10.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:44bb159b55926b57812dca1b21c34528e800963ffe130d08b049b2d6b994ada7"},
+ {file = "aiohttp-3.10.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6ae9ae382d1c9617a91647575255ad55a48bfdde34cc2185dd558ce476bf16e9"},
+ {file = "aiohttp-3.10.3-cp310-cp310-win32.whl", hash = "sha256:aed12a54d4e1ee647376fa541e1b7621505001f9f939debf51397b9329fd88b9"},
+ {file = "aiohttp-3.10.3-cp310-cp310-win_amd64.whl", hash = "sha256:b51aef59370baf7444de1572f7830f59ddbabd04e5292fa4218d02f085f8d299"},
+ {file = "aiohttp-3.10.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e021c4c778644e8cdc09487d65564265e6b149896a17d7c0f52e9a088cc44e1b"},
+ {file = "aiohttp-3.10.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:24fade6dae446b183e2410a8628b80df9b7a42205c6bfc2eff783cbeedc224a2"},
+ {file = "aiohttp-3.10.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bc8e9f15939dacb0e1f2d15f9c41b786051c10472c7a926f5771e99b49a5957f"},
+ {file = "aiohttp-3.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5a9ec959b5381271c8ec9310aae1713b2aec29efa32e232e5ef7dcca0df0279"},
+ {file = "aiohttp-3.10.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2a5d0ea8a6467b15d53b00c4e8ea8811e47c3cc1bdbc62b1aceb3076403d551f"},
+ {file = "aiohttp-3.10.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c9ed607dbbdd0d4d39b597e5bf6b0d40d844dfb0ac6a123ed79042ef08c1f87e"},
+ {file = "aiohttp-3.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3e66d5b506832e56add66af88c288c1d5ba0c38b535a1a59e436b300b57b23e"},
+ {file = "aiohttp-3.10.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fda91ad797e4914cca0afa8b6cccd5d2b3569ccc88731be202f6adce39503189"},
+ {file = "aiohttp-3.10.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:61ccb867b2f2f53df6598eb2a93329b5eee0b00646ee79ea67d68844747a418e"},
+ {file = "aiohttp-3.10.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6d881353264e6156f215b3cb778c9ac3184f5465c2ece5e6fce82e68946868ef"},
+ {file = "aiohttp-3.10.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:b031ce229114825f49cec4434fa844ccb5225e266c3e146cb4bdd025a6da52f1"},
+ {file = "aiohttp-3.10.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5337cc742a03f9e3213b097abff8781f79de7190bbfaa987bd2b7ceb5bb0bdec"},
+ {file = "aiohttp-3.10.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ab3361159fd3dcd0e48bbe804006d5cfb074b382666e6c064112056eb234f1a9"},
+ {file = "aiohttp-3.10.3-cp311-cp311-win32.whl", hash = "sha256:05d66203a530209cbe40f102ebaac0b2214aba2a33c075d0bf825987c36f1f0b"},
+ {file = "aiohttp-3.10.3-cp311-cp311-win_amd64.whl", hash = "sha256:70b4a4984a70a2322b70e088d654528129783ac1ebbf7dd76627b3bd22db2f17"},
+ {file = "aiohttp-3.10.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:166de65e2e4e63357cfa8417cf952a519ac42f1654cb2d43ed76899e2319b1ee"},
+ {file = "aiohttp-3.10.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7084876352ba3833d5d214e02b32d794e3fd9cf21fdba99cff5acabeb90d9806"},
+ {file = "aiohttp-3.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d98c604c93403288591d7d6d7d6cc8a63459168f8846aeffd5b3a7f3b3e5e09"},
+ {file = "aiohttp-3.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d73b073a25a0bb8bf014345374fe2d0f63681ab5da4c22f9d2025ca3e3ea54fc"},
+ {file = "aiohttp-3.10.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8da6b48c20ce78f5721068f383e0e113dde034e868f1b2f5ee7cb1e95f91db57"},
+ {file = "aiohttp-3.10.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3a9dcdccf50284b1b0dc72bc57e5bbd3cc9bf019060dfa0668f63241ccc16aa7"},
+ {file = "aiohttp-3.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56fb94bae2be58f68d000d046172d8b8e6b1b571eb02ceee5535e9633dcd559c"},
+ {file = "aiohttp-3.10.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bf75716377aad2c718cdf66451c5cf02042085d84522aec1f9246d3e4b8641a6"},
+ {file = "aiohttp-3.10.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6c51ed03e19c885c8e91f574e4bbe7381793f56f93229731597e4a499ffef2a5"},
+ {file = "aiohttp-3.10.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b84857b66fa6510a163bb083c1199d1ee091a40163cfcbbd0642495fed096204"},
+ {file = "aiohttp-3.10.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c124b9206b1befe0491f48185fd30a0dd51b0f4e0e7e43ac1236066215aff272"},
+ {file = "aiohttp-3.10.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:3461d9294941937f07bbbaa6227ba799bc71cc3b22c40222568dc1cca5118f68"},
+ {file = "aiohttp-3.10.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:08bd0754d257b2db27d6bab208c74601df6f21bfe4cb2ec7b258ba691aac64b3"},
+ {file = "aiohttp-3.10.3-cp312-cp312-win32.whl", hash = "sha256:7f9159ae530297f61a00116771e57516f89a3de6ba33f314402e41560872b50a"},
+ {file = "aiohttp-3.10.3-cp312-cp312-win_amd64.whl", hash = "sha256:e1128c5d3a466279cb23c4aa32a0f6cb0e7d2961e74e9e421f90e74f75ec1edf"},
+ {file = "aiohttp-3.10.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d1100e68e70eb72eadba2b932b185ebf0f28fd2f0dbfe576cfa9d9894ef49752"},
+ {file = "aiohttp-3.10.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a541414578ff47c0a9b0b8b77381ea86b0c8531ab37fc587572cb662ccd80b88"},
+ {file = "aiohttp-3.10.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d5548444ef60bf4c7b19ace21f032fa42d822e516a6940d36579f7bfa8513f9c"},
+ {file = "aiohttp-3.10.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ba2e838b5e6a8755ac8297275c9460e729dc1522b6454aee1766c6de6d56e5e"},
+ {file = "aiohttp-3.10.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:48665433bb59144aaf502c324694bec25867eb6630fcd831f7a893ca473fcde4"},
+ {file = "aiohttp-3.10.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bac352fceed158620ce2d701ad39d4c1c76d114255a7c530e057e2b9f55bdf9f"},
+ {file = "aiohttp-3.10.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b0f670502100cdc567188c49415bebba947eb3edaa2028e1a50dd81bd13363f"},
+ {file = "aiohttp-3.10.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43b09f38a67679e32d380fe512189ccb0b25e15afc79b23fbd5b5e48e4fc8fd9"},
+ {file = "aiohttp-3.10.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:cd788602e239ace64f257d1c9d39898ca65525583f0fbf0988bcba19418fe93f"},
+ {file = "aiohttp-3.10.3-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:214277dcb07ab3875f17ee1c777d446dcce75bea85846849cc9d139ab8f5081f"},
+ {file = "aiohttp-3.10.3-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:32007fdcaab789689c2ecaaf4b71f8e37bf012a15cd02c0a9db8c4d0e7989fa8"},
+ {file = "aiohttp-3.10.3-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:123e5819bfe1b87204575515cf448ab3bf1489cdeb3b61012bde716cda5853e7"},
+ {file = "aiohttp-3.10.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:812121a201f0c02491a5db335a737b4113151926a79ae9ed1a9f41ea225c0e3f"},
+ {file = "aiohttp-3.10.3-cp38-cp38-win32.whl", hash = "sha256:b97dc9a17a59f350c0caa453a3cb35671a2ffa3a29a6ef3568b523b9113d84e5"},
+ {file = "aiohttp-3.10.3-cp38-cp38-win_amd64.whl", hash = "sha256:3731a73ddc26969d65f90471c635abd4e1546a25299b687e654ea6d2fc052394"},
+ {file = "aiohttp-3.10.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38d91b98b4320ffe66efa56cb0f614a05af53b675ce1b8607cdb2ac826a8d58e"},
+ {file = "aiohttp-3.10.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9743fa34a10a36ddd448bba8a3adc2a66a1c575c3c2940301bacd6cc896c6bf1"},
+ {file = "aiohttp-3.10.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7c126f532caf238031c19d169cfae3c6a59129452c990a6e84d6e7b198a001dc"},
+ {file = "aiohttp-3.10.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:926e68438f05703e500b06fe7148ef3013dd6f276de65c68558fa9974eeb59ad"},
+ {file = "aiohttp-3.10.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:434b3ab75833accd0b931d11874e206e816f6e6626fd69f643d6a8269cd9166a"},
+ {file = "aiohttp-3.10.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d35235a44ec38109b811c3600d15d8383297a8fab8e3dec6147477ec8636712a"},
+ {file = "aiohttp-3.10.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59c489661edbd863edb30a8bd69ecb044bd381d1818022bc698ba1b6f80e5dd1"},
+ {file = "aiohttp-3.10.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50544fe498c81cb98912afabfc4e4d9d85e89f86238348e3712f7ca6a2f01dab"},
+ {file = "aiohttp-3.10.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:09bc79275737d4dc066e0ae2951866bb36d9c6b460cb7564f111cc0427f14844"},
+ {file = "aiohttp-3.10.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:af4dbec58e37f5afff4f91cdf235e8e4b0bd0127a2a4fd1040e2cad3369d2f06"},
+ {file = "aiohttp-3.10.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:b22cae3c9dd55a6b4c48c63081d31c00fc11fa9db1a20c8a50ee38c1a29539d2"},
+ {file = "aiohttp-3.10.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ba562736d3fbfe9241dad46c1a8994478d4a0e50796d80e29d50cabe8fbfcc3f"},
+ {file = "aiohttp-3.10.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f25d6c4e82d7489be84f2b1c8212fafc021b3731abdb61a563c90e37cced3a21"},
+ {file = "aiohttp-3.10.3-cp39-cp39-win32.whl", hash = "sha256:b69d832e5f5fa15b1b6b2c8eb6a9fd2c0ec1fd7729cb4322ed27771afc9fc2ac"},
+ {file = "aiohttp-3.10.3-cp39-cp39-win_amd64.whl", hash = "sha256:673bb6e3249dc8825df1105f6ef74e2eab779b7ff78e96c15cadb78b04a83752"},
+ {file = "aiohttp-3.10.3.tar.gz", hash = "sha256:21650e7032cc2d31fc23d353d7123e771354f2a3d5b05a5647fc30fea214e696"},
+]
+
+[package.dependencies]
+aiohappyeyeballs = ">=2.3.0"
+aiosignal = ">=1.1.2"
+attrs = ">=17.3.0"
+frozenlist = ">=1.1.1"
+multidict = ">=4.5,<7.0"
+yarl = ">=1.0,<2.0"
+
+[package.extras]
+speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"]
+
+[[package]]
+name = "aiosignal"
+version = "1.3.1"
+description = "aiosignal: a list of registered asynchronous callbacks"
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"},
+ {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"},
+]
+
+[package.dependencies]
+frozenlist = ">=1.1.0"
+
+[[package]]
+name = "annotated-types"
+version = "0.7.0"
+description = "Reusable constraint types to use with typing.Annotated"
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
+ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
+]
+
+[[package]]
+name = "anthropic"
+version = "0.26.1"
+description = "The official Python library for the anthropic API"
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "anthropic-0.26.1-py3-none-any.whl", hash = "sha256:2812b9b250b551ed8a1f0a7e6ae3f005654098994f45ebca5b5808bd154c9628"},
+ {file = "anthropic-0.26.1.tar.gz", hash = "sha256:26680ff781a6f678a30a1dccd0743631e602b23a47719439ffdef5335fa167d8"},
+]
+
+[package.dependencies]
+anyio = ">=3.5.0,<5"
+distro = ">=1.7.0,<2"
+httpx = ">=0.23.0,<1"
+jiter = ">=0.1.0,<1"
+pydantic = ">=1.9.0,<3"
+sniffio = "*"
+tokenizers = ">=0.13.0"
+typing-extensions = ">=4.7,<5"
+
+[package.extras]
+bedrock = ["boto3 (>=1.28.57)", "botocore (>=1.31.57)"]
+vertex = ["google-auth (>=2,<3)"]
+
+[[package]]
+name = "anyio"
+version = "4.4.0"
+description = "High level compatibility layer for multiple asynchronous event loop implementations"
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"},
+ {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"},
+]
+
+[package.dependencies]
+idna = ">=2.8"
+sniffio = ">=1.1"
+
+[package.extras]
+doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
+test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
+trio = ["trio (>=0.23)"]
+
+[[package]]
+name = "attrs"
+version = "24.2.0"
+description = "Classes Without Boilerplate"
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"},
+ {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"},
+]
+
+[package.extras]
+benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
+tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
+
+[[package]]
+name = "certifi"
+version = "2024.7.4"
+description = "Python package for providing Mozilla's CA Bundle."
+optional = true
+python-versions = ">=3.6"
+files = [
+ {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"},
+ {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"},
+]
+
+[[package]]
+name = "charset-normalizer"
+version = "3.3.2"
+description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+optional = true
+python-versions = ">=3.7.0"
+files = [
+ {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"},
+ {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"},
+]
+
+[[package]]
+name = "click"
+version = "8.1.7"
+description = "Composable command line interface toolkit"
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"},
+ {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+description = "Cross-platform colored terminal text."
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+files = [
+ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
+ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
+]
+
+[[package]]
+name = "distro"
+version = "1.9.0"
+description = "Distro - an OS platform information API"
+optional = true
+python-versions = ">=3.6"
+files = [
+ {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"},
+ {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"},
+]
+
+[[package]]
+name = "eval-type-backport"
+version = "0.2.0"
+description = "Like `typing._eval_type`, but lets older Python versions use newer typing features."
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "eval_type_backport-0.2.0-py3-none-any.whl", hash = "sha256:ac2f73d30d40c5a30a80b8739a789d6bb5e49fdffa66d7912667e2015d9c9933"},
+ {file = "eval_type_backport-0.2.0.tar.gz", hash = "sha256:68796cfbc7371ebf923f03bdf7bef415f3ec098aeced24e054b253a0e78f7b37"},
+]
+
+[package.extras]
+tests = ["pytest"]
+
+[[package]]
+name = "filelock"
+version = "3.15.4"
+description = "A platform independent file lock."
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "filelock-3.15.4-py3-none-any.whl", hash = "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7"},
+ {file = "filelock-3.15.4.tar.gz", hash = "sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb"},
+]
+
+[package.extras]
+docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"]
+testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "virtualenv (>=20.26.2)"]
+typing = ["typing-extensions (>=4.8)"]
+
+[[package]]
+name = "frozenlist"
+version = "1.4.1"
+description = "A list-like structure which implements collections.abc.MutableSequence"
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"},
+ {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"},
+ {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"},
+ {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"},
+ {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"},
+ {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"},
+ {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"},
+ {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"},
+ {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"},
+ {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"},
+ {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"},
+ {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"},
+ {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"},
+ {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"},
+ {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"},
+ {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"},
+ {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"},
+ {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"},
+ {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"},
+ {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"},
+ {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"},
+ {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"},
+ {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"},
+ {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"},
+ {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"},
+ {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"},
+ {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"},
+ {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"},
+ {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"},
+ {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"},
+ {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"},
+ {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"},
+ {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"},
+ {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"},
+ {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"},
+ {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"},
+ {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"},
+ {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"},
+ {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"},
+ {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"},
+ {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"},
+ {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"},
+ {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"},
+ {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"},
+ {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"},
+ {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"},
+ {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"},
+ {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"},
+ {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"},
+ {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"},
+ {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"},
+ {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"},
+ {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"},
+ {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"},
+ {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"},
+ {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"},
+ {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"},
+ {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"},
+ {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"},
+ {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"},
+ {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"},
+ {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"},
+ {file = "frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"},
+ {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"},
+ {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"},
+ {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"},
+ {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"},
+ {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"},
+ {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"},
+ {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"},
+ {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"},
+ {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"},
+ {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"},
+ {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = "sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"},
+ {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"},
+ {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"},
+ {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"},
+]
+
+[[package]]
+name = "fsspec"
+version = "2024.6.1"
+description = "File-system specification"
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "fsspec-2024.6.1-py3-none-any.whl", hash = "sha256:3cb443f8bcd2efb31295a5b9fdb02aee81d8452c80d28f97a6d0959e6cee101e"},
+ {file = "fsspec-2024.6.1.tar.gz", hash = "sha256:fad7d7e209dd4c1208e3bbfda706620e0da5142bebbd9c384afb95b07e798e49"},
+]
+
+[package.extras]
+abfs = ["adlfs"]
+adl = ["adlfs"]
+arrow = ["pyarrow (>=1)"]
+dask = ["dask", "distributed"]
+dev = ["pre-commit", "ruff"]
+doc = ["numpydoc", "sphinx", "sphinx-design", "sphinx-rtd-theme", "yarl"]
+dropbox = ["dropbox", "dropboxdrivefs", "requests"]
+full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"]
+fuse = ["fusepy"]
+gcs = ["gcsfs"]
+git = ["pygit2"]
+github = ["requests"]
+gs = ["gcsfs"]
+gui = ["panel"]
+hdfs = ["pyarrow (>=1)"]
+http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"]
+libarchive = ["libarchive-c"]
+oci = ["ocifs"]
+s3 = ["s3fs"]
+sftp = ["paramiko"]
+smb = ["smbprotocol"]
+ssh = ["paramiko"]
+test = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "numpy", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "requests"]
+test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask-expr", "dask[dataframe,test]", "moto[server] (>4,<5)", "pytest-timeout", "xarray"]
+test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"]
+tqdm = ["tqdm"]
+
+[[package]]
+name = "groq"
+version = "0.8.0"
+description = "The official Python library for the groq API"
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "groq-0.8.0-py3-none-any.whl", hash = "sha256:f5e4e892d45001241a930db451e633ca1f0007e3f749deaa5d7360062fcd61e3"},
+ {file = "groq-0.8.0.tar.gz", hash = "sha256:37ceb2f706bd516d0bfcac8e89048a24b375172987a0d6bd9efb521c54f6deff"},
+]
+
+[package.dependencies]
+anyio = ">=3.5.0,<5"
+distro = ">=1.7.0,<2"
+httpx = ">=0.23.0,<1"
+pydantic = ">=1.9.0,<3"
+sniffio = "*"
+typing-extensions = ">=4.7,<5"
+
+[[package]]
+name = "h11"
+version = "0.14.0"
+description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
+ {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
+]
+
+[[package]]
+name = "httpcore"
+version = "1.0.5"
+description = "A minimal low-level HTTP client."
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"},
+ {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"},
+]
+
+[package.dependencies]
+certifi = "*"
+h11 = ">=0.13,<0.15"
+
+[package.extras]
+asyncio = ["anyio (>=4.0,<5.0)"]
+http2 = ["h2 (>=3,<5)"]
+socks = ["socksio (==1.*)"]
+trio = ["trio (>=0.22.0,<0.26.0)"]
+
+[[package]]
+name = "httpx"
+version = "0.27.0"
+description = "The next generation HTTP client."
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"},
+ {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"},
+]
+
+[package.dependencies]
+anyio = "*"
+certifi = "*"
+httpcore = "==1.*"
+idna = "*"
+sniffio = "*"
+
+[package.extras]
+brotli = ["brotli", "brotlicffi"]
+cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
+http2 = ["h2 (>=3,<5)"]
+socks = ["socksio (==1.*)"]
+
+[[package]]
+name = "huggingface-hub"
+version = "0.24.5"
+description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
+optional = true
+python-versions = ">=3.8.0"
+files = [
+ {file = "huggingface_hub-0.24.5-py3-none-any.whl", hash = "sha256:d93fb63b1f1a919a22ce91a14518974e81fc4610bf344dfe7572343ce8d3aced"},
+ {file = "huggingface_hub-0.24.5.tar.gz", hash = "sha256:7b45d6744dd53ce9cbf9880957de00e9d10a9ae837f1c9b7255fc8fa4e8264f3"},
+]
+
+[package.dependencies]
+filelock = "*"
+fsspec = ">=2023.5.0"
+packaging = ">=20.9"
+pyyaml = ">=5.1"
+requests = "*"
+tqdm = ">=4.42.1"
+typing-extensions = ">=3.7.4.3"
+
+[package.extras]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
+cli = ["InquirerPy (==0.3.4)"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
+fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
+hf-transfer = ["hf-transfer (>=0.1.4)"]
+inference = ["aiohttp", "minijinja (>=1.0)"]
+quality = ["mypy (==1.5.1)", "ruff (>=0.5.0)"]
+tensorflow = ["graphviz", "pydot", "tensorflow"]
+tensorflow-testing = ["keras (<3.0)", "tensorflow"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio", "jedi", "minijinja (>=1.0)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
+torch = ["safetensors[torch]", "torch"]
+typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"]
+
+[[package]]
+name = "idna"
+version = "3.7"
+description = "Internationalized Domain Names in Applications (IDNA)"
+optional = true
+python-versions = ">=3.5"
+files = [
+ {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"},
+ {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"},
+]
+
+[[package]]
+name = "iniconfig"
+version = "2.0.0"
+description = "brain-dead simple config-ini parsing"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
+ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
+]
+
+[[package]]
+name = "jaclang"
+version = "0.7.16"
+description = "Jac is a unique and powerful programming language that runs on top of Python, offering an unprecedented level of intelligence and intuitive understanding."
+optional = false
+python-versions = "<4.0.0,>=3.11.0"
+files = [
+ {file = "jaclang-0.7.16-py3-none-any.whl", hash = "sha256:e14d34241d7830a5bef69fd2d28906c8abd3dc77324a1c91425dafcb4dda6aea"},
+ {file = "jaclang-0.7.16.tar.gz", hash = "sha256:52519aabc4d74833ed5abfe8765e7aa2f650d2a427404a2d90b8686c2f39924d"},
+]
+
+[[package]]
+name = "jiter"
+version = "0.5.0"
+description = "Fast iterable JSON parser."
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "jiter-0.5.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b599f4e89b3def9a94091e6ee52e1d7ad7bc33e238ebb9c4c63f211d74822c3f"},
+ {file = "jiter-0.5.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2a063f71c4b06225543dddadbe09d203dc0c95ba352d8b85f1221173480a71d5"},
+ {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:acc0d5b8b3dd12e91dd184b87273f864b363dfabc90ef29a1092d269f18c7e28"},
+ {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c22541f0b672f4d741382a97c65609332a783501551445ab2df137ada01e019e"},
+ {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:63314832e302cc10d8dfbda0333a384bf4bcfce80d65fe99b0f3c0da8945a91a"},
+ {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a25fbd8a5a58061e433d6fae6d5298777c0814a8bcefa1e5ecfff20c594bd749"},
+ {file = "jiter-0.5.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:503b2c27d87dfff5ab717a8200fbbcf4714516c9d85558048b1fc14d2de7d8dc"},
+ {file = "jiter-0.5.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6d1f3d27cce923713933a844872d213d244e09b53ec99b7a7fdf73d543529d6d"},
+ {file = "jiter-0.5.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c95980207b3998f2c3b3098f357994d3fd7661121f30669ca7cb945f09510a87"},
+ {file = "jiter-0.5.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:afa66939d834b0ce063f57d9895e8036ffc41c4bd90e4a99631e5f261d9b518e"},
+ {file = "jiter-0.5.0-cp310-none-win32.whl", hash = "sha256:f16ca8f10e62f25fd81d5310e852df6649af17824146ca74647a018424ddeccf"},
+ {file = "jiter-0.5.0-cp310-none-win_amd64.whl", hash = "sha256:b2950e4798e82dd9176935ef6a55cf6a448b5c71515a556da3f6b811a7844f1e"},
+ {file = "jiter-0.5.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d4c8e1ed0ef31ad29cae5ea16b9e41529eb50a7fba70600008e9f8de6376d553"},
+ {file = "jiter-0.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c6f16e21276074a12d8421692515b3fd6d2ea9c94fd0734c39a12960a20e85f3"},
+ {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5280e68e7740c8c128d3ae5ab63335ce6d1fb6603d3b809637b11713487af9e6"},
+ {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:583c57fc30cc1fec360e66323aadd7fc3edeec01289bfafc35d3b9dcb29495e4"},
+ {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26351cc14507bdf466b5f99aba3df3143a59da75799bf64a53a3ad3155ecded9"},
+ {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4829df14d656b3fb87e50ae8b48253a8851c707da9f30d45aacab2aa2ba2d614"},
+ {file = "jiter-0.5.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a42a4bdcf7307b86cb863b2fb9bb55029b422d8f86276a50487982d99eed7c6e"},
+ {file = "jiter-0.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04d461ad0aebf696f8da13c99bc1b3e06f66ecf6cfd56254cc402f6385231c06"},
+ {file = "jiter-0.5.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e6375923c5f19888c9226582a124b77b622f8fd0018b843c45eeb19d9701c403"},
+ {file = "jiter-0.5.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2cec323a853c24fd0472517113768c92ae0be8f8c384ef4441d3632da8baa646"},
+ {file = "jiter-0.5.0-cp311-none-win32.whl", hash = "sha256:aa1db0967130b5cab63dfe4d6ff547c88b2a394c3410db64744d491df7f069bb"},
+ {file = "jiter-0.5.0-cp311-none-win_amd64.whl", hash = "sha256:aa9d2b85b2ed7dc7697597dcfaac66e63c1b3028652f751c81c65a9f220899ae"},
+ {file = "jiter-0.5.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9f664e7351604f91dcdd557603c57fc0d551bc65cc0a732fdacbf73ad335049a"},
+ {file = "jiter-0.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:044f2f1148b5248ad2c8c3afb43430dccf676c5a5834d2f5089a4e6c5bbd64df"},
+ {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:702e3520384c88b6e270c55c772d4bd6d7b150608dcc94dea87ceba1b6391248"},
+ {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:528d742dcde73fad9d63e8242c036ab4a84389a56e04efd854062b660f559544"},
+ {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8cf80e5fe6ab582c82f0c3331df27a7e1565e2dcf06265afd5173d809cdbf9ba"},
+ {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:44dfc9ddfb9b51a5626568ef4e55ada462b7328996294fe4d36de02fce42721f"},
+ {file = "jiter-0.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c451f7922992751a936b96c5f5b9bb9312243d9b754c34b33d0cb72c84669f4e"},
+ {file = "jiter-0.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:308fce789a2f093dca1ff91ac391f11a9f99c35369117ad5a5c6c4903e1b3e3a"},
+ {file = "jiter-0.5.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7f5ad4a7c6b0d90776fdefa294f662e8a86871e601309643de30bf94bb93a64e"},
+ {file = "jiter-0.5.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ea189db75f8eca08807d02ae27929e890c7d47599ce3d0a6a5d41f2419ecf338"},
+ {file = "jiter-0.5.0-cp312-none-win32.whl", hash = "sha256:e3bbe3910c724b877846186c25fe3c802e105a2c1fc2b57d6688b9f8772026e4"},
+ {file = "jiter-0.5.0-cp312-none-win_amd64.whl", hash = "sha256:a586832f70c3f1481732919215f36d41c59ca080fa27a65cf23d9490e75b2ef5"},
+ {file = "jiter-0.5.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:f04bc2fc50dc77be9d10f73fcc4e39346402ffe21726ff41028f36e179b587e6"},
+ {file = "jiter-0.5.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6f433a4169ad22fcb550b11179bb2b4fd405de9b982601914ef448390b2954f3"},
+ {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad4a6398c85d3a20067e6c69890ca01f68659da94d74c800298581724e426c7e"},
+ {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6baa88334e7af3f4d7a5c66c3a63808e5efbc3698a1c57626541ddd22f8e4fbf"},
+ {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ece0a115c05efca597c6d938f88c9357c843f8c245dbbb53361a1c01afd7148"},
+ {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:335942557162ad372cc367ffaf93217117401bf930483b4b3ebdb1223dbddfa7"},
+ {file = "jiter-0.5.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:649b0ee97a6e6da174bffcb3c8c051a5935d7d4f2f52ea1583b5b3e7822fbf14"},
+ {file = "jiter-0.5.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f4be354c5de82157886ca7f5925dbda369b77344b4b4adf2723079715f823989"},
+ {file = "jiter-0.5.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5206144578831a6de278a38896864ded4ed96af66e1e63ec5dd7f4a1fce38a3a"},
+ {file = "jiter-0.5.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8120c60f8121ac3d6f072b97ef0e71770cc72b3c23084c72c4189428b1b1d3b6"},
+ {file = "jiter-0.5.0-cp38-none-win32.whl", hash = "sha256:6f1223f88b6d76b519cb033a4d3687ca157c272ec5d6015c322fc5b3074d8a5e"},
+ {file = "jiter-0.5.0-cp38-none-win_amd64.whl", hash = "sha256:c59614b225d9f434ea8fc0d0bec51ef5fa8c83679afedc0433905994fb36d631"},
+ {file = "jiter-0.5.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:0af3838cfb7e6afee3f00dc66fa24695199e20ba87df26e942820345b0afc566"},
+ {file = "jiter-0.5.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:550b11d669600dbc342364fd4adbe987f14d0bbedaf06feb1b983383dcc4b961"},
+ {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:489875bf1a0ffb3cb38a727b01e6673f0f2e395b2aad3c9387f94187cb214bbf"},
+ {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b250ca2594f5599ca82ba7e68785a669b352156260c5362ea1b4e04a0f3e2389"},
+ {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ea18e01f785c6667ca15407cd6dabbe029d77474d53595a189bdc813347218e"},
+ {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:462a52be85b53cd9bffd94e2d788a09984274fe6cebb893d6287e1c296d50653"},
+ {file = "jiter-0.5.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:92cc68b48d50fa472c79c93965e19bd48f40f207cb557a8346daa020d6ba973b"},
+ {file = "jiter-0.5.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1c834133e59a8521bc87ebcad773608c6fa6ab5c7a022df24a45030826cf10bc"},
+ {file = "jiter-0.5.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab3a71ff31cf2d45cb216dc37af522d335211f3a972d2fe14ea99073de6cb104"},
+ {file = "jiter-0.5.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cccd3af9c48ac500c95e1bcbc498020c87e1781ff0345dd371462d67b76643eb"},
+ {file = "jiter-0.5.0-cp39-none-win32.whl", hash = "sha256:368084d8d5c4fc40ff7c3cc513c4f73e02c85f6009217922d0823a48ee7adf61"},
+ {file = "jiter-0.5.0-cp39-none-win_amd64.whl", hash = "sha256:ce03f7b4129eb72f1687fa11300fbf677b02990618428934662406d2a76742a1"},
+ {file = "jiter-0.5.0.tar.gz", hash = "sha256:1d916ba875bcab5c5f7d927df998c4cb694d27dceddf3392e58beaf10563368a"},
+]
+
+[[package]]
+name = "loguru"
+version = "0.7.2"
+description = "Python logging made (stupidly) simple"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "loguru-0.7.2-py3-none-any.whl", hash = "sha256:003d71e3d3ed35f0f8984898359d65b79e5b21943f78af86aa5491210429b8eb"},
+ {file = "loguru-0.7.2.tar.gz", hash = "sha256:e671a53522515f34fd406340ee968cb9ecafbc4b36c679da03c18fd8d0bd51ac"},
+]
+
+[package.dependencies]
+colorama = {version = ">=0.3.4", markers = "sys_platform == \"win32\""}
+win32-setctime = {version = ">=1.0.0", markers = "sys_platform == \"win32\""}
+
+[package.extras]
+dev = ["Sphinx (==7.2.5)", "colorama (==0.4.5)", "colorama (==0.4.6)", "exceptiongroup (==1.1.3)", "freezegun (==1.1.0)", "freezegun (==1.2.2)", "mypy (==v0.910)", "mypy (==v0.971)", "mypy (==v1.4.1)", "mypy (==v1.5.1)", "pre-commit (==3.4.0)", "pytest (==6.1.2)", "pytest (==7.4.0)", "pytest-cov (==2.12.1)", "pytest-cov (==4.1.0)", "pytest-mypy-plugins (==1.9.3)", "pytest-mypy-plugins (==3.0.0)", "sphinx-autobuild (==2021.3.14)", "sphinx-rtd-theme (==1.3.0)", "tox (==3.27.1)", "tox (==4.11.0)"]
+
+[[package]]
+name = "markdown-it-py"
+version = "3.0.0"
+description = "Python port of markdown-it. Markdown parsing, done right!"
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"},
+ {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"},
+]
+
+[package.dependencies]
+mdurl = ">=0.1,<1.0"
+
+[package.extras]
+benchmarking = ["psutil", "pytest", "pytest-benchmark"]
+code-style = ["pre-commit (>=3.0,<4.0)"]
+compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"]
+linkify = ["linkify-it-py (>=1,<3)"]
+plugins = ["mdit-py-plugins"]
+profiling = ["gprof2dot"]
+rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"]
+testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
+
+[[package]]
+name = "mdurl"
+version = "0.1.2"
+description = "Markdown URL utilities"
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"},
+ {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"},
+]
+
+[[package]]
+name = "multidict"
+version = "6.0.5"
+description = "multidict implementation"
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b644ae063c10e7f324ab1ab6b548bdf6f8b47f3ec234fef1093bc2735e5f9"},
+ {file = "multidict-6.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:896ebdcf62683551312c30e20614305f53125750803b614e9e6ce74a96232604"},
+ {file = "multidict-6.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:411bf8515f3be9813d06004cac41ccf7d1cd46dfe233705933dd163b60e37600"},
+ {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d147090048129ce3c453f0292e7697d333db95e52616b3793922945804a433c"},
+ {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:215ed703caf15f578dca76ee6f6b21b7603791ae090fbf1ef9d865571039ade5"},
+ {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c6390cf87ff6234643428991b7359b5f59cc15155695deb4eda5c777d2b880f"},
+ {file = "multidict-6.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fd81c4ebdb4f214161be351eb5bcf385426bf023041da2fd9e60681f3cebae"},
+ {file = "multidict-6.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3cc2ad10255f903656017363cd59436f2111443a76f996584d1077e43ee51182"},
+ {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6939c95381e003f54cd4c5516740faba40cf5ad3eeff460c3ad1d3e0ea2549bf"},
+ {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:220dd781e3f7af2c2c1053da9fa96d9cf3072ca58f057f4c5adaaa1cab8fc442"},
+ {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:766c8f7511df26d9f11cd3a8be623e59cca73d44643abab3f8c8c07620524e4a"},
+ {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:fe5d7785250541f7f5019ab9cba2c71169dc7d74d0f45253f8313f436458a4ef"},
+ {file = "multidict-6.0.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c1c1496e73051918fcd4f58ff2e0f2f3066d1c76a0c6aeffd9b45d53243702cc"},
+ {file = "multidict-6.0.5-cp310-cp310-win32.whl", hash = "sha256:7afcdd1fc07befad18ec4523a782cde4e93e0a2bf71239894b8d61ee578c1319"},
+ {file = "multidict-6.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:99f60d34c048c5c2fabc766108c103612344c46e35d4ed9ae0673d33c8fb26e8"},
+ {file = "multidict-6.0.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f285e862d2f153a70586579c15c44656f888806ed0e5b56b64489afe4a2dbfba"},
+ {file = "multidict-6.0.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:53689bb4e102200a4fafa9de9c7c3c212ab40a7ab2c8e474491914d2305f187e"},
+ {file = "multidict-6.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:612d1156111ae11d14afaf3a0669ebf6c170dbb735e510a7438ffe2369a847fd"},
+ {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7be7047bd08accdb7487737631d25735c9a04327911de89ff1b26b81745bd4e3"},
+ {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de170c7b4fe6859beb8926e84f7d7d6c693dfe8e27372ce3b76f01c46e489fcf"},
+ {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04bde7a7b3de05732a4eb39c94574db1ec99abb56162d6c520ad26f83267de29"},
+ {file = "multidict-6.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85f67aed7bb647f93e7520633d8f51d3cbc6ab96957c71272b286b2f30dc70ed"},
+ {file = "multidict-6.0.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425bf820055005bfc8aa9a0b99ccb52cc2f4070153e34b701acc98d201693733"},
+ {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d3eb1ceec286eba8220c26f3b0096cf189aea7057b6e7b7a2e60ed36b373b77f"},
+ {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7901c05ead4b3fb75113fb1dd33eb1253c6d3ee37ce93305acd9d38e0b5f21a4"},
+ {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e0e79d91e71b9867c73323a3444724d496c037e578a0e1755ae159ba14f4f3d1"},
+ {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:29bfeb0dff5cb5fdab2023a7a9947b3b4af63e9c47cae2a10ad58394b517fddc"},
+ {file = "multidict-6.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e030047e85cbcedbfc073f71836d62dd5dadfbe7531cae27789ff66bc551bd5e"},
+ {file = "multidict-6.0.5-cp311-cp311-win32.whl", hash = "sha256:2f4848aa3baa109e6ab81fe2006c77ed4d3cd1e0ac2c1fbddb7b1277c168788c"},
+ {file = "multidict-6.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:2faa5ae9376faba05f630d7e5e6be05be22913782b927b19d12b8145968a85ea"},
+ {file = "multidict-6.0.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:51d035609b86722963404f711db441cf7134f1889107fb171a970c9701f92e1e"},
+ {file = "multidict-6.0.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cbebcd5bcaf1eaf302617c114aa67569dd3f090dd0ce8ba9e35e9985b41ac35b"},
+ {file = "multidict-6.0.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2ffc42c922dbfddb4a4c3b438eb056828719f07608af27d163191cb3e3aa6cc5"},
+ {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ceb3b7e6a0135e092de86110c5a74e46bda4bd4fbfeeb3a3bcec79c0f861e450"},
+ {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79660376075cfd4b2c80f295528aa6beb2058fd289f4c9252f986751a4cd0496"},
+ {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4428b29611e989719874670fd152b6625500ad6c686d464e99f5aaeeaca175a"},
+ {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d84a5c3a5f7ce6db1f999fb9438f686bc2e09d38143f2d93d8406ed2dd6b9226"},
+ {file = "multidict-6.0.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76c0de87358b192de7ea9649beb392f107dcad9ad27276324c24c91774ca5271"},
+ {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:79a6d2ba910adb2cbafc95dad936f8b9386e77c84c35bc0add315b856d7c3abb"},
+ {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:92d16a3e275e38293623ebf639c471d3e03bb20b8ebb845237e0d3664914caef"},
+ {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:fb616be3538599e797a2017cccca78e354c767165e8858ab5116813146041a24"},
+ {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:14c2976aa9038c2629efa2c148022ed5eb4cb939e15ec7aace7ca932f48f9ba6"},
+ {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:435a0984199d81ca178b9ae2c26ec3d49692d20ee29bc4c11a2a8d4514c67eda"},
+ {file = "multidict-6.0.5-cp312-cp312-win32.whl", hash = "sha256:9fe7b0653ba3d9d65cbe7698cca585bf0f8c83dbbcc710db9c90f478e175f2d5"},
+ {file = "multidict-6.0.5-cp312-cp312-win_amd64.whl", hash = "sha256:01265f5e40f5a17f8241d52656ed27192be03bfa8764d88e8220141d1e4b3556"},
+ {file = "multidict-6.0.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:19fe01cea168585ba0f678cad6f58133db2aa14eccaf22f88e4a6dccadfad8b3"},
+ {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bf7a982604375a8d49b6cc1b781c1747f243d91b81035a9b43a2126c04766f5"},
+ {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:107c0cdefe028703fb5dafe640a409cb146d44a6ae201e55b35a4af8e95457dd"},
+ {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:403c0911cd5d5791605808b942c88a8155c2592e05332d2bf78f18697a5fa15e"},
+ {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aeaf541ddbad8311a87dd695ed9642401131ea39ad7bc8cf3ef3967fd093b626"},
+ {file = "multidict-6.0.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e4972624066095e52b569e02b5ca97dbd7a7ddd4294bf4e7247d52635630dd83"},
+ {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d946b0a9eb8aaa590df1fe082cee553ceab173e6cb5b03239716338629c50c7a"},
+ {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b55358304d7a73d7bdf5de62494aaf70bd33015831ffd98bc498b433dfe5b10c"},
+ {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:a3145cb08d8625b2d3fee1b2d596a8766352979c9bffe5d7833e0503d0f0b5e5"},
+ {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d65f25da8e248202bd47445cec78e0025c0fe7582b23ec69c3b27a640dd7a8e3"},
+ {file = "multidict-6.0.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c9bf56195c6bbd293340ea82eafd0071cb3d450c703d2c93afb89f93b8386ccc"},
+ {file = "multidict-6.0.5-cp37-cp37m-win32.whl", hash = "sha256:69db76c09796b313331bb7048229e3bee7928eb62bab5e071e9f7fcc4879caee"},
+ {file = "multidict-6.0.5-cp37-cp37m-win_amd64.whl", hash = "sha256:fce28b3c8a81b6b36dfac9feb1de115bab619b3c13905b419ec71d03a3fc1423"},
+ {file = "multidict-6.0.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76f067f5121dcecf0d63a67f29080b26c43c71a98b10c701b0677e4a065fbd54"},
+ {file = "multidict-6.0.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b82cc8ace10ab5bd93235dfaab2021c70637005e1ac787031f4d1da63d493c1d"},
+ {file = "multidict-6.0.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5cb241881eefd96b46f89b1a056187ea8e9ba14ab88ba632e68d7a2ecb7aadf7"},
+ {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8e94e6912639a02ce173341ff62cc1201232ab86b8a8fcc05572741a5dc7d93"},
+ {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09a892e4a9fb47331da06948690ae38eaa2426de97b4ccbfafbdcbe5c8f37ff8"},
+ {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55205d03e8a598cfc688c71ca8ea5f66447164efff8869517f175ea632c7cb7b"},
+ {file = "multidict-6.0.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37b15024f864916b4951adb95d3a80c9431299080341ab9544ed148091b53f50"},
+ {file = "multidict-6.0.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2a1dee728b52b33eebff5072817176c172050d44d67befd681609b4746e1c2e"},
+ {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:edd08e6f2f1a390bf137080507e44ccc086353c8e98c657e666c017718561b89"},
+ {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:60d698e8179a42ec85172d12f50b1668254628425a6bd611aba022257cac1386"},
+ {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:3d25f19500588cbc47dc19081d78131c32637c25804df8414463ec908631e453"},
+ {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:4cc0ef8b962ac7a5e62b9e826bd0cd5040e7d401bc45a6835910ed699037a461"},
+ {file = "multidict-6.0.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:eca2e9d0cc5a889850e9bbd68e98314ada174ff6ccd1129500103df7a94a7a44"},
+ {file = "multidict-6.0.5-cp38-cp38-win32.whl", hash = "sha256:4a6a4f196f08c58c59e0b8ef8ec441d12aee4125a7d4f4fef000ccb22f8d7241"},
+ {file = "multidict-6.0.5-cp38-cp38-win_amd64.whl", hash = "sha256:0275e35209c27a3f7951e1ce7aaf93ce0d163b28948444bec61dd7badc6d3f8c"},
+ {file = "multidict-6.0.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e7be68734bd8c9a513f2b0cfd508802d6609da068f40dc57d4e3494cefc92929"},
+ {file = "multidict-6.0.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1d9ea7a7e779d7a3561aade7d596649fbecfa5c08a7674b11b423783217933f9"},
+ {file = "multidict-6.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ea1456df2a27c73ce51120fa2f519f1bea2f4a03a917f4a43c8707cf4cbbae1a"},
+ {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf590b134eb70629e350691ecca88eac3e3b8b3c86992042fb82e3cb1830d5e1"},
+ {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5c0631926c4f58e9a5ccce555ad7747d9a9f8b10619621f22f9635f069f6233e"},
+ {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dce1c6912ab9ff5f179eaf6efe7365c1f425ed690b03341911bf4939ef2f3046"},
+ {file = "multidict-6.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0868d64af83169e4d4152ec612637a543f7a336e4a307b119e98042e852ad9c"},
+ {file = "multidict-6.0.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:141b43360bfd3bdd75f15ed811850763555a251e38b2405967f8e25fb43f7d40"},
+ {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7df704ca8cf4a073334e0427ae2345323613e4df18cc224f647f251e5e75a527"},
+ {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6214c5a5571802c33f80e6c84713b2c79e024995b9c5897f794b43e714daeec9"},
+ {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:cd6c8fca38178e12c00418de737aef1261576bd1b6e8c6134d3e729a4e858b38"},
+ {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e02021f87a5b6932fa6ce916ca004c4d441509d33bbdbeca70d05dff5e9d2479"},
+ {file = "multidict-6.0.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ebd8d160f91a764652d3e51ce0d2956b38efe37c9231cd82cfc0bed2e40b581c"},
+ {file = "multidict-6.0.5-cp39-cp39-win32.whl", hash = "sha256:04da1bb8c8dbadf2a18a452639771951c662c5ad03aefe4884775454be322c9b"},
+ {file = "multidict-6.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:d6f6d4f185481c9669b9447bf9d9cf3b95a0e9df9d169bbc17e363b7d5487755"},
+ {file = "multidict-6.0.5-py3-none-any.whl", hash = "sha256:0d63c74e3d7ab26de115c49bffc92cc77ed23395303d496eae515d4204a625e7"},
+ {file = "multidict-6.0.5.tar.gz", hash = "sha256:f7e301075edaf50500f0b341543c41194d8df3ae5caf4702f2095f3ca73dd8da"},
+]
+
+[[package]]
+name = "numpy"
+version = "2.0.1"
+description = "Fundamental package for array computing in Python"
+optional = true
+python-versions = ">=3.9"
+files = [
+ {file = "numpy-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0fbb536eac80e27a2793ffd787895242b7f18ef792563d742c2d673bfcb75134"},
+ {file = "numpy-2.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:69ff563d43c69b1baba77af455dd0a839df8d25e8590e79c90fcbe1499ebde42"},
+ {file = "numpy-2.0.1-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:1b902ce0e0a5bb7704556a217c4f63a7974f8f43e090aff03fcf262e0b135e02"},
+ {file = "numpy-2.0.1-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:f1659887361a7151f89e79b276ed8dff3d75877df906328f14d8bb40bb4f5101"},
+ {file = "numpy-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4658c398d65d1b25e1760de3157011a80375da861709abd7cef3bad65d6543f9"},
+ {file = "numpy-2.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4127d4303b9ac9f94ca0441138acead39928938660ca58329fe156f84b9f3015"},
+ {file = "numpy-2.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e5eeca8067ad04bc8a2a8731183d51d7cbaac66d86085d5f4766ee6bf19c7f87"},
+ {file = "numpy-2.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9adbd9bb520c866e1bfd7e10e1880a1f7749f1f6e5017686a5fbb9b72cf69f82"},
+ {file = "numpy-2.0.1-cp310-cp310-win32.whl", hash = "sha256:7b9853803278db3bdcc6cd5beca37815b133e9e77ff3d4733c247414e78eb8d1"},
+ {file = "numpy-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:81b0893a39bc5b865b8bf89e9ad7807e16717f19868e9d234bdaf9b1f1393868"},
+ {file = "numpy-2.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75b4e316c5902d8163ef9d423b1c3f2f6252226d1aa5cd8a0a03a7d01ffc6268"},
+ {file = "numpy-2.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6e4eeb6eb2fced786e32e6d8df9e755ce5be920d17f7ce00bc38fcde8ccdbf9e"},
+ {file = "numpy-2.0.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:a1e01dcaab205fbece13c1410253a9eea1b1c9b61d237b6fa59bcc46e8e89343"},
+ {file = "numpy-2.0.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:a8fc2de81ad835d999113ddf87d1ea2b0f4704cbd947c948d2f5513deafe5a7b"},
+ {file = "numpy-2.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a3d94942c331dd4e0e1147f7a8699a4aa47dffc11bf8a1523c12af8b2e91bbe"},
+ {file = "numpy-2.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15eb4eca47d36ec3f78cde0a3a2ee24cf05ca7396ef808dda2c0ddad7c2bde67"},
+ {file = "numpy-2.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b83e16a5511d1b1f8a88cbabb1a6f6a499f82c062a4251892d9ad5d609863fb7"},
+ {file = "numpy-2.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f87fec1f9bc1efd23f4227becff04bd0e979e23ca50cc92ec88b38489db3b55"},
+ {file = "numpy-2.0.1-cp311-cp311-win32.whl", hash = "sha256:36d3a9405fd7c511804dc56fc32974fa5533bdeb3cd1604d6b8ff1d292b819c4"},
+ {file = "numpy-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:08458fbf403bff5e2b45f08eda195d4b0c9b35682311da5a5a0a0925b11b9bd8"},
+ {file = "numpy-2.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6bf4e6f4a2a2e26655717a1983ef6324f2664d7011f6ef7482e8c0b3d51e82ac"},
+ {file = "numpy-2.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7d6fddc5fe258d3328cd8e3d7d3e02234c5d70e01ebe377a6ab92adb14039cb4"},
+ {file = "numpy-2.0.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:5daab361be6ddeb299a918a7c0864fa8618af66019138263247af405018b04e1"},
+ {file = "numpy-2.0.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:ea2326a4dca88e4a274ba3a4405eb6c6467d3ffbd8c7d38632502eaae3820587"},
+ {file = "numpy-2.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:529af13c5f4b7a932fb0e1911d3a75da204eff023ee5e0e79c1751564221a5c8"},
+ {file = "numpy-2.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6790654cb13eab303d8402354fabd47472b24635700f631f041bd0b65e37298a"},
+ {file = "numpy-2.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:cbab9fc9c391700e3e1287666dfd82d8666d10e69a6c4a09ab97574c0b7ee0a7"},
+ {file = "numpy-2.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:99d0d92a5e3613c33a5f01db206a33f8fdf3d71f2912b0de1739894668b7a93b"},
+ {file = "numpy-2.0.1-cp312-cp312-win32.whl", hash = "sha256:173a00b9995f73b79eb0191129f2455f1e34c203f559dd118636858cc452a1bf"},
+ {file = "numpy-2.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:bb2124fdc6e62baae159ebcfa368708867eb56806804d005860b6007388df171"},
+ {file = "numpy-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bfc085b28d62ff4009364e7ca34b80a9a080cbd97c2c0630bb5f7f770dae9414"},
+ {file = "numpy-2.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8fae4ebbf95a179c1156fab0b142b74e4ba4204c87bde8d3d8b6f9c34c5825ef"},
+ {file = "numpy-2.0.1-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:72dc22e9ec8f6eaa206deb1b1355eb2e253899d7347f5e2fae5f0af613741d06"},
+ {file = "numpy-2.0.1-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:ec87f5f8aca726117a1c9b7083e7656a9d0d606eec7299cc067bb83d26f16e0c"},
+ {file = "numpy-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f682ea61a88479d9498bf2091fdcd722b090724b08b31d63e022adc063bad59"},
+ {file = "numpy-2.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8efc84f01c1cd7e34b3fb310183e72fcdf55293ee736d679b6d35b35d80bba26"},
+ {file = "numpy-2.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3fdabe3e2a52bc4eff8dc7a5044342f8bd9f11ef0934fcd3289a788c0eb10018"},
+ {file = "numpy-2.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:24a0e1befbfa14615b49ba9659d3d8818a0f4d8a1c5822af8696706fbda7310c"},
+ {file = "numpy-2.0.1-cp39-cp39-win32.whl", hash = "sha256:f9cf5ea551aec449206954b075db819f52adc1638d46a6738253a712d553c7b4"},
+ {file = "numpy-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:e9e81fa9017eaa416c056e5d9e71be93d05e2c3c2ab308d23307a8bc4443c368"},
+ {file = "numpy-2.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:61728fba1e464f789b11deb78a57805c70b2ed02343560456190d0501ba37b0f"},
+ {file = "numpy-2.0.1-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:12f5d865d60fb9734e60a60f1d5afa6d962d8d4467c120a1c0cda6eb2964437d"},
+ {file = "numpy-2.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eacf3291e263d5a67d8c1a581a8ebbcfd6447204ef58828caf69a5e3e8c75990"},
+ {file = "numpy-2.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2c3a346ae20cfd80b6cfd3e60dc179963ef2ea58da5ec074fd3d9e7a1e7ba97f"},
+ {file = "numpy-2.0.1.tar.gz", hash = "sha256:485b87235796410c3519a699cfe1faab097e509e90ebb05dcd098db2ae87e7b3"},
+]
+
+[[package]]
+name = "ollama"
+version = "0.2.1"
+description = "The official Python client for Ollama."
+optional = true
+python-versions = "<4.0,>=3.8"
+files = [
+ {file = "ollama-0.2.1-py3-none-any.whl", hash = "sha256:b6e2414921c94f573a903d1069d682ba2fb2607070ea9e19ca4a7872f2a460ec"},
+ {file = "ollama-0.2.1.tar.gz", hash = "sha256:fa316baa9a81eac3beb4affb0a17deb3008fdd6ed05b123c26306cfbe4c349b6"},
+]
+
+[package.dependencies]
+httpx = ">=0.27.0,<0.28.0"
+
+[[package]]
+name = "openai"
+version = "1.40.6"
+description = "The official Python library for the openai API"
+optional = true
+python-versions = ">=3.7.1"
+files = [
+ {file = "openai-1.40.6-py3-none-any.whl", hash = "sha256:b36372124a779381a420a34dd96f762baa748b6bdfaf83a6b9f2745f72ccc1c5"},
+ {file = "openai-1.40.6.tar.gz", hash = "sha256:2239232bcb7f4bd4ce8e02544b5769618582411cf399816d96686d1b6c1e5c8d"},
+]
+
+[package.dependencies]
+anyio = ">=3.5.0,<5"
+distro = ">=1.7.0,<2"
+httpx = ">=0.23.0,<1"
+jiter = ">=0.4.0,<1"
+pydantic = ">=1.9.0,<3"
+sniffio = "*"
+tqdm = ">4"
+typing-extensions = ">=4.11,<5"
+
+[package.extras]
+datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
+
+[[package]]
+name = "packaging"
+version = "24.1"
+description = "Core utilities for Python packages"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"},
+ {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"},
+]
+
+[[package]]
+name = "pillow"
+version = "10.4.0"
+description = "Python Imaging Library (Fork)"
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"},
+ {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"},
+ {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"},
+ {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"},
+ {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"},
+ {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"},
+ {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"},
+ {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"},
+ {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"},
+ {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"},
+ {file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"},
+ {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"},
+ {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"},
+ {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"},
+ {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"},
+ {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"},
+ {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"},
+ {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"},
+ {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"},
+ {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"},
+ {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"},
+ {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"},
+ {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"},
+ {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"},
+ {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"},
+ {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"},
+ {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"},
+ {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"},
+ {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"},
+ {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"},
+ {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"},
+ {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"},
+ {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"},
+ {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"},
+ {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"},
+ {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"},
+ {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"},
+ {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"},
+ {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"},
+ {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"},
+ {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"},
+ {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"},
+ {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"},
+ {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"},
+ {file = "pillow-10.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8d4d5063501b6dd4024b8ac2f04962d661222d120381272deea52e3fc52d3736"},
+ {file = "pillow-10.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c1ee6f42250df403c5f103cbd2768a28fe1a0ea1f0f03fe151c8741e1469c8b"},
+ {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15e02e9bb4c21e39876698abf233c8c579127986f8207200bc8a8f6bb27acf2"},
+ {file = "pillow-10.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8d4bade9952ea9a77d0c3e49cbd8b2890a399422258a77f357b9cc9be8d680"},
+ {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:43efea75eb06b95d1631cb784aa40156177bf9dd5b4b03ff38979e048258bc6b"},
+ {file = "pillow-10.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:950be4d8ba92aca4b2bb0741285a46bfae3ca699ef913ec8416c1b78eadd64cd"},
+ {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d7480af14364494365e89d6fddc510a13e5a2c3584cb19ef65415ca57252fb84"},
+ {file = "pillow-10.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:73664fe514b34c8f02452ffb73b7a92c6774e39a647087f83d67f010eb9a0cf0"},
+ {file = "pillow-10.4.0-cp38-cp38-win32.whl", hash = "sha256:e88d5e6ad0d026fba7bdab8c3f225a69f063f116462c49892b0149e21b6c0a0e"},
+ {file = "pillow-10.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:5161eef006d335e46895297f642341111945e2c1c899eb406882a6c61a4357ab"},
+ {file = "pillow-10.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ae24a547e8b711ccaaf99c9ae3cd975470e1a30caa80a6aaee9a2f19c05701d"},
+ {file = "pillow-10.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:298478fe4f77a4408895605f3482b6cc6222c018b2ce565c2b6b9c354ac3229b"},
+ {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:134ace6dc392116566980ee7436477d844520a26a4b1bd4053f6f47d096997fd"},
+ {file = "pillow-10.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:930044bb7679ab003b14023138b50181899da3f25de50e9dbee23b61b4de2126"},
+ {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c76e5786951e72ed3686e122d14c5d7012f16c8303a674d18cdcd6d89557fc5b"},
+ {file = "pillow-10.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b2724fdb354a868ddf9a880cb84d102da914e99119211ef7ecbdc613b8c96b3c"},
+ {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dbc6ae66518ab3c5847659e9988c3b60dc94ffb48ef9168656e0019a93dbf8a1"},
+ {file = "pillow-10.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:06b2f7898047ae93fad74467ec3d28fe84f7831370e3c258afa533f81ef7f3df"},
+ {file = "pillow-10.4.0-cp39-cp39-win32.whl", hash = "sha256:7970285ab628a3779aecc35823296a7869f889b8329c16ad5a71e4901a3dc4ef"},
+ {file = "pillow-10.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:961a7293b2457b405967af9c77dcaa43cc1a8cd50d23c532e62d48ab6cdd56f5"},
+ {file = "pillow-10.4.0-cp39-cp39-win_arm64.whl", hash = "sha256:32cda9e3d601a52baccb2856b8ea1fc213c90b340c542dcef77140dfa3278a9e"},
+ {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"},
+ {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"},
+ {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"},
+ {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"},
+ {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"},
+ {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"},
+ {file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"},
+ {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:a02364621fe369e06200d4a16558e056fe2805d3468350df3aef21e00d26214b"},
+ {file = "pillow-10.4.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1b5dea9831a90e9d0721ec417a80d4cbd7022093ac38a568db2dd78363b00908"},
+ {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b885f89040bb8c4a1573566bbb2f44f5c505ef6e74cec7ab9068c900047f04b"},
+ {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87dd88ded2e6d74d31e1e0a99a726a6765cda32d00ba72dc37f0651f306daaa8"},
+ {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:2db98790afc70118bd0255c2eeb465e9767ecf1f3c25f9a1abb8ffc8cfd1fe0a"},
+ {file = "pillow-10.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f7baece4ce06bade126fb84b8af1c33439a76d8a6fd818970215e0560ca28c27"},
+ {file = "pillow-10.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cfdd747216947628af7b259d274771d84db2268ca062dd5faf373639d00113a3"},
+ {file = "pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"},
+]
+
+[package.extras]
+docs = ["furo", "olefile", "sphinx (>=7.3)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"]
+fpx = ["olefile"]
+mic = ["olefile"]
+tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
+typing = ["typing-extensions"]
+xmp = ["defusedxml"]
+
+[[package]]
+name = "pluggy"
+version = "1.5.0"
+description = "plugin and hook calling mechanisms for python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
+ {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
+]
+
+[package.extras]
+dev = ["pre-commit", "tox"]
+testing = ["pytest", "pytest-benchmark"]
+
+[[package]]
+name = "pyarrow"
+version = "17.0.0"
+description = "Python library for Apache Arrow"
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "pyarrow-17.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a5c8b238d47e48812ee577ee20c9a2779e6a5904f1708ae240f53ecbee7c9f07"},
+ {file = "pyarrow-17.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db023dc4c6cae1015de9e198d41250688383c3f9af8f565370ab2b4cb5f62655"},
+ {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da1e060b3876faa11cee287839f9cc7cdc00649f475714b8680a05fd9071d545"},
+ {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c06d4624c0ad6674364bb46ef38c3132768139ddec1c56582dbac54f2663e2"},
+ {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:fa3c246cc58cb5a4a5cb407a18f193354ea47dd0648194e6265bd24177982fe8"},
+ {file = "pyarrow-17.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:f7ae2de664e0b158d1607699a16a488de3d008ba99b3a7aa5de1cbc13574d047"},
+ {file = "pyarrow-17.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:5984f416552eea15fd9cee03da53542bf4cddaef5afecefb9aa8d1010c335087"},
+ {file = "pyarrow-17.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:1c8856e2ef09eb87ecf937104aacfa0708f22dfeb039c363ec99735190ffb977"},
+ {file = "pyarrow-17.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e19f569567efcbbd42084e87f948778eb371d308e137a0f97afe19bb860ccb3"},
+ {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b244dc8e08a23b3e352899a006a26ae7b4d0da7bb636872fa8f5884e70acf15"},
+ {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b72e87fe3e1db343995562f7fff8aee354b55ee83d13afba65400c178ab2597"},
+ {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:dc5c31c37409dfbc5d014047817cb4ccd8c1ea25d19576acf1a001fe07f5b420"},
+ {file = "pyarrow-17.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:e3343cb1e88bc2ea605986d4b94948716edc7a8d14afd4e2c097232f729758b4"},
+ {file = "pyarrow-17.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:a27532c38f3de9eb3e90ecab63dfda948a8ca859a66e3a47f5f42d1e403c4d03"},
+ {file = "pyarrow-17.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9b8a823cea605221e61f34859dcc03207e52e409ccf6354634143e23af7c8d22"},
+ {file = "pyarrow-17.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f1e70de6cb5790a50b01d2b686d54aaf73da01266850b05e3af2a1bc89e16053"},
+ {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0071ce35788c6f9077ff9ecba4858108eebe2ea5a3f7cf2cf55ebc1dbc6ee24a"},
+ {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:757074882f844411fcca735e39aae74248a1531367a7c80799b4266390ae51cc"},
+ {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:9ba11c4f16976e89146781a83833df7f82077cdab7dc6232c897789343f7891a"},
+ {file = "pyarrow-17.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b0c6ac301093b42d34410b187bba560b17c0330f64907bfa4f7f7f2444b0cf9b"},
+ {file = "pyarrow-17.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:392bc9feabc647338e6c89267635e111d71edad5fcffba204425a7c8d13610d7"},
+ {file = "pyarrow-17.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:af5ff82a04b2171415f1410cff7ebb79861afc5dae50be73ce06d6e870615204"},
+ {file = "pyarrow-17.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:edca18eaca89cd6382dfbcff3dd2d87633433043650c07375d095cd3517561d8"},
+ {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c7916bff914ac5d4a8fe25b7a25e432ff921e72f6f2b7547d1e325c1ad9d155"},
+ {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f553ca691b9e94b202ff741bdd40f6ccb70cdd5fbf65c187af132f1317de6145"},
+ {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0cdb0e627c86c373205a2f94a510ac4376fdc523f8bb36beab2e7f204416163c"},
+ {file = "pyarrow-17.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d7d192305d9d8bc9082d10f361fc70a73590a4c65cf31c3e6926cd72b76bc35c"},
+ {file = "pyarrow-17.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:02dae06ce212d8b3244dd3e7d12d9c4d3046945a5933d28026598e9dbbda1fca"},
+ {file = "pyarrow-17.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:13d7a460b412f31e4c0efa1148e1d29bdf18ad1411eb6757d38f8fbdcc8645fb"},
+ {file = "pyarrow-17.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9b564a51fbccfab5a04a80453e5ac6c9954a9c5ef2890d1bcf63741909c3f8df"},
+ {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32503827abbc5aadedfa235f5ece8c4f8f8b0a3cf01066bc8d29de7539532687"},
+ {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a155acc7f154b9ffcc85497509bcd0d43efb80d6f733b0dc3bb14e281f131c8b"},
+ {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:dec8d129254d0188a49f8a1fc99e0560dc1b85f60af729f47de4046015f9b0a5"},
+ {file = "pyarrow-17.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:a48ddf5c3c6a6c505904545c25a4ae13646ae1f8ba703c4df4a1bfe4f4006bda"},
+ {file = "pyarrow-17.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:42bf93249a083aca230ba7e2786c5f673507fa97bbd9725a1e2754715151a204"},
+ {file = "pyarrow-17.0.0.tar.gz", hash = "sha256:4beca9521ed2c0921c1023e68d097d0299b62c362639ea315572a58f3f50fd28"},
+]
+
+[package.dependencies]
+numpy = ">=1.16.6"
+
+[package.extras]
+test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"]
+
+[[package]]
+name = "pydantic"
+version = "2.8.2"
+description = "Data validation using Python type hints"
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"},
+ {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"},
+]
+
+[package.dependencies]
+annotated-types = ">=0.4.0"
+pydantic-core = "2.20.1"
+typing-extensions = [
+ {version = ">=4.12.2", markers = "python_version >= \"3.13\""},
+ {version = ">=4.6.1", markers = "python_version < \"3.13\""},
+]
+
+[package.extras]
+email = ["email-validator (>=2.0.0)"]
+
+[[package]]
+name = "pydantic-core"
+version = "2.20.1"
+description = "Core functionality for Pydantic validation and serialization"
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"},
+ {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"},
+ {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"},
+ {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"},
+ {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"},
+ {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"},
+ {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"},
+ {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"},
+ {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"},
+ {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"},
+ {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"},
+ {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"},
+ {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"},
+ {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"},
+ {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"},
+ {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"},
+ {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"},
+ {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"},
+ {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"},
+ {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"},
+ {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"},
+ {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"},
+ {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"},
+ {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"},
+ {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"},
+ {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"},
+ {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"},
+ {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"},
+ {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"},
+ {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"},
+ {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"},
+ {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"},
+ {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"},
+ {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"},
+ {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"},
+ {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"},
+ {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"},
+ {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"},
+ {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"},
+ {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"},
+ {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"},
+ {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"},
+ {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"},
+ {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"},
+]
+
+[package.dependencies]
+typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
+
+[[package]]
+name = "pygments"
+version = "2.18.0"
+description = "Pygments is a syntax highlighting package written in Python."
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"},
+ {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"},
+]
+
+[package.extras]
+windows-terminal = ["colorama (>=0.4.6)"]
+
+[[package]]
+name = "pytest"
+version = "8.3.2"
+description = "pytest: simple powerful testing with Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"},
+ {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+iniconfig = "*"
+packaging = "*"
+pluggy = ">=1.5,<2"
+
+[package.extras]
+dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
+
+[[package]]
+name = "pyyaml"
+version = "6.0.2"
+description = "YAML parser and emitter for Python"
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
+ {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
+ {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"},
+ {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"},
+ {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"},
+ {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"},
+ {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"},
+ {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"},
+ {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"},
+ {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"},
+ {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"},
+ {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"},
+ {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"},
+ {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"},
+ {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"},
+ {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"},
+ {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"},
+ {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"},
+ {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"},
+ {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"},
+ {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"},
+ {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"},
+ {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"},
+ {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"},
+ {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"},
+ {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"},
+ {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"},
+ {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"},
+ {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"},
+ {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"},
+ {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"},
+ {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"},
+ {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"},
+ {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"},
+ {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"},
+ {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"},
+ {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"},
+ {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"},
+ {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"},
+ {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"},
+ {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"},
+ {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"},
+ {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"},
+ {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"},
+ {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"},
+ {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"},
+ {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"},
+ {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"},
+ {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"},
+ {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"},
+ {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"},
+ {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"},
+ {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"},
+]
+
+[[package]]
+name = "regex"
+version = "2024.7.24"
+description = "Alternative regular expression module, to replace re."
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "regex-2024.7.24-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:228b0d3f567fafa0633aee87f08b9276c7062da9616931382993c03808bb68ce"},
+ {file = "regex-2024.7.24-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3426de3b91d1bc73249042742f45c2148803c111d1175b283270177fdf669024"},
+ {file = "regex-2024.7.24-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f273674b445bcb6e4409bf8d1be67bc4b58e8b46fd0d560055d515b8830063cd"},
+ {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23acc72f0f4e1a9e6e9843d6328177ae3074b4182167e34119ec7233dfeccf53"},
+ {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65fd3d2e228cae024c411c5ccdffae4c315271eee4a8b839291f84f796b34eca"},
+ {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c414cbda77dbf13c3bc88b073a1a9f375c7b0cb5e115e15d4b73ec3a2fbc6f59"},
+ {file = "regex-2024.7.24-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf7a89eef64b5455835f5ed30254ec19bf41f7541cd94f266ab7cbd463f00c41"},
+ {file = "regex-2024.7.24-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19c65b00d42804e3fbea9708f0937d157e53429a39b7c61253ff15670ff62cb5"},
+ {file = "regex-2024.7.24-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7a5486ca56c8869070a966321d5ab416ff0f83f30e0e2da1ab48815c8d165d46"},
+ {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6f51f9556785e5a203713f5efd9c085b4a45aecd2a42573e2b5041881b588d1f"},
+ {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:a4997716674d36a82eab3e86f8fa77080a5d8d96a389a61ea1d0e3a94a582cf7"},
+ {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:c0abb5e4e8ce71a61d9446040c1e86d4e6d23f9097275c5bd49ed978755ff0fe"},
+ {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:18300a1d78cf1290fa583cd8b7cde26ecb73e9f5916690cf9d42de569c89b1ce"},
+ {file = "regex-2024.7.24-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:416c0e4f56308f34cdb18c3f59849479dde5b19febdcd6e6fa4d04b6c31c9faa"},
+ {file = "regex-2024.7.24-cp310-cp310-win32.whl", hash = "sha256:fb168b5924bef397b5ba13aabd8cf5df7d3d93f10218d7b925e360d436863f66"},
+ {file = "regex-2024.7.24-cp310-cp310-win_amd64.whl", hash = "sha256:6b9fc7e9cc983e75e2518496ba1afc524227c163e43d706688a6bb9eca41617e"},
+ {file = "regex-2024.7.24-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:382281306e3adaaa7b8b9ebbb3ffb43358a7bbf585fa93821300a418bb975281"},
+ {file = "regex-2024.7.24-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4fdd1384619f406ad9037fe6b6eaa3de2749e2e12084abc80169e8e075377d3b"},
+ {file = "regex-2024.7.24-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3d974d24edb231446f708c455fd08f94c41c1ff4f04bcf06e5f36df5ef50b95a"},
+ {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2ec4419a3fe6cf8a4795752596dfe0adb4aea40d3683a132bae9c30b81e8d73"},
+ {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb563dd3aea54c797adf513eeec819c4213d7dbfc311874eb4fd28d10f2ff0f2"},
+ {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:45104baae8b9f67569f0f1dca5e1f1ed77a54ae1cd8b0b07aba89272710db61e"},
+ {file = "regex-2024.7.24-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:994448ee01864501912abf2bad9203bffc34158e80fe8bfb5b031f4f8e16da51"},
+ {file = "regex-2024.7.24-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3fac296f99283ac232d8125be932c5cd7644084a30748fda013028c815ba3364"},
+ {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7e37e809b9303ec3a179085415cb5f418ecf65ec98cdfe34f6a078b46ef823ee"},
+ {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:01b689e887f612610c869421241e075c02f2e3d1ae93a037cb14f88ab6a8934c"},
+ {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f6442f0f0ff81775eaa5b05af8a0ffa1dda36e9cf6ec1e0d3d245e8564b684ce"},
+ {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:871e3ab2838fbcb4e0865a6e01233975df3a15e6fce93b6f99d75cacbd9862d1"},
+ {file = "regex-2024.7.24-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c918b7a1e26b4ab40409820ddccc5d49871a82329640f5005f73572d5eaa9b5e"},
+ {file = "regex-2024.7.24-cp311-cp311-win32.whl", hash = "sha256:2dfbb8baf8ba2c2b9aa2807f44ed272f0913eeeba002478c4577b8d29cde215c"},
+ {file = "regex-2024.7.24-cp311-cp311-win_amd64.whl", hash = "sha256:538d30cd96ed7d1416d3956f94d54e426a8daf7c14527f6e0d6d425fcb4cca52"},
+ {file = "regex-2024.7.24-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:fe4ebef608553aff8deb845c7f4f1d0740ff76fa672c011cc0bacb2a00fbde86"},
+ {file = "regex-2024.7.24-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:74007a5b25b7a678459f06559504f1eec2f0f17bca218c9d56f6a0a12bfffdad"},
+ {file = "regex-2024.7.24-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7df9ea48641da022c2a3c9c641650cd09f0cd15e8908bf931ad538f5ca7919c9"},
+ {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a1141a1dcc32904c47f6846b040275c6e5de0bf73f17d7a409035d55b76f289"},
+ {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80c811cfcb5c331237d9bad3bea2c391114588cf4131707e84d9493064d267f9"},
+ {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7214477bf9bd195894cf24005b1e7b496f46833337b5dedb7b2a6e33f66d962c"},
+ {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d55588cba7553f0b6ec33130bc3e114b355570b45785cebdc9daed8c637dd440"},
+ {file = "regex-2024.7.24-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:558a57cfc32adcf19d3f791f62b5ff564922942e389e3cfdb538a23d65a6b610"},
+ {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a512eed9dfd4117110b1881ba9a59b31433caed0c4101b361f768e7bcbaf93c5"},
+ {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:86b17ba823ea76256b1885652e3a141a99a5c4422f4a869189db328321b73799"},
+ {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5eefee9bfe23f6df09ffb6dfb23809f4d74a78acef004aa904dc7c88b9944b05"},
+ {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:731fcd76bbdbf225e2eb85b7c38da9633ad3073822f5ab32379381e8c3c12e94"},
+ {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eaef80eac3b4cfbdd6de53c6e108b4c534c21ae055d1dbea2de6b3b8ff3def38"},
+ {file = "regex-2024.7.24-cp312-cp312-win32.whl", hash = "sha256:185e029368d6f89f36e526764cf12bf8d6f0e3a2a7737da625a76f594bdfcbfc"},
+ {file = "regex-2024.7.24-cp312-cp312-win_amd64.whl", hash = "sha256:2f1baff13cc2521bea83ab2528e7a80cbe0ebb2c6f0bfad15be7da3aed443908"},
+ {file = "regex-2024.7.24-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:66b4c0731a5c81921e938dcf1a88e978264e26e6ac4ec96a4d21ae0354581ae0"},
+ {file = "regex-2024.7.24-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:88ecc3afd7e776967fa16c80f974cb79399ee8dc6c96423321d6f7d4b881c92b"},
+ {file = "regex-2024.7.24-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:64bd50cf16bcc54b274e20235bf8edbb64184a30e1e53873ff8d444e7ac656b2"},
+ {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb462f0e346fcf41a901a126b50f8781e9a474d3927930f3490f38a6e73b6950"},
+ {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a82465ebbc9b1c5c50738536fdfa7cab639a261a99b469c9d4c7dcbb2b3f1e57"},
+ {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:68a8f8c046c6466ac61a36b65bb2395c74451df2ffb8458492ef49900efed293"},
+ {file = "regex-2024.7.24-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac8e84fff5d27420f3c1e879ce9929108e873667ec87e0c8eeb413a5311adfe"},
+ {file = "regex-2024.7.24-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba2537ef2163db9e6ccdbeb6f6424282ae4dea43177402152c67ef869cf3978b"},
+ {file = "regex-2024.7.24-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:43affe33137fcd679bdae93fb25924979517e011f9dea99163f80b82eadc7e53"},
+ {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:c9bb87fdf2ab2370f21e4d5636e5317775e5d51ff32ebff2cf389f71b9b13750"},
+ {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:945352286a541406f99b2655c973852da7911b3f4264e010218bbc1cc73168f2"},
+ {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:8bc593dcce679206b60a538c302d03c29b18e3d862609317cb560e18b66d10cf"},
+ {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3f3b6ca8eae6d6c75a6cff525c8530c60e909a71a15e1b731723233331de4169"},
+ {file = "regex-2024.7.24-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c51edc3541e11fbe83f0c4d9412ef6c79f664a3745fab261457e84465ec9d5a8"},
+ {file = "regex-2024.7.24-cp38-cp38-win32.whl", hash = "sha256:d0a07763776188b4db4c9c7fb1b8c494049f84659bb387b71c73bbc07f189e96"},
+ {file = "regex-2024.7.24-cp38-cp38-win_amd64.whl", hash = "sha256:8fd5afd101dcf86a270d254364e0e8dddedebe6bd1ab9d5f732f274fa00499a5"},
+ {file = "regex-2024.7.24-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0ffe3f9d430cd37d8fa5632ff6fb36d5b24818c5c986893063b4e5bdb84cdf24"},
+ {file = "regex-2024.7.24-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:25419b70ba00a16abc90ee5fce061228206173231f004437730b67ac77323f0d"},
+ {file = "regex-2024.7.24-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:33e2614a7ce627f0cdf2ad104797d1f68342d967de3695678c0cb84f530709f8"},
+ {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d33a0021893ede5969876052796165bab6006559ab845fd7b515a30abdd990dc"},
+ {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04ce29e2c5fedf296b1a1b0acc1724ba93a36fb14031f3abfb7abda2806c1535"},
+ {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b16582783f44fbca6fcf46f61347340c787d7530d88b4d590a397a47583f31dd"},
+ {file = "regex-2024.7.24-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:836d3cc225b3e8a943d0b02633fb2f28a66e281290302a79df0e1eaa984ff7c1"},
+ {file = "regex-2024.7.24-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:438d9f0f4bc64e8dea78274caa5af971ceff0f8771e1a2333620969936ba10be"},
+ {file = "regex-2024.7.24-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:973335b1624859cb0e52f96062a28aa18f3a5fc77a96e4a3d6d76e29811a0e6e"},
+ {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c5e69fd3eb0b409432b537fe3c6f44ac089c458ab6b78dcec14478422879ec5f"},
+ {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:fbf8c2f00904eaf63ff37718eb13acf8e178cb940520e47b2f05027f5bb34ce3"},
+ {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2757ace61bc4061b69af19e4689fa4416e1a04840f33b441034202b5cd02d4"},
+ {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:44fc61b99035fd9b3b9453f1713234e5a7c92a04f3577252b45feefe1b327759"},
+ {file = "regex-2024.7.24-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:84c312cdf839e8b579f504afcd7b65f35d60b6285d892b19adea16355e8343c9"},
+ {file = "regex-2024.7.24-cp39-cp39-win32.whl", hash = "sha256:ca5b2028c2f7af4e13fb9fc29b28d0ce767c38c7facdf64f6c2cd040413055f1"},
+ {file = "regex-2024.7.24-cp39-cp39-win_amd64.whl", hash = "sha256:7c479f5ae937ec9985ecaf42e2e10631551d909f203e31308c12d703922742f9"},
+ {file = "regex-2024.7.24.tar.gz", hash = "sha256:9cfd009eed1a46b27c14039ad5bbc5e71b6367c5b2e6d5f5da0ea91600817506"},
+]
+
+[[package]]
+name = "requests"
+version = "2.32.3"
+description = "Python HTTP for Humans."
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
+ {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
+]
+
+[package.dependencies]
+certifi = ">=2017.4.17"
+charset-normalizer = ">=2,<4"
+idna = ">=2.5,<4"
+urllib3 = ">=1.21.1,<3"
+
+[package.extras]
+socks = ["PySocks (>=1.5.6,!=1.5.7)"]
+use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
+
+[[package]]
+name = "rich"
+version = "13.7.1"
+description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
+optional = true
+python-versions = ">=3.7.0"
+files = [
+ {file = "rich-13.7.1-py3-none-any.whl", hash = "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222"},
+ {file = "rich-13.7.1.tar.gz", hash = "sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432"},
+]
+
+[package.dependencies]
+markdown-it-py = ">=2.2.0"
+pygments = ">=2.13.0,<3.0.0"
+
+[package.extras]
+jupyter = ["ipywidgets (>=7.5.1,<9)"]
+
+[[package]]
+name = "safetensors"
+version = "0.4.4"
+description = ""
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "safetensors-0.4.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2adb497ada13097f30e386e88c959c0fda855a5f6f98845710f5bb2c57e14f12"},
+ {file = "safetensors-0.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7db7fdc2d71fd1444d85ca3f3d682ba2df7d61a637dfc6d80793f439eae264ab"},
+ {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d4f0eed76b430f009fbefca1a0028ddb112891b03cb556d7440d5cd68eb89a9"},
+ {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:57d216fab0b5c432aabf7170883d7c11671622bde8bd1436c46d633163a703f6"},
+ {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7d9b76322e49c056bcc819f8bdca37a2daa5a6d42c07f30927b501088db03309"},
+ {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:32f0d1f6243e90ee43bc6ee3e8c30ac5b09ca63f5dd35dbc985a1fc5208c451a"},
+ {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44d464bdc384874601a177375028012a5f177f1505279f9456fea84bbc575c7f"},
+ {file = "safetensors-0.4.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:63144e36209ad8e4e65384dbf2d52dd5b1866986079c00a72335402a38aacdc5"},
+ {file = "safetensors-0.4.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:051d5ecd490af7245258000304b812825974d5e56f14a3ff7e1b8b2ba6dc2ed4"},
+ {file = "safetensors-0.4.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:51bc8429d9376224cd3cf7e8ce4f208b4c930cd10e515b6ac6a72cbc3370f0d9"},
+ {file = "safetensors-0.4.4-cp310-none-win32.whl", hash = "sha256:fb7b54830cee8cf9923d969e2df87ce20e625b1af2fd194222ab902d3adcc29c"},
+ {file = "safetensors-0.4.4-cp310-none-win_amd64.whl", hash = "sha256:4b3e8aa8226d6560de8c2b9d5ff8555ea482599c670610758afdc97f3e021e9c"},
+ {file = "safetensors-0.4.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:bbaa31f2cb49013818bde319232ccd72da62ee40f7d2aa532083eda5664e85ff"},
+ {file = "safetensors-0.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9fdcb80f4e9fbb33b58e9bf95e7dbbedff505d1bcd1c05f7c7ce883632710006"},
+ {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55c14c20be247b8a1aeaf3ab4476265e3ca83096bb8e09bb1a7aa806088def4f"},
+ {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:949aaa1118660f992dbf0968487b3e3cfdad67f948658ab08c6b5762e90cc8b6"},
+ {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c11a4ab7debc456326a2bac67f35ee0ac792bcf812c7562a4a28559a5c795e27"},
+ {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0cea44bba5c5601b297bc8307e4075535b95163402e4906b2e9b82788a2a6df"},
+ {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9d752c97f6bbe327352f76e5b86442d776abc789249fc5e72eacb49e6916482"},
+ {file = "safetensors-0.4.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03f2bb92e61b055ef6cc22883ad1ae898010a95730fa988c60a23800eb742c2c"},
+ {file = "safetensors-0.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:87bf3f91a9328a941acc44eceffd4e1f5f89b030985b2966637e582157173b98"},
+ {file = "safetensors-0.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:20d218ec2b6899d29d6895419a58b6e44cc5ff8f0cc29fac8d236a8978ab702e"},
+ {file = "safetensors-0.4.4-cp311-none-win32.whl", hash = "sha256:8079486118919f600c603536e2490ca37b3dbd3280e3ad6eaacfe6264605ac8a"},
+ {file = "safetensors-0.4.4-cp311-none-win_amd64.whl", hash = "sha256:2f8c2eb0615e2e64ee27d478c7c13f51e5329d7972d9e15528d3e4cfc4a08f0d"},
+ {file = "safetensors-0.4.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:baec5675944b4a47749c93c01c73d826ef7d42d36ba8d0dba36336fa80c76426"},
+ {file = "safetensors-0.4.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f15117b96866401825f3e94543145028a2947d19974429246ce59403f49e77c6"},
+ {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a13a9caea485df164c51be4eb0c87f97f790b7c3213d635eba2314d959fe929"},
+ {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6b54bc4ca5f9b9bba8cd4fb91c24b2446a86b5ae7f8975cf3b7a277353c3127c"},
+ {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:08332c22e03b651c8eb7bf5fc2de90044f3672f43403b3d9ac7e7e0f4f76495e"},
+ {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bb62841e839ee992c37bb75e75891c7f4904e772db3691c59daaca5b4ab960e1"},
+ {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e5b927acc5f2f59547270b0309a46d983edc44be64e1ca27a7fcb0474d6cd67"},
+ {file = "safetensors-0.4.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2a69c71b1ae98a8021a09a0b43363b0143b0ce74e7c0e83cacba691b62655fb8"},
+ {file = "safetensors-0.4.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23654ad162c02a5636f0cd520a0310902c4421aab1d91a0b667722a4937cc445"},
+ {file = "safetensors-0.4.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0677c109d949cf53756859160b955b2e75b0eefe952189c184d7be30ecf7e858"},
+ {file = "safetensors-0.4.4-cp312-none-win32.whl", hash = "sha256:a51d0ddd4deb8871c6de15a772ef40b3dbd26a3c0451bb9e66bc76fc5a784e5b"},
+ {file = "safetensors-0.4.4-cp312-none-win_amd64.whl", hash = "sha256:2d065059e75a798bc1933c293b68d04d79b586bb7f8c921e0ca1e82759d0dbb1"},
+ {file = "safetensors-0.4.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:9d625692578dd40a112df30c02a1adf068027566abd8e6a74893bb13d441c150"},
+ {file = "safetensors-0.4.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7cabcf39c81e5b988d0adefdaea2eb9b4fd9bd62d5ed6559988c62f36bfa9a89"},
+ {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8359bef65f49d51476e9811d59c015f0ddae618ee0e44144f5595278c9f8268c"},
+ {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1a32c662e7df9226fd850f054a3ead0e4213a96a70b5ce37b2d26ba27004e013"},
+ {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c329a4dcc395364a1c0d2d1574d725fe81a840783dda64c31c5a60fc7d41472c"},
+ {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:239ee093b1db877c9f8fe2d71331a97f3b9c7c0d3ab9f09c4851004a11f44b65"},
+ {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd574145d930cf9405a64f9923600879a5ce51d9f315443a5f706374841327b6"},
+ {file = "safetensors-0.4.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f6784eed29f9e036acb0b7769d9e78a0dc2c72c2d8ba7903005350d817e287a4"},
+ {file = "safetensors-0.4.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:65a4a6072436bf0a4825b1c295d248cc17e5f4651e60ee62427a5bcaa8622a7a"},
+ {file = "safetensors-0.4.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:df81e3407630de060ae8313da49509c3caa33b1a9415562284eaf3d0c7705f9f"},
+ {file = "safetensors-0.4.4-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:e4a0f374200e8443d9746e947ebb346c40f83a3970e75a685ade0adbba5c48d9"},
+ {file = "safetensors-0.4.4-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:181fb5f3dee78dae7fd7ec57d02e58f7936498d587c6b7c1c8049ef448c8d285"},
+ {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb4ac1d8f6b65ec84ddfacd275079e89d9df7c92f95675ba96c4f790a64df6e"},
+ {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:76897944cd9239e8a70955679b531b9a0619f76e25476e57ed373322d9c2075d"},
+ {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2a9e9d1a27e51a0f69e761a3d581c3af46729ec1c988fa1f839e04743026ae35"},
+ {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:005ef9fc0f47cb9821c40793eb029f712e97278dae84de91cb2b4809b856685d"},
+ {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26987dac3752688c696c77c3576f951dbbdb8c57f0957a41fb6f933cf84c0b62"},
+ {file = "safetensors-0.4.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c05270b290acd8d249739f40d272a64dd597d5a4b90f27d830e538bc2549303c"},
+ {file = "safetensors-0.4.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:068d3a33711fc4d93659c825a04480ff5a3854e1d78632cdc8f37fee917e8a60"},
+ {file = "safetensors-0.4.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:063421ef08ca1021feea8b46951251b90ae91f899234dd78297cbe7c1db73b99"},
+ {file = "safetensors-0.4.4-cp37-none-win32.whl", hash = "sha256:d52f5d0615ea83fd853d4e1d8acf93cc2e0223ad4568ba1e1f6ca72e94ea7b9d"},
+ {file = "safetensors-0.4.4-cp37-none-win_amd64.whl", hash = "sha256:88a5ac3280232d4ed8e994cbc03b46a1807ce0aa123867b40c4a41f226c61f94"},
+ {file = "safetensors-0.4.4-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:3467ab511bfe3360967d7dc53b49f272d59309e57a067dd2405b4d35e7dcf9dc"},
+ {file = "safetensors-0.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2ab4c96d922e53670ce25fbb9b63d5ea972e244de4fa1dd97b590d9fd66aacef"},
+ {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87df18fce4440477c3ef1fd7ae17c704a69a74a77e705a12be135ee0651a0c2d"},
+ {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0e5fe345b2bc7d88587149ac11def1f629d2671c4c34f5df38aed0ba59dc37f8"},
+ {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9f1a3e01dce3cd54060791e7e24588417c98b941baa5974700eeb0b8eb65b0a0"},
+ {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c6bf35e9a8998d8339fd9a05ac4ce465a4d2a2956cc0d837b67c4642ed9e947"},
+ {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:166c0c52f6488b8538b2a9f3fbc6aad61a7261e170698779b371e81b45f0440d"},
+ {file = "safetensors-0.4.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:87e9903b8668a16ef02c08ba4ebc91e57a49c481e9b5866e31d798632805014b"},
+ {file = "safetensors-0.4.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a9c421153aa23c323bd8483d4155b4eee82c9a50ac11cccd83539104a8279c64"},
+ {file = "safetensors-0.4.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a4b8617499b2371c7353302c5116a7e0a3a12da66389ce53140e607d3bf7b3d3"},
+ {file = "safetensors-0.4.4-cp38-none-win32.whl", hash = "sha256:c6280f5aeafa1731f0a3709463ab33d8e0624321593951aefada5472f0b313fd"},
+ {file = "safetensors-0.4.4-cp38-none-win_amd64.whl", hash = "sha256:6ceed6247fc2d33b2a7b7d25d8a0fe645b68798856e0bc7a9800c5fd945eb80f"},
+ {file = "safetensors-0.4.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5cf6c6f6193797372adf50c91d0171743d16299491c75acad8650107dffa9269"},
+ {file = "safetensors-0.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:419010156b914a3e5da4e4adf992bee050924d0fe423c4b329e523e2c14c3547"},
+ {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88f6fd5a5c1302ce79993cc5feeadcc795a70f953c762544d01fb02b2db4ea33"},
+ {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d468cffb82d90789696d5b4d8b6ab8843052cba58a15296691a7a3df55143cd2"},
+ {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9353c2af2dd467333d4850a16edb66855e795561cd170685178f706c80d2c71e"},
+ {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:83c155b4a33368d9b9c2543e78f2452090fb030c52401ca608ef16fa58c98353"},
+ {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9850754c434e636ce3dc586f534bb23bcbd78940c304775bee9005bf610e98f1"},
+ {file = "safetensors-0.4.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:275f500b4d26f67b6ec05629a4600645231bd75e4ed42087a7c1801bff04f4b3"},
+ {file = "safetensors-0.4.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5c2308de665b7130cd0e40a2329278226e4cf083f7400c51ca7e19ccfb3886f3"},
+ {file = "safetensors-0.4.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e06a9ebc8656e030ccfe44634f2a541b4b1801cd52e390a53ad8bacbd65f8518"},
+ {file = "safetensors-0.4.4-cp39-none-win32.whl", hash = "sha256:ef73df487b7c14b477016947c92708c2d929e1dee2bacdd6fff5a82ed4539537"},
+ {file = "safetensors-0.4.4-cp39-none-win_amd64.whl", hash = "sha256:83d054818a8d1198d8bd8bc3ea2aac112a2c19def2bf73758321976788706398"},
+ {file = "safetensors-0.4.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1d1f34c71371f0e034004a0b583284b45d233dd0b5f64a9125e16b8a01d15067"},
+ {file = "safetensors-0.4.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1a8043a33d58bc9b30dfac90f75712134ca34733ec3d8267b1bd682afe7194f5"},
+ {file = "safetensors-0.4.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8db8f0c59c84792c12661f8efa85de160f80efe16b87a9d5de91b93f9e0bce3c"},
+ {file = "safetensors-0.4.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cfc1fc38e37630dd12d519bdec9dcd4b345aec9930bb9ce0ed04461f49e58b52"},
+ {file = "safetensors-0.4.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e5c9d86d9b13b18aafa88303e2cd21e677f5da2a14c828d2c460fe513af2e9a5"},
+ {file = "safetensors-0.4.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:43251d7f29a59120a26f5a0d9583b9e112999e500afabcfdcb91606d3c5c89e3"},
+ {file = "safetensors-0.4.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:2c42e9b277513b81cf507e6121c7b432b3235f980cac04f39f435b7902857f91"},
+ {file = "safetensors-0.4.4-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3daacc9a4e3f428a84dd56bf31f20b768eb0b204af891ed68e1f06db9edf546f"},
+ {file = "safetensors-0.4.4-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:218bbb9b883596715fc9997bb42470bf9f21bb832c3b34c2bf744d6fa8f2bbba"},
+ {file = "safetensors-0.4.4-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bd5efc26b39f7fc82d4ab1d86a7f0644c8e34f3699c33f85bfa9a717a030e1b"},
+ {file = "safetensors-0.4.4-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:56ad9776b65d8743f86698a1973292c966cf3abff627efc44ed60e66cc538ddd"},
+ {file = "safetensors-0.4.4-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:30f23e6253c5f43a809dea02dc28a9f5fa747735dc819f10c073fe1b605e97d4"},
+ {file = "safetensors-0.4.4-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:5512078d00263de6cb04e9d26c9ae17611098f52357fea856213e38dc462f81f"},
+ {file = "safetensors-0.4.4-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b96c3d9266439d17f35fc2173111d93afc1162f168e95aed122c1ca517b1f8f1"},
+ {file = "safetensors-0.4.4-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:08d464aa72a9a13826946b4fb9094bb4b16554bbea2e069e20bd903289b6ced9"},
+ {file = "safetensors-0.4.4-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:210160816d5a36cf41f48f38473b6f70d7bcb4b0527bedf0889cc0b4c3bb07db"},
+ {file = "safetensors-0.4.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb276a53717f2bcfb6df0bcf284d8a12069002508d4c1ca715799226024ccd45"},
+ {file = "safetensors-0.4.4-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a2c28c6487f17d8db0089e8b2cdc13de859366b94cc6cdc50e1b0a4147b56551"},
+ {file = "safetensors-0.4.4-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7915f0c60e4e6e65d90f136d85dd3b429ae9191c36b380e626064694563dbd9f"},
+ {file = "safetensors-0.4.4-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:00eea99ae422fbfa0b46065acbc58b46bfafadfcec179d4b4a32d5c45006af6c"},
+ {file = "safetensors-0.4.4-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bb1ed4fcb0b3c2f3ea2c5767434622fe5d660e5752f21ac2e8d737b1e5e480bb"},
+ {file = "safetensors-0.4.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:73fc9a0a4343188bdb421783e600bfaf81d0793cd4cce6bafb3c2ed567a74cd5"},
+ {file = "safetensors-0.4.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c37e6b714200824c73ca6eaf007382de76f39466a46e97558b8dc4cf643cfbf"},
+ {file = "safetensors-0.4.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f75698c5c5c542417ac4956acfc420f7d4a2396adca63a015fd66641ea751759"},
+ {file = "safetensors-0.4.4-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca1a209157f242eb183e209040097118472e169f2e069bfbd40c303e24866543"},
+ {file = "safetensors-0.4.4-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:177f2b60a058f92a3cec7a1786c9106c29eca8987ecdfb79ee88126e5f47fa31"},
+ {file = "safetensors-0.4.4-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ee9622e84fe6e4cd4f020e5fda70d6206feff3157731df7151d457fdae18e541"},
+ {file = "safetensors-0.4.4.tar.gz", hash = "sha256:5fe3e9b705250d0172ed4e100a811543108653fb2b66b9e702a088ad03772a07"},
+]
+
+[package.extras]
+all = ["safetensors[jax]", "safetensors[numpy]", "safetensors[paddlepaddle]", "safetensors[pinned-tf]", "safetensors[quality]", "safetensors[testing]", "safetensors[torch]"]
+dev = ["safetensors[all]"]
+jax = ["flax (>=0.6.3)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "safetensors[numpy]"]
+mlx = ["mlx (>=0.0.9)"]
+numpy = ["numpy (>=1.21.6)"]
+paddlepaddle = ["paddlepaddle (>=2.4.1)", "safetensors[numpy]"]
+pinned-tf = ["safetensors[numpy]", "tensorflow (==2.11.0)"]
+quality = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "isort (>=5.5.4)"]
+tensorflow = ["safetensors[numpy]", "tensorflow (>=2.11.0)"]
+testing = ["h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "hypothesis (>=6.70.2)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "safetensors[numpy]", "setuptools-rust (>=1.5.2)"]
+torch = ["safetensors[numpy]", "torch (>=1.10)"]
+
+[[package]]
+name = "shellingham"
+version = "1.5.4"
+description = "Tool to Detect Surrounding Shell"
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"},
+ {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"},
+]
+
+[[package]]
+name = "sniffio"
+version = "1.3.1"
+description = "Sniff out which async library your code is running under"
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
+ {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
+]
+
+[[package]]
+name = "tabulate"
+version = "0.9.0"
+description = "Pretty-print tabular data"
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"},
+ {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"},
+]
+
+[package.extras]
+widechars = ["wcwidth"]
+
+[[package]]
+name = "together"
+version = "1.2.7"
+description = "Python client for Together's Cloud Platform!"
+optional = true
+python-versions = "<4.0,>=3.8"
+files = [
+ {file = "together-1.2.7-py3-none-any.whl", hash = "sha256:1350e3c85a0108f268177d14dd5807af2a71d01c446d1c27a907795de376a81d"},
+ {file = "together-1.2.7.tar.gz", hash = "sha256:fae73acc903f2f364d57d3ca33d72de51f44442b4a06c69f32ad6d058457c6ee"},
+]
+
+[package.dependencies]
+aiohttp = ">=3.9.3,<4.0.0"
+click = ">=8.1.7,<9.0.0"
+eval-type-backport = ">=0.1.3,<0.3.0"
+filelock = ">=3.13.1,<4.0.0"
+numpy = {version = ">=1.26.0", markers = "python_version >= \"3.12\""}
+pillow = ">=10.3.0,<11.0.0"
+pyarrow = ">=10.0.1"
+pydantic = ">=2.6.3,<3.0.0"
+requests = ">=2.31.0,<3.0.0"
+tabulate = ">=0.9.0,<0.10.0"
+tqdm = ">=4.66.2,<5.0.0"
+typer = ">=0.9,<0.13"
+
+[[package]]
+name = "tokenizers"
+version = "0.19.1"
+description = ""
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "tokenizers-0.19.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:952078130b3d101e05ecfc7fc3640282d74ed26bcf691400f872563fca15ac97"},
+ {file = "tokenizers-0.19.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:82c8b8063de6c0468f08e82c4e198763e7b97aabfe573fd4cf7b33930ca4df77"},
+ {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f03727225feaf340ceeb7e00604825addef622d551cbd46b7b775ac834c1e1c4"},
+ {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:453e4422efdfc9c6b6bf2eae00d5e323f263fff62b29a8c9cd526c5003f3f642"},
+ {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:02e81bf089ebf0e7f4df34fa0207519f07e66d8491d963618252f2e0729e0b46"},
+ {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b07c538ba956843833fee1190cf769c60dc62e1cf934ed50d77d5502194d63b1"},
+ {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e28cab1582e0eec38b1f38c1c1fb2e56bce5dc180acb1724574fc5f47da2a4fe"},
+ {file = "tokenizers-0.19.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b01afb7193d47439f091cd8f070a1ced347ad0f9144952a30a41836902fe09e"},
+ {file = "tokenizers-0.19.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7fb297edec6c6841ab2e4e8f357209519188e4a59b557ea4fafcf4691d1b4c98"},
+ {file = "tokenizers-0.19.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2e8a3dd055e515df7054378dc9d6fa8c8c34e1f32777fb9a01fea81496b3f9d3"},
+ {file = "tokenizers-0.19.1-cp310-none-win32.whl", hash = "sha256:7ff898780a155ea053f5d934925f3902be2ed1f4d916461e1a93019cc7250837"},
+ {file = "tokenizers-0.19.1-cp310-none-win_amd64.whl", hash = "sha256:bea6f9947e9419c2fda21ae6c32871e3d398cba549b93f4a65a2d369662d9403"},
+ {file = "tokenizers-0.19.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5c88d1481f1882c2e53e6bb06491e474e420d9ac7bdff172610c4f9ad3898059"},
+ {file = "tokenizers-0.19.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ddf672ed719b4ed82b51499100f5417d7d9f6fb05a65e232249268f35de5ed14"},
+ {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:dadc509cc8a9fe460bd274c0e16ac4184d0958117cf026e0ea8b32b438171594"},
+ {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfedf31824ca4915b511b03441784ff640378191918264268e6923da48104acc"},
+ {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac11016d0a04aa6487b1513a3a36e7bee7eec0e5d30057c9c0408067345c48d2"},
+ {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76951121890fea8330d3a0df9a954b3f2a37e3ec20e5b0530e9a0044ca2e11fe"},
+ {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b342d2ce8fc8d00f376af068e3274e2e8649562e3bc6ae4a67784ded6b99428d"},
+ {file = "tokenizers-0.19.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d16ff18907f4909dca9b076b9c2d899114dd6abceeb074eca0c93e2353f943aa"},
+ {file = "tokenizers-0.19.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:706a37cc5332f85f26efbe2bdc9ef8a9b372b77e4645331a405073e4b3a8c1c6"},
+ {file = "tokenizers-0.19.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:16baac68651701364b0289979ecec728546133e8e8fe38f66fe48ad07996b88b"},
+ {file = "tokenizers-0.19.1-cp311-none-win32.whl", hash = "sha256:9ed240c56b4403e22b9584ee37d87b8bfa14865134e3e1c3fb4b2c42fafd3256"},
+ {file = "tokenizers-0.19.1-cp311-none-win_amd64.whl", hash = "sha256:ad57d59341710b94a7d9dbea13f5c1e7d76fd8d9bcd944a7a6ab0b0da6e0cc66"},
+ {file = "tokenizers-0.19.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:621d670e1b1c281a1c9698ed89451395d318802ff88d1fc1accff0867a06f153"},
+ {file = "tokenizers-0.19.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d924204a3dbe50b75630bd16f821ebda6a5f729928df30f582fb5aade90c818a"},
+ {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4f3fefdc0446b1a1e6d81cd4c07088ac015665d2e812f6dbba4a06267d1a2c95"},
+ {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9620b78e0b2d52ef07b0d428323fb34e8ea1219c5eac98c2596311f20f1f9266"},
+ {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04ce49e82d100594715ac1b2ce87d1a36e61891a91de774755f743babcd0dd52"},
+ {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5c2ff13d157afe413bf7e25789879dd463e5a4abfb529a2d8f8473d8042e28f"},
+ {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3174c76efd9d08f836bfccaca7cfec3f4d1c0a4cf3acbc7236ad577cc423c840"},
+ {file = "tokenizers-0.19.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c9d5b6c0e7a1e979bec10ff960fae925e947aab95619a6fdb4c1d8ff3708ce3"},
+ {file = "tokenizers-0.19.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a179856d1caee06577220ebcfa332af046d576fb73454b8f4d4b0ba8324423ea"},
+ {file = "tokenizers-0.19.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:952b80dac1a6492170f8c2429bd11fcaa14377e097d12a1dbe0ef2fb2241e16c"},
+ {file = "tokenizers-0.19.1-cp312-none-win32.whl", hash = "sha256:01d62812454c188306755c94755465505836fd616f75067abcae529c35edeb57"},
+ {file = "tokenizers-0.19.1-cp312-none-win_amd64.whl", hash = "sha256:b70bfbe3a82d3e3fb2a5e9b22a39f8d1740c96c68b6ace0086b39074f08ab89a"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:bb9dfe7dae85bc6119d705a76dc068c062b8b575abe3595e3c6276480e67e3f1"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:1f0360cbea28ea99944ac089c00de7b2e3e1c58f479fb8613b6d8d511ce98267"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:71e3ec71f0e78780851fef28c2a9babe20270404c921b756d7c532d280349214"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b82931fa619dbad979c0ee8e54dd5278acc418209cc897e42fac041f5366d626"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e8ff5b90eabdcdaa19af697885f70fe0b714ce16709cf43d4952f1f85299e73a"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e742d76ad84acbdb1a8e4694f915fe59ff6edc381c97d6dfdd054954e3478ad4"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d8c5d59d7b59885eab559d5bc082b2985555a54cda04dda4c65528d90ad252ad"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b2da5c32ed869bebd990c9420df49813709e953674c0722ff471a116d97b22d"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:638e43936cc8b2cbb9f9d8dde0fe5e7e30766a3318d2342999ae27f68fdc9bd6"},
+ {file = "tokenizers-0.19.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:78e769eb3b2c79687d9cb0f89ef77223e8e279b75c0a968e637ca7043a84463f"},
+ {file = "tokenizers-0.19.1-cp37-none-win32.whl", hash = "sha256:72791f9bb1ca78e3ae525d4782e85272c63faaef9940d92142aa3eb79f3407a3"},
+ {file = "tokenizers-0.19.1-cp37-none-win_amd64.whl", hash = "sha256:f3bbb7a0c5fcb692950b041ae11067ac54826204318922da754f908d95619fbc"},
+ {file = "tokenizers-0.19.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:07f9295349bbbcedae8cefdbcfa7f686aa420be8aca5d4f7d1ae6016c128c0c5"},
+ {file = "tokenizers-0.19.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:10a707cc6c4b6b183ec5dbfc5c34f3064e18cf62b4a938cb41699e33a99e03c1"},
+ {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6309271f57b397aa0aff0cbbe632ca9d70430839ca3178bf0f06f825924eca22"},
+ {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ad23d37d68cf00d54af184586d79b84075ada495e7c5c0f601f051b162112dc"},
+ {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:427c4f0f3df9109314d4f75b8d1f65d9477033e67ffaec4bca53293d3aca286d"},
+ {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e83a31c9cf181a0a3ef0abad2b5f6b43399faf5da7e696196ddd110d332519ee"},
+ {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c27b99889bd58b7e301468c0838c5ed75e60c66df0d4db80c08f43462f82e0d3"},
+ {file = "tokenizers-0.19.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bac0b0eb952412b0b196ca7a40e7dce4ed6f6926489313414010f2e6b9ec2adf"},
+ {file = "tokenizers-0.19.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8a6298bde623725ca31c9035a04bf2ef63208d266acd2bed8c2cb7d2b7d53ce6"},
+ {file = "tokenizers-0.19.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:08a44864e42fa6d7d76d7be4bec62c9982f6f6248b4aa42f7302aa01e0abfd26"},
+ {file = "tokenizers-0.19.1-cp38-none-win32.whl", hash = "sha256:1de5bc8652252d9357a666e609cb1453d4f8e160eb1fb2830ee369dd658e8975"},
+ {file = "tokenizers-0.19.1-cp38-none-win_amd64.whl", hash = "sha256:0bcce02bf1ad9882345b34d5bd25ed4949a480cf0e656bbd468f4d8986f7a3f1"},
+ {file = "tokenizers-0.19.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:0b9394bd204842a2a1fd37fe29935353742be4a3460b6ccbaefa93f58a8df43d"},
+ {file = "tokenizers-0.19.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4692ab92f91b87769d950ca14dbb61f8a9ef36a62f94bad6c82cc84a51f76f6a"},
+ {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6258c2ef6f06259f70a682491c78561d492e885adeaf9f64f5389f78aa49a051"},
+ {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c85cf76561fbd01e0d9ea2d1cbe711a65400092bc52b5242b16cfd22e51f0c58"},
+ {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:670b802d4d82bbbb832ddb0d41df7015b3e549714c0e77f9bed3e74d42400fbe"},
+ {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:85aa3ab4b03d5e99fdd31660872249df5e855334b6c333e0bc13032ff4469c4a"},
+ {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cbf001afbbed111a79ca47d75941e9e5361297a87d186cbfc11ed45e30b5daba"},
+ {file = "tokenizers-0.19.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4c89aa46c269e4e70c4d4f9d6bc644fcc39bb409cb2a81227923404dd6f5227"},
+ {file = "tokenizers-0.19.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:39c1ec76ea1027438fafe16ecb0fb84795e62e9d643444c1090179e63808c69d"},
+ {file = "tokenizers-0.19.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c2a0d47a89b48d7daa241e004e71fb5a50533718897a4cd6235cb846d511a478"},
+ {file = "tokenizers-0.19.1-cp39-none-win32.whl", hash = "sha256:61b7fe8886f2e104d4caf9218b157b106207e0f2a4905c9c7ac98890688aabeb"},
+ {file = "tokenizers-0.19.1-cp39-none-win_amd64.whl", hash = "sha256:f97660f6c43efd3e0bfd3f2e3e5615bf215680bad6ee3d469df6454b8c6e8256"},
+ {file = "tokenizers-0.19.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3b11853f17b54c2fe47742c56d8a33bf49ce31caf531e87ac0d7d13d327c9334"},
+ {file = "tokenizers-0.19.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d26194ef6c13302f446d39972aaa36a1dda6450bc8949f5eb4c27f51191375bd"},
+ {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e8d1ed93beda54bbd6131a2cb363a576eac746d5c26ba5b7556bc6f964425594"},
+ {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca407133536f19bdec44b3da117ef0d12e43f6d4b56ac4c765f37eca501c7bda"},
+ {file = "tokenizers-0.19.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce05fde79d2bc2e46ac08aacbc142bead21614d937aac950be88dc79f9db9022"},
+ {file = "tokenizers-0.19.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:35583cd46d16f07c054efd18b5d46af4a2f070a2dd0a47914e66f3ff5efb2b1e"},
+ {file = "tokenizers-0.19.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:43350270bfc16b06ad3f6f07eab21f089adb835544417afda0f83256a8bf8b75"},
+ {file = "tokenizers-0.19.1-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b4399b59d1af5645bcee2072a463318114c39b8547437a7c2d6a186a1b5a0e2d"},
+ {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6852c5b2a853b8b0ddc5993cd4f33bfffdca4fcc5d52f89dd4b8eada99379285"},
+ {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bcd266ae85c3d39df2f7e7d0e07f6c41a55e9a3123bb11f854412952deacd828"},
+ {file = "tokenizers-0.19.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecb2651956eea2aa0a2d099434134b1b68f1c31f9a5084d6d53f08ed43d45ff2"},
+ {file = "tokenizers-0.19.1-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:b279ab506ec4445166ac476fb4d3cc383accde1ea152998509a94d82547c8e2a"},
+ {file = "tokenizers-0.19.1-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:89183e55fb86e61d848ff83753f64cded119f5d6e1f553d14ffee3700d0a4a49"},
+ {file = "tokenizers-0.19.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2edbc75744235eea94d595a8b70fe279dd42f3296f76d5a86dde1d46e35f574"},
+ {file = "tokenizers-0.19.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:0e64bfde9a723274e9a71630c3e9494ed7b4c0f76a1faacf7fe294cd26f7ae7c"},
+ {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0b5ca92bfa717759c052e345770792d02d1f43b06f9e790ca0a1db62838816f3"},
+ {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f8a20266e695ec9d7a946a019c1d5ca4eddb6613d4f466888eee04f16eedb85"},
+ {file = "tokenizers-0.19.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63c38f45d8f2a2ec0f3a20073cccb335b9f99f73b3c69483cd52ebc75369d8a1"},
+ {file = "tokenizers-0.19.1-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dd26e3afe8a7b61422df3176e06664503d3f5973b94f45d5c45987e1cb711876"},
+ {file = "tokenizers-0.19.1-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:eddd5783a4a6309ce23432353cdb36220e25cbb779bfa9122320666508b44b88"},
+ {file = "tokenizers-0.19.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:56ae39d4036b753994476a1b935584071093b55c7a72e3b8288e68c313ca26e7"},
+ {file = "tokenizers-0.19.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f9939ca7e58c2758c01b40324a59c034ce0cebad18e0d4563a9b1beab3018243"},
+ {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6c330c0eb815d212893c67a032e9dc1b38a803eccb32f3e8172c19cc69fbb439"},
+ {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec11802450a2487cdf0e634b750a04cbdc1c4d066b97d94ce7dd2cb51ebb325b"},
+ {file = "tokenizers-0.19.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2b718f316b596f36e1dae097a7d5b91fc5b85e90bf08b01ff139bd8953b25af"},
+ {file = "tokenizers-0.19.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ed69af290c2b65169f0ba9034d1dc39a5db9459b32f1dd8b5f3f32a3fcf06eab"},
+ {file = "tokenizers-0.19.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f8a9c828277133af13f3859d1b6bf1c3cb6e9e1637df0e45312e6b7c2e622b1f"},
+ {file = "tokenizers-0.19.1.tar.gz", hash = "sha256:ee59e6680ed0fdbe6b724cf38bd70400a0c1dd623b07ac729087270caeac88e3"},
+]
+
+[package.dependencies]
+huggingface-hub = ">=0.16.4,<1.0"
+
+[package.extras]
+dev = ["tokenizers[testing]"]
+docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"]
+testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests", "ruff"]
+
+[[package]]
+name = "tqdm"
+version = "4.66.5"
+description = "Fast, Extensible Progress Meter"
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"},
+ {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[package.extras]
+dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"]
+notebook = ["ipywidgets (>=6)"]
+slack = ["slack-sdk"]
+telegram = ["requests"]
+
+[[package]]
+name = "transformers"
+version = "4.44.0"
+description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow"
+optional = true
+python-versions = ">=3.8.0"
+files = [
+ {file = "transformers-4.44.0-py3-none-any.whl", hash = "sha256:ea0ff72def71e9f4812d9414d4803b22681b1617aa6f511bd51cfff2b44a6fca"},
+ {file = "transformers-4.44.0.tar.gz", hash = "sha256:75699495e30b7635ca444d8d372e138c687ab51a875b387e33f1fb759c37f196"},
+]
+
+[package.dependencies]
+filelock = "*"
+huggingface-hub = ">=0.23.2,<1.0"
+numpy = ">=1.17"
+packaging = ">=20.0"
+pyyaml = ">=5.1"
+regex = "!=2019.12.17"
+requests = "*"
+safetensors = ">=0.4.1"
+tokenizers = ">=0.19,<0.20"
+tqdm = ">=4.27"
+
+[package.extras]
+accelerate = ["accelerate (>=0.21.0)"]
+agents = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch"]
+all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm (<=0.9.16)", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision"]
+audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"]
+benchmark = ["optimum-benchmark (>=0.2.0)"]
+codecarbon = ["codecarbon (==1.2.0)"]
+deepspeed = ["accelerate (>=0.21.0)", "deepspeed (>=0.9.3)"]
+deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.21.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"]
+dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "scipy (<1.13.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"]
+dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1,<0.14.0)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.19,<0.20)", "urllib3 (<2.0.0)"]
+dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm (<=0.9.16)", "tokenizers (>=0.19,<0.20)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"]
+flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)", "scipy (<1.13.0)"]
+flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"]
+ftfy = ["ftfy"]
+integrations = ["optuna", "ray[tune] (>=2.7.0)", "sigopt"]
+ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"]
+modelcreation = ["cookiecutter (==1.7.3)"]
+natten = ["natten (>=0.14.6,<0.15.0)"]
+onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"]
+onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"]
+optuna = ["optuna"]
+quality = ["GitPython (<3.1.19)", "datasets (!=2.5.0)", "isort (>=5.5.4)", "ruff (==0.5.1)", "urllib3 (<2.0.0)"]
+ray = ["ray[tune] (>=2.7.0)"]
+retrieval = ["datasets (!=2.5.0)", "faiss-cpu"]
+ruff = ["ruff (==0.5.1)"]
+sagemaker = ["sagemaker (>=2.31.0)"]
+sentencepiece = ["protobuf", "sentencepiece (>=0.1.91,!=0.1.92)"]
+serving = ["fastapi", "pydantic", "starlette", "uvicorn"]
+sigopt = ["sigopt"]
+sklearn = ["scikit-learn"]
+speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"]
+testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "nltk", "parameterized", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-rich", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.5.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"]
+tf = ["keras-nlp (>=0.3.1,<0.14.0)", "onnxconverter-common", "tensorflow (>2.9,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"]
+tf-cpu = ["keras (>2.9,<2.16)", "keras-nlp (>=0.3.1,<0.14.0)", "onnxconverter-common", "tensorflow-cpu (>2.9,<2.16)", "tensorflow-probability (<0.24)", "tensorflow-text (<2.16)", "tf2onnx"]
+tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"]
+timm = ["timm (<=0.9.16)"]
+tokenizers = ["tokenizers (>=0.19,<0.20)"]
+torch = ["accelerate (>=0.21.0)", "torch"]
+torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"]
+torch-vision = ["Pillow (>=10.0.1,<=15.0)", "torchvision"]
+torchhub = ["filelock", "huggingface-hub (>=0.23.2,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.19,<0.20)", "torch", "tqdm (>=4.27)"]
+video = ["av (==9.2.0)", "decord (==0.6.0)"]
+vision = ["Pillow (>=10.0.1,<=15.0)"]
+
+[[package]]
+name = "typer"
+version = "0.12.3"
+description = "Typer, build great CLIs. Easy to code. Based on Python type hints."
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "typer-0.12.3-py3-none-any.whl", hash = "sha256:070d7ca53f785acbccba8e7d28b08dcd88f79f1fbda035ade0aecec71ca5c914"},
+ {file = "typer-0.12.3.tar.gz", hash = "sha256:49e73131481d804288ef62598d97a1ceef3058905aa536a1134f90891ba35482"},
+]
+
+[package.dependencies]
+click = ">=8.0.0"
+rich = ">=10.11.0"
+shellingham = ">=1.3.0"
+typing-extensions = ">=3.7.4.3"
+
+[[package]]
+name = "typing-extensions"
+version = "4.12.2"
+description = "Backported and Experimental Type Hints for Python 3.8+"
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
+ {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
+]
+
+[[package]]
+name = "urllib3"
+version = "2.2.2"
+description = "HTTP library with thread-safe connection pooling, file post, and more."
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"},
+ {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"},
+]
+
+[package.extras]
+brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
+h2 = ["h2 (>=4,<5)"]
+socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
+zstd = ["zstandard (>=0.18.0)"]
+
+[[package]]
+name = "win32-setctime"
+version = "1.1.0"
+description = "A small Python utility to set file creation time on Windows"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "win32_setctime-1.1.0-py3-none-any.whl", hash = "sha256:231db239e959c2fe7eb1d7dc129f11172354f98361c4fa2d6d2d7e278baa8aad"},
+ {file = "win32_setctime-1.1.0.tar.gz", hash = "sha256:15cf5750465118d6929ae4de4eb46e8edae9a5634350c01ba582df868e932cb2"},
+]
+
+[package.extras]
+dev = ["black (>=19.3b0)", "pytest (>=4.6.2)"]
+
+[[package]]
+name = "yarl"
+version = "1.9.4"
+description = "Yet another URL library"
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e"},
+ {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2"},
+ {file = "yarl-1.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66"},
+ {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234"},
+ {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392"},
+ {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551"},
+ {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455"},
+ {file = "yarl-1.9.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c"},
+ {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53"},
+ {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385"},
+ {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863"},
+ {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b"},
+ {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541"},
+ {file = "yarl-1.9.4-cp310-cp310-win32.whl", hash = "sha256:7855426dfbddac81896b6e533ebefc0af2f132d4a47340cee6d22cac7190022d"},
+ {file = "yarl-1.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:848cd2a1df56ddbffeb375535fb62c9d1645dde33ca4d51341378b3f5954429b"},
+ {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:35a2b9396879ce32754bd457d31a51ff0a9d426fd9e0e3c33394bf4b9036b099"},
+ {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c7d56b293cc071e82532f70adcbd8b61909eec973ae9d2d1f9b233f3d943f2c"},
+ {file = "yarl-1.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d8a1c6c0be645c745a081c192e747c5de06e944a0d21245f4cf7c05e457c36e0"},
+ {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b3c1ffe10069f655ea2d731808e76e0f452fc6c749bea04781daf18e6039525"},
+ {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:549d19c84c55d11687ddbd47eeb348a89df9cb30e1993f1b128f4685cd0ebbf8"},
+ {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7409f968456111140c1c95301cadf071bd30a81cbd7ab829169fb9e3d72eae9"},
+ {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23a6d84d9d1738dbc6e38167776107e63307dfc8ad108e580548d1f2c587f42"},
+ {file = "yarl-1.9.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8b889777de69897406c9fb0b76cdf2fd0f31267861ae7501d93003d55f54fbe"},
+ {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:03caa9507d3d3c83bca08650678e25364e1843b484f19986a527630ca376ecce"},
+ {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e9035df8d0880b2f1c7f5031f33f69e071dfe72ee9310cfc76f7b605958ceb9"},
+ {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:c0ec0ed476f77db9fb29bca17f0a8fcc7bc97ad4c6c1d8959c507decb22e8572"},
+ {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:ee04010f26d5102399bd17f8df8bc38dc7ccd7701dc77f4a68c5b8d733406958"},
+ {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49a180c2e0743d5d6e0b4d1a9e5f633c62eca3f8a86ba5dd3c471060e352ca98"},
+ {file = "yarl-1.9.4-cp311-cp311-win32.whl", hash = "sha256:81eb57278deb6098a5b62e88ad8281b2ba09f2f1147c4767522353eaa6260b31"},
+ {file = "yarl-1.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:d1d2532b340b692880261c15aee4dc94dd22ca5d61b9db9a8a361953d36410b1"},
+ {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0d2454f0aef65ea81037759be5ca9947539667eecebca092733b2eb43c965a81"},
+ {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:44d8ffbb9c06e5a7f529f38f53eda23e50d1ed33c6c869e01481d3fafa6b8142"},
+ {file = "yarl-1.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aaaea1e536f98754a6e5c56091baa1b6ce2f2700cc4a00b0d49eca8dea471074"},
+ {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3777ce5536d17989c91696db1d459574e9a9bd37660ea7ee4d3344579bb6f129"},
+ {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fc5fc1eeb029757349ad26bbc5880557389a03fa6ada41703db5e068881e5f2"},
+ {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea65804b5dc88dacd4a40279af0cdadcfe74b3e5b4c897aa0d81cf86927fee78"},
+ {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa102d6d280a5455ad6a0f9e6d769989638718e938a6a0a2ff3f4a7ff8c62cc4"},
+ {file = "yarl-1.9.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09efe4615ada057ba2d30df871d2f668af661e971dfeedf0c159927d48bbeff0"},
+ {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:008d3e808d03ef28542372d01057fd09168419cdc8f848efe2804f894ae03e51"},
+ {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6f5cb257bc2ec58f437da2b37a8cd48f666db96d47b8a3115c29f316313654ff"},
+ {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:992f18e0ea248ee03b5a6e8b3b4738850ae7dbb172cc41c966462801cbf62cf7"},
+ {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0e9d124c191d5b881060a9e5060627694c3bdd1fe24c5eecc8d5d7d0eb6faabc"},
+ {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3986b6f41ad22988e53d5778f91855dc0399b043fc8946d4f2e68af22ee9ff10"},
+ {file = "yarl-1.9.4-cp312-cp312-win32.whl", hash = "sha256:4b21516d181cd77ebd06ce160ef8cc2a5e9ad35fb1c5930882baff5ac865eee7"},
+ {file = "yarl-1.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:a9bd00dc3bc395a662900f33f74feb3e757429e545d831eef5bb280252631984"},
+ {file = "yarl-1.9.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:63b20738b5aac74e239622d2fe30df4fca4942a86e31bf47a81a0e94c14df94f"},
+ {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d7f7de27b8944f1fee2c26a88b4dabc2409d2fea7a9ed3df79b67277644e17"},
+ {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c74018551e31269d56fab81a728f683667e7c28c04e807ba08f8c9e3bba32f14"},
+ {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca06675212f94e7a610e85ca36948bb8fc023e458dd6c63ef71abfd482481aa5"},
+ {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aef935237d60a51a62b86249839b51345f47564208c6ee615ed2a40878dccdd"},
+ {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b134fd795e2322b7684155b7855cc99409d10b2e408056db2b93b51a52accc7"},
+ {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d25039a474c4c72a5ad4b52495056f843a7ff07b632c1b92ea9043a3d9950f6e"},
+ {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f7d6b36dd2e029b6bcb8a13cf19664c7b8e19ab3a58e0fefbb5b8461447ed5ec"},
+ {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:957b4774373cf6f709359e5c8c4a0af9f6d7875db657adb0feaf8d6cb3c3964c"},
+ {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d7eeb6d22331e2fd42fce928a81c697c9ee2d51400bd1a28803965883e13cead"},
+ {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6a962e04b8f91f8c4e5917e518d17958e3bdee71fd1d8b88cdce74dd0ebbf434"},
+ {file = "yarl-1.9.4-cp37-cp37m-win32.whl", hash = "sha256:f3bc6af6e2b8f92eced34ef6a96ffb248e863af20ef4fde9448cc8c9b858b749"},
+ {file = "yarl-1.9.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4d7a90a92e528aadf4965d685c17dacff3df282db1121136c382dc0b6014d2"},
+ {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ec61d826d80fc293ed46c9dd26995921e3a82146feacd952ef0757236fc137be"},
+ {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8be9e837ea9113676e5754b43b940b50cce76d9ed7d2461df1af39a8ee674d9f"},
+ {file = "yarl-1.9.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bef596fdaa8f26e3d66af846bbe77057237cb6e8efff8cd7cc8dff9a62278bbf"},
+ {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d47552b6e52c3319fede1b60b3de120fe83bde9b7bddad11a69fb0af7db32f1"},
+ {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84fc30f71689d7fc9168b92788abc977dc8cefa806909565fc2951d02f6b7d57"},
+ {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4aa9741085f635934f3a2583e16fcf62ba835719a8b2b28fb2917bb0537c1dfa"},
+ {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:206a55215e6d05dbc6c98ce598a59e6fbd0c493e2de4ea6cc2f4934d5a18d130"},
+ {file = "yarl-1.9.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07574b007ee20e5c375a8fe4a0789fad26db905f9813be0f9fef5a68080de559"},
+ {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5a2e2433eb9344a163aced6a5f6c9222c0786e5a9e9cac2c89f0b28433f56e23"},
+ {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6ad6d10ed9b67a382b45f29ea028f92d25bc0bc1daf6c5b801b90b5aa70fb9ec"},
+ {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:6fe79f998a4052d79e1c30eeb7d6c1c1056ad33300f682465e1b4e9b5a188b78"},
+ {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a825ec844298c791fd28ed14ed1bffc56a98d15b8c58a20e0e08c1f5f2bea1be"},
+ {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8619d6915b3b0b34420cf9b2bb6d81ef59d984cb0fde7544e9ece32b4b3043c3"},
+ {file = "yarl-1.9.4-cp38-cp38-win32.whl", hash = "sha256:686a0c2f85f83463272ddffd4deb5e591c98aac1897d65e92319f729c320eece"},
+ {file = "yarl-1.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:a00862fb23195b6b8322f7d781b0dc1d82cb3bcac346d1e38689370cc1cc398b"},
+ {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:604f31d97fa493083ea21bd9b92c419012531c4e17ea6da0f65cacdcf5d0bd27"},
+ {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a854227cf581330ffa2c4824d96e52ee621dd571078a252c25e3a3b3d94a1b1"},
+ {file = "yarl-1.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ba6f52cbc7809cd8d74604cce9c14868306ae4aa0282016b641c661f981a6e91"},
+ {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6327976c7c2f4ee6816eff196e25385ccc02cb81427952414a64811037bbc8b"},
+ {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8397a3817d7dcdd14bb266283cd1d6fc7264a48c186b986f32e86d86d35fbac5"},
+ {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0381b4ce23ff92f8170080c97678040fc5b08da85e9e292292aba67fdac6c34"},
+ {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23d32a2594cb5d565d358a92e151315d1b2268bc10f4610d098f96b147370136"},
+ {file = "yarl-1.9.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ddb2a5c08a4eaaba605340fdee8fc08e406c56617566d9643ad8bf6852778fc7"},
+ {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:26a1dc6285e03f3cc9e839a2da83bcbf31dcb0d004c72d0730e755b33466c30e"},
+ {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:18580f672e44ce1238b82f7fb87d727c4a131f3a9d33a5e0e82b793362bf18b4"},
+ {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:29e0f83f37610f173eb7e7b5562dd71467993495e568e708d99e9d1944f561ec"},
+ {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:1f23e4fe1e8794f74b6027d7cf19dc25f8b63af1483d91d595d4a07eca1fb26c"},
+ {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db8e58b9d79200c76956cefd14d5c90af54416ff5353c5bfd7cbe58818e26ef0"},
+ {file = "yarl-1.9.4-cp39-cp39-win32.whl", hash = "sha256:c7224cab95645c7ab53791022ae77a4509472613e839dab722a72abe5a684575"},
+ {file = "yarl-1.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:824d6c50492add5da9374875ce72db7a0733b29c2394890aef23d533106e2b15"},
+ {file = "yarl-1.9.4-py3-none-any.whl", hash = "sha256:928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad"},
+ {file = "yarl-1.9.4.tar.gz", hash = "sha256:566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf"},
+]
+
+[package.dependencies]
+idna = ">=2.0"
+multidict = ">=4.0"
+
+[extras]
+anthropic = ["anthropic"]
+groq = ["groq"]
+image = []
+ollama = ["ollama"]
+openai = ["openai"]
+together = ["together"]
+tools = []
+transformers = ["transformers"]
+video = []
+
+[metadata]
+lock-version = "2.0"
+python-versions = "^3.12.0"
+content-hash = "680ead43569d0005695a86b502ad900a71fa2319224d011009c3b73a187af416"
diff --git a/support/plugins/mtllm/pyproject.toml b/support/plugins/mtllm/pyproject.toml
new file mode 100644
index 000000000..4a5358c9e
--- /dev/null
+++ b/support/plugins/mtllm/pyproject.toml
@@ -0,0 +1,41 @@
+[tool.poetry]
+name = "mtllm"
+version = "0.3.1"
+description = "MTLLM Provides Easy to use APIs for different LLM Providers to be used with Jaseci's Jaclang Programming Language."
+maintainers = ["Chandra Irugalbandara ", "Kugesan Sivasothynathan "]
+authors = ["Chandra Irugalbandara ", "Kugesan Sivasothynathan "]
+license = "MIT"
+readme = "README.md"
+keywords = ["llm", "jaclang", "jaseci", "mtllm"]
+
+[tool.poetry.dependencies]
+python = "^3.12.0"
+jaclang = "0.7.16"
+loguru = "^0.7.2"
+openai = { version = "^1.30.4", optional = true }
+anthropic = { version = "^0.26.1", optional = true }
+ollama = { version = "^0.2.0", optional = true }
+together = { version = "^1.2.0", optional = true }
+transformers = { version = "^4.41.1", optional = true }
+groq = { version = "^0.8.0", optional = true }
+
+[tool.poetry.group.dev.dependencies]
+pytest = "^8.3.2"
+
+[tool.poetry.extras]
+tools = ["wikipedia"]
+video = ["opencv-python-headless"]
+image = ["pillow"]
+groq = ["groq"]
+transformers = ["transformers"]
+ollama = ["ollama"]
+anthropic = ["anthropic"]
+openai = ["openai"]
+together = ["together"]
+
+[tool.poetry.plugins."jac"]
+mtllm = "mtllm.plugin:JacFeature"
+
+[build-system]
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
\ No newline at end of file
diff --git a/support/plugins/mtllm/scripts/gh_release.py b/support/plugins/mtllm/scripts/gh_release.py
new file mode 100644
index 000000000..16a4d53c8
--- /dev/null
+++ b/support/plugins/mtllm/scripts/gh_release.py
@@ -0,0 +1,38 @@
+"""GH Release script for MTLLM."""
+
+from github_release import gh_release_create
+
+import markdown_to_json
+
+import tomllib
+
+
+def get_release_info(version: str) -> str:
+ """Get release info from CHANGELOG.md."""
+
+ def list_to_markdown(items: list) -> str:
+ """Convert list to markdown."""
+ return "\n".join([f"- {item}" for item in items])
+
+ with open("CHANGELOG.md", "r") as f:
+ changelog = f.read()
+ changelog_json = markdown_to_json.dictify(changelog)
+ for release_str, release_info in changelog_json["RELEASES"].items():
+ if version in release_str:
+ return list_to_markdown(release_info)
+ raise ValueError(f"Version {version} not found in CHANGELOG.md")
+
+
+with open("pyproject.toml", "rb") as f:
+ data = tomllib.load(f)
+
+version = data["tool"]["poetry"]["version"]
+
+gh_release_create(
+ "Jaseci-Labs/mtllm",
+ version,
+ publish=True,
+ name=f"v{version}",
+ asset_pattern="dist/*",
+ body=get_release_info(version),
+)
diff --git a/support/plugins/mtllm/support/dataset_generation/.gitignore b/support/plugins/mtllm/support/dataset_generation/.gitignore
new file mode 100644
index 000000000..36d386897
--- /dev/null
+++ b/support/plugins/mtllm/support/dataset_generation/.gitignore
@@ -0,0 +1,2 @@
+dataset/
+program_args.json
\ No newline at end of file
diff --git a/support/plugins/mtllm/support/dataset_generation/chat_template.txt b/support/plugins/mtllm/support/dataset_generation/chat_template.txt
new file mode 100644
index 000000000..ddc5d9781
--- /dev/null
+++ b/support/plugins/mtllm/support/dataset_generation/chat_template.txt
@@ -0,0 +1,2 @@
+<|im_start|>user
+{input}<|im_end|>
\ No newline at end of file
diff --git a/support/plugins/mtllm/support/dataset_generation/llm_synthetic_gen.py b/support/plugins/mtllm/support/dataset_generation/llm_synthetic_gen.py
new file mode 100644
index 000000000..bfe89424a
--- /dev/null
+++ b/support/plugins/mtllm/support/dataset_generation/llm_synthetic_gen.py
@@ -0,0 +1,210 @@
+"""Synthetically generate the inputs and outputs for a mtllm task."""
+
+import argparse
+import contextlib
+import io
+import itertools
+import json
+import os
+import re
+import uuid
+
+from jaclang import jac_import
+
+from loguru import logger
+
+import pandas as pd
+
+####################################################################################################
+# THIS PIECE OF CODE MAY CHANGE BASED ON THE TYPE OF MODEL YOU ARE USING
+# AND WHETHER OR NOT YOU ARE USING DIFFERENT SYSTEM PROMPTS.
+
+with open(os.path.join(os.path.dirname(__file__), "chat_template.txt"), "r") as f:
+ INPUT_TEMPLATE: str = f.read()
+ INPUT_TEMPLATE_ARGS: list[str] = re.findall(r"{(.*?)}", INPUT_TEMPLATE)
+ assert "input" in INPUT_TEMPLATE_ARGS, "Input Template must contain input"
+
+
+def get_input_prompt(input: str) -> str:
+ """Return the input prompt."""
+ return INPUT_TEMPLATE.format(input=input)
+
+
+with open(os.path.join(os.path.dirname(__file__), "output_template.txt"), "r") as f:
+ OUTPUT_TEMPLATE: str = f.read()
+ OUTPUT_TEMPLATE_ARGS: list[str] = re.findall(r"{(.*?)}", OUTPUT_TEMPLATE)
+ assert "output" in OUTPUT_TEMPLATE_ARGS, "Output Template must contain output"
+
+
+def get_output_prompt(output: str) -> str:
+ """Return the output prompt."""
+ return OUTPUT_TEMPLATE.format(output=output)
+
+
+####################################################################################################
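+# Example: with the ChatML-style templates above, get_input_prompt("2 + 2?")
+# renders "<|im_start|>user\n2 + 2?<|im_end|>" and get_output_prompt("4")
+# renders "<|im_start|>assistant\n4<|im_end|>".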
+
+
+class LogCapture(contextlib.AbstractContextManager):
+ """Capture log messages in a context manager."""
+
+ def __init__(self) -> None:
+ """Initialize the log capture."""
+ self._log = io.StringIO()
+ self._handler_id = logger.add(self._log)
+
+ def __exit__(self, exc_type, exc_value, traceback) -> None: # noqa: ANN001
+ """Remove the log handler."""
+ logger.remove(self._handler_id)
+
+ @property
+ def log(self) -> io.StringIO:
+ """Return the log."""
+ return self._log
+
+ def getvalue(self) -> str:
+ """Return the log."""
+ return self._log.getvalue()
+
+
+def run(args: argparse.Namespace) -> None:
+ """Run the program with different arguments and log the output."""
+ # specify the program argument change logic here
+ current_level_range = range(1, 100)
+ level_difficulty_range = range(1, 10)
+
+ for current_level, level_difficulty in itertools.product(
+ current_level_range, level_difficulty_range
+ ):
+
+ # This would be better if we could pass arguments directly, e.g.
+ # jac run program.jac --current_level 1 --level_difficulty 1
+ program_args = {
+ "current_level": current_level,
+ "level_difficulty": level_difficulty,
+ }
+ with open(os.path.join(args.program_dir, "program_args.json"), "w") as f:
+ json.dump(program_args, f, indent=4)
+
+ with LogCapture() as log_capture:
+ try:
+ jac_import(args.program, args.program_dir)
+ except Exception as e:
+ logger.error(e)
+
+ with open(
+ os.path.join(
+ args.output_dir,
+ args.output_name,
+ f"log_capture_{current_level}_{level_difficulty}.log",
+ ),
+ "w",
+ ) as f:
+ f.write(log_capture.getvalue())
+
+
+def read_logs(log_file: str) -> list[dict[str, str]]:
+ """Read the logs and return the input and output."""
+ log_list = []
+ with open(log_file, "r") as f:
+ logs_str = f.read()
+ date = logs_str[:10]
+ for log_str in logs_str.split(date):
+ log_str = log_str.strip()
+ if not log_str:
+ continue
+ search = re.search(r" - (Meaning In|Meaning Out)\n(.+)", log_str, re.DOTALL)
+ if search:
+ log_list.append({"type": search.group(1), "result": search.group(2)})
+ else:
+ log_list.append({"type": "Error", "result": log_str})
+ return log_list
+
+
+def filter_logs(logs: list[dict[str, str]]) -> list[dict[str, str]]:
+ """Filter the logs to get the input and output as one object. skip the ones that ended with error."""
+ filtered_logs = []
+ # go from bottom to top
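+ # each "Meaning Out" entry is assumed to directly follow its "Meaning In",
+ # while an "Error" entry invalidates the pair before it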
+ i = len(logs) - 1
+ while i >= 0:
+ if logs[i]["type"] == "Meaning Out":
+ filtered_logs.append(
+ {"input": logs[i - 1]["result"], "output": logs[i]["result"]}
+ )
+ i -= 2
+ elif logs[i]["type"] == "Error":
+ i -= 3
+ else:
+ print("Unexpected log type", logs[i]["type"])
+ i -= 1
+ return filtered_logs
+
+
+def convert_to_dataset(args: argparse.Namespace) -> pd.DataFrame:
+ """Reads the logs in the output directory and converts them to a one csv file. with input and output columns."""
+ log_files = os.listdir(os.path.join(args.output_dir, args.output_name))
+ logs: list[dict[str, str]] = []
+ for log_file in log_files:
+ logs.extend(
+ read_logs(os.path.join(args.output_dir, args.output_name, log_file))
+ )
+ logs = filter_logs(logs)
+ df = pd.DataFrame(logs)
+ df["input_prompt"] = df["input"].apply(get_input_prompt)
+ df["output_prompt"] = df["output"].apply(get_output_prompt)
+ return df
+
+
+def push_to_hf(df: pd.DataFrame, args: argparse.Namespace) -> None:
+ """Push the dataset to Hugging Face."""
+ from huggingface_hub import HfApi
+
+ try:
+ HfApi().create_repo(repo_id=args.repo_id, repo_type="dataset")
+ except Exception as e:
+ print(e)
+ df.to_parquet(f"hf://datasets/{args.repo_id}/data.parquet")
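+ # pandas resolves the "hf://" URL through huggingface_hub's fsspec filesystem,
+ # so the parquet file is written straight into the dataset repo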
+ print("Dataset pushed to Hugging Face")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--program",
+ type=str,
+ required=True,
+ help="Name of the program to run",
+ )
+ parser.add_argument(
+ "--output_name",
+ type=str,
+ default=uuid.uuid4().hex,
+ help="Name of the output directory",
+ )
+ parser.add_argument(
+ "--program_dir",
+ type=str,
+ default=".",
+ help="Directory containing the program.jac and program_args.json file",
+ )
+ parser.add_argument(
+ "--output_dir",
+ type=str,
+ default="data_generated",
+ help="Directory to write the output files",
+ )
+ parser.add_argument(
+ "--repo_id",
+ type=str,
+ default=None,
+ help="Hugging Face Repository ID to push the dataset to.",
+ )
+ args = parser.parse_args()
+
+ os.makedirs(args.output_dir, exist_ok=True)
+ os.makedirs(os.path.join(args.output_dir, args.output_name), exist_ok=True)
+
+ # uncomment below line to run the program
+ # run(args)
+ df = convert_to_dataset(args)
+ if args.repo_id:
+ push_to_hf(df, args)
+ df.to_csv(os.path.join(args.output_dir, args.output_name, "dataset.csv"))
diff --git a/support/plugins/mtllm/support/dataset_generation/output_template.txt b/support/plugins/mtllm/support/dataset_generation/output_template.txt
new file mode 100644
index 000000000..9dbf8103d
--- /dev/null
+++ b/support/plugins/mtllm/support/dataset_generation/output_template.txt
@@ -0,0 +1,2 @@
+<|im_start|>assistant
+{output}<|im_end|>
\ No newline at end of file
diff --git a/support/plugins/mtllm/support/dataset_generation/program.jac b/support/plugins/mtllm/support/dataset_generation/program.jac
new file mode 100644
index 000000000..bb1ce5e5a
--- /dev/null
+++ b/support/plugins/mtllm/support/dataset_generation/program.jac
@@ -0,0 +1,116 @@
+import:py from mtllm.llms { OpenAI }
+import:py json;
+import:py os;
+
+glob llm = OpenAI(verbose=True, model_name="gpt-4o");
+
+obj Position {
+ has x: int,
+ y: int;
+}
+
+obj Wall {
+ has start_pos: Position,
+ end_pos: Position;
+}
+
+obj Map {
+ has level: Level;
+ has walls: list[Wall],
+ small_obstacles: list[Position];
+ has enemies: list[Position];
+ has player_pos: Position;
+}
+
+obj Level {
+ has name: 'Fantasy based Name': str,
+ difficulty: int;
+ has width: int,
+ height: int,
+ num_wall: int,
+ num_enemies: int;
+ has time_countdown: int,
+ n_retries_allowed: int;
+}
+
+obj LevelManager {
+ has current_level: int = 0,
+ current_difficulty: int = 1,
+ prev_levels: list[Level] = [],
+ prev_level_maps: list[Map] = [];
+
+ can create_next_level(last_levels: list[Level], difficulty: int, level_width: int, level_height: int) -> Level by llm(temperature=1.0);
+ '''Get the Next Level'''
+ can get_next_level -> tuple(Level, Map) {
+ self.current_level += 1;
+
+ # Keeping Only the Last 2 Levels
+ if len(self.prev_levels) > 2 {
+ self.prev_levels.pop(0);
+ self.prev_level_maps.pop(0);
+ }
+ # Generating the New Level
+ new_level = self.create_next_level(
+ self.prev_levels,
+ self.current_difficulty,
+ 20,
+ 20
+ );
+ self.prev_levels.append(new_level);
+ # Generating the Map of the New Level
+ new_level_map = Map(level=new_level by llm());
+ self.prev_level_maps.append(new_level_map);
+ # Increasing the Difficulty for end of every 2 Levels
+ if self.current_level % 2 == 0 {
+ self.current_difficulty += 1;
+ }
+ return (new_level, new_level_map);
+ }
+}
+
+'''Get the map of the level'''
+can get_map(map: Map) -> list[str] {
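+ # tile legend: '.' empty, 'B' wall/obstacle, 'E' enemy, 'P' player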
+ map_tiles = [['.' for _ in range(map.level.width)] for _ in range(map.level.height)];
+
+ for wall in map.walls {
+ for x in range(wall.start_pos.x, wall.end_pos.x + 1) {
+ for y in range(wall.start_pos.y, wall.end_pos.y + 1) {
+ map_tiles[y - 1][x - 1] = 'B';
+ }
+ }
+ }
+
+ for obs in map.small_obstacles {
+ map_tiles[obs.y - 1][obs.x - 1] = 'B';
+ }
+
+ for enemy in map.enemies {
+ map_tiles[enemy.y - 1][enemy.x - 1] = 'E';
+ }
+ map_tiles[map.player_pos.y - 1][map.player_pos.x - 1] = 'P';
+ map_tiles = [['B'] + row + ['B'] for row in map_tiles];
+ map_tiles = [['B' for _ in range(map.level.width
+ + 2)]]
+ + map_tiles
+ + [['B' for _ in range(map.level.width
+ + 2)]];
+ return [''.join(row) for row in map_tiles];
+}
+
+with entry {
+ with open(
+ os.path.join(
+ os.path.dirname(__file__),
+ "program_args.json"
+ ),
+ "r"
+ ) as f {
+ args = json.load(f);
+ }
+ current_level = args["current_level"];
+ level_difficulty = args["level_difficulty"];
+ level_manager = LevelManager(current_level, level_difficulty);
+ for _ in range(3) {
+ (new_level, new_level_map) = level_manager.get_next_level();
+ }
+}
diff --git a/support/plugins/mtllm/support/dataset_generation/program_random.jac b/support/plugins/mtllm/support/dataset_generation/program_random.jac
new file mode 100644
index 000000000..66c9f1a55
--- /dev/null
+++ b/support/plugins/mtllm/support/dataset_generation/program_random.jac
@@ -0,0 +1,173 @@
+import:py from mtllm.llms.base { BaseLLM }
+import:py json;
+import:py os;
+import:py random;
+
+glob output_str = "None";
+
+obj model :BaseLLM: {
+ can init {
+ self.verbose = True;
+ self.max_tries = 1;
+ }
+
+ can __infer__(meaning_in: str, **kwargs: dict) {
+ :g: output_str ;
+
+ return f"[Output] {output_str}";
+ }
+}
+
+glob llm = model();
+
+obj Position {
+ has x: int,
+ y: int;
+}
+
+obj Wall {
+ has start_pos: Position,
+ end_pos: Position;
+}
+
+obj Map {
+ has level: Level;
+ has walls: list[Wall],
+ small_obstacles: list[Position];
+ has enemies: list[Position];
+ has player_pos: Position;
+}
+
+obj Level {
+ has name: 'Fantasy based Name': str,
+ difficulty: int;
+ has width: int,
+ height: int,
+ num_wall: int,
+ num_enemies: int;
+ has time_countdown: int,
+ n_retries_allowed: int;
+}
+
+obj LevelManager {
+ has current_level: int = 0,
+ current_difficulty: int = 1,
+ prev_levels: list[Level] = [],
+ prev_level_maps: list[Map] = [];
+
+ can create_next_level(last_levels: list[Level], difficulty: int, level_width: int, level_height: int) -> Level by llm(temperature=1.0);
+ '''Get the Next Level'''
+ can get_next_level -> tuple(Level, Map) {
+ :g: output_str ;
+
+ self.current_level += 1;
+
+ # Keeping Only the Last 2 Levels
+ if len(self.prev_levels) > 2 {
+ self.prev_levels.pop(0);
+ self.prev_level_maps.pop(0);
+ }
+ # Generating the New Level
+ output_str = str(
+ Level(
+ name="Level_" + str(self.current_level),
+ difficulty=self.current_difficulty,
+ width=random.randint(10, 30 + self.current_difficulty * 5),
+ height=random.randint(10, 30 + self.current_difficulty * 5),
+ num_wall=random.randint(10, 20 + self.current_difficulty * 5),
+ num_enemies=random.randint(5, 10 + self.current_difficulty * 2),
+ time_countdown=random.randint(100, 300 + self.current_difficulty * 50),
+ n_retries_allowed=random.randint(1, 5 + self.current_difficulty)
+ )
+ );
+ new_level = self.create_next_level(
+ self.prev_levels,
+ self.current_difficulty,
+ 20,
+ 20
+ );
+ self.prev_levels.append(new_level);
+ # Generating the Map of the New Level
+ output_str = str(
+ Map(
+ level=new_level,
+ walls=[Wall(
+ start_pos=Position(
+ x=random.randint(1, new_level.width),
+ y=random.randint(1, new_level.height)
+ ),
+ end_pos=Position(
+ x=random.randint(1, new_level.width),
+ y=random.randint(1, new_level.height)
+ )
+ ) for _ in range(new_level.num_wall)],
+ small_obstacles=[Position(
+ x=random.randint(1, new_level.width),
+ y=random.randint(1, new_level.height)
+ ) for _ in range(5)],
+ enemies=[Position(
+ x=random.randint(1, new_level.width),
+ y=random.randint(1, new_level.height)
+ ) for _ in range(new_level.num_enemies)],
+ player_pos=Position(
+ x=random.randint(1, new_level.width),
+ y=random.randint(1, new_level.height)
+ )
+ )
+ );
+ new_level_map = Map(level=new_level by llm());
+ self.prev_level_maps.append(new_level_map);
+ # Increasing the Difficulty for end of every 2 Levels
+ if self.current_level % 2 == 0 {
+ self.current_difficulty += 1;
+ }
+ return (new_level, new_level_map);
+ }
+}
+
+'''Get the map of the level'''
+can get_map(map: Map) -> list[str] {
+ map_tiles = [['.' for _ in range(map.level.width)] for _ in range(map.level.height)];
+
+ for wall in map.walls {
+ for x in range(wall.start_pos.x, wall.end_pos.x + 1) {
+ for y in range(wall.start_pos.y, wall.end_pos.y + 1) {
+ map_tiles[y - 1][x - 1] = 'B';
+ }
+ }
+ }
+
+ for obs in map.small_obstacles {
+ map_tiles[obs.y - 1][obs.x - 1] = 'B';
+ }
+
+ for enemy in map.enemies {
+ map_tiles[enemy.y - 1][enemy.x - 1] = 'E';
+ }
+ map_tiles[map.player_pos.y - 1][map.player_pos.x - 1] = 'P';
+ map_tiles = [['B'] + row + ['B'] for row in map_tiles];
+ map_tiles = [['B' for _ in range(map.level.width
+ + 2)]]
+ + map_tiles
+ + [['B' for _ in range(map.level.width
+ + 2)]];
+ return [''.join(row) for row in map_tiles];
+}
+
+with entry {
+ with open(
+ os.path.join(
+ os.path.dirname(__file__),
+ "program_args.json"
+ ),
+ "r"
+ ) as f {
+ args = json.load(f);
+ }
+ current_level = args["current_level"];
+ level_difficulty = args["level_difficulty"];
+ level_manager = LevelManager(current_level, level_difficulty);
+ for _ in range(3) {
+ (new_level, new_level_map) = level_manager.get_next_level();
+ }
+}
diff --git a/support/plugins/mtllm/support/dataset_generation/requirements.txt b/support/plugins/mtllm/support/dataset_generation/requirements.txt
new file mode 100644
index 000000000..e9af75e22
--- /dev/null
+++ b/support/plugins/mtllm/support/dataset_generation/requirements.txt
@@ -0,0 +1,2 @@
+pandas
+datasets
\ No newline at end of file
diff --git a/support/plugins/mtllm/support/finetune_llm/.gitignore b/support/plugins/mtllm/support/finetune_llm/.gitignore
new file mode 100644
index 000000000..ecf37bf72
--- /dev/null
+++ b/support/plugins/mtllm/support/finetune_llm/.gitignore
@@ -0,0 +1,2 @@
+mtllm*/
+wandb/
\ No newline at end of file
diff --git a/support/plugins/mtllm/support/finetune_llm/README.md b/support/plugins/mtllm/support/finetune_llm/README.md
new file mode 100644
index 000000000..4733a0684
--- /dev/null
+++ b/support/plugins/mtllm/support/finetune_llm/README.md
@@ -0,0 +1,32 @@
+# Finetuning Scripts
+The default `config.yaml` provides an example of how to finetune a SmolLM model for the Map Generation Task. Depending on your specific task, you can modify the `config.yaml` file to finetune the model accordingly. Follow the steps below to finetune the model:
+
+1. Install the required dependencies:
+```bash
+pip install -r requirements.txt
+```
+
+2. If you plan to push the model to the Hugging Face Model Hub, log in first and, when prompted, save the token as a git credential so you don't lose it:
+```bash
+huggingface-cli login
+```
+
+3. To finetune the model, run the following command:
+```bash
+python train.py --config config.yaml --push_to_hf
+```
+
+4. To push the model to the Hugging Face Model Hub, use the following command:
+```bash
+python merge_n_push.py --config config.yaml --checkpoint 500
+```
+If you don't want to push the model to the Hugging Face Model Hub, omit the `--push_to_hf` flag in step 3.
+
+5. Test the trained model using the `test.ipynb` notebook. You can also run it in Colab. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://github.com/Jaseci-Labs/mtllm/blob/main/support/finetune_llm/test.ipynb)
+
+6. To evaluate the model, run the following command:
+```bash
+python evaluate.py --config config.yaml --checkpoint 500 --eval_data chandralegend/mtllm_eval
+```
+Make sure to replace `checkpoint` with the desired checkpoint number and `eval_data` with the dataset you want to evaluate on. The dataset should be available in the Hugging Face Dataset Hub.
diff --git a/support/plugins/mtllm/support/finetune_llm/config.yaml b/support/plugins/mtllm/support/finetune_llm/config.yaml
new file mode 100644
index 000000000..40d67a229
--- /dev/null
+++ b/support/plugins/mtllm/support/finetune_llm/config.yaml
@@ -0,0 +1,37 @@
+model:
+ hf_dataset: ["chandralegend/map_gen_randomized", "chandralegend/mtllm-level-gen-test"]
+ hf_model: "HuggingFaceTB/SmolLM-1.7B-Instruct"
+ output_model: "mtllm-levelgen-smollm-1.7b-chat"
+
+lora_config:
+ r: 8
+ lora_alpha: 8
+ lora_dropout: 0.05
+ bias: "none"
+ task_type: "CASUAL_LM"
+
+training_args:
+ learning_rate: 0.00002
+ lr_scheduler_type: "cosine"
+ per_device_train_batch_size: 1
+ # per_device_eval_batch_size: 2
+ gradient_accumulation_steps: 4
+ optim: "paged_adamw_32bit"
+ save_strategy: "steps"
+ save_steps: 100
+ # eval_strategy: "steps"
+ # eval_steps: 500
+ logging_steps: 50
+ save_total_limit: 4
+ max_steps: -1
+ fp16: false
+ bf16: false
+ # eval_on_start: true
+ # do_eval: true
+
+trainer:
+ dataset_text_field: "text"
+ max_seq_length: 2048
+
+push_to_hf:
+ hf_username: chandralegend
\ No newline at end of file
diff --git a/support/plugins/mtllm/support/finetune_llm/merge_n_push.py b/support/plugins/mtllm/support/finetune_llm/merge_n_push.py
new file mode 100644
index 000000000..f87a34a35
--- /dev/null
+++ b/support/plugins/mtllm/support/finetune_llm/merge_n_push.py
@@ -0,0 +1,62 @@
+"""Merge the LoRA with the Base Model and push to the Hugging Face Hub."""
+
+import argparse
+import os
+
+from peft import PeftModel
+
+import torch
+
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+from utils import load_config
+
+
+def merge_n_push(config: argparse.Namespace, checkpoint: str) -> None:
+ """Merge the LoRA with the Base Model and push to the Hugging Face Hub."""
+ # Merging the LoRA with the base model
+ model = AutoModelForCausalLM.from_pretrained(
+ config.model["hf_model"],
+ torch_dtype=torch.float16,
+ load_in_8bit=False,
+ device_map="auto",
+ trust_remote_code=True,
+ )
+ tokenizer = AutoTokenizer.from_pretrained(config.model["hf_model"])
+ peft_model = PeftModel.from_pretrained(
+ model,
+ os.path.join(config.model["output_model"], f"checkpoint-{checkpoint}"),
+ from_transformers=True,
+ device_map="auto",
+ )
+ model = peft_model.merge_and_unload()
+ model.save_pretrained(os.path.join(config.model["output_model"], "merged"))
+ tokenizer.save_pretrained(os.path.join(config.model["output_model"], "merged"))
+
+ # Pushing the model to the Hugging Face Hub
+ model.push_to_hub(
+ f"{config.push_to_hf['hf_username']}/{config.model['output_model']}"
+ )
+ tokenizer.push_to_hub(
+ f"{config.push_to_hf['hf_username']}/{config.model['output_model']}"
+ )
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--config",
+ type=str,
+ default="config.yaml",
+ help="Path to the configuration file",
+ )
+ parser.add_argument(
+ "--checkpoint",
+ type=int,
+ default=0,
+ help="Checkpoint to merge with the base model",
+ )
+ args = parser.parse_args()
+
+ config = load_config(args.config)
+ merge_n_push(config, args.checkpoint)
diff --git a/support/plugins/mtllm/support/finetune_llm/requirements.txt b/support/plugins/mtllm/support/finetune_llm/requirements.txt
new file mode 100644
index 000000000..66db58156
--- /dev/null
+++ b/support/plugins/mtllm/support/finetune_llm/requirements.txt
@@ -0,0 +1,8 @@
+accelerate
+peft
+bitsandbytes
+transformers
+trl
+loguru
+datasets
+wandb
\ No newline at end of file
diff --git a/support/plugins/mtllm/support/finetune_llm/test.ipynb b/support/plugins/mtllm/support/finetune_llm/test.ipynb
new file mode 100644
index 000000000..fa60e01c7
--- /dev/null
+++ b/support/plugins/mtllm/support/finetune_llm/test.ipynb
@@ -0,0 +1,279 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://github.com/Jaseci-Labs/mtllm/blob/main/support/finetune_llm/test.ipynb)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/opt/conda/envs/mtllm_train/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+ " from .autonotebook import tqdm as notebook_tqdm\n"
+ ]
+ }
+ ],
+ "source": [
+ "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
+ "import torch\n",
+ "\n",
+ " # Change this to the model you want to use\n",
+ "checkpoint = \"chandralegend/mtllm-levelgen-smollm-1.7b-chat\"\n",
+ "\n",
+ "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
+ "\n",
+ "tokenizer = AutoTokenizer.from_pretrained(checkpoint, load_in_4bit=True)\n",
+ "model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from datasets import Dataset, load_dataset\n",
+ "\n",
+ "\n",
+ "def formatted_train(input: str, response: str) -> str:\n",
+ " \"\"\"Format the input and response into the chat prompt format.\"\"\"\n",
+ " return f\"{input}\\n{response}\\n\"\n",
+ "\n",
+ "\n",
+ "def prepare_train_data(dataset: str) -> Dataset:\n",
+ " \"\"\"Prepare the training data for the MTLLM model.\"\"\"\n",
+ " _dataset = load_dataset(dataset)\n",
+ " dataset_df = _dataset[\"train\"].to_pandas()\n",
+ " dataset_df[\"text\"] = dataset_df[[\"input_prompt\", \"output_prompt\"]].apply(\n",
+ " lambda x: formatted_train(x[\"input_prompt\"], x[\"output_prompt\"]), axis=1\n",
+ " )\n",
+ " _dataset_ = Dataset.from_pandas(dataset_df)\n",
+ " return _dataset_\n",
+ "\n",
+ "test_data = prepare_train_data(\"chandralegend/mtllm-level-gen-synthetic\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Input: \n",
+ "\n",
+ "[System Prompt]\n",
+ "This is an operation you must perform and return the output values. Neither, the methodology, extra sentences nor the code are not needed.\n",
+ "Input/Type formatting: Explanation of the Input (variable_name) (type) = value\n",
+ "\n",
+ "[Information]\n",
+ "\n",
+ "\n",
+ "[Context]\n",
+ "\n",
+ "\n",
+ "[Inputs Information]\n",
+ "(last_levels) (list[Level]) = [Level(name=\"Aldur's Keep\", difficulty=3, width=20, height=20, num_wall=23, num_enemies=6, time_countdown=90, n_retries_allowed=3), Level(name=\"Dragon's Roost\", difficulty=3, width=20, height=20, num_wall=25, num_enemies=8, time_countdown=85, n_retries_allowed=3)]\n",
+ "(difficulty) (int) = 4\n",
+ "(level_width) (int) = 20\n",
+ "(level_height) (int) = 20\n",
+ "\n",
+ "[Output Information]\n",
+ "(Level)\n",
+ "\n",
+ "[Type Explanations]\n",
+ "(Level) (obj) eg:- Level(name=str, difficulty=int, width=int, height=int, num_wall=int, num_enemies=int, time_countdown=int, n_retries_allowed=int) -> Fantasy based Name (name) (str), (difficulty) (int), (width) (int), (height) (int), (num_wall) (int), (num_enemies) (int), (time_countdown) (int), (n_retries_allowed) (int)\n",
+ "\n",
+ "[Action]\n",
+ "create_next_level\n",
+ "Generate and return the output result(s) only, adhering to the provided Type in the following format\n",
+ "\n",
+ "[Output] \n",
+ "\n",
+ "\n",
+ "Expected Output: \n",
+ "\n",
+ "[Output] Level(name=\"Griffon's Perch\", difficulty=4, width=20, height=20, num_wall=27, num_enemies=10, time_countdown=80, n_retries_allowed=3)\n"
+ ]
+ }
+ ],
+ "source": [
+ "idx = 11\n",
+ "sample_input = test_data[idx][\"input\"]\n",
+ "expected_output = test_data[idx][\"output\"]\n",
+ "\n",
+ "print(f\"Input: \\n\\n{sample_input}\\n\\n\")\n",
+ "print(f\"Expected Output: \\n\\n{expected_output}\")\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "<|im_start|>user\n",
+ "[System Prompt]\n",
+ "This is an operation you must perform and return the output values. Neither, the methodology, extra sentences nor the code are not needed.\n",
+ "Input/Type formatting: Explanation of the Input (variable_name) (type) = value\n",
+ "\n",
+ "[Information]\n",
+ "\n",
+ "\n",
+ "[Context]\n",
+ "\n",
+ "\n",
+ "[Inputs Information]\n",
+ "(last_levels) (list[Level]) = [Level(name=\"Aldur's Keep\", difficulty=3, width=20, height=20, num_wall=23, num_enemies=6, time_countdown=90, n_retries_allowed=3), Level(name=\"Dragon's Roost\", difficulty=3, width=20, height=20, num_wall=25, num_enemies=8, time_countdown=85, n_retries_allowed=3)]\n",
+ "(difficulty) (int) = 4\n",
+ "(level_width) (int) = 20\n",
+ "(level_height) (int) = 20\n",
+ "\n",
+ "[Output Information]\n",
+ "(Level)\n",
+ "\n",
+ "[Type Explanations]\n",
+ "(Level) (obj) eg:- Level(name=str, difficulty=int, width=int, height=int, num_wall=int, num_enemies=int, time_countdown=int, n_retries_allowed=int) -> Fantasy based Name (name) (str), (difficulty) (int), (width) (int), (height) (int), (num_wall) (int), (num_enemies) (int), (time_countdown) (int), (n_retries_allowed) (int)\n",
+ "\n",
+ "[Action]\n",
+ "create_next_level\n",
+ "Generate and return the output result(s) only, adhering to the provided Type in the following format\n",
+ "\n",
+ "[Output] <|im_end|>\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "messages = [\n",
+ " {\"role\": \"user\", \"content\": sample_input},\n",
+ "]\n",
+ "model_input_str = tokenizer.apply_chat_template(messages, tokenize=False)\n",
+ "print(model_input_str)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model_input = tokenizer.encode(model_input_str, return_tensors=\"pt\").to(device)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "torch.Size([1, 418])"
+ ]
+ },
+ "execution_count": 13,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "model_input.shape"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "outputs = model.generate(model_input, max_new_tokens=100)\n",
+ "output = tokenizer.decode(outputs[0])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "<|im_start|>assistant\n",
+ "[Output] Level(name='Dragon's Roost', difficulty=4, width=20, height=20, num_wall=25, num_enemies=8, time_countdown=85, n_retries_allowed=3)\n",
+ "\n",
+ "[Type] Level(name='', difficulty=4, width=20, height=20, num_wall=25, num_enemies=\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(output[len(model_input_str):])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "<|im_start|>assistant\n",
+ "[Output] Level(name=\"Griffon's Perch\", difficulty=4, width=20, height=20, num_wall=27, num_enemies=10, time_countdown=80, n_retries_allowed=3)<|im_end|>\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "messages = [\n",
+ " {\"role\": \"assistant\", \"content\": expected_output},\n",
+ "]\n",
+ "expected_output_str = tokenizer.apply_chat_template(messages, tokenize=False)\n",
+ "print(expected_output_str)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "mtllm_train",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/support/plugins/mtllm/support/finetune_llm/train.py b/support/plugins/mtllm/support/finetune_llm/train.py
new file mode 100644
index 000000000..7985ec599
--- /dev/null
+++ b/support/plugins/mtllm/support/finetune_llm/train.py
@@ -0,0 +1,75 @@
+"""Training script for the MTLLM Model finetuning task."""
+
+import argparse
+import os
+
+from merge_n_push import merge_n_push
+
+from peft import LoraConfig
+
+from transformers import TrainingArguments
+
+from trl import SFTTrainer
+
+from utils import load_config
+from utils.dataset import prepare_train_data
+from utils.model import get_model_tokenizer
+
+import wandb
+
+seed = 42
+
+
+def train(config: argparse.Namespace) -> None:
+ """Train the model on the given dataset."""
+ wandb.init(project=config.model["output_model"]) # type: ignore
+ os.environ["WANDB_PROJECT"] = config.model["output_model"]
+ os.environ["WANDB_LOG_MODEL"] = "checkpoint"
+
+ train_data = prepare_train_data(config.model["hf_dataset"])
+ train_eval_data = train_data.train_test_split(test_size=0.2, seed=seed)
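+ # hold out 20% of the shuffled, combined dataset for evaluation (fixed seed for reproducibility)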
+ train_data = train_eval_data["train"]
+ eval_data = train_eval_data["test"]
+
+ model, tokenizer = get_model_tokenizer(config.model["hf_model"])
+
+ peft_config = LoraConfig(**config.lora_config)
+ training_args = TrainingArguments(
+ output_dir=config.model["output_model"],
+ report_to="wandb",
+ **config.training_args
+ )
+
+ trainer = SFTTrainer(
+ model=model,
+ train_dataset=train_data,
+ eval_dataset=eval_data,
+ peft_config=peft_config,
+ args=training_args,
+ tokenizer=tokenizer,
+ packing=False,
+ **config.trainer
+ )
+ trainer.train()
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--config",
+ type=str,
+ default="config.yaml",
+ help="Path to the configuration file",
+ )
+ parser.add_argument(
+ "--push_to_hf",
+ action="store_true",
+ help="Push the trained model to the Hugging Face Hub",
+ )
+ args = parser.parse_args()
+ config = load_config(args.config)
+ train(config)
+
+ if args.push_to_hf:
+ checkpoint = input("Enter the checkpoint to push to the Hugging Face Hub: ")
+ merge_n_push(config, checkpoint)
diff --git a/support/plugins/mtllm/support/finetune_llm/utils/__init__.py b/support/plugins/mtllm/support/finetune_llm/utils/__init__.py
new file mode 100644
index 000000000..5b118d55e
--- /dev/null
+++ b/support/plugins/mtllm/support/finetune_llm/utils/__init__.py
@@ -0,0 +1,12 @@
+"""Utility functions for finetuning LLMs."""
+
+from argparse import Namespace
+
+import yaml
+
+
+def load_config(yaml_file: str) -> Namespace:
+ """Load the configuration file."""
+ with open(yaml_file, "r") as file:
+ config = yaml.safe_load(file)
+ return Namespace(**config)
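+# returning a Namespace gives callers attribute access to the top-level YAML
+# sections, e.g. config.model["hf_model"] in train.py and merge_n_push.py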
diff --git a/support/plugins/mtllm/support/finetune_llm/utils/dataset.py b/support/plugins/mtllm/support/finetune_llm/utils/dataset.py
new file mode 100644
index 000000000..4cb85cf51
--- /dev/null
+++ b/support/plugins/mtllm/support/finetune_llm/utils/dataset.py
@@ -0,0 +1,26 @@
+"""Utility functions for dataset processing."""
+
+from datasets import Dataset, load_dataset
+
+import pandas as pd
+
+
+def formatted_train(input: str, response: str) -> str:
+ """Format the input and response into the chat prompt format."""
+ return f"{input}\n{response}"
+
+
+def prepare_train_data(datasets: list[str]) -> Dataset:
+ """Prepare the training data for the MTLLM model."""
+ dataset_df: pd.DataFrame | None = None
+ for dataset in datasets:
+ _dataset = load_dataset(dataset)
+ if dataset_df is None:
+ dataset_df = _dataset["train"].to_pandas()
+ else:
+ dataset_df = pd.concat([dataset_df, _dataset["train"].to_pandas()], ignore_index=True)
+ dataset_df["text"] = dataset_df[["input_prompt", "output_prompt"]].apply(
+ lambda x: formatted_train(x["input_prompt"], x["output_prompt"]), axis=1
+ )
+ _dataset_ = Dataset.from_pandas(dataset_df)
+ return _dataset_.shuffle()
diff --git a/support/plugins/mtllm/support/finetune_llm/utils/model.py b/support/plugins/mtllm/support/finetune_llm/utils/model.py
new file mode 100644
index 000000000..c663310ed
--- /dev/null
+++ b/support/plugins/mtllm/support/finetune_llm/utils/model.py
@@ -0,0 +1,21 @@
+"""Utility functions for model."""
+
+from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+
+
+def get_model_tokenizer(model_id: str) -> tuple:
+ """Get the model and tokenizer for the given model_id."""
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ tokenizer.pad_token = tokenizer.eos_token
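+ # reuse EOS as the padding token; many causal LM tokenizers ship without one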
+ bnb_config = BitsAndBytesConfig(
+ load_in_4bit=True,
+ bnb_4bit_quant_type="nf4",
+ bnb_4bit_compute_dtype="float16",
+ bnb_4bit_use_double_quant=True,
+ )
+ model = AutoModelForCausalLM.from_pretrained(
+ model_id, quantization_config=bnb_config, device_map="auto"
+ )
+ model.config.use_cache = False
+ model.config.pretraining_tp = 1
+ return model, tokenizer
diff --git a/support/plugins/mtllm/tests/fixtures/math_question.jpg b/support/plugins/mtllm/tests/fixtures/math_question.jpg
new file mode 100644
index 000000000..901de8aef
Binary files /dev/null and b/support/plugins/mtllm/tests/fixtures/math_question.jpg differ
diff --git a/support/plugins/mtllm/tests/fixtures/mugen.mp4 b/support/plugins/mtllm/tests/fixtures/mugen.mp4
new file mode 100644
index 000000000..2de1a4e56
Binary files /dev/null and b/support/plugins/mtllm/tests/fixtures/mugen.mp4 differ
diff --git a/support/plugins/mtllm/tests/fixtures/with_llm_function.jac b/support/plugins/mtllm/tests/fixtures/with_llm_function.jac
new file mode 100644
index 000000000..d0ef03059
--- /dev/null
+++ b/support/plugins/mtllm/tests/fixtures/with_llm_function.jac
@@ -0,0 +1,34 @@
+import:py from mtllm.llms { BaseLLM }
+
+obj model :BaseLLM: {
+ can init {
+ super.__init__();
+ }
+
+ can __infer__(meaning_in: str, **kwargs: dict) {
+ print(kwargs);
+ print(meaning_in);
+ return "[Output] Something";
+ }
+}
+
+glob llm = model();
+
+glob emoji_examples: 'Examples of Text to Emoji': list[dict[str, str]] = [
+ {
+ "input": "I love tp drink pina coladas",
+ "output": "👤 ❤️ 🥤 🍍🥥"
+ },
+ {"input": "Mime Person", "output": "👤🤲🚷"}
+];
+
+can 'Get Emoji Representation'
+get_emoji(input: 'Text Input': str) -> 'Emoji Representation': str by llm(
+ temperature=0.7,
+ incl_info=(emoji_examples),
+ excl_info=()
+);
+
+with entry {
+ print(get_emoji('Lets move to paris'));
+}
diff --git a/support/plugins/mtllm/tests/fixtures/with_llm_image.jac b/support/plugins/mtllm/tests/fixtures/with_llm_image.jac
new file mode 100644
index 000000000..44118c81e
--- /dev/null
+++ b/support/plugins/mtllm/tests/fixtures/with_llm_image.jac
@@ -0,0 +1,30 @@
+import:py from mtllm.llms { BaseLLM }
+import:py from mtllm { Image }
+import:py os;
+
+obj model :BaseLLM: {
+ can init {
+ super.__init__();
+ }
+
+ can __infer__(meaning_in: str, **kwargs: dict) {
+ print(kwargs);
+ print(meaning_in);
+ return "[Output] Something";
+ }
+}
+
+glob llm = model();
+
+can 'Solve the Given Math Question'
+solve_math_question(question_img: 'Image of the Question': Image) -> 'Answer to the Question': str by llm(method="Chain-of-Thoughts");
+
+with entry {
+ question_img = Image(
+ os.path.join(
+ os.path.dirname(__file__),
+ 'math_question.jpg'
+ )
+ );
+ print(solve_math_question(question_img));
+}
diff --git a/support/plugins/mtllm/tests/fixtures/with_llm_lower.jac b/support/plugins/mtllm/tests/fixtures/with_llm_lower.jac
new file mode 100644
index 000000000..f79074cd7
--- /dev/null
+++ b/support/plugins/mtllm/tests/fixtures/with_llm_lower.jac
@@ -0,0 +1,48 @@
+import:py from mtllm.llms { BaseLLM }
+
+obj model :BaseLLM: {
+ can init {
+ super.__init__();
+ }
+
+ can __infer__(meaning_in: str, **kwargs: dict) {
+ print(meaning_in);
+ return '[Reasoning] J. Robert Oppenheimer, also known as the "father of the atomic bomb," was a brilliant '
+ 'theoretical physicist and the director of the Manhattan Project during World War II. He played a crucial '
+ 'role in developing the first nuclear weapons. However, after witnessing the devastation caused by the '
+ 'atomic bombs dropped on Hiroshima and Nagasaki, he expressed deep regret and became an advocate for nuclear '
+ 'disarmament. While he was an exceptional scientist, he was also known for his introspective and philosophical '
+ 'nature, which suggests an introverted personality.\n'
+ '[Output] Person(full_name="J. Robert Oppenheimer", yod=1967, personality=Personality.INTROVERT)';
+ }
+}
+
+glob llm = model();
+
+enum 'Personality of the Person'
+Personality {
+ INTROVERT: 'Person who is shy and reticent' = "Introvert",
+ EXTROVERT: 'Person who is outgoing and socially confident' = "Extrovert"
+}
+
+obj 'Person'
+Person {
+ has full_name: 'Fullname of the Person': str,
+ yod: 'Year of Death': int,
+ personality: 'Personality of the Person': Personality;
+}
+
+glob personality_examples: 'Personality Information of Famous People': dict[str, Personality] = {
+ 'Albert Einstein': Personality.INTROVERT,
+ 'Barack Obama': Personality.EXTROVERT
+};
+
+can 'Get Person Information use common knowledge'
+get_person_info(name: 'Name of the Person': str) -> 'Person': Person by llm(method="Reason");
+
+with entry {
+ person_obj = get_person_info('Oppenheimer');
+ print(
+ f"{person_obj.full_name} was a {person_obj.personality.value} person who died in {person_obj.yod}"
+ );
+}
diff --git a/support/plugins/mtllm/tests/fixtures/with_llm_method.jac b/support/plugins/mtllm/tests/fixtures/with_llm_method.jac
new file mode 100644
index 000000000..29dea9c7e
--- /dev/null
+++ b/support/plugins/mtllm/tests/fixtures/with_llm_method.jac
@@ -0,0 +1,58 @@
+import:py from mtllm.llms { BaseLLM }
+
+obj model :BaseLLM: {
+ can init {
+ super.__init__();
+ }
+
+ can __infer__(meaning_in: str, **kwargs: dict) {
+ print(meaning_in);
+ return "[Output] Personality.INTROVERT";
+ }
+}
+
+glob llm = model();
+
+class 'Personality Index of a Person'
+PersonalityIndex {
+ has index: 'Personality Index': int;
+}
+
+enum 'Personality of the Person'
+Personality {
+ INTROVERT: 'Person who is shy and reticent',
+ EXTROVERT: 'Person who is outgoing and socially confident'
+}
+
+glob personality_examples: 'Personality Information of Famous People': dict[str, Personality | None] = {
+ 'Albert Einstein': Personality.INTROVERT,
+ 'Barack Obama': Personality.EXTROVERT
+};
+
+obj 'Person'
+Person {
+ has name: 'Name of the Person': str,
+ age: 'Age of the Person': int;
+}
+
+obj 'main object '
+outer {
+ obj 'inner object'
+ inner {
+ has diary_entries: 'Diary Entries': list[str];
+
+ can 'Get Personality of the Person'
+ get_personality(person: 'Person Object': list[Person]) -> 'Personality of the Person': dict[Personality, PersonalityIndex] by llm(
+ method="Reason",
+ incl_info=(personality_examples, self.diary_entries)
+ );
+ }
+}
+
+with entry {
+ obj1 = outer.inner(
+ ["I won noble prize in Physics", "I am popular for my theory of relativity"]
+ );
+ pp = Person('Albert Einstein', 76);
+ print(type(obj1.get_personality(pp)));
+}
diff --git a/support/plugins/mtllm/tests/fixtures/with_llm_type.jac b/support/plugins/mtllm/tests/fixtures/with_llm_type.jac
new file mode 100644
index 000000000..8538258a1
--- /dev/null
+++ b/support/plugins/mtllm/tests/fixtures/with_llm_type.jac
@@ -0,0 +1,59 @@
+import:py from mtllm.llms { BaseLLM }
+
+obj model :BaseLLM: {
+ can init(output_str: str) {
+ super.__init__();
+ self.output_str = output_str;
+ }
+
+ can __infer__(meaning_in: str, **kwargs: dict) {
+ print("Meaning in: ", meaning_in);
+ return f"[Output] {self.output_str}";
+ }
+}
+
+glob llm1 = model(
+ output_str="Person(name='Albert Einstein', dob='14/03/1879', age=76)"
+);
+
+obj 'Person'
+Person {
+ has name: 'Name of the Person': str,
+ dob: 'Date of Birth': str,
+ age: 'Age of the Person': int;
+}
+
+with entry {
+ einstein: 'Einstein Object': Person = Person(name="Albert Einstein" by llm1());
+ print(einstein.dob); #14/03/1879
+}
+
+glob llm2 = model(
+ output_str="University.Department(name='Computer Science', head=Person(name='Jason Mars', dob='1994-01-01', age=30))"
+);
+
+obj 'University'
+University {
+ has name: 'Name of the University': str,
+ location: 'Location of the University': str,
+ departments: 'Departments in the University': list[self.Department] = [];
+
+ obj 'Department'
+ Department {
+ has name: 'Name of the Department': str,
+ head: 'Head of the Department': Person;
+ }
+}
+
+with entry {
+ umich: 'University of Michigan': University = University(
+ name="University of Michigan",
+ location="Ann Arbor, Michigan"
+ );
+ cs_department: 'Computer Science Department': University.Department = University.Department(name="Computer Science" by llm2(incl_info=(umich)));
+ print(cs_department.head); # Person(name='Jason Mars', dob='1994-01-01', age=30)
+ umich.departments.append(
+ umich.Department(name="Computer Science" by llm2())
+ );
+ print(umich.departments[0].head); # Person(name='Jason Mars', dob='1994-01-01', age=30)
+}
diff --git a/support/plugins/mtllm/tests/fixtures/with_llm_video.jac b/support/plugins/mtllm/tests/fixtures/with_llm_video.jac
new file mode 100644
index 000000000..0ebcdff68
--- /dev/null
+++ b/support/plugins/mtllm/tests/fixtures/with_llm_video.jac
@@ -0,0 +1,31 @@
+import:py from mtllm.llms { BaseLLM }
+import:py from mtllm { Video }
+import:py os;
+
+obj model :BaseLLM: {
+ can init {
+ super.__init__();
+ }
+
+ can __infer__(meaning_in: str, **kwargs: dict) {
+ print(kwargs);
+ print(meaning_in);
+ return "[Output] Something";
+ }
+}
+
+glob llm = model();
+
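+ # A free-standing ability delegated to the LLM; method selects the prompting
+ # style and context injects an extra hint into the generated prompt.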
+can is_aligned(video: Video, text: str) -> bool by llm(
+ method="Chain-of-Thoughts",
+ context="Mugen is the moving character"
+);
+
+with entry {
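+ # The second Video argument is presumably the frame-sampling rate (1 frame/s);
+ # the bundled mugen.mp4 then yields the four base64 frames the test expects.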
+ video = Video(
+ os.path.join(os.path.dirname(__file__), "mugen.mp4"),
+ 1
+ );
+ text = "Mugen jumps off and collects few coins.";
+ print(is_aligned(video, text));
+}
diff --git a/support/plugins/mtllm/tests/test_jaclang.py b/support/plugins/mtllm/tests/test_jaclang.py
new file mode 100644
index 000000000..19cd3959b
--- /dev/null
+++ b/support/plugins/mtllm/tests/test_jaclang.py
@@ -0,0 +1,137 @@
+"""Tests for Integration with Jaclang."""
+
+import io
+import sys
+
+from jaclang import jac_import
+from jaclang.utils.test import TestCase
+
+
+class JacLanguageTests(TestCase):
+ """Tests for Integration with Jaclang."""
+
+ def setUp(self) -> None:
+ """Set up test."""
+ return super().setUp()
+
+ def test_with_llm_function(self) -> None:
+ """Parse micro jac file."""
+ captured_output = io.StringIO()
+ sys.stdout = captured_output
+ jac_import("with_llm_function", base_path=self.fixture_abs_path("./"))
+ sys.stdout = sys.__stdout__
+ stdout_value = captured_output.getvalue()
+ self.assertIn("{'temperature': 0.7}", stdout_value)
+ self.assertIn("Emoji Representation (str)", stdout_value)
+ self.assertIn('Text Input (input) (str) = "Lets move to paris"', stdout_value)
+ self.assertIn(
+ ' = [{"input": "I love tp drink pina coladas"',
+ stdout_value,
+ )
+
+ def test_with_llm_method(self) -> None:
+ """Parse micro jac file."""
+ captured_output = io.StringIO()
+ sys.stdout = captured_output
+ jac_import("with_llm_method", base_path=self.fixture_abs_path("./"))
+ sys.stdout = sys.__stdout__
+ stdout_value = captured_output.getvalue()
+ self.assertIn("[Reasoning] ", stdout_value)
+ self.assertIn(
+ "Personality of the Person (Personality) (Enum) eg:- Personality.INTROVERT",
+ stdout_value,
+ )
+ self.assertIn(
+ "Personality Index of a Person (PersonalityIndex) (class) eg:- "
+ 'PersonalityIndex(index="Personality Index":int)',
+ stdout_value,
+ )
+ self.assertIn(
+ "Personality of the Person (dict[Personality,PersonalityIndex])",
+ stdout_value,
+ )
+ self.assertIn(
+ 'Diary Entries (diary_entries) (list[str]) = ["I won the Nobel Prize in '
+ 'Physics", "I am popular for my theory of relativity"]',
+ stdout_value,
+ )
+
+ def test_with_llm_lower(self) -> None:
+ """Parse micro jac file."""
+ captured_output = io.StringIO()
+ sys.stdout = captured_output
+ jac_import("with_llm_lower", base_path=self.fixture_abs_path("./"))
+ sys.stdout = sys.__stdout__
+ stdout_value = captured_output.getvalue()
+ self.assertIn("[Reasoning] ", stdout_value)
+ self.assertIn(
+ 'Name of the Person (name) (str) = "Oppenheimer"',
+ stdout_value,
+ )
+ self.assertIn(
+ 'Person (Person) (obj) eg:- Person(full_name="Fullname of the Person":str, '
+ 'yod="Year of Death":int, personality="Personality of the Person":Personality)',
+ stdout_value,
+ )
+ self.assertIn(
+ "J. Robert Oppenheimer was a Introvert person who died in 1967",
+ stdout_value,
+ )
+
+ def test_with_llm_type(self) -> None:
+ """Parse micro jac file."""
+ captured_output = io.StringIO()
+ sys.stdout = captured_output
+ jac_import("with_llm_type", base_path=self.fixture_abs_path("./"))
+ sys.stdout = sys.__stdout__
+ stdout_value = captured_output.getvalue()
+ self.assertIn("14/03/1879", stdout_value)
+ self.assertNotIn(
+ 'University (University) (obj) = type(__module__="with_llm_type", __doc__=None, '
+ "_jac_entry_funcs_`=[`], _jac_exit_funcs_=[], __init__=function(__wrapped__=function()))",
+ stdout_value,
+ )
+ desired_output_count = stdout_value.count(
+ "Person(name='Jason Mars', dob='1994-01-01', age=30)"
+ )
+ self.assertEqual(desired_output_count, 2)
+
+ def test_with_llm_image(self) -> None:
+ """Test MTLLLM Image Implementation."""
+ try:
+ captured_output = io.StringIO()
+ sys.stdout = captured_output
+ jac_import("with_llm_image", base_path=self.fixture_abs_path("./"))
+ sys.stdout = sys.__stdout__
+ stdout_value = captured_output.getvalue()
+ self.assertIn(
+ "{'type': 'text', 'text': '\\n[System Prompt]\\n", stdout_value[:500]
+ )
+ self.assertNotIn(
+ " {'type': 'text', 'text': 'Image of the Question (question_img) (Image) = '}, "
+ "{'type': 'image_url', 'image_url': {'url': 'data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQAB",
+ stdout_value[:500],
+ )
+ except Exception:
+ # Restore stdout before skipping, otherwise later tests print into the buffer.
+ sys.stdout = sys.__stdout__
+ self.skipTest("This test requires Pillow to be installed.")
+
+ def test_with_llm_video(self) -> None:
+ """Test MTLLLM Video Implementation."""
+ try:
+ captured_output = io.StringIO()
+ sys.stdout = captured_output
+ jac_import("with_llm_video", base_path=self.fixture_abs_path("./"))
+ sys.stdout = sys.__stdout__
+ stdout_value = captured_output.getvalue()
+ self.assertIn(
+ "{'type': 'text', 'text': '\\n[System Prompt]\\n", stdout_value[:500]
+ )
+ self.assertEqual(stdout_value.count("data:image/jpeg;base64"), 4)
+ except Exception:
+ # Restore stdout before skipping, otherwise later tests print into the buffer.
+ sys.stdout = sys.__stdout__
+ self.skipTest("This test requires OpenCV to be installed.")