From 13374a12e9c6b9feeb3108f3231d94f19c007ddd Mon Sep 17 00:00:00 2001
From: Saket Aryan <94069182+whysosaket@users.noreply.github.com>
Date: Tue, 19 Nov 2024 23:53:58 +0530
Subject: [PATCH] (Feature) Vercel AI SDK (#2024)

---
 .../vercel-ai-sdk-chat-app/.gitattributes     |   2 +
 examples/vercel-ai-sdk-chat-app/.gitignore    |  29 ++
 .../vercel-ai-sdk-chat-app/components.json    |  20 ++
 .../vercel-ai-sdk-chat-app/eslint.config.js   |  28 ++
 examples/vercel-ai-sdk-chat-app/index.html    |  13 +
 examples/vercel-ai-sdk-chat-app/package.json  |  51 +++
 .../vercel-ai-sdk-chat-app/postcss.config.js  |   6 +
 .../public/mem0_logo.jpeg                     | Bin 0 -> 8607 bytes
 examples/vercel-ai-sdk-chat-app/src/App.tsx   |  13 +
 .../src/assets/mem0_logo.jpeg                 | Bin 0 -> 8607 bytes
 .../src/assets/react.svg                      |   1 +
 .../src/assets/user.jpg                       | Bin 0 -> 15547 bytes
 .../src/components/api-settings-popup.tsx     |  91 +++++
 .../src/components/chevron-toggle.tsx         |  35 ++
 .../src/components/header.tsx                 |  81 +++++
 .../src/components/input-area.tsx             | 107 ++++++
 .../src/components/memories.tsx               |  93 +++++
 .../src/components/messages.tsx               | 102 ++++++
 .../src/components/ui/avatar.tsx              |  50 +++
 .../src/components/ui/badge.tsx               |  36 ++
 .../src/components/ui/button.tsx              |  57 +++
 .../src/components/ui/card.tsx                |  76 ++++
 .../src/components/ui/dialog.tsx              | 120 +++++++
 .../src/components/ui/input.tsx               |  25 ++
 .../src/components/ui/label.tsx               |  24 ++
 .../src/components/ui/scroll-area.tsx         |  46 +++
 .../src/components/ui/select.tsx              | 164 +++++++++
 .../src/contexts/GlobalContext.tsx            | 324 ++++++++++++++++++
 examples/vercel-ai-sdk-chat-app/src/index.css |  97 ++++++
 examples/vercel-ai-sdk-chat-app/src/main.tsx  |  10 +
 examples/vercel-ai-sdk-chat-app/src/page.tsx  |  14 +
 .../vercel-ai-sdk-chat-app/src/pages/home.tsx |  41 +++
 examples/vercel-ai-sdk-chat-app/src/types.ts  |  22 ++
 .../vercel-ai-sdk-chat-app/src/vite-env.d.ts  |   1 +
 .../vercel-ai-sdk-chat-app/tailwind.config.js |  62 ++++
 .../vercel-ai-sdk-chat-app/tsconfig.app.json  |  32 ++
 examples/vercel-ai-sdk-chat-app/tsconfig.json |  13 +
 .../vercel-ai-sdk-chat-app/tsconfig.node.json |  24 ++
 .../vercel-ai-sdk-chat-app/vite.config.ts     |  13 +
 vercel-ai-sdk/.gitattributes                  |   2 +
 vercel-ai-sdk/.gitignore                      |  10 +
 vercel-ai-sdk/README.md                       | 228 ++++++++++++
 vercel-ai-sdk/config/test-config.ts           | 105 ++++++
 vercel-ai-sdk/jest.config.js                  |   6 +
 vercel-ai-sdk/nodemon.json                    |   5 +
 vercel-ai-sdk/package.json                    |  69 ++++
 vercel-ai-sdk/src/index.ts                    |   4 +
 vercel-ai-sdk/src/mem0-chat-language-model.ts | 150 ++++++++
 vercel-ai-sdk/src/mem0-chat-settings.ts       |  36 ++
 .../src/mem0-completion-language-model.ts     | 150 ++++++++
 vercel-ai-sdk/src/mem0-completion-settings.ts |  19 +
 vercel-ai-sdk/src/mem0-facade.ts              |  36 ++
 .../src/mem0-generic-language-model.ts        | 148 ++++++++
 vercel-ai-sdk/src/mem0-provider-selector.ts   |  34 ++
 vercel-ai-sdk/src/mem0-provider.ts            | 145 ++++++++
 vercel-ai-sdk/src/mem0-utils.ts               | 114 ++++++
 .../src/provider-response-provider.ts         | 113 ++++++
 vercel-ai-sdk/src/stream-utils.ts             |  28 ++
 vercel-ai-sdk/teardown.ts                     |  12 +
 .../tests/anthropic-structured-ouput.test.ts  | 110 ++++++
 vercel-ai-sdk/tests/anthropic.test.ts         |  61 ++++
 vercel-ai-sdk/tests/cohere.test.ts            |  60 ++++
 vercel-ai-sdk/tests/generate-output.test.ts   |  86 +++++
 vercel-ai-sdk/tests/groq.test.ts              |  61 ++++
 vercel-ai-sdk/tests/memory-core.test.ts       |  75 ++++
 .../tests/openai-structured-ouput.test.ts     | 110 ++++++
 vercel-ai-sdk/tests/openai.test.ts            |  58 ++++
 vercel-ai-sdk/tests/text-properties.test.ts   |  77 +++++
 vercel-ai-sdk/tsconfig.json                   |  29 ++
 vercel-ai-sdk/tsup.config.ts                  |  10 +
 70 files changed, 4074 insertions(+)
 create mode 100644 examples/vercel-ai-sdk-chat-app/.gitattributes
 create mode 100644 examples/vercel-ai-sdk-chat-app/.gitignore
 create mode 100644 examples/vercel-ai-sdk-chat-app/components.json
 create mode 100644 examples/vercel-ai-sdk-chat-app/eslint.config.js
 create mode 100644 examples/vercel-ai-sdk-chat-app/index.html
 create mode 100644 examples/vercel-ai-sdk-chat-app/package.json
 create mode 100644 examples/vercel-ai-sdk-chat-app/postcss.config.js
 create mode 100644 examples/vercel-ai-sdk-chat-app/public/mem0_logo.jpeg
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/App.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/assets/mem0_logo.jpeg
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/assets/react.svg
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/assets/user.jpg
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/components/api-settings-popup.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/components/chevron-toggle.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/components/header.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/components/input-area.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/components/memories.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/components/messages.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/components/ui/avatar.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/components/ui/badge.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/components/ui/button.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/components/ui/card.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/components/ui/dialog.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/components/ui/input.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/components/ui/label.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/components/ui/scroll-area.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/components/ui/select.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/contexts/GlobalContext.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/index.css
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/main.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/page.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/pages/home.tsx
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/types.ts
 create mode 100644 examples/vercel-ai-sdk-chat-app/src/vite-env.d.ts
 create mode 100644 examples/vercel-ai-sdk-chat-app/tailwind.config.js
 create mode 100644 examples/vercel-ai-sdk-chat-app/tsconfig.app.json
 create mode 100644 examples/vercel-ai-sdk-chat-app/tsconfig.json
 create mode 100644 examples/vercel-ai-sdk-chat-app/tsconfig.node.json
 create mode 100644 examples/vercel-ai-sdk-chat-app/vite.config.ts
 create mode 100644 vercel-ai-sdk/.gitattributes
 create mode 100644 vercel-ai-sdk/.gitignore
 create mode 100644 vercel-ai-sdk/README.md
 create mode 100644 vercel-ai-sdk/config/test-config.ts
 create mode 100644 vercel-ai-sdk/jest.config.js
 create mode 100644 vercel-ai-sdk/nodemon.json
 create mode 100644 vercel-ai-sdk/package.json
 create mode 100644 vercel-ai-sdk/src/index.ts
 create mode 100644 vercel-ai-sdk/src/mem0-chat-language-model.ts
 create mode 100644 vercel-ai-sdk/src/mem0-chat-settings.ts
 create mode 100644 vercel-ai-sdk/src/mem0-completion-language-model.ts
 create mode 100644 vercel-ai-sdk/src/mem0-completion-settings.ts
 create mode 100644 vercel-ai-sdk/src/mem0-facade.ts
 create mode 100644 vercel-ai-sdk/src/mem0-generic-language-model.ts
 create mode 100644 vercel-ai-sdk/src/mem0-provider-selector.ts
 create mode 100644 vercel-ai-sdk/src/mem0-provider.ts
 create mode 100644 vercel-ai-sdk/src/mem0-utils.ts
 create mode 100644 vercel-ai-sdk/src/provider-response-provider.ts
 create mode 100644 vercel-ai-sdk/src/stream-utils.ts
 create mode 100644 vercel-ai-sdk/teardown.ts
 create mode 100644 vercel-ai-sdk/tests/anthropic-structured-ouput.test.ts
 create mode 100644 vercel-ai-sdk/tests/anthropic.test.ts
 create mode 100644 vercel-ai-sdk/tests/cohere.test.ts
 create mode 100644 vercel-ai-sdk/tests/generate-output.test.ts
 create mode 100644 vercel-ai-sdk/tests/groq.test.ts
 create mode 100644 vercel-ai-sdk/tests/memory-core.test.ts
 create mode 100644 vercel-ai-sdk/tests/openai-structured-ouput.test.ts
 create mode 100644 vercel-ai-sdk/tests/openai.test.ts
 create mode 100644 vercel-ai-sdk/tests/text-properties.test.ts
 create mode 100644 vercel-ai-sdk/tsconfig.json
 create mode 100644 vercel-ai-sdk/tsup.config.ts

diff --git a/examples/vercel-ai-sdk-chat-app/.gitattributes b/examples/vercel-ai-sdk-chat-app/.gitattributes
new file mode 100644
index 0000000000..dfe0770424
--- /dev/null
+++ b/examples/vercel-ai-sdk-chat-app/.gitattributes
@@ -0,0 +1,2 @@
+# Auto detect text files and perform LF normalization
+* text=auto
diff --git a/examples/vercel-ai-sdk-chat-app/.gitignore b/examples/vercel-ai-sdk-chat-app/.gitignore
new file mode 100644
index 0000000000..9767597e36
--- /dev/null
+++ b/examples/vercel-ai-sdk-chat-app/.gitignore
@@ -0,0 +1,29 @@
+**/.env
+**/node_modules
+**/dist
+**/.DS_Store
+
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+lerna-debug.log*
+
+node_modules
+dist
+dist-ssr
+*.local
+
+# Editor directories and files
+.vscode/*
+!.vscode/extensions.json
+.idea
+.DS_Store
+*.suo
+*.ntvs*
+*.njsproj
+*.sln
+*.sw?
diff --git a/examples/vercel-ai-sdk-chat-app/components.json b/examples/vercel-ai-sdk-chat-app/components.json
new file mode 100644
index 0000000000..0b03196d3a
--- /dev/null
+++ b/examples/vercel-ai-sdk-chat-app/components.json
@@ -0,0 +1,20 @@
+{
+  "$schema": "https://ui.shadcn.com/schema.json",
+  "style": "new-york",
+  "rsc": false,
+  "tsx": true,
+  "tailwind": {
+    "config": "tailwind.config.js",
+    "css": "src/index.css",
+    "baseColor": "zinc",
+    "cssVariables": true,
+    "prefix": ""
+  },
+  "aliases": {
+    "components": "@/components",
+    "utils": "@/lib/utils",
+    "ui": "@/components/ui",
+    "lib": "@/lib",
+    "hooks": "@/hooks"
+  }
+}
\ No newline at end of file
diff --git a/examples/vercel-ai-sdk-chat-app/eslint.config.js b/examples/vercel-ai-sdk-chat-app/eslint.config.js
new file mode 100644
index 0000000000..092408a9f0
--- /dev/null
+++ b/examples/vercel-ai-sdk-chat-app/eslint.config.js
@@ -0,0 +1,28 @@
+import js from '@eslint/js'
+import globals from 'globals'
+import reactHooks from 'eslint-plugin-react-hooks'
+import reactRefresh from 'eslint-plugin-react-refresh'
+import tseslint from 'typescript-eslint'
+
+export default tseslint.config(
+  { ignores: ['dist'] },
+  {
+    extends: [js.configs.recommended, ...tseslint.configs.recommended],
+    files: ['**/*.{ts,tsx}'],
+    languageOptions: {
+      ecmaVersion: 2020,
+      globals: globals.browser,
+    },
+    plugins: {
+      'react-hooks': reactHooks,
+      'react-refresh': reactRefresh,
+    },
+    rules: {
+      ...reactHooks.configs.recommended.rules,
+      'react-refresh/only-export-components': [
+        'warn',
+        { allowConstantExport: true },
+      ],
+    },
+  },
+)
diff --git a/examples/vercel-ai-sdk-chat-app/index.html b/examples/vercel-ai-sdk-chat-app/index.html
new file mode 100644
index 0000000000..e2135b1c43
--- /dev/null
+++ b/examples/vercel-ai-sdk-chat-app/index.html
@@ -0,0 +1,13 @@
+
+
+
+
+
+
+ JustChat | Chat with AI
+
+
+
+
+
+
diff --git a/examples/vercel-ai-sdk-chat-app/package.json b/examples/vercel-ai-sdk-chat-app/package.json
new file mode 100644
index 0000000000..c0aed9cd34
--- /dev/null
+++ b/examples/vercel-ai-sdk-chat-app/package.json
@@ -0,0 +1,51 @@
+{
+  "name": "mem0-sdk-chat-bot",
+  "private": true,
+  "version": "0.0.0",
+  "type": "module",
+  "scripts": {
+    "dev": "vite",
+    "build": "tsc -b && vite build",
+    "lint": "eslint .",
+    "preview": "vite preview"
+  },
+  "dependencies": {
+    "@mem0/vercel-ai-provider": "^0.0.7",
+    "@radix-ui/react-avatar": "^1.1.1",
+    "@radix-ui/react-dialog": "^1.1.2",
+    "@radix-ui/react-icons": "^1.3.1",
+    "@radix-ui/react-label": "^2.1.0",
+    "@radix-ui/react-scroll-area": "^1.2.0",
+    "@radix-ui/react-select": "^2.1.2",
+    "@radix-ui/react-slot": "^1.1.0",
+    "ai": "^3.4.31",
+    "buffer": "^6.0.3",
+    "class-variance-authority": "^0.7.0",
+    "clsx": "^2.1.1",
+    "framer-motion": "^11.11.11",
+    "lucide-react": "^0.454.0",
+    "react": "^18.3.1",
+    "react-dom": "^18.3.1",
+    "react-markdown": "^9.0.1",
+    "tailwind-merge": "^2.5.4",
+    "tailwindcss-animate": "^1.0.7",
+    "zod": "^3.23.8"
+  },
+  "devDependencies": {
+    "@eslint/js": "^9.13.0",
+    "@types/node": "^22.8.6",
+    "@types/react": "^18.3.12",
+    "@types/react-dom": "^18.3.1",
+    "@vitejs/plugin-react": "^4.3.3",
+    "autoprefixer": "^10.4.20",
+    "eslint": "^9.13.0",
+    "eslint-plugin-react-hooks": "^5.0.0",
+    "eslint-plugin-react-refresh": "^0.4.14",
+    "globals": "^15.11.0",
+    "postcss": "^8.4.47",
+    "tailwindcss": "^3.4.14",
+    "typescript": "~5.6.2",
+    "typescript-eslint": "^8.11.0",
+    "vite": "^5.4.10"
+  }
+}
diff --git a/examples/vercel-ai-sdk-chat-app/postcss.config.js b/examples/vercel-ai-sdk-chat-app/postcss.config.js
new file mode 100644
index 0000000000..2e7af2b7f1
--- /dev/null
+++ b/examples/vercel-ai-sdk-chat-app/postcss.config.js
@@ -0,0 +1,6 @@
+export default {
+  plugins: {
+    tailwindcss: {},
+    autoprefixer: {},
+  },
+}
diff --git a/examples/vercel-ai-sdk-chat-app/public/mem0_logo.jpeg b/examples/vercel-ai-sdk-chat-app/public/mem0_logo.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..eb02b0ec9c908a940253f8e0092fb5023a36f25b
GIT binary patch
literal 8607
[8607 bytes of base85-encoded JPEG data omitted]

literal 0
HcmV?d00001
diff --git a/examples/vercel-ai-sdk-chat-app/src/App.tsx b/examples/vercel-ai-sdk-chat-app/src/App.tsx
new file mode 100644
index 0000000000..4564ce5d59
--- /dev/null
+++ b/examples/vercel-ai-sdk-chat-app/src/App.tsx
@@ -0,0 +1,13 @@
+import Home from "./page"
+
+
+function App() {
+
+  return (
+    <>
+      <Home />
+    </>
+  )
+}
+
+export default App
diff --git a/examples/vercel-ai-sdk-chat-app/src/assets/mem0_logo.jpeg b/examples/vercel-ai-sdk-chat-app/src/assets/mem0_logo.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..eb02b0ec9c908a940253f8e0092fb5023a36f25b
GIT binary patch
literal 8607
[8607 bytes of base85-encoded JPEG data omitted]

literal 0
HcmV?d00001
diff --git a/examples/vercel-ai-sdk-chat-app/src/assets/react.svg b/examples/vercel-ai-sdk-chat-app/src/assets/react.svg
new file mode 100644
index 0000000000..6c87de9bb3
--- /dev/null
+++ b/examples/vercel-ai-sdk-chat-app/src/assets/react.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/examples/vercel-ai-sdk-chat-app/src/assets/user.jpg b/examples/vercel-ai-sdk-chat-app/src/assets/user.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..f2e7fc22d39645a57d6c2da8369874f2198a150a
GIT binary patch
literal 15547
[15547 bytes of base85-encoded JPEG data omitted]

literal 0
HcmV?d00001
diff --git a/examples/vercel-ai-sdk-chat-app/src/components/api-settings-popup.tsx b/examples/vercel-ai-sdk-chat-app/src/components/api-settings-popup.tsx
new file mode 100644
index 0000000000..75cea86cd1
--- /dev/null
+++ b/examples/vercel-ai-sdk-chat-app/src/components/api-settings-popup.tsx
@@ -0,0 +1,91 @@
+import { Dispatch, SetStateAction, useContext, useEffect, useState } from 'react'
+import { Button } from "@/components/ui/button"
+import { Input } from "@/components/ui/input"
+import { Label } from "@/components/ui/label"
+import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select"
+import { Dialog, DialogContent, DialogHeader, DialogTitle, DialogFooter } from "@/components/ui/dialog"
+import GlobalContext from '@/contexts/GlobalContext'
+
+export default function ApiSettingsPopup(props: { isOpen: boolean, setIsOpen: Dispatch<SetStateAction<boolean>> }) {
+  const {isOpen, setIsOpen} = props
+  const [mem0ApiKey, setMem0ApiKey] = useState('')
+  const [providerApiKey, setProviderApiKey] = useState('')
+  const [provider, setProvider] = useState('OpenAI')
+  const { selectorHandler, selectedOpenAIKey, selectedMem0Key, selectedProvider } = useContext(GlobalContext);
+
+  const handleSave = () => {
+    // Here you would typically save the settings to your backend or local
storage + selectorHandler(mem0ApiKey, providerApiKey, provider); + setIsOpen(false) + } + + useEffect(() => { + if (selectedOpenAIKey) { + setProviderApiKey(selectedOpenAIKey); + } + if (selectedMem0Key) { + setMem0ApiKey(selectedMem0Key); + } + if (selectedProvider) { + setProvider(selectedProvider); + } + }, [selectedOpenAIKey, selectedMem0Key, selectedProvider]); + + + + return ( + <> + + + + API Configuration Settings + +
+
+ + setMem0ApiKey(e.target.value)} + className="col-span-3 rounded-3xl" + /> +
+
+ + setProviderApiKey(e.target.value)} + className="col-span-3 rounded-3xl" + /> +
+
+ + +
+
+ + + + +
+
+ + ) +} \ No newline at end of file diff --git a/examples/vercel-ai-sdk-chat-app/src/components/chevron-toggle.tsx b/examples/vercel-ai-sdk-chat-app/src/components/chevron-toggle.tsx new file mode 100644 index 0000000000..7b8b128ea9 --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/components/chevron-toggle.tsx @@ -0,0 +1,35 @@ +import { Button } from "@/components/ui/button"; +import { ChevronLeft, ChevronRight } from "lucide-react"; +import React from "react"; + +const ChevronToggle = (props: { + isMemoriesExpanded: boolean; + setIsMemoriesExpanded: React.Dispatch>; +}) => { + const { isMemoriesExpanded, setIsMemoriesExpanded } = props; + return ( + <> +
+
+ +
+
+ + ); +}; + +export default ChevronToggle; diff --git a/examples/vercel-ai-sdk-chat-app/src/components/header.tsx b/examples/vercel-ai-sdk-chat-app/src/components/header.tsx new file mode 100644 index 0000000000..7ddbd37d1d --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/components/header.tsx @@ -0,0 +1,81 @@ +import { Button } from "@/components/ui/button"; +import { ChevronRight, X, RefreshCcw, Settings } from "lucide-react"; +import { Dispatch, SetStateAction, useContext, useEffect, useState } from "react"; +import GlobalContext from "../contexts/GlobalContext"; +import { Input } from "./ui/input"; + +const Header = (props: { + setIsSettingsOpen: Dispatch>; +}) => { + const { setIsSettingsOpen } = props; + const { selectUserHandler, clearUserHandler, selectedUser, clearConfiguration } = useContext(GlobalContext); + const [userId, setUserId] = useState(""); + + const handleSelectUser = (e: React.ChangeEvent) => { + setUserId(e.target.value); + }; + + const handleClearUser = () => { + clearUserHandler(); + setUserId(""); + }; + + const handleSubmit = () => { + selectUserHandler(userId); + }; + + // New function to handle key down events + const handleKeyDown = (e: React.KeyboardEvent) => { + if (e.key === 'Enter') { + e.preventDefault(); // Prevent form submission if it's in a form + handleSubmit(); + } + }; + + useEffect(() => { + if (selectedUser) { + setUserId(selectedUser); + } + }, [selectedUser]); + + return ( + <> +
+
+ Mem0 Assistant +
+
+
+ + + +
+
+ + +
+
+
+ + ); +}; + +export default Header; diff --git a/examples/vercel-ai-sdk-chat-app/src/components/input-area.tsx b/examples/vercel-ai-sdk-chat-app/src/components/input-area.tsx new file mode 100644 index 0000000000..877e19a28e --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/components/input-area.tsx @@ -0,0 +1,107 @@ +import { Button } from "@/components/ui/button"; +import { Input } from "@/components/ui/input"; +import GlobalContext from "@/contexts/GlobalContext"; +import { FileInfo } from "@/types"; +import { Images, Send, X } from "lucide-react"; +import { useContext, useRef, useState } from "react"; + +const InputArea = () => { + const [inputValue, setInputValue] = useState(""); + const { handleSend, selectedFile, setSelectedFile, setFile } = useContext(GlobalContext); + const [loading, setLoading] = useState(false); + + const ref = useRef(null); + const fileInputRef = useRef(null) + + const handleFileChange = (event: React.ChangeEvent) => { + const file = event.target.files?.[0] + if (file) { + setSelectedFile({ + name: file.name, + type: file.type, + size: file.size + }) + setFile(file) + } + } + + const handleSendController = async () => { + setLoading(true); + setInputValue(""); + await handleSend(inputValue); + setLoading(false); + + // focus on input + setTimeout(() => { + ref.current?.focus(); + }, 0); + }; + + const handleClosePopup = () => { + setSelectedFile(null) + if (fileInputRef.current) { + fileInputRef.current.value = '' + } + } + + return ( + <> +
+
+
+
+ + + {selectedFile && } +
+
+ setInputValue(e.target.value)} + onKeyDown={(e) => e.key === "Enter" && handleSendController()} + placeholder="Type a message..." + className="flex-1 pl-10 rounded-3xl" + disabled={loading} + ref={ref} + /> +
+ +
+
+
+ + ); +}; + +const FileInfoPopup = ({ file, onClose }: { file: FileInfo, onClose: () => void }) => { + return ( +
+
+
+

{file.name}

+ +
+

Type: {file.type}

+

Size: {(file.size / 1024).toFixed(2)} KB

+
+
+ ) +} + +export default InputArea; diff --git a/examples/vercel-ai-sdk-chat-app/src/components/memories.tsx b/examples/vercel-ai-sdk-chat-app/src/components/memories.tsx new file mode 100644 index 0000000000..20a2f22e8e --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/components/memories.tsx @@ -0,0 +1,93 @@ +import { Badge } from "@/components/ui/badge"; +import { Card } from "@/components/ui/card"; +import { ScrollArea } from "@radix-ui/react-scroll-area"; +import { Memory } from "../types"; +import GlobalContext from "@/contexts/GlobalContext"; +import { useContext, useEffect, useState } from "react"; +import { AnimatePresence, motion } from "framer-motion"; + + +// eslint-disable-next-line @typescript-eslint/no-unused-vars +const MemoryItem = ({ memory, index }: { memory: Memory; index: number }) => { + return ( + +
+

{memory.content}

+
+
+ {new Date(memory.timestamp).toLocaleString()} +
+
+ {memory.tags.map((tag) => ( + + {tag} + + ))} +
+
+ ); +}; + +const Memories = (props: { isMemoriesExpanded: boolean }) => { + const { isMemoriesExpanded } = props; + const { memories } = useContext(GlobalContext); + + // eslint-disable-next-line @typescript-eslint/no-unused-vars + const [prevMemories, setPrevMemories] = useState([]); + + // Track memory positions for animation + useEffect(() => { + setPrevMemories(memories); + }, [memories]); + + return ( + +
+ + Relevant Memories ({memories.length}) + +
+ {memories.length === 0 && ( + + No relevant memories found. +
+ Only the relevant memories will be displayed here. +
+ )} + + + + {memories.map((memory: Memory, index: number) => ( + + ))} + + + +
+ ); +}; + +export default Memories; \ No newline at end of file diff --git a/examples/vercel-ai-sdk-chat-app/src/components/messages.tsx b/examples/vercel-ai-sdk-chat-app/src/components/messages.tsx new file mode 100644 index 0000000000..38e5a59e12 --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/components/messages.tsx @@ -0,0 +1,102 @@ +import { Avatar, AvatarFallback, AvatarImage } from "@/components/ui/avatar"; +import { ScrollArea } from "@/components/ui/scroll-area"; +import { Message } from "../types"; +import { useContext, useEffect, useRef } from "react"; +import GlobalContext from "@/contexts/GlobalContext"; +import Markdown from "react-markdown"; +import Mem00Logo from "../assets/mem0_logo.jpeg"; +import UserLogo from "../assets/user.jpg"; + +const Messages = () => { + const { messages, thinking } = useContext(GlobalContext); + const scrollAreaRef = useRef(null); + + // scroll to bottom + useEffect(() => { + if (scrollAreaRef.current) { + scrollAreaRef.current.scrollTop += 40; // Scroll down by 40 pixels + } + }, [messages, thinking]); + + return ( + <> + +
+ {messages.map((message: Message) => ( +
+
+
+ + + + {message.sender === "assistant" ? "AI" : "U"} + + +
+
+ {message.image && ( +
+ Message attachment +
+ )} + {message.content} + + {message.timestamp} + +
+
+
+ ))} + {thinking && ( +
+
+ + + {"AI"} + +
+
+
+
+
+
+
+
+
+ )} +
+
+ + ); +}; + +export default Messages; diff --git a/examples/vercel-ai-sdk-chat-app/src/components/ui/avatar.tsx b/examples/vercel-ai-sdk-chat-app/src/components/ui/avatar.tsx new file mode 100644 index 0000000000..51e507ba9d --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/components/ui/avatar.tsx @@ -0,0 +1,50 @@ +"use client" + +import * as React from "react" +import * as AvatarPrimitive from "@radix-ui/react-avatar" + +import { cn } from "@/lib/utils" + +const Avatar = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +Avatar.displayName = AvatarPrimitive.Root.displayName + +const AvatarImage = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +AvatarImage.displayName = AvatarPrimitive.Image.displayName + +const AvatarFallback = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +AvatarFallback.displayName = AvatarPrimitive.Fallback.displayName + +export { Avatar, AvatarImage, AvatarFallback } diff --git a/examples/vercel-ai-sdk-chat-app/src/components/ui/badge.tsx b/examples/vercel-ai-sdk-chat-app/src/components/ui/badge.tsx new file mode 100644 index 0000000000..e87d62bf1a --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/components/ui/badge.tsx @@ -0,0 +1,36 @@ +import * as React from "react" +import { cva, type VariantProps } from "class-variance-authority" + +import { cn } from "@/lib/utils" + +const badgeVariants = cva( + "inline-flex items-center rounded-md border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2", + { + variants: { + variant: { + default: + "border-transparent bg-primary text-primary-foreground shadow hover:bg-primary/80", + secondary: + "border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80", + destructive: + "border-transparent bg-destructive text-destructive-foreground shadow hover:bg-destructive/80", + outline: "text-foreground", + }, + }, + defaultVariants: { + variant: "default", + }, + } +) + +export interface BadgeProps + extends React.HTMLAttributes, + VariantProps {} + +function Badge({ className, variant, ...props }: BadgeProps) { + return ( +
+ ) +} + +export { Badge, badgeVariants } diff --git a/examples/vercel-ai-sdk-chat-app/src/components/ui/button.tsx b/examples/vercel-ai-sdk-chat-app/src/components/ui/button.tsx new file mode 100644 index 0000000000..65d4fcd9ca --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/components/ui/button.tsx @@ -0,0 +1,57 @@ +import * as React from "react" +import { Slot } from "@radix-ui/react-slot" +import { cva, type VariantProps } from "class-variance-authority" + +import { cn } from "@/lib/utils" + +const buttonVariants = cva( + "inline-flex items-center justify-center gap-2 whitespace-nowrap rounded-md text-sm font-medium transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-ring disabled:pointer-events-none disabled:opacity-50 [&_svg]:pointer-events-none [&_svg]:size-4 [&_svg]:shrink-0", + { + variants: { + variant: { + default: + "bg-primary text-primary-foreground shadow hover:bg-primary/90", + destructive: + "bg-destructive text-destructive-foreground shadow-sm hover:bg-destructive/90", + outline: + "border border-input bg-background shadow-sm hover:bg-accent hover:text-accent-foreground", + secondary: + "bg-secondary text-secondary-foreground shadow-sm hover:bg-secondary/80", + ghost: "hover:bg-accent hover:text-accent-foreground", + link: "text-primary underline-offset-4 hover:underline", + }, + size: { + default: "h-9 px-4 py-2", + sm: "h-8 rounded-md px-3 text-xs", + lg: "h-10 rounded-md px-8", + icon: "h-9 w-9", + }, + }, + defaultVariants: { + variant: "default", + size: "default", + }, + } +) + +export interface ButtonProps + extends React.ButtonHTMLAttributes, + VariantProps { + asChild?: boolean +} + +const Button = React.forwardRef( + ({ className, variant, size, asChild = false, ...props }, ref) => { + const Comp = asChild ? Slot : "button" + return ( + + ) + } +) +Button.displayName = "Button" + +export { Button, buttonVariants } diff --git a/examples/vercel-ai-sdk-chat-app/src/components/ui/card.tsx b/examples/vercel-ai-sdk-chat-app/src/components/ui/card.tsx new file mode 100644 index 0000000000..77e9fb789b --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/components/ui/card.tsx @@ -0,0 +1,76 @@ +import * as React from "react" + +import { cn } from "@/lib/utils" + +const Card = React.forwardRef< + HTMLDivElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( +
+)) +Card.displayName = "Card" + +const CardHeader = React.forwardRef< + HTMLDivElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( +
+)) +CardHeader.displayName = "CardHeader" + +const CardTitle = React.forwardRef< + HTMLParagraphElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( +

+)) +CardTitle.displayName = "CardTitle" + +const CardDescription = React.forwardRef< + HTMLParagraphElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( +

+)) +CardDescription.displayName = "CardDescription" + +const CardContent = React.forwardRef< + HTMLDivElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( +

+)) +CardContent.displayName = "CardContent" + +const CardFooter = React.forwardRef< + HTMLDivElement, + React.HTMLAttributes +>(({ className, ...props }, ref) => ( +
+)) +CardFooter.displayName = "CardFooter" + +export { Card, CardHeader, CardFooter, CardTitle, CardDescription, CardContent } diff --git a/examples/vercel-ai-sdk-chat-app/src/components/ui/dialog.tsx b/examples/vercel-ai-sdk-chat-app/src/components/ui/dialog.tsx new file mode 100644 index 0000000000..5d16351fa7 --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/components/ui/dialog.tsx @@ -0,0 +1,120 @@ +import * as React from "react" +import * as DialogPrimitive from "@radix-ui/react-dialog" +import { Cross2Icon } from "@radix-ui/react-icons" + +import { cn } from "@/lib/utils" + +const Dialog = DialogPrimitive.Root + +const DialogTrigger = DialogPrimitive.Trigger + +const DialogPortal = DialogPrimitive.Portal + +const DialogClose = DialogPrimitive.Close + +const DialogOverlay = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +DialogOverlay.displayName = DialogPrimitive.Overlay.displayName + +const DialogContent = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, children, ...props }, ref) => ( + + + + {children} + + + Close + + + +)) +DialogContent.displayName = DialogPrimitive.Content.displayName + +const DialogHeader = ({ + className, + ...props +}: React.HTMLAttributes) => ( +
+) +DialogHeader.displayName = "DialogHeader" + +const DialogFooter = ({ + className, + ...props +}: React.HTMLAttributes) => ( +
+) +DialogFooter.displayName = "DialogFooter" + +const DialogTitle = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +DialogTitle.displayName = DialogPrimitive.Title.displayName + +const DialogDescription = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +DialogDescription.displayName = DialogPrimitive.Description.displayName + +export { + Dialog, + DialogPortal, + DialogOverlay, + DialogTrigger, + DialogClose, + DialogContent, + DialogHeader, + DialogFooter, + DialogTitle, + DialogDescription, +} diff --git a/examples/vercel-ai-sdk-chat-app/src/components/ui/input.tsx b/examples/vercel-ai-sdk-chat-app/src/components/ui/input.tsx new file mode 100644 index 0000000000..5af26b2c1a --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/components/ui/input.tsx @@ -0,0 +1,25 @@ +import * as React from "react" + +import { cn } from "@/lib/utils" + +export interface InputProps + extends React.InputHTMLAttributes {} + +const Input = React.forwardRef( + ({ className, type, ...props }, ref) => { + return ( + + ) + } +) +Input.displayName = "Input" + +export { Input } diff --git a/examples/vercel-ai-sdk-chat-app/src/components/ui/label.tsx b/examples/vercel-ai-sdk-chat-app/src/components/ui/label.tsx new file mode 100644 index 0000000000..683faa7938 --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/components/ui/label.tsx @@ -0,0 +1,24 @@ +import * as React from "react" +import * as LabelPrimitive from "@radix-ui/react-label" +import { cva, type VariantProps } from "class-variance-authority" + +import { cn } from "@/lib/utils" + +const labelVariants = cva( + "text-sm font-medium leading-none peer-disabled:cursor-not-allowed peer-disabled:opacity-70" +) + +const Label = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef & + VariantProps +>(({ className, ...props }, ref) => ( + +)) +Label.displayName = LabelPrimitive.Root.displayName + +export { Label } diff --git a/examples/vercel-ai-sdk-chat-app/src/components/ui/scroll-area.tsx b/examples/vercel-ai-sdk-chat-app/src/components/ui/scroll-area.tsx new file mode 100644 index 0000000000..cf253cf170 --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/components/ui/scroll-area.tsx @@ -0,0 +1,46 @@ +import * as React from "react" +import * as ScrollAreaPrimitive from "@radix-ui/react-scroll-area" + +import { cn } from "@/lib/utils" + +const ScrollArea = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, children, ...props }, ref) => ( + + + {children} + + + + +)) +ScrollArea.displayName = ScrollAreaPrimitive.Root.displayName + +const ScrollBar = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, orientation = "vertical", ...props }, ref) => ( + + + +)) +ScrollBar.displayName = ScrollAreaPrimitive.ScrollAreaScrollbar.displayName + +export { ScrollArea, ScrollBar } diff --git a/examples/vercel-ai-sdk-chat-app/src/components/ui/select.tsx b/examples/vercel-ai-sdk-chat-app/src/components/ui/select.tsx new file mode 100644 index 0000000000..ac2a8f2b9c --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/components/ui/select.tsx @@ -0,0 +1,164 @@ +"use client" + +import * as React from "react" +import { + CaretSortIcon, + CheckIcon, + ChevronDownIcon, + ChevronUpIcon, +} from "@radix-ui/react-icons" +import * as SelectPrimitive from "@radix-ui/react-select" + +import { cn } from "@/lib/utils" + +const Select = 
SelectPrimitive.Root + +const SelectGroup = SelectPrimitive.Group + +const SelectValue = SelectPrimitive.Value + +const SelectTrigger = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, children, ...props }, ref) => ( + span]:line-clamp-1", + className + )} + {...props} + > + {children} + + + + +)) +SelectTrigger.displayName = SelectPrimitive.Trigger.displayName + +const SelectScrollUpButton = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + + + +)) +SelectScrollUpButton.displayName = SelectPrimitive.ScrollUpButton.displayName + +const SelectScrollDownButton = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + + + +)) +SelectScrollDownButton.displayName = + SelectPrimitive.ScrollDownButton.displayName + +const SelectContent = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, children, position = "popper", ...props }, ref) => ( + + + + + {children} + + + + +)) +SelectContent.displayName = SelectPrimitive.Content.displayName + +const SelectLabel = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +SelectLabel.displayName = SelectPrimitive.Label.displayName + +const SelectItem = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, children, ...props }, ref) => ( + + + + + + + {children} + +)) +SelectItem.displayName = SelectPrimitive.Item.displayName + +const SelectSeparator = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +SelectSeparator.displayName = SelectPrimitive.Separator.displayName + +export { + Select, + SelectGroup, + SelectValue, + SelectTrigger, + SelectContent, + SelectLabel, + SelectItem, + SelectSeparator, + SelectScrollUpButton, + SelectScrollDownButton, +} diff --git a/examples/vercel-ai-sdk-chat-app/src/contexts/GlobalContext.tsx b/examples/vercel-ai-sdk-chat-app/src/contexts/GlobalContext.tsx new file mode 100644 index 0000000000..10f1b683b2 --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/contexts/GlobalContext.tsx @@ -0,0 +1,324 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ +import { createContext, useEffect, useState } from "react"; +import { createMem0, searchMemories } from "@mem0/vercel-ai-provider"; +import { LanguageModelV1Prompt, streamText } from "ai"; +import { Message, Memory, FileInfo } from "@/types"; +import { Buffer } from 'buffer'; + +const GlobalContext = createContext({}); + +const WelcomeMessage: Message = { + id: "1", + content: + "👋 Hi there! I'm your personal assistant. How can I help you today? 😊", + sender: "assistant", + timestamp: new Date().toLocaleTimeString(), +}; + +const InvalidConfigMessage: Message = { + id: "2", + content: + "Invalid configuration. Please check your API keys, and add a user and try again.", + sender: "assistant", + timestamp: new Date().toLocaleTimeString(), +}; + +const SomethingWentWrongMessage: Message = { + id: "3", + content: "Something went wrong. 
Please try again.", + sender: "assistant", + timestamp: new Date().toLocaleTimeString(), +}; + +const models = { + "openai": "gpt-4o", + "anthropic": "claude-3-haiku-20240307", + "cohere": "command-r-plus", + "groq": "gemma2-9b-it" +} + +const getModel = (provider: string) => { + switch (provider) { + case "openai": + return models.openai; + case "anthropic": + return models.anthropic; + case "cohere": + return models.cohere; + case "groq": + return models.groq; + default: + return models.openai; + } +} + +const GlobalState = (props: any) => { + const [memories, setMemories] = useState([]); + const [messages, setMessages] = useState([]); + const [selectedUser, setSelectedUser] = useState(""); + const [thinking, setThinking] = useState(false); + const [selectedOpenAIKey, setSelectedOpenAIKey] = useState(""); + const [selectedMem0Key, setSelectedMem0Key] = useState(""); + const [selectedProvider, setSelectedProvider] = useState("openai"); + const [selectedFile, setSelectedFile] = useState(null) + const [file, setFile] = useState(null) + + const mem0 = createMem0({ + provider: selectedProvider, + mem0ApiKey: selectedMem0Key, + apiKey: selectedOpenAIKey, + }); + + const clearConfiguration = () => { + localStorage.removeItem("mem0ApiKey"); + localStorage.removeItem("openaiApiKey"); + localStorage.removeItem("provider"); + setSelectedMem0Key(""); + setSelectedOpenAIKey(""); + setSelectedProvider("openai"); + setSelectedUser(""); + setMessages([WelcomeMessage]); + setMemories([]); + setFile(null); + }; + + const selectorHandler = (mem0: string, openai: string, provider: string) => { + setSelectedMem0Key(mem0); + setSelectedOpenAIKey(openai); + setSelectedProvider(provider); + localStorage.setItem("mem0ApiKey", mem0); + localStorage.setItem("openaiApiKey", openai); + localStorage.setItem("provider", provider); + }; + + + useEffect(() => { + const mem0 = localStorage.getItem("mem0ApiKey"); + const openai = localStorage.getItem("openaiApiKey"); + const provider = localStorage.getItem("provider"); + const user = localStorage.getItem("user"); + if (mem0 && openai && provider) { + selectorHandler(mem0, openai, provider); + } + if (user) { + setSelectedUser(user); + } + }, []); + + const selectUserHandler = (user: string) => { + setSelectedUser(user); + localStorage.setItem("user", user); + }; + + const clearUserHandler = () => { + setSelectedUser(""); + setMemories([]); + }; + + const getMemories = async (messages: LanguageModelV1Prompt) => { + try { + const smemories = await searchMemories(messages, { + user_id: selectedUser || "", + mem0ApiKey: import.meta.env.VITE_MEM0_API_KEY, + }); + + const newMemories = smemories.map((memory: any) => ({ + id: memory.id, + content: memory.memory, + timestamp: memory.updated_at, + tags: memory.categories, + })); + setMemories(newMemories); + } catch (error) { + console.error("Error in getMemories:", error); + } + }; + + const handleSend = async (inputValue: string) => { + if (!inputValue.trim() && !file) return; + if (!selectedUser) { + const newMessage: Message = { + id: Date.now().toString(), + content: inputValue, + sender: "user", + timestamp: new Date().toLocaleTimeString(), + }; + setMessages((prev) => [...prev, newMessage, InvalidConfigMessage]); + return; + } + + const userMessage: Message = { + id: Date.now().toString(), + content: inputValue, + sender: "user", + timestamp: new Date().toLocaleTimeString(), + }; + + let fileData; + if (file) { + if (file.type.startsWith("image/")) { + // Convert image to Base64 + fileData = await convertToBase64(file); + 
userMessage.image = fileData; + } else if (file.type.startsWith("audio/")) { + // Convert audio to ArrayBuffer + fileData = await getFileBuffer(file); + userMessage.audio = fileData; + } + } + + // Update the state with the new user message + setMessages((prev) => [...prev, userMessage]); + setThinking(true); + + // Transform messages into the required format + const messagesForPrompt: LanguageModelV1Prompt = []; + messages.map((message) => { + const messageContent: any = { + role: message.sender, + content: [ + { + type: "text", + text: message.content, + }, + ], + }; + if (message.image) { + messageContent.content.push({ + type: "image", + image: message.image, + }); + } + if (message.audio) { + messageContent.content.push({ + type: 'file', + mimeType: 'audio/mpeg', + data: message.audio, + }); + } + if(!message.audio) messagesForPrompt.push(messageContent); + }); + + const newMessage: any = { + role: "user", + content: [ + { + type: "text", + text: inputValue, + }, + ], + }; + if (file) { + if (file.type.startsWith("image/")) { + newMessage.content.push({ + type: "image", + image: userMessage.image, + }); + } else if (file.type.startsWith("audio/")) { + newMessage.content.push({ + type: 'file', + mimeType: 'audio/mpeg', + data: userMessage.audio, + }); + } + } + + messagesForPrompt.push(newMessage); + getMemories(messagesForPrompt); + + setFile(null); + setSelectedFile(null); + + try { + const { textStream } = await streamText({ + model: mem0(getModel(selectedProvider), { + user_id: selectedUser || "", + }), + messages: messagesForPrompt, + }); + + const assistantMessageId = Date.now() + 1; + const assistantMessage: Message = { + id: assistantMessageId.toString(), + content: "", + sender: "assistant", + timestamp: new Date().toLocaleTimeString(), + }; + + setMessages((prev) => [...prev, assistantMessage]); + + // Stream the text part by part + for await (const textPart of textStream) { + assistantMessage.content += textPart; + setThinking(false); + setFile(null); + setSelectedFile(null); + + setMessages((prev) => + prev.map((msg) => + msg.id === assistantMessageId.toString() + ? 
{ ...msg, content: assistantMessage.content } + : msg + ) + ); + } + + setThinking(false); + } catch (error) { + console.error("Error in handleSend:", error); + setMessages((prev) => [...prev, SomethingWentWrongMessage]); + setThinking(false); + setFile(null); + setSelectedFile(null); + } + }; + + useEffect(() => { + setMessages([WelcomeMessage]); + }, []); + + return ( + + {props.children} + + ); +}; + +export default GlobalContext; +export { GlobalState }; + + +const convertToBase64 = (file: File): Promise => { + return new Promise((resolve, reject) => { + const reader = new FileReader(); + reader.readAsDataURL(file); + reader.onload = () => resolve(reader.result as string); // Resolve with Base64 string + reader.onerror = error => reject(error); // Reject on error + }); +}; + +async function getFileBuffer(file: any) { + const response = await fetch(file); + const arrayBuffer = await response.arrayBuffer(); + const buffer = Buffer.from(arrayBuffer); + return buffer; +} \ No newline at end of file diff --git a/examples/vercel-ai-sdk-chat-app/src/index.css b/examples/vercel-ai-sdk-chat-app/src/index.css new file mode 100644 index 0000000000..405a75d58d --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/index.css @@ -0,0 +1,97 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; +@layer base { + :root { + --background: 0 0% 100%; + --foreground: 240 10% 3.9%; + --card: 0 0% 100%; + --card-foreground: 240 10% 3.9%; + --popover: 0 0% 100%; + --popover-foreground: 240 10% 3.9%; + --primary: 240 5.9% 10%; + --primary-foreground: 0 0% 98%; + --secondary: 240 4.8% 95.9%; + --secondary-foreground: 240 5.9% 10%; + --muted: 240 4.8% 95.9%; + --muted-foreground: 240 3.8% 46.1%; + --accent: 240 4.8% 95.9%; + --accent-foreground: 240 5.9% 10%; + --destructive: 0 84.2% 60.2%; + --destructive-foreground: 0 0% 98%; + --border: 240 5.9% 90%; + --input: 240 5.9% 90%; + --ring: 240 10% 3.9%; + --chart-1: 12 76% 61%; + --chart-2: 173 58% 39%; + --chart-3: 197 37% 24%; + --chart-4: 43 74% 66%; + --chart-5: 27 87% 67%; + --radius: 0.5rem + } + .dark { + --background: 240 10% 3.9%; + --foreground: 0 0% 98%; + --card: 240 10% 3.9%; + --card-foreground: 0 0% 98%; + --popover: 240 10% 3.9%; + --popover-foreground: 0 0% 98%; + --primary: 0 0% 98%; + --primary-foreground: 240 5.9% 10%; + --secondary: 240 3.7% 15.9%; + --secondary-foreground: 0 0% 98%; + --muted: 240 3.7% 15.9%; + --muted-foreground: 240 5% 64.9%; + --accent: 240 3.7% 15.9%; + --accent-foreground: 0 0% 98%; + --destructive: 0 62.8% 30.6%; + --destructive-foreground: 0 0% 98%; + --border: 240 3.7% 15.9%; + --input: 240 3.7% 15.9%; + --ring: 240 4.9% 83.9%; + --chart-1: 220 70% 50%; + --chart-2: 160 60% 45%; + --chart-3: 30 80% 55%; + --chart-4: 280 65% 60%; + --chart-5: 340 75% 55% + } +} +@layer base { + * { + @apply border-border; + } + body { + @apply bg-background text-foreground; + } +} + +.loader { + display: flex; + align-items: flex-end; + gap: 5px; +} + +.ball { + width: 6px; + height: 6px; + background-color: #4e4e4e; + border-radius: 50%; + animation: bounce 0.6s infinite alternate; +} + +.ball:nth-child(2) { + animation-delay: 0.2s; +} + +.ball:nth-child(3) { + animation-delay: 0.4s; +} + +@keyframes bounce { + from { + transform: translateY(0); + } + to { + transform: translateY(-4px); + } +} diff --git a/examples/vercel-ai-sdk-chat-app/src/main.tsx b/examples/vercel-ai-sdk-chat-app/src/main.tsx new file mode 100644 index 0000000000..bef5202a32 --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/main.tsx @@ -0,0 +1,10 
@@
+import { StrictMode } from 'react'
+import { createRoot } from 'react-dom/client'
+import './index.css'
+import App from './App.tsx'
+
+createRoot(document.getElementById('root')!).render(
+  <StrictMode>
+    <App />
+  </StrictMode>,
+)
diff --git a/examples/vercel-ai-sdk-chat-app/src/page.tsx b/examples/vercel-ai-sdk-chat-app/src/page.tsx
new file mode 100644
index 0000000000..1f99e8561c
--- /dev/null
+++ b/examples/vercel-ai-sdk-chat-app/src/page.tsx
@@ -0,0 +1,14 @@
+"use client";
+import { GlobalState } from "./contexts/GlobalContext";
+import Component from "./pages/home";
+
+
+export default function Home() {
+  return (
+    <GlobalState>
+      <Component />
+    </GlobalState>
+ ); +} diff --git a/examples/vercel-ai-sdk-chat-app/src/pages/home.tsx b/examples/vercel-ai-sdk-chat-app/src/pages/home.tsx new file mode 100644 index 0000000000..f72b175ee8 --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/pages/home.tsx @@ -0,0 +1,41 @@ +import { useState } from "react"; +import ApiSettingsPopup from "../components/api-settings-popup"; +import Memories from "../components/memories"; +import Header from "../components/header"; +import Messages from "../components/messages"; +import InputArea from "../components/input-area"; +import ChevronToggle from "../components/chevron-toggle"; + + +export default function Home() { + const [isMemoriesExpanded, setIsMemoriesExpanded] = useState(true); + const [isSettingsOpen, setIsSettingsOpen] = useState(false); + + return ( + <> + +
+ {/* Main Chat Area */} +
+ {/* Header */} +
+ + {/* Messages */} + + + {/* Input Area */} + +
+ + {/* Chevron Toggle */} + + + {/* Memories Sidebar */} + +
+ + ); +} diff --git a/examples/vercel-ai-sdk-chat-app/src/types.ts b/examples/vercel-ai-sdk-chat-app/src/types.ts new file mode 100644 index 0000000000..770bc23f7d --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/types.ts @@ -0,0 +1,22 @@ +/* eslint-disable @typescript-eslint/no-explicit-any */ +export interface Memory { + id: string; + content: string; + timestamp: string; + tags: string[]; +} + +export interface Message { + id: string; + content: string; + sender: "user" | "assistant"; + timestamp: string; + image?: string; + audio?: any; +} + +export interface FileInfo { + name: string; + type: string; + size: number; +} \ No newline at end of file diff --git a/examples/vercel-ai-sdk-chat-app/src/vite-env.d.ts b/examples/vercel-ai-sdk-chat-app/src/vite-env.d.ts new file mode 100644 index 0000000000..11f02fe2a0 --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/src/vite-env.d.ts @@ -0,0 +1 @@ +/// diff --git a/examples/vercel-ai-sdk-chat-app/tailwind.config.js b/examples/vercel-ai-sdk-chat-app/tailwind.config.js new file mode 100644 index 0000000000..150128518e --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/tailwind.config.js @@ -0,0 +1,62 @@ +// tailwind.config.js +/* eslint-env node */ + +/** @type {import('tailwindcss').Config} */ +import tailwindcssAnimate from 'tailwindcss-animate'; + +export default { + darkMode: ["class"], + content: ["./index.html", "./src/**/*.{ts,tsx,js,jsx}"], + theme: { + extend: { + borderRadius: { + lg: 'var(--radius)', + md: 'calc(var(--radius) - 2px)', + sm: 'calc(var(--radius) - 4px)', + }, + colors: { + background: 'hsl(var(--background))', + foreground: 'hsl(var(--foreground))', + card: { + DEFAULT: 'hsl(var(--card))', + foreground: 'hsl(var(--card-foreground))', + }, + popover: { + DEFAULT: 'hsl(var(--popover))', + foreground: 'hsl(var(--popover-foreground))', + }, + primary: { + DEFAULT: 'hsl(var(--primary))', + foreground: 'hsl(var(--primary-foreground))', + }, + secondary: { + DEFAULT: 'hsl(var(--secondary))', + foreground: 'hsl(var(--secondary-foreground))', + }, + muted: { + DEFAULT: 'hsl(var(--muted))', + foreground: 'hsl(var(--muted-foreground))', + }, + accent: { + DEFAULT: 'hsl(var(--accent))', + foreground: 'hsl(var(--accent-foreground))', + }, + destructive: { + DEFAULT: 'hsl(var(--destructive))', + foreground: 'hsl(var(--destructive-foreground))', + }, + border: 'hsl(var(--border))', + input: 'hsl(var(--input))', + ring: 'hsl(var(--ring))', + chart: { + '1': 'hsl(var(--chart-1))', + '2': 'hsl(var(--chart-2))', + '3': 'hsl(var(--chart-3))', + '4': 'hsl(var(--chart-4))', + '5': 'hsl(var(--chart-5))', + }, + }, + }, + }, + plugins: [tailwindcssAnimate], +}; diff --git a/examples/vercel-ai-sdk-chat-app/tsconfig.app.json b/examples/vercel-ai-sdk-chat-app/tsconfig.app.json new file mode 100644 index 0000000000..6d0c89af2c --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/tsconfig.app.json @@ -0,0 +1,32 @@ +{ + "compilerOptions": { + "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo", + "target": "ES2020", + "useDefineForClassFields": true, + "lib": ["ES2020", "DOM", "DOM.Iterable"], + "module": "ESNext", + "skipLibCheck": true, + "baseUrl": ".", + "paths": { + "@/*": [ + "./src/*" + ] + }, + + /* Bundler mode */ + "moduleResolution": "Bundler", + "allowImportingTsExtensions": true, + "isolatedModules": true, + "moduleDetection": "force", + "noEmit": true, + "jsx": "react-jsx", + + /* Linting */ + "strict": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noFallthroughCasesInSwitch": true, + 
"noUncheckedSideEffectImports": true + }, + "include": ["src"] +} diff --git a/examples/vercel-ai-sdk-chat-app/tsconfig.json b/examples/vercel-ai-sdk-chat-app/tsconfig.json new file mode 100644 index 0000000000..fec8c8e5c2 --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/tsconfig.json @@ -0,0 +1,13 @@ +{ + "files": [], + "references": [ + { "path": "./tsconfig.app.json" }, + { "path": "./tsconfig.node.json" } + ], + "compilerOptions": { + "baseUrl": ".", + "paths": { + "@/*": ["./src/*"] + } + } +} diff --git a/examples/vercel-ai-sdk-chat-app/tsconfig.node.json b/examples/vercel-ai-sdk-chat-app/tsconfig.node.json new file mode 100644 index 0000000000..abcd7f0dac --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/tsconfig.node.json @@ -0,0 +1,24 @@ +{ + "compilerOptions": { + "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo", + "target": "ES2022", + "lib": ["ES2023"], + "module": "ESNext", + "skipLibCheck": true, + + /* Bundler mode */ + "moduleResolution": "Bundler", + "allowImportingTsExtensions": true, + "isolatedModules": true, + "moduleDetection": "force", + "noEmit": true, + + /* Linting */ + "strict": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noFallthroughCasesInSwitch": true, + "noUncheckedSideEffectImports": true + }, + "include": ["vite.config.ts"] +} diff --git a/examples/vercel-ai-sdk-chat-app/vite.config.ts b/examples/vercel-ai-sdk-chat-app/vite.config.ts new file mode 100644 index 0000000000..a761a87054 --- /dev/null +++ b/examples/vercel-ai-sdk-chat-app/vite.config.ts @@ -0,0 +1,13 @@ +import path from "path" +import react from "@vitejs/plugin-react" +import { defineConfig } from "vite" + +export default defineConfig({ + plugins: [react()], + resolve: { + alias: { + "@": path.resolve(__dirname, "./src"), + buffer: 'buffer' + }, + }, +}) diff --git a/vercel-ai-sdk/.gitattributes b/vercel-ai-sdk/.gitattributes new file mode 100644 index 0000000000..dfe0770424 --- /dev/null +++ b/vercel-ai-sdk/.gitattributes @@ -0,0 +1,2 @@ +# Auto detect text files and perform LF normalization +* text=auto diff --git a/vercel-ai-sdk/.gitignore b/vercel-ai-sdk/.gitignore new file mode 100644 index 0000000000..72d04c8773 --- /dev/null +++ b/vercel-ai-sdk/.gitignore @@ -0,0 +1,10 @@ +**/.env +**/node_modules +**/.DS_Store + +# Ignore test-related files +**/coverage.data +**/coverage/ + +# Build files +**/dist \ No newline at end of file diff --git a/vercel-ai-sdk/README.md b/vercel-ai-sdk/README.md new file mode 100644 index 0000000000..aaff79efe5 --- /dev/null +++ b/vercel-ai-sdk/README.md @@ -0,0 +1,228 @@ +# Mem0 AI SDK Provider + +The **Mem0 AI SDK Provider** is a community-maintained library developed by [Mem0](https://mem0.ai/) to integrate with the Vercel AI SDK. This library brings enhanced AI interaction capabilities to your applications by introducing persistent memory functionality. With Mem0, language model conversations gain memory, enabling more contextualized and personalized responses based on past interactions. + +Discover more of **Mem0** on [GitHub](https://github.com/mem0ai). +Explore the [Mem0 Documentation](https://docs.mem0.ai/overview) to gain deeper control and flexibility in managing your memories. + +For detailed information on using the Vercel AI SDK, refer to Vercel’s [API Reference](https://sdk.vercel.ai/docs/reference) and [Documentation](https://sdk.vercel.ai/docs). 
+ +## Features + +- 🧠 Persistent memory storage for AI conversations +- 🔄 Seamless integration with Vercel AI SDK +- 🚀 Support for multiple LLM providers +- 📝 Rich message format support +- ⚡ Streaming capabilities +- 🔍 Context-aware responses + +## Installation + +```bash +npm install @mem0/vercel-ai-provider +``` + +## Before We Begin + +### Setting Up Mem0 + +1. Obtain your [Mem0 API Key](https://app.mem0.ai/dashboard/api-keys) from the Mem0 dashboard. + +2. Initialize the Mem0 Client: + +```typescript +import { createMem0 } from "@mem0/vercel-ai-provider"; + +const mem0 = createMem0({ + provider: "openai", + mem0ApiKey: "m0-xxx", + apiKey: "openai-api-key", + config: { + compatibility: "strict", + // Additional model-specific configuration options can be added here. + }, +}); +``` + +### Note +By default, the `openai` provider is used, so specifying it is optional: +```typescript +const mem0 = createMem0(); +``` +For better security, consider setting `MEM0_API_KEY` and `OPENAI_API_KEY` as environment variables. + +3. Add Memories to Enhance Context: + +```typescript +import { LanguageModelV1Prompt } from "ai"; +import { addMemories } from "@mem0/vercel-ai-provider"; + +const messages: LanguageModelV1Prompt = [ + { + role: "user", + content: [ + { type: "text", text: "I love red cars." }, + { type: "text", text: "I like Toyota Cars." }, + { type: "text", text: "I prefer SUVs." }, + ], + }, +]; + +await addMemories(messages, { user_id: "borat" }); +``` + +These memories are now stored in your profile. You can view and manage them on the [Mem0 Dashboard](https://app.mem0.ai/dashboard/users). + +### Note: + +For standalone features, such as `addMemories` and `retrieveMemories`, +you must either set `MEM0_API_KEY` as an environment variable or pass it directly in the function call. + +Example: + +```typescript +await addMemories(messages, { user_id: "borat", mem0ApiKey: "m0-xxx" }); +await retrieveMemories(prompt, { user_id: "borat", mem0ApiKey: "m0-xxx" }); +``` + +## Usage Examples + +### 1. Basic Text Generation with Memory Context + +```typescript +import { generateText } from "ai"; +import { createMem0 } from "@mem0/vercel-ai-provider"; + +const mem0 = createMem0(); + +const { text } = await generateText({ + model: mem0("gpt-4-turbo", { + user_id: "borat", + }), + prompt: "Suggest me a good car to buy!", +}); +``` + +### 2. Combining OpenAI Provider with Memory Utils + +```typescript +import { generateText } from "ai"; +import { openai } from "@ai-sdk/openai"; +import { retrieveMemories } from "@mem0/vercel-ai-provider"; + +const prompt = "Suggest me a good car to buy."; +const memories = await retrieveMemories(prompt, { user_id: "borat" }); + +const { text } = await generateText({ + model: openai("gpt-4-turbo"), + prompt: prompt, + system: memories, +}); +``` + +### 3. Structured Message Format with Memory + +```typescript +import { generateText } from "ai"; +import { createMem0 } from "@mem0/vercel-ai-provider"; + +const mem0 = createMem0(); + +const { text } = await generateText({ + model: mem0("gpt-4-turbo", { + user_id: "borat", + }), + messages: [ + { + role: "user", + content: [ + { type: "text", text: "Suggest me a good car to buy." }, + { type: "text", text: "Why is it better than the other cars for me?" }, + { type: "text", text: "Give options for every price range." }, + ], + }, + ], +}); +``` + +### 4. 
Advanced Memory Integration with OpenAI
+
+```typescript
+import { generateText, LanguageModelV1Prompt } from "ai";
+import { openai } from "@ai-sdk/openai";
+import { retrieveMemories } from "@mem0/vercel-ai-provider";
+
+// New format using system parameter for memory context
+const messages: LanguageModelV1Prompt = [
+  {
+    role: "user",
+    content: [
+      { type: "text", text: "Suggest me a good car to buy." },
+      { type: "text", text: "Why is it better than the other cars for me?" },
+      { type: "text", text: "Give options for every price range." },
+    ],
+  },
+];
+
+const memories = await retrieveMemories(messages, { user_id: "borat" });
+
+const { text } = await generateText({
+  model: openai("gpt-4-turbo"),
+  messages: messages,
+  system: memories,
+});
+```
+
+### 5. Streaming Responses with Memory Context
+
+```typescript
+import { streamText } from "ai";
+import { createMem0 } from "@mem0/vercel-ai-provider";
+
+const mem0 = createMem0();
+
+const { textStream } = await streamText({
+  model: mem0("gpt-4-turbo", {
+    user_id: "borat",
+  }),
+  prompt:
+    "Suggest me a good car to buy! Why is it better than the other cars for me? Give options for every price range.",
+});
+
+for await (const textPart of textStream) {
+  process.stdout.write(textPart);
+}
+```
+
+## Core Functions
+
+- `createMem0()`: Initializes a new mem0 provider instance with optional configuration
+- `retrieveMemories()`: Enriches prompts with relevant memories
+- `addMemories()`: Adds memories to your profile
+
+## Configuration Options
+
+```typescript
+const mem0 = createMem0({
+  config: {
+    ...
+    // Additional model-specific configuration options can be added here.
+  },
+});
+```
+
+## Best Practices
+
+1. **User Identification**: Always provide a unique `user_id` identifier for consistent memory retrieval
+2. **Context Management**: Use appropriate context window sizes to balance performance and memory usage
+3. **Error Handling**: Implement proper error handling for memory operations
+4. **Memory Cleanup**: Regularly clean up unused memory contexts to optimize performance
+
+We also support `agent_id`, `app_id`, and `run_id` for scoping memories; see the sketch below, and refer to the [docs](https://docs.mem0.ai/api-reference/memory/add-memories) for details.
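+
+As a rough sketch of these scoping options (the `agent_id` and `run_id` values here are illustrative placeholders, not required names):
+
+```typescript
+import { generateText } from "ai";
+import { createMem0 } from "@mem0/vercel-ai-provider";
+
+const mem0 = createMem0();
+
+const { text } = await generateText({
+  // Memories read and written during this call are associated with
+  // this user/agent/run combination.
+  model: mem0("gpt-4-turbo", {
+    user_id: "borat",
+    agent_id: "travel-agent",        // hypothetical agent identifier
+    run_id: "trip-planning-session", // hypothetical run identifier
+  }),
+  prompt: "Plan a weekend road trip for me.",
+});
+```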
+ +## Notes + +- Requires proper API key configuration for underlying providers (e.g., OpenAI) +- Memory features depend on proper user identification via `user_id` +- Supports both streaming and non-streaming responses +- Compatible with all Vercel AI SDK features and patterns diff --git a/vercel-ai-sdk/config/test-config.ts b/vercel-ai-sdk/config/test-config.ts new file mode 100644 index 0000000000..b59fcbafe6 --- /dev/null +++ b/vercel-ai-sdk/config/test-config.ts @@ -0,0 +1,105 @@ +import dotenv from "dotenv"; +import { createMem0 } from "../src"; + +dotenv.config(); + +export interface Provider { + name: string; + activeModel: string; + apiKey: string | undefined; +} + +export const testConfig = { + apiKey: process.env.MEM0_API_KEY, + userId: "mem0-ai-sdk-test-user-1134774", + deleteId: "", + providers: [ + { + name: "openai", + activeModel: "gpt-4-turbo", + apiKey: process.env.OPENAI_API_KEY, + } + , + { + name: "anthropic", + activeModel: "claude-3-5-sonnet-20240620", + apiKey: process.env.ANTHROPIC_API_KEY, + }, + // { + // name: "groq", + // activeModel: "gemma2-9b-it", + // apiKey: process.env.GROQ_API_KEY, + // }, + { + name: "cohere", + activeModel: "command-r-plus", + apiKey: process.env.COHERE_API_KEY, + } + ], + models: { + openai: "gpt-4-turbo", + anthropic: "claude-3-haiku-20240307", + groq: "gemma2-9b-it", + cohere: "command-r-plus" + }, + apiKeys: { + openai: process.env.OPENAI_API_KEY, + anthropic: process.env.ANTHROPIC_API_KEY, + groq: process.env.GROQ_API_KEY, + cohere: process.env.COHERE_API_KEY, + }, + + createTestClient: (provider: Provider) => { + return createMem0({ + provider: provider.name, + mem0ApiKey: process.env.MEM0_API_KEY, + apiKey: provider.apiKey, + }); + }, + fetchDeleteId: async function () { + const options = { + method: 'GET', + headers: { + Authorization: `Token ${this.apiKey}`, + }, + }; + + try { + const response = await fetch('https://api.mem0.ai/v1/entities/', options); + const data = await response.json(); + const entity = data.results.find((item: any) => item.name === this.userId); + if (entity) { + this.deleteId = entity.id; + } else { + console.error("No matching entity found for userId:", this.userId); + } + } catch (error) { + console.error("Error fetching deleteId:", error); + throw error; + } + }, + deleteUser: async function () { + if (!this.deleteId) { + console.error("deleteId is not set. 
Ensure fetchDeleteId is called first."); + return; + } + + const options = { + method: 'DELETE', + headers: { + Authorization: `Token ${this.apiKey}`, + }, + }; + + try { + const response = await fetch(`https://api.mem0.ai/v1/entities/user/${this.deleteId}/`, options); + if (!response.ok) { + throw new Error(`Failed to delete user: ${response.statusText}`); + } + await response.json(); + } catch (error) { + console.error("Error deleting user:", error); + throw error; + } + }, +}; diff --git a/vercel-ai-sdk/jest.config.js b/vercel-ai-sdk/jest.config.js new file mode 100644 index 0000000000..49b6d5a60f --- /dev/null +++ b/vercel-ai-sdk/jest.config.js @@ -0,0 +1,6 @@ +module.exports = { + preset: 'ts-jest', + testEnvironment: 'node', + globalTeardown: './teardown.ts', +}; + \ No newline at end of file diff --git a/vercel-ai-sdk/nodemon.json b/vercel-ai-sdk/nodemon.json new file mode 100644 index 0000000000..3cb5efa707 --- /dev/null +++ b/vercel-ai-sdk/nodemon.json @@ -0,0 +1,5 @@ +{ + "watch": ["src"], + "ext": ".ts,.js", + "exec": "ts-node ./example/index.ts" +} \ No newline at end of file diff --git a/vercel-ai-sdk/package.json b/vercel-ai-sdk/package.json new file mode 100644 index 0000000000..22f7aacc31 --- /dev/null +++ b/vercel-ai-sdk/package.json @@ -0,0 +1,69 @@ +{ + "name": "@mem0/vercel-ai-provider", + "version": "0.0.7", + "description": "Vercel AI Provider for providing memory to LLMs", + "main": "./dist/index.js", + "module": "./dist/index.mjs", + "types": "./dist/index.d.ts", + "files": [ + "dist/**/*" + ], + "scripts": { + "build": "tsup", + "clean": "rm -rf dist", + "dev": "nodemon", + "lint": "eslint \"./**/*.ts*\"", + "type-check": "tsc --noEmit", + "prettier-check": "prettier --check \"./**/*.ts*\"", + "test": "jest", + "test:edge": "vitest --config vitest.edge.config.js --run", + "test:node": "vitest --config vitest.node.config.js --run" + }, + "keywords": [ + "ai", + "vercel-ai" + ], + "author": "Saket Aryan ", + "license": "Apache-2.0", + "dependencies": { + "@ai-sdk/anthropic": "^0.0.54", + "@ai-sdk/cohere": "^0.0.28", + "@ai-sdk/groq": "^0.0.3", + "@ai-sdk/openai": "^0.0.71", + "@ai-sdk/provider": "^0.0.26", + "@ai-sdk/provider-utils": "^1.0.22", + "ai": "^3.4.31", + "dotenv": "^16.4.5", + "partial-json": "0.1.7", + "ts-node": "^10.9.2", + "zod": "^3.0.0" + }, + "devDependencies": { + "@edge-runtime/vm": "^3.2.0", + "@types/jest": "^29.5.14", + "@types/node": "^18.19.46", + "jest": "^29.7.0", + "nodemon": "^3.1.7", + "ts-jest": "^29.2.5", + "tsup": "^8.3.0", + "typescript": "5.5.4" + }, + "peerDependencies": { + "zod": "^3.0.0" + }, + "peerDependenciesMeta": { + "zod": { + "optional": true + } + }, + "engines": { + "node": ">=18" + }, + "publishConfig": { + "access": "public" + }, + "directories": { + "example": "example", + "test": "tests" + } +} diff --git a/vercel-ai-sdk/src/index.ts b/vercel-ai-sdk/src/index.ts new file mode 100644 index 0000000000..584d73f014 --- /dev/null +++ b/vercel-ai-sdk/src/index.ts @@ -0,0 +1,4 @@ +export * from './mem0-facade' +export type { Mem0Provider, Mem0ProviderSettings } from './mem0-provider' +export { createMem0, mem0 } from './mem0-provider' +export {addMemories, retrieveMemories, searchMemories } from './mem0-utils' \ No newline at end of file diff --git a/vercel-ai-sdk/src/mem0-chat-language-model.ts b/vercel-ai-sdk/src/mem0-chat-language-model.ts new file mode 100644 index 0000000000..3ff53efcf3 --- /dev/null +++ b/vercel-ai-sdk/src/mem0-chat-language-model.ts @@ -0,0 +1,150 @@ +/* eslint-disable camelcase */ +import { + 
LanguageModelV1, + LanguageModelV1CallOptions, + LanguageModelV1CallWarning, + LanguageModelV1FinishReason, + LanguageModelV1FunctionToolCall, + LanguageModelV1LogProbs, + LanguageModelV1ProviderMetadata, + LanguageModelV1StreamPart, +} from "@ai-sdk/provider"; + +import { Mem0ChatModelId, Mem0ChatSettings } from "./mem0-chat-settings"; +import { Mem0ClassSelector } from "./mem0-provider-selector"; +import { filterStream } from "./stream-utils"; +import { Mem0Config } from "./mem0-chat-settings"; +import { OpenAIProviderSettings } from "@ai-sdk/openai"; +import { Mem0ProviderSettings } from "./mem0-provider"; + + +interface Mem0ChatConfig { + baseURL: string; + fetch?: typeof fetch; + headers: () => Record; + provider: string; + organization?: string; + project?: string; + name?: string; + apiKey?: string; + mem0_api_key?: string; +} + +export class Mem0ChatLanguageModel implements LanguageModelV1 { + readonly specificationVersion = "v1"; + readonly defaultObjectGenerationMode = "json"; + readonly supportsImageUrls = false; + + constructor( + public readonly modelId: Mem0ChatModelId, + public readonly settings: Mem0ChatSettings, + public readonly config: Mem0ChatConfig, + public readonly provider_config?: OpenAIProviderSettings + ) { + this.provider = config.provider; + } + + provider: string; + supportsStructuredOutputs?: boolean | undefined; + + async doGenerate(options: LanguageModelV1CallOptions): Promise<{ + text?: string; + toolCalls?: Array; + finishReason: LanguageModelV1FinishReason; + usage: { promptTokens: number; completionTokens: number }; + rawCall: { rawPrompt: unknown; rawSettings: Record }; + rawResponse?: { headers?: Record }; + response?: { id?: string; timestamp?: Date; modelId?: string }; + warnings?: LanguageModelV1CallWarning[]; + providerMetadata?: LanguageModelV1ProviderMetadata; + logprobs?: LanguageModelV1LogProbs; + }> { + try { + const provider = this.config.provider; + const mem0_api_key = this.config.mem0_api_key; + const settings: Mem0ProviderSettings = { + provider: provider, + mem0ApiKey: mem0_api_key, + apiKey: this.config.apiKey, + modelType: "chat" + } + const selector = new Mem0ClassSelector(this.modelId, settings,this.provider_config); + let messagesPrompts = options.prompt; + const model = selector.createProvider(); + const user_id = this.settings.user_id; + const app_id = this.settings.app_id; + const agent_id = this.settings.agent_id; + const run_id = this.settings.run_id; + const org_name = this.settings.org_name; + const project_name = this.settings.project_name; + const apiKey = mem0_api_key; + + const config: Mem0Config = {user_id, app_id, agent_id, run_id, org_name, project_name, mem0ApiKey: apiKey}; + + const ans = await model.generateText(messagesPrompts, config); + + + return { + text: ans.text, + finishReason: ans.finishReason, + usage: ans.usage, + rawCall: { + rawPrompt: options.prompt, + rawSettings: {}, + }, + response: ans.response, + warnings: ans.warnings, + }; + } catch (error) { + // Handle errors properly + console.error("Error in doGenerate:", error); + throw new Error("Failed to generate response."); + } + } + + async doStream(options: LanguageModelV1CallOptions): Promise<{ + stream: ReadableStream; + rawCall: { rawPrompt: unknown; rawSettings: Record }; + rawResponse?: { headers?: Record }; + warnings?: LanguageModelV1CallWarning[]; + }> { + try { + const provider = this.config.provider; + const mem0_api_key = this.config.mem0_api_key; + const settings: Mem0ProviderSettings = { + provider: provider, + mem0ApiKey: mem0_api_key, 
+ apiKey: this.config.apiKey, + modelType: "chat" + } + const selector = new Mem0ClassSelector(this.modelId, settings,this.provider_config); + let messagesPrompts = options.prompt; + const model = selector.createProvider(); + const user_id = this.settings.user_id; + const app_id = this.settings.app_id; + const agent_id = this.settings.agent_id; + const run_id = this.settings.run_id; + const org_name = this.settings.org_name; + const project_name = this.settings.project_name; + + const apiKey = mem0_api_key; + + const config: Mem0Config = {user_id, app_id, agent_id, run_id, org_name, project_name, mem0ApiKey: apiKey}; + const response = await model.streamText(messagesPrompts, config); + // @ts-ignore + const filteredStream = await filterStream(response.originalStream); + return { + // @ts-ignore + stream: filteredStream, + rawCall: { + rawPrompt: options.prompt, + rawSettings: {}, + }, + ...response, + }; + } catch (error) { + console.error("Error in doStream:", error); + throw new Error("Streaming failed or method not implemented."); + } + } +} diff --git a/vercel-ai-sdk/src/mem0-chat-settings.ts b/vercel-ai-sdk/src/mem0-chat-settings.ts new file mode 100644 index 0000000000..737a9c8646 --- /dev/null +++ b/vercel-ai-sdk/src/mem0-chat-settings.ts @@ -0,0 +1,36 @@ +import { OpenAIChatSettings } from "@ai-sdk/openai/internal"; + +export type Mem0ChatModelId = + | "o1-preview" + | "o1-mini" + | "gpt-4o" + | "gpt-4o-2024-05-13" + | "gpt-4o-2024-08-06" + | "gpt-4o-audio-preview" + | "gpt-4o-audio-preview-2024-10-01" + | "gpt-4o-mini" + | "gpt-4o-mini-2024-07-18" + | "gpt-4-turbo" + | "gpt-4-turbo-2024-04-09" + | "gpt-4-turbo-preview" + | "gpt-4-0125-preview" + | "gpt-4-1106-preview" + | "gpt-4" + | "gpt-4-0613" + | "gpt-3.5-turbo-0125" + | "gpt-3.5-turbo" + | "gpt-3.5-turbo-1106" + | (string & NonNullable); + +export interface Mem0ChatSettings extends OpenAIChatSettings { + user_id?: string; + app_id?: string; + agent_id?: string; + run_id?: string; + org_name?: string; + project_name?: string; + mem0ApiKey?: string; + structuredOutputs?: boolean; +} + +export interface Mem0Config extends Mem0ChatSettings {} diff --git a/vercel-ai-sdk/src/mem0-completion-language-model.ts b/vercel-ai-sdk/src/mem0-completion-language-model.ts new file mode 100644 index 0000000000..b10f50eda8 --- /dev/null +++ b/vercel-ai-sdk/src/mem0-completion-language-model.ts @@ -0,0 +1,150 @@ +/* eslint-disable camelcase */ +import { + LanguageModelV1, + LanguageModelV1CallOptions, + LanguageModelV1CallWarning, + LanguageModelV1FinishReason, + LanguageModelV1FunctionToolCall, + LanguageModelV1LogProbs, + LanguageModelV1ProviderMetadata, + LanguageModelV1StreamPart, +} from "@ai-sdk/provider"; + +import { Mem0ChatModelId, Mem0ChatSettings } from "./mem0-chat-settings"; +import { Mem0ClassSelector } from "./mem0-provider-selector"; +import { filterStream } from "./stream-utils"; +import { Mem0Config } from "./mem0-completion-settings"; +import { OpenAIProviderSettings } from "@ai-sdk/openai"; +import { Mem0ProviderSettings } from "./mem0-provider"; + + +interface Mem0CompletionConfig { + baseURL: string; + fetch?: typeof fetch; + headers: () => Record; + provider: string; + organization?: string; + project?: string; + name?: string; + apiKey?: string; + mem0_api_key?: string; +} + +export class Mem0CompletionLanguageModel implements LanguageModelV1 { + readonly specificationVersion = "v1"; + readonly defaultObjectGenerationMode = "json"; + readonly supportsImageUrls = false; + + constructor( + public readonly modelId: 
Mem0ChatModelId, + public readonly settings: Mem0ChatSettings, + public readonly config: Mem0CompletionConfig, + public readonly provider_config?: OpenAIProviderSettings + ) { + this.provider = config.provider; + } + + provider: string; + supportsStructuredOutputs?: boolean | undefined; + + async doGenerate(options: LanguageModelV1CallOptions): Promise<{ + text?: string; + toolCalls?: Array; + finishReason: LanguageModelV1FinishReason; + usage: { promptTokens: number; completionTokens: number }; + rawCall: { rawPrompt: unknown; rawSettings: Record }; + rawResponse?: { headers?: Record }; + response?: { id?: string; timestamp?: Date; modelId?: string }; + warnings?: LanguageModelV1CallWarning[]; + providerMetadata?: LanguageModelV1ProviderMetadata; + logprobs?: LanguageModelV1LogProbs; + }> { + try { + const provider = this.config.provider; + const mem0_api_key = this.config.mem0_api_key; + const settings: Mem0ProviderSettings = { + provider: provider, + mem0ApiKey: mem0_api_key, + apiKey: this.config.apiKey, + modelType: "completion" + } + const selector = new Mem0ClassSelector(this.modelId, settings,this.provider_config); + let messagesPrompts = options.prompt; + const model = selector.createProvider(); + const user_id = this.settings.user_id; + const app_id = this.settings.app_id; + const agent_id = this.settings.agent_id; + const run_id = this.settings.run_id; + const org_name = this.settings.org_name; + const project_name = this.settings.project_name; + const apiKey = mem0_api_key; + + const config: Mem0Config = {user_id, app_id, agent_id, run_id, org_name, project_name, mem0ApiKey: apiKey, modelType: "completion"}; + + const ans = await model.generateText(messagesPrompts, config); + + + return { + text: ans.text, + finishReason: ans.finishReason, + usage: ans.usage, + rawCall: { + rawPrompt: options.prompt, + rawSettings: {}, + }, + response: ans.response, + warnings: ans.warnings, + }; + } catch (error) { + // Handle errors properly + console.error("Error in doGenerate:", error); + throw new Error("Failed to generate response."); + } + } + + async doStream(options: LanguageModelV1CallOptions): Promise<{ + stream: ReadableStream; + rawCall: { rawPrompt: unknown; rawSettings: Record }; + rawResponse?: { headers?: Record }; + warnings?: LanguageModelV1CallWarning[]; + }> { + try { + const provider = this.config.provider; + const mem0_api_key = this.config.mem0_api_key; + const settings: Mem0ProviderSettings = { + provider: provider, + mem0ApiKey: mem0_api_key, + apiKey: this.config.apiKey, + modelType: "completion" + } + const selector = new Mem0ClassSelector(this.modelId, settings,this.provider_config); + let messagesPrompts = options.prompt; + const model = selector.createProvider(); + const user_id = this.settings.user_id; + const app_id = this.settings.app_id; + const agent_id = this.settings.agent_id; + const run_id = this.settings.run_id; + const org_name = this.settings.org_name; + const project_name = this.settings.project_name; + + const apiKey = mem0_api_key; + + const config: Mem0Config = {user_id, app_id, agent_id, run_id, org_name, project_name, mem0ApiKey: apiKey, modelType: "completion"}; + const response = await model.streamText(messagesPrompts, config); + // @ts-ignore + const filteredStream = await filterStream(response.originalStream); + return { + // @ts-ignore + stream: filteredStream, + rawCall: { + rawPrompt: options.prompt, + rawSettings: {}, + }, + ...response, + }; + } catch (error) { + console.error("Error in doStream:", error); + throw new Error("Streaming 
failed or method not implemented."); + } + } +} diff --git a/vercel-ai-sdk/src/mem0-completion-settings.ts b/vercel-ai-sdk/src/mem0-completion-settings.ts new file mode 100644 index 0000000000..c4ae2e654d --- /dev/null +++ b/vercel-ai-sdk/src/mem0-completion-settings.ts @@ -0,0 +1,19 @@ +import { OpenAICompletionSettings } from "@ai-sdk/openai/internal"; + +export type Mem0CompletionModelId = + | "gpt-3.5-turbo" + | (string & NonNullable); + +export interface Mem0CompletionSettings extends OpenAICompletionSettings { + user_id?: string; + app_id?: string; + agent_id?: string; + run_id?: string; + org_name?: string; + project_name?: string; + mem0ApiKey?: string; + structuredOutputs?: boolean; + modelType?: string; +} + +export interface Mem0Config extends Mem0CompletionSettings {} diff --git a/vercel-ai-sdk/src/mem0-facade.ts b/vercel-ai-sdk/src/mem0-facade.ts new file mode 100644 index 0000000000..6702166469 --- /dev/null +++ b/vercel-ai-sdk/src/mem0-facade.ts @@ -0,0 +1,36 @@ +import { withoutTrailingSlash } from '@ai-sdk/provider-utils' + +import { Mem0ChatLanguageModel } from './mem0-chat-language-model' +import { Mem0ChatModelId, Mem0ChatSettings } from './mem0-chat-settings' +import { Mem0ProviderSettings } from './mem0-provider' + +export class Mem0 { + readonly baseURL: string + + readonly headers?: Record + + constructor(options: Mem0ProviderSettings = { + provider: 'openai', + }) { + this.baseURL = + withoutTrailingSlash(options.baseURL) ?? 'http://127.0.0.1:11434/api' + + this.headers = options.headers + } + + private get baseConfig() { + return { + baseURL: this.baseURL, + headers: () => ({ + ...this.headers, + }), + } + } + + chat(modelId: Mem0ChatModelId, settings: Mem0ChatSettings = {}) { + return new Mem0ChatLanguageModel(modelId, settings, { + provider: 'openai', + ...this.baseConfig, + }) + } +} \ No newline at end of file diff --git a/vercel-ai-sdk/src/mem0-generic-language-model.ts b/vercel-ai-sdk/src/mem0-generic-language-model.ts new file mode 100644 index 0000000000..c315975269 --- /dev/null +++ b/vercel-ai-sdk/src/mem0-generic-language-model.ts @@ -0,0 +1,148 @@ +/* eslint-disable camelcase */ +import { + LanguageModelV1, + LanguageModelV1CallOptions, + LanguageModelV1CallWarning, + LanguageModelV1FinishReason, + LanguageModelV1FunctionToolCall, + LanguageModelV1LogProbs, + LanguageModelV1ProviderMetadata, + LanguageModelV1StreamPart, +} from "@ai-sdk/provider"; + +import { Mem0ChatModelId, Mem0ChatSettings } from "./mem0-chat-settings"; +import { Mem0ClassSelector } from "./mem0-provider-selector"; +import { filterStream } from "./stream-utils"; +import { Mem0Config } from "./mem0-chat-settings"; +import { OpenAIProviderSettings } from "@ai-sdk/openai"; +import { Mem0ProviderSettings } from "./mem0-provider"; + + +interface Mem0ChatConfig { + baseURL: string; + fetch?: typeof fetch; + headers: () => Record; + provider: string; + organization?: string; + project?: string; + name?: string; + apiKey?: string; + mem0_api_key?: string; +} + +export class Mem0GenericLanguageModel implements LanguageModelV1 { + readonly specificationVersion = "v1"; + readonly defaultObjectGenerationMode = "json"; + readonly supportsImageUrls = false; + + constructor( + public readonly modelId: Mem0ChatModelId, + public readonly settings: Mem0ChatSettings, + public readonly config: Mem0ChatConfig, + public readonly provider_config?: OpenAIProviderSettings + ) { + this.provider = config.provider; + } + + provider: string; + supportsStructuredOutputs?: boolean | undefined; + + async 
doGenerate(options: LanguageModelV1CallOptions): Promise<{ + text?: string; + toolCalls?: Array; + finishReason: LanguageModelV1FinishReason; + usage: { promptTokens: number; completionTokens: number }; + rawCall: { rawPrompt: unknown; rawSettings: Record }; + rawResponse?: { headers?: Record }; + response?: { id?: string; timestamp?: Date; modelId?: string }; + warnings?: LanguageModelV1CallWarning[]; + providerMetadata?: LanguageModelV1ProviderMetadata; + logprobs?: LanguageModelV1LogProbs; + }> { + try { + const provider = this.config.provider; + const mem0_api_key = this.config.mem0_api_key; + const settings: Mem0ProviderSettings = { + provider: provider, + mem0ApiKey: mem0_api_key, + apiKey: this.config.apiKey, + } + const selector = new Mem0ClassSelector(this.modelId, settings,this.provider_config); + let messagesPrompts = options.prompt; + const model = selector.createProvider(); + const user_id = this.settings.user_id; + const app_id = this.settings.app_id; + const agent_id = this.settings.agent_id; + const run_id = this.settings.run_id; + const org_name = this.settings.org_name; + const project_name = this.settings.project_name; + const apiKey = mem0_api_key; + + const config: Mem0Config = {user_id, app_id, agent_id, run_id, org_name, project_name, mem0ApiKey: apiKey}; + + const ans = await model.generateText(messagesPrompts, config); + + + return { + text: ans.text, + finishReason: ans.finishReason, + usage: ans.usage, + rawCall: { + rawPrompt: options.prompt, + rawSettings: {}, + }, + response: ans.response, + warnings: ans.warnings, + }; + } catch (error) { + // Handle errors properly + console.error("Error in doGenerate:", error); + throw new Error("Failed to generate response."); + } + } + + async doStream(options: LanguageModelV1CallOptions): Promise<{ + stream: ReadableStream; + rawCall: { rawPrompt: unknown; rawSettings: Record }; + rawResponse?: { headers?: Record }; + warnings?: LanguageModelV1CallWarning[]; + }> { + try { + const provider = this.config.provider; + const mem0_api_key = this.config.mem0_api_key; + const settings: Mem0ProviderSettings = { + provider: provider, + mem0ApiKey: mem0_api_key, + apiKey: this.config.apiKey, + } + const selector = new Mem0ClassSelector(this.modelId, settings,this.provider_config); + let messagesPrompts = options.prompt; + const model = selector.createProvider(); + const user_id = this.settings.user_id; + const app_id = this.settings.app_id; + const agent_id = this.settings.agent_id; + const run_id = this.settings.run_id; + const org_name = this.settings.org_name; + const project_name = this.settings.project_name; + + const apiKey = mem0_api_key; + + const config: Mem0Config = {user_id, app_id, agent_id, run_id, org_name, project_name, mem0ApiKey: apiKey}; + const response = await model.streamText(messagesPrompts, config); + // @ts-ignore + const filteredStream = await filterStream(response.originalStream); + return { + // @ts-ignore + stream: filteredStream, + rawCall: { + rawPrompt: options.prompt, + rawSettings: {}, + }, + ...response, + }; + } catch (error) { + console.error("Error in doStream:", error); + throw new Error("Streaming failed or method not implemented."); + } + } +} diff --git a/vercel-ai-sdk/src/mem0-provider-selector.ts b/vercel-ai-sdk/src/mem0-provider-selector.ts new file mode 100644 index 0000000000..e6737edc23 --- /dev/null +++ b/vercel-ai-sdk/src/mem0-provider-selector.ts @@ -0,0 +1,34 @@ +import { OpenAIProviderSettings } from "@ai-sdk/openai"; +import { Mem0ProviderSettings } from "./mem0-provider"; 
+import Mem0AITextGenerator, { ProviderSettings } from "./provider-response-provider"; + +class Mem0ClassSelector { + modelId: string; + provider_wrapper: string; + model: string; + config: Mem0ProviderSettings; + provider_config?: ProviderSettings; + static supportedProviders = ["openai", "anthropic", "cohere", "groq"]; + + constructor(modelId: string, config: Mem0ProviderSettings, provider_config?: ProviderSettings) { + this.modelId = modelId; + this.provider_wrapper = config.provider || "openai"; + this.model = this.modelId; + this.provider_config = provider_config; + if(config) this.config = config; + else this.config = { + provider: this.provider_wrapper, + }; + + // Check if provider_wrapper is supported + if (!Mem0ClassSelector.supportedProviders.includes(this.provider_wrapper)) { + throw new Error(`Model not supported: ${this.provider_wrapper}`); + } + } + + createProvider() { + return new Mem0AITextGenerator(this.provider_wrapper, this.model, this.config , this.provider_config || {}); + } +} + +export { Mem0ClassSelector }; diff --git a/vercel-ai-sdk/src/mem0-provider.ts b/vercel-ai-sdk/src/mem0-provider.ts new file mode 100644 index 0000000000..55f0f7d6ef --- /dev/null +++ b/vercel-ai-sdk/src/mem0-provider.ts @@ -0,0 +1,145 @@ +import { LanguageModelV1, ProviderV1 } from '@ai-sdk/provider' +import { withoutTrailingSlash } from '@ai-sdk/provider-utils' + +import { Mem0ChatLanguageModel } from './mem0-chat-language-model' +import { Mem0ChatModelId, Mem0ChatSettings } from './mem0-chat-settings' +import { OpenAIProviderSettings } from '@ai-sdk/openai' +import { Mem0CompletionModelId, Mem0CompletionSettings } from './mem0-completion-settings' +import { Mem0GenericLanguageModel } from './mem0-generic-language-model' +import { Mem0CompletionLanguageModel } from './mem0-completion-language-model' + + +export interface Mem0Provider extends ProviderV1 { + (modelId: Mem0ChatModelId, settings?: Mem0ChatSettings): LanguageModelV1 + + chat( + modelId: Mem0ChatModelId, + settings?: Mem0ChatSettings, + ): LanguageModelV1 + + + languageModel( + modelId: Mem0ChatModelId, + settings?: Mem0ChatSettings, + ): LanguageModelV1 + + completion( + modelId: Mem0CompletionModelId, + settings?: Mem0CompletionSettings, + ): LanguageModelV1 +} + +export interface Mem0ProviderSettings extends OpenAIProviderSettings { + baseURL?: string + /** + * Custom fetch implementation. You can use it as a middleware to intercept + * requests or to provide a custom fetch implementation for e.g. testing + */ + fetch?: typeof fetch + /** + * @internal + */ + generateId?: () => string + /** + * Custom headers to include in the requests. + */ + headers?: Record + organization?: string; + project?: string; + name?: string; + mem0ApiKey?: string; + apiKey?: string; + provider?: string; + config?: OpenAIProviderSettings; + modelType?: "completion" | "chat"; +} + +export function createMem0( + options: Mem0ProviderSettings = { + provider: "openai", + }, +): Mem0Provider { + const baseURL = + withoutTrailingSlash(options.baseURL) ?? 
'http://127.0.0.1:11434/api'
+
+  const getHeaders = () => ({
+    ...options.headers,
+  })
+
+  const createGenericModel = (
+    modelId: Mem0ChatModelId,
+    settings: Mem0ChatSettings = {},
+  ) =>
+    new Mem0GenericLanguageModel(modelId, settings, {
+      baseURL,
+      fetch: options.fetch,
+      headers: getHeaders,
+      provider: options.provider || "openai",
+      organization: options.organization,
+      project: options.project,
+      name: options.name,
+      mem0_api_key: options.mem0ApiKey,
+      apiKey: options.apiKey,
+    }, options.config)
+
+  const createChatModel = (
+    modelId: Mem0ChatModelId,
+    settings: Mem0ChatSettings = {},
+  ) =>
+    new Mem0ChatLanguageModel(modelId, settings, {
+      baseURL,
+      fetch: options.fetch,
+      headers: getHeaders,
+      provider: options.provider || "openai",
+      organization: options.organization,
+      project: options.project,
+      name: options.name,
+      mem0_api_key: options.mem0ApiKey,
+      apiKey: options.apiKey,
+    }, options.config)
+
+  const createCompletionModel = (
+    modelId: Mem0CompletionModelId,
+    settings: Mem0CompletionSettings = {}
+  ) =>
+    new Mem0CompletionLanguageModel(
+      modelId,
+      settings,
+      {
+        baseURL,
+        fetch: options.fetch,
+        headers: getHeaders,
+        provider: options.provider || "openai",
+        organization: options.organization,
+        project: options.project,
+        name: options.name,
+        mem0_api_key: options.mem0ApiKey,
+        apiKey: options.apiKey
+      },
+      options.config
+    );
+
+  const provider = function (
+    modelId: Mem0ChatModelId,
+    settings?: Mem0ChatSettings,
+  ) {
+    if (new.target) {
+      throw new Error(
+        'The Mem0 model function cannot be called with the new keyword.',
+      )
+    }
+
+    return createGenericModel(modelId, settings)
+  }
+
+  provider.chat = createChatModel
+  provider.completion = createCompletionModel
+  provider.languageModel = createChatModel
+
+  return provider as unknown as Mem0Provider
+}
+
+export const mem0 = createMem0()
\ No newline at end of file
diff --git a/vercel-ai-sdk/src/mem0-utils.ts b/vercel-ai-sdk/src/mem0-utils.ts
new file mode 100644
index 0000000000..f654604232
--- /dev/null
+++ b/vercel-ai-sdk/src/mem0-utils.ts
@@ -0,0 +1,114 @@
+import { LanguageModelV1Prompt } from 'ai';
+import { Mem0Config } from './mem0-chat-settings';
+if (typeof process !== 'undefined' && process.env && process.env.NODE_ENV !== 'production') {
+  // Dynamically import dotenv only in non-production environments
+  import('dotenv').then((dotenv) => dotenv.config());
+}
+
+const tokenIsPresent = (config?: Mem0Config)=>{
+  // Throw only when no key is supplied via config and none is set in the environment.
+  if((!config || !config.mem0ApiKey) && (typeof process === 'undefined' || !process.env || !process.env.MEM0_API_KEY)){
+    throw Error("MEM0_API_KEY is not present. 
Please set env MEM0_API_KEY as the value of your API KEY."); + } +} + +interface Message { + role: string; + content: string | Array<{type: string, text: string}>; +} + +const flattenPrompt = (prompt: LanguageModelV1Prompt) => { + return prompt.map((part) => { + if (part.role === "user") { + return part.content + .filter((obj) => obj.type === 'text') + .map((obj) => obj.text) + .join(" "); + } + return ""; + }).join(" "); +} + +const searchInternalMemories = async (query: string, config?: Mem0Config, top_k: number = 5)=> { + tokenIsPresent(config); + const filters = { + OR: [ + { + user_id: config&&config.user_id, + }, + { + app_id: config&&config.app_id, + }, + { + agent_id: config&&config.agent_id, + }, + { + run_id: config&&config.run_id, + }, + ], + }; + const options = { + method: 'POST', + headers: {Authorization: `Token ${(config&&config.mem0ApiKey) || (typeof process !== 'undefined' && process.env && process.env.MEM0_API_KEY) || ""}`, 'Content-Type': 'application/json'}, + body: JSON.stringify({query, filters, top_k, version: "v2", org_name: config&&config.org_name, project_name: config&&config.project_name}), + }; + const response = await fetch('https://api.mem0.ai/v2/memories/search/', options); + const data = await response.json(); + return data; +} + +const addMemories = async (messages: LanguageModelV1Prompt, config?: Mem0Config)=>{ + tokenIsPresent(config); + const message = flattenPrompt(messages); + const response = await updateMemories([ + { role: "user", content: message }, + { role: "assistant", content: "Thank You!" }, + ], config); + return response; +} + +const updateMemories = async (messages: Array, config?: Mem0Config)=>{ + tokenIsPresent(config); + const options = { + method: 'POST', + headers: {Authorization: `Token ${(config&&config.mem0ApiKey) || (typeof process !== 'undefined' && process.env && process.env.MEM0_API_KEY) || ""}`, 'Content-Type': 'application/json'}, + body: JSON.stringify({messages, ...config}), + }; + + const response = await fetch('https://api.mem0.ai/v1/memories/', options); + const data = await response.json(); + return data; +} + +const retrieveMemories = async (prompt: LanguageModelV1Prompt | string, config?: Mem0Config)=>{ + tokenIsPresent(config); + const message = typeof prompt === 'string' ? prompt : flattenPrompt(prompt); + const systemPrompt = "These are the memories I have stored. Give more weightage to the question by users and try to answer that first. You have to modify your answer based on the memories I have provided. If the memories are irrelevant you can ignore them. Also don't reply to this section of the prompt, or the memories, they are only for your reference. The System prompt starts after text System Message: \n\n"; + const memories = await searchInternalMemories(message, config); + let memoriesText = ""; + try{ + // @ts-ignore + memoriesText = memories.map((memory: any)=>{ + return `Memory: ${memory.memory}\n\n`; + }).join("\n\n"); + }catch(e){ + console.error("Error while parsing memories"); + // console.log(e); + } + return `System Message: ${systemPrompt} ${memoriesText}`; +} + +const searchMemories = async (prompt: LanguageModelV1Prompt | string, config?: Mem0Config)=>{ + tokenIsPresent(config); + const message = typeof prompt === 'string' ? 
prompt : flattenPrompt(prompt); + let memories = []; + try{ + // @ts-ignore + memories = await searchInternalMemories(message, config); + } + catch(e){ + console.error("Error while searching memories"); + } + return memories; +} + +export {addMemories, updateMemories, retrieveMemories, flattenPrompt, searchMemories}; \ No newline at end of file diff --git a/vercel-ai-sdk/src/provider-response-provider.ts b/vercel-ai-sdk/src/provider-response-provider.ts new file mode 100644 index 0000000000..0ad753b95a --- /dev/null +++ b/vercel-ai-sdk/src/provider-response-provider.ts @@ -0,0 +1,113 @@ +import { createOpenAI, OpenAIProviderSettings } from "@ai-sdk/openai"; +import { generateText as aiGenerateText, streamText as aiStreamText, LanguageModelV1Prompt } from "ai"; +import { updateMemories, retrieveMemories, flattenPrompt } from "./mem0-utils"; +import { Mem0Config } from "./mem0-chat-settings"; +import { Mem0ProviderSettings } from "./mem0-provider"; +import { CohereProviderSettings, createCohere } from "@ai-sdk/cohere"; +import { AnthropicProviderSettings, createAnthropic } from "@ai-sdk/anthropic"; +import { createGroq, GroqProviderSettings } from "@ai-sdk/groq"; + +export type Provider = ReturnType | ReturnType | ReturnType | ReturnType | any; +export type ProviderSettings = OpenAIProviderSettings | CohereProviderSettings | AnthropicProviderSettings | GroqProviderSettings; + +class Mem0AITextGenerator { + provider: Provider; + model: string; + provider_config?: ProviderSettings; + config: Mem0ProviderSettings; + + constructor(provider: string, model: string, config: Mem0ProviderSettings, provider_config: ProviderSettings) { + switch (provider) { + case "openai": + this.provider = createOpenAI({ + apiKey: config?.apiKey, + ...provider_config, + }); + if(config?.modelType === "completion"){ + this.provider = createOpenAI({ + apiKey: config?.apiKey, + ...provider_config, + }).completion; + }else if(config?.modelType === "chat"){ + this.provider = createOpenAI({ + apiKey: config?.apiKey, + ...provider_config, + }).chat; + } + break; + case "cohere": + this.provider = createCohere({ + apiKey: config?.apiKey, + ...provider_config, + }); + break; + case "anthropic": + this.provider = createAnthropic({ + apiKey: config?.apiKey, + ...provider_config, + }); + break; + case "groq": + this.provider = createGroq({ + apiKey: config?.apiKey, + ...provider_config, + }); + break; + default: + throw new Error("Invalid provider"); + } + this.model = model; + this.provider_config = provider_config; + this.config = config!; + } + + + async generateText(prompt: LanguageModelV1Prompt, config: Mem0Config) { + try { + const flattenPromptResponse = flattenPrompt(prompt); + const newPrompt = await retrieveMemories(prompt, config); + const response = await aiGenerateText({ + // @ts-ignore + model: this.provider(this.model), + messages: prompt, + system: newPrompt + }); + + await updateMemories([ + { role: "user", content: flattenPromptResponse }, + { role: "assistant", content: response.text }, + ], config); + + return response; + } catch (error) { + console.error("Error generating text:", error); + throw error; + } + } + + async streamText(prompt: LanguageModelV1Prompt, config: Mem0Config) { + try { + const flattenPromptResponse = flattenPrompt(prompt); + const newPrompt = await retrieveMemories(prompt, config); + + await updateMemories([ + { role: "user", content: flattenPromptResponse }, + { role: "assistant", content: "Thank You!" 
},
+      ], config);
+      // The assistant reply above is a placeholder; the streamed text is not awaited here.
+
+      const response = await aiStreamText({
+        // @ts-ignore
+        model: this.provider(this.model),
+        messages: prompt,
+        system: newPrompt
+      });
+
+      return response;
+    } catch (error) {
+      console.error("Error streaming text:", error);
+      throw error;
+    }
+  }
+}
+
+export default Mem0AITextGenerator;
diff --git a/vercel-ai-sdk/src/stream-utils.ts b/vercel-ai-sdk/src/stream-utils.ts
new file mode 100644
index 0000000000..0082653d79
--- /dev/null
+++ b/vercel-ai-sdk/src/stream-utils.ts
@@ -0,0 +1,28 @@
+// Drops "step-finish" chunks from a stream and passes everything else through.
+// Chunks may arrive either as JSON strings or as already-parsed objects, so
+// both cases are handled.
+async function filterStream(originalStream: ReadableStream) {
+  const reader = originalStream.getReader();
+  const filteredStream = new ReadableStream({
+    async start(controller) {
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) {
+          controller.close();
+          break;
+        }
+        try {
+          // JSON string chunk: parse it to inspect its type.
+          const chunk = JSON.parse(value);
+          if (chunk.type !== "step-finish") {
+            controller.enqueue(value);
+          }
+        } catch (error) {
+          // Non-JSON chunk (already an object): inspect the type directly.
+          if (!(value.type === 'step-finish')) {
+            controller.enqueue(value);
+          }
+        }
+      }
+    }
+  });
+
+  return filteredStream;
+}
+
+export { filterStream };
\ No newline at end of file
diff --git a/vercel-ai-sdk/teardown.ts b/vercel-ai-sdk/teardown.ts
new file mode 100644
index 0000000000..65e4ca1bef
--- /dev/null
+++ b/vercel-ai-sdk/teardown.ts
@@ -0,0 +1,12 @@
+import { testConfig } from './config/test-config';
+
+// Jest global teardown: removes the test user created during the run.
+export default async function () {
+  console.log("Running global teardown...");
+  try {
+    await testConfig.fetchDeleteId();
+    await testConfig.deleteUser();
+    console.log("User deleted successfully after all tests.");
+  } catch (error) {
+    console.error("Failed to delete user after all tests:", error);
+  }
+}
\ No newline at end of file
diff --git a/vercel-ai-sdk/tests/anthropic-structured-ouput.test.ts b/vercel-ai-sdk/tests/anthropic-structured-ouput.test.ts
new file mode 100644
index 0000000000..f9a7f1b8ce
--- /dev/null
+++ b/vercel-ai-sdk/tests/anthropic-structured-ouput.test.ts
@@ -0,0 +1,110 @@
+import dotenv from "dotenv";
+dotenv.config();
+
+import { generateObject } from "ai";
+import { testConfig } from "../config/test-config";
+import { z } from "zod";
+
+interface Provider {
+  name: string;
+  activeModel: string;
+  apiKey: string | undefined;
+}
+
+const provider: Provider = {
+  name: "anthropic",
+  activeModel: "claude-3-5-sonnet-20240620",
+  apiKey: process.env.ANTHROPIC_API_KEY,
+}
+
+describe("ANTHROPIC Structured Outputs", () => {
+  const { userId } = testConfig;
+  let mem0: ReturnType<typeof testConfig.createTestClient>;
+  jest.setTimeout(30000);
+
+  beforeEach(() => {
+    mem0 = testConfig.createTestClient(provider);
+  });
+
+  describe("ANTHROPIC Object Generation Tests", () => {
+    // Test 1: Generate a car preference object
+    it("should generate a car preference object with name and steps", async () => {
+      const { object } = await generateObject({
+        model: mem0(provider.activeModel, {
+          user_id: userId,
+        }),
+        schema: z.object({
+          car: z.object({
+            name: z.string(),
+            steps: z.array(z.string()),
+          }),
+        }),
+        prompt: "Which car would I like?",
+      });
+
+      expect(object.car).toBeDefined();
+      expect(typeof object.car.name).toBe("string");
+      expect(Array.isArray(object.car.steps)).toBe(true);
+      expect(object.car.steps.every((step) => typeof step === "string")).toBe(true);
+    });
+
+    // Test 2: Generate an array of car objects
+    it("should generate an array of three car objects with name, class, and description", async () => {
+      const { object } = await generateObject({
+        model: mem0(provider.activeModel, {
+          user_id: userId,
+        }),
+        output: "array",
+        schema: z.object({
+          name:
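
A usage sketch for filterStream from stream-utils above; the chunk shape here is assumed from the "step-finish" check, not specified elsewhere in this PR:

import { filterStream } from "./stream-utils";

// A toy source stream whose chunks are objects carrying a `type` field.
const source = new ReadableStream({
  start(controller) {
    controller.enqueue({ type: "text-delta", textDelta: "Hello" });
    controller.enqueue({ type: "step-finish" }); // will be dropped
    controller.enqueue({ type: "text-delta", textDelta: " world" });
    controller.close();
  },
});

const filtered = await filterStream(source);
const reader = filtered.getReader();
for (let r = await reader.read(); !r.done; r = await reader.read()) {
  console.log(r.value); // only the non "step-finish" chunks arrive here
}
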
z.string(), + class: z.string(), + description: z.string(), + }), + prompt: "Write name of three cars that I would like.", + }); + + expect(Array.isArray(object)).toBe(true); + expect(object.length).toBe(3); + object.forEach((car) => { + expect(car).toHaveProperty("name"); + expect(typeof car.name).toBe("string"); + expect(car).toHaveProperty("class"); + expect(typeof car.class).toBe("string"); + expect(car).toHaveProperty("description"); + expect(typeof car.description).toBe("string"); + }); + }); + + // Test 3: Generate an enum for movie genre classification + it("should classify the genre of a movie plot", async () => { + const { object } = await generateObject({ + model: mem0(provider.activeModel, { + user_id: userId, + }), + output: "enum", + enum: ["action", "comedy", "drama", "horror", "sci-fi"], + prompt: 'Classify the genre of this movie plot: "A group of astronauts travel through a wormhole in search of a new habitable planet for humanity."', + }); + + expect(object).toBeDefined(); + expect(object).toBe("sci-fi"); + }); + + // Test 4: Generate an object of car names without schema + it("should generate an object with car names", async () => { + const { object } = await generateObject({ + model: mem0(provider.activeModel, { + user_id: userId, + }), + output: "no-schema", + prompt: "Write name of 3 cars that I would like.", + }); + + const carObject = object as { cars: string[] }; + + expect(carObject).toBeDefined(); + expect(Array.isArray(carObject.cars)).toBe(true); + expect(carObject.cars.length).toBe(3); + expect(carObject.cars.every((car) => typeof car === "string")).toBe(true); + }); + }); +}); diff --git a/vercel-ai-sdk/tests/anthropic.test.ts b/vercel-ai-sdk/tests/anthropic.test.ts new file mode 100644 index 0000000000..3a7f7c39de --- /dev/null +++ b/vercel-ai-sdk/tests/anthropic.test.ts @@ -0,0 +1,61 @@ +import dotenv from "dotenv"; +dotenv.config(); + +import { retrieveMemories } from "../src"; +import { generateText, LanguageModelV1Prompt } from "ai"; +import { testConfig } from "../config/test-config"; +import { createAnthropic } from "@ai-sdk/anthropic"; + +describe("ANTHROPIC Functions", () => { + const { userId } = testConfig; + jest.setTimeout(30000); + + let anthropic: any; + + beforeEach(() => { + anthropic = createAnthropic({ + apiKey: process.env.ANTHROPIC_API_KEY, + }); + }); + + it("should retrieve memories and generate text using ANTHROPIC provider", async () => { + const messages: LanguageModelV1Prompt = [ + { + role: "user", + content: [ + { type: "text", text: "Suggest me a good car to buy." }, + { type: "text", text: " Write only the car name and it's color." 
}, + ], + }, + ]; + + // Retrieve memories based on previous messages + const memories = await retrieveMemories(messages, { user_id: userId }); + + const { text } = await generateText({ + // @ts-ignore + model: anthropic("claude-3-haiku-20240307"), + messages: messages, + system: memories, + }); + + // Expect text to be a string + expect(typeof text).toBe('string'); + expect(text.length).toBeGreaterThan(0); + }); + + it("should generate text using ANTHROPIC provider with memories", async () => { + const prompt = "Suggest me a good car to buy."; + const memories = await retrieveMemories(prompt, { user_id: userId }); + + const { text } = await generateText({ + // @ts-ignore + model: anthropic("claude-3-haiku-20240307"), + prompt: prompt, + system: memories + }); + + expect(typeof text).toBe('string'); + expect(text.length).toBeGreaterThan(0); + }); +}); \ No newline at end of file diff --git a/vercel-ai-sdk/tests/cohere.test.ts b/vercel-ai-sdk/tests/cohere.test.ts new file mode 100644 index 0000000000..2329536f0c --- /dev/null +++ b/vercel-ai-sdk/tests/cohere.test.ts @@ -0,0 +1,60 @@ +import dotenv from "dotenv"; +dotenv.config(); + +import { retrieveMemories } from "../src"; +import { generateText, LanguageModelV1Prompt } from "ai"; +import { testConfig } from "../config/test-config"; +import { createCohere } from "@ai-sdk/cohere"; + +describe("COHERE Functions", () => { + const { userId } = testConfig; + jest.setTimeout(30000); + let cohere: any; + + beforeEach(() => { + cohere = createCohere({ + apiKey: process.env.COHERE_API_KEY, + }); + }); + + it("should retrieve memories and generate text using COHERE provider", async () => { + const messages: LanguageModelV1Prompt = [ + { + role: "user", + content: [ + { type: "text", text: "Suggest me a good car to buy." }, + { type: "text", text: " Write only the car name and it's color." 
}, + ], + }, + ]; + + // Retrieve memories based on previous messages + const memories = await retrieveMemories(messages, { user_id: userId }); + + const { text } = await generateText({ + // @ts-ignore + model: cohere("command-r-plus"), + messages: messages, + system: memories, + }); + + // Expect text to be a string + expect(typeof text).toBe('string'); + expect(text.length).toBeGreaterThan(0); + }); + + it("should generate text using COHERE provider with memories", async () => { + const prompt = "Suggest me a good car to buy."; + const memories = await retrieveMemories(prompt, { user_id: userId }); + + const { text } = await generateText({ + // @ts-ignore + model: cohere("command-r-plus"), + prompt: prompt, + system: memories + }); + + expect(typeof text).toBe('string'); + expect(text.length).toBeGreaterThan(0); + }); +}); \ No newline at end of file diff --git a/vercel-ai-sdk/tests/generate-output.test.ts b/vercel-ai-sdk/tests/generate-output.test.ts new file mode 100644 index 0000000000..06107f7646 --- /dev/null +++ b/vercel-ai-sdk/tests/generate-output.test.ts @@ -0,0 +1,86 @@ +import { generateText, LanguageModelV1Prompt, streamText } from "ai"; +import { addMemories } from "../src"; +import { testConfig } from "../config/test-config"; + +interface Provider { + name: string; + activeModel: string; + apiKey: string | undefined; +} + +describe.each(testConfig.providers)('TESTS: Generate/Stream Text with model %s', (provider: Provider) => { + const { userId } = testConfig; + let mem0: ReturnType; + jest.setTimeout(50000); + + beforeEach(() => { + mem0 = testConfig.createTestClient(provider); + }); + + beforeAll(async () => { + // Add some test memories before all tests + const messages: LanguageModelV1Prompt = [ + { + role: "user", + content: [ + { type: "text", text: "I love red cars." }, + { type: "text", text: "I like Toyota Cars." }, + { type: "text", text: "I prefer SUVs." }, + ], + } + ]; + await addMemories(messages, { user_id: userId }); + }); + + it("should generate text using mem0 model", async () => { + const { text } = await generateText({ + model: mem0(provider.activeModel, { + user_id: userId, + }), + prompt: "Suggest me a good car to buy!", + }); + + expect(typeof text).toBe('string'); + expect(text.length).toBeGreaterThan(0); + }); + + it("should generate text using provider with memories", async () => { + const { text } = await generateText({ + model: mem0(provider.activeModel, { + user_id: userId, + }), + messages: [ + { + role: "user", + content: [ + { type: "text", text: "Suggest me a good car to buy." }, + { type: "text", text: "Write only the car name and it's color." }, + ], + } + ], + }); + // Expect text to be a string + expect(typeof text).toBe('string'); + expect(text.length).toBeGreaterThan(0); + }); + + it("should stream text using Mem0 provider", async () => { + const { textStream } = await streamText({ + model: mem0(provider.activeModel, { + user_id: userId, // Use the uniform userId + }), + prompt: "Suggest me a good car to buy! 
Write only the car name and it's color.", + }); + + // Collect streamed text parts + let streamedText = ''; + for await (const textPart of textStream) { + streamedText += textPart; + } + + // Ensure the streamed text is a string + expect(typeof streamedText).toBe('string'); + expect(streamedText.length).toBeGreaterThan(0); + }); + +}); \ No newline at end of file diff --git a/vercel-ai-sdk/tests/groq.test.ts b/vercel-ai-sdk/tests/groq.test.ts new file mode 100644 index 0000000000..9b99f16b7f --- /dev/null +++ b/vercel-ai-sdk/tests/groq.test.ts @@ -0,0 +1,61 @@ +import dotenv from "dotenv"; +dotenv.config(); + +import { retrieveMemories } from "../src"; +import { generateText, LanguageModelV1Prompt } from "ai"; +import { testConfig } from "../config/test-config"; +import { createGroq } from "@ai-sdk/groq"; + +describe("GROQ Functions", () => { + const { userId } = testConfig; + jest.setTimeout(30000); + + let groq: any; + + beforeEach(() => { + groq = createGroq({ + apiKey: process.env.GROQ_API_KEY, + }); + }); + + it("should retrieve memories and generate text using GROQ provider", async () => { + const messages: LanguageModelV1Prompt = [ + { + role: "user", + content: [ + { type: "text", text: "Suggest me a good car to buy." }, + { type: "text", text: " Write only the car name and it's color." }, + ], + }, + ]; + + // Retrieve memories based on previous messages + const memories = await retrieveMemories(messages, { user_id: userId }); + + const { text } = await generateText({ + // @ts-ignore + model: groq("gemma2-9b-it"), + messages: messages, + system: memories, + }); + + // Expect text to be a string + expect(typeof text).toBe('string'); + expect(text.length).toBeGreaterThan(0); + }); + + it("should generate text using GROQ provider with memories", async () => { + const prompt = "Suggest me a good car to buy."; + const memories = await retrieveMemories(prompt, { user_id: userId }); + + const { text } = await generateText({ + // @ts-ignore + model: groq("gemma2-9b-it"), + prompt: prompt, + system: memories + }); + + expect(typeof text).toBe('string'); + expect(text.length).toBeGreaterThan(0); + }); +}); \ No newline at end of file diff --git a/vercel-ai-sdk/tests/memory-core.test.ts b/vercel-ai-sdk/tests/memory-core.test.ts new file mode 100644 index 0000000000..9d32b15cb8 --- /dev/null +++ b/vercel-ai-sdk/tests/memory-core.test.ts @@ -0,0 +1,75 @@ +import { addMemories, retrieveMemories } from "../src"; +import { LanguageModelV1Prompt } from "ai"; +import { testConfig } from "../config/test-config"; + +describe("Memory Core Functions", () => { + const { userId } = testConfig; + jest.setTimeout(10000); + + describe("addMemories", () => { + it("should successfully add memories and return correct format", async () => { + const messages: LanguageModelV1Prompt = [ + { + role: "user", + content: [ + { type: "text", text: "I love red cars." }, + { type: "text", text: "I like Toyota Cars." }, + { type: "text", text: "I prefer SUVs." 
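
The assertions that follow pin down the shape of each element addMemories resolves with. Inferred from the test expectations only; the exact payload is not documented in this PR:

// Shape inferred from the expectations below, for illustration only:
interface AddMemoryResult {
  id: string;    // identifier of the stored memory
  data: unknown; // present per the assertion; exact shape not asserted
  event: "ADD";  // these tests expect newly added memories
}
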
}, + ], + } + ]; + + const response = await addMemories(messages, { user_id: userId }); + + expect(Array.isArray(response)).toBe(true); + response.forEach((memory: { event: any; }) => { + expect(memory).toHaveProperty('id'); + expect(memory).toHaveProperty('data'); + expect(memory).toHaveProperty('event'); + expect(memory.event).toBe('ADD'); + }); + }); + }); + + describe("retrieveMemories", () => { + beforeEach(async () => { + // Add some test memories before each retrieval test + const messages: LanguageModelV1Prompt = [ + { + role: "user", + content: [ + { type: "text", text: "I love red cars." }, + { type: "text", text: "I like Toyota Cars." }, + { type: "text", text: "I prefer SUVs." }, + ], + } + ]; + await addMemories(messages, { user_id: userId }); + }); + + it("should retrieve memories with string prompt", async () => { + const prompt = "Which car would I prefer?"; + const response = await retrieveMemories(prompt, { user_id: userId }); + + expect(typeof response).toBe('string'); + expect(response.match(/Memory:/g)?.length).toBeGreaterThan(2); + }); + + it("should retrieve memories with array of prompts", async () => { + const messages: LanguageModelV1Prompt = [ + { + role: "user", + content: [ + { type: "text", text: "Which car would I prefer?" }, + { type: "text", text: "Suggest me some cars" }, + ], + } + ]; + + const response = await retrieveMemories(messages, { user_id: userId }); + + expect(typeof response).toBe('string'); + expect(response.match(/Memory:/g)?.length).toBeGreaterThan(2); + }); + }); +}); \ No newline at end of file diff --git a/vercel-ai-sdk/tests/openai-structured-ouput.test.ts b/vercel-ai-sdk/tests/openai-structured-ouput.test.ts new file mode 100644 index 0000000000..a7497f6b01 --- /dev/null +++ b/vercel-ai-sdk/tests/openai-structured-ouput.test.ts @@ -0,0 +1,110 @@ +import dotenv from "dotenv"; +dotenv.config(); + +import { generateObject } from "ai"; +import { testConfig } from "../config/test-config"; +import { z } from "zod"; + +interface Provider { + name: string; + activeModel: string; + apiKey: string | undefined; +} + +const provider: Provider = { + name: "openai", + activeModel: "gpt-4-turbo", + apiKey: process.env.OPENAI_API_KEY, +} +describe("OPENAI Structured Outputs", () => { + const { userId } = testConfig; + let mem0: ReturnType; + jest.setTimeout(30000); + + beforeEach(() => { + mem0 = testConfig.createTestClient(provider); + }); + + describe("openai Object Generation Tests", () => { + // Test 1: Generate a car preference object + it("should generate a car preference object with name and steps", async () => { + const { object } = await generateObject({ + model: mem0(provider.activeModel, { + user_id: userId, + }), + schema: z.object({ + car: z.object({ + name: z.string(), + steps: z.array(z.string()), + }), + }), + prompt: "Which car would I like?", + }); + + expect(object.car).toBeDefined(); + expect(typeof object.car.name).toBe("string"); + expect(Array.isArray(object.car.steps)).toBe(true); + expect(object.car.steps.every((step) => typeof step === "string")).toBe(true); + }); + + // Test 2: Generate an array of car objects + it("should generate an array of three car objects with name, class, and description", async () => { + const { object } = await generateObject({ + model: mem0(provider.activeModel, { + user_id: userId, + }), + output: "array", + schema: z.object({ + name: z.string(), + class: z.string().describe('Cars should be "SUV", "Sedan", or "Hatchback"'), + description: z.string(), + }), + prompt: "Write name of three cars that I 
would like.", + }); + + expect(Array.isArray(object)).toBe(true); + expect(object.length).toBe(3); + object.forEach((car) => { + expect(car).toHaveProperty("name"); + expect(typeof car.name).toBe("string"); + expect(car).toHaveProperty("class"); + expect(typeof car.class).toBe("string"); + expect(car).toHaveProperty("description"); + expect(typeof car.description).toBe("string"); + }); + }); + + // Test 3: Generate an enum for movie genre classification + it("should classify the genre of a movie plot", async () => { + const { object } = await generateObject({ + model: mem0(provider.activeModel, { + user_id: userId, + }), + output: "enum", + enum: ["action", "comedy", "drama", "horror", "sci-fi"], + prompt: 'Classify the genre of this movie plot: "A group of astronauts travel through a wormhole in search of a new habitable planet for humanity."', + }); + + expect(object).toBeDefined(); + expect(object).toBe("sci-fi"); + }); + + // Test 4: Generate an object of car names without schema + it("should generate an object with car names", async () => { + const { object } = await generateObject({ + model: mem0(provider.activeModel, { + user_id: userId, + }), + output: "no-schema", + prompt: "Write name of 3 cars that I would like.", + }); + + const carObject = object as { cars: string[] }; + + expect(carObject).toBeDefined(); + expect(Array.isArray(carObject.cars)).toBe(true); + expect(carObject.cars.length).toBe(3); + expect(carObject.cars.every((car) => typeof car === "string")).toBe(true); + }); + }); +}); diff --git a/vercel-ai-sdk/tests/openai.test.ts b/vercel-ai-sdk/tests/openai.test.ts new file mode 100644 index 0000000000..d3f57a49aa --- /dev/null +++ b/vercel-ai-sdk/tests/openai.test.ts @@ -0,0 +1,58 @@ +import dotenv from "dotenv"; +dotenv.config(); + +import { retrieveMemories } from "../src"; +import { generateText, LanguageModelV1Prompt } from "ai"; +import { testConfig } from "../config/test-config"; +import { createOpenAI } from "@ai-sdk/openai"; + +describe("OPENAI Functions", () => { + const { userId } = testConfig; + jest.setTimeout(30000); + let openai: any; + + beforeEach(() => { + openai = createOpenAI({ + apiKey: process.env.OPENAI_API_KEY, + }); + }); + + it("should retrieve memories and generate text using OpenAI provider", async () => { + const messages: LanguageModelV1Prompt = [ + { + role: "user", + content: [ + { type: "text", text: "Suggest me a good car to buy." }, + { type: "text", text: " Write only the car name and it's color." 
}, + ], + }, + ]; + + // Retrieve memories based on previous messages + const memories = await retrieveMemories(messages, { user_id: userId }); + + const { text } = await generateText({ + model: openai("gpt-4-turbo"), + messages: messages, + system: memories, + }); + + // Expect text to be a string + expect(typeof text).toBe('string'); + expect(text.length).toBeGreaterThan(0); + }); + + it("should generate text using openai provider with memories", async () => { + const prompt = "Suggest me a good car to buy."; + const memories = await retrieveMemories(prompt, { user_id: userId }); + + const { text } = await generateText({ + model: openai("gpt-4-turbo"), + prompt: prompt, + system: memories + }); + + expect(typeof text).toBe('string'); + expect(text.length).toBeGreaterThan(0); + }); +}); \ No newline at end of file diff --git a/vercel-ai-sdk/tests/text-properties.test.ts b/vercel-ai-sdk/tests/text-properties.test.ts new file mode 100644 index 0000000000..e61788fd02 --- /dev/null +++ b/vercel-ai-sdk/tests/text-properties.test.ts @@ -0,0 +1,77 @@ +import { generateText, streamText } from "ai"; +import { testConfig } from "../config/test-config"; + +interface Provider { + name: string; + activeModel: string; + apiKey: string | undefined; +} + +describe.each(testConfig.providers)('TEXT/STREAM PROPERTIES: Tests with model %s', (provider: Provider) => { + const { userId } = testConfig; + let mem0: ReturnType; + jest.setTimeout(50000); + + beforeEach(() => { + mem0 = testConfig.createTestClient(provider); + }); + + it("should stream text with onChunk handler", async () => { + const chunkTexts: string[] = []; + const { textStream } = await streamText({ + model: mem0(provider.activeModel, { + user_id: userId, // Use the uniform userId + }), + prompt: "Write only the name of the car I prefer and its color.", + onChunk({ chunk }) { + if (chunk.type === "text-delta") { + // Store chunk text for assertions + chunkTexts.push(chunk.textDelta); + } + }, + }); + + // Wait for the stream to complete + for await (const _ of textStream) { + } + + // Ensure chunks are collected + expect(chunkTexts.length).toBeGreaterThan(0); + expect(chunkTexts.every((text) => typeof text === "string")).toBe(true); + }); + + it("should call onFinish handler without throwing an error", async () => { + await streamText({ + model: mem0(provider.activeModel, { + user_id: userId, // Use the uniform userId + }), + prompt: "Write only the name of the car I prefer and its color.", + onFinish({ text, finishReason, usage }) { + + }, + }); + }); + + it("should generate fullStream with expected usage", async () => { + const { + text, // combined text + usage, // combined usage of all steps + } = await generateText({ + model: mem0(provider.activeModel), // Ensure the model name is correct + maxSteps: 5, // Enable multi-step calls + experimental_continueSteps: true, + prompt: + "Suggest me some good cars to buy. 
Each response MUST HAVE at least 200 words.",
+    });
+
+    // Ensure text is a string
+    expect(typeof text).toBe("string");
+
+    // Check usage. Token counts vary from run to run (the retrieved memories
+    // change the prompt size), so assert broad ranges rather than exact values.
+    expect(usage.promptTokens).toBeGreaterThanOrEqual(100);
+    expect(usage.promptTokens).toBeLessThanOrEqual(500);
+    expect(usage.completionTokens).toBeGreaterThanOrEqual(250); // at least 250 completion tokens
+    expect(usage.totalTokens).toBeGreaterThan(400); // more than 400 tokens in total
+  });
+});
diff --git a/vercel-ai-sdk/tsconfig.json b/vercel-ai-sdk/tsconfig.json
new file mode 100644
index 0000000000..a716cbc9b2
--- /dev/null
+++ b/vercel-ai-sdk/tsconfig.json
@@ -0,0 +1,29 @@
+{
+  "$schema": "https://json.schemastore.org/tsconfig",
+  "compilerOptions": {
+    "composite": false,
+    "declaration": true,
+    "declarationMap": true,
+    "esModuleInterop": true,
+    "forceConsistentCasingInFileNames": true,
+    "inlineSources": false,
+    "isolatedModules": true,
+    "moduleResolution": "node",
+    "noUnusedLocals": false,
+    "noUnusedParameters": false,
+    "preserveWatchOutput": true,
+    "skipLibCheck": true,
+    "strict": true,
+    "types": ["@types/node", "jest"],
+    "jsx": "react-jsx",
+    "lib": ["dom", "ES2021"],
+    "module": "ESNext",
+    "target": "ES2018",
+    "stripInternal": true,
+    "paths": {
+      "@/*": ["./src/*"]
+    }
+  },
+  "include": ["."],
+  "exclude": ["dist", "build", "node_modules"]
+}
\ No newline at end of file
diff --git a/vercel-ai-sdk/tsup.config.ts b/vercel-ai-sdk/tsup.config.ts
new file mode 100644
index 0000000000..2c8f74a6df
--- /dev/null
+++ b/vercel-ai-sdk/tsup.config.ts
@@ -0,0 +1,10 @@
+import { defineConfig } from 'tsup'
+
+export default defineConfig([
+  {
+    dts: true,
+    entry: ['src/index.ts'],
+    format: ['cjs', 'esm'],
+    sourcemap: true,
+  },
+])
\ No newline at end of file
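
Given the teardown module and tsup config above, the supporting Jest wiring would look roughly like this sketch. The real jest.config.js and package.json ship in this PR but are not shown in this excerpt, so the exact contents here are assumptions (including the ts-jest preset):

// jest.config.js (sketch; ts-jest preset assumed)
module.exports = {
  preset: "ts-jest",
  testEnvironment: "node",
  globalTeardown: "<rootDir>/teardown.ts",
};

A typical package.json for this layout would then expose "build": "tsup" and "test": "jest" scripts.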