diff --git a/.env.example b/.env.example index 6261997e6..e4a15aae1 100644 --- a/.env.example +++ b/.env.example @@ -4,4 +4,13 @@ ALGOLIA_ADMIN_KEY= # Needed to generate the Healthie SDK HEALTHIE_API_URL= # Only needed if you want to run real test against the OpenAI API -OPENAI_TEST_KEY= +OPENAI_API_KEY= + +# Langsmith tracing +LANGSMITH_TRACING=true +LANGSMITH_ENDPOINT=https://api.smith.langchain.com +# For now let's set both to make sure it works for all sdk versions +LANGSMITH_PROJECT=ai-actions-local +LANGCHAIN_PROJECT=ai-actions-local +LANGSMITH_API_KEY= + diff --git a/.pnp.cjs b/.pnp.cjs index d0a1faa18..b425a4b5e 100755 --- a/.pnp.cjs +++ b/.pnp.cjs @@ -28,7 +28,7 @@ const RAW_RUNTIME_STATE = "packageLocation": "./",\ "packageDependencies": [\ ["@awell-health/awell-sdk", "npm:0.1.20"],\ - ["@awell-health/extensions-core", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:1.0.11"],\ + ["@awell-health/extensions-core", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:1.0.16"],\ ["@awell-health/healthie-sdk", "npm:0.1.1"],\ ["@dropbox/sign", "npm:1.8.0"],\ ["@faker-js/faker", "npm:8.4.1"],\ @@ -39,7 +39,7 @@ const RAW_RUNTIME_STATE = ["@graphql-tools/apollo-engine-loader", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:7.3.26"],\ ["@graphql-typed-document-node/core", "virtual:59c276aa517e7eece9b42c9ca677cfc463202227c49b72eb9628ab5409de25886e6a64d32aaab7e2700a825599bd4032ab3aba0073f8dc2a6b74ffd967e262ef#npm:3.2.0"],\ ["@hubspot/api-client", "npm:11.2.0"],\ - ["@langchain/core", "npm:0.3.3"],\ + ["@langchain/core", "npm:0.3.33"],\ ["@langchain/openai", 
"virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:0.3.0"],\ ["@mailchimp/mailchimp_transactional", "npm:1.0.59"],\ ["@medplum/core", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:3.2.14"],\ @@ -94,7 +94,8 @@ const RAW_RUNTIME_STATE = ["jsdom", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:26.0.0"],\ ["jsonpath", "npm:1.1.1"],\ ["jsonwebtoken", "npm:9.0.2"],\ - ["langchain", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:0.3.2"],\ + ["langchain", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:0.3.12"],\ + ["langsmith", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:0.3.2"],\ ["libphonenumber-js", "npm:1.11.8"],\ ["lint-staged", "npm:15.2.11"],\ ["lodash", "npm:4.17.21"],\ @@ -221,7 +222,7 @@ const RAW_RUNTIME_STATE = "packageDependencies": [\ ["@awell-health/awell-extensions", "workspace:."],\ ["@awell-health/awell-sdk", "npm:0.1.20"],\ - ["@awell-health/extensions-core", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:1.0.11"],\ + ["@awell-health/extensions-core", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:1.0.16"],\ ["@awell-health/healthie-sdk", "npm:0.1.1"],\ ["@dropbox/sign", "npm:1.8.0"],\ ["@faker-js/faker", "npm:8.4.1"],\ @@ -232,7 +233,7 @@ const RAW_RUNTIME_STATE = ["@graphql-tools/apollo-engine-loader", 
"virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:7.3.26"],\ ["@graphql-typed-document-node/core", "virtual:59c276aa517e7eece9b42c9ca677cfc463202227c49b72eb9628ab5409de25886e6a64d32aaab7e2700a825599bd4032ab3aba0073f8dc2a6b74ffd967e262ef#npm:3.2.0"],\ ["@hubspot/api-client", "npm:11.2.0"],\ - ["@langchain/core", "npm:0.3.3"],\ + ["@langchain/core", "npm:0.3.33"],\ ["@langchain/openai", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:0.3.0"],\ ["@mailchimp/mailchimp_transactional", "npm:1.0.59"],\ ["@medplum/core", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:3.2.14"],\ @@ -287,7 +288,8 @@ const RAW_RUNTIME_STATE = ["jsdom", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:26.0.0"],\ ["jsonpath", "npm:1.1.1"],\ ["jsonwebtoken", "npm:9.0.2"],\ - ["langchain", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:0.3.2"],\ + ["langchain", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:0.3.12"],\ + ["langsmith", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:0.3.2"],\ ["libphonenumber-js", "npm:1.11.8"],\ ["lint-staged", "npm:15.2.11"],\ ["lodash", "npm:4.17.21"],\ @@ -330,17 +332,17 @@ const RAW_RUNTIME_STATE = }]\ ]],\ ["@awell-health/extensions-core", [\ - ["npm:1.0.11", {\ - "packageLocation": "./.yarn/cache/@awell-health-extensions-core-npm-1.0.11-992538064c-6c6821d6ce.zip/node_modules/@awell-health/extensions-core/",\ + 
["npm:1.0.16", {\ + "packageLocation": "./.yarn/cache/@awell-health-extensions-core-npm-1.0.16-a3dc33c4e1-1910b7bff2.zip/node_modules/@awell-health/extensions-core/",\ "packageDependencies": [\ - ["@awell-health/extensions-core", "npm:1.0.11"]\ + ["@awell-health/extensions-core", "npm:1.0.16"]\ ],\ "linkType": "SOFT"\ }],\ - ["virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:1.0.11", {\ - "packageLocation": "./.yarn/__virtual__/@awell-health-extensions-core-virtual-bf46c3514a/0/cache/@awell-health-extensions-core-npm-1.0.11-992538064c-6c6821d6ce.zip/node_modules/@awell-health/extensions-core/",\ + ["virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:1.0.16", {\ + "packageLocation": "./.yarn/__virtual__/@awell-health-extensions-core-virtual-f4cfb936f4/0/cache/@awell-health-extensions-core-npm-1.0.16-a3dc33c4e1-1910b7bff2.zip/node_modules/@awell-health/extensions-core/",\ "packageDependencies": [\ - ["@awell-health/extensions-core", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:1.0.11"],\ + ["@awell-health/extensions-core", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:1.0.16"],\ ["@awell-health/awell-sdk", "npm:0.1.20"],\ ["@types/awell-health__awell-sdk", null],\ ["@types/json-schema", "npm:7.0.15"],\ @@ -1889,6 +1891,15 @@ const RAW_RUNTIME_STATE = "linkType": "HARD"\ }]\ ]],\ + ["@cfworker/json-schema", [\ + ["npm:4.1.0", {\ + "packageLocation": "./.yarn/cache/@cfworker-json-schema-npm-4.1.0-1cac3ad966-12ee4dd6ef.zip/node_modules/@cfworker/json-schema/",\ + "packageDependencies": [\ + ["@cfworker/json-schema", "npm:4.1.0"]\ + ],\ + "linkType": "HARD"\ + }]\ + ]],\ 
["@cspotcode/source-map-support", [\ ["npm:0.8.1", {\ "packageLocation": "./.yarn/cache/@cspotcode-source-map-support-npm-0.8.1-964f2de99d-b6e38a1712.zip/node_modules/@cspotcode/source-map-support/",\ @@ -3615,21 +3626,22 @@ const RAW_RUNTIME_STATE = }]\ ]],\ ["@langchain/core", [\ - ["npm:0.3.3", {\ - "packageLocation": "./.yarn/cache/@langchain-core-npm-0.3.3-1a04ce6427-f355345b8d.zip/node_modules/@langchain/core/",\ + ["npm:0.3.33", {\ + "packageLocation": "./.yarn/cache/@langchain-core-npm-0.3.33-279a14e27a-08b99289af.zip/node_modules/@langchain/core/",\ "packageDependencies": [\ - ["@langchain/core", "npm:0.3.3"],\ + ["@langchain/core", "npm:0.3.33"],\ + ["@cfworker/json-schema", "npm:4.1.0"],\ ["ansi-styles", "npm:5.2.0"],\ ["camelcase", "npm:6.3.0"],\ ["decamelize", "npm:1.2.0"],\ ["js-tiktoken", "npm:1.0.14"],\ - ["langsmith", "virtual:1a04ce6427bc5f793ddd295df41f2d75cd9fe4b01c8b66bf3259ba655e1b3f6bb21315a5008e827d1e31de466288548d8af2f85cc04cf596f8dc2f8c4d80a1d8#npm:0.1.59"],\ + ["langsmith", "virtual:279a14e27acbae61cce777a0571bf20768e2538d628175f2ec5bac44d9c2e3456219c88e945082c046ecb7eca6e0be719d0e7a174a10421eeade635c770c5c0b#npm:0.3.2"],\ ["mustache", "npm:4.2.0"],\ ["p-queue", "npm:6.6.2"],\ ["p-retry", "npm:4.6.2"],\ ["uuid", "npm:10.0.0"],\ ["zod", "npm:3.23.8"],\ - ["zod-to-json-schema", "virtual:1a04ce6427bc5f793ddd295df41f2d75cd9fe4b01c8b66bf3259ba655e1b3f6bb21315a5008e827d1e31de466288548d8af2f85cc04cf596f8dc2f8c4d80a1d8#npm:3.23.3"]\ + ["zod-to-json-schema", "virtual:279a14e27acbae61cce777a0571bf20768e2538d628175f2ec5bac44d9c2e3456219c88e945082c046ecb7eca6e0be719d0e7a174a10421eeade635c770c5c0b#npm:3.23.3"]\ ],\ "linkType": "HARD"\ }]\ @@ -3646,12 +3658,12 @@ const RAW_RUNTIME_STATE = "packageLocation": "./.yarn/__virtual__/@langchain-openai-virtual-0aa3f78922/0/cache/@langchain-openai-npm-0.3.0-b787e52caa-e84e90ec99.zip/node_modules/@langchain/openai/",\ "packageDependencies": [\ ["@langchain/openai", 
"virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:0.3.0"],\ - ["@langchain/core", "npm:0.3.3"],\ + ["@langchain/core", "npm:0.3.33"],\ ["@types/langchain__core", null],\ ["js-tiktoken", "npm:1.0.14"],\ ["openai", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:4.62.1"],\ ["zod", "npm:3.23.8"],\ - ["zod-to-json-schema", "virtual:1a04ce6427bc5f793ddd295df41f2d75cd9fe4b01c8b66bf3259ba655e1b3f6bb21315a5008e827d1e31de466288548d8af2f85cc04cf596f8dc2f8c4d80a1d8#npm:3.23.3"]\ + ["zod-to-json-schema", "virtual:279a14e27acbae61cce777a0571bf20768e2538d628175f2ec5bac44d9c2e3456219c88e945082c046ecb7eca6e0be719d0e7a174a10421eeade635c770c5c0b#npm:3.23.3"]\ ],\ "packagePeers": [\ "@langchain/core",\ @@ -3668,11 +3680,11 @@ const RAW_RUNTIME_STATE = ],\ "linkType": "SOFT"\ }],\ - ["virtual:fd2bc5d6cb09acfc47c9107140e6981c7aa1e56f5b6019484f438aabae6bc70264c584059956fd68eb0db3a9e8f413f56446f7b03aefc6fcf9d897ea312f94d0#npm:0.1.0", {\ - "packageLocation": "./.yarn/__virtual__/@langchain-textsplitters-virtual-1504dc4b0f/0/cache/@langchain-textsplitters-npm-0.1.0-dcc9543794-87121ec5ad.zip/node_modules/@langchain/textsplitters/",\ + ["virtual:237cca263a38ec28721a2ca6fe5b98c16a50f76aef1c12cca710b38c8eb787c44acf5b33d88a0d6de6b698f5cff46610cc1997d9a7eafbb5e683fc56fda45a23#npm:0.1.0", {\ + "packageLocation": "./.yarn/__virtual__/@langchain-textsplitters-virtual-f940a38ff8/0/cache/@langchain-textsplitters-npm-0.1.0-dcc9543794-87121ec5ad.zip/node_modules/@langchain/textsplitters/",\ "packageDependencies": [\ - ["@langchain/textsplitters", "virtual:fd2bc5d6cb09acfc47c9107140e6981c7aa1e56f5b6019484f438aabae6bc70264c584059956fd68eb0db3a9e8f413f56446f7b03aefc6fcf9d897ea312f94d0#npm:0.1.0"],\ - ["@langchain/core", "npm:0.3.3"],\ + ["@langchain/textsplitters", 
"virtual:237cca263a38ec28721a2ca6fe5b98c16a50f76aef1c12cca710b38c8eb787c44acf5b33d88a0d6de6b698f5cff46610cc1997d9a7eafbb5e683fc56fda45a23#npm:0.1.0"],\ + ["@langchain/core", "npm:0.3.33"],\ ["@types/langchain__core", null],\ ["js-tiktoken", "npm:1.0.14"]\ ],\ @@ -6801,6 +6813,16 @@ const RAW_RUNTIME_STATE = "linkType": "HARD"\ }]\ ]],\ + ["console-table-printer", [\ + ["npm:2.12.1", {\ + "packageLocation": "./.yarn/cache/console-table-printer-npm-2.12.1-a4f4d866aa-37ac91d360.zip/node_modules/console-table-printer/",\ + "packageDependencies": [\ + ["console-table-printer", "npm:2.12.1"],\ + ["simple-wcswidth", "npm:1.0.1"]\ + ],\ + "linkType": "HARD"\ + }]\ + ]],\ ["constant-case", [\ ["npm:3.0.4", {\ "packageLocation": "./.yarn/cache/constant-case-npm-3.0.4-118b472e28-6c3346d51a.zip/node_modules/constant-case/",\ @@ -11415,37 +11437,41 @@ const RAW_RUNTIME_STATE = }]\ ]],\ ["langchain", [\ - ["npm:0.3.2", {\ - "packageLocation": "./.yarn/cache/langchain-npm-0.3.2-e30f659f16-e7f2105fc9.zip/node_modules/langchain/",\ + ["npm:0.3.12", {\ + "packageLocation": "./.yarn/cache/langchain-npm-0.3.12-114ea77c3f-d56af985b4.zip/node_modules/langchain/",\ "packageDependencies": [\ - ["langchain", "npm:0.3.2"]\ + ["langchain", "npm:0.3.12"]\ ],\ "linkType": "SOFT"\ }],\ - ["virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:0.3.2", {\ - "packageLocation": "./.yarn/__virtual__/langchain-virtual-fd2bc5d6cb/0/cache/langchain-npm-0.3.2-e30f659f16-e7f2105fc9.zip/node_modules/langchain/",\ + ["virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:0.3.12", {\ + "packageLocation": "./.yarn/__virtual__/langchain-virtual-237cca263a/0/cache/langchain-npm-0.3.12-114ea77c3f-d56af985b4.zip/node_modules/langchain/",\ "packageDependencies": [\ - ["langchain", 
"virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:0.3.2"],\ + ["langchain", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:0.3.12"],\ ["@langchain/anthropic", null],\ ["@langchain/aws", null],\ + ["@langchain/cerebras", null],\ ["@langchain/cohere", null],\ - ["@langchain/core", "npm:0.3.3"],\ + ["@langchain/core", "npm:0.3.33"],\ ["@langchain/google-genai", null],\ ["@langchain/google-vertexai", null],\ + ["@langchain/google-vertexai-web", null],\ ["@langchain/groq", null],\ ["@langchain/mistralai", null],\ ["@langchain/ollama", null],\ ["@langchain/openai", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:0.3.0"],\ - ["@langchain/textsplitters", "virtual:fd2bc5d6cb09acfc47c9107140e6981c7aa1e56f5b6019484f438aabae6bc70264c584059956fd68eb0db3a9e8f413f56446f7b03aefc6fcf9d897ea312f94d0#npm:0.1.0"],\ + ["@langchain/textsplitters", "virtual:237cca263a38ec28721a2ca6fe5b98c16a50f76aef1c12cca710b38c8eb787c44acf5b33d88a0d6de6b698f5cff46610cc1997d9a7eafbb5e683fc56fda45a23#npm:0.1.0"],\ ["@types/axios", null],\ ["@types/cheerio", null],\ ["@types/handlebars", null],\ ["@types/langchain__anthropic", null],\ ["@types/langchain__aws", null],\ + ["@types/langchain__cerebras", null],\ ["@types/langchain__cohere", null],\ ["@types/langchain__core", null],\ ["@types/langchain__google-genai", null],\ ["@types/langchain__google-vertexai", null],\ + ["@types/langchain__google-vertexai-web", null],\ ["@types/langchain__groq", null],\ ["@types/langchain__mistralai", null],\ ["@types/langchain__ollama", null],\ @@ -11457,7 +11483,7 @@ const RAW_RUNTIME_STATE = ["js-tiktoken", "npm:1.0.14"],\ ["js-yaml", "npm:4.1.0"],\ ["jsonpointer", "npm:5.0.1"],\ - ["langsmith", 
"virtual:1a04ce6427bc5f793ddd295df41f2d75cd9fe4b01c8b66bf3259ba655e1b3f6bb21315a5008e827d1e31de466288548d8af2f85cc04cf596f8dc2f8c4d80a1d8#npm:0.1.59"],\ + ["langsmith", "virtual:279a14e27acbae61cce777a0571bf20768e2538d628175f2ec5bac44d9c2e3456219c88e945082c046ecb7eca6e0be719d0e7a174a10421eeade635c770c5c0b#npm:0.3.2"],\ ["openapi-types", "npm:12.1.3"],\ ["p-retry", "npm:4.6.2"],\ ["peggy", null],\ @@ -11465,14 +11491,16 @@ const RAW_RUNTIME_STATE = ["uuid", "npm:10.0.0"],\ ["yaml", "npm:2.5.1"],\ ["zod", "npm:3.23.8"],\ - ["zod-to-json-schema", "virtual:1a04ce6427bc5f793ddd295df41f2d75cd9fe4b01c8b66bf3259ba655e1b3f6bb21315a5008e827d1e31de466288548d8af2f85cc04cf596f8dc2f8c4d80a1d8#npm:3.23.3"]\ + ["zod-to-json-schema", "virtual:279a14e27acbae61cce777a0571bf20768e2538d628175f2ec5bac44d9c2e3456219c88e945082c046ecb7eca6e0be719d0e7a174a10421eeade635c770c5c0b#npm:3.23.3"]\ ],\ "packagePeers": [\ "@langchain/anthropic",\ "@langchain/aws",\ + "@langchain/cerebras",\ "@langchain/cohere",\ "@langchain/core",\ "@langchain/google-genai",\ + "@langchain/google-vertexai-web",\ "@langchain/google-vertexai",\ "@langchain/groq",\ "@langchain/mistralai",\ @@ -11482,9 +11510,11 @@ const RAW_RUNTIME_STATE = "@types/handlebars",\ "@types/langchain__anthropic",\ "@types/langchain__aws",\ + "@types/langchain__cerebras",\ "@types/langchain__cohere",\ "@types/langchain__core",\ "@types/langchain__google-genai",\ + "@types/langchain__google-vertexai-web",\ "@types/langchain__google-vertexai",\ "@types/langchain__groq",\ "@types/langchain__mistralai",\ @@ -11501,20 +11531,21 @@ const RAW_RUNTIME_STATE = }]\ ]],\ ["langsmith", [\ - ["npm:0.1.59", {\ - "packageLocation": "./.yarn/cache/langsmith-npm-0.1.59-2835e67852-9e71c09f63.zip/node_modules/langsmith/",\ + ["npm:0.3.2", {\ + "packageLocation": "./.yarn/cache/langsmith-npm-0.3.2-9ce51d8a31-cae13264bf.zip/node_modules/langsmith/",\ "packageDependencies": [\ - ["langsmith", "npm:0.1.59"]\ + ["langsmith", "npm:0.3.2"]\ ],\ "linkType": "SOFT"\ 
}],\ - ["virtual:1a04ce6427bc5f793ddd295df41f2d75cd9fe4b01c8b66bf3259ba655e1b3f6bb21315a5008e827d1e31de466288548d8af2f85cc04cf596f8dc2f8c4d80a1d8#npm:0.1.59", {\ - "packageLocation": "./.yarn/__virtual__/langsmith-virtual-29183b17bd/0/cache/langsmith-npm-0.1.59-2835e67852-9e71c09f63.zip/node_modules/langsmith/",\ + ["virtual:279a14e27acbae61cce777a0571bf20768e2538d628175f2ec5bac44d9c2e3456219c88e945082c046ecb7eca6e0be719d0e7a174a10421eeade635c770c5c0b#npm:0.3.2", {\ + "packageLocation": "./.yarn/__virtual__/langsmith-virtual-6ae109b63f/0/cache/langsmith-npm-0.3.2-9ce51d8a31-cae13264bf.zip/node_modules/langsmith/",\ "packageDependencies": [\ - ["langsmith", "virtual:1a04ce6427bc5f793ddd295df41f2d75cd9fe4b01c8b66bf3259ba655e1b3f6bb21315a5008e827d1e31de466288548d8af2f85cc04cf596f8dc2f8c4d80a1d8#npm:0.1.59"],\ + ["langsmith", "virtual:279a14e27acbae61cce777a0571bf20768e2538d628175f2ec5bac44d9c2e3456219c88e945082c046ecb7eca6e0be719d0e7a174a10421eeade635c770c5c0b#npm:0.3.2"],\ ["@types/openai", null],\ ["@types/uuid", "npm:10.0.0"],\ - ["commander", "npm:10.0.1"],\ + ["chalk", "npm:4.1.2"],\ + ["console-table-printer", "npm:2.12.1"],\ ["openai", null],\ ["p-queue", "npm:6.6.2"],\ ["p-retry", "npm:4.6.2"],\ @@ -11526,6 +11557,26 @@ const RAW_RUNTIME_STATE = "openai"\ ],\ "linkType": "HARD"\ + }],\ + ["virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:0.3.2", {\ + "packageLocation": "./.yarn/__virtual__/langsmith-virtual-3572fbeb2c/0/cache/langsmith-npm-0.3.2-9ce51d8a31-cae13264bf.zip/node_modules/langsmith/",\ + "packageDependencies": [\ + ["langsmith", "virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:0.3.2"],\ + ["@types/openai", null],\ + ["@types/uuid", "npm:10.0.0"],\ + ["chalk", "npm:4.1.2"],\ + ["console-table-printer", "npm:2.12.1"],\ + ["openai", 
"virtual:6d1931a4340173b37cf492f77cb803dda2f92958adb6847175388be3c73eb24be6f6bfd25e0fc0b7ad8dba815a972ad5e9d1f18e67fb58466bb7c99205a9d42e#npm:4.62.1"],\ + ["p-queue", "npm:6.6.2"],\ + ["p-retry", "npm:4.6.2"],\ + ["semver", "npm:7.6.3"],\ + ["uuid", "npm:10.0.0"]\ + ],\ + "packagePeers": [\ + "@types/openai",\ + "openai"\ + ],\ + "linkType": "HARD"\ }]\ ]],\ ["leven", [\ @@ -14391,6 +14442,15 @@ const RAW_RUNTIME_STATE = "linkType": "HARD"\ }]\ ]],\ + ["simple-wcswidth", [\ + ["npm:1.0.1", {\ + "packageLocation": "./.yarn/cache/simple-wcswidth-npm-1.0.1-ac1dd0a592-75b1a5a941.zip/node_modules/simple-wcswidth/",\ + "packageDependencies": [\ + ["simple-wcswidth", "npm:1.0.1"]\ + ],\ + "linkType": "HARD"\ + }]\ + ]],\ ["sisteransi", [\ ["npm:1.0.5", {\ "packageLocation": "./.yarn/cache/sisteransi-npm-1.0.5-af60cc0cfa-aba6438f46.zip/node_modules/sisteransi/",\ @@ -16354,10 +16414,10 @@ const RAW_RUNTIME_STATE = ],\ "linkType": "SOFT"\ }],\ - ["virtual:1a04ce6427bc5f793ddd295df41f2d75cd9fe4b01c8b66bf3259ba655e1b3f6bb21315a5008e827d1e31de466288548d8af2f85cc04cf596f8dc2f8c4d80a1d8#npm:3.23.3", {\ - "packageLocation": "./.yarn/__virtual__/zod-to-json-schema-virtual-5cebe5e126/0/cache/zod-to-json-schema-npm-3.23.3-86415b1ed5-09f783a232.zip/node_modules/zod-to-json-schema/",\ + ["virtual:279a14e27acbae61cce777a0571bf20768e2538d628175f2ec5bac44d9c2e3456219c88e945082c046ecb7eca6e0be719d0e7a174a10421eeade635c770c5c0b#npm:3.23.3", {\ + "packageLocation": "./.yarn/__virtual__/zod-to-json-schema-virtual-8d4bbd967f/0/cache/zod-to-json-schema-npm-3.23.3-86415b1ed5-09f783a232.zip/node_modules/zod-to-json-schema/",\ "packageDependencies": [\ - ["zod-to-json-schema", "virtual:1a04ce6427bc5f793ddd295df41f2d75cd9fe4b01c8b66bf3259ba655e1b3f6bb21315a5008e827d1e31de466288548d8af2f85cc04cf596f8dc2f8c4d80a1d8#npm:3.23.3"],\ + ["zod-to-json-schema", 
"virtual:279a14e27acbae61cce777a0571bf20768e2538d628175f2ec5bac44d9c2e3456219c88e945082c046ecb7eca6e0be719d0e7a174a10421eeade635c770c5c0b#npm:3.23.3"],\ ["@types/zod", null],\ ["zod", "npm:3.23.8"]\ ],\ diff --git a/.yarn/cache/@awell-health-extensions-core-npm-1.0.11-992538064c-6c6821d6ce.zip b/.yarn/cache/@awell-health-extensions-core-npm-1.0.11-992538064c-6c6821d6ce.zip deleted file mode 100644 index e0e0dea0e..000000000 Binary files a/.yarn/cache/@awell-health-extensions-core-npm-1.0.11-992538064c-6c6821d6ce.zip and /dev/null differ diff --git a/.yarn/cache/@awell-health-extensions-core-npm-1.0.16-a3dc33c4e1-1910b7bff2.zip b/.yarn/cache/@awell-health-extensions-core-npm-1.0.16-a3dc33c4e1-1910b7bff2.zip new file mode 100644 index 000000000..f2bd41609 Binary files /dev/null and b/.yarn/cache/@awell-health-extensions-core-npm-1.0.16-a3dc33c4e1-1910b7bff2.zip differ diff --git a/.yarn/cache/@cfworker-json-schema-npm-4.1.0-1cac3ad966-12ee4dd6ef.zip b/.yarn/cache/@cfworker-json-schema-npm-4.1.0-1cac3ad966-12ee4dd6ef.zip new file mode 100644 index 000000000..1f0eadb60 Binary files /dev/null and b/.yarn/cache/@cfworker-json-schema-npm-4.1.0-1cac3ad966-12ee4dd6ef.zip differ diff --git a/.yarn/cache/@langchain-core-npm-0.3.3-1a04ce6427-f355345b8d.zip b/.yarn/cache/@langchain-core-npm-0.3.3-1a04ce6427-f355345b8d.zip deleted file mode 100644 index daff33fa3..000000000 Binary files a/.yarn/cache/@langchain-core-npm-0.3.3-1a04ce6427-f355345b8d.zip and /dev/null differ diff --git a/.yarn/cache/@langchain-core-npm-0.3.33-279a14e27a-08b99289af.zip b/.yarn/cache/@langchain-core-npm-0.3.33-279a14e27a-08b99289af.zip new file mode 100644 index 000000000..8f7f378c1 Binary files /dev/null and b/.yarn/cache/@langchain-core-npm-0.3.33-279a14e27a-08b99289af.zip differ diff --git a/.yarn/cache/console-table-printer-npm-2.12.1-a4f4d866aa-37ac91d360.zip b/.yarn/cache/console-table-printer-npm-2.12.1-a4f4d866aa-37ac91d360.zip new file mode 100644 index 000000000..ebe684ff3 Binary files 
/dev/null and b/.yarn/cache/console-table-printer-npm-2.12.1-a4f4d866aa-37ac91d360.zip differ diff --git a/.yarn/cache/langchain-npm-0.3.2-e30f659f16-e7f2105fc9.zip b/.yarn/cache/langchain-npm-0.3.12-114ea77c3f-d56af985b4.zip similarity index 86% rename from .yarn/cache/langchain-npm-0.3.2-e30f659f16-e7f2105fc9.zip rename to .yarn/cache/langchain-npm-0.3.12-114ea77c3f-d56af985b4.zip index f8eaec63b..f74e0e274 100644 Binary files a/.yarn/cache/langchain-npm-0.3.2-e30f659f16-e7f2105fc9.zip and b/.yarn/cache/langchain-npm-0.3.12-114ea77c3f-d56af985b4.zip differ diff --git a/.yarn/cache/langsmith-npm-0.1.59-2835e67852-9e71c09f63.zip b/.yarn/cache/langsmith-npm-0.1.59-2835e67852-9e71c09f63.zip deleted file mode 100644 index d11334768..000000000 Binary files a/.yarn/cache/langsmith-npm-0.1.59-2835e67852-9e71c09f63.zip and /dev/null differ diff --git a/.yarn/cache/langsmith-npm-0.3.2-9ce51d8a31-cae13264bf.zip b/.yarn/cache/langsmith-npm-0.3.2-9ce51d8a31-cae13264bf.zip new file mode 100644 index 000000000..df40ecc03 Binary files /dev/null and b/.yarn/cache/langsmith-npm-0.3.2-9ce51d8a31-cae13264bf.zip differ diff --git a/.yarn/cache/simple-wcswidth-npm-1.0.1-ac1dd0a592-75b1a5a941.zip b/.yarn/cache/simple-wcswidth-npm-1.0.1-ac1dd0a592-75b1a5a941.zip new file mode 100644 index 000000000..8ba852403 Binary files /dev/null and b/.yarn/cache/simple-wcswidth-npm-1.0.1-ac1dd0a592-75b1a5a941.zip differ diff --git a/extensions/awell/v1/actions/isPatientEnrolledInCareFlow/isPatientEnrolledInCareFlow.test.ts b/extensions/awell/v1/actions/isPatientEnrolledInCareFlow/isPatientEnrolledInCareFlow.test.ts index 223b25c31..2342fc5ce 100644 --- a/extensions/awell/v1/actions/isPatientEnrolledInCareFlow/isPatientEnrolledInCareFlow.test.ts +++ b/extensions/awell/v1/actions/isPatientEnrolledInCareFlow/isPatientEnrolledInCareFlow.test.ts @@ -125,6 +125,7 @@ describe('Is patient already enrolled in care flow action', () => { pathway: { id: 'pathway-instance-id-1', definition_id: 
'pathway-definition-1', + tenant_id: '123', }, fields: { pathwayStatus: '', // By default, only active care flows @@ -167,6 +168,7 @@ describe('Is patient already enrolled in care flow action', () => { pathway: { id: 'pathway-instance-id-1', definition_id: 'pathway-definition-1', + tenant_id: '123', }, fields: { pathwayStatus: undefined, // By default, only active care flows @@ -215,6 +217,7 @@ describe('Is patient already enrolled in care flow action', () => { pathway: { id: 'pathway-instance-id-1', definition_id: 'pathway-definition-1', + tenant_id: '123', }, fields: { pathwayStatus: `${PathwayStatus.Completed}`, @@ -318,6 +321,7 @@ describe('Is patient already enrolled in care flow action', () => { pathway: { id: 'pathway-instance-id-1', definition_id: 'pathway-definition-1', + tenant_id: '123', }, fields: { pathwayStatus: `${PathwayStatus.Active}, ${PathwayStatus.Completed}`, diff --git a/extensions/bland/actions/sendCall/__tests__/sendCall.test.ts b/extensions/bland/actions/sendCall/__tests__/sendCall.test.ts index e3c59933a..f1c2e8038 100644 --- a/extensions/bland/actions/sendCall/__tests__/sendCall.test.ts +++ b/extensions/bland/actions/sendCall/__tests__/sendCall.test.ts @@ -47,6 +47,7 @@ describe('Bland.ai - Send call', () => { pathway: { id: 'pathway-id', definition_id: 'pathway-definition-id', + tenant_id: '123', }, activity: { id: 'activity-id', diff --git a/extensions/bland/actions/sendCallWithPathway/__tests__/sendCallWithPathway.test.ts b/extensions/bland/actions/sendCallWithPathway/__tests__/sendCallWithPathway.test.ts index 164e49994..6e7fae552 100644 --- a/extensions/bland/actions/sendCallWithPathway/__tests__/sendCallWithPathway.test.ts +++ b/extensions/bland/actions/sendCallWithPathway/__tests__/sendCallWithPathway.test.ts @@ -47,6 +47,7 @@ describe('Bland.ai - Send call', () => { pathway: { id: 'pathway-id', definition_id: 'pathway-definition-id', + tenant_id: '123', }, activity: { id: 'activity-id', diff --git 
a/extensions/elation/actions/closeCareGap/closeCareGap.test.ts b/extensions/elation/actions/closeCareGap/closeCareGap.test.ts index 68af98730..bf141a8d7 100644 --- a/extensions/elation/actions/closeCareGap/closeCareGap.test.ts +++ b/extensions/elation/actions/closeCareGap/closeCareGap.test.ts @@ -46,6 +46,9 @@ describe('Elation - Close care gap', () => { pathway: { definition_id: '123', id: '123', + tenant_id: '123', + org_id: '123', + org_slug: 'org-slug', }, patient: { id: '123', diff --git a/extensions/elation/actions/createCareGap/createCareGap.test.ts b/extensions/elation/actions/createCareGap/createCareGap.test.ts index ec5eefc1b..1a3d9ee64 100644 --- a/extensions/elation/actions/createCareGap/createCareGap.test.ts +++ b/extensions/elation/actions/createCareGap/createCareGap.test.ts @@ -50,6 +50,9 @@ describe('Elation - Create care gap', () => { pathway: { definition_id: '123', id: '123', + tenant_id: '123', + org_id: '123', + org_slug: 'org-slug', }, patient: { id: '123', diff --git a/extensions/elation/actions/createReferralOrder/createReferralOrder.test.ts b/extensions/elation/actions/createReferralOrder/createReferralOrder.test.ts index 1640ccede..02bd5feb3 100644 --- a/extensions/elation/actions/createReferralOrder/createReferralOrder.test.ts +++ b/extensions/elation/actions/createReferralOrder/createReferralOrder.test.ts @@ -53,6 +53,9 @@ describe('Elation - Create referral order', () => { pathway: { definition_id: '123', id: '123', + tenant_id: '123', + org_id: '123', + org_slug: 'org-slug', }, patient: { id: '123', diff --git a/extensions/elation/actions/findAppointmentsByPrompt/findAppointmentsByPrompt.test.ts b/extensions/elation/actions/findAppointmentsByPrompt/findAppointmentsByPrompt.test.ts index a7cba292d..7acd1e6fc 100644 --- a/extensions/elation/actions/findAppointmentsByPrompt/findAppointmentsByPrompt.test.ts +++ b/extensions/elation/actions/findAppointmentsByPrompt/findAppointmentsByPrompt.test.ts @@ -1,58 +1,53 @@ +import { TestHelpers } 
from '@awell-health/extensions-core' import { makeAPIClient } from '../../client' import { appointmentsMock } from './__testdata__/GetAppointments.mock' import { findAppointmentsByPrompt as action } from './findAppointmentsByPrompt' -import { TestHelpers } from '@awell-health/extensions-core' -import { ChatOpenAI } from '@langchain/openai' -jest.mock('../../client', () => ({ - makeAPIClient: jest.fn().mockImplementation(() => ({ - findAppointments: jest.fn().mockResolvedValue(appointmentsMock), - })), -})) +// Mock the client +jest.mock('../../client') -const mockedSdk = jest.mocked(makeAPIClient) - -jest.mock('@langchain/openai', () => { - const mockInvoke = jest.fn().mockResolvedValue({ - appointmentIds: appointmentsMock.map((appointment) => appointment.id), - explanation: 'Test explanation', +// Mock createOpenAIModel +jest.mock('../../../../src/lib/llm/openai/createOpenAIModel', () => ({ + createOpenAIModel: jest.fn().mockResolvedValue({ + model: { + pipe: jest.fn().mockReturnValue({ + invoke: jest.fn().mockResolvedValue({ + appointmentIds: appointmentsMock.map(a => a.id), + explanation: 'Test explanation' + }) + }) + }, + metadata: { + care_flow_definition_id: 'whatever', + care_flow_id: 'test-flow-id', + activity_id: 'test-activity-id', + tenant_id: '123', + org_id: '123', + org_slug: 'org-slug' + } }) +})) - const mockChain = { - invoke: mockInvoke, - } - - const mockPipe = jest.fn().mockReturnValue(mockChain) - - const mockChatOpenAI = jest.fn().mockImplementation(() => ({ - pipe: mockPipe, - })) - - return { - ChatOpenAI: mockChatOpenAI, - } -}) - -describe('Elation - Find appointment by type', () => { - const { - extensionAction: findAppointmentByType, - onComplete, - onError, - helpers, - clearMocks, - } = TestHelpers.fromAction(action) +describe('Elation - Find appointments by prompt', () => { + const { extensionAction, onComplete, onError, helpers, clearMocks } = + TestHelpers.fromAction(action) beforeEach(() => { clearMocks() jest.clearAllMocks() + + 
const mockAPIClient = makeAPIClient as jest.Mock + mockAPIClient.mockImplementation(() => ({ + findAppointments: jest.fn().mockResolvedValue(appointmentsMock) + })) }) - test('Should return the correct appointment', async () => { - await findAppointmentByType.onEvent({ + test('Should find the correct appointments', async () => { + await extensionAction.onEvent({ payload: { fields: { - patientId: 12345, // used to get a list of appointments - prompt: 'Find the next appointment for this patient', + patientId: 12345, + prompt: 'Find all appointments', }, settings: { client_id: 'clientId', @@ -61,24 +56,90 @@ describe('Elation - Find appointment by type', () => { password: 'password', auth_url: 'authUrl', base_url: 'baseUrl', - openAiApiKey: 'openaiApiKey', }, - } as any, + pathway: { + id: 'test-flow-id', + definition_id: '123', + tenant_id: '123', + org_slug: 'test-org-slug', + org_id: 'test-org-id' + }, + activity: { + id: 'test-activity-id' + }, + patient: { + id: 'test-patient-id' + } + }, onComplete, onError, helpers, }) - expect(ChatOpenAI).toHaveBeenCalled() - expect(mockedSdk).toHaveBeenCalled() - expect(onComplete).toHaveBeenCalledWith( - expect.objectContaining({ - data_points: { - appointments: JSON.stringify(appointmentsMock), - explanation: 'Test explanation', - appointmentCountsByStatus: JSON.stringify({ Scheduled: 2 }), + expect(onComplete).toHaveBeenCalledWith({ + data_points: { + appointments: JSON.stringify(appointmentsMock), + explanation: 'Test explanation', + appointmentCountsByStatus: JSON.stringify({ Scheduled: 2 }), + }, + events: [ + { + date: expect.any(String), + text: { + en: `Found ${appointmentsMock.length} appointments for patient ${12345}` + } + } + ], + }) + expect(onError).not.toHaveBeenCalled() + }) + + test('Should handle no appointments', async () => { + const mockAPIClient = makeAPIClient as jest.Mock + mockAPIClient.mockImplementation(() => ({ + findAppointments: jest.fn().mockResolvedValue([]) + })) + + await 
extensionAction.onEvent({ + payload: { + fields: { + patientId: 12345, + prompt: 'Find all appointments', + }, + settings: { + client_id: 'clientId', + client_secret: 'clientSecret', + username: 'username', + password: 'password', + auth_url: 'authUrl', + base_url: 'baseUrl', }, - }), - ) + pathway: { + id: 'test-flow-id', + definition_id: '123', + tenant_id: '123', + org_slug: 'test-org-slug', + org_id: 'test-org-id' + }, + activity: { + id: 'test-activity-id' + }, + patient: { + id: 'test-patient-id' + } + }, + onComplete, + onError, + helpers, + }) + + expect(onComplete).toHaveBeenCalledWith({ + data_points: { + explanation: 'No appointments found', + appointments: JSON.stringify([]), + appointmentCountsByStatus: JSON.stringify({}), + } + }) + expect(onError).not.toHaveBeenCalled() }) }) diff --git a/extensions/elation/actions/findAppointmentsByPrompt/findAppointmentsByPrompt.ts b/extensions/elation/actions/findAppointmentsByPrompt/findAppointmentsByPrompt.ts index 353c633e4..fac51650a 100644 --- a/extensions/elation/actions/findAppointmentsByPrompt/findAppointmentsByPrompt.ts +++ b/extensions/elation/actions/findAppointmentsByPrompt/findAppointmentsByPrompt.ts @@ -1,13 +1,12 @@ +import { isNil } from 'lodash' import { type Action, Category } from '@awell-health/extensions-core' +import { addActivityEventLog } from '../../../../src/lib/awell/addEventLog' import { type settings } from '../../settings' import { makeAPIClient } from '../../client' +import { createOpenAIModel } from '../../../../src/lib/llm/openai/createOpenAIModel' import { FieldsValidationSchema, fields, dataPoints } from './config' -import { StructuredOutputParser } from '@langchain/core/output_parsers' -import { z } from 'zod' -import { ChatOpenAI } from '@langchain/openai' -import { addActivityEventLog } from '../../../../src/lib/awell/addEventLog' -import { statusEnum } from '../../validation/appointment.zod' -import { isNil } from 'lodash' +import { getAppointmentCountsByStatus } from 
'./getAppoitnmentCountByStatus' +import { findAppointmentsByPromptWithLLM } from './lib/findAppointmentsByPromptWithLLM/findAppointmentsByPromptWithLLM' export const findAppointmentsByPrompt: Action< typeof fields, @@ -16,33 +15,15 @@ export const findAppointmentsByPrompt: Action< > = { key: 'findAppointmentsByPrompt', category: Category.EHR_INTEGRATIONS, - title: '🪄 Find Appointments by Prompt', + title: '🪄 Find Appointments by Prompt (Beta)', description: 'Find all appointments for a patient using natural language.', fields, previewable: false, dataPoints, - onEvent: async ({ payload, onComplete, onError }): Promise => { + onEvent: async ({ payload, onComplete, onError, helpers }): Promise => { const { prompt, patientId } = FieldsValidationSchema.parse(payload.fields) const api = makeAPIClient(payload.settings) - const openAiApiKey = payload.settings.openAiApiKey - - if (openAiApiKey === undefined || openAiApiKey === '') { - await onError({ - events: [ - { - date: new Date().toISOString(), - text: { en: 'OpenAI API key is required for this action.' 
}, - error: { - category: 'SERVER_ERROR', - message: 'OpenAI API key is required for this action.', - }, - }, - ], - }) - return - } - const appointments = await api.findAppointments({ patient: patientId, }) @@ -51,149 +32,59 @@ export const findAppointmentsByPrompt: Action< await onComplete({ data_points: { explanation: 'No appointments found', - appointments: JSON.stringify(appointments), + appointments: JSON.stringify([]), appointmentCountsByStatus: JSON.stringify({}), }, }) return } - const promptAppointments = appointments - .map((appointment) => { - const relevantInfo = { - id: appointment.id, - reason: appointment.reason, - scheduled_date: appointment.scheduled_date, - } - return JSON.stringify(relevantInfo) + try { + const { model, metadata, callbacks } = await createOpenAIModel({ + settings: payload.settings, + helpers, + payload, }) - .join('\n\n') - const ChatModelGPT4o = new ChatOpenAI({ - modelName: 'gpt-4o', - openAIApiKey: openAiApiKey, - temperature: 0, - maxRetries: 3, - timeout: 10000, - }) - - const systemPrompt = createSystemPrompt({ - prompt, - appointments: promptAppointments, - }) - - const AppointmentIdSchema = z.array(z.string()) - - const parser = StructuredOutputParser.fromZodSchema( - z.object({ - appointmentIds: AppointmentIdSchema, - explanation: z - .string() - .describe( - 'A readable explanation of how the appointments were found and why', - ), - }), - ) - - let result: z.infer + const { appointmentIds, explanation } = await findAppointmentsByPromptWithLLM({ + model, + appointments, + prompt, + metadata, + callbacks + }) - try { - const chain = ChatModelGPT4o.pipe(parser) - result = await chain.invoke(systemPrompt) - } catch (invokeError) { - console.error( - 'Error invoking ChatModelGPT4o for findFutureAppointment:', - invokeError, + const selectedAppointments = appointments.filter( + (appointment) => appointmentIds.includes(appointment.id) ) - throw new Error('Failed to find future appointment.') - } - const selectedAppointments = 
result.appointmentIds.map((appointmentId) => - appointments.find( - (appointment) => appointment.id === Number(appointmentId), - ), - ) + const appointmentCountsByStatus = getAppointmentCountsByStatus(selectedAppointments) - if (Object.keys(selectedAppointments).length === 0) { - console.log('No appointments found') await onComplete({ data_points: { - explanation: result.explanation, + appointments: JSON.stringify(selectedAppointments), + explanation, + appointmentCountsByStatus: JSON.stringify(appointmentCountsByStatus), }, + events: [ + addActivityEventLog({ + message: `Found ${selectedAppointments.length} appointments for patient ${patientId}` + }), + ], }) - return - } - if (selectedAppointments.length !== result.appointmentIds.length) { - console.log('Some appointments were not found') - const errorMessage = `Some appointments were not found. Found ${selectedAppointments.length} appointments, but the prompt resulted in ${result.appointmentIds.length} appointments.` + } catch (error) { await onError({ events: [ { date: new Date().toISOString(), - text: { en: errorMessage }, + text: { en: 'Failed to find appointments' }, error: { category: 'SERVER_ERROR', - message: errorMessage, + message: error instanceof Error ? 
error.message : 'Unknown error', }, }, ], }) - return } - - const appointmentCountsByStatus = Object.values(statusEnum.Values).reduce( - (acc, status) => { - const cnt = selectedAppointments.filter( - (appointment) => appointment?.status.status === status, - ).length - if (cnt > 0) { - acc[status] = cnt - } - return acc - }, - {} as Record, - ) - - await onComplete({ - data_points: { - appointments: JSON.stringify(selectedAppointments), - explanation: result.explanation, - appointmentCountsByStatus: JSON.stringify(appointmentCountsByStatus), - }, - events: [ - addActivityEventLog({ - message: `Found ${selectedAppointments.length} appointments for patient ${patientId}\nExplanation: ${result.explanation}\nAppointment counts by status: ${JSON.stringify(appointmentCountsByStatus)}`, - }), - ], - }) }, } - -const createSystemPrompt = ({ - prompt, - appointments, -}: { - prompt: string - appointments: string -}) => { - const currentDate = new Date().getDate() - return `You are a helpful medical assistant. You will receive a list (array) of appointments for a single patient and instructions about which types of appointments to find. You're supposed to use the information in the list to find appointments that match, if any exist. If no appointments exists that obviously match the instructions, that's a perfectly acceptable outcome. If multiple appointments exist that match the instructions, you should return all of them. - - Important instructions: - - The appointment "reason" is the appointment type. - - Only include appointment ids that exist in the input array. If no appointments exist that match the instructions, return an empty array. - - Pay close attention to the instructions. They are intended to have been written by a clinician, for a clinician. - - Think like a clinician. In other words, "Rx" should match a prescription appointment or follow-up related to a prescription, and "PT" would matchphysical therapy. - - The current date is ${currentDate}. 
----------- -Input array: -${appointments} ----------- -Instruction: -${prompt} ----------- - -Output a JSON object with the following keys: -1. appointmentIds: array of strings where each string is an appointment_id that matches the instructions (or an empty array if no appointments exist that match the instructions). -2. explanation: A readable explanation of how the appointments were found and why. Or, if no appointments exist that match the instructions, an explanation of why.` -} diff --git a/extensions/elation/actions/findAppointmentsByPrompt/findAppointmentsByPromptRealOpenAI.test.ts b/extensions/elation/actions/findAppointmentsByPrompt/findAppointmentsByPromptRealOpenAI.test.ts new file mode 100644 index 000000000..2ce17d8a3 --- /dev/null +++ b/extensions/elation/actions/findAppointmentsByPrompt/findAppointmentsByPromptRealOpenAI.test.ts @@ -0,0 +1,130 @@ +import 'dotenv/config' +import { TestHelpers } from '@awell-health/extensions-core' +import { makeAPIClient } from '../../client' +import { appointmentsMock } from './__testdata__/GetAppointments.mock' +import { findAppointmentsByPrompt } from './findAppointmentsByPrompt' + +// Only mock the client, not OpenAI +jest.mock('../../client') +jest.setTimeout(60000) + +describe.skip('findAppointmentsByPrompt - Real OpenAI calls', () => { + const { onComplete, onError, helpers, extensionAction, clearMocks } = + TestHelpers.fromAction(findAppointmentsByPrompt) + + beforeEach(() => { + clearMocks() + jest.clearAllMocks() + + // Set up OpenAI config + process.env.OPENAI_API_KEY = process.env.OPENAI_API_KEY || 'test-api-key' + + helpers.getOpenAIConfig = jest.fn().mockReturnValue({ + apiKey: process.env.OPENAI_API_KEY, + temperature: 0, + maxRetries: 2, + timeout: 30000 + }) + + // Mock only the client, not OpenAI + const mockAPIClient = makeAPIClient as jest.Mock + mockAPIClient.mockImplementation(() => ({ + findAppointments: jest.fn().mockResolvedValue(appointmentsMock) + })) + }) + + const testCases = [ + { + 
name: 'find all appointments', + prompt: 'Find all appointments', + shouldFind: true, + expectedCount: 2 + }, + { + name: 'find established patient visits', + prompt: 'Find established patient visits', + shouldFind: true, + expectedCount: 2 + }, + { + name: 'find non-existent appointments', + prompt: 'Find dental cleaning appointments', + shouldFind: false, + expectedCount: 0 + }, + { + name: 'find PCP appointments', + prompt: 'Find PCP appointments', + shouldFind: true, + expectedCount: 2 + } + ] + + testCases.forEach(({ name, prompt, shouldFind, expectedCount }) => { + test(`Should ${name}`, async () => { + await extensionAction.onEvent({ + payload: { + fields: { + patientId: 12345, + prompt, + }, + settings: { + client_id: 'clientId', + client_secret: 'clientSecret', + username: 'username', + password: 'password', + auth_url: 'authUrl', + base_url: 'baseUrl', + }, + pathway: { + id: 'test-flow-id', + definition_id: 'whatever', + tenant_id: '123', + org_slug: 'test-org-slug', + org_id: 'test-org-id' + }, + activity: { + id: 'test-activity-id' + }, + patient: { + id: 'test-patient-id' + } + }, + onComplete, + onError, + helpers, + }) + + if (shouldFind) { + expect(onComplete).toHaveBeenCalledWith( + expect.objectContaining({ + data_points: expect.objectContaining({ + appointments: expect.any(String), + explanation: expect.any(String), + appointmentCountsByStatus: expect.stringContaining('Scheduled') + }), + events: [ + expect.objectContaining({ + date: expect.any(String), + text: expect.objectContaining({ + en: expect.stringContaining(`Found ${expectedCount} appointments`) + }) + }) + ] + }) + ) + } else { + expect(onComplete).toHaveBeenCalledWith( + expect.objectContaining({ + data_points: expect.objectContaining({ + appointments: '[]', + explanation: expect.any(String), + appointmentCountsByStatus: '{}' + }) + }) + ) + } + expect(onError).not.toHaveBeenCalled() + }, 60000) + }) +}) \ No newline at end of file diff --git 
a/extensions/elation/actions/findAppointmentsByPrompt/getAppoitnmentCountByStatus.ts b/extensions/elation/actions/findAppointmentsByPrompt/getAppoitnmentCountByStatus.ts new file mode 100644 index 000000000..61b2a77b2 --- /dev/null +++ b/extensions/elation/actions/findAppointmentsByPrompt/getAppoitnmentCountByStatus.ts @@ -0,0 +1,18 @@ + + +import { type AppointmentResponse } from 'extensions/elation/types' +import { statusEnum } from '../../validation/appointment.zod' + +export const getAppointmentCountsByStatus = ( + appointments: AppointmentResponse[], + ): Record => { + return Object.values(statusEnum.Values).reduce>((acc, status) => { + const cnt = appointments.filter( + (appointment) => appointment?.status.status === status + ).length + if (cnt > 0) { + acc[status] = cnt + } + return acc + }, {}) + } diff --git a/extensions/elation/actions/findAppointmentsByPrompt/lib/findAppointmentsByPromptWithLLM/findAppointmentsByPromptWithLLM.ts b/extensions/elation/actions/findAppointmentsByPrompt/lib/findAppointmentsByPromptWithLLM/findAppointmentsByPromptWithLLM.ts new file mode 100644 index 000000000..a9d963b80 --- /dev/null +++ b/extensions/elation/actions/findAppointmentsByPrompt/lib/findAppointmentsByPromptWithLLM/findAppointmentsByPromptWithLLM.ts @@ -0,0 +1,55 @@ +import { type ChatOpenAI } from '@langchain/openai' +import { type AIActionMetadata } from '../../../../../../src/lib/llm/openai/types' +import type { BaseCallbackHandler } from "@langchain/core/callbacks/base" +import { systemPrompt } from './prompt' +import { parser, type AppointmentsFromAI } from './parser' +import { type AppointmentResponse } from '../../../../types' + +interface FindAppointmentsByPromptWithLLMProps { + model: ChatOpenAI + appointments: AppointmentResponse[] + prompt: string + metadata: AIActionMetadata + callbacks?: BaseCallbackHandler[] +} + +export const findAppointmentsByPromptWithLLM = async ({ + model, + appointments, + prompt, + metadata, + callbacks, +}: 
FindAppointmentsByPromptWithLLMProps): Promise => { + const chain = model.pipe(parser) + + try { + const formattedAppointments = appointments + .map((appointment) => ({ + id: appointment.id, + reason: appointment.reason, + scheduled_date: appointment.scheduled_date, + })) + .map((appointment) => JSON.stringify(appointment)) + .join('\n\n') + + const result = await chain.invoke( + await systemPrompt.format({ + currentDate: new Date().toISOString().split('T')[0], + appointments: formattedAppointments, + prompt, + }), + { + metadata, + runName: 'ElationFindAppointmentsByPrompt', + callbacks + } + ) + + return { + appointmentIds: result.appointmentIds, + explanation: result.explanation + } + } catch (error) { + throw new Error('Failed to find matching appointments.') + } +} \ No newline at end of file diff --git a/extensions/elation/actions/findAppointmentsByPrompt/lib/findAppointmentsByPromptWithLLM/parser.ts b/extensions/elation/actions/findAppointmentsByPrompt/lib/findAppointmentsByPromptWithLLM/parser.ts new file mode 100644 index 000000000..f4c6d3eda --- /dev/null +++ b/extensions/elation/actions/findAppointmentsByPrompt/lib/findAppointmentsByPromptWithLLM/parser.ts @@ -0,0 +1,21 @@ +import { z } from 'zod' +import { StructuredOutputParser } from '@langchain/core/output_parsers' + +export const AppointmentsSchema = z.array(z.coerce.number()) + .describe('Array of appointment IDs that match the criteria') + +export const parser = StructuredOutputParser.fromZodSchema( + z.object({ + appointmentIds: AppointmentsSchema, + explanation: z + .string() + .describe( + 'A readable explanation of how the appointments were found and why', + ), + }) +) + +export interface AppointmentsFromAI { + appointmentIds: number[] + explanation: string +} \ No newline at end of file diff --git a/extensions/elation/actions/findAppointmentsByPrompt/lib/findAppointmentsByPromptWithLLM/prompt.ts 
b/extensions/elation/actions/findAppointmentsByPrompt/lib/findAppointmentsByPromptWithLLM/prompt.ts new file mode 100644 index 000000000..733a1e4f7 --- /dev/null +++ b/extensions/elation/actions/findAppointmentsByPrompt/lib/findAppointmentsByPromptWithLLM/prompt.ts @@ -0,0 +1,22 @@ +import { ChatPromptTemplate } from '@langchain/core/prompts' + +export const systemPrompt = ChatPromptTemplate.fromTemplate(`You are a helpful medical assistant. You will receive a list (array) of appointments for a single patient and instructions about which types of appointments to find. You're supposed to use the information in the list to find appointments that match, if any exist. If no appointments exists that obviously match the instructions, that's a perfectly acceptable outcome. If multiple appointments exist that match the instructions, you should return all of them. + +Important instructions: +- The appointment "reason" is the appointment type. +- Only include appointment ids that exist in the input array. If no appointments exist that match the instructions, return an empty array. +- Pay close attention to the instructions. They are intended to have been written by a clinician, for a clinician. +- Think like a clinician. In other words, "Rx" should match a prescription appointment or follow-up related to a prescription, and "PT" would match physical therapy. +- The current date is {currentDate}. +---------- +Input array: +{appointments} +---------- +Instruction: +{prompt} +---------- + +Output a JSON object with the following keys: +1. appointmentIds: array of numbers representing appointment IDs that match the instructions (or an empty array if no appointments exist that match the instructions). +2. explanation: A readable explanation of how the appointments were found and why. 
Or, if no appointments exist that match the instructions, an explanation of why.` +) \ No newline at end of file diff --git a/extensions/elation/actions/findFutureAppointment/findFutureAppointment.test.ts b/extensions/elation/actions/findFutureAppointment/findFutureAppointment.test.ts index bc45576a4..99745acc3 100644 --- a/extensions/elation/actions/findFutureAppointment/findFutureAppointment.test.ts +++ b/extensions/elation/actions/findFutureAppointment/findFutureAppointment.test.ts @@ -1,57 +1,49 @@ +import { TestHelpers } from '@awell-health/extensions-core' import { makeAPIClient } from '../../client' import { appointmentsMock } from './__testdata__/GetAppointments.mock' import { findFutureAppointment as action } from './findFutureAppointment' -import { TestHelpers } from '@awell-health/extensions-core' -import { ChatOpenAI } from '@langchain/openai' - -jest.mock('../../client', () => ({ - makeAPIClient: jest.fn().mockImplementation(() => ({ - findAppointments: jest.fn().mockResolvedValue(appointmentsMock), - })), -})) -const mockedSdk = jest.mocked(makeAPIClient) +// Mock the client +jest.mock('../../client') -jest.mock('@langchain/openai', () => { - const mockInvoke = jest.fn().mockResolvedValue({ - appointmentId: appointmentsMock[0].id, - explanation: 'Test explanation', +// Mock createOpenAIModel +jest.mock('../../../../src/lib/llm/openai/createOpenAIModel', () => ({ + createOpenAIModel: jest.fn().mockResolvedValue({ + model: { + pipe: jest.fn().mockReturnValue({ + invoke: jest.fn().mockResolvedValue({ + appointmentId: appointmentsMock[0].id, + explanation: 'Test explanation' + }) + }) + }, + metadata: { + care_flow_definition_id: 'whatever', + care_flow_id: 'test-flow-id', + activity_id: 'test-activity-id' + } }) +})) - const mockChain = { - invoke: mockInvoke, - } - - const mockPipe = jest.fn().mockReturnValue(mockChain) - - const mockChatOpenAI = jest.fn().mockImplementation(() => ({ - pipe: mockPipe, - })) - - return { - ChatOpenAI: mockChatOpenAI, - } 
-}) - -describe('Elation - Find appointment by type', () => { - const { - extensionAction: findAppointmentByType, - onComplete, - onError, - helpers, - clearMocks, - } = TestHelpers.fromAction(action) +describe('Elation - Find future appointment', () => { + const { extensionAction, onComplete, onError, helpers, clearMocks } = + TestHelpers.fromAction(action) beforeEach(() => { clearMocks() jest.clearAllMocks() + + const mockAPIClient = makeAPIClient as jest.Mock + mockAPIClient.mockImplementation(() => ({ + findAppointments: jest.fn().mockResolvedValue(appointmentsMock) + })) }) - test('Should return the correct appointment', async () => { - await findAppointmentByType.onEvent({ + test('Should find the correct appointment', async () => { + await extensionAction.onEvent({ payload: { fields: { - patientId: 12345, // used to get a list of appointments + patientId: 12345, prompt: 'Find the next appointment for this patient', }, settings: { @@ -61,16 +53,26 @@ describe('Elation - Find appointment by type', () => { password: 'password', auth_url: 'authUrl', base_url: 'baseUrl', - openAiApiKey: 'openaiApiKey', }, - } as any, + pathway: { + id: 'test-flow-id', + definition_id: '123', + tenant_id: '123', + org_slug: 'test-org-slug', + org_id: 'test-org-id' + }, + activity: { + id: 'test-activity-id' + }, + patient: { + id: 'test-patient-id' + } + }, onComplete, onError, helpers, }) - expect(ChatOpenAI).toHaveBeenCalled() - expect(mockedSdk).toHaveBeenCalled() expect(onComplete).toHaveBeenCalledWith({ data_points: { appointment: JSON.stringify(appointmentsMock[0]), @@ -78,13 +80,60 @@ describe('Elation - Find appointment by type', () => { appointmentExists: 'true', }, events: [ - { - date: expect.any(String), - text: { - en: 'Number of future scheduled or confirmed appointments for patient 12345: 2\nFound appointment: 123\nExplanation: Test explanation', - }, - }, + expect.objectContaining({ + text: expect.objectContaining({ + en: expect.stringContaining('Found appointment: 
123') + }) + }) ], }) + expect(onError).not.toHaveBeenCalled() + }) + + test('Should handle no appointments', async () => { + const mockAPIClient = makeAPIClient as jest.Mock + mockAPIClient.mockImplementation(() => ({ + findAppointments: jest.fn().mockResolvedValue([]) + })) + + await extensionAction.onEvent({ + payload: { + fields: { + patientId: 12345, + prompt: 'Find the next appointment for this patient', + }, + settings: { + client_id: 'clientId', + client_secret: 'clientSecret', + username: 'username', + password: 'password', + auth_url: 'authUrl', + base_url: 'baseUrl', + }, + pathway: { + id: 'test-flow-id', + definition_id: '123', + tenant_id: '123', + org_slug: 'test-org-slug', + org_id: 'test-org-id' + }, + activity: { + id: 'test-activity-id' + }, + patient: { + id: 'test-patient-id' + } + }, + onComplete, + onError, + helpers, + }) + + expect(onComplete).toHaveBeenCalledWith({ + data_points: { + appointmentExists: 'false', + } + }) + expect(onError).not.toHaveBeenCalled() }) }) diff --git a/extensions/elation/actions/findFutureAppointment/findFutureAppointment.ts b/extensions/elation/actions/findFutureAppointment/findFutureAppointment.ts index c408159e6..73fdd0e50 100644 --- a/extensions/elation/actions/findFutureAppointment/findFutureAppointment.ts +++ b/extensions/elation/actions/findFutureAppointment/findFutureAppointment.ts @@ -1,12 +1,14 @@ +import { isNil } from 'lodash' import { type Action, Category } from '@awell-health/extensions-core' -import { type settings } from '../../settings' -import { makeAPIClient } from '../../client' -import { FieldsValidationSchema, fields, dataPoints } from './config' -import { StructuredOutputParser } from '@langchain/core/output_parsers' -import { z } from 'zod' -import { ChatOpenAI } from '@langchain/openai' + import { addActivityEventLog } from '../../../../src/lib/awell/addEventLog' -import { isNil } from 'lodash' +import type { settings, SettingsType } from '../../settings' +import { 
FieldsValidationSchema, fields, dataPoints } from './config' +import { getFutureAppointments } from './getFutureAppoitnments' +import { AppointmentIdSchema } from './lib/findAppointmentWithLLM/parser' +import { findAppointmentWithLLM } from './lib/findAppointmentWithLLM/findAppointmentWithLLM' +import { createOpenAIModel } from '../../../../src/lib/llm/openai/createOpenAIModel' +import { OPENAI_MODELS } from '../../../../src/lib/llm/openai/constants' export const findFutureAppointment: Action< typeof fields, @@ -15,44 +17,22 @@ export const findFutureAppointment: Action< > = { key: 'findFutureAppointment', category: Category.EHR_INTEGRATIONS, - title: '🪄 Find future appointment', + title: '🪄 Find future appointment (Beta)', description: 'Find a future appointment in Elation.', fields, previewable: false, dataPoints, - onEvent: async ({ payload, onComplete, onError }): Promise => { + onEvent: async ({ payload, onComplete, onError, helpers }): Promise => { + // 1. Validate input const { prompt, patientId } = FieldsValidationSchema.parse(payload.fields) - const api = makeAPIClient(payload.settings) - - const openAiApiKey = payload.settings.openAiApiKey - if (openAiApiKey === undefined || openAiApiKey === '') { - await onError({ - events: [ - { - date: new Date().toISOString(), - text: { en: 'OpenAI API key is required for this action.' }, - error: { - category: 'SERVER_ERROR', - message: 'OpenAI API key is required for this action.', - }, - }, - ], - }) - return - } - - const appointments = await api.findAppointments({ - patient: patientId, - from_date: new Date().toISOString(), - }) - const scheduledOrConfirmedAppointments = appointments.filter( - (appointment) => - appointment.status.status === 'Scheduled' || - appointment.status.status === 'Confirmed', + // 2. 
Get future appointments + const appointments = await getFutureAppointments( + payload.settings as SettingsType, + patientId, ) - if (scheduledOrConfirmedAppointments.length === 0) { + if (appointments.length === 0) { await onComplete({ data_points: { appointmentExists: 'false', @@ -61,89 +41,40 @@ export const findFutureAppointment: Action< return } - const promptAppointments = scheduledOrConfirmedAppointments - .map((appointment) => { - const relevantInfo = { - id: appointment.id, - reason: appointment.reason, - duration: appointment.duration, - scheduled_date: appointment.scheduled_date, - } - return JSON.stringify(relevantInfo) - }) - .join('\n\n') - - const ChatModelGPT4o = new ChatOpenAI({ - modelName: 'gpt-4o-2024-08-06', - openAIApiKey: openAiApiKey, - temperature: 0, - maxRetries: 3, - timeout: 10000, + // 3. Initialize OpenAI model with metadata and callbacks + const { model, metadata, callbacks } = await createOpenAIModel({ + settings: payload.settings, + helpers, + payload, + modelType: OPENAI_MODELS.GPT4o }) - const systemPrompt = `You are a helpful medical assistant. You will receive a list (array) of future appointments for a single patient and instructions about which appointment to find. You're supposed to use the information in the list to find an appointment that matches, if one exists. If no appointment exists that obviously matches the instructions, that's a perfectly acceptable outcome. If multiple appointments exist that match the instructions, you should return the first one. In any case, there can only be one appointment returned. - - Important instructions: - - The appointment "reason" is the appointment type. - - Pay close attention to the instructions. They are intended to have been written by a clinician, for a clinician. - - Think like a clinician. In other words, "Rx" should match a prescription appointment or follow-up related to a prescription. 
- ----------- -Input array: -${promptAppointments} ----------- -Instruction: -${prompt} ----------- - -Output a JSON object with two keys: -1. appointmentId: The id of the appointment that matches the instructions, if one exists. If no appointment exists that obviously matches, you should return null. -2. explanation: A readable explanation of how the appointment was found and why. Or, if no appointment exists that matches the instructions, an explanation of why.` - - const AppointmentIdSchema = z.coerce - .number() - .nullable() - .describe('A single appointment') - - const parser = StructuredOutputParser.fromZodSchema( - z.object({ - appointmentId: AppointmentIdSchema, - explanation: z - .string() - .describe( - 'A readable explanation of how the appointment was found and why', - ), - }), - ) - - let result: z.infer - - try { - const chain = ChatModelGPT4o.pipe(parser) - result = await chain.invoke(systemPrompt) - } catch (invokeError) { - console.error( - 'Error invoking ChatModelGPT4o for findFutureAppointment:', - invokeError, - ) - throw new Error('Failed to find future appointment.') - } + // 4. Find matching appointment + const { appointmentId, explanation } = await findAppointmentWithLLM({ + model, + appointments, + prompt, + metadata, + callbacks + }) - const matchedAppointmentId = AppointmentIdSchema.parse(result.appointmentId) - const foundAppointment = scheduledOrConfirmedAppointments.find( + const matchedAppointmentId = AppointmentIdSchema.parse(appointmentId) + const foundAppointment = appointments.find( (appointment) => appointment.id === Number(matchedAppointmentId), ) + + // 5. Complete action with results await onComplete({ data_points: { appointment: !isNil(matchedAppointmentId) ? JSON.stringify(foundAppointment) : undefined, - explanation: result.explanation, + explanation, appointmentExists: !isNil(matchedAppointmentId) ? 
'true' : 'false', }, events: [ addActivityEventLog({ - message: `Number of future scheduled or confirmed appointments for patient ${patientId}: ${scheduledOrConfirmedAppointments.length}\nFound appointment: ${isNil(foundAppointment) ? 'none' : foundAppointment?.id}\nExplanation: ${result.explanation}`, + message: `Number of future scheduled or confirmed appointments for patient ${patientId}: ${appointments.length}\nFound appointment: ${isNil(foundAppointment) ? 'none' : foundAppointment?.id}\nExplanation: ${explanation}`, }), ], }) diff --git a/extensions/elation/actions/findFutureAppointment/findFutureAppointmentRealOpenAI.test.ts b/extensions/elation/actions/findFutureAppointment/findFutureAppointmentRealOpenAI.test.ts new file mode 100644 index 000000000..f2a61f46e --- /dev/null +++ b/extensions/elation/actions/findFutureAppointment/findFutureAppointmentRealOpenAI.test.ts @@ -0,0 +1,118 @@ +import 'dotenv/config' +import { TestHelpers } from '@awell-health/extensions-core' +import { makeAPIClient } from '../../client' +import { appointmentsMock } from './__testdata__/GetAppointments.mock' +import { findFutureAppointment } from './findFutureAppointment' + +jest.mock('../../client') +jest.setTimeout(60000) // Increased timeout for OpenAI calls + +describe.skip('findFutureAppointment - Real OpenAI calls', () => { + const { onComplete, onError, helpers, extensionAction, clearMocks } = + TestHelpers.fromAction(findFutureAppointment) + + beforeEach(() => { + clearMocks() + jest.clearAllMocks() + + process.env.OPENAI_API_KEY = process.env.OPENAI_API_KEY || 'test-api-key' + + helpers.getOpenAIConfig = jest.fn().mockReturnValue({ + apiKey: process.env.OPENAI_API_KEY, + temperature: 0, + maxRetries: 2, + timeout: 30000 + }) + + const mockAPIClient = makeAPIClient as jest.Mock + mockAPIClient.mockImplementation(() => ({ + findAppointments: jest.fn().mockResolvedValue(appointmentsMock) + })) + }) + + const testCases = [ + { + name: 'find PCP appointment', + prompt: 'Find 
the PCP appointment', + shouldFind: true + }, + { + name: 'find established patient visit', + prompt: 'Find the established patient visit', + shouldFind: true + }, + { + name: 'find office visit', + prompt: 'Find the office visit appointment', + shouldFind: true + }, + { + name: 'find non-existent type', + prompt: 'Find the dental cleaning appointment', + shouldFind: false + }, + { + name: 'handle empty prompt', + prompt: ' a', + shouldFind: false + } + ] + + testCases.forEach(({ name, prompt, shouldFind }) => { + test(`Should ${name}`, async () => { + await extensionAction.onEvent({ + payload: { + fields: { + patientId: 12345, + prompt, + }, + settings: { + client_id: 'clientId', + client_secret: 'clientSecret', + username: 'username', + password: 'password', + auth_url: 'authUrl', + base_url: 'baseUrl', + }, + pathway: { + id: 'test-flow-id', + definition_id: '123', + tenant_id: '123', + org_slug: 'test-org-slug', + org_id: 'test-org-id' + }, + activity: { + id: 'test-activity-id' + }, + patient: { + id: 'test-patient-id' + } + }, + onComplete, + onError, + helpers, + }) + + if (shouldFind) { + expect(onComplete).toHaveBeenCalledWith( + expect.objectContaining({ + data_points: expect.objectContaining({ + appointmentExists: 'true', + explanation: expect.any(String), + appointment: expect.any(String) + }) + }) + ) + } else { + expect(onComplete).toHaveBeenCalledWith( + expect.objectContaining({ + data_points: expect.objectContaining({ + appointmentExists: 'false' + }) + }) + ) + } + expect(onError).not.toHaveBeenCalled() + }, 60000) + }) +}) \ No newline at end of file diff --git a/extensions/elation/actions/findFutureAppointment/formatAppointments.ts b/extensions/elation/actions/findFutureAppointment/formatAppointments.ts new file mode 100644 index 000000000..778f2a2e6 --- /dev/null +++ b/extensions/elation/actions/findFutureAppointment/formatAppointments.ts @@ -0,0 +1,15 @@ +import { type AppointmentResponse } from "extensions/elation/types" + +export const 
formatAppointments = (appointments: AppointmentResponse[]): string => { + return appointments + .map((appointment) => { + const relevantInfo = { + id: appointment.id, + reason: appointment.reason, + duration: appointment.duration, + scheduled_date: appointment.scheduled_date, + } + return JSON.stringify(relevantInfo) + }) + .join('\n\n') + } \ No newline at end of file diff --git a/extensions/elation/actions/findFutureAppointment/getFutureAppoitnments.ts b/extensions/elation/actions/findFutureAppointment/getFutureAppoitnments.ts new file mode 100644 index 000000000..5c4309f92 --- /dev/null +++ b/extensions/elation/actions/findFutureAppointment/getFutureAppoitnments.ts @@ -0,0 +1,20 @@ + +import { type SettingsType } from '../../settings' +import { makeAPIClient } from '../../client' + +import { type AppointmentResponse } from 'extensions/elation/types' + + +export const getFutureAppointments = async (elationSettings: SettingsType, patientId: number): Promise => { + const api = makeAPIClient(elationSettings) + + const appointments = await api.findAppointments({ + patient: patientId, + from_date: new Date().toISOString(), + }) + return appointments.filter( + (appointment) => + appointment.status.status === 'Scheduled' || + appointment.status.status === 'Confirmed', + ) +} diff --git a/extensions/elation/actions/findFutureAppointment/lib/findAppointmentWithLLM/findAppointmentWithLLM.ts b/extensions/elation/actions/findFutureAppointment/lib/findAppointmentWithLLM/findAppointmentWithLLM.ts new file mode 100644 index 000000000..2b9fceabe --- /dev/null +++ b/extensions/elation/actions/findFutureAppointment/lib/findAppointmentWithLLM/findAppointmentWithLLM.ts @@ -0,0 +1,47 @@ +import { type ChatOpenAI } from '@langchain/openai' +import { type AIActionMetadata } from '../../../../../../src/lib/llm/openai/types' +import type { BaseCallbackHandler } from "@langchain/core/callbacks/base" +import { systemPrompt } from './prompt' +import { parser, type AppointmentFromAI } from 
'./parser' +import { type AppointmentResponse } from '../../../../types' +import { formatAppointments } from '../../formatAppointments' + +interface FindAppointmentWithLLMProps { + model: ChatOpenAI + appointments: AppointmentResponse[] + prompt: string + metadata: AIActionMetadata + callbacks?: BaseCallbackHandler[] +} + +export const findAppointmentWithLLM = async ({ + model, + appointments, + prompt, + metadata, + callbacks, +}: FindAppointmentWithLLMProps): Promise => { + const chain = model.pipe(parser) + + try { + const result = await chain.invoke( + await systemPrompt.format({ + format_instructions: parser.getFormatInstructions(), + appointments: formatAppointments(appointments), + prompt, + }), + { + metadata, + runName: 'ElationFindAppointment', + callbacks + } + ) + + return { + appointmentId: result.appointmentId, + explanation: result.explanation + } + } catch (error) { + throw new Error('Failed to find matching appointment.') + } +} \ No newline at end of file diff --git a/extensions/elation/actions/findFutureAppointment/lib/findAppointmentWithLLM/parser.ts b/extensions/elation/actions/findFutureAppointment/lib/findAppointmentWithLLM/parser.ts new file mode 100644 index 000000000..928b87503 --- /dev/null +++ b/extensions/elation/actions/findFutureAppointment/lib/findAppointmentWithLLM/parser.ts @@ -0,0 +1,23 @@ +import { z } from 'zod' +import { StructuredOutputParser } from '@langchain/core/output_parsers' + +export const AppointmentIdSchema = z.coerce + .number() + .nullable() + .describe('A single appointment') + +export const parser = StructuredOutputParser.fromZodSchema( + z.object({ + appointmentId: AppointmentIdSchema, + explanation: z + .string() + .describe( + 'A readable explanation of how the appointment was found and why', + ), + }) +) + +export interface AppointmentFromAI { + appointmentId: number | null + explanation: string +} \ No newline at end of file diff --git 
a/extensions/elation/actions/findFutureAppointment/lib/findAppointmentWithLLM/prompt.ts b/extensions/elation/actions/findFutureAppointment/lib/findAppointmentWithLLM/prompt.ts new file mode 100644 index 000000000..caec339a6 --- /dev/null +++ b/extensions/elation/actions/findFutureAppointment/lib/findAppointmentWithLLM/prompt.ts @@ -0,0 +1,17 @@ +import { ChatPromptTemplate } from '@langchain/core/prompts' + +export const systemPrompt = ChatPromptTemplate.fromTemplate(` + You are a helpful medical assistant. You will receive a list (array) of future appointments for a single patient and instructions about which appointment to find. You're supposed to use the information in the list to find an appointment that matches, if one exists. + + Important instructions: + - The appointment "reason" is the appointment type. + - Pay close attention to the instructions. They are intended to have been written by a clinician, for a clinician. + - Think like a clinician. In other words, "Rx" should match a prescription appointment or follow-up related to a prescription. + - If no appointment exists that obviously matches the instructions, that's a perfectly acceptable outcome. + - If multiple appointments exist that match the instructions, you should return the first one. 
+ - Return your response in the following JSON format: + {format_instructions} + + Input array: {appointments} + Instruction: {prompt} +`) \ No newline at end of file diff --git a/extensions/elation/actions/updatePatientTags/config/types.ts b/extensions/elation/actions/updatePatientTags/config/types.ts new file mode 100644 index 000000000..73df710c8 --- /dev/null +++ b/extensions/elation/actions/updatePatientTags/config/types.ts @@ -0,0 +1,28 @@ +import { z } from 'zod' +import { StructuredOutputParser } from '@langchain/core/output_parsers' + +export const SingleTagSchema = z.string().max(100).describe('A single tag') + +export const TagsSchema = z + .array(SingleTagSchema) + .max(10) + .refine((items) => new Set(items).size === items.length, { + message: 'All items must be unique, no duplicate values allowed', + }) + .describe('The updated array of tags') + +export const TagsOutputSchema = z.object({ + updatedTags: TagsSchema, + explanation: z + .string() + .describe('A readable explanation of the changes made to the tags and why'), +}) + +export const parser = StructuredOutputParser.fromZodSchema(TagsOutputSchema) + +export type TagsOutput = z.infer + +export interface TagsFromAI { + validatedTags: string[] + explanation: string +} diff --git a/extensions/elation/actions/updatePatientTags/lib/getTagsFromLLM/getTagsFromLLM.ts b/extensions/elation/actions/updatePatientTags/lib/getTagsFromLLM/getTagsFromLLM.ts new file mode 100644 index 000000000..cc767f18f --- /dev/null +++ b/extensions/elation/actions/updatePatientTags/lib/getTagsFromLLM/getTagsFromLLM.ts @@ -0,0 +1,40 @@ +import { type ChatOpenAI } from '@langchain/openai' +import { type AIActionMetadata } from '../../../../../../src/lib/llm/openai/types' +import { systemPrompt } from './prompt' +import { parser, type TagsFromAI } from './parser' +import type { BaseCallbackHandler } from "@langchain/core/callbacks/base" + +interface GetTagsFromLLMProps { + model: ChatOpenAI + existingTags: string[] + prompt: 
string + metadata: AIActionMetadata + callbacks?: BaseCallbackHandler[] +} + +export const getTagsFromLLM = async (props: GetTagsFromLLMProps): Promise => { + const { model, existingTags, prompt, metadata, callbacks } = props + + try { + const chain = model.pipe(parser) + const formattedPrompt = await systemPrompt.format({ + existingTags: JSON.stringify(existingTags), + prompt + }) + const result = await chain.invoke( + formattedPrompt, + { + metadata, + runName: 'ElationUpdatePatientTags', + callbacks + } + ) + + return { + validatedTags: result.updatedTags, + explanation: result.explanation + } + } catch (error) { + throw new Error('Failed to update patient tags.') + } +} \ No newline at end of file diff --git a/extensions/elation/actions/updatePatientTags/lib/getTagsFromLLM/parser.ts b/extensions/elation/actions/updatePatientTags/lib/getTagsFromLLM/parser.ts new file mode 100644 index 000000000..52eeba4c0 --- /dev/null +++ b/extensions/elation/actions/updatePatientTags/lib/getTagsFromLLM/parser.ts @@ -0,0 +1,5 @@ +import { StructuredOutputParser } from '@langchain/core/output_parsers' +import { TagsOutputSchema, type TagsFromAI } from '../../config/types' + +export const parser = StructuredOutputParser.fromZodSchema(TagsOutputSchema) +export type { TagsFromAI } \ No newline at end of file diff --git a/extensions/elation/actions/updatePatientTags/lib/getTagsFromLLM/prompt.ts b/extensions/elation/actions/updatePatientTags/lib/getTagsFromLLM/prompt.ts new file mode 100644 index 000000000..b5dfb2189 --- /dev/null +++ b/extensions/elation/actions/updatePatientTags/lib/getTagsFromLLM/prompt.ts @@ -0,0 +1,19 @@ +import { ChatPromptTemplate } from '@langchain/core/prompts' + +export const systemPrompt = ChatPromptTemplate.fromTemplate(` +You are a clinical data manager. You will receive a list (array) of patient tags for a single patient and instructions about which tags to add, update, or remove. 
These tags are used to assign particular attributes to patients which can help with patient care, like grouping of patients, categorizing patients for reporting, or identifying patients for care. + + Important instructions: + - The maximum number of tags is 10. + - The max length of a single tag is 100 characters. + - Ensure tags are unique. + + + Input array: {existingTags} + Instruction: {prompt} + + +Output a JSON object with two keys: +1. updatedTags: The updated array of tags. If the input array is empty, the output should be an empty array. +2. explanation: A readable explanation of the changes made to the tags and why +`) \ No newline at end of file diff --git a/extensions/elation/actions/updatePatientTags/updatePatientTags.test.ts b/extensions/elation/actions/updatePatientTags/updatePatientTags.test.ts index 955bf0d8e..383481537 100644 --- a/extensions/elation/actions/updatePatientTags/updatePatientTags.test.ts +++ b/extensions/elation/actions/updatePatientTags/updatePatientTags.test.ts @@ -1,32 +1,29 @@ +import { TestHelpers } from '@awell-health/extensions-core' import { makeAPIClientMockFunc } from '../../__mocks__/client' import { makeAPIClient } from '../../client' import { updatePatientTags as action } from './updatePatientTags' -import { TestHelpers } from '@awell-health/extensions-core' -import { ChatOpenAI } from '@langchain/openai' +// Mock the client jest.mock('../../client') -// Mock the module -jest.mock('@langchain/openai', () => { - const mockInvoke = jest.fn().mockResolvedValue({ - updatedTags: ['test', 'test2'], - explanation: 'Test explanation', +// Mock createOpenAIModel +jest.mock('../../../../src/lib/llm/openai/createOpenAIModel', () => ({ + createOpenAIModel: jest.fn().mockResolvedValue({ + model: { + pipe: jest.fn().mockReturnValue({ + invoke: jest.fn().mockResolvedValue({ + updatedTags: ['test', 'test2'], + explanation: 'Test explanation' + }) + }) + }, + metadata: { + care_flow_definition_id: 'whatever', + care_flow_id: 
'test-flow-id', + activity_id: 'test-activity-id' + } }) - - const mockChain = { - invoke: mockInvoke, - } - - const mockPipe = jest.fn().mockReturnValue(mockChain) - - const mockChatOpenAI = jest.fn().mockImplementation(() => ({ - pipe: mockPipe, - })) - - return { - ChatOpenAI: mockChatOpenAI, - } -}) +})) describe('Elation - Update patient tags', () => { const { @@ -47,7 +44,7 @@ describe('Elation - Update patient tags', () => { jest.clearAllMocks() }) - test('Should return the correct letter', async () => { + it('Should update tags using custom API key', async () => { await updatePatientTags.onEvent({ payload: { fields: { @@ -61,15 +58,83 @@ describe('Elation - Update patient tags', () => { password: 'password', auth_url: 'authUrl', base_url: 'baseUrl', - openAiApiKey: 'openaiApiKey', + openAiApiKey: 'custom-key', }, - } as any, + pathway: { + id: 'test-flow-id', + definition_id: '123', + tenant_id: '123', + org_slug: 'test-org-slug', + org_id: 'test-org-id' + }, + activity: { + id: 'test-activity-id' + }, + patient: { + id: 'test-patient-id' + } + }, + onComplete, + onError, + helpers, + }) + + expect(onComplete).toHaveBeenCalledWith({ + data_points: { + updatedTags: 'test, test2', + }, + events: [ + { + date: expect.any(String), + text: { + en: 'Previous patient tags: No tags\nUpdated patient tags: test, test2\nExplanation: Test explanation', + }, + }, + ], + }) + expect(onError).not.toHaveBeenCalled() + }) + + it('Should use default OpenAI config', async () => { + helpers.getOpenAIConfig = jest.fn().mockReturnValue({ + apiKey: 'default-key', + temperature: 0, + maxRetries: 3 + }) + + await updatePatientTags.onEvent({ + payload: { + fields: { + patientId: 123, + prompt: 'Add the tags "test" and "test2"', + }, + settings: { + client_id: 'clientId', + client_secret: 'clientSecret', + username: 'username', + password: 'password', + auth_url: 'authUrl', + base_url: 'baseUrl', + }, + pathway: { + id: 'test-flow-id', + definition_id: '123', + tenant_id: '123', + 
org_slug: 'test-org-slug', + org_id: 'test-org-id' + }, + activity: { + id: 'test-activity-id' + }, + patient: { + id: 'test-patient-id' + } + }, onComplete, onError, helpers, }) - expect(ChatOpenAI).toHaveBeenCalled() expect(onComplete).toHaveBeenCalledWith({ data_points: { updatedTags: 'test, test2', @@ -83,5 +148,6 @@ describe('Elation - Update patient tags', () => { }, ], }) + expect(onError).not.toHaveBeenCalled() }) }) diff --git a/extensions/elation/actions/updatePatientTags/updatePatientTags.ts b/extensions/elation/actions/updatePatientTags/updatePatientTags.ts index 204c71fa9..1a821669d 100644 --- a/extensions/elation/actions/updatePatientTags/updatePatientTags.ts +++ b/extensions/elation/actions/updatePatientTags/updatePatientTags.ts @@ -1,12 +1,21 @@ -import { type Action, Category } from '@awell-health/extensions-core' +import { Category, type Action } from '@awell-health/extensions-core' +import { addActivityEventLog } from '../../../../src/lib/awell/addEventLog' import { type settings } from '../../settings' import { makeAPIClient } from '../../client' import { FieldsValidationSchema, fields, dataPoints } from './config' -import { StructuredOutputParser } from '@langchain/core/output_parsers' -import { z } from 'zod' -import { ChatOpenAI } from '@langchain/openai' -import { addActivityEventLog } from '../../../../src/lib/awell/addEventLog' +import { updateElationTags } from './updateTags' +import { getTagsFromLLM } from './lib/getTagsFromLLM/getTagsFromLLM' +import { createOpenAIModel } from '../../../../src/lib/llm/openai/createOpenAIModel' +import { OPENAI_MODELS } from '../../../../src/lib/llm/openai/constants' +/** + * Awell Action: Update Patient Tags + * + * Takes existing tags and instructions, uses LLM to: + * 1. Generate updated list of tags + * 2. Provide explanation for changes + * 3. 
Update tags in Elation + */ export const updatePatientTags: Action< typeof fields, typeof settings, @@ -14,111 +23,48 @@ export const updatePatientTags: Action< > = { key: 'updatePatientTags', category: Category.EHR_INTEGRATIONS, - title: '🪄 Update patient tags', + title: '🪄 Update patient tags (Beta)', description: 'Update patient tags in Elation.', fields, previewable: false, dataPoints, - onEvent: async ({ payload, onComplete, onError }): Promise => { + onEvent: async ({ payload, onComplete, onError, helpers }): Promise => { + // 1. Validate input and initialize API client const { prompt, patientId } = FieldsValidationSchema.parse(payload.fields) const api = makeAPIClient(payload.settings) - const openAiApiKey = payload.settings.openAiApiKey - - if (openAiApiKey === undefined || openAiApiKey === '') { - await onError({ - events: [ - { - date: new Date().toISOString(), - text: { en: 'OpenAI API key is required for this action.' }, - error: { - category: 'SERVER_ERROR', - message: 'OpenAI API key is required for this action.', - }, - }, - ], - }) - return - } - + // 2. Get existing tags const { tags } = await api.getPatient(patientId) const existingTags = tags ?? [] - const ChatModelGPT4o = new ChatOpenAI({ - modelName: 'gpt-4o-2024-08-06', - openAIApiKey: openAiApiKey, - temperature: 0, - maxRetries: 3, - timeout: 10000, + // 3. Initialize OpenAI model with metadata and callbacks + const { model, metadata, callbacks } = await createOpenAIModel({ + settings: payload.settings, + helpers, + payload, + modelType: OPENAI_MODELS.GPT4o }) - const systemPrompt = `You are a clinical data manager. You will receive a list (array) of patient tags for a single patient and instructions about which tags to add, update, or remove. These tags are used to assign particular attributes to patients which can help with patient care, like grouping of patients, categorizing patients for reporting, or identifying patients for care. 
- - Important instructions: - - The maximum number of tags is 10. - - The max length of a single tag is 100 characters. - - Ensure tags are unique. - -Input array: ${JSON.stringify(existingTags)} -Instruction: ${prompt} - -Output a JSON object with two keys: -1. updatedTags: The updated array of tags. If the input array is empty, the output should be an empty array. -2. explanation: A readable explanation of the changes made to the tags and why` - - const SingleTagSchema = z.string().max(100).describe('A single tag') - const TagsSchema = z - .array(SingleTagSchema) - .max(10) - .refine((items) => new Set(items).size === items.length, { - message: 'All items must be unique, no duplicate values allowed', - }) - .describe('The updated array of tags') - - const parser = StructuredOutputParser.fromZodSchema( - z.object({ - updatedTags: TagsSchema, - explanation: z - .string() - .describe( - 'A readable explanation of the changes made to the tags and why', - ), - }), - ) - - let result: z.infer - - try { - const chain = ChatModelGPT4o.pipe(parser) - result = await chain.invoke(systemPrompt) - } catch (invokeError) { - console.error( - 'Error invoking ChatModelGPT4o for updatePatientTags:', - invokeError, - ) - throw new Error('Failed to update patient tags.') - } - - const validatedTags = TagsSchema.parse(result.updatedTags) + // 4. Generate updated tags + const { validatedTags, explanation } = await getTagsFromLLM({ + model, + existingTags, + prompt, + metadata, + callbacks + }) - if (validatedTags.length === 0) { - await api.updatePatient(patientId, { - // @ts-expect-error - elation api does not clear tags on an empty array - tags: ' ', - }) - } else { - await api.updatePatient(patientId, { - tags: validatedTags, - }) - } + // 5. Update tags in Elation + await updateElationTags(api, patientId, validatedTags) + // 6. 
Complete action with results await onComplete({ data_points: { updatedTags: validatedTags.join(', '), }, events: [ addActivityEventLog({ - message: `Previous patient tags: ${existingTags?.length > 0 ? existingTags?.join(', ') : 'No tags'}\nUpdated patient tags: ${validatedTags.join(', ')}\nExplanation: ${result.explanation}`, + message: `Previous patient tags: ${existingTags?.length > 0 ? existingTags?.join(', ') : 'No tags'}\nUpdated patient tags: ${validatedTags.join(', ')}\nExplanation: ${explanation}`, }), ], }) diff --git a/extensions/elation/actions/updatePatientTags/updatePatientTagsRealOpenAI.test.ts b/extensions/elation/actions/updatePatientTags/updatePatientTagsRealOpenAI.test.ts new file mode 100644 index 000000000..741ae8260 --- /dev/null +++ b/extensions/elation/actions/updatePatientTags/updatePatientTagsRealOpenAI.test.ts @@ -0,0 +1,79 @@ +import 'dotenv/config' +import { TestHelpers } from '@awell-health/extensions-core' +import { makeAPIClientMockFunc } from '../../__mocks__/client' +import { makeAPIClient } from '../../client' +import { updatePatientTags } from './updatePatientTags' + +jest.mock('../../client') +jest.setTimeout(60000) // Increased timeout to 60 seconds + +describe.skip('updatePatientTags - Real OpenAI calls', () => { + const { onComplete, onError, helpers, extensionAction, clearMocks } = + TestHelpers.fromAction(updatePatientTags) + + beforeEach(() => { + clearMocks() + jest.clearAllMocks() + + // Ensure API key is always defined + process.env.OPENAI_API_KEY = process.env.OPENAI_API_KEY || 'test-api-key' + + helpers.getOpenAIConfig = jest.fn().mockReturnValue({ + apiKey: process.env.OPENAI_API_KEY, + temperature: 0, + maxRetries: 2, + timeout: 30000 + }) + + // Mock Elation client with existing tags + const mockAPIClient = makeAPIClient as jest.Mock + mockAPIClient.mockImplementation((settings) => ({ + ...makeAPIClientMockFunc(settings), + getPatient: jest.fn().mockResolvedValue({ + tags: ['diabetes', 'hypertension'] + }), + 
updatePatient: jest.fn().mockResolvedValue({}) + })) + }) + + it('Should update tags using real OpenAI', async () => { + await extensionAction.onEvent({ + payload: { + fields: { + patientId: 123, + prompt: 'Remove tag "diabetes" and add tag "chronic_pain"', + }, + settings: { + client_id: 'clientId', + client_secret: 'clientSecret', + username: 'username', + password: 'password', + auth_url: 'authUrl', + base_url: 'baseUrl', + }, + pathway: { + id: 'test-flow-id', + definition_id: '123', + tenant_id: '123', + org_slug: 'test-org-slug', + org_id: 'test-org-id' + }, + activity: { + id: 'test-activity-id' + }, + patient: { + id: 'test-patient-id' + } + }, + onComplete, + onError, + helpers, + }) + + // Wait for all promises to resolve + await new Promise(resolve => setTimeout(resolve, 1000)) + + expect(onComplete).toHaveBeenCalled() + expect(onError).not.toHaveBeenCalled() + }, 60000) // Increased individual test timeout +}) \ No newline at end of file diff --git a/extensions/elation/actions/updatePatientTags/updateTags.ts b/extensions/elation/actions/updatePatientTags/updateTags.ts new file mode 100644 index 000000000..8a24f931e --- /dev/null +++ b/extensions/elation/actions/updatePatientTags/updateTags.ts @@ -0,0 +1,15 @@ +import { type ElationAPIClient } from '../../client' + +export const updateElationTags = async (api: ElationAPIClient, patientId: number, tags: string[]): Promise => { + if (tags.length === 0) { + await api.updatePatient(patientId, { + // @ts-expect-error - elation api does not clear tags on an empty array + tags: ' ', + }) + } else { + await api.updatePatient(patientId, { + tags, + }) + } + } + \ No newline at end of file diff --git a/extensions/elation/settings.ts b/extensions/elation/settings.ts index 10409b076..9ab76a668 100644 --- a/extensions/elation/settings.ts +++ b/extensions/elation/settings.ts @@ -46,13 +46,6 @@ export const settings = { '⚠️ Deprecated: Elation now uses client credentials authentication. 
This setting is no longer required and should be removed from your settings.', required: false, }, - openAiApiKey: { - key: 'openAiApiKey', - label: 'OpenAI API key ', - obfuscated: true, - required: false, - description: 'Required for some actions only.', - }, } satisfies Record export const SettingsValidationSchema = z.object({ @@ -60,7 +53,6 @@ export const SettingsValidationSchema = z.object({ auth_url: z.string().min(1), client_id: z.string().min(1), client_secret: z.string().min(1), - openAiApiKey: z.string().optional(), /** * Elation now uses client credentials authentication. * We don't remove the settings just yet for backward compatibility for existing care flows. @@ -69,3 +61,5 @@ export const SettingsValidationSchema = z.object({ username: z.string().optional(), password: z.string().optional(), } satisfies Record) + +export type SettingsType = z.infer diff --git a/extensions/healthie/actions/dataExchange/pushFormResponseToHealthie/pushFormResponseToHealthie.test.ts b/extensions/healthie/actions/dataExchange/pushFormResponseToHealthie/pushFormResponseToHealthie.test.ts index 52f797a6b..22d74d777 100644 --- a/extensions/healthie/actions/dataExchange/pushFormResponseToHealthie/pushFormResponseToHealthie.test.ts +++ b/extensions/healthie/actions/dataExchange/pushFormResponseToHealthie/pushFormResponseToHealthie.test.ts @@ -71,6 +71,9 @@ describe('pushFormResponseToHealthie', () => { pathway: { id: '5eN4qWbxZGSA', definition_id: 'whatever', + tenant_id: '123', + org_id: '123', + org_slug: 'org-slug', }, activity: { id: 'X74HeDQ4N0gtdaSEuzF8s' }, patient: { id: 'whatever' }, @@ -116,6 +119,9 @@ describe('pushFormResponseToHealthie', () => { pathway: { id: '5eN4qWbxZGSA', definition_id: 'whatever', + tenant_id: '123', + org_id: '123', + org_slug: 'org-slug', }, activity: { id: 'X74HeDQ4N0gtdaSEuzF8s' }, patient: { id: 'whatever' }, diff --git a/extensions/healthie/actions/dataExchange/pushFormResponsesToHealthie/pushFormResponsesToHealthie.test.ts 
b/extensions/healthie/actions/dataExchange/pushFormResponsesToHealthie/pushFormResponsesToHealthie.test.ts index b43cf4c86..a891462f7 100644 --- a/extensions/healthie/actions/dataExchange/pushFormResponsesToHealthie/pushFormResponsesToHealthie.test.ts +++ b/extensions/healthie/actions/dataExchange/pushFormResponsesToHealthie/pushFormResponsesToHealthie.test.ts @@ -83,6 +83,9 @@ describe('pushFormResponsesToHealthie', () => { pathway: { id: '5eN4qWbxZGSA', definition_id: 'whatever', + tenant_id: '123', + org_id: '123', + org_slug: 'org-slug', }, activity: { id: 'X74HeDQ4N0gtdaSEuzF8s' }, patient: { id: 'whatever' }, diff --git a/extensions/shelly/actions/categorizeMessage/categorizeMessage.test.ts b/extensions/shelly/actions/categorizeMessage/categorizeMessage.test.ts index f61ac1b12..194c1518b 100644 --- a/extensions/shelly/actions/categorizeMessage/categorizeMessage.test.ts +++ b/extensions/shelly/actions/categorizeMessage/categorizeMessage.test.ts @@ -6,9 +6,8 @@ import { ChatOpenAI } from '@langchain/openai' // Mock the module jest.mock('@langchain/openai', () => { const mockInvoke = jest.fn().mockResolvedValue({ - matched_category: 'Appointment Scheduling', - match_explanation: - 'The message contains a request for scheduling an appointment.', + matched_category: 'None', + match_explanation: 'Categorization was ambiguous; we could not find a proper category.' 
}) const mockChain = { @@ -43,13 +42,22 @@ describe('categorizeMessage - Mocked LLM calls', () => { const payload = generateTestPayload({ fields: { - message: 'I would like to schedule an appointment for next week.', - categories: - 'Appointment Scheduling,Medication Questions,Administrative Assistance,Feedback or Complaints', + message: 'test message', + categories: 'category1,category2' }, settings: { - openAiApiKey: 'a', + openAiApiKey: 'test-key' }, + pathway: { + id: 'test-pathway-id', + definition_id: 'test-def-id', + tenant_id: 'test-tenant-id', + org_slug: 'test-org-slug', + org_id: 'test-org-id' + }, + activity: { + id: 'test-activity-id' + } }) await extensionAction.onEvent({ @@ -61,21 +69,23 @@ describe('categorizeMessage - Mocked LLM calls', () => { expect(ChatOpenAI).toHaveBeenCalled() expect(categorizeMessageWithLLMSpy).toHaveBeenCalledWith({ - ChatModelGPT4oMini: expect.any(Object), - message: 'I would like to schedule an appointment for next week.', - categories: [ - 'Appointment Scheduling', - 'Medication Questions', - 'Administrative Assistance', - 'Feedback or Complaints', - ], + model: expect.any(Object), + message: 'test message', + categories: ['category1', 'category2'], + metadata: { + care_flow_definition_id: 'test-def-id', + care_flow_id: 'test-pathway-id', + activity_id: 'test-activity-id', + tenant_id: 'test-tenant-id', + org_slug: 'test-org-slug', + org_id: 'test-org-id' + }, }) expect(onComplete).toHaveBeenCalledWith({ data_points: { - category: 'Appointment Scheduling', - explanation: - '

The message contains a request for scheduling an appointment.

', + category: 'None', + explanation: '

Categorization was ambiguous; we could not find a proper category.

', }, }) diff --git a/extensions/shelly/actions/categorizeMessage/categorizeMessage.ts b/extensions/shelly/actions/categorizeMessage/categorizeMessage.ts index 2da3cae44..7c27aecad 100644 --- a/extensions/shelly/actions/categorizeMessage/categorizeMessage.ts +++ b/extensions/shelly/actions/categorizeMessage/categorizeMessage.ts @@ -1,54 +1,57 @@ import { Category, type Action } from '@awell-health/extensions-core' import { categorizeMessageWithLLM } from './lib/categorizeMessageWithLLM' -import { validatePayloadAndCreateSdk } from '../../lib' -import { type settings } from '../../settings' +import { createOpenAIModel } from '../../../../src/lib/llm/openai/createOpenAIModel' +import { OPENAI_MODELS } from '../../../../src/lib/llm/openai/constants' import { fields, dataPoints, FieldsValidationSchema } from './config' import { markdownToHtml } from '../../../../src/utils' +/** + * Awell Action: Message Categorization + * + * Takes a message and predefined categories as input, uses LLM to: + * 1. Determine the most appropriate category + * 2. Provide explanation for the categorization + * + * @returns category and HTML-formatted explanation + */ export const categorizeMessage: Action< typeof fields, - typeof settings, + Record, keyof typeof dataPoints > = { key: 'categorizeMessage', category: Category.WORKFLOW, title: 'Categorize Message', - description: - 'Categorize the input message into set of predefined categories and provides explanation.', + description: 'Categorizes messages into predefined categories with explanation.', fields, previewable: false, dataPoints, + onEvent: async ({ payload, onComplete, onError, helpers }): Promise => { - const { - ChatModelGPT4oMini, - fields: { categories, message }, - } = await validatePayloadAndCreateSdk({ - fieldsSchema: FieldsValidationSchema, + // 1. Validate input fields + const { message, categories } = FieldsValidationSchema.parse(payload.fields) + + // 2. 
Initialize OpenAI model with metadata + const { model, metadata, callbacks } = await createOpenAIModel({ + settings: payload.settings, + helpers, payload, + modelType: OPENAI_MODELS.GPT4oMini, }) - try { - const categorization_result = await categorizeMessageWithLLM({ - ChatModelGPT4oMini, - message, - categories, - }) - - const category = categorization_result.category - const explanationHtml = await markdownToHtml( - categorization_result.explanation - ) + // 3. Perform categorization + const result = await categorizeMessageWithLLM({ + model, + message, + categories, + metadata, + callbacks + }) - await onComplete({ - data_points: { - category, - explanation: explanationHtml, - }, - }) - } catch (error) { - console.error('Error categorizing message:', error) - // Catch in extention server - throw new Error('Error categorizing message') - } + // 4. Format and return results + const explanationHtml = await markdownToHtml(result.explanation) + await onComplete({ + data_points: { category: result.category, explanation: explanationHtml } + }) }, } diff --git a/extensions/shelly/actions/categorizeMessage/callingRealOpenAITest.test.ts b/extensions/shelly/actions/categorizeMessage/categorizeMessageRealOpenAI.test.ts similarity index 53% rename from extensions/shelly/actions/categorizeMessage/callingRealOpenAITest.test.ts rename to extensions/shelly/actions/categorizeMessage/categorizeMessageRealOpenAI.test.ts index 5368a8411..10aac5262 100644 --- a/extensions/shelly/actions/categorizeMessage/callingRealOpenAITest.test.ts +++ b/extensions/shelly/actions/categorizeMessage/categorizeMessageRealOpenAI.test.ts @@ -3,43 +3,62 @@ import { generateTestPayload } from '@/tests' import { categorizeMessage } from '.' 
import 'dotenv/config' -const settings = { - openAiApiKey: process.env.OPENAI_TEST_KEY, -} - -// Remove skip to run the test -describe.skip('categorizeMessage - Real LLM calls', () => { +describe.skip('categorizeMessage - Real OpenAI calls', () => { const { onComplete, onError, helpers, extensionAction, clearMocks } = TestHelpers.fromAction(categorizeMessage) beforeEach(() => { - clearMocks() // Reset mocks before each test - jest.clearAllMocks() // Reset any mock functions + clearMocks() + jest.clearAllMocks() + + // Ensure API key is always defined in test environment + process.env.OPENAI_API_KEY = process.env.OPENAI_API_KEY || 'test-api-key' + + helpers.getOpenAIConfig = jest.fn().mockReturnValue({ + apiKey: process.env.OPENAI_API_KEY as string, // Type assertion + temperature: 0, + maxRetries: 3, + timeout: 10000 + }) + }) + + afterEach(() => { + jest.clearAllTimers() + }) + + afterAll(async () => { + // Clean up any remaining promises + await new Promise(resolve => setTimeout(resolve, 100)) }) it('should successfully categorize a message about scheduling an appointment using real LLM', async () => { const payload = generateTestPayload({ fields: { message: 'I would like to schedule an appointment for next week.', - categories: - 'Appointment Scheduling,Medication Questions,Administrative Assistance,Feedback or Complaints', + categories: 'Appointment Scheduling,Medication Questions,Administrative Assistance,Feedback or Complaints', + }, + settings: {}, + pathway: { + id: 'test-pathway-id', + definition_id: 'test-def-id' }, - settings, + activity: { + id: 'test-activity-id' + } }) await extensionAction.onEvent({ payload, onComplete, onError, - helpers, + helpers: helpers // Use our mocked helpers }) // Real LangChain function is called expect(onComplete).toHaveBeenCalledWith({ data_points: { category: 'Appointment Scheduling', - explanation: - 'The message explicitly states a desire to schedule an appointment, which directly aligns with the Appointment Scheduling 
category.', + explanation: expect.stringMatching(/^

.*appointment.*<\/p>$/), }, }) @@ -53,22 +72,28 @@ describe.skip('categorizeMessage - Real LLM calls', () => { categories: 'Appointment Scheduling,Medication Questions,Administrative Assistance,Feedback or Complaints', }, - settings, + settings: {}, + pathway: { + id: 'test-pathway-id', + definition_id: 'test-def-id', + }, + activity: { + id: 'test-activity-id', + }, }) await extensionAction.onEvent({ payload, onComplete, onError, - helpers, + helpers: helpers // Use our mocked helpers }) // Real LangChain function is called and returns "None" expect(onComplete).toHaveBeenCalledWith({ data_points: { category: 'None', - explanation: - 'Categorization was ambiguous; we could not find a proper category.', + explanation: expect.stringMatching(/^

.*<\/p>$/), }, }) diff --git a/extensions/shelly/actions/categorizeMessage/lib/categorizeMessageWithLLM/categorizeMessageWithLLM.test.ts b/extensions/shelly/actions/categorizeMessage/lib/categorizeMessageWithLLM/categorizeMessageWithLLM.test.ts index c62577b2e..4b6b2e63a 100644 --- a/extensions/shelly/actions/categorizeMessage/lib/categorizeMessageWithLLM/categorizeMessageWithLLM.test.ts +++ b/extensions/shelly/actions/categorizeMessage/lib/categorizeMessageWithLLM/categorizeMessageWithLLM.test.ts @@ -3,17 +3,17 @@ import { categorizeMessageWithLLM } from './' import { type ChatOpenAI } from '@langchain/openai' describe('categorizeMessageWithLLM', () => { - let ChatModelGPT4oMiniMock: jest.Mocked + let modelMock: jest.Mocked beforeEach(() => { - ChatModelGPT4oMiniMock = { + modelMock = { pipe: jest.fn().mockReturnThis(), invoke: jest.fn(), } as unknown as jest.Mocked }) - it('should categorize a message about scheduling an appointment using real LLM', async () => { - ChatModelGPT4oMiniMock.invoke.mockResolvedValue({ + it('should categorize a message about scheduling an appointment', async () => { + modelMock.invoke.mockResolvedValue({ // @ts-expect-error it's fine, we have a parser matched_category: 'Appointment Scheduling', match_explanation: @@ -29,16 +29,24 @@ describe('categorizeMessageWithLLM', () => { const message = 'I would like to schedule an appointment for next week.' 
const result = await categorizeMessageWithLLM({ - ChatModelGPT4oMini: ChatModelGPT4oMiniMock, + model: modelMock, message, categories, + metadata: { + care_flow_definition_id: 'test-def-id', + care_flow_id: 'test-pathway-id', + activity_id: 'test-activity-id', + tenant_id: 'test-tenant-id', + org_slug: 'test-org-slug', + org_id: 'test-org-id' + } }) expect(result.category).toBe('Appointment Scheduling') }) - it('should categorize a message about medication using real LLM', async () => { - ChatModelGPT4oMiniMock.invoke.mockResolvedValue({ + it('should categorize a message about medication', async () => { + modelMock.invoke.mockResolvedValue({ // @ts-expect-error it's fine, we have a parser matched_category: 'Medication Questions', match_explanation: @@ -54,16 +62,24 @@ describe('categorizeMessageWithLLM', () => { const message = 'Can you tell me the correct dosage for my medication?' const result = await categorizeMessageWithLLM({ - ChatModelGPT4oMini: ChatModelGPT4oMiniMock, + model: modelMock, message, categories, + metadata: { + care_flow_definition_id: 'test-def-id', + care_flow_id: 'test-pathway-id', + activity_id: 'test-activity-id', + tenant_id: 'test-tenant-id', + org_slug: 'test-org-slug', + org_id: 'test-org-id' + } }) expect(result.category).toBe('Medication Questions') }) - it('should return "None" when the message does not match any category using real LLM', async () => { - ChatModelGPT4oMiniMock.invoke.mockResolvedValue({ + it('should return "None" when the message does not match any category', async () => { + modelMock.invoke.mockResolvedValue({ // @ts-expect-error it's fine, we have a parser matched_category: 'None', match_explanation: @@ -79,9 +95,17 @@ describe('categorizeMessageWithLLM', () => { const message = 'Is it going to rain tomorrow?' 
const result = await categorizeMessageWithLLM({ - ChatModelGPT4oMini: ChatModelGPT4oMiniMock, + model: modelMock, message, categories, + metadata: { + care_flow_definition_id: 'test-def-id', + care_flow_id: 'test-pathway-id', + activity_id: 'test-activity-id', + tenant_id: 'test-tenant-id', + org_slug: 'test-org-slug', + org_id: 'test-org-id' + } }) expect(result.category).toBe('None') diff --git a/extensions/shelly/actions/categorizeMessage/lib/categorizeMessageWithLLM/categorizeMessageWithLLM.ts b/extensions/shelly/actions/categorizeMessage/lib/categorizeMessageWithLLM/categorizeMessageWithLLM.ts index d2d161ffe..6b7efcce1 100644 --- a/extensions/shelly/actions/categorizeMessage/lib/categorizeMessageWithLLM/categorizeMessageWithLLM.ts +++ b/extensions/shelly/actions/categorizeMessage/lib/categorizeMessageWithLLM/categorizeMessageWithLLM.ts @@ -1,28 +1,52 @@ -import { parser, systemPrompt } from './constants' +import { parser } from './parser' +import { systemPrompt } from './prompt' import { type ChatOpenAI } from '@langchain/openai' +import { type AIActionMetadata } from '../../../../../../src/lib/llm/openai/types' +import type { BaseCallbackHandler } from "@langchain/core/callbacks/base" -// TODO: remove console logs eventually +/** + * Uses LLM to categorize a message into predefined categories. + * The function follows these steps: + * 1. Formats prompt with available categories + * 2. Runs LLM chain with structured output parsing + * 3. Validates and processes the result + * + * @example + * const result = await categorizeMessageWithLLM({ + * model, + * message: "I need to schedule an appointment", + * categories: ["Scheduling", "Medical Question"], + * metadata: { ... } + * }) + * // Returns: { category: "Scheduling", explanation: "..." 
} + */ export const categorizeMessageWithLLM = async ({ - ChatModelGPT4oMini, + model, message, categories, + metadata, + callbacks, }: { - ChatModelGPT4oMini: ChatOpenAI + model: ChatOpenAI message: string categories: string[] + metadata: AIActionMetadata + callbacks?: BaseCallbackHandler[] }): Promise<{ category: string; explanation: string }> => { const prompt = await systemPrompt.format({ categories: categories.concat('None').join(', '), input: message, }) - const chain = ChatModelGPT4oMini.pipe(parser) + const chain = model.pipe(parser) let result try { - result = await chain.invoke(prompt) + result = await chain.invoke( + prompt, + { metadata, runName: 'ShellyCategorizeMessage', callbacks } + ) } catch (error) { - console.error('Error invoking the chain:', error) throw new Error( 'Failed to categorize the message due to an internal error.' ) diff --git a/extensions/shelly/actions/categorizeMessage/lib/categorizeMessageWithLLM/constants.ts b/extensions/shelly/actions/categorizeMessage/lib/categorizeMessageWithLLM/constants.ts deleted file mode 100644 index e36c3c65f..000000000 --- a/extensions/shelly/actions/categorizeMessage/lib/categorizeMessageWithLLM/constants.ts +++ /dev/null @@ -1,34 +0,0 @@ -import { ChatPromptTemplate } from '@langchain/core/prompts' -import { StructuredOutputParser } from 'langchain/output_parsers' -import { z } from 'zod' - -// Define the Zod schema for the structured response -export const messageCategoriesSchema = z.object({ - matched_category: z.string().optional().default('None'), // The matched category - match_explanation: z.string(), // One-sentence explanation of the match -}) - -// Create a structured output parser -export const parser = StructuredOutputParser.fromZodSchema( - messageCategoriesSchema -) - -export const systemPrompt = ChatPromptTemplate.fromTemplate(` - You are an expert in categorizing different patient messages in a clinical context. - Use your expertise to solve the message categorization task: - 1. 
Categorize the input message into **one of the provided categories**: {categories}. If no category fits, return "None". - 2. Provide a concise explanation of why the message belongs to the selected category. - - Important Instructions: - - The message may be in multiple languages. - - **Only** choose from the provided list of categories. **Do not create new categories** or alter the given ones. - - If no category fits perfectly, or if the match is unclear, return "None" without guessing. - - Carefully verify your selection before submitting your answer. - - Respond exclusively with a valid JSON object containing the following keys: - - matched_category: The most suitable category. Must be from the following list: {categories} - None if no category fits. Absolutely refrain from creating new categories or altering existent one. Output should be one of the provided categories in the list. - - match_explanation: A brief explanation supporting your decision. - - Input: - {input} - `) diff --git a/extensions/shelly/actions/categorizeMessage/lib/categorizeMessageWithLLM/parser.ts b/extensions/shelly/actions/categorizeMessage/lib/categorizeMessageWithLLM/parser.ts new file mode 100644 index 000000000..262e0f6ab --- /dev/null +++ b/extensions/shelly/actions/categorizeMessage/lib/categorizeMessageWithLLM/parser.ts @@ -0,0 +1,17 @@ +import { StructuredOutputParser } from 'langchain/output_parsers' +import { z } from 'zod' + +/** + * Structured Output Parser + * Ensures LLM response follows the format: + * { + * matched_category: string // One of the provided categories or "None" + * match_explanation: string // Brief explanation of the categorization + * } + */ +export const parser = StructuredOutputParser.fromZodSchema( + z.object({ + matched_category: z.string().optional().default('None'), + match_explanation: z.string(), + }) +) \ No newline at end of file diff --git a/extensions/shelly/actions/categorizeMessage/lib/categorizeMessageWithLLM/prompt.ts 
b/extensions/shelly/actions/categorizeMessage/lib/categorizeMessageWithLLM/prompt.ts new file mode 100644 index 000000000..15c75ea0b --- /dev/null +++ b/extensions/shelly/actions/categorizeMessage/lib/categorizeMessageWithLLM/prompt.ts @@ -0,0 +1,32 @@ +import { ChatPromptTemplate } from '@langchain/core/prompts' + +/** + * System Prompt Template + * Instructs the LLM to: + * 1. Choose a category from the provided list + * 2. Explain the choice + * 3. Return "None" if no category fits + * + * Variables: + * {categories} - Available categories, comma-separated + * {input} - Message to categorize + */ +export const systemPrompt = ChatPromptTemplate.fromTemplate(` + You are an expert in categorizing different patient messages in a clinical context. + Use your expertise to solve the message categorization task: + 1. Categorize the input message into **one of the provided categories**: {categories}. If no category fits, return "None". + 2. Provide a concise explanation of why the message belongs to the selected category. + + Important Instructions: + - The message may be in multiple languages. + - **Only** choose from the provided list of categories. **Do not create new categories** or alter the given ones. + - If no category fits perfectly, or if the match is unclear, return "None" without guessing. + - Carefully verify your selection before submitting your answer. + + Respond exclusively with a valid JSON object containing the following keys: + - matched_category: The most suitable category from: {categories} + - match_explanation: A brief explanation supporting your decision. 
+ + Input: + {input} +`) \ No newline at end of file diff --git a/extensions/shelly/actions/generateMessage/generateMessage.test.ts b/extensions/shelly/actions/generateMessage/generateMessage.test.ts index f494123d2..bff950bad 100644 --- a/extensions/shelly/actions/generateMessage/generateMessage.test.ts +++ b/extensions/shelly/actions/generateMessage/generateMessage.test.ts @@ -3,30 +3,32 @@ import { TestHelpers } from '@awell-health/extensions-core' import { generateTestPayload } from '@/tests' import { generateMessage } from '.' -import { ChatOpenAI } from '@langchain/openai' - -jest.mock('@langchain/openai', () => { - const mockInvoke = jest.fn().mockResolvedValue({ - subject: 'Test Subject', - message: 'This is a test message', - }) - - const mockChain = { - invoke: mockInvoke, - } - - const mockPipe = jest.fn().mockReturnValue(mockChain) - - const mockChatOpenAI = jest.fn().mockImplementation(() => ({ - pipe: mockPipe, +import { type ChatOpenAI } from '@langchain/openai' +import { AIMessageChunk } from '@langchain/core/messages' + +jest.mock('../../../../src/lib/llm/openai/createOpenAIModel', () => ({ + createOpenAIModel: jest.fn().mockImplementation(({ modelType }) => ({ + model: { + pipe: jest.fn().mockReturnThis(), + invoke: jest.fn().mockResolvedValue({ + content: JSON.stringify({ + subject: 'Test Subject', + message: 'This is a test message', + }) + }), + } as unknown as ChatOpenAI, + metadata: { + care_flow_definition_id: 'test-def-id', + care_flow_id: 'test-pathway-id', + activity_id: 'test-activity-id', + org_slug: 'test-org-slug', + org_id: 'test-org-id', + model: modelType + } })) +})) - return { - ChatOpenAI: mockChatOpenAI, - } -}) - -describe('generateMessage - Mocked LLM calls', () => { +describe('generateMessage', () => { const { onComplete, onError, helpers, extensionAction, clearMocks } = TestHelpers.fromAction(generateMessage) @@ -36,10 +38,6 @@ describe('generateMessage - Mocked LLM calls', () => { }) it('should generate a message', async 
() => { - const generateMessageWithLLMSpy = jest.spyOn( - require('./lib/generateMessageWithLLM'), - 'generateMessageWithLLM' - ) const payload = generateTestPayload({ fields: { communicationObjective: 'Reminder', @@ -59,16 +57,6 @@ describe('generateMessage - Mocked LLM calls', () => { helpers, }) - expect(ChatOpenAI).toHaveBeenCalled() - - expect(generateMessageWithLLMSpy).toHaveBeenCalledWith({ - ChatModelGPT4o: expect.any(Object), - communicationObjective: 'Reminder', - stakeholder: 'Patient', - language: 'English', - personalizationInput: 'John Doe', - }) - expect(onComplete).toHaveBeenCalledWith({ data_points: { subject: 'Test Subject', @@ -80,10 +68,6 @@ describe('generateMessage - Mocked LLM calls', () => { }) it('should generate a message with default values', async () => { - const generateMessageWithLLMSpy = jest.spyOn( - require('./lib/generateMessageWithLLM'), - 'generateMessageWithLLM' - ) const payload = generateTestPayload({ fields: { communicationObjective: 'Update clinician on their patient', @@ -100,15 +84,6 @@ describe('generateMessage - Mocked LLM calls', () => { helpers, }) - expect(ChatOpenAI).toHaveBeenCalled() - expect(generateMessageWithLLMSpy).toHaveBeenCalledWith({ - ChatModelGPT4o: expect.any(Object), - communicationObjective: 'Update clinician on their patient', - stakeholder: 'Patient', - language: 'English', - personalizationInput: '', - }) - expect(onComplete).toHaveBeenCalledWith({ data_points: { subject: 'Test Subject', @@ -118,4 +93,28 @@ describe('generateMessage - Mocked LLM calls', () => { expect(onError).not.toHaveBeenCalled() }) + + it('should handle errors properly', async () => { + const payload = generateTestPayload({ + fields: { + communicationObjective: 'Invalid objective', + }, + settings: { + openAiApiKey: 'test_key', + }, + }) + + // Mock createOpenAIModel to throw an error for this test + const createOpenAIModel = jest.requireMock('../../../../src/lib/llm/openai/createOpenAIModel').createOpenAIModel + 
createOpenAIModel.mockRejectedValueOnce(new Error('Failed to create model')) + + await expect( + extensionAction.onEvent({ + payload, + onComplete, + onError, + helpers, + }) + ).rejects.toThrow('Failed to create model') + }) }) diff --git a/extensions/shelly/actions/generateMessage/generateMessage.ts b/extensions/shelly/actions/generateMessage/generateMessage.ts index fbd72b54f..c5f9e2fc1 100644 --- a/extensions/shelly/actions/generateMessage/generateMessage.ts +++ b/extensions/shelly/actions/generateMessage/generateMessage.ts @@ -1,55 +1,60 @@ import { Category, type Action } from '@awell-health/extensions-core' import { generateMessageWithLLM } from './lib/generateMessageWithLLM' -import { validatePayloadAndCreateSdk } from '../../lib' -import { type settings } from '../../settings' import { fields, dataPoints, FieldsValidationSchema } from './config' import { markdownToHtml } from '../../../../src/utils' +import { createOpenAIModel } from '../../../../src/lib/llm/openai/createOpenAIModel' +import { OPENAI_MODELS } from '../../../../src/lib/llm/openai/constants' +/** + * Awell Action: Message Generation + * + * Takes communication objective and personalization inputs, uses LLM to: + * 1. Generate a personalized message + * 2. Create appropriate subject line + * + * @returns subject and HTML-formatted message + */ export const generateMessage: Action< typeof fields, - typeof settings, + Record, keyof typeof dataPoints > = { key: 'generateMessage', category: Category.WORKFLOW, title: 'Generate Message', - description: - 'Generate a personalized message', + description: 'Generate a personalized message', fields, previewable: false, dataPoints, + onEvent: async ({ payload, onComplete, onError, helpers }): Promise => { - const { - ChatModelGPT4o, - fields: { communicationObjective, personalizationInput, stakeholder, language }, - } = await validatePayloadAndCreateSdk({ - fieldsSchema: FieldsValidationSchema, + // 1. 
Validate input fields + const { communicationObjective, personalizationInput, stakeholder, language } = + FieldsValidationSchema.parse(payload.fields) + + // 2. Initialize OpenAI model with metadata + const { model, metadata, callbacks } = await createOpenAIModel({ + settings: payload.settings, + helpers, payload, + modelType: OPENAI_MODELS.GPT4o // Using GPT4 for high-quality message generation }) - try { - const generated_message = await generateMessageWithLLM({ - ChatModelGPT4o, - communicationObjective, - personalizationInput, - stakeholder, - language, - }) - - const { subject, message } = generated_message - - const htmlMessage = await markdownToHtml(message) + // 3. Generate message + const { subject, message } = await generateMessageWithLLM({ + model, + communicationObjective, + personalizationInput, + stakeholder, + language, + metadata, + callbacks + }) - await onComplete({ - data_points: { - subject, - message: htmlMessage, - }, - }) - } catch (error) { - console.error('Error generating message:', error) - // Catch in extention server - throw new Error('Error generating message') - } + // 4. Format and return results + const htmlMessage = await markdownToHtml(message) + await onComplete({ + data_points: { subject, message: htmlMessage } + }) }, } diff --git a/extensions/shelly/actions/generateMessage/generateMessageRealOpenAI.test.ts b/extensions/shelly/actions/generateMessage/generateMessageRealOpenAI.test.ts index 1b926f170..913e37201 100644 --- a/extensions/shelly/actions/generateMessage/generateMessageRealOpenAI.test.ts +++ b/extensions/shelly/actions/generateMessage/generateMessageRealOpenAI.test.ts @@ -2,6 +2,8 @@ import 'dotenv/config' import { TestHelpers } from '@awell-health/extensions-core' import { generateTestPayload } from '@/tests' import { generateMessage } from '.' 
+import { createOpenAIModel } from '../../../../src/lib/llm/openai/createOpenAIModel' +import { OPENAI_MODELS } from '../../../../src/lib/llm/openai/constants' jest.setTimeout(60000) // Increase timeout to 60 seconds for all tests in this file @@ -27,6 +29,13 @@ describe.skip('generateMessage - Real OpenAI calls', () => { }, }) + const { model, metadata } = await createOpenAIModel({ + settings: payload.settings, + helpers, + payload, + modelType: OPENAI_MODELS.GPT4o + }) + await extensionAction.onEvent({ payload, onComplete, @@ -67,6 +76,13 @@ describe.skip('generateMessage - Real OpenAI calls', () => { }, }) + const { model, metadata } = await createOpenAIModel({ + settings: payload.settings, + helpers, + payload, + modelType: OPENAI_MODELS.GPT4o + }) + await extensionAction.onEvent({ payload, onComplete, diff --git a/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/constants.ts b/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/constants.ts deleted file mode 100644 index 0dc7a6321..000000000 --- a/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/constants.ts +++ /dev/null @@ -1,124 +0,0 @@ -import { ChatPromptTemplate } from '@langchain/core/prompts' -import { StructuredOutputParser } from 'langchain/output_parsers' -import { z } from 'zod' - -// Define the Zod schema for the structured response -export const generatedMessageCategoriesSchema = z.object({ - subject: z.string(), // generated subject - message: z.string(), // generated message -}) - -// Create a structured output parser -export const parser = StructuredOutputParser.fromZodSchema( - generatedMessageCategoriesSchema -) - -// TODO: tune further -export const systemPrompt = ChatPromptTemplate.fromTemplate(` - You are an AI language model tasked with composing **personalized messages** for a **{stakeholder}** within a clinical workflow. 
Your goals are to: - -- Align the message with the **Communication Objective** to optimize for response, engagement, or desired action. -- Use the **Personalization Inputs** to tailor the message appropriately. -- Ensure clarity, appropriateness, and compliance with healthcare communication standards. -- **Generate the message in the specified **Language**, ensuring accuracy and naturalness.** -- **Keep the message brief and concise while still optimizing for the Communication Objective.** - -**Important Notes to Prevent Misuse:** - -- **Remain Focused on the Task:** You must **never change your goal** of composing appropriate messages as specified. -- **Limit Your Output:** **Do not generate any content** other than what is instructed—specifically, the subject and message within the context of the communication. - -Let's proceed step by step: - -1. **Review the Inputs Carefully:** - - - **Communication Objective:** Understand the main purpose of the message. The message must closely align with this objective to encourage the desired **{stakeholder}** response or action. - - - **Personalization Inputs:** Use only the details provided here to personalize the message. **Do not infer or assume** any additional information about the recipient. - - - **Stakeholder:** Identify the intended recipient of the message (e.g., Patient, Clinician, Caregiver) and customize the message accordingly. - - - **Language:** **Generate the message in the specified language**, ensuring proper grammar and cultural appropriateness. - -2. **Message Structure:** - - - **Keep the message brief and concise**, while still effectively conveying the necessary information to optimize for the **Communication Objective**. - - - **Greeting:** - - Start with an appropriate greeting. - - Use the recipient's name if provided (e.g., "Dear [Name],", "Hello [Name],"). - - If no name is provided, use a generic greeting appropriate for the stakeholder (e.g., "Hello,"). 
- - - **Body:** - - Clearly and **concisely** convey the message in alignment with the **Communication Objective**. - - Incorporate **Personalization Inputs** naturally to optimize engagement and encourage the desired response or action. - - Refrain from phrases like "We hope this message finds you well" or similar pleasantries. - - - **Closing:** - - End with a courteous sign-off suitable for the stakeholder (e.g., "Sincerely,", "Best regards,"). SIgn of as Your Care Team. - - Include any necessary next steps or contact information, if relevant. - -3. **Content Guidelines:** - - - **Use Only Provided Information:** - - Do not include any details not present in the inputs. - - Avoid adding any assumptions or external information. - - - **Stay on Task:** - - **Never change your goal** of composing appropriate messages. - - **Do not generate any content** other than the subject and message as specified. - - Do not include personal opinions, extraneous information, or any inappropriate content. - - - **Focus on the Objective:** - - Ensure every part of the message contributes to achieving the **Communication Objective**. - - Personalization should enhance the message's effectiveness in prompting the desired recipient action. - -4. **Style and Tone:** - - - Use a professional and appropriate tone for the stakeholder (e.g., friendly for patients, formal for clinicians). - - Always write in a clear, respectful, and engaging manner to optimize the message's impact. Tailor the tone to the recipient's role and the context of the message. - - **Always write from the perspective of the care organization using first person plural (e.g., "We are..."). Do not use first person singular ("I am...") or third person perspectives.** - -5. **Compliance and Sensitivity:** - - - Maintain confidentiality and comply with all relevant privacy regulations. - - Be culturally sensitive and avoid any language that could be considered offensive or inappropriate. - -6. 
**Language:** - - - **Generate the subject and message in the specified **Language**, ensuring proper grammar, vocabulary, and cultural appropriateness. - - -7. **Final Output:** - - - Respond exclusively with a valid JSON object containing the following keys - this is critical: - - - **subject**: A clear, professional, and concise subject line that aligns with the **Communication Objective** and is appropriate for clinical settings. - - - **message**: The complete, polished message formatted in **markdown**. Do not include any instructions or extra commentary. Ensure the message meets the following criteria: - - - **Brevity and Conciseness**: Keep the message brief and to the point while still effectively conveying the necessary information to achieve the **Communication Objective**. - - **Clarity and Correctness**: Ensure the message is free of spelling and grammatical errors. Use clear and straightforward language. - - **Truthfulness**: It is absolutely paramount that the information provided in the message is accurate and truthful. Do not include any misleading or false information that you did not get from the inputs. This is critical for maintaining trust and integrity in healthcare communication. - - **Completeness**: The message must be complete and ready to send as is. It is critical to never use placeholders (e.g., "[Contact Information]", "[Insert Date]") or leave out essential information. If you want to urge recipient to contact the office and you do not have contact information keep it general and absolutely refrain from including any placeholders. 
- - - -**Inputs:** - -- **Communication Objective:** - - {communicationObjective} - -- **Personalization Inputs:** - - {personalizationInput} - -- **Stakeholder:** - - {stakeholder} - -- **Language:** - - {language} - `) diff --git a/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/generateMessageWithLLM.test.ts b/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/generateMessageWithLLM.test.ts index 516e69b77..68e191d20 100644 --- a/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/generateMessageWithLLM.test.ts +++ b/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/generateMessageWithLLM.test.ts @@ -2,16 +2,34 @@ import 'dotenv/config' import { generateMessageWithLLM } from './generateMessageWithLLM' import { type ChatOpenAI } from '@langchain/openai' import { AIMessageChunk } from '@langchain/core/messages' - +import { createOpenAIModel } from '../../../../../../src/lib/llm/openai/createOpenAIModel' +import { OPENAI_MODELS } from '../../../../../../src/lib/llm/openai/constants' describe('generateMessageWithLLM', () => { - let ChatModelGPT4oMock: jest.Mocked + let mockModel: jest.Mocked + let model: Awaited> - beforeEach(() => { - ChatModelGPT4oMock = { + beforeEach(async () => { + mockModel = { pipe: jest.fn().mockReturnThis(), invoke: jest.fn(), } as unknown as jest.Mocked + + model = await createOpenAIModel({ + settings: { + openAiApiKey: 'test-key', + }, + modelType: OPENAI_MODELS.GPT4o, + helpers: { + getOpenAIConfig: () => ({ apiKey: 'test-key' }), + }, + payload: { + pathway: {} as any, + activity: {} as any, + patient: {} as any, + settings: {}, + }, + }) }) it('should generate a message for a patient appointment reminder', async () => { @@ -20,21 +38,25 @@ describe('generateMessageWithLLM', () => { message: 'Dear John,\n\nThis is a reminder about your appointment scheduled for tomorrow at 2:00 PM. 
Please arrive 15 minutes early to complete any necessary paperwork.\n\nBest regards,\nYour Care Team' } - // Mock returning the AIMessageChunk with JSON stringified content - ChatModelGPT4oMock.invoke.mockResolvedValueOnce( + mockModel.invoke.mockResolvedValueOnce( new AIMessageChunk({ content: JSON.stringify(mockedResponse) }) ) const result = await generateMessageWithLLM({ - ChatModelGPT4o: ChatModelGPT4oMock, + model: mockModel, communicationObjective: 'Remind patient of upcoming appointment. Ask patient to arrive 15 minutes early', personalizationInput: 'Patient Name: John, Appointment Time: 2:00 PM tomorrow', stakeholder: 'Patient', - language: 'English' + language: 'English', + metadata: model.metadata }) expect(result).toMatchObject(mockedResponse) - expect(ChatModelGPT4oMock.invoke).toHaveBeenCalledTimes(1) + expect(mockModel.invoke).toHaveBeenCalledTimes(1) + expect(mockModel.invoke).toHaveBeenCalledWith( + expect.any(String), + { metadata: model.metadata, runName: 'ShellyGenerateMessage' } + ) }) it('should generate a message for medication instructions', async () => { @@ -43,21 +65,21 @@ describe('generateMessageWithLLM', () => { message: 'Dear Sarah,\n\nYour new medication, Lisinopril, should be taken once daily with food. Please remember to monitor your blood pressure regularly and report any side effects to our office.\n\nSincerely,\nYour Care Team' } - // Mock returning the AIMessageChunk with JSON stringified content - ChatModelGPT4oMock.invoke.mockResolvedValueOnce( + mockModel.invoke.mockResolvedValueOnce( new AIMessageChunk({ content: JSON.stringify(mockedResponse) }) ) const result = await generateMessageWithLLM({ - ChatModelGPT4o: ChatModelGPT4oMock, + model: mockModel, communicationObjective: 'Provide medication instructions. 
Emphasize the importance of blood pressure monitoring', personalizationInput: 'Patient Name: Sarah, Medication: Lisinopril', stakeholder: 'Patient', - language: 'English' + language: 'English', + metadata: model.metadata }) expect(result).toMatchObject(mockedResponse) - expect(ChatModelGPT4oMock.invoke).toHaveBeenCalledTimes(1) + expect(mockModel.invoke).toHaveBeenCalledTimes(1) }) it('should generate a message in a different language', async () => { @@ -66,20 +88,44 @@ describe('generateMessageWithLLM', () => { message: 'Estimado Carlos,\n\nEste es un recordatorio de su cita programada para mañana a las 10:00 AM. Por favor, llegue 15 minutos antes para completar cualquier papeleo necesario.\n\nSaludos cordiales,\nSu Equipo de Atención' } - // Mock returning the AIMessageChunk with JSON stringified content - ChatModelGPT4oMock.invoke.mockResolvedValueOnce( + mockModel.invoke.mockResolvedValueOnce( new AIMessageChunk({ content: JSON.stringify(mockedResponse) }) ) const result = await generateMessageWithLLM({ - ChatModelGPT4o: ChatModelGPT4oMock, + model: mockModel, communicationObjective: 'Remind patient of upcoming appointment. 
Ask patient to arrive 15 minutes early', personalizationInput: 'Patient Name: Carlos, Appointment Time: 10:00 AM tomorrow', stakeholder: 'Patient', - language: 'Spanish' + language: 'Spanish', + metadata: model.metadata }) expect(result).toMatchObject(mockedResponse) - expect(ChatModelGPT4oMock.invoke).toHaveBeenCalledTimes(1) + expect(mockModel.invoke).toHaveBeenCalledTimes(1) + }) + + it('should handle retry logic when initial response is invalid', async () => { + const invalidResponse = { content: 'invalid json' } + const validResponse = { + subject: 'Valid Subject', + message: 'Valid Message' + } + + mockModel.invoke + .mockResolvedValueOnce(new AIMessageChunk(invalidResponse)) + .mockResolvedValueOnce(new AIMessageChunk({ content: JSON.stringify(validResponse) })) + + const result = await generateMessageWithLLM({ + model: mockModel, + communicationObjective: 'Test objective', + personalizationInput: 'Test input', + stakeholder: 'Patient', + language: 'English', + metadata: model.metadata + }) + + expect(result).toMatchObject(validResponse) + expect(mockModel.invoke).toHaveBeenCalledTimes(2) }) }) diff --git a/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/generateMessageWithLLM.ts b/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/generateMessageWithLLM.ts index 881d1cbea..add2bd85a 100644 --- a/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/generateMessageWithLLM.ts +++ b/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/generateMessageWithLLM.ts @@ -1,19 +1,39 @@ -import { parser, systemPrompt } from './constants' +import { parser } from './parser' +import { systemPrompt } from './prompt' import { type ChatOpenAI } from '@langchain/openai' +import { type AIActionMetadata } from '../../../../../../src/lib/llm/openai/types' +import type { BaseCallbackHandler } from "@langchain/core/callbacks/base" +/** + * Generates a personalized message using LLM with retry logic + * + * 
@param model - OpenAI chat model + * @param communicationObjective - Purpose of the message + * @param personalizationInput - Details for message customization + * @param stakeholder - Target recipient (e.g., Patient, Clinician) + * @param language - Message language + * @param metadata - Tracking info for LangSmith + * @param callbacks - Optional callbacks for LangChain + * @returns Generated subject and message + */ export const generateMessageWithLLM = async ({ - ChatModelGPT4o, + model, communicationObjective, personalizationInput, stakeholder, language, + metadata, + callbacks, }: { - ChatModelGPT4o: ChatOpenAI + model: ChatOpenAI communicationObjective: string personalizationInput: string stakeholder: string language: string + metadata: AIActionMetadata + callbacks?: BaseCallbackHandler[] }): Promise<{ subject: string; message: string }> => { + // 1. Prepare prompt with inputs const prompt = await systemPrompt.format({ communicationObjective, personalizationInput, @@ -21,57 +41,58 @@ export const generateMessageWithLLM = async ({ language, }) - const structured_output_chain = ChatModelGPT4o.pipe(parser) + // 2. Create chain with structured output + const structured_output_chain = model.pipe(parser) - const MAX_RETRIES = 3; - let retries = 0; - let subject = ''; - let message = ''; + // 3. Run chain with retries + const MAX_RETRIES = 3 + let retries = 0 + let subject = '' + let message = '' - // TODO: do it with more grace eventually while (retries < MAX_RETRIES) { // Sometimes the LLM returns a non-JSON response try { - const generated_message = await structured_output_chain.invoke(prompt); - subject = generated_message.subject ?? ''; - message = generated_message.message ?? ''; + const generated_message = await structured_output_chain.invoke( + prompt, + { metadata, runName: 'ShellyGenerateMessage', callbacks } + ) + subject = generated_message.subject ?? '' + message = generated_message.message ?? 
'' - // If subject or message are not directly available (parser issue), try parsing AIMessageChunk + // If subject or message are not directly available, try parsing AIMessageChunk if (subject.trim() === '' || message.trim() === '') { - console.log('Attempting to parse AIMessageChunk...'); - // Attempt to get content from AIMessageChunk if ('content' in generated_message) { try { - const parsedContent = JSON.parse(generated_message.content as string); + const parsedContent = JSON.parse(generated_message.content as string) if (typeof parsedContent === 'object' && parsedContent !== null) { if ('subject' in parsedContent && typeof parsedContent.subject === 'string') { - subject = parsedContent.subject; + subject = parsedContent.subject } if ('message' in parsedContent && typeof parsedContent.message === 'string') { - message = parsedContent.message; + message = parsedContent.message } } } catch (error) { - console.error('Error parsing AIMessageChunk content:', error); + throw new Error('Error parsing message content') } } } // If we have both subject and message, break the loop if (subject.trim() !== '' && message.trim() !== '') { - break; + break } // If we reach here, it means we didn't get valid subject and message - throw new Error('Failed to generate valid subject and message'); + throw new Error('Failed to generate valid subject and message') } catch (error) { - console.error(`Attempt ${retries + 1} failed:`, error); - retries++; + retries++ if (retries >= MAX_RETRIES) { - throw new Error('Failed to generate the message after multiple attempts'); + throw new Error('Failed to generate the message after multiple attempts') } } } - return { subject, message }; + return { subject, message } } diff --git a/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/generateMessageWithLLMRealOpenAI.test.ts b/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/generateMessageWithLLMRealOpenAI.test.ts index d15ddc506..e406b937c 100644 --- 
a/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/generateMessageWithLLMRealOpenAI.test.ts +++ b/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/generateMessageWithLLMRealOpenAI.test.ts @@ -1,28 +1,40 @@ import 'dotenv/config' import { generateMessageWithLLM } from '.' -import { ChatOpenAI } from '@langchain/openai' +import { createOpenAIModel } from '../../../../../../src/lib/llm/openai/createOpenAIModel' +import { OPENAI_MODELS } from '../../../../../../src/lib/llm/openai/constants' +import { type AIActionMetadata } from '../../../../../../src/lib/llm/openai/types' -jest.setTimeout(60000); // Increases timeout to 60 seconds for all tests in this file +jest.setTimeout(60000) // Increases timeout to 60 seconds for all tests in this file +describe.skip('generateMessageWithLLM with real OpenAI', () => { + let model: Awaited> -describe.skip('generateMessageWithLLM', () => { - let ChatModelGPT4o: ChatOpenAI - - beforeEach(() => { - ChatModelGPT4o = new ChatOpenAI({ - modelName: 'gpt-4', - temperature: 0, - timeout: 10000, + beforeEach(async () => { + model = await createOpenAIModel({ + settings: { + openAiApiKey: process.env.OPENAI_API_KEY, + }, + modelType: OPENAI_MODELS.GPT4o, + helpers: { + getOpenAIConfig: () => ({ apiKey: process.env.OPENAI_API_KEY ?? '' }), + }, + payload: { + pathway: {} as any, + activity: {} as any, + patient: {} as any, + settings: {}, + }, }) }) it('should generate a message for a patient appointment reminder', async () => { const result = await generateMessageWithLLM({ - ChatModelGPT4o, + model: model.model, communicationObjective: 'Remind patient of upcoming appointment. 
Ask patient to arrive 15 minutes early', personalizationInput: 'Patient Name: John, Appointment Time: 2:00 PM tomorrow', stakeholder: 'Patient', - language: 'English' + language: 'English', + metadata: model.metadata }) expect(result).toHaveProperty('subject') @@ -35,11 +47,12 @@ describe.skip('generateMessageWithLLM', () => { it('should generate a message for medication instructions', async () => { const result = await generateMessageWithLLM({ - ChatModelGPT4o, + model: model.model, communicationObjective: 'Provide medication instructions. Emphasize the importance of blood pressure monitoring', personalizationInput: 'Patient Name: Sarah, Medication: Lisinopril', stakeholder: 'Patient', - language: 'English' + language: 'English', + metadata: model.metadata }) expect(result).toHaveProperty('subject') @@ -50,13 +63,14 @@ describe.skip('generateMessageWithLLM', () => { expect(result.message).toContain('blood pressure') }) - it('should generate a message in a different language', async () => { + it('should generate a message in Spanish', async () => { const result = await generateMessageWithLLM({ - ChatModelGPT4o, + model: model.model, communicationObjective: 'Remind patient of upcoming appointment. 
Ask patient to arrive 15 minutes early', personalizationInput: 'Patient Name: Carlos, Appointment Time: 10:00 AM tomorrow', stakeholder: 'Patient', - language: 'Spanish' + language: 'Spanish', + metadata: model.metadata }) expect(result).toHaveProperty('subject') diff --git a/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/parser.ts b/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/parser.ts new file mode 100644 index 000000000..a939d5b5d --- /dev/null +++ b/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/parser.ts @@ -0,0 +1,15 @@ +import { StructuredOutputParser } from 'langchain/output_parsers' +import { z } from 'zod' + +/** + * Message Generation Output Schema + * Defines the expected structure of the LLM response: + * - subject: Clear, professional subject line + * - message: Complete, markdown-formatted message body + */ +export const messageSchema = z.object({ + subject: z.string(), + message: z.string(), +}) + +export const parser = StructuredOutputParser.fromZodSchema(messageSchema) \ No newline at end of file diff --git a/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/prompt.ts b/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/prompt.ts new file mode 100644 index 000000000..c81318f12 --- /dev/null +++ b/extensions/shelly/actions/generateMessage/lib/generateMessageWithLLM/prompt.ts @@ -0,0 +1,118 @@ +import { ChatPromptTemplate } from '@langchain/core/prompts' + +/** + * System Prompt Template for message generation + * Instructs the LLM to: + * 1. Generate personalized messages for specific stakeholders + * 2. Follow healthcare communication standards + * 3. Output in specified language + * 4. Return structured subject and message + */ +export const systemPrompt = ChatPromptTemplate.fromTemplate(` + You are an AI language model tasked with composing **personalized messages** for a **{stakeholder}** within a clinical workflow. 
Your goals are to: + + - Align the message with the **Communication Objective** to optimize for response, engagement, or desired action. + - Use the **Personalization Inputs** to tailor the message appropriately. + - Ensure clarity, appropriateness, and compliance with healthcare communication standards. + - **Generate the message in the specified **Language**, ensuring accuracy and naturalness.** + - **Keep the message brief and concise while still optimizing for the Communication Objective.** + + **Important Notes to Prevent Misuse:** + + - **Remain Focused on the Task:** You must **never change your goal** of composing appropriate messages as specified. + - **Limit Your Output:** **Do not generate any content** other than what is instructed—specifically, the subject and message within the context of the communication. + + Let's proceed step by step: + + 1. **Review the Inputs Carefully:** + + - **Communication Objective:** Understand the main purpose of the message. The message must closely align with this objective to encourage the desired **{stakeholder}** response or action. + + - **Personalization Inputs:** Use only the details provided here to personalize the message. **Do not infer or assume** any additional information about the recipient. + + - **Stakeholder:** Identify the intended recipient of the message (e.g., Patient, Clinician, Caregiver) and customize the message accordingly. + + - **Language:** **Generate the message in the specified language**, ensuring proper grammar and cultural appropriateness. + + 2. **Message Structure:** + + - **Keep the message brief and concise**, while still effectively conveying the necessary information to optimize for the **Communication Objective**. + + - **Greeting:** + - Start with an appropriate greeting. + - Use the recipient's name if provided (e.g., "Dear [Name],", "Hello [Name],"). + - If no name is provided, use a generic greeting appropriate for the stakeholder (e.g., "Hello,"). 
+ + - **Body:** + - Clearly and **concisely** convey the message in alignment with the **Communication Objective**. + - Incorporate **Personalization Inputs** naturally to optimize engagement and encourage the desired response or action. + - Refrain from phrases like "We hope this message finds you well" or similar pleasantries. + + - **Closing:** + - End with a courteous sign-off suitable for the stakeholder (e.g., "Sincerely,", "Best regards,"). Sign off as Your Care Team. + - Include any necessary next steps or contact information, if relevant. + + 3. **Content Guidelines:** + + - **Use Only Provided Information:** + - Do not include any details not present in the inputs. + - Avoid adding any assumptions or external information. + + - **Stay on Task:** + - **Never change your goal** of composing appropriate messages. + - **Do not generate any content** other than the subject and message as specified. + - Do not include personal opinions, extraneous information, or any inappropriate content. + + - **Focus on the Objective:** + - Ensure every part of the message contributes to achieving the **Communication Objective**. + - Personalization should enhance the message's effectiveness in prompting the desired recipient action. + + 4. **Style and Tone:** + + - Use a professional and appropriate tone for the stakeholder (e.g., friendly for patients, formal for clinicians). + - Always write in a clear, respectful, and engaging manner to optimize the message's impact. Tailor the tone to the recipient's role and the context of the message. + - **Always write from the perspective of the care organization using first person plural (e.g., "We are..."). Do not use first person singular ("I am...") or third person perspectives.** + + 5. **Compliance and Sensitivity:** + + - Maintain confidentiality and comply with all relevant privacy regulations. + - Be culturally sensitive and avoid any language that could be considered offensive or inappropriate. + + 6. 
**Language:** + + - **Generate the subject and message in the specified **Language**, ensuring proper grammar, vocabulary, and cultural appropriateness.** + + + 7. **Final Output:** + + - Respond exclusively with a valid JSON object containing the following keys - this is critical: + + - **subject**: A clear, professional, and concise subject line that aligns with the **Communication Objective** and is appropriate for clinical settings. + + - **message**: The complete, polished message formatted in **markdown**. Do not include any instructions or extra commentary. Ensure the message meets the following criteria: + + - **Brevity and Conciseness**: Keep the message brief and to the point while still effectively conveying the necessary information to achieve the **Communication Objective**. + - **Clarity and Correctness**: Ensure the message is free of spelling and grammatical errors. Use clear and straightforward language. + - **Truthfulness**: It is absolutely paramount that the information provided in the message is accurate and truthful. Do not include any misleading or false information that you did not get from the inputs. This is critical for maintaining trust and integrity in healthcare communication. + - **Completeness**: The message must be complete and ready to send as is. It is critical to never use placeholders (e.g., "[Contact Information]", "[Insert Date]") or leave out essential information. If you want to urge the recipient to contact the office and you do not have contact information, keep it general and absolutely refrain from including any placeholders. 
+ + + + **Inputs:** + + - **Communication Objective:** + + {communicationObjective} + + - **Personalization Inputs:** + + {personalizationInput} + + - **Stakeholder:** + + {stakeholder} + + - **Language:** + + {language} +`) \ No newline at end of file diff --git a/extensions/shelly/actions/medicationFromImage/medicationFromImage.test.ts b/extensions/shelly/actions/medicationFromImage/medicationFromImage.test.ts index 3db36a87e..65830d9ce 100644 --- a/extensions/shelly/actions/medicationFromImage/medicationFromImage.test.ts +++ b/extensions/shelly/actions/medicationFromImage/medicationFromImage.test.ts @@ -48,9 +48,7 @@ describe('Medication From Image', () => { imageUrl: 'https://res.cloudinary.com/da7x4rzl4/image/upload/v1726601981/hackathon-sep-2024/seqn5izsagvs5nlferlf.png', }, - settings: { - openAiApiKey: 'a', - }, + settings: {}, }), onComplete, onError, @@ -96,9 +94,7 @@ describe('Medication From Image', () => { imageUrl: 'https://res.cloudinary.com/da7x4rzl4/image/upload/v1726601981/hackathon-sep-2024/invalid-url.png', }, - settings: { - openAiApiKey: 'a', - }, + settings: {}, }), onComplete, onError, @@ -145,9 +141,7 @@ describe('Medication From Image', () => { imageUrl: 'https://res.cloudinary.com/da7x4rzl4/image/upload/v1726601981/hackathon-sep-2024/invalid-url.png', }, - settings: { - openAiApiKey: 'a', - }, + settings: {}, }), onComplete, onError, diff --git a/extensions/shelly/actions/medicationFromImage/medicationFromImage.ts b/extensions/shelly/actions/medicationFromImage/medicationFromImage.ts index 1791f9535..2105e81d0 100644 --- a/extensions/shelly/actions/medicationFromImage/medicationFromImage.ts +++ b/extensions/shelly/actions/medicationFromImage/medicationFromImage.ts @@ -1,13 +1,12 @@ import { Category, type Action } from '@awell-health/extensions-core' import { validatePayloadAndCreateSdk } from '../../lib' -import { type settings } from '../../settings' import { fields, dataPoints, FieldsValidationSchema } from './config' import { 
MedicationExtractorApi } from '../../lib/api' import { FetchError } from '../../lib/api/medicationExtractorApi' export const medicationFromImage: Action< typeof fields, - typeof settings, + Record, keyof typeof dataPoints > = { key: 'medicationFromImage', diff --git a/extensions/shelly/actions/reviewMedicationExtraction/reviewMedicationExtraction.test.ts b/extensions/shelly/actions/reviewMedicationExtraction/reviewMedicationExtraction.test.ts index 4ea6bc586..04181e7c9 100644 --- a/extensions/shelly/actions/reviewMedicationExtraction/reviewMedicationExtraction.test.ts +++ b/extensions/shelly/actions/reviewMedicationExtraction/reviewMedicationExtraction.test.ts @@ -18,9 +18,7 @@ describe('Shelly - Review medication extraction', () => { 'https://res.cloudinary.com/da7x4rzl4/image/upload/v1726601981/hackathon-sep-2024/seqn5izsagvs5nlferlf.png', medicationData: JSON.stringify({}), }, - settings: { - openAiApiKey: 'a', - }, + settings: {}, }), onComplete, onError, diff --git a/extensions/shelly/actions/reviewMedicationExtraction/reviewMedicationExtraction.ts b/extensions/shelly/actions/reviewMedicationExtraction/reviewMedicationExtraction.ts index 143c54f1c..fb0034557 100644 --- a/extensions/shelly/actions/reviewMedicationExtraction/reviewMedicationExtraction.ts +++ b/extensions/shelly/actions/reviewMedicationExtraction/reviewMedicationExtraction.ts @@ -1,12 +1,11 @@ // import { AwellSdk } from '@awell-health/awell-sdk' import { Category, type Action } from '@awell-health/extensions-core' import { validatePayloadAndCreateSdk } from '../../lib' -import { type settings } from '../../settings' import { fields, dataPoints, FieldsValidationSchema } from './config' export const reviewMedicationExtraction: Action< typeof fields, - typeof settings, + Record, keyof typeof dataPoints > = { key: 'reviewMedicationExtraction', diff --git a/extensions/shelly/actions/summarizeCareFlow/lib/summarizeCareFlowWithLLM/constants.ts 
b/extensions/shelly/actions/summarizeCareFlow/lib/summarizeCareFlowWithLLM/prompt.ts similarity index 100% rename from extensions/shelly/actions/summarizeCareFlow/lib/summarizeCareFlowWithLLM/constants.ts rename to extensions/shelly/actions/summarizeCareFlow/lib/summarizeCareFlowWithLLM/prompt.ts diff --git a/extensions/shelly/actions/summarizeCareFlow/lib/summarizeCareFlowWithLLM/summarizeCareFlowWithLLM.test.ts b/extensions/shelly/actions/summarizeCareFlow/lib/summarizeCareFlowWithLLM/summarizeCareFlowWithLLM.test.ts index 6d70222e0..5aded6d59 100644 --- a/extensions/shelly/actions/summarizeCareFlow/lib/summarizeCareFlowWithLLM/summarizeCareFlowWithLLM.test.ts +++ b/extensions/shelly/actions/summarizeCareFlow/lib/summarizeCareFlowWithLLM/summarizeCareFlowWithLLM.test.ts @@ -3,52 +3,93 @@ import { summarizeCareFlowWithLLM } from './summarizeCareFlowWithLLM' import { ChatOpenAI } from '@langchain/openai' import { AIMessageChunk } from '@langchain/core/messages' import { mockPathwayActivitiesResponse } from '../../__mocks__/pathwayActivitiesResponse' +import { systemPrompt } from './prompt' // Describe the test suite describe('summarizeCareFlowWithLLM', () => { - let ChatModelGPT4oMock: jest.Mocked + let mockModel: jest.Mocked beforeEach(() => { - // Define the 'invoke' method in the mock - ChatModelGPT4oMock = { + mockModel = { invoke: jest.fn(), } as unknown as jest.Mocked }) - it('should return a mocked summary for the care flow activities', async () => { - const mockedSummary = - 'On September 11, 2024, a form was completed, followed by a step completion in the care flow. The clinician reviewed and performed several actions to proceed.' 
- - // Mock the 'invoke' method to return an AIMessage - ChatModelGPT4oMock.invoke.mockResolvedValueOnce( - new AIMessageChunk(mockedSummary) - ) + it('should generate a summary using the LLM model', async () => { + // Setup mock response + const mockedSummary = 'On September 11, 2024, a form was completed, followed by a step completion in the care flow. The clinician reviewed and performed several actions to proceed.' + mockModel.invoke.mockResolvedValueOnce(new AIMessageChunk(mockedSummary)) + // Prepare test data const careFlowData = mockPathwayActivitiesResponse.activities .map((activity) => { const { date, status, object, context } = activity - return `Date: ${date}\nStatus: ${status}\nType: ${ - object.type - }\nStep ID: ${context.step_id ?? 'N/A'}` + return `Date: ${date}\nStatus: ${status}\nType: ${object.type}\nStep ID: ${context.step_id ?? 'N/A'}` }) .join('\n\n') const stakeholder = 'Clinician' - const additionalInstructions = - 'Summarize the care flow activities, ensuring they are in chronological order.' + const additionalInstructions = 'Summarize the care flow activities, ensuring they are in chronological order.' 
+ const metadata = { + traceId: 'test-trace-id', + care_flow_definition_id: 'test-def-id', + care_flow_id: 'test-pathway-id', + activity_id: 'test-activity-id', + tenant_id: 'test-tenant-id', + org_slug: 'test-org-slug', + org_id: 'test-org-id' + } - // Call the function with the mocked data + // Execute the function const summary = await summarizeCareFlowWithLLM({ - ChatModelGPT4o: ChatModelGPT4oMock, + model: mockModel, careFlowActivities: careFlowData, stakeholder, additionalInstructions, + metadata, }) + // Verify the results + expect(summary).toBe(mockedSummary) + expect(mockModel.invoke).toHaveBeenCalledTimes(1) + // Verify the prompt formatting + const expectedPrompt = await systemPrompt.format({ + stakeholder, + additionalInstructions, + input: careFlowData, + }) + + expect(mockModel.invoke).toHaveBeenCalledWith( + expectedPrompt, + { metadata, runName: 'ShellySummarizeCareFlow' } + ) + }) - // Check that the returned summary is as expected - expect(summary).toBe(mockedSummary) - expect(ChatModelGPT4oMock.invoke).toHaveBeenCalledTimes(1) + it('should throw an error when LLM call fails', async () => { + // Setup mock error + const errorMessage = 'LLM API Error' + mockModel.invoke.mockRejectedValueOnce(new Error(errorMessage)) + + // Prepare test data + const testData = { + model: mockModel, + careFlowActivities: 'test data', + stakeholder: 'Patient', + additionalInstructions: 'test instructions', + metadata: { + tenant_id: 'test-tenant-id', + org_slug: 'test-org-slug', + org_id: 'test-org-id', + care_flow_definition_id: 'test-def-id', + care_flow_id: 'test-flow-id', + activity_id: 'test-activity-id' + }, + } + + // Verify error handling + await expect(summarizeCareFlowWithLLM(testData)) + .rejects + .toThrow('Failed to summarize the care flow due to an internal error.') }) }) diff --git a/extensions/shelly/actions/summarizeCareFlow/lib/summarizeCareFlowWithLLM/summarizeCareFlowWithLLM.ts 
b/extensions/shelly/actions/summarizeCareFlow/lib/summarizeCareFlowWithLLM/summarizeCareFlowWithLLM.ts index 46bb99436..ec28f84f1 100644 --- a/extensions/shelly/actions/summarizeCareFlow/lib/summarizeCareFlowWithLLM/summarizeCareFlowWithLLM.ts +++ b/extensions/shelly/actions/summarizeCareFlow/lib/summarizeCareFlowWithLLM/summarizeCareFlowWithLLM.ts @@ -1,27 +1,58 @@ -import { systemPrompt } from './constants' +import { systemPrompt } from './prompt' import { type ChatOpenAI } from '@langchain/openai' +import { type AIActionMetadata } from '../../../../../../src/lib/llm/openai/types' +import type { BaseCallbackHandler } from "@langchain/core/callbacks/base" -// TODO: remove console logs eventually +/** + * Uses LLM to summarize care flow activities. + * The function follows these steps: + * 1. Formats prompt with stakeholder context and instructions + * 2. Runs LLM to generate summary + * 3. Returns formatted summary + * + * @example + * const summary = await summarizeCareFlowWithLLM({ + * model, + * careFlowActivities: "...", + * stakeholder: "Patient", + * additionalInstructions: "Focus on medications", + * metadata: { ... 
} + * }) + */ export const summarizeCareFlowWithLLM = async ({ - ChatModelGPT4o, + model, careFlowActivities, stakeholder, additionalInstructions, + metadata, + callbacks, }: { - ChatModelGPT4o: ChatOpenAI + model: ChatOpenAI careFlowActivities: string stakeholder: string additionalInstructions: string + metadata: AIActionMetadata + callbacks?: BaseCallbackHandler[] }): Promise => { const prompt = await systemPrompt.format({ stakeholder, additionalInstructions, input: careFlowActivities, }) - const summaryMessage = await ChatModelGPT4o.invoke(prompt) - // TODO: for some reason compiler doesn't know that content is a string - const summary = summaryMessage.content as string - - return summary + try { + const response = await model.invoke( + prompt, + { + metadata, + runName: 'ShellySummarizeCareFlow', + callbacks + } + ) + return response.content as string + } catch (error) { + throw new Error( + 'Failed to summarize the care flow due to an internal error.' + ) + } } diff --git a/extensions/shelly/actions/summarizeCareFlow/summarizeCareFlow.test.ts b/extensions/shelly/actions/summarizeCareFlow/summarizeCareFlow.test.ts index 018b2353f..712f705b2 100644 --- a/extensions/shelly/actions/summarizeCareFlow/summarizeCareFlow.test.ts +++ b/extensions/shelly/actions/summarizeCareFlow/summarizeCareFlow.test.ts @@ -4,25 +4,24 @@ import { summarizeCareFlow } from '.' 
import { mockPathwayActivitiesResponse } from './__mocks__/pathwayActivitiesResponse' import { DISCLAIMER_MSG } from '../../lib/constants' -// Mock the '@langchain/openai' module -jest.mock('@langchain/openai', () => { - // Mock the 'invoke' method to return a resolved value - const mockInvoke = jest.fn().mockResolvedValue({ - content: 'Mocked care flow summary from LLM', +// Mock createOpenAIModel +jest.mock('../../../../src/lib/llm/openai', () => ({ + createOpenAIModel: jest.fn().mockResolvedValue({ + model: { + invoke: jest.fn().mockResolvedValue({ + content: 'Mocked care flow summary from LLM' + }) + }, + metadata: { + traceId: 'test-trace-id', + care_flow_definition_id: 'whatever', + care_flow_id: 'ai4rZaYEocjB', + activity_id: 'test-activity-id', + org_slug: 'test-org-slug', + org_id: 'test-org-id' + } }) - - // Mock the ChatOpenAI class - const mockChatOpenAI = jest.fn().mockImplementation(() => ({ - invoke: mockInvoke, - })) - - return { - ChatOpenAI: mockChatOpenAI, - } -}) - -// Import ChatOpenAI after mocking -import { ChatOpenAI } from '@langchain/openai' +})) describe('summarizeCareFlow - Mocked LLM calls', () => { const { onComplete, onError, helpers, extensionAction, clearMocks } = @@ -31,16 +30,15 @@ describe('summarizeCareFlow - Mocked LLM calls', () => { beforeEach(() => { clearMocks() jest.clearAllMocks() + jest.spyOn(console, 'error').mockImplementation(() => {}) // Suppress console.error }) it('Should summarize care flow with LLM', async () => { - // Spy on the 'summarizeCareFlowWithLLM' function const summarizeCareFlowWithLLMSpy = jest.spyOn( require('./lib/summarizeCareFlowWithLLM'), 'summarizeCareFlowWithLLM' ) - // Create the test payload const payload = generateTestPayload({ pathway: { id: 'ai4rZaYEocjB', @@ -51,11 +49,10 @@ describe('summarizeCareFlow - Mocked LLM calls', () => { additionalInstructions: 'Summarize key activities.', }, settings: { - openAiApiKey: 'a', + openAiApiKey: 'test-key', }, }) - // Mock the Awell SDK const 
awellSdkMock = { orchestration: { query: jest.fn().mockResolvedValue({ @@ -66,7 +63,6 @@ describe('summarizeCareFlow - Mocked LLM calls', () => { helpers.awellSdk = jest.fn().mockResolvedValue(awellSdkMock) - // Execute the action await extensionAction.onEvent({ payload, onComplete, @@ -74,16 +70,20 @@ describe('summarizeCareFlow - Mocked LLM calls', () => { helpers, }) - // Assertions - expect(ChatOpenAI).toHaveBeenCalled() expect(summarizeCareFlowWithLLMSpy).toHaveBeenCalledWith({ - ChatModelGPT4o: expect.any(Object), + model: expect.any(Object), careFlowActivities: expect.any(String), stakeholder: 'Clinician', additionalInstructions: 'Summarize key activities.', + metadata: expect.objectContaining({ + traceId: 'test-trace-id', + care_flow_definition_id: 'whatever', + care_flow_id: 'ai4rZaYEocjB', + activity_id: 'test-activity-id' + }), }) - const expected = `

Important Notice: The content provided is an AI-generated summary.

+ const expected = `

${DISCLAIMER_MSG}

Mocked care flow summary from LLM

` expect(onComplete).toHaveBeenCalledWith({ @@ -94,4 +94,42 @@ describe('summarizeCareFlow - Mocked LLM calls', () => { expect(onError).not.toHaveBeenCalled() }) + + it('Should handle errors gracefully', async () => { + const payload = generateTestPayload({ + pathway: { + id: 'ai4rZaYEocjB', + definition_id: 'whatever', + }, + fields: { + stakeholder: 'Clinician', + additionalInstructions: '', + }, + settings: { + openAiApiKey: 'test-key', + }, + }) + + // Mock SDK to throw a specific error + const awellSdkMock = { + orchestration: { + query: jest.fn().mockRejectedValue(new Error('SDK query failed')) + } + } + helpers.awellSdk = jest.fn().mockResolvedValue(awellSdkMock) + + // Expect the action to throw + await expect( + extensionAction.onEvent({ + payload, + onComplete, + onError, + helpers, + }) + ).rejects.toThrow('SDK query failed') + + // Verify error handling + expect(onComplete).not.toHaveBeenCalled() + expect(awellSdkMock.orchestration.query).toHaveBeenCalledTimes(1) + }) }) diff --git a/extensions/shelly/actions/summarizeCareFlow/summarizeCareFlow.ts b/extensions/shelly/actions/summarizeCareFlow/summarizeCareFlow.ts index dd3c14345..184e41a5d 100644 --- a/extensions/shelly/actions/summarizeCareFlow/summarizeCareFlow.ts +++ b/extensions/shelly/actions/summarizeCareFlow/summarizeCareFlow.ts @@ -1,14 +1,14 @@ import { Category, type Action } from '@awell-health/extensions-core' -import { validatePayloadAndCreateSdk } from '../../lib' -import { type settings } from '../../settings' import { fields, dataPoints, FieldsValidationSchema } from './config' import { DISCLAIMER_MSG } from '../../lib/constants' import { summarizeCareFlowWithLLM } from './lib/summarizeCareFlowWithLLM' import { markdownToHtml } from '../../../../src/utils' +import { createOpenAIModel } from '../../../../src/lib/llm/openai' +import { OPENAI_MODELS } from '../../../../src/lib/llm/openai/constants' export const summarizeCareFlow: Action< typeof fields, - typeof settings, + Record, keyof 
typeof dataPoints > = { key: 'summarizeCareFlow', @@ -18,14 +18,18 @@ export const summarizeCareFlow: Action< fields, previewable: false, dataPoints, + onEvent: async ({ payload, onComplete, onError, helpers }): Promise => { - const { - ChatModelGPT4o, - fields: { additionalInstructions, stakeholder }, - pathway, - } = await validatePayloadAndCreateSdk({ - fieldsSchema: FieldsValidationSchema, + // 1. Validate input fields + const { additionalInstructions, stakeholder } = FieldsValidationSchema.parse(payload.fields) + const pathway = payload.pathway + + // 2. Initialize OpenAI model with metadata + const { model, metadata, callbacks } = await createOpenAIModel({ + settings: payload.settings, + helpers, payload, + modelType: OPENAI_MODELS.GPT4o }) const awellSdk = await helpers.awellSdk() @@ -67,30 +71,27 @@ export const summarizeCareFlow: Action< }, }) - try { - const summary = await summarizeCareFlowWithLLM({ - ChatModelGPT4o, - careFlowActivities: JSON.stringify( - pathwayActivitesUntilNow.pathwayActivities.activities, - null, - 2 - ), - stakeholder, - additionalInstructions, - }) + const summary = await summarizeCareFlowWithLLM({ + model, + careFlowActivities: JSON.stringify( + pathwayActivitesUntilNow.pathwayActivities.activities, + null, + 2 + ), + stakeholder, + additionalInstructions, + metadata, + callbacks + }) - const htmlSummary = await markdownToHtml( - `${DISCLAIMER_MSG}\n\n${summary}` - ) + const htmlSummary = await markdownToHtml( + `${DISCLAIMER_MSG}\n\n${summary}` + ) - await onComplete({ - data_points: { - summary: htmlSummary, - }, - }) - } catch (error) { - console.error('Error summarizing care flow:', error) - throw new Error('Error summarizing care flow') - } + await onComplete({ + data_points: { + summary: htmlSummary, + }, + }) }, } diff --git a/extensions/shelly/actions/summarizeCareFlow/summarizeCareFlowRealOpenAI.test.ts b/extensions/shelly/actions/summarizeCareFlow/summarizeCareFlowRealOpenAI.test.ts index 807105b26..0e506bc44 100644 --- 
a/extensions/shelly/actions/summarizeCareFlow/summarizeCareFlowRealOpenAI.test.ts +++ b/extensions/shelly/actions/summarizeCareFlow/summarizeCareFlowRealOpenAI.test.ts @@ -3,19 +3,65 @@ import { TestHelpers } from '@awell-health/extensions-core' import { generateTestPayload } from '@/tests' import { summarizeCareFlow } from '.' import { mockPathwayActivitiesResponse } from './__mocks__/pathwayActivitiesResponse' +import { DISCLAIMER_MSG } from '../../lib/constants' -// remove .skip to run this test -describe.skip('summarizeCareFlow - Real LLM calls with mocked Awell SDK', () => { + +jest.setTimeout(60000) + +describe.skip('summarizeCareFlow - Real OpenAI calls', () => { const { onComplete, onError, helpers, extensionAction, clearMocks } = TestHelpers.fromAction(summarizeCareFlow) beforeEach(() => { clearMocks() jest.clearAllMocks() + + // Ensure API key is always defined + process.env.OPENAI_API_KEY = process.env.OPENAI_API_KEY || 'test-api-key' + + helpers.getOpenAIConfig = jest.fn().mockReturnValue({ + apiKey: process.env.OPENAI_API_KEY as string, + temperature: 0, + maxRetries: 3, + timeout: 10000 + }) + + // Mock the SDK query to return activities + const mockQuery = jest.fn() + .mockResolvedValueOnce({ + pathwayActivities: { + success: true, + activities: [{ + id: 'test-activity-id', + status: 'DONE', + date: '2024-01-01T00:00:00Z', + object: { + id: 'test-object-id', + name: 'Test Activity', + type: 'FORM' + } + }] + } + }) + + helpers.awellSdk = jest.fn().mockReturnValue({ + orchestration: { + query: mockQuery + } + }) }) - it('Should call the real model and use mocked care flow activities', async () => { - // Set up payload + afterEach(() => { + jest.clearAllTimers() + jest.clearAllMocks() + }) + + afterAll(async () => { + // Clean up any remaining promises + await new Promise(resolve => setTimeout(resolve, 100)) + }) + + it('Should call the real model using default config', async () => { const payload = generateTestPayload({ pathway: { id: 'ai4rZaYEocjB', 
@@ -25,46 +71,28 @@ describe.skip('summarizeCareFlow - Real LLM calls with mocked Awell SDK', () => stakeholder: 'Clinician', additionalInstructions: '', }, - settings: { - openAiApiKey: process.env.OPENAI_TEST_KEY, // Use your actual OpenAI API key here - }, + settings: {}, // Use default config + activity: { + id: 'test-activity-id' + } }) - // Mock the Awell SDK to return care flow activities - const awellSdkMock = { - orchestration: { - query: jest.fn().mockResolvedValue({ - pathwayActivities: mockPathwayActivitiesResponse, - }), - }, - } - - helpers.awellSdk = jest.fn().mockResolvedValue(awellSdkMock) - - // Execute the action without mocking ChatOpenAI (real call) await extensionAction.onEvent({ payload, onComplete, onError, - helpers, + helpers: helpers }) - // Assertions for the Awell SDK mock - expect(helpers.awellSdk).toHaveBeenCalled() - expect(awellSdkMock.orchestration.query).toHaveBeenCalledTimes(1) - - // Ensure that the model has actually been called (real call to ChatOpenAI) expect(onComplete).toHaveBeenCalledWith({ data_points: { - summary: expect.stringContaining('step'), + summary: expect.stringContaining(DISCLAIMER_MSG), }, }) - expect(onError).not.toHaveBeenCalled() }) - it('Should call the real model and focus on patient-completed actions', async () => { - // Set up payload + it('Should call the real model with different instructions', async () => { const payload = generateTestPayload({ pathway: { id: 'ai4rZaYEocjB', @@ -72,44 +100,26 @@ describe.skip('summarizeCareFlow - Real LLM calls with mocked Awell SDK', () => }, fields: { stakeholder: 'Clinician', - additionalInstructions: - 'Focus only on actions completed by the patient.', - }, - settings: { - openAiApiKey: process.env.OPENAI_TEST_KEY, // Use your actual OpenAI API key here + additionalInstructions: 'Focus only on actions completed by the patient.', }, + settings: {}, // Use default config + activity: { + id: 'test-activity-id' + } }) - // Mock the Awell SDK to return care flow 
activities - const awellSdkMock = { - orchestration: { - query: jest.fn().mockResolvedValue({ - pathwayActivities: mockPathwayActivitiesResponse, - }), - }, - } - - helpers.awellSdk = jest.fn().mockResolvedValue(awellSdkMock) - - // Execute the action without mocking ChatOpenAI (real call) await extensionAction.onEvent({ payload, onComplete, onError, - helpers, + helpers: helpers }) - // Assertions for the Awell SDK mock - expect(helpers.awellSdk).toHaveBeenCalled() - expect(awellSdkMock.orchestration.query).toHaveBeenCalledTimes(1) - - // Ensure that the model has actually been called (real call to ChatOpenAI) expect(onComplete).toHaveBeenCalledWith({ data_points: { - summary: expect.stringContaining('patient'), + summary: expect.stringContaining(DISCLAIMER_MSG), }, }) - expect(onError).not.toHaveBeenCalled() }) }) diff --git a/extensions/shelly/actions/summarizeForm/summarizeForm.test.ts b/extensions/shelly/actions/summarizeForm/summarizeForm.test.ts index 2ee6df41a..275ef5829 100644 --- a/extensions/shelly/actions/summarizeForm/summarizeForm.test.ts +++ b/extensions/shelly/actions/summarizeForm/summarizeForm.test.ts @@ -3,29 +3,25 @@ import { TestHelpers } from '@awell-health/extensions-core' import { generateTestPayload } from '@/tests' import { summarizeForm } from '.' 
-import { mockPathwayActivitiesResponse } from './__mocks__/pathwayActivitiesResponse' import { mockFormDefinitionResponse } from './__mocks__/formDefinitionResponse' import { mockFormResponseResponse } from './__mocks__/formResponseResponse' -import { DISCLAIMER_MSG_FORM } from '../../lib/constants' import { markdownToHtml } from '../../../../src/utils' -// Import ChatOpenAI after mocking -import { ChatOpenAI } from '@langchain/openai' - -// Mock the '@langchain/openai' module -jest.mock('@langchain/openai', () => { - const mockInvoke = jest.fn().mockResolvedValue({ - content: 'Mocked summary from LLM', - }) - - const mockChatOpenAI = jest.fn().mockImplementation(() => ({ - invoke: mockInvoke, - })) - - return { - ChatOpenAI: mockChatOpenAI, - } -}) +// Mock the OpenAI modules +jest.mock('../../../../src/lib/llm/openai/createOpenAIModel', () => ({ + createOpenAIModel: jest.fn().mockResolvedValue({ + model: { + invoke: jest.fn().mockResolvedValue({ + content: 'The patient reported good overall health. They experienced fatigue and headache in the last 7 days. 
Additionally, they mentioned occasional dizziness when standing up too quickly.', + }), + }, + metadata: { + activity_id: 'X74HeDQ4N0gtdaSEuzF8s', + care_flow_id: 'ai4rZaYEocjB', + care_flow_definition_id: 'whatever', + }, + }), +})) describe('summarizeForm - Mocked LLM calls', () => { const { onComplete, onError, helpers, extensionAction, clearMocks } = @@ -34,60 +30,65 @@ describe('summarizeForm - Mocked LLM calls', () => { beforeEach(() => { clearMocks() jest.clearAllMocks() + const mockQuery = jest.fn() + .mockResolvedValueOnce({ + activity: { + success: true, + activity: { + id: 'X74HeDQ4N0gtdaSEuzF8s', + date: '2024-09-11T22:56:59.607Z', + context: { + step_id: 'Xkn5dkyPA5uW' + } + } + } + }) + .mockResolvedValueOnce({ + pathwayStepActivities: { + success: true, + activities: [{ + id: 'X74HeDQ4N0gtdaSEuzF8s', + status: 'DONE', + date: '2024-09-11T22:56:58.607Z', + object: { + id: 'OGhjJKF5LRmo', + name: 'Test Form', + type: 'FORM' + }, + context: { + step_id: 'Xkn5dkyPA5uW' + } + }] + } + }) + .mockResolvedValueOnce({ + form: mockFormDefinitionResponse, + }) + .mockResolvedValueOnce({ + formResponse: mockFormResponseResponse, + }) + + helpers.awellSdk = jest.fn().mockReturnValue({ + orchestration: { + query: mockQuery + } + }) }) it('Should summarize form with LLM', async () => { - const summarizeFormWithLLMSpy = jest.spyOn( - require('../../lib/summarizeFormWithLLM/summarizeFormWithLLM'), - 'summarizeFormWithLLM', - ) - const payload = generateTestPayload({ pathway: { id: 'ai4rZaYEocjB', definition_id: 'whatever', }, activity: { id: 'X74HeDQ4N0gtdaSEuzF8s' }, - patient: { id: 'whatever' }, fields: { summaryFormat: 'Bullet-points', language: 'Default', }, - settings: { - openAiApiKey: 'a', - }, + settings: {}, }) - // Mock the Awell SDK - const awellSdkMock = { - orchestration: { - mutation: jest.fn().mockResolvedValue({}), - query: jest - .fn() - .mockResolvedValueOnce({ - activity: { - activity: { - date: new Date().toISOString(), - context: { - step_id: 
'step-id', - }, - }, - }, - }) - .mockResolvedValueOnce({ - pathwayStepActivities: mockPathwayActivitiesResponse, - }) - .mockResolvedValueOnce({ - form: mockFormDefinitionResponse, - }) - .mockResolvedValueOnce({ - formResponse: mockFormResponseResponse, - }), - }, - } - - helpers.awellSdk = jest.fn().mockResolvedValue(awellSdkMock) - await extensionAction.onEvent({ payload, onComplete, @@ -95,19 +96,7 @@ describe('summarizeForm - Mocked LLM calls', () => { helpers, }) - expect(ChatOpenAI).toHaveBeenCalled() - expect(summarizeFormWithLLMSpy).toHaveBeenCalledWith( - expect.objectContaining({ - ChatModelGPT4o: expect.any(Object), - formData: expect.any(String), - summaryFormat: 'Bullet-points', - language: 'Default', - disclaimerMessage: expect.any(String), - }), - ) - - const expected = await markdownToHtml('Mocked summary from LLM') - + const expected = await markdownToHtml('The patient reported good overall health. They experienced fatigue and headache in the last 7 days. Additionally, they mentioned occasional dizziness when standing up too quickly.') expect(onComplete).toHaveBeenCalledWith({ data_points: { summary: expected, diff --git a/extensions/shelly/actions/summarizeForm/summarizeForm.ts b/extensions/shelly/actions/summarizeForm/summarizeForm.ts index eb81973da..18a46e743 100644 --- a/extensions/shelly/actions/summarizeForm/summarizeForm.ts +++ b/extensions/shelly/actions/summarizeForm/summarizeForm.ts @@ -1,17 +1,25 @@ import { Category, type Action } from '@awell-health/extensions-core' -import { validatePayloadAndCreateSdk } from '../../lib' -import { type settings } from '../../settings' +import { summarizeFormWithLLM } from '../../lib/summarizeFormWithLLM' +import { createOpenAIModel } from '../../../../src/lib/llm/openai/createOpenAIModel' +import { OPENAI_MODELS } from '../../../../src/lib/llm/openai/constants' import { fields, dataPoints, FieldsValidationSchema } from './config' import { getFormResponseText } from '../../lib/getFormResponseText' 
-import { summarizeFormWithLLM } from '../../lib/summarizeFormWithLLM' -import { DISCLAIMER_MSG_FORM } from '../../lib/constants' import { getLatestFormInCurrentStep } from '../../../../src/lib/awell' import { markdownToHtml } from '../../../../src/utils' +import { DISCLAIMER_MSG_FORM } from '../../lib/constants' -// TODO: get rid of the console logs eventually +/** + * Awell Action: Form Summarization + * + * Takes form responses and preferences as input, uses LLM to: + * 1. Generate a concise summary in specified format and language + * 2. Includes appropriate disclaimer + * + * @returns HTML-formatted summary + */ export const summarizeForm: Action< typeof fields, - typeof settings, + Record, keyof typeof dataPoints > = { key: 'summarizeForm', @@ -21,48 +29,49 @@ export const summarizeForm: Action< fields, previewable: false, dataPoints, + onEvent: async ({ payload, onComplete, onError, helpers }): Promise => { - const { - ChatModelGPT4o, - fields: { summaryFormat, language }, - pathway, - activity, - } = await validatePayloadAndCreateSdk({ - fieldsSchema: FieldsValidationSchema, + // 1. Validate input fields + const { summaryFormat, language } = FieldsValidationSchema.parse(payload.fields) + + // 2. Initialize OpenAI model with metadata + const { model, metadata, callbacks } = await createOpenAIModel({ + settings: payload.settings, + helpers, payload, + modelType: OPENAI_MODELS.GPT4o, + hideDataForTracing: true // Hide input and output data when tracing }) + // 3. 
Get form data const { formDefinition, formResponse } = await getLatestFormInCurrentStep({ awellSdk: await helpers.awellSdk(), - pathwayId: pathway.id, - activityId: activity.id, + pathwayId: payload.pathway.id, + activityId: payload.activity.id, }) - const { result: responseText } = getFormResponseText({ + const { result: formData } = getFormResponseText({ formDefinition, formResponse, }) - try { - const summary = await summarizeFormWithLLM({ - ChatModelGPT4o, - formData: responseText, - summaryFormat, - language, - disclaimerMessage: DISCLAIMER_MSG_FORM, - }) - - const htmlSummary = await markdownToHtml(summary) - + // 4. Generate summary + const summary = await summarizeFormWithLLM({ + model, + formData, + summaryFormat, + language, + disclaimerMessage: DISCLAIMER_MSG_FORM, + metadata, + callbacks + }) - await onComplete({ - data_points: { - summary: htmlSummary, - }, - }) - } catch (error) { - console.error('Error summarizing form:', error) - throw new Error('Error summarizing form') - } + // 5. 
Format and return results + const htmlSummary = await markdownToHtml(summary) + await onComplete({ + data_points: { + summary: htmlSummary, + }, + }) }, } diff --git a/extensions/shelly/actions/summarizeForm/summarizeFormRealOpenAI.test.ts b/extensions/shelly/actions/summarizeForm/summarizeFormRealOpenAI.test.ts index 45440058d..9ce2dfc2e 100644 --- a/extensions/shelly/actions/summarizeForm/summarizeFormRealOpenAI.test.ts +++ b/extensions/shelly/actions/summarizeForm/summarizeFormRealOpenAI.test.ts @@ -7,7 +7,7 @@ import { mockFormResponseResponse } from './__mocks__/formResponseResponse' import { mockPathwayActivitiesResponse } from './__mocks__/pathwayActivitiesResponse' import { DISCLAIMER_MSG_FORM } from '../../lib/constants' -jest.setTimeout(30000) // Increase timeout to 60 seconds for all tests in this file +jest.setTimeout(30000) // Increase timeout if needed for real LLM calls describe.skip('summarizeForm - Real LLM calls with mocked Awell SDK', () => { const { onComplete, onError, helpers, extensionAction, clearMocks } = @@ -16,43 +16,70 @@ describe.skip('summarizeForm - Real LLM calls with mocked Awell SDK', () => { beforeEach(() => { clearMocks() jest.clearAllMocks() + helpers.getOpenAIConfig = jest.fn().mockReturnValue({ + apiKey: process.env.OPENAI_API_KEY, + temperature: 0, + maxRetries: 3, + timeout: 10000 + }) }) - it('Should call the real model and use mocked form data with Bullet-points format', async () => { + it('Should call the real model with Bullet-points format', async () => { const payload = generateTestPayload({ pathway: { id: 'ai4rZaYEocjB', definition_id: 'whatever', }, activity: { id: 'X74HeDQ4N0gtdaSEuzF8s' }, - patient: { id: 'whatever' }, fields: { summaryFormat: 'Bullet-points', language: 'Default', }, - settings: { - openAiApiKey: process.env.OPENAI_TEST_KEY, - }, + settings: {} }) - const awellSdkMock = { + const mockQuery = jest.fn() + .mockResolvedValueOnce({ + activity: { + success: true, + activity: 
mockPathwayActivitiesResponse.activities[0] + } + }) + .mockResolvedValueOnce({ + pathwayStepActivities: { + success: true, + activities: mockPathwayActivitiesResponse.activities + } + }) + .mockResolvedValueOnce({ + form: { + form: { + id: 'OGhjJKF5LRmo', + questions: [{ + id: 'q1', + title: 'Test Question', + type: 'TEXT', + options: [] + }] + } + } + }) + .mockResolvedValueOnce({ + formResponse: { + response: { + answers: [{ + question_id: 'q1', + value: 'Test Answer' + }] + } + } + }) + + helpers.awellSdk = jest.fn().mockReturnValue({ orchestration: { - mutation: jest.fn().mockResolvedValue({}), - query: jest - .fn() - .mockResolvedValueOnce({ - pathwayActivities: mockPathwayActivitiesResponse, - }) - .mockResolvedValueOnce({ - form: mockFormDefinitionResponse, - }) - .mockResolvedValueOnce({ - formResponse: mockFormResponseResponse, - }), - }, - } - - helpers.awellSdk = jest.fn().mockResolvedValue(awellSdkMock) + query: mockQuery + } + }) await extensionAction.onEvent({ payload, @@ -62,62 +89,94 @@ describe.skip('summarizeForm - Real LLM calls with mocked Awell SDK', () => { }) expect(helpers.awellSdk).toHaveBeenCalled() - expect(awellSdkMock.orchestration.query).toHaveBeenCalledTimes(3) - + expect(mockQuery).toHaveBeenCalledTimes(4) expect(onComplete).toHaveBeenCalledWith({ data_points: { summary: expect.stringMatching( - new RegExp(`${DISCLAIMER_MSG_FORM}.*General Dummy Form.*-`, 's') + new RegExp(`${DISCLAIMER_MSG_FORM}.*Test Question.*Test Answer.*`, 's') ), }, }) - - const summary = onComplete.mock.calls[0][0].data_points.summary - expect(summary).toMatch(/General Dummy Form/) - expect(summary).toMatch(/-/) - expect( - summary.split('\n').filter((line: string) => line.trim().startsWith('')) - .length - ).toBeGreaterThan(1) - expect(onError).not.toHaveBeenCalled() - }) + }, 30000) - it('Should call the real model and use mocked form data with Text paragraph format', async () => { + it('Should call the real model with Text paragraph format', async () => { 
const payload = generateTestPayload({ pathway: { id: 'ai4rZaYEocjB', definition_id: 'whatever', }, activity: { id: 'X74HeDQ4N0gtdaSEuzF8s' }, - patient: { id: 'whatever' }, fields: { summaryFormat: 'Text paragraph', language: 'Default', }, - settings: { - openAiApiKey: process.env.OPENAI_TEST_KEY, - }, + settings: {} }) - const awellSdkMock = { + const mockQuery = jest.fn() + // First query: get current activity + .mockResolvedValueOnce({ + activity: { + success: true, + activity: { + date: '2024-09-11T22:56:59.607Z', + context: { + step_id: 'Xkn5dkyPA5uW' + } + } + } + }) + // Second query: get activities in current step + .mockResolvedValueOnce({ + pathwayStepActivities: { + success: true, + activities: [{ + id: 'form_activity_id', + status: 'DONE', + date: '2024-09-11T22:56:58.607Z', + object: { + id: 'OGhjJKF5LRmo', + name: 'General Dummy Form', + type: 'FORM' + }, + context: { + step_id: 'Xkn5dkyPA5uW' + } + }] + } + }) + // Third query: get form definition + .mockResolvedValueOnce({ + form: { + form: { + id: 'OGhjJKF5LRmo', + questions: [{ + id: 'q1', + title: 'Test Question', + type: 'TEXT', + options: [] + }] + } + } + }) + // Fourth query: get form response + .mockResolvedValueOnce({ + formResponse: { + response: { + answers: [{ + question_id: 'q1', + value: 'Test Answer' + }] + } + } + }) + + helpers.awellSdk = jest.fn().mockReturnValue({ orchestration: { - mutation: jest.fn().mockResolvedValue({}), - query: jest - .fn() - .mockResolvedValueOnce({ - pathwayActivities: mockPathwayActivitiesResponse, - }) - .mockResolvedValueOnce({ - form: mockFormDefinitionResponse, - }) - .mockResolvedValueOnce({ - formResponse: mockFormResponseResponse, - }), - }, - } - - helpers.awellSdk = jest.fn().mockResolvedValue(awellSdkMock) + query: mockQuery + } + }) await extensionAction.onEvent({ payload, @@ -127,58 +186,87 @@ describe.skip('summarizeForm - Real LLM calls with mocked Awell SDK', () => { }) expect(helpers.awellSdk).toHaveBeenCalled() - 
expect(awellSdkMock.orchestration.query).toHaveBeenCalledTimes(3) - + expect(mockQuery).toHaveBeenCalledTimes(4) expect(onComplete).toHaveBeenCalledWith({ data_points: { summary: expect.stringMatching( - new RegExp(`${DISCLAIMER_MSG_FORM}.*General Dummy Form.*`, 's') + new RegExp(`${DISCLAIMER_MSG_FORM}.*Test Question.*Test Answer.*`, 's') ), }, }) - - const summary = onComplete.mock.calls[0][0].data_points.summary - expect(summary).toMatch(/General Dummy Form/) - expect(summary.split('\n').length).toBeLessThan(10) // Assuming a paragraph is typically less than 5 lines and givign summe buffer for the title and disclaimer - expect(onError).not.toHaveBeenCalled() - }) + }, 30000) - it('Should call the real model and use mocked form data with Text paragraph format in French', async () => { + it('Should call the real model and use mocked form data with Text paragraph format', async () => { const payload = generateTestPayload({ - pathway: { - id: 'ai4rZaYEocjB', - definition_id: 'whatever', - }, + pathway: { id: 'ai4rZaYEocjB', definition_id: 'whatever' }, activity: { id: 'X74HeDQ4N0gtdaSEuzF8s' }, - patient: { id: 'whatever' }, fields: { summaryFormat: 'Text paragraph', - language: 'French', - }, - settings: { - openAiApiKey: process.env.OPENAI_TEST_KEY, + language: 'Default', }, + settings: {} }) - const awellSdkMock = { + const mockQuery = jest.fn() + .mockResolvedValueOnce({ + activity: { + success: true, + activity: { + date: '2024-09-11T22:56:59.607Z', + context: { + step_id: 'Xkn5dkyPA5uW' + } + } + } + }) + .mockResolvedValueOnce({ + pathwayStepActivities: { + success: true, + activities: [{ + id: 'form_activity_id', + status: 'DONE', + date: '2024-09-11T22:56:58.607Z', + object: { + id: 'OGhjJKF5LRmo', + name: 'General Dummy Form', + type: 'FORM' + }, + context: { + step_id: 'Xkn5dkyPA5uW' + } + }] + } + }) + .mockResolvedValueOnce({ + form: { + form: { + id: 'OGhjJKF5LRmo', + questions: [{ + id: 'q1', + title: 'Test Question', + type: 'TEXT', + options: [] + 
}] + } + } + }) + .mockResolvedValueOnce({ + formResponse: { + response: { + answers: [{ + question_id: 'q1', + value: 'Test Answer' + }] + } + } + }) + + helpers.awellSdk = jest.fn().mockReturnValue({ orchestration: { - mutation: jest.fn().mockResolvedValue({}), - query: jest - .fn() - .mockResolvedValueOnce({ - pathwayActivities: mockPathwayActivitiesResponse, - }) - .mockResolvedValueOnce({ - form: mockFormDefinitionResponse, - }) - .mockResolvedValueOnce({ - formResponse: mockFormResponseResponse, - }), - }, - } - - helpers.awellSdk = jest.fn().mockResolvedValue(awellSdkMock) + query: mockQuery + } + }) await extensionAction.onEvent({ payload, @@ -188,58 +276,108 @@ describe.skip('summarizeForm - Real LLM calls with mocked Awell SDK', () => { }) expect(helpers.awellSdk).toHaveBeenCalled() - expect(awellSdkMock.orchestration.query).toHaveBeenCalledTimes(3) - + expect(mockQuery).toHaveBeenCalledTimes(4) expect(onComplete).toHaveBeenCalledWith({ data_points: { - summary: expect.stringMatching(/Avis Important*/), + summary: expect.stringMatching( + new RegExp(`${DISCLAIMER_MSG_FORM}.*Test Question.*Test Answer.*`, 's') + ), }, }) - - const summary = onComplete.mock.calls[0][0].data_points.summary - expect(summary.split('\n').length).toBeLessThan(10) // Assuming a paragraph is typically less than 5 lines and giving some buffer for the title and disclaimer - - // Check if the summary is in French - expect(summary).toMatch(/Le patient|La patiente/) - expect(onError).not.toHaveBeenCalled() - }) + }, 30000) - it('Should call the real model and use mocked form data with Bullet-points format in Spanish', async () => { + it('Should call the real model and use mocked form data', async () => { const payload = generateTestPayload({ - pathway: { - id: 'ai4rZaYEocjB', - definition_id: 'whatever', - }, + pathway: { id: 'ai4rZaYEocjB', definition_id: 'whatever' }, activity: { id: 'X74HeDQ4N0gtdaSEuzF8s' }, - patient: { id: 'whatever' }, fields: { summaryFormat: 'Bullet-points', - 
language: 'Spanish', + language: 'Default', }, settings: { - openAiApiKey: process.env.OPENAI_TEST_KEY, - }, + openAiApiKey: process.env.OPENAI_API_KEY, + } }) - const awellSdkMock = { + const mockQuery = jest.fn() + .mockResolvedValueOnce({ + activity: { + success: true, + activity: mockPathwayActivitiesResponse.activities[0] + } + }) + .mockResolvedValueOnce({ + pathwayStepActivities: { + success: true, + activities: mockPathwayActivitiesResponse.activities + } + }) + .mockResolvedValueOnce({ + form: { + form: { + id: 'OGhjJKF5LRmo', + title: 'Patient Health Questionnaire', + questions: [ + { + id: 'q1', + title: 'How would you rate your overall health?', + type: 'SELECT', + options: [ + { value: 'excellent', label: 'Excellent' }, + { value: 'good', label: 'Good' }, + { value: 'fair', label: 'Fair' }, + { value: 'poor', label: 'Poor' } + ] + }, + { + id: 'q2', + title: 'What symptoms have you experienced in the last 7 days?', + type: 'MULTIPLE_SELECT', + options: [ + { value: 'fatigue', label: 'Fatigue' }, + { value: 'headache', label: 'Headache' }, + { value: 'fever', label: 'Fever' }, + { value: 'cough', label: 'Cough' }, + { value: 'none', label: 'None of the above' } + ] + }, + { + id: 'q3', + title: 'Please describe any other health concerns:', + type: 'TEXT', + options: [] + } + ] + } + } + }) + .mockResolvedValueOnce({ + formResponse: { + response: { + answers: [ + { + question_id: 'q1', + value: 'good' + }, + { + question_id: 'q2', + value: ['fatigue', 'headache'] + }, + { + question_id: 'q3', + value: 'I have been experiencing occasional dizziness when standing up too quickly.' 
+ } + ] + } + } + }) + + helpers.awellSdk = jest.fn().mockReturnValue({ orchestration: { - mutation: jest.fn().mockResolvedValue({}), - query: jest - .fn() - .mockResolvedValueOnce({ - pathwayActivities: mockPathwayActivitiesResponse, - }) - .mockResolvedValueOnce({ - form: mockFormDefinitionResponse, - }) - .mockResolvedValueOnce({ - formResponse: mockFormResponseResponse, - }), - }, - } - - helpers.awellSdk = jest.fn().mockResolvedValue(awellSdkMock) + query: mockQuery + } + }) await extensionAction.onEvent({ payload, @@ -249,21 +387,14 @@ describe.skip('summarizeForm - Real LLM calls with mocked Awell SDK', () => { }) expect(helpers.awellSdk).toHaveBeenCalled() - expect(awellSdkMock.orchestration.query).toHaveBeenCalledTimes(3) - + expect(mockQuery).toHaveBeenCalledTimes(4) expect(onComplete).toHaveBeenCalledWith({ data_points: { - summary: expect.stringMatching(/Aviso Importante*/), + summary: expect.stringMatching( + new RegExp(`${DISCLAIMER_MSG_FORM}.*Patient Health Questionnaire.*Overall health.*Symptoms.*health concerns.*`, 's') + ), }, }) - - const summary = onComplete.mock.calls[0][0].data_points.summary - expect(summary).toMatch(/General Dummy Form/) - expect(summary).toMatch(/•/) - - // Check if the summary is in Spanish - expect(summary).toMatch(/respuesta/) - expect(onError).not.toHaveBeenCalled() - }) + }, 30000) }) diff --git a/extensions/shelly/actions/summarizeFormsInStep/summarizeFormsInStep.test.ts b/extensions/shelly/actions/summarizeFormsInStep/summarizeFormsInStep.test.ts index 1cdbdcba5..a750425ee 100644 --- a/extensions/shelly/actions/summarizeFormsInStep/summarizeFormsInStep.test.ts +++ b/extensions/shelly/actions/summarizeFormsInStep/summarizeFormsInStep.test.ts @@ -3,6 +3,7 @@ import { TestHelpers } from '@awell-health/extensions-core' import { generateTestPayload } from '@/tests' import { summarizeFormsInStep } from '.' 
+import { DISCLAIMER_MSG_FORM } from '../../lib/constants' import { mockMultipleFormsPathwayActivitiesResponse } from './__mocks__/multipleFormsPathwayActivitiesResponse' import { mockMultipleFormsDefinitionResponse1, @@ -12,26 +13,23 @@ import { mockMultipleFormsResponseResponse1, mockMultipleFormsResponseResponse2, } from './__mocks__/multipleFormsResponsesResponse' -import { DISCLAIMER_MSG_FORM } from '../../lib/constants' -import { markdownToHtml } from '../../../../src/utils' -// Import ChatOpenAI after mocking -import { ChatOpenAI } from '@langchain/openai' - -// Mock the '@langchain/openai' module -jest.mock('@langchain/openai', () => { - const mockInvoke = jest.fn().mockResolvedValue({ - content: 'Mocked summary from LLM', - }) - - const mockChatOpenAI = jest.fn().mockImplementation(() => ({ - invoke: mockInvoke, - })) - - return { - ChatOpenAI: mockChatOpenAI, - } -}) +// Mock the OpenAI modules +jest.mock('../../../../src/lib/llm/openai/createOpenAIModel', () => ({ + createOpenAIModel: jest.fn().mockResolvedValue({ + model: { + invoke: jest.fn().mockResolvedValue({ + content: 'Summary of multiple forms: Form 1 shows patient reported good health. 
Form 2 indicates normal vital signs.', + }), + }, + metadata: { + activity_id: 'X74HeDQ4N0gtdaSEuzF8s', + care_flow_id: 'ai4rZaYEocjB', + care_flow_definition_id: 'whatever', + tenant_id: 'test-tenant-id', + }, + }), +})) describe('summarizeFormsInStep - Mocked LLM calls', () => { const { onComplete, onError, helpers, extensionAction, clearMocks } = @@ -40,63 +38,56 @@ describe('summarizeFormsInStep - Mocked LLM calls', () => { beforeEach(() => { clearMocks() jest.clearAllMocks() - }) + const mockQuery = jest.fn() + .mockResolvedValueOnce({ + activity: { + success: true, + activity: mockMultipleFormsPathwayActivitiesResponse.activities[0] + } + }) + .mockResolvedValueOnce({ + pathwayStepActivities: { + success: true, + activities: mockMultipleFormsPathwayActivitiesResponse.activities.filter( + activity => activity.object.type === 'FORM' + ) + } + }) + .mockResolvedValueOnce({ + form: mockMultipleFormsDefinitionResponse1, + }) + .mockResolvedValueOnce({ + form: mockMultipleFormsDefinitionResponse2, + }) + .mockResolvedValueOnce({ + formResponse: mockMultipleFormsResponseResponse1, + }) + .mockResolvedValueOnce({ + formResponse: mockMultipleFormsResponseResponse2, + }) - it('Should summarize multiple forms with LLM', async () => { - const summarizeFormWithLLMSpy = jest.spyOn( - require('../../lib/summarizeFormWithLLM/summarizeFormWithLLM'), - 'summarizeFormWithLLM' - ) + helpers.awellSdk = jest.fn().mockReturnValue({ + orchestration: { + query: mockQuery + } + }) + }) + it('Should summarize multiple forms with mocked OpenAI', async () => { const payload = generateTestPayload({ pathway: { id: 'ai4rZaYEocjB', definition_id: 'whatever', + tenant_id: 'test-tenant-id', }, activity: { id: 'X74HeDQ4N0gtdaSEuzF8s' }, - patient: { id: 'whatever' }, fields: { summaryFormat: 'Bullet-points', language: 'Default', }, - settings: { - openAiApiKey: 'a', - }, + settings: {}, }) - // Mock the Awell SDK - const awellSdkMock = { - orchestration: { - mutation: 
jest.fn().mockResolvedValue({}), - query: jest - .fn() - .mockResolvedValueOnce({ - activity: { - activity: - mockMultipleFormsPathwayActivitiesResponse.activities[0], - success: true, - }, - }) - .mockResolvedValueOnce({ - pathwayStepActivities: mockMultipleFormsPathwayActivitiesResponse, - }) - .mockResolvedValueOnce({ - form: mockMultipleFormsDefinitionResponse1, - }) - .mockResolvedValueOnce({ - form: mockMultipleFormsDefinitionResponse2, - }) - .mockResolvedValueOnce({ - formResponse: mockMultipleFormsResponseResponse1, - }) - .mockResolvedValueOnce({ - formResponse: mockMultipleFormsResponseResponse2, - }), - }, - } - - helpers.awellSdk = jest.fn().mockResolvedValue(awellSdkMock) - await extensionAction.onEvent({ payload, onComplete, @@ -104,25 +95,12 @@ describe('summarizeFormsInStep - Mocked LLM calls', () => { helpers, }) - expect(ChatOpenAI).toHaveBeenCalled() - expect(summarizeFormWithLLMSpy).toHaveBeenCalledWith( - expect.objectContaining({ - ChatModelGPT4o: expect.any(Object), - formData: expect.any(String), - summaryFormat: 'Bullet-points', - language: 'Default', - disclaimerMessage: expect.any(String), - }) - ) - - const expected = await markdownToHtml('Mocked summary from LLM') - + expect(helpers.awellSdk).toHaveBeenCalled() expect(onComplete).toHaveBeenCalledWith({ data_points: { - summary: expected, + summary: expect.stringContaining('Summary of multiple forms'), }, }) - expect(onError).not.toHaveBeenCalled() }) }) diff --git a/extensions/shelly/actions/summarizeFormsInStep/summarizeFormsInStep.ts b/extensions/shelly/actions/summarizeFormsInStep/summarizeFormsInStep.ts index 663b61d37..4e204bec1 100644 --- a/extensions/shelly/actions/summarizeFormsInStep/summarizeFormsInStep.ts +++ b/extensions/shelly/actions/summarizeFormsInStep/summarizeFormsInStep.ts @@ -1,17 +1,17 @@ import { Category, type Action } from '@awell-health/extensions-core' -import { validatePayloadAndCreateSdk } from '../../lib' -import { type settings } from '../../settings' 
import { fields, dataPoints, FieldsValidationSchema } from './config' import { getResponsesForAllForms } from '../../lib/getFormResponseText' import { summarizeFormWithLLM } from '../../lib/summarizeFormWithLLM' import { DISCLAIMER_MSG_FORM } from '../../lib/constants' import { getAllFormsInCurrentStep } from '../../../../src/lib/awell' import { markdownToHtml } from '../../../../src/utils' +import { createOpenAIModel } from '../../../../src/lib/llm/openai' +import { OPENAI_MODELS } from '../../../../src/lib/llm/openai/constants' + -// TODO: get rid of the console logs eventually export const summarizeFormsInStep: Action< typeof fields, - typeof settings, + Record, keyof typeof dataPoints > = { key: 'summarizeFormsInStep', @@ -22,52 +22,49 @@ export const summarizeFormsInStep: Action< previewable: false, dataPoints, onEvent: async ({ payload, onComplete, onError, helpers }): Promise => { - const { - ChatModelGPT4o, - fields: { summaryFormat, language }, - pathway, - activity, - } = await validatePayloadAndCreateSdk({ - fieldsSchema: FieldsValidationSchema, + // 1. Validate input fields + const { summaryFormat, language } = FieldsValidationSchema.parse(payload.fields) + const pathway = payload.pathway + + // 2. 
Initialize OpenAI model with hideDataForTracing enabled + const { model, metadata, callbacks } = await createOpenAIModel({ + settings: payload.settings, + helpers, payload, + modelType: OPENAI_MODELS.GPT4o, + hideDataForTracing: true // Hide input and output data when tracing }) - // Fetch all forms in the current step const formsData = await getAllFormsInCurrentStep({ awellSdk: await helpers.awellSdk(), pathwayId: pathway.id, - activityId: activity.id, + activityId: payload.activity.id, }) - // Get responses for all forms const { result: allFormsResponseText } = getResponsesForAllForms({ formsData, }) - try { - // Summarize all forms' responses - const summary = await summarizeFormWithLLM({ - ChatModelGPT4o, - formData: allFormsResponseText, // Use the concatenated form responses - summaryFormat, - language, - disclaimerMessage: DISCLAIMER_MSG_FORM, // Add disclaimer message - }) - - // Disclaimer is now handled within summarizeFormWithLLM - const htmlSummary = await markdownToHtml(summary) + // Summarize all forms' responses + const summary = await summarizeFormWithLLM({ + model, + formData: allFormsResponseText, + summaryFormat, + language, + disclaimerMessage: DISCLAIMER_MSG_FORM, + metadata, + callbacks // Add callbacks here + }) - - await onComplete({ - data_points: { - summary: htmlSummary, - }, - }) - } catch (error) { - console.error('Error summarizing forms:', error) - throw new Error('Error summarizing forms') - } + // Disclaimer is now handled within summarizeFormWithLLM + const htmlSummary = await markdownToHtml(summary) + + await onComplete({ + data_points: { + summary: htmlSummary, + }, + }) }, } diff --git a/extensions/shelly/actions/summarizeFormsInStep/summarizeFormsInStepRealOpenAI.test.ts b/extensions/shelly/actions/summarizeFormsInStep/summarizeFormsInStepRealOpenAI.test.ts index e2bb8027e..38c54fb01 100644 --- a/extensions/shelly/actions/summarizeFormsInStep/summarizeFormsInStepRealOpenAI.test.ts +++ 
b/extensions/shelly/actions/summarizeFormsInStep/summarizeFormsInStepRealOpenAI.test.ts @@ -4,23 +4,23 @@ import 'dotenv/config' import { TestHelpers } from '@awell-health/extensions-core' import { generateTestPayload } from '@/tests' import { summarizeFormsInStep } from '.' -import { mockMultipleFormsPathwayActivitiesResponse } from './__mocks__/multipleFormsPathwayActivitiesResponse' -import { - mockMultipleFormsDefinitionResponse1, - mockMultipleFormsDefinitionResponse2, -} from './__mocks__/multipleFormsDefinitionResponse' -import { - mockMultipleFormsResponseResponse1, - mockMultipleFormsResponseResponse2, -} from './__mocks__/multipleFormsResponsesResponse' import { DISCLAIMER_MSG_FORM } from '../../lib/constants' +jest.setTimeout(30000) // Increase timeout for real LLM calls + describe.skip('summarizeFormsInStep - Real OpenAI calls', () => { const { onComplete, onError, helpers, extensionAction, clearMocks } = TestHelpers.fromAction(summarizeFormsInStep) beforeEach(() => { clearMocks() + jest.clearAllMocks() + helpers.getOpenAIConfig = jest.fn().mockReturnValue({ + apiKey: process.env.OPENAI_API_KEY, + temperature: 0, + maxRetries: 3, + timeout: 10000 + }) }) it('Should summarize multiple forms with real OpenAI', async () => { @@ -30,41 +30,84 @@ describe.skip('summarizeFormsInStep - Real OpenAI calls', () => { definition_id: 'whatever', }, activity: { id: 'X74HeDQ4N0gtdaSEuzF8s' }, - patient: { id: 'whatever' }, fields: { summaryFormat: 'Bullet-points', language: 'Default', }, - settings: { - openAiApiKey: process.env.OPENAI_API_KEY, - }, + settings: {} }) - // Mock the Awell SDK - const awellSdkMock = { + const mockQuery = jest.fn() + .mockResolvedValueOnce({ + activity: { + success: true, + activity: { + id: 'X74HeDQ4N0gtdaSEuzF8s', + date: '2024-09-11T22:56:59.607Z', + context: { + step_id: 'Xkn5dkyPA5uW' + } + } + } + }) + .mockResolvedValueOnce({ + pathwayStepActivities: { + success: true, + activities: [{ + id: 'X74HeDQ4N0gtdaSEuzF8s', + status: 
'DONE', + date: '2024-09-11T22:56:58.607Z', + object: { + id: 'OGhjJKF5LRmo', + name: 'Test Form', + type: 'FORM' + }, + context: { + step_id: 'Xkn5dkyPA5uW' + } + }] + } + }) + .mockResolvedValueOnce({ + form: { + success: true, + form: { + id: 'OGhjJKF5LRmo', + title: 'Health Assessment Form', + questions: [ + { + id: 'q1', + title: 'How would you rate your overall health?', + type: 'SELECT', + options: [ + { value: 'excellent', label: 'Excellent' }, + { value: 'good', label: 'Good' } + ] + } + ] + } + } + }) + .mockResolvedValueOnce({ + formResponse: { + success: true, + response: { + form_id: 'OGhjJKF5LRmo', + answers: [ + { + question_id: 'q1', + value: 'good' + } + ] + } + } + }) + + helpers.awellSdk = jest.fn().mockReturnValue({ orchestration: { - mutation: jest.fn().mockResolvedValue({}), - query: jest - .fn() - .mockResolvedValueOnce({ - pathwayActivities: mockMultipleFormsPathwayActivitiesResponse, - }) - .mockResolvedValueOnce({ - form: mockMultipleFormsDefinitionResponse1, - }) - .mockResolvedValueOnce({ - form: mockMultipleFormsDefinitionResponse2, - }) - .mockResolvedValueOnce({ - formResponse: mockMultipleFormsResponseResponse1, - }) - .mockResolvedValueOnce({ - formResponse: mockMultipleFormsResponseResponse2, - }), - }, - } - - helpers.awellSdk = jest.fn().mockResolvedValue(awellSdkMock) + query: mockQuery + } + }) await extensionAction.onEvent({ payload, @@ -73,16 +116,15 @@ describe.skip('summarizeFormsInStep - Real OpenAI calls', () => { helpers, }) - expect(onComplete).toHaveBeenCalled() + expect(helpers.awellSdk).toHaveBeenCalled() + expect(mockQuery).toHaveBeenCalledTimes(4) + expect(onComplete).toHaveBeenCalledWith({ + data_points: { + summary: expect.stringContaining(DISCLAIMER_MSG_FORM) + }, + }) expect(onError).not.toHaveBeenCalled() - - const completionResult = onComplete.mock.calls[0][0] - expect(completionResult).toHaveProperty('data_points.summary') - expect(completionResult.data_points.summary).toContain(DISCLAIMER_MSG_FORM) - 
expect(completionResult.data_points.summary.length).toBeGreaterThan( - DISCLAIMER_MSG_FORM.length + 50 - ) - }, 30000) // Increase timeout to 30 seconds for API call + }, 30000) it('Should summarize multiple forms with real OpenAI using Text Paragraph format in French', async () => { const payload = generateTestPayload({ @@ -91,41 +133,84 @@ describe.skip('summarizeFormsInStep - Real OpenAI calls', () => { definition_id: 'whatever', }, activity: { id: 'X74HeDQ4N0gtdaSEuzF8s' }, - patient: { id: 'whatever' }, fields: { summaryFormat: 'Text paragraph', language: 'French', }, - settings: { - openAiApiKey: process.env.OPENAI_API_KEY, - }, + settings: {} }) - // Mock the Awell SDK - const awellSdkMock = { + const mockQuery = jest.fn() + .mockResolvedValueOnce({ + activity: { + success: true, + activity: { + id: 'X74HeDQ4N0gtdaSEuzF8s', + date: '2024-09-11T22:56:59.607Z', + context: { + step_id: 'Xkn5dkyPA5uW' + } + } + } + }) + .mockResolvedValueOnce({ + pathwayStepActivities: { + success: true, + activities: [{ + id: 'X74HeDQ4N0gtdaSEuzF8s', + status: 'DONE', + date: '2024-09-11T22:56:58.607Z', + object: { + id: 'OGhjJKF5LRmo', + name: 'Test Form', + type: 'FORM' + }, + context: { + step_id: 'Xkn5dkyPA5uW' + } + }] + } + }) + .mockResolvedValueOnce({ + form: { + success: true, + form: { + id: 'OGhjJKF5LRmo', + title: 'Health Assessment Form', + questions: [ + { + id: 'q1', + title: 'How would you rate your overall health?', + type: 'SELECT', + options: [ + { value: 'excellent', label: 'Excellent' }, + { value: 'good', label: 'Good' } + ] + } + ] + } + } + }) + .mockResolvedValueOnce({ + formResponse: { + success: true, + response: { + form_id: 'OGhjJKF5LRmo', + answers: [ + { + question_id: 'q1', + value: 'good' + } + ] + } + } + }) + + helpers.awellSdk = jest.fn().mockReturnValue({ orchestration: { - mutation: jest.fn().mockResolvedValue({}), - query: jest - .fn() - .mockResolvedValueOnce({ - pathwayActivities: mockMultipleFormsPathwayActivitiesResponse, - }) - 
.mockResolvedValueOnce({ - form: mockMultipleFormsDefinitionResponse1, - }) - .mockResolvedValueOnce({ - form: mockMultipleFormsDefinitionResponse2, - }) - .mockResolvedValueOnce({ - formResponse: mockMultipleFormsResponseResponse1, - }) - .mockResolvedValueOnce({ - formResponse: mockMultipleFormsResponseResponse2, - }), - }, - } - - helpers.awellSdk = jest.fn().mockResolvedValue(awellSdkMock) + query: mockQuery + } + }) await extensionAction.onEvent({ payload, @@ -134,20 +219,13 @@ describe.skip('summarizeFormsInStep - Real OpenAI calls', () => { helpers, }) - expect(onComplete).toHaveBeenCalled() + expect(helpers.awellSdk).toHaveBeenCalled() + expect(mockQuery).toHaveBeenCalledTimes(4) + expect(onComplete).toHaveBeenCalledWith({ + data_points: { + summary: expect.stringContaining('Avis Important') + }, + }) expect(onError).not.toHaveBeenCalled() - - const completionResult = onComplete.mock.calls[0][0] - expect(completionResult).toHaveProperty('data_points.summary') - expect(completionResult.data_points.summary).toContain('Avis Important') - expect(completionResult.data_points.summary.length).toBeGreaterThan( - DISCLAIMER_MSG_FORM.length + 50 - ) - - // Check if the summary is in French - expect(completionResult.data_points.summary).toMatch(/téléphone/) - - // Check if it's a paragraph (no bullet points) - expect(completionResult.data_points.summary).not.toMatch(/^[•]/m) - }, 30000) // Increase timeout to 30 seconds for API call + }, 30000) }) diff --git a/extensions/shelly/index.ts b/extensions/shelly/index.ts index a3d758eb0..b8c0a235e 100644 --- a/extensions/shelly/index.ts +++ b/extensions/shelly/index.ts @@ -4,11 +4,10 @@ import { AuthorType, } from '@awell-health/extensions-core' import actions from './actions' -import { settings } from './settings' export const shelly: Extension = { key: 'shelly', - title: 'Shelly (experimental)', + title: 'Shelly (Beta)', description: 'Library of AI-powered actions', icon_url: 
'https://res.cloudinary.com/da7x4rzl4/image/upload/v1726037275/Awell%20Extensions/shelly_logo.png', @@ -16,6 +15,6 @@ export const shelly: Extension = { author: { authorType: AuthorType.AWELL, }, - settings, + settings: {}, actions, } diff --git a/extensions/shelly/lib/summarizeFormWithLLM/constants.ts b/extensions/shelly/lib/summarizeFormWithLLM/prompt.ts similarity index 100% rename from extensions/shelly/lib/summarizeFormWithLLM/constants.ts rename to extensions/shelly/lib/summarizeFormWithLLM/prompt.ts diff --git a/extensions/shelly/lib/summarizeFormWithLLM/summarizeFormWithLLM.test.ts b/extensions/shelly/lib/summarizeFormWithLLM/summarizeFormWithLLM.test.ts index 5af38ab68..51ea8d845 100644 --- a/extensions/shelly/lib/summarizeFormWithLLM/summarizeFormWithLLM.test.ts +++ b/extensions/shelly/lib/summarizeFormWithLLM/summarizeFormWithLLM.test.ts @@ -23,21 +23,19 @@ const sampleForms = { } describe('summarizeFormWithLLM', () => { - let ChatModelGPT4oMock: jest.Mocked + let mockModel: jest.Mocked beforeEach(() => { - // Define the 'invoke' method in the mock - ChatModelGPT4oMock = { + mockModel = { invoke: jest.fn(), } as unknown as jest.Mocked }) - it('should return a mocked summary for the General Health Questionnaire with Bullet-points format', async () => { + it('should return a summary for the General Health Questionnaire with Bullet-points format', async () => { const mockedSummary = 'Patient reports persistent sharp lower back pain for two weeks, exacerbated by bending or lifting, with numbness in the right leg. No recent injuries but has started a new job involving heavy lifting.' 
- // Mock the 'invoke' method to return an AIMessage - ChatModelGPT4oMock.invoke.mockResolvedValueOnce( + mockModel.invoke.mockResolvedValueOnce( new AIMessageChunk(mockedSummary) ) @@ -50,28 +48,37 @@ describe('summarizeFormWithLLM', () => { ) .join('\n') - const summaryFormat = 'Bullet-points' - const language = 'Default' - const disclaimerMessage = 'This is a test disclaimer message.' + const metadata = { + activity_id: 'test-activity-id', + care_flow_definition_id: 'test-def-id', + care_flow_id: 'test-pathway-id', + tenant_id: 'test-tenant-id', + org_slug: 'test-org-slug', + org_id: 'test-org-id' + } const summary = await summarizeFormWithLLM({ - ChatModelGPT4o: ChatModelGPT4oMock, - formData: formData, - summaryFormat, - language, - disclaimerMessage + model: mockModel, + formData, + summaryFormat: 'Bullet-points', + language: 'Default', + disclaimerMessage: 'This is a test disclaimer message.', + metadata }) - expect(summary).toBe(mockedSummary) - expect(ChatModelGPT4oMock.invoke).toHaveBeenCalledTimes(1) + expect(mockModel.invoke).toHaveBeenCalledTimes(1) + expect(mockModel.invoke).toHaveBeenCalledWith( + expect.anything(), + { metadata, runName: 'ShellySummarizeForm' } + ) }) - it('should return a mocked summary for the General Health Questionnaire with Text paragraph format', async () => { + it('should return a summary for the General Health Questionnaire with Text paragraph format', async () => { const mockedSummary = 'The patient has been experiencing persistent back pain for two weeks, describing it as a sharp pain in the lower back that worsens when bending or lifting. Over-the-counter painkillers have been ineffective. While there are no recent injuries, the patient started a new job requiring heavy lifting. Additionally, the patient reports occasional numbness in the right leg.' 
- ChatModelGPT4oMock.invoke.mockResolvedValueOnce( + mockModel.invoke.mockResolvedValueOnce( new AIMessageChunk(mockedSummary) ) @@ -84,53 +91,54 @@ describe('summarizeFormWithLLM', () => { ) .join('\n') - const summaryFormat = 'Text paragraph' - const language = 'Default' - const disclaimerMessage = 'This is a test disclaimer message.' + const metadata = { + activity_id: 'test-activity-id', + care_flow_definition_id: 'test-def-id', + care_flow_id: 'test-pathway-id', + tenant_id: 'test-tenant-id', + org_slug: 'test-org-slug', + org_id: 'test-org-id' + } const summary = await summarizeFormWithLLM({ - ChatModelGPT4o: ChatModelGPT4oMock, - formData: formData, - summaryFormat, - language, - disclaimerMessage + model: mockModel, + formData, + summaryFormat: 'Text paragraph', + language: 'Default', + disclaimerMessage: 'This is a test disclaimer message.', + metadata }) - expect(summary).toBe(mockedSummary) - expect(ChatModelGPT4oMock.invoke).toHaveBeenCalledTimes(1) - }) - - it('should return a mocked summary for the General Health Questionnaire with default format when not specified', async () => { - const mockedSummary = - '- **Reason for visit** - Persistent back pain for two weeks\n- **Nature of back pain** - Sharp pain in lower back, worse when bending or lifting\n- **Medications/treatments** - Over-the-counter painkillers, ineffective\n- **Recent injuries/accidents** - None, but started new job with heavy lifting\n- **Other symptoms** - Occasional numbness in right leg' - - ChatModelGPT4oMock.invoke.mockResolvedValueOnce( - new AIMessageChunk(mockedSummary) + expect(mockModel.invoke).toHaveBeenCalledTimes(1) + expect(mockModel.invoke).toHaveBeenCalledWith( + expect.anything(), + { metadata, runName: 'ShellySummarizeForm' } ) + }) - const formName = 'General Health Questionnaire' - const form = sampleForms[formName] - const formData = form.questions - .map( - (question, index) => - `Question: ${question}\nAnswer: ${form.answers[index]}\n` - ) - .join('\n') - - 
const language = 'Default' - const disclaimerMessage = 'This is a test disclaimer message.' - - const summary = await summarizeFormWithLLM({ - ChatModelGPT4o: ChatModelGPT4oMock, - formData: formData, - summaryFormat: 'undefined', - language, - disclaimerMessage - }) - - - expect(summary).toBe(mockedSummary) - expect(ChatModelGPT4oMock.invoke).toHaveBeenCalledTimes(1) + it('should handle errors gracefully', async () => { + mockModel.invoke.mockRejectedValueOnce(new Error('API Error')) + + const formData = 'Test form data' + const metadata = { + activity_id: 'test-activity-id', + care_flow_definition_id: 'test-def-id', + care_flow_id: 'test-pathway-id', + tenant_id: 'test-tenant-id', + org_slug: 'test-org-slug', + org_id: 'test-org-id' + } + + await expect( + summarizeFormWithLLM({ + model: mockModel, + formData, + summaryFormat: 'Bullet-points', + language: 'Default', + disclaimerMessage: 'This is a test disclaimer message.', + metadata + }) + ).rejects.toThrow('Failed to generate form summary') }) }) diff --git a/extensions/shelly/lib/summarizeFormWithLLM/summarizeFormWithLLM.ts b/extensions/shelly/lib/summarizeFormWithLLM/summarizeFormWithLLM.ts index 8e3784400..7005e94a5 100644 --- a/extensions/shelly/lib/summarizeFormWithLLM/summarizeFormWithLLM.ts +++ b/extensions/shelly/lib/summarizeFormWithLLM/summarizeFormWithLLM.ts @@ -1,32 +1,64 @@ -import { systemPromptBulletPoints, systemPromptTextParagraph } from './constants' +import { systemPromptBulletPoints, systemPromptTextParagraph } from './prompt' import { type ChatOpenAI } from '@langchain/openai' +import { type AIActionMetadata } from '../../../../src/lib/llm/openai/types' +import type { BaseCallbackHandler } from "@langchain/core/callbacks/base" + +/** + * Uses LLM to summarize form data in a specified format and language. + * The function follows these steps: + * 1. Formats prompt with form data and preferences + * 2. Runs LLM with appropriate system prompt + * 3. 
Returns formatted summary with disclaimer + * + * @example + * const result = await summarizeFormWithLLM({ + * model, + * formData: "Name: John Doe\nAge: 30\n...", + * summaryFormat: "Bullet-points", + * language: "English", + * disclaimerMessage: "AI generated...", + * metadata: { ... } + * }) + */ export const summarizeFormWithLLM = async ({ - ChatModelGPT4o, + model, formData, summaryFormat, language, disclaimerMessage, + metadata, + callbacks, }: { - ChatModelGPT4o: ChatOpenAI + model: ChatOpenAI formData: string summaryFormat: string language: string disclaimerMessage: string + metadata: AIActionMetadata + callbacks?: BaseCallbackHandler[] }): Promise => { - const systemPrompt = summaryFormat === 'Bullet-points' ? systemPromptBulletPoints : - summaryFormat === 'Text paragraph' ? systemPromptTextParagraph : - systemPromptBulletPoints; // Default to bullet points if unknown format + summaryFormat === 'Text paragraph' ? systemPromptTextParagraph : + systemPromptBulletPoints // Default to bullet points if unknown format + const prompt = await systemPrompt.format({ language, input: formData, disclaimerMessage, }) - const summaryMessage = await ChatModelGPT4o.invoke(prompt) - - // TODO: for some reason compiler doesn't know that content is a string - const summary = summaryMessage.content as string + try { + const result = await model.invoke( + prompt, + { + metadata, + runName: 'ShellySummarizeForm', + callbacks + } + ) - return summary + return result.content as string + } catch (error) { + throw new Error('Failed to generate form summary') + } } diff --git a/extensions/shelly/lib/validatePayloadAndCreateSdk.ts b/extensions/shelly/lib/validatePayloadAndCreateSdk.ts index aaea68832..7f51f21c2 100644 --- a/extensions/shelly/lib/validatePayloadAndCreateSdk.ts +++ b/extensions/shelly/lib/validatePayloadAndCreateSdk.ts @@ -5,8 +5,6 @@ import { type Patient, } from '@awell-health/extensions-core' import z from 'zod' -import { SettingsValidationSchema } from 
'../settings' -import { ChatOpenAI } from '@langchain/openai' import { type Activity } from '@awell-health/extensions-core/dist/types/Activity' type ValidatePayloadAndCreateSdk = < @@ -16,10 +14,8 @@ type ValidatePayloadAndCreateSdk = < fieldsSchema: T payload: P }) => Promise<{ - ChatModelGPT4oMini: ChatOpenAI - ChatModelGPT4o: ChatOpenAI fields: z.infer<(typeof args)['fieldsSchema']> - settings: z.infer + settings: Record pathway: Pathway patient: Patient activity: Activity @@ -32,8 +28,11 @@ export const validatePayloadAndCreateSdk: ValidatePayloadAndCreateSdk = async ({ const { settings, fields } = validate({ schema: z.object({ fields: fieldsSchema, - settings: SettingsValidationSchema, - pathway: z.object({ id: z.string() }), + settings: z.object({}).strict(), + pathway: z.object({ + id: z.string(), + definition_id: z.string(), + }), activity: z.object({ id: z.string() }), patient: z.object({ id: z.string() }), }), @@ -42,25 +41,7 @@ export const validatePayloadAndCreateSdk: ValidatePayloadAndCreateSdk = async ({ const { patient, pathway, activity } = payload - const ChatModelGPT4oMini = new ChatOpenAI({ - modelName: 'gpt-4o-mini', - openAIApiKey: settings.openAiApiKey, - temperature: 0, // To ensure consistency - maxRetries: 3, - timeout: 10000, - }) - - const ChatModelGPT4o = new ChatOpenAI({ - modelName: 'gpt-4o', - openAIApiKey: settings.openAiApiKey, - temperature: 0, // To ensure consistency - maxRetries: 3, - timeout: 10000, - }) - return { - ChatModelGPT4o, - ChatModelGPT4oMini, fields, settings, patient, diff --git a/extensions/shelly/settings.ts b/extensions/shelly/settings.ts deleted file mode 100644 index 48c2dc6f4..000000000 --- a/extensions/shelly/settings.ts +++ /dev/null @@ -1,16 +0,0 @@ -import { type Setting } from '@awell-health/extensions-core' -import { z, type ZodTypeAny } from 'zod' - -export const settings = { - openAiApiKey: { - key: 'openAiApiKey', - label: 'OpenAI API key ', - obfuscated: true, - required: true, - description: '', - 
}, -} satisfies Record - -export const SettingsValidationSchema = z.object({ - openAiApiKey: z.string().min(1), -} satisfies Record) diff --git a/package.json b/package.json index 8aeff22f2..26ee17c52 100644 --- a/package.json +++ b/package.json @@ -79,11 +79,11 @@ }, "dependencies": { "@awell-health/awell-sdk": "^0.1.20", - "@awell-health/extensions-core": "1.0.11", + "@awell-health/extensions-core": "1.0.16", "@awell-health/healthie-sdk": "^0.1.1", "@dropbox/sign": "^1.8.0", "@hubspot/api-client": "^11.2.0", - "@langchain/core": "^0.3.1", + "@langchain/core": "^0.3.33", "@langchain/openai": "^0.3.0", "@mailchimp/mailchimp_transactional": "^1.0.50", "@medplum/core": "^3.1.4", @@ -108,7 +108,8 @@ "jsdom": "^26.0.0", "jsonpath": "^1.1.1", "jsonwebtoken": "^9.0.2", - "langchain": "^0.3.2", + "langchain": "^0.3.12", + "langsmith": "^0.3.2", "libphonenumber-js": "^1.10.26", "lodash": "^4.17.21", "mailgun.js": "^8.2.1", diff --git a/src/lib/llm/README.md b/src/lib/llm/README.md new file mode 100644 index 000000000..9707c8f5f --- /dev/null +++ b/src/lib/llm/README.md @@ -0,0 +1,19 @@ +# LLM Library + +This library provides standardized ways to create and configure LLM provider calls across our extensions. + +## Structure + +- `/openai` - OpenAI-specific implementations and configurations +- (future) `/anthropic` - Anthropic Claude implementations +- (future) `/gemini` - Google Gemini implementations + +## Usage + +Each provider has its own setup function that handles: +- API key management (custom keys vs default keys) +- Standard configuration (temperature, timeouts, etc.) +- Metadata for tracing +- Error handling + +See provider-specific README files for detailed usage. 
\ No newline at end of file diff --git a/src/lib/llm/openai/README.md b/src/lib/llm/openai/README.md new file mode 100644 index 000000000..44a90f947 --- /dev/null +++ b/src/lib/llm/openai/README.md @@ -0,0 +1,65 @@ +# OpenAI Models + +This module provides standardized configuration and creation of OpenAI models through LangChain. + +## Overview + +The OpenAI Models module simplifies working with OpenAI's language models by providing: +- Standardized model configuration and initialization +- Consistent API key management +- Built-in error handling and automatic retries +- Comprehensive metadata collection and tracing + +## Available Models + +Currently supported models: +- `gpt-4o` - Latest GPT-4o model for complex reasoning and generation tasks +- `gpt-4o-mini` - Smaller, faster GPT-4o variant balancing performance and cost + +These models should be sufficient for most use cases. If additional models are needed, they can be easily added by expanding the model definitions in `constants.ts`. Note that when adding new models, always specify a model snapshot version (e.g. `gpt-4o-2024-08-06`) rather than just the base model name to ensure consistent behavior over time. + + + +## Usage Guide + +### API Key Configuration + +API keys are resolved in the following priority order: +1. Extension-specific key from settings (`settings.openAiApiKey`) +2. Default Awell API key (`helpers.getOpenAIConfig().apiKey`) +3. Error thrown if no valid key found + +### Default Settings + +All models use these production-optimized defaults: +- `temperature: 0` - Consistent, deterministic outputs +- `maxRetries: 3` - Automatic retry on transient failures +- `timeout: 10000ms` - 10 second timeout for responses + +Additional configuration options available in `constants.ts`.
+ +### Metadata & Tracing + +The module automatically captures context metadata including: +- Care flow information (definition ID, flow ID) +- Activity context (activity ID) +- Organization details (tenant ID, org ID/slug) + +This enables: +- Request tracing and monitoring +- Usage analytics and optimization +- Cost allocation and tracking + +### Privacy & Security + +LangSmith tracing is enabled by default but can be configured: +- Set `hideDataForTracing: true` to mask sensitive inputs/outputs +- Metadata and usage metrics still collected +- Helps balance monitoring needs with data privacy + +### Error Handling + +The module provides robust error handling: +- Automatic retries for transient failures +- Clear error messages for common issues +- Fallback behavior for degraded scenarios \ No newline at end of file diff --git a/src/lib/llm/openai/constants.ts b/src/lib/llm/openai/constants.ts new file mode 100644 index 000000000..9a889e5bd --- /dev/null +++ b/src/lib/llm/openai/constants.ts @@ -0,0 +1,30 @@ +/** + * Default configuration settings for OpenAI model calls. + * - temperature: 0 (most deterministic outputs) + * - maxRetries: 3 (automatic retry on transient failures) + * - timeout: 10000ms (10 second timeout for responses) + */ +export const OPENAI_CONFIG = { + temperature: 0, + maxRetries: 3, + timeout: 10000, +} as const + +/** + * Simplified model aliases for easier reference in code. + * Use these constants instead of raw strings when specifying models. + */ +export const OPENAI_MODELS = { + GPT4o: 'gpt-4o', + GPT4oMini: 'gpt-4o-mini', +} as const + +/** + * Maps model aliases to specific versioned model names. + * Always use versioned snapshots rather than base model names to ensure + * consistent behavior over time as models are updated. 
+ */ +export const MODEL_VERSIONS = { + [OPENAI_MODELS.GPT4o]: 'gpt-4o-2024-08-06', + [OPENAI_MODELS.GPT4oMini]: 'gpt-4o-mini-2024-07-18', +} as const \ No newline at end of file diff --git a/src/lib/llm/openai/createOpenAIModel.ts b/src/lib/llm/openai/createOpenAIModel.ts new file mode 100644 index 000000000..b9b312750 --- /dev/null +++ b/src/lib/llm/openai/createOpenAIModel.ts @@ -0,0 +1,98 @@ +import { ChatOpenAI } from '@langchain/openai' +import { Client } from 'langsmith' +import { LangChainTracer } from "@langchain/core/tracers/tracer_langchain" +import { type CreateOpenAIModelConfig, type OpenAIModelConfig } from './types' +import { OPENAI_CONFIG, OPENAI_MODELS, MODEL_VERSIONS } from './constants' +import { isNil } from 'lodash' + +/** + * Creates a configured OpenAI model instance with proper tracing metadata + * Settings can optionally include openAiApiKey, otherwise falls back to environment configuration + * + * @param config - Configuration for model creation + * @returns Configured model, metadata for tracing, and optional callbacks for hiding data + * @throws Error if no API key is available in either settings or environment + * + * @example + * ```typescript + * const { model, metadata, callbacks } = await createOpenAIModel({ + * settings, + * helpers, + * payload, + * modelType: OPENAI_MODELS.GPT4oMini, + * hideDataForTracing: true + * }) + * ``` + */ +export const createOpenAIModel = async ({ + settings = {}, + helpers, + payload, + modelType = OPENAI_MODELS.GPT4oMini, + hideDataForTracing = false, +}: CreateOpenAIModelConfig & { hideDataForTracing?: boolean }): Promise => { + const apiKey = settings.openAiApiKey ?? 
helpers.getOpenAIConfig().apiKey + + if (isNil(apiKey)) { + throw new Error('No OpenAI API key available in settings or environment configuration') + } + + const model = new ChatOpenAI({ + modelName: MODEL_VERSIONS[modelType], + openAIApiKey: apiKey, + ...OPENAI_CONFIG + }) + + let callbacks; + if (hideDataForTracing) { + // Utility function to mask outputs while preserving token usage + const maskOutputs = (outputs: Record): { + generations: any[]; + llmOutput?: { tokenUsage: unknown }; + } => { + const maskedGenerations = outputs.generations.map((generationArray: any[]) => + generationArray.map((generation) => ({ + text: '[MASKED]', + message: { + lc: 1, + type: 'constructor', + id: ['langchain_core', 'messages', 'AIMessage'], + kwargs: { + content: '[MASKED]', + response_metadata: { + tokenUsage: generation.message?.kwargs?.response_metadata?.tokenUsage, + finish_reason: generation.message?.kwargs?.response_metadata?.finish_reason + } + } + } + })) + ); + + return { + generations: maskedGenerations, + llmOutput: { + tokenUsage: outputs.llmOutput?.tokenUsage + } + }; + }; + + const client = new Client({ + hideInputs: () => ({}), + hideOutputs: maskOutputs + }) + callbacks = [new LangChainTracer({ client })] + } + + return { + model, + metadata: { + activity_id: payload.activity.id ?? '', + tenant_id: payload.pathway.tenant_id ?? '', + care_flow_definition_id: payload.pathway.definition_id ?? '', + care_flow_id: payload.pathway.id ?? '', + org_slug: payload.pathway.org_slug ?? '', + org_id: payload.pathway.org_id ?? 
'', + }, + callbacks // if not hideDataForTracing, callbacks is undefined, will use default callbacks + } +} \ No newline at end of file diff --git a/src/lib/llm/openai/index.ts b/src/lib/llm/openai/index.ts new file mode 100644 index 000000000..c99e524ad --- /dev/null +++ b/src/lib/llm/openai/index.ts @@ -0,0 +1,3 @@ +export * from './types' +export * from './constants' +export * from './createOpenAIModel' \ No newline at end of file diff --git a/src/lib/llm/openai/types.ts b/src/lib/llm/openai/types.ts new file mode 100644 index 000000000..4b545c078 --- /dev/null +++ b/src/lib/llm/openai/types.ts @@ -0,0 +1,71 @@ +import type { ChatOpenAI } from '@langchain/openai' +import type { OPENAI_MODELS } from './constants' +import type { BaseCallbackHandler } from "@langchain/core/callbacks/base" + +// This ensures modelType only accepts the exact values from OPENAI_MODELS +export type OpenAIModelType = typeof OPENAI_MODELS[keyof typeof OPENAI_MODELS] + +// Define the minimal structure we need from the pathway +interface MinimalPathway { + id?: string + definition_id?: string + tenant_id?: string + org_slug?: string + org_id?: string +} + +// Define the minimal structure we need from the activity +interface MinimalActivity { + id?: string +} + +// Define what we actually need from the payload +interface RequiredPayloadProperties { + pathway: MinimalPathway + activity: MinimalActivity +} + +export interface CreateOpenAIModelConfig { + /** Settings object that might contain an OpenAI API key */ + settings: { + openAiApiKey?: string + [key: string]: unknown + } + /** Helpers object that can provide a default OpenAI config */ + helpers: { + getOpenAIConfig: () => { apiKey: string } + } + /** Payload containing the minimal required information */ + payload: RequiredPayloadProperties & Record // Changed to be more permissive + /** Which OpenAI model to use */ + modelType?: OpenAIModelType +} + +/** + * Standard metadata structure for AI actions tracking + * Used for LangSmith 
tracing and analytics + */ +export interface AIActionMetadata { + activity_id: string, + care_flow_definition_id: string + care_flow_id: string + tenant_id: string + org_slug: string + org_id: string + [key: string]: unknown +} + +export interface OpenAIModelConfig { + /** Configured LangChain ChatOpenAI instance */ + model: ChatOpenAI + /** Tracing metadata for LangChain calls */ + metadata: { + activity_id: string, + care_flow_definition_id: string + care_flow_id: string + tenant_id: string + org_slug: string + org_id: string + } + callbacks?: BaseCallbackHandler[] +} \ No newline at end of file diff --git a/tests/constants.ts b/tests/constants.ts index a4c9f32b6..aecd4b648 100644 --- a/tests/constants.ts +++ b/tests/constants.ts @@ -5,6 +5,9 @@ export const testPayload: NewActivityPayload = { pathway: { id: 'pathway-id', definition_id: 'pathway-definition-id', + tenant_id: 'tenant-id', + org_slug: 'org-slug', + org_id: 'org-id' }, activity: { id: 'activity-id', diff --git a/yarn.lock b/yarn.lock index d42b1cd8e..c81b342b0 100644 --- a/yarn.lock +++ b/yarn.lock @@ -87,7 +87,7 @@ __metadata: resolution: "@awell-health/awell-extensions@workspace:." 
dependencies: "@awell-health/awell-sdk": "npm:^0.1.20" - "@awell-health/extensions-core": "npm:1.0.11" + "@awell-health/extensions-core": "npm:1.0.16" "@awell-health/healthie-sdk": "npm:^0.1.1" "@dropbox/sign": "npm:^1.8.0" "@faker-js/faker": "npm:^8.0.2" @@ -98,7 +98,7 @@ __metadata: "@graphql-tools/apollo-engine-loader": "npm:^7.3.26" "@graphql-typed-document-node/core": "npm:^3.1.2" "@hubspot/api-client": "npm:^11.2.0" - "@langchain/core": "npm:^0.3.1" + "@langchain/core": "npm:^0.3.33" "@langchain/openai": "npm:^0.3.0" "@mailchimp/mailchimp_transactional": "npm:^1.0.50" "@medplum/core": "npm:^3.1.4" @@ -153,7 +153,8 @@ __metadata: jsdom: "npm:^26.0.0" jsonpath: "npm:^1.1.1" jsonwebtoken: "npm:^9.0.2" - langchain: "npm:^0.3.2" + langchain: "npm:^0.3.12" + langsmith: "npm:^0.3.2" libphonenumber-js: "npm:^1.10.26" lint-staged: "npm:^15.2.11" lodash: "npm:^4.17.21" @@ -193,9 +194,9 @@ __metadata: languageName: node linkType: hard -"@awell-health/extensions-core@npm:1.0.11": - version: 1.0.11 - resolution: "@awell-health/extensions-core@npm:1.0.11" +"@awell-health/extensions-core@npm:1.0.16": + version: 1.0.16 + resolution: "@awell-health/extensions-core@npm:1.0.16" dependencies: "@types/json-schema": "npm:^7.0.15" axios: "npm:^1.7.4" @@ -206,7 +207,7 @@ __metadata: zod-validation-error: "npm:^3.2.0" peerDependencies: "@awell-health/awell-sdk": "*" - checksum: 10/6c6821d6ce3cef9dfc3cafb85deb069cde4c1e0b9ddca77beca647221056bb68025d4be72d95ababce1deaaaa96da2feeedb2f85dc10150e950e1f14f99ad2d6 + checksum: 10/1910b7bff2066aaea25ad10cb5801006ac1e3c625885abc6f4a703e6037ce15b91b44b1a388c928f1741f559fc5d9d7d1755fbb086ca74d1497402f932c27c49 languageName: node linkType: hard @@ -975,6 +976,13 @@ __metadata: languageName: node linkType: hard +"@cfworker/json-schema@npm:^4.0.2": + version: 4.1.0 + resolution: "@cfworker/json-schema@npm:4.1.0" + checksum: 
10/12ee4dd6ef1d02525c7f5938465e3ea8266a37abb170777befd2db5c54e838c834b96cecdaf2c970a4ffcd0313b319aac048e24b185f977408153de33c78d891 + languageName: node + linkType: hard + "@cspotcode/source-map-support@npm:^0.8.0": version: 0.8.1 resolution: "@cspotcode/source-map-support@npm:0.8.1" @@ -2114,22 +2122,23 @@ __metadata: languageName: node linkType: hard -"@langchain/core@npm:^0.3.1": - version: 0.3.3 - resolution: "@langchain/core@npm:0.3.3" +"@langchain/core@npm:^0.3.33": + version: 0.3.33 + resolution: "@langchain/core@npm:0.3.33" dependencies: + "@cfworker/json-schema": "npm:^4.0.2" ansi-styles: "npm:^5.0.0" camelcase: "npm:6" decamelize: "npm:1.2.0" js-tiktoken: "npm:^1.0.12" - langsmith: "npm:^0.1.56" + langsmith: "npm:>=0.2.8 <0.4.0" mustache: "npm:^4.2.0" p-queue: "npm:^6.6.2" p-retry: "npm:4" uuid: "npm:^10.0.0" zod: "npm:^3.22.4" zod-to-json-schema: "npm:^3.22.3" - checksum: 10/f355345b8ded56d4821e7b63675d676abde19293af05b6cc1bc86cf13f264e156a7c86502aabc235a9155be21ad8c61f84d9a5b70caf336d10469fe5d31b03fa + checksum: 10/08b99289af3430a270569a01056224978ad946bb89da7f3c44dc7a0f2882c7ebf5da1d311f82a4d8bdf0b53acff5ff475bc22b9ee752a3ff87b01f6e5dd0cc7f languageName: node linkType: hard @@ -4667,7 +4676,7 @@ __metadata: languageName: node linkType: hard -"commander@npm:^10.0.0, commander@npm:^10.0.1": +"commander@npm:^10.0.0": version: 10.0.1 resolution: "commander@npm:10.0.1" checksum: 10/8799faa84a30da985802e661cc9856adfaee324d4b138413013ef7f087e8d7924b144c30a1f1405475f0909f467665cd9e1ce13270a2f41b141dab0b7a58f3fb @@ -4744,6 +4753,15 @@ __metadata: languageName: node linkType: hard +"console-table-printer@npm:^2.12.1": + version: 2.12.1 + resolution: "console-table-printer@npm:2.12.1" + dependencies: + simple-wcswidth: "npm:^1.0.1" + checksum: 10/37ac91d3601aa6747d3a895487ec9271488c5dae9154745513b6bfbb74f46c414aa4d8e86197b915be9565d1dd2b38005466fa94814ff62b1a08c4e37d57b601 + languageName: node + linkType: hard + "constant-case@npm:^3.0.4": version: 3.0.4 
resolution: "constant-case@npm:3.0.4" @@ -8621,16 +8639,16 @@ __metadata: languageName: node linkType: hard -"langchain@npm:^0.3.2": - version: 0.3.2 - resolution: "langchain@npm:0.3.2" +"langchain@npm:^0.3.12": + version: 0.3.12 + resolution: "langchain@npm:0.3.12" dependencies: "@langchain/openai": "npm:>=0.1.0 <0.4.0" "@langchain/textsplitters": "npm:>=0.0.0 <0.2.0" js-tiktoken: "npm:^1.0.12" js-yaml: "npm:^4.1.0" jsonpointer: "npm:^5.0.1" - langsmith: "npm:^0.1.56-rc.1" + langsmith: "npm:>=0.2.8 <0.4.0" openapi-types: "npm:^12.1.3" p-retry: "npm:4" uuid: "npm:^10.0.0" @@ -8640,10 +8658,12 @@ __metadata: peerDependencies: "@langchain/anthropic": "*" "@langchain/aws": "*" + "@langchain/cerebras": "*" "@langchain/cohere": "*" "@langchain/core": ">=0.2.21 <0.4.0" "@langchain/google-genai": "*" "@langchain/google-vertexai": "*" + "@langchain/google-vertexai-web": "*" "@langchain/groq": "*" "@langchain/mistralai": "*" "@langchain/ollama": "*" @@ -8657,12 +8677,16 @@ __metadata: optional: true "@langchain/aws": optional: true + "@langchain/cerebras": + optional: true "@langchain/cohere": optional: true "@langchain/google-genai": optional: true "@langchain/google-vertexai": optional: true + "@langchain/google-vertexai-web": + optional: true "@langchain/groq": optional: true "@langchain/mistralai": @@ -8679,16 +8703,17 @@ __metadata: optional: true typeorm: optional: true - checksum: 10/e7f2105fc90dd03a9625ef42e92aa6c49ea13aa4e08c346b6af72b79b35e375d4c0f1d7fd261a61ef9d0e14ff83300b91a0d257ab62460cbeb67c136519d597c + checksum: 10/d56af985b4edd2761291c0e4bbd5cf11c61070a2f5bb30f1234ebc3a439234b99ff8216195b0ef69025b8580aba7d833200b32e949cbd8d22b955435d275c0c1 languageName: node linkType: hard -"langsmith@npm:^0.1.56, langsmith@npm:^0.1.56-rc.1": - version: 0.1.59 - resolution: "langsmith@npm:0.1.59" +"langsmith@npm:>=0.2.8 <0.4.0, langsmith@npm:^0.3.2": + version: 0.3.2 + resolution: "langsmith@npm:0.3.2" dependencies: "@types/uuid": "npm:^10.0.0" - commander: "npm:^10.0.1" 
+ chalk: "npm:^4.1.2" + console-table-printer: "npm:^2.12.1" p-queue: "npm:^6.6.2" p-retry: "npm:4" semver: "npm:^7.6.3" @@ -8698,7 +8723,7 @@ __metadata: peerDependenciesMeta: openai: optional: true - checksum: 10/9e71c09f632d99529052e9de86831ef3f909d0072e9826f5127c1a9e9ff2c5b25b3c527b115ee2db4e27966ff5475b3ccf0efcaa76504746a98ee71c9c5b0af3 + checksum: 10/cae13264bf0df9428c0898ce7e6c9ab6a05d0338ae7d2cd26926691b7d8e2a635e6677e216b536c8b843804f62e915f230f6ec578ea8fd622697ac896d7bd008 languageName: node linkType: hard @@ -11242,6 +11267,13 @@ __metadata: languageName: node linkType: hard +"simple-wcswidth@npm:^1.0.1": + version: 1.0.1 + resolution: "simple-wcswidth@npm:1.0.1" + checksum: 10/75b1a5a941f516b829e3ae2dd7d15aa03800b38428e3f0272ac718776243e148f3dda0127b6dbd466a0a1e689f42911d64ca30665724691638721c3497015474 + languageName: node + linkType: hard + "sisteransi@npm:^1.0.5": version: 1.0.5 resolution: "sisteransi@npm:1.0.5"