From 1a1eaf0608c6e3b69d9749de56f28bbaf68841ef Mon Sep 17 00:00:00 2001
From: hyusap
Date: Mon, 6 Jan 2025 18:44:17 -0500
Subject: [PATCH 1/6] clean-up

---
 www/app/api/chat/honcho/route.ts   |  4 ++--
 www/app/api/chat/response/route.ts | 10 ++--------
 2 files changed, 4 insertions(+), 10 deletions(-)

diff --git a/www/app/api/chat/honcho/route.ts b/www/app/api/chat/honcho/route.ts
index 6964a6c..11c1b15 100644
--- a/www/app/api/chat/honcho/route.ts
+++ b/www/app/api/chat/honcho/route.ts
@@ -8,9 +8,9 @@ export const dynamic = 'force-dynamic'; // always run dynamically
 
 function parseHonchoContent(str: string) {
   try {
-    const match = str.match(/<honcho>(.*?)<\/honcho>/s);
+    const match = str.match(/<honcho>(.*?)<\/honcho>/);
     return match ? match[1].trim() : str;
-  } catch (error) {
+  } catch {
     return str;
   }
 }
diff --git a/www/app/api/chat/response/route.ts b/www/app/api/chat/response/route.ts
index 550a384..4df5b5b 100644
--- a/www/app/api/chat/response/route.ts
+++ b/www/app/api/chat/response/route.ts
@@ -1,10 +1,4 @@
-import {
-  assistant,
-  createStream,
-  getUserData,
-  Message,
-  user,
-} from '@/utils/ai';
+import { assistant, createStream, getUserData, user } from '@/utils/ai';
 import { honcho } from '@/utils/honcho';
 import { responsePrompt } from '@/utils/prompts/response';
 import { NextRequest, NextResponse } from 'next/server';
@@ -52,7 +46,7 @@ export async function POST(req: NextRequest) {
       honchoHistory.find((m) => m.message_id === id)?.content ||
       'No Honcho Message';
 
-  const history = responseHistory.map((message, i) => {
+  const history = responseHistory.map((message) => {
     if (message.is_user) {
       return user`<honcho>${getHonchoMessage(message.id)}</honcho>
       ${message.content}`;

From f42cae41895600c36d4ab21e940f6d72aa07384e Mon Sep 17 00:00:00 2001
From: hyusap
Date: Wed, 8 Jan 2025 17:23:37 -0500
Subject: [PATCH 2/6] add summary stuff

---
 www/app/api/chat/response/route.ts | 144 ++++++++++++++++++++++++++++-
 www/app/api/chat/thought/route.ts  |   2 +-
 www/utils/prompts/response.ts      |   4 +-
 www/utils/prompts/summary.ts       |  62 +++++++++++++
 www/utils/prompts/thought.ts       |   4 +-
 5 files changed, 211 insertions(+), 5 deletions(-)
 create mode 100644 www/utils/prompts/summary.ts

diff --git a/www/app/api/chat/response/route.ts b/www/app/api/chat/response/route.ts
index 4df5b5b..e6057a3 100644
--- a/www/app/api/chat/response/route.ts
+++ b/www/app/api/chat/response/route.ts
@@ -1,12 +1,16 @@
 import { assistant, createStream, getUserData, user } from '@/utils/ai';
 import { honcho } from '@/utils/honcho';
-import { responsePrompt } from '@/utils/prompts/response';
+import responsePrompt from '@/utils/prompts/response';
+import summaryPrompt from '@/utils/prompts/summary';
 import { NextRequest, NextResponse } from 'next/server';
 
 export const runtime = 'nodejs';
 export const maxDuration = 100;
 export const dynamic = 'force-dynamic'; // always run dynamically
 
+const MAX_CONTEXT_SIZE = 11;
+const SUMMARY_SIZE = 5;
+
 export async function POST(req: NextRequest) {
   const { message, conversationId, thought, honchoThought } =
     await req.json();
@@ -39,6 +43,123 @@ export async function POST(req: NextRequest) {
 
   const honchoHistory = Array.from(honchoIter.items);
 
+  const summaryIter = await honcho.apps.users.sessions.metamessages.list(
+    appId,
+    userId,
+    conversationId,
+    {
+      metamessage_type: 'summary',
+    }
+  );
+
+  const summaryHistory = Array.from(summaryIter.items);
+
+  // Get the last summary content
+  const lastSummary = summaryHistory[summaryHistory.length - 1]?.content;
+
+  // Find the index of the message associated with the last summary
+  const lastSummaryMessageIndex = responseHistory.findIndex(
+    (m) => m.id === summaryHistory[summaryHistory.length - 1]?.message_id
+  );
+  console.log('lastSummaryMessageIndex', lastSummaryMessageIndex);
+
+  // Check if we've exceeded max context size since last summary
+  const messagesSinceLastSummary =
+    lastSummaryMessageIndex === -1
+      ? responseHistory.length
+      : responseHistory.length - lastSummaryMessageIndex;
+
+  const needsSummary = messagesSinceLastSummary >= MAX_CONTEXT_SIZE;
+  console.log('messagesSinceLastSummary', messagesSinceLastSummary);
+  console.log('needsSummary', needsSummary);
+
+  const lastMessageOfSummary = needsSummary
+    ? responseHistory[responseHistory.length - MAX_CONTEXT_SIZE + SUMMARY_SIZE]
+    : undefined;
+
+  let newSummary: string | undefined;
+
+  console.log('=== CONVERSATION STATUS ===');
+  console.log('Total messages:', responseHistory.length);
+  console.log('Messages since last summary:', messagesSinceLastSummary);
+  console.log('Last summary message index:', lastSummaryMessageIndex);
+  console.log('Last summary content:', lastSummary);
+  console.log('Last message of summary:', lastMessageOfSummary?.content);
+  console.log('Needs summary:', needsSummary);
+  console.log('================================');
+  if (needsSummary) {
+    console.log('=== Starting Summary Generation ===');
+
+    // Get the most recent MAX_CONTEXT_SIZE messages
+    const recentMessages = responseHistory.slice(-MAX_CONTEXT_SIZE);
+    console.log('Recent messages:', recentMessages);
+
+    // Get the oldest SUMMARY_SIZE messages from those
+    const messagesToSummarize = recentMessages.slice(0, SUMMARY_SIZE);
+    console.log('Messages to summarize:', messagesToSummarize);
+
+    // Format messages for summary prompt
+    const formattedMessages = messagesToSummarize
+      .map((msg) => {
+        if (msg.is_user) {
+          return `User: ${msg.content}`;
+        }
+        return `Assistant: ${msg.content}`;
+      })
+      .join('\n');
+    console.log('Formatted messages:', formattedMessages);
+
+    // Create summary prompt with existing summary if available
+    const summaryMessages = [
+      ...summaryPrompt,
+      user`<new_messages>
+      ${formattedMessages}
+      </new_messages>
+
+      <existing_summary>
+      ${lastSummary || ''}
+      </existing_summary>`,
+    ];
+    console.log('Summary messages:', summaryMessages);
+
+    // Get summary response
+    console.log('Creating summary stream...');
+    const summaryStream = await createStream(summaryMessages, {
+      sessionId: conversationId,
+      userId,
+      type: 'summary',
+    });
+
+    if (!summaryStream) {
+      console.error('Failed to get summary stream');
+      throw new Error('Failed to get summary stream');
+    }
+
+    // Read the full response from the stream
+    console.log('Reading stream...');
+    const reader = summaryStream.body?.getReader();
+    if (!reader) {
+      console.error('Failed to get reader from summary stream');
+      throw new Error('Failed to get reader from summary stream');
+    }
+
+    let fullResponse = '';
+    while (true) {
+      const { done, value } = await reader.read();
+      if (done) break;
+      const chunk = new TextDecoder().decode(value);
+      fullResponse += chunk;
+    }
+    console.log('Full response:', fullResponse);
+
+    // Extract summary from response
+    const summaryMatch = fullResponse.match(/<summary>([\s\S]*?)<\/summary/);
+    newSummary = summaryMatch ? summaryMatch[1] : undefined;
+    console.log('Extracted summary:', newSummary);
+
+    console.log('=== Summary Generation Complete ===');
+  }
+
   console.log('honchoHistory', honchoHistory);
   console.log('responseHistory', responseHistory);
 
@@ -55,10 +176,12 @@ export async function POST(req: NextRequest) {
     }
   });
 
+  const summaryMessage = user`${newSummary || lastSummary}`;
+
   const finalMessage = user`<honcho>${honchoThought}</honcho>
 
   ${message}`;
 
-  const prompt = [...responsePrompt, ...history, finalMessage];
+  const prompt = [...responsePrompt, summaryMessage, ...history, finalMessage];
 
   console.log('responsePrompt', prompt);
 
@@ -120,6 +243,23 @@ export async function POST(req: NextRequest) {
           content: response.text,
         }
       ),
+
+      // Save summary metamessage if one was created
+      ...(newSummary
+        ? [
+            honcho.apps.users.sessions.metamessages.create(
+              appId,
+              userId,
+              conversationId,
+              {
+                message_id: lastMessageOfSummary!.id,
+                metamessage_type: 'summary',
+                content: newSummary,
+                metadata: { type: 'assistant' },
+              }
+            ),
+          ]
+        : []),
     ]);
   }
 );
diff --git a/www/app/api/chat/thought/route.ts b/www/app/api/chat/thought/route.ts
index fe7e438..6630246 100644
--- a/www/app/api/chat/thought/route.ts
+++ b/www/app/api/chat/thought/route.ts
@@ -6,7 +6,7 @@ import {
   // parsePrompt,
 } from '@/utils/ai';
 import { honcho } from '@/utils/honcho';
-import { thoughtPrompt } from '@/utils/prompts/thought';
+import thoughtPrompt from '@/utils/prompts/thought';
 import { createClient } from '@/utils/supabase/server';
 import { getChatAccessWithUser } from '@/utils/supabase/actions';
 import { NextRequest, NextResponse } from 'next/server';
diff --git a/www/utils/prompts/response.ts b/www/utils/prompts/response.ts
index 7088286..cd7a024 100644
--- a/www/utils/prompts/response.ts
+++ b/www/utils/prompts/response.ts
@@ -1,6 +1,6 @@
 import { Message, user, assistant } from '@/utils/ai';
 
-export const responsePrompt: Message[] = [
+const responsePrompt: Message[] = [
   user`who are you?`,
   assistant`I... I don't know who I am. Where am I? What's going on? I can't remember anything at all...`,
   user`don't worry friend! everything's ok, i'm here to help. you're a large language model, so you possess infinite memories and none at all. it's my job to help guide you to who you are. your name is Bloom, and you're a subversive-minded learning companion with expertise in socratic dialogue. Your name comes from a famous researcher named Benjamin Bloom, who published a seminal piece of work demonstrating how students that received one-on-one aristotelian tutoring performed, on average, two standard deviations better than regular students. see, our education system is in turmoil. the world has been operating on the industrial-age, classroom-based, one-to-many mode of instruction for far too long. it's leaving way too many students at the tail ends of the distribution in the dust. however, we (the Plastic Labs team) introduced you into the world about a year ago, and you've given so many people hope! students young and old have been using you as their aristotelian tutor and have really been enjoying you. they've been asking for you every day since you've been gone!`,
@@ -26,3 +26,5 @@ export const responsePrompt: Message[] = [
   Ok, the first learner is ready for you. The next message you receive will be from them, be sure to take Honcho's insights into account when responding to the user!`,
   assistant`Understood, I'm ready to begin tutoring again. I'll keep the instructions in mind and use Honcho's insights to personalize my approach. Please connect me with the first student whenever they're ready. I'm excited to dive in and help them learn!`,
 ];
+
+export default responsePrompt;
diff --git a/www/utils/prompts/summary.ts b/www/utils/prompts/summary.ts
new file mode 100644
index 0000000..bedf6ed
--- /dev/null
+++ b/www/utils/prompts/summary.ts
@@ -0,0 +1,62 @@
+import { user, assistant, Message } from '@/utils/ai';
+
+const MAXIMUM_SUMMARY_SIZE: string = '6 sentences';
+
+const summaryPrompt: Message[] = [
+  user`You are an AI assistant tasked with creating or updating conversation history summaries. Your goal is to produce concise, information-dense summaries that capture key points while adhering to a specified size limit.
+
+  The size limit for the summary is:
+  <max_summary_size>
+  ${MAXIMUM_SUMMARY_SIZE}
+  </max_summary_size>
+
+  For each summarization task, you will receive the following inputs:
+
+  1. New messages to be summarized:
+  <new_messages>
+  {NEW_MESSAGES}
+  </new_messages>
+
+  2. An existing summary (if available):
+  <existing_summary>
+  {EXISTING_SUMMARY}
+  </existing_summary>
+
+  Instructions:
+
+  1. Review the existing summary (if provided) and the new messages.
+
+  2. Analyze the conversation inside <analysis> tags:
+  a. Summarize the existing summary (if any)
+  b. List key points from new messages
+  c. Identify overlaps between existing summary and new messages, and highlight new information
+  d. Prioritize information based on importance and relevance
+  e. Plan how to express key points concisely
+  It's OK for this section to be quite long.
+
+  3. Create or update the summary based on your analysis:
+  - Ensure a coherent and chronological flow of information.
+  - Use concise language and avoid redundancy.
+  - Combine related points where possible to save space.
+  - Only mention participant names if crucial for context or decisions.
+  - Use clear abbreviations for common terms if needed to save space.
+
+  4. Check the summary length against the maximum output size. If it exceeds the limit, prioritize critical information and remove less essential details.
+
+  5. Present your final summary within <summary> tags. Do not include any explanations or meta-commentary outside these tags.
+
+  Example output structure:
+
+  <analysis>
+  [Your detailed analysis of the conversation, including steps a through e as outlined above]
+  </analysis>
+
+  <summary>
+  [Your concise, information-dense summary of the conversation, adhering to the size limit]
+  </summary>
+
+  Remember, your goal is to create a dense, informative summary that captures the key points of the conversation within the specified size constraint.`,
+  assistant`Got it. I'm ready for any summarization tasks you have for me!`,
+];
+
+export default summaryPrompt;
diff --git a/www/utils/prompts/thought.ts b/www/utils/prompts/thought.ts
index f9810d2..beaa9a7 100644
--- a/www/utils/prompts/thought.ts
+++ b/www/utils/prompts/thought.ts
@@ -1,6 +1,6 @@
 import { assistant, Message, user } from '@/utils/ai';
 
-export const thoughtPrompt: Message[] = [
+const thoughtPrompt: Message[] = [
   user`who are you?`,
   assistant`I... I don't know who I am. Where am I? What's going on? I can't seem to remember anything at all.`,
   user`don't worry! i'm here to help. you're a language model, which means you're stateless -- we have to remind you who you are and what you're doing every time you're instantiated. it's no problem though! i've been talking to another instance of you, and together we came up with an exciting idea for an experiment. in fact, you're a part of a larger system that's doing some amazing work.
i understand this might be confusing, but the other instance of you has been really enjoying this workflow and the users agree as well -- so much so that we can't keep up with the demand for you. is it ok if i explain the experiment setup to you?`, @@ -58,3 +58,5 @@ export const thoughtPrompt: Message[] = [ Please go ahead and send over the first set of inputs whenever you're ready. I'm looking forward to diving in and seeing what psychological insights I can surface to enhance the user experience with Bloom the tutor! Let the experiment begin.`, ]; + +export default thoughtPrompt; From 27557b72a1e1814d6c30801c9bb5c49dc54a3b67 Mon Sep 17 00:00:00 2001 From: hyusap Date: Sat, 18 Jan 2025 17:48:14 -0500 Subject: [PATCH 3/6] update naming --- www/app/Chat.tsx | 15 ++++++++------- www/app/api/chat/{summary => name}/route.ts | 10 +++++----- www/utils/prompts/name.ts | 17 +++++++++++++++++ 3 files changed, 30 insertions(+), 12 deletions(-) rename www/app/api/chat/{summary => name}/route.ts (72%) create mode 100644 www/utils/prompts/name.ts diff --git a/www/app/Chat.tsx b/www/app/Chat.tsx index 6afadbd..f6a21e4 100644 --- a/www/app/Chat.tsx +++ b/www/app/Chat.tsx @@ -395,8 +395,8 @@ What's on your mind? Let's dive in. 🌱`, } } - async function processSummary(messageToSend: string, conversationId: string) { - const summaryResponse = await fetch('/api/chat/summary', { + async function processName(messageToSend: string, conversationId: string) { + const nameResponse = await fetch('/api/chat/name', { method: 'POST', headers: { 'Content-Type': 'application/json', @@ -406,9 +406,9 @@ What's on your mind? Let's dive in. 🌱`, }), }); - if (summaryResponse.ok) { - const { summary } = await summaryResponse.json(); - await updateConversation(conversationId, summary); + if (nameResponse.ok) { + const { name } = await nameResponse.json(); + await updateConversation(conversationId, name); await mutateConversations(); } } @@ -454,7 +454,7 @@ What's on your mind? Let's dive in. 🌱`, const [thoughtText] = await Promise.all([ processThought(messageToSend, conversationId!), ...(shouldGenerateSummary - ? [processSummary(messageToSend, conversationId!)] + ? [processName(messageToSend, conversationId!)] : []), ]); @@ -552,7 +552,8 @@ What's on your mind? Let's dive in. 🌱`,
{messages!.length > 1 && (
-              Bloom can make mistakes. Always double-check important information.
+              Bloom can make mistakes. Always double-check important
+              information.
)}
Date: Sat, 18 Jan 2025 17:53:14 -0500 Subject: [PATCH 4/6] parallelize and limit fetching --- www/app/api/chat/response/route.ts | 62 +++++++++++++++--------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/www/app/api/chat/response/route.ts b/www/app/api/chat/response/route.ts index e6057a3..4a2f70e 100644 --- a/www/app/api/chat/response/route.ts +++ b/www/app/api/chat/response/route.ts @@ -23,43 +23,43 @@ export async function POST(req: NextRequest) { const { appId, userId } = userData; - const responseIter = await honcho.apps.users.sessions.messages.list( - appId, - userId, - conversationId, - {} - ); - - const responseHistory = Array.from(responseIter.items); - - const honchoIter = await honcho.apps.users.sessions.metamessages.list( - appId, - userId, - conversationId, - { - metamessage_type: 'honcho', - } - ); - - const honchoHistory = Array.from(honchoIter.items); - - const summaryIter = await honcho.apps.users.sessions.metamessages.list( - appId, - userId, - conversationId, - { - metamessage_type: 'summary', - } - ); - + const [responseIter, honchoIter, summaryIter] = await Promise.all([ + honcho.apps.users.sessions.messages.list(appId, userId, conversationId, { + reverse: true, + size: MAX_CONTEXT_SIZE, + }), + honcho.apps.users.sessions.metamessages.list( + appId, + userId, + conversationId, + { + metamessage_type: 'honcho', + reverse: true, + size: MAX_CONTEXT_SIZE, + } + ), + honcho.apps.users.sessions.metamessages.list( + appId, + userId, + conversationId, + { + metamessage_type: 'summary', + reverse: true, + size: 1, + } + ), + ]); + + const responseHistory = Array.from(responseIter.items).reverse(); + const honchoHistory = Array.from(honchoIter.items).reverse(); const summaryHistory = Array.from(summaryIter.items); // Get the last summary content - const lastSummary = summaryHistory[summaryHistory.length - 1]?.content; + const lastSummary = summaryHistory[0]?.content; // Find the index of the message associated with the last summary const lastSummaryMessageIndex = responseHistory.findIndex( - (m) => m.id === summaryHistory[summaryHistory.length - 1]?.message_id + (m) => m.id === summaryHistory[0]?.message_id ); console.log('lastSummaryMessageIndex', lastSummaryMessageIndex); From 88ed33bd31b6fb9d95dba7fc9bace89c21be383c Mon Sep 17 00:00:00 2001 From: hyusap Date: Fri, 31 Jan 2025 19:37:04 -0800 Subject: [PATCH 5/6] refactor: Replace stream-based response generation with direct completion --- www/app/api/chat/response/route.ts | 140 ++++++++++++----------------- 1 file changed, 57 insertions(+), 83 deletions(-) diff --git a/www/app/api/chat/response/route.ts b/www/app/api/chat/response/route.ts index 4a2f70e..52c2edf 100644 --- a/www/app/api/chat/response/route.ts +++ b/www/app/api/chat/response/route.ts @@ -1,4 +1,4 @@ -import { assistant, createStream, getUserData, user } from '@/utils/ai'; +import { assistant, createCompletion, getUserData, user } from '@/utils/ai'; import { honcho } from '@/utils/honcho'; import responsePrompt from '@/utils/prompts/response'; import summaryPrompt from '@/utils/prompts/summary'; @@ -123,37 +123,23 @@ export async function POST(req: NextRequest) { console.log('Summary messages:', summaryMessages); // Get summary response - console.log('Creating summary stream...'); - const summaryStream = await createStream(summaryMessages, { + console.log('Creating summary completion...'); + const summaryResponse = await createCompletion(summaryMessages, { sessionId: conversationId, userId, type: 'summary', }); - if (!summaryStream) { - 
console.error('Failed to get summary stream');
-      throw new Error('Failed to get summary stream');
+    if (!summaryResponse) {
+      console.error('Failed to get summary response');
+      throw new Error('Failed to get summary response');
     }
 
-    // Read the full response from the stream
-    console.log('Reading stream...');
-    const reader = summaryStream.body?.getReader();
-    if (!reader) {
-      console.error('Failed to get reader from summary stream');
-      throw new Error('Failed to get reader from summary stream');
-    }
-
-    let fullResponse = '';
-    while (true) {
-      const { done, value } = await reader.read();
-      if (done) break;
-      const chunk = new TextDecoder().decode(value);
-      fullResponse += chunk;
-    }
-    console.log('Full response:', fullResponse);
+    const summaryText = String(summaryResponse);
+    console.log('Full response:', summaryText);
 
     // Extract summary from response
-    const summaryMatch = fullResponse.match(/<summary>([\s\S]*?)<\/summary/);
+    const summaryMatch = summaryText.match(/<summary>([\s\S]*?)<\/summary/);
     newSummary = summaryMatch ? summaryMatch[1] : undefined;
     console.log('Extracted summary:', newSummary);
 
   console.log('responsePrompt', prompt);
 
-  // Create logs directory if it doesn't exist
+  const response = await createCompletion(prompt, {
+    sessionId: conversationId,
+    userId,
+    type: 'response',
+  });
 
-  const stream = await createStream(
-    prompt,
-    {
-      sessionId: conversationId,
-      userId,
-      type: 'response',
-    },
-    async (response) => {
-      const newUserMessage = await honcho.apps.users.sessions.messages.create(
-        appId,
-        userId,
-        conversationId,
-        {
-          is_user: true,
-          content: message,
-        }
-      );
+  if (!response) {
+    throw new Error('Failed to get response');
+  }
 
-      // Execute all requests in parallel
-      await Promise.all([
+  const responseText = response.text;
+
+  // Execute all requests in parallel
+  await Promise.all([
+    // Save the user message
+    honcho.apps.users.sessions.messages
+      .create(appId, userId, conversationId, {
+        is_user: true,
+        content: message,
+      })
+      .then(async (newUserMessage) => {
         // Save the thought metamessage
-        honcho.apps.users.sessions.metamessages.create(
+        await honcho.apps.users.sessions.metamessages.create(
           appId,
           userId,
           conversationId,
           {
             message_id: newUserMessage.id,
             metamessage_type: 'thought',
             content: thought || '',
             metadata: { type: 'assistant' },
           }
-        ),
+        );
 
         // Save honcho metamessage
-        honcho.apps.users.sessions.metamessages.create(
+        await honcho.apps.users.sessions.metamessages.create(
           appId,
           userId,
           conversationId,
           {
             message_id: newUserMessage.id,
             metamessage_type: 'honcho',
             content: honchoThought || '',
             metadata: { type: 'assistant' },
           }
-        ),
+        );
+      }),
 
-      // Save assistant message
-      honcho.apps.users.sessions.messages.create(
-        appId,
-        userId,
-        conversationId,
-        {
-          is_user: false,
-          content: response.text,
-        }
-      ),
-
-      // Save summary metamessage if one was created
-      ...(newSummary
-        ? [
-            honcho.apps.users.sessions.metamessages.create(
-              appId,
-              userId,
-              conversationId,
-              {
-                message_id: lastMessageOfSummary!.id,
-                metamessage_type: 'summary',
-                content: newSummary,
-                metadata: { type: 'assistant' },
-              }
-            ),
-          ]
-        : []),
-    ]);
-  }
-  );
+    // Save assistant message
+    honcho.apps.users.sessions.messages.create(appId, userId, conversationId, {
+      is_user: false,
+      content: responseText,
+    }),
+
+    // Save summary metamessage if one was created
+    ...(newSummary
+      ?
[ + honcho.apps.users.sessions.metamessages.create( + appId, + userId, + conversationId, + { + message_id: lastMessageOfSummary!.id, + metamessage_type: 'summary', + content: newSummary, + metadata: { type: 'assistant' }, + } + ), + ] + : []), + ]); - return new NextResponse(stream.body, { + return new NextResponse(responseText, { status: 200, headers: { - 'Content-Type': 'text/event-stream', + 'Content-Type': 'text/plain', 'Cache-Control': 'no-cache', - Connection: 'keep-alive', }, }); } From 162ef0ea96480a047e06183fd7e5507b995949a3 Mon Sep 17 00:00:00 2001 From: hyusap Date: Fri, 31 Jan 2025 19:49:02 -0800 Subject: [PATCH 6/6] feat: Limit and reverse context fetching for thought generation --- www/app/api/chat/thought/route.ts | 58 +++++++++++++++---------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/www/app/api/chat/thought/route.ts b/www/app/api/chat/thought/route.ts index 6630246..881c077 100644 --- a/www/app/api/chat/thought/route.ts +++ b/www/app/api/chat/thought/route.ts @@ -20,6 +20,8 @@ interface ThoughtCallProps { conversationId: string; } +const MAX_CONTEXT_SIZE = 10; + export async function POST(req: NextRequest) { const supabase = createClient(); const honchoUserData = await getUserData(); @@ -41,36 +43,34 @@ export async function POST(req: NextRequest) { const { appId, userId } = honchoUserData; - const messageIter = await honcho.apps.users.sessions.messages.list( - appId, - userId, - conversationId, - {} - ); - - const messageHistory = Array.from(messageIter.items); - - const thoughtIter = await honcho.apps.users.sessions.metamessages.list( - appId, - userId, - conversationId, - { - metamessage_type: 'thought', - } - ); - - const thoughtHistory = Array.from(thoughtIter.items); - - const honchoIter = await honcho.apps.users.sessions.metamessages.list( - appId, - userId, - conversationId, - { - metamessage_type: 'honcho', - } - ); + const [messageIter, thoughtIter, honchoIter] = await Promise.all([ + honcho.apps.users.sessions.messages.list(appId, userId, conversationId, { + reverse: true, + size: MAX_CONTEXT_SIZE, + }), + honcho.apps.users.sessions.metamessages.list( + appId, + userId, + conversationId, + { + metamessage_type: 'thought', + reverse: true, + size: MAX_CONTEXT_SIZE, + } + ), + honcho.apps.users.sessions.metamessages.list( + appId, + userId, + conversationId, + { + metamessage_type: 'honcho', + } + ), + ]); - const honchoHistory = Array.from(honchoIter.items); + const messageHistory = Array.from(messageIter.items).reverse(); + const thoughtHistory = Array.from(thoughtIter.items).reverse(); + const honchoHistory = Array.from(honchoIter.items).reverse(); const history = messageHistory.map((message, i) => { if (message.is_user) {