gemini-streaming.js (forked from WomB0ComB0/gemini-node-1)
// Interactive CLI chat that streams Gemini responses chunk by chunk.
import dotenv from 'dotenv';
dotenv.config({ path: ".env.local" });

import readline from 'readline';
import { GoogleGenerativeAI } from '@google/generative-ai';

// The API key is read from .env.local (API_KEY=...).
const genAI = new GoogleGenerativeAI(process.env.API_KEY);

// Read user input from stdin and echo prompts to stdout.
const rl = readline.createInterface({
  input: process.stdin,
  output: process.stdout
});

// Guards against sending a new message while a response is still streaming.
let isAwaitingResponse = false;

async function run() {
  const model = genAI.getGenerativeModel({ model: "gemini-pro" });

  // Start a chat session with empty history and a 500-token output cap.
  const chat = model.startChat({
    history: [],
    generationConfig: {
      maxOutputTokens: 500
    }
  });

  function askAndRespond() {
    if (!isAwaitingResponse) {
      rl.question("You: ", async (message) => {
        // Typing "exit" closes the readline interface and ends the loop.
        if (message.toLowerCase() === "exit") {
          rl.close();
          return;
        }
        isAwaitingResponse = true;
        try {
          // Stream the reply and print each chunk as it arrives.
          const result = await chat.sendMessageStream(message);
          let text = "";
          for await (const chunk of result.stream) {
            const chunkText = chunk.text();
            // Strip any HTML-like tags from the chunk before printing.
            console.log(`Gemini: ${chunkText.replace(/<[^>]*>?/gm, "")}`);
            text += chunkText; // accumulate the full reply
          }
          isAwaitingResponse = false;
          askAndRespond();
        } catch (error) {
          console.log("Error: ", error);
          isAwaitingResponse = false;
          askAndRespond();
        }
      });
    } else {
      console.log("Please wait for the current response to complete");
    }
  }

  askAndRespond();
}

run();
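
To try the script locally, a minimal setup might look like the following. This is a sketch, assuming the file lives in a project whose package.json declares "type": "module" (so the import syntax works), that Node 18 or newer is installed, and that API_KEY holds a valid Gemini API key, matching the variable name the script reads from .env.local.

    # .env.local
    API_KEY=your-gemini-api-key

    # install the two dependencies the script imports, then run it
    npm install dotenv @google/generative-ai
    node gemini-streaming.js

Type messages at the "You: " prompt; type "exit" to quit.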