Skip to content

Commit

Permalink
Add API
Browse files Browse the repository at this point in the history
  • Loading branch information
anthonyolazabal committed May 2, 2024
1 parent bfc7356 commit 4d23ca2
Show file tree
Hide file tree
Showing 7 changed files with 1,609 additions and 0 deletions.
Binary file added .DS_Store
Binary file not shown.
6 changes: 6 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -396,3 +396,9 @@ FodyWeavers.xsd

# JetBrains Rider
*.sln.iml

# Google creds
service-principal-gemini-credential.json

# Remove local env
.env
18 changes: 18 additions & 0 deletions API/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
# AI API
API endpoint providing access to Google Gemini on /api/ai/gemini-pro

## Prerequisites
The API requires Node.js v18 or later.

## Running the API
Start by installing the dependencies:
```bash
npm i
```

Run the server:
```bash
node server.js
```

The API will be listening on port 3001 by default.
62 changes: 62 additions & 0 deletions API/helpers/gemini.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
const { VertexAI, HarmCategory, HarmBlockThreshold } = require('@google-cloud/vertexai');
require('dotenv').config();

// Project and location are read once from the environment (.env loaded via dotenv).
// NOTE(review): no validation here — if PROJECT_ID or LOCATION is unset, the SDK
// will only fail at request time; consider failing fast at startup.
const PROJECT_ID = process.env.PROJECT_ID;
const LOCATION = process.env.LOCATION;

// Initialize the Vertex AI client once at module load so it is reused across
// requests instead of being re-created per call.
const vertexAi = new VertexAI({ project: PROJECT_ID, location: LOCATION });

// Model name is configurable through the environment to avoid magic strings.
const GEMINI_PRO_MODEL_NAME = process.env.GEMINI_PRO_MODEL_NAME;

// Static safety settings, shared by every request made with this model.
const safetySettings = [{
    category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
    threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
}];

// Instantiate the model once; `generativeModel` is the shared handle used by
// streamGenerateContent below.
const generativeModel = vertexAi.preview.getGenerativeModel({
    model: GEMINI_PRO_MODEL_NAME,
    safety_settings: safetySettings,
    generation_config: { max_output_tokens: 512 },
});

// The streamGenerateContent function does not need to be an async declaration since it returns a Promise implicitly.
/**
 * Sends a prompt to the Gemini model and returns the aggregated text response.
 *
 * @param {string} promptInitText - Context text prepended to the question.
 * @param {string} question - The user's question.
 * @returns {Promise<string>} The model's answer text, or an error message
 *   string if generation failed (the original contract: errors are returned
 *   as strings, never thrown to the caller).
 */
async function streamGenerateContent(promptInitText, question) {
    const request = {
        contents: [
            {
                role: 'user',
                parts: [{ text: promptInitText + question }],
            },
        ],
    };

    try {
        const streamingResp = await generativeModel.generateContentStream(request);

        // Drain the stream; chunks are logged for debugging only — the final
        // text is taken from the aggregated response below. Optional chaining
        // guards against chunks with no candidates (e.g. safety-filtered).
        for await (const item of streamingResp.stream) {
            console.log('stream chunk: ', item.candidates?.[0]?.content?.parts?.[0]);
        }

        const aggregatedResponse = await streamingResp.response;
        const text = aggregatedResponse.candidates?.[0]?.content?.parts?.[0]?.text;
        // A missing candidate usually means the response was blocked by the
        // safety settings; surface that explicitly instead of an opaque TypeError.
        if (text === undefined) {
            throw new Error('No candidate text in Gemini response (possibly blocked by safety settings)');
        }

        console.log('Original question: ' + question);
        console.log('Gemini response: ', text);
        return text;
    } catch (error) {
        console.error('An error occurred during content generation:', error);
        // Preserve the original contract: return the error as a string
        // rather than rethrowing to the caller.
        return "An error occurred during content generation:" + error;
    }
}

// Single public entry point of this helper module (presumably consumed by
// the API server mentioned in the README — verify against server.js).
module.exports = { streamGenerateContent };
Loading

0 comments on commit 4d23ca2

Please sign in to comment.