import { GoogleGenerativeAI } from "@google/generative-ai";
import express from 'express';
import * as config from './config/config.js';
// Router exposing the Gemini endpoint below; mounted by the app elsewhere.
const GeminiRoutes = express.Router();
// Shared Gemini client for all requests, authenticated via the configured key.
// NOTE(review): assumes config.GoogleAPIKey is defined — verify config/config.js.
const genAI = new GoogleGenerativeAI(config.GoogleAPIKey);
/**
* @param {string} prompt - The input string for the model.
* @param {string} setModel - you can change the model
* @returns {Promise<string>} The generated response text.
*/
/**
 * Generate a text completion from a Gemini model.
 *
 * @param {string} prompt - The input string for the model.
 * @param {string} setModel - Model identifier (e.g. "gemini-1.5-flash").
 * @returns {Promise<string>} The generated response text, a fallback message
 *   when the response carries no content, or an error message on failure
 *   (this function never rejects).
 */
async function GeminiResponse(prompt, setModel) {
  try {
    const model = genAI.getGenerativeModel({
      model: setModel,
    });
    const result = await model.generateContent(prompt);
    // Guard the whole path: a blocked or empty response may have no
    // candidates, no content, or no parts — the original code only guarded
    // candidates[0] and then dereferenced .parts[0] unconditionally, which
    // threw a TypeError instead of returning the intended fallback.
    const text = result.response?.candidates?.[0]?.content?.parts?.[0]?.text;
    return text || "No response content";
  } catch (e) {
    console.error(`Error in GeminiResponse: ${e.message}`);
    return "Error generating response.";
  }
}
/**
 * @swagger
 * tags:
 *   name: Gemini
 *   description: AI endpoint for chat-style interaction with Google Gemini models.
 */
/**
 * GET /api/v1/google-gemini
 *
 * Query params:
 *   query    {string} required - prompt forwarded to the model.
 *   setmodel {string} optional - model identifier; defaults to "gemini-1.5-flash".
 *
 * Responses:
 *   200 { message: string } - generated text (or the helper's fallback string).
 *   400 { error: string }   - when the required `query` parameter is missing.
 *   500 { error: string }   - unexpected failure in the handler.
 */
GeminiRoutes.get("/api/v1/google-gemini", async (req, res) => {
  try {
    const { query } = req.query;
    // Reject early instead of forwarding `undefined` as the prompt,
    // which previously produced a confusing model/API error.
    if (!query) {
      return res
        .status(400)
        .json({ error: "Missing required 'query' parameter." });
    }
    const setmodel = req.query.setmodel || "gemini-1.5-flash";
    const results = await GeminiResponse(query, setmodel);
    res.json({ message: results });
  } catch (e) {
    res.status(500).json({ error: e.message });
  }
});
export { GeminiRoutes }; |