import { GoogleGenerativeAI } from "@google/generative-ai";
import express from "express";
import * as config from "./config/config.js";

const GeminiRoutes = express.Router();
const genAI = new GoogleGenerativeAI(config.GoogleAPIKey);
/**
 * Sends a prompt to a Gemini model and returns the generated text.
 *
 * @param {string} prompt - The input string for the model.
 * @param {string} setModel - The Gemini model name to use (e.g. "gemini-1.5-flash").
 * @returns {Promise<string>} The generated response text, or a fallback/error message.
 */
async function GeminiResponse(prompt, setModel) {
  try {
    // Obtain a handle to the requested model.
    const model = genAI.getGenerativeModel({
      model: setModel,
    });
    const result = await model.generateContent(prompt);
    // Guard against an empty candidate list or missing parts before reading the text.
    const text = result.response.candidates?.[0]?.content?.parts?.[0]?.text;
    return text || "No response content";
  } catch (e) {
    console.error(`Error in GeminiResponse: ${e.message}`);
    return "Error generating response.";
  }
}
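
// Usage sketch (illustrative only; the prompt below is an assumption, and any
// supported Gemini model id can replace "gemini-1.5-flash"):
//   const reply = await GeminiResponse("Explain HTTP caching in one sentence", "gemini-1.5-flash");
//   console.log(reply);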
/**
 * @swagger
 * tags:
 *   name: Gemini
 *   description: Endpoints for interacting with Google's Gemini models, e.g. chat-style prompts.
 */
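
// A minimal @swagger path annotation for the route below. This is a sketch that
// assumes swagger-jsdoc (or a similar tool) scans this file for OpenAPI comments;
// adjust it to match the project's actual Swagger setup.
/**
 * @swagger
 * /api/v1/google-gemini:
 *   get:
 *     tags: [Gemini]
 *     summary: Generate a Gemini response for a prompt.
 *     parameters:
 *       - in: query
 *         name: query
 *         required: true
 *         schema:
 *           type: string
 *         description: The prompt sent to the model.
 *       - in: query
 *         name: setmodel
 *         required: false
 *         schema:
 *           type: string
 *           default: gemini-1.5-flash
 *         description: The Gemini model to use.
 *     responses:
 *       200:
 *         description: The generated response text.
 */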
GeminiRoutes.get("/api/v1/google-gemini", async (req, res) => {
  try {
    const query = req.query.query;
    // Reject requests that do not supply a prompt.
    if (!query) {
      return res.status(400).json({ error: "Missing required query parameter: query" });
    }
    const setmodel = req.query.setmodel || "gemini-1.5-flash";
    const results = await GeminiResponse(query, setmodel);
    res.json({ message: results });
  } catch (e) {
    res.status(500).json({ error: e.message });
  }
});
export { GeminiRoutes };
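
// Mounting sketch (hypothetical app entry point; the file paths and port value
// are assumptions, not part of this module):
//
//   import express from "express";
//   import { GeminiRoutes } from "./routes/gemini.js";
//
//   const app = express();
//   app.use(GeminiRoutes);
//   app.listen(3000, () => console.log("Listening on :3000"));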