File size: 2,729 Bytes
984400f
3e18b1e
1aa81a8
984400f
b3470c4
 
8f02537
984400f
 
 
02b7f1c
984400f
 
02b7f1c
984400f
 
02b7f1c
984400f
 
0d4c6be
760660f
984400f
 
0d4c6be
984400f
 
 
ac43283
 
 
baf5137
ac43283
 
52a9b55
 
 
 
 
 
baf5137
52a9b55
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b32a5e9
 
 
52a9b55
b32a5e9
 
 
 
 
 
 
b27edd6
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
import { GoogleGenerativeAI } from "@google/generative-ai";
import express from 'express';
import * as config from '../config.js';

// Router carrying the Gemini-related endpoints; exported at the bottom of this file.
const GeminiRoutes = express.Router();

// Shared Gemini client instance, authenticated with the project-wide API key.
// NOTE(review): assumes config.GoogleAPIKey is set — requests will fail at call time if not.
const genAI = new GoogleGenerativeAI(config.GoogleAPIKey);

/**
 * Generate a text completion from the Google Gemini API.
 *
 * @param {string} prompt - The input string for the model.
 * @param {string} setModel - Gemini model identifier (e.g. "gemini-1.5-flash").
 * @returns {Promise<string>} The generated text, a placeholder string when the
 *   response carries no content, or an error message if the request fails.
 */
async function GeminiResponse(prompt, setModel) {
    try {
        const model = genAI.getGenerativeModel({
            model: setModel,
        });
        const result = await model.generateContent(prompt);
        // Walk the candidate structure defensively: any missing level (no
        // candidates, blocked/empty content, no parts) yields undefined and
        // falls through to the placeholder instead of throwing. The original
        // code only guarded candidates[0], so an empty response threw a
        // TypeError and was misreported as "Error generating response.".
        const text = result.response.candidates?.[0]?.content?.parts?.[0]?.text;
        return text || "No response content";
    } catch (e) {
        console.error(`Error in GeminiResponse: ${e.message}`);
        return "Error generating response.";
    }
}

/**
 * @swagger
 * tags:
 *   name: AI
 *   description: Endpoints for interacting with AI models, such as chat-style text generation.
 */

/**
 * @swagger
 * /api/v1/google-gemini:
 *   get:
 *     summary: Get a response from Google Gemini API
 *     tags: [AI]
 *     description: This endpoint interacts with the Google Gemini API to fetch a response based on the query and model.
 *     parameters:
 *       - in: query
 *         name: query
 *         required: true
 *         description: The query to be processed by the Google Gemini API.
 *         schema:
 *           type: string
 *       - in: query
 *         name: setmodel
 *         required: false
 *         description: The model version to use (default is "gemini-1.5-flash").
 *         schema:
 *           type: string
 *     responses:
 *       200:
 *         description: A successful response containing the API result.
 *         content:
 *           application/json:
 *             schema:
 *               type: object
 *               properties:
 *                 message:
 *                   type: string
 *                   description: The result from the Gemini API.
 *       500:
 *         description: Internal server error.
 *         content:
 *           application/json:
 *             schema:
 *               type: object
 *               properties:
 *                 error:
 *                   type: string
 *                   description: Error message.
 */
// GET /api/v1/google-gemini — proxy a prompt to the Gemini API.
// Responds with { message } on success, 400 when `query` is missing,
// and { error } with status 500 on unexpected failure.
GeminiRoutes.get("/api/v1/google-gemini", async (req, res) => {
    try {
        const query = req.query.query;
        // `query` is documented as required; reject early instead of
        // forwarding an undefined prompt to the Gemini API.
        if (!query) {
            return res.status(400).json({ error: "Missing required 'query' parameter." });
        }
        const setmodel = req.query.setmodel || "gemini-1.5-flash";
        const results = await GeminiResponse(query, setmodel);
        res.json({ message: results });
    } catch (e) {
        res.status(500).json({ error: e.message });
    }
});

export { GeminiRoutes };