import express from 'express'; | |
import { OpenaiRes } from '../lib/scrapper.js'; | |
import { NvidiaTogether } from '../lib/@randydev/together/llama.js'; | |
import { authenticateApiKey, apiLimiter } from '../middleware/midware.js'; | |
// Router collecting the AI endpoints (Nvidia LLama, legacy GPT);
// mounted by the main app via the named export at the bottom of this file.
const GptRoutes = express.Router();
/** | |
* @swagger | |
* /api/v1/ai/nvidia/llama-31-70b: | |
* get: | |
* summary: Nvidia LLama AI | |
* tags: [AI] | |
* parameters: | |
* - in: query | |
* name: query | |
* required: true | |
 *           description: The query to be processed by the Nvidia LLama 3.1 70B model.
* schema: | |
* type: string | |
* - in: header | |
* name: x-api-key | |
* required: true | |
* description: API key for authentication | |
* schema: | |
* type: string | |
* responses: | |
* 200: | |
* description: Success | |
*/ | |
// GET /api/v1/ai/nvidia/llama-31-70b
// Forwards the `query` string to the Nvidia LLama 3.1 70B backend
// (NvidiaTogether) and returns its response as `{ results }`.
GptRoutes.get('/api/v1/ai/nvidia/llama-31-70b', authenticateApiKey, apiLimiter, async (req, res) => {
    try {
        const query = req.query.query;
        // Reject a missing/empty query up front instead of forwarding
        // `undefined` to the model backend.
        if (!query) {
            return res.status(400).json({ error: 'query parameter is required' });
        }
        const results = await NvidiaTogether(query);
        res.json({ results });
    } catch (error) {
        // A failure here happens AFTER authentication succeeded, so it is a
        // server-side error — 500, not the previous misleading 401.
        res.status(500).json({ error: error.message });
    }
});
/** | |
* @swagger | |
* /api/v1/ai/gpt-old: | |
* get: | |
* summary: GPT OLD version turbo | |
* tags: [AI] | |
* parameters: | |
* - in: query | |
* name: query | |
* required: true | |
* description: The query to be processed by the GPT OLD. | |
* schema: | |
* type: string | |
* - in: header | |
* name: x-api-key | |
* required: true | |
* description: API key for authentication | |
* schema: | |
* type: string | |
* responses: | |
* 200: | |
* description: Success | |
*/ | |
// GET /api/v1/ai/gpt-old
// Forwards the `query` string to the legacy GPT backend (OpenaiRes)
// and returns its response as `{ results }`.
GptRoutes.get('/api/v1/ai/gpt-old', authenticateApiKey, apiLimiter, async (req, res) => {
    try {
        const query = req.query.query;
        // Reject a missing/empty query up front instead of forwarding
        // `undefined` to the backend.
        if (!query) {
            return res.status(400).json({ error: 'query parameter is required' });
        }
        const results = await OpenaiRes(query);
        res.json({ results });
    } catch (error) {
        // A failure here happens AFTER authentication succeeded, so it is a
        // server-side error — 500, not the previous misleading 401.
        res.status(500).json({ error: error.message });
    }
});
// Named export (not default) so importers get a consistent identifier.
export { GptRoutes };