import express from 'express';
import { OpenaiRes } from '../lib/scrapper.js';
import { NvidiaTogether } from '../lib/@randydev/together/llama.js';
import { CohereAI } from '../lib/@randydev/together/cohere.js';
import { authenticateApiKey, apiLimiter } from '../middleware/midware.js';
const GptRoutes = express.Router();
/**
 * @swagger
 * /api/v1/ai/cohere/command-plus:
 *   get:
 *     summary: Cohere AI
 *     tags: [AI]
 *     parameters:
 *       - in: query
 *         name: query
 *         required: true
 *         description: The user query to send to the Cohere model.
 *         schema:
 *           type: string
 *       - in: query
 *         name: system_prompt
 *         required: false
 *         description: Optional system prompt that overrides the default assistant persona.
 *         schema:
 *           type: string
 *       - in: header
 *         name: x-api-key
 *         required: true
 *         description: API key for authentication
 *         schema:
 *           type: string
 *     responses:
 *       200:
 *         description: Success
 */
GptRoutes.get('/api/v1/ai/cohere/command-plus', authenticateApiKey, apiLimiter, async (req, res) => {
    try {
        const query = req.query.query;
        if (!query) {
            return res.status(400).json({ error: 'Missing required query parameter: query' });
        }
        // Use the caller-supplied system prompt when present, otherwise fall back to the default persona.
        let system_prompt = "Your name is AkenoX AI, a kind and friendly AI assistant that answers in\na short and concise way. Give short step-by-step reasoning if required.\n";
        system_prompt = req.query.system_prompt ? req.query.system_prompt : system_prompt;
        const results = await CohereAI(query, {
            system_prompt: system_prompt
        });
        res.json({ results });
    } catch (error) {
        res.status(500).json({ error: error.message });
    }
});
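// Example client call for the Cohere route above (a minimal sketch; the base URL, port,
// and API key value are placeholders, not part of this module):
//
//   const res = await fetch(
//     'http://localhost:3000/api/v1/ai/cohere/command-plus?' +
//       new URLSearchParams({ query: 'Hello', system_prompt: 'Reply in French.' }),
//     { headers: { 'x-api-key': 'YOUR_API_KEY' } }
//   );
//   const { results } = await res.json();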
/**
 * @swagger
 * /api/v1/ai/nvidia/llama-31-70b:
 *   get:
 *     summary: Nvidia Llama AI
 *     tags: [AI]
 *     parameters:
 *       - in: query
 *         name: query
 *         required: true
 *         description: The user query to send to the Llama 3.1 70B model.
 *         schema:
 *           type: string
 *       - in: query
 *         name: system_prompt
 *         required: false
 *         description: Optional system prompt that overrides the default assistant persona.
 *         schema:
 *           type: string
 *       - in: header
 *         name: x-api-key
 *         required: true
 *         description: API key for authentication
 *         schema:
 *           type: string
 *     responses:
 *       200:
 *         description: Success
 */
GptRoutes.get('/api/v1/ai/nvidia/llama-31-70b', authenticateApiKey, apiLimiter, async (req, res) => {
    try {
        const query = req.query.query;
        if (!query) {
            return res.status(400).json({ error: 'Missing required query parameter: query' });
        }
        // Use the caller-supplied system prompt when present, otherwise fall back to the default persona.
        let system_prompt = "Your name is AkenoX AI, a kind and friendly AI assistant that answers in\na short and concise way. Give short step-by-step reasoning if required.\n";
        system_prompt = req.query.system_prompt ? req.query.system_prompt : system_prompt;
        const results = await NvidiaTogether(query, {
            system_prompt: system_prompt
        });
        res.json({ results });
    } catch (error) {
        res.status(500).json({ error: error.message });
    }
});
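// The Nvidia Llama route is called the same way as the Cohere route above; only the
// path differs (host, port, and key below are illustrative placeholders):
//
//   await fetch('http://localhost:3000/api/v1/ai/nvidia/llama-31-70b?query=Hello',
//     { headers: { 'x-api-key': 'YOUR_API_KEY' } });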
/**
 * @swagger
 * /api/v1/ai/gpt-old:
 *   get:
 *     summary: GPT old turbo version
 *     tags: [AI]
 *     parameters:
 *       - in: query
 *         name: query
 *         required: true
 *         description: The query to be processed by the old GPT model.
 *         schema:
 *           type: string
 *       - in: header
 *         name: x-api-key
 *         required: true
 *         description: API key for authentication
 *         schema:
 *           type: string
 *     responses:
 *       200:
 *         description: Success
 */
GptRoutes.get('/api/v1/ai/gpt-old', authenticateApiKey, apiLimiter, async (req, res) => {
    try {
        const query = req.query.query;
        if (!query) {
            return res.status(400).json({ error: 'Missing required query parameter: query' });
        }
        const results = await OpenaiRes(query);
        res.json({ results });
    } catch (error) {
        res.status(500).json({ error: error.message });
    }
});
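// Example client call for the gpt-old route (a minimal sketch; host, port, and key are
// placeholders). Only the query parameter is required here:
//
//   const res = await fetch('http://localhost:3000/api/v1/ai/gpt-old?query=Hello',
//     { headers: { 'x-api-key': 'YOUR_API_KEY' } });
//   const { results } = await res.json();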
export { GptRoutes };