import express from 'express';
import { OpenaiRes } from '../lib/scrapper.js';
import { NvidiaTogether } from '../lib/@randydev/together/llama.js';
import { CohereAI } from '../lib/@randydev/together/cohere.js';
import { AlibabaTogether } from '../lib/@randydev/together/qwen-ai.js';
import { DeepSeekR1 } from '../lib/@randydev/together/blackbox.js';
import { authenticateApiKey, authenticateApiKeyPremium, apiLimiter } from '../middleware/midware.js';
const GptRoutes = express.Router();
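// Router for the AI chat endpoints. Every route below is a GET endpoint
// documented with Swagger JSDoc; all of them apply apiLimiter, and all except
// /api/v1/ai/openai/gpt-old require a premium key via authenticateApiKeyPremium.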
/**
* @swagger
* /api/v1/ai/deepseek/deepseek-R1:
* get:
* summary: Deepseek R1
* tags: [AI]
* parameters:
* - in: query
* name: query
* required: true
* description: User's input query
* schema:
* type: string
* - in: header
* name: x-api-key
* required: true
* description: API key for authentication
* schema:
* type: string
* responses:
* 200:
* description: Success
*/
GptRoutes.get('/api/v1/ai/deepseek/deepseek-R1', authenticateApiKeyPremium, apiLimiter, async (req, res) => {
try {
const query = req.query.query;
const results = await DeepSeekR1(query);
res.json({ results });
} catch (error) {
res.status(401).json({ error: error.message });
}
});
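// Example request (a sketch only; the host is a placeholder and assumes the
// router is mounted at the application root):
//   curl -H "x-api-key: YOUR_PREMIUM_KEY" \
//     "https://your-host.example/api/v1/ai/deepseek/deepseek-R1?query=Hello"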
/**
* @swagger
* /api/v1/ai/alibaba/qwen-plus:
* get:
* summary: Alibaba Qwen AI
* tags: [AI]
* parameters:
* - in: query
* name: query
* required: true
* description: User's input query
* schema:
* type: string
* - in: query
* name: system_prompt
* required: false
* description: Custom system prompt
* schema:
* type: string
* - in: header
* name: x-api-key
* required: true
* description: API key for authentication
* schema:
* type: string
* responses:
* 200:
* description: Success
*/
GptRoutes.get('/api/v1/ai/alibaba/qwen-plus', authenticateApiKeyPremium, apiLimiter, async (req, res) => {
try {
const query = req.query.query;
const system_prompt = req.query.system_prompt || "Your name is AkenoX AI, a kind and friendly AI assistant that answers in a short and concise way. Give short step-by-step reasoning if required.";
const results = await AlibabaTogether(query, {
system_prompt: system_prompt
});
res.json({ results });
} catch (error) {
res.status(401).json({ error: error.message });
}
});
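// Example request overriding the default system prompt (a sketch; query values
// must be URL-encoded and the host is a placeholder):
//   curl -H "x-api-key: YOUR_PREMIUM_KEY" \
//     "https://your-host.example/api/v1/ai/alibaba/qwen-plus?query=Hi&system_prompt=Reply%20in%20French"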
/**
* @swagger
* /api/v1/ai/cohere/command-plus:
* get:
* summary: Cohere AI
* tags: [AI]
* parameters:
* - in: query
* name: query
* required: true
* description: User's input query
* schema:
* type: string
* - in: query
* name: system_prompt
* required: false
* description: Custom system prompt
* schema:
* type: string
* - in: query
* name: chatHistory
* required: false
* description: Previous chat history in JSON format
* schema:
* type: string
* - in: header
* name: x-api-key
* required: true
* description: API key for authentication
* schema:
* type: string
* responses:
* 200:
* description: Success
*/
GptRoutes.get('/api/v1/ai/cohere/command-plus', authenticateApiKeyPremium, apiLimiter, async (req, res) => {
try {
const query = req.query.query;
const chatHistory = req.query.chatHistory ? JSON.parse(req.query.chatHistory) : [];
const system_prompt = req.query.system_prompt || "Your name is AkenoX AI, a kind and friendly AI assistant that answers in a short and concise way. Give short step-by-step reasoning if required.";
const results = await CohereAI(query, {
system_prompt: system_prompt,
chatHistory: chatHistory
});
res.json({ results });
} catch (error) {
res.status(401).json({ error: error.message });
}
});
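// chatHistory travels as a JSON string inside the query, so clients should
// stringify and URL-encode it. A minimal client-side sketch (the history shape
// shown is an assumption; use whatever format CohereAI expects):
//   const history = [{ role: "USER", message: "Hi" }];
//   const url = "https://your-host.example/api/v1/ai/cohere/command-plus" +
//     "?query=" + encodeURIComponent("How are you?") +
//     "&chatHistory=" + encodeURIComponent(JSON.stringify(history));
//   const response = await fetch(url, { headers: { "x-api-key": "YOUR_PREMIUM_KEY" } });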
/**
* @swagger
* /api/v1/ai/nvidia/llama-31-70b:
* get:
* summary: Nvidia Llama AI
* tags: [AI]
* parameters:
* - in: query
* name: query
* required: true
* description: User's input query
* schema:
* type: string
* - in: query
* name: system_prompt
* required: false
* description: Custom system prompt
* schema:
* type: string
* - in: header
* name: x-api-key
* required: true
* description: API key for authentication
* schema:
* type: string
* responses:
* 200:
* description: Success
*/
GptRoutes.get('/api/v1/ai/nvidia/llama-31-70b', authenticateApiKeyPremium, apiLimiter, async (req, res) => {
try {
const query = req.query.query;
const system_prompt = req.query.system_prompt || "Your name is AkenoX AI, a kind and friendly AI assistant that answers in a short and concise way. Give short step-by-step reasoning if required.";
const results = await NvidiaTogether(query, {
system_prompt: system_prompt
});
res.json({ results });
} catch (error) {
res.status(401).json({ error: error.message });
}
});
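// Example request (a sketch; host is a placeholder). When system_prompt is
// omitted, the default AkenoX prompt defined above is used:
//   curl -H "x-api-key: YOUR_PREMIUM_KEY" \
//     "https://your-host.example/api/v1/ai/nvidia/llama-31-70b?query=Hello"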
/**
* @swagger
* /api/v1/ai/openai/gpt-old:
* get:
* summary: GPT (legacy turbo version)
* tags: [AI]
* parameters:
* - in: query
* name: query
* required: true
* description: The query to be processed by the GPT OLD.
* schema:
* type: string
* - in: header
* name: x-api-key
* required: true
* description: API key for authentication
* schema:
* type: string
* responses:
* 200:
* description: Success
*/
GptRoutes.get('/api/v1/ai/openai/gpt-old', authenticateApiKey, apiLimiter, async (req, res) => {
try {
const query = req.query.query;
const results = await OpenaiRes(query);
res.json({ results });
} catch (error) {
res.status(401).json({ error: error.message });
}
});
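// Example request (a sketch; host is a placeholder). This route only needs a
// standard, non-premium API key:
//   curl -H "x-api-key: YOUR_API_KEY" \
//     "https://your-host.example/api/v1/ai/openai/gpt-old?query=Hello"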
export { GptRoutes };