Update server.js
server.js
CHANGED
@@ -66,6 +66,40 @@ app.post('/pl', async (req, res) => {
   }
 });
 
+app.post('/pls', async (req, res) => {
+  const prompt = req.body.prompt;
+  const apiKey = req.body.api || getRandomApiKey();
+
+  if (!prompt) {
+    return res.status(400).json(`No prompt! :${prompt}`); // Failed to receive the request data
+  }
+
+  try {
+    const response = await axios.post('https://openai-gemini-iota.vercel.app/v1/chat/completions', {
+      messages: [{'role': 'system', 'content': `${start}. Answer briefly, but clearly!`}, {'role': 'user', 'content': prompt}],
+      max_tokens: 4000,
+      temperature: 0.7,
+      model: "gemini-1.5-flash-8b",
+      presence_penalty: 0.4,
+    }, {
+      headers: {
+        'Authorization': `Bearer ${apiKey}`,
+        'Content-Type': 'application/json',
+      },
+    });
+
+    if (response.data.choices && response.data.choices.length > 0 && response.data.choices[0].message) {
+      const content = response.data.choices[0].message.content.trim();
+      res.json({ content });
+    } else {
+      res.json({ content: `{"error":"", "title":"Error", "text":"A server error occurred. (Read error)", "okb":"OK", "oklink":"", "cancelable":"true"}` });
+    }
+  } catch (error) {
+    console.error(error);
+    res.json({ content: `{"error":"", "title":"Error", "text":"A server error occurred. (Generation error)", "okb":"OK", "oklink":"", "cancelable":"true"}` });
+  }
+});
+
 
 const port = 7860;
 app.listen(port, () => {
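
Note: the new /pls endpoint expects a JSON body with a required prompt field and an optional api field (falling back to a server-side key), and replies with { content }. A minimal client sketch, assuming the Space is reachable on the port configured above (localhost:7860 here is a placeholder host; the prompt value is just an example):

// Exercise the new /pls endpoint (Node 18+, built-in fetch).
fetch('http://localhost:7860/pls', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    prompt: 'What is 2 + 2?', // required; the server answers 400 without it
    // api: '<your key>',     // optional; if omitted, the server picks a random key
  }),
})
  .then((r) => r.json())
  .then(({ content }) => console.log(content));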
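Note: the hunk also calls getRandomApiKey(), which is defined elsewhere in server.js and not shown in this diff. Its real implementation is not visible here; a hypothetical sketch of the usual pattern, assuming a pool of keys loaded from an environment variable, would be:

// Hypothetical sketch only -- the actual getRandomApiKey() lives outside this hunk.
// Assumes keys are provided as a comma-separated API_KEYS environment variable.
const apiKeys = (process.env.API_KEYS || '').split(',').filter(Boolean);

function getRandomApiKey() {
  // Pick one key uniformly at random from the pool (undefined if the pool is empty).
  return apiKeys[Math.floor(Math.random() * apiKeys.length)];
}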