// Minimal Express proxy: accepts OpenAI-style chat messages on POST /chat,
// forwards them as a single prompt to the Hugging Face Inference API
// (CodeLlama-34b-Instruct), and returns the generated text.
const express = require('express');
const axios = require('axios');

const app = express();
const PORT = process.env.PORT || 7860;
const apiToken = process.env.API_KEY; // Hugging Face API token; requests fail with 401 if unset.

const HF_MODEL_URL =
  'https://api-inference.huggingface.co/models/codellama/CodeLlama-34b-Instruct-hf';

app.use(express.json());

/**
 * POST /chat
 * Body: { messages: [{ content: string }, ...], temperature?: number, max_tokens?: number }
 * Responds 200 with { generated_text: string }, 400 on malformed input,
 * or 500 with an error description if the upstream call fails.
 */
app.post('/chat', async (req, res) => {
  const { messages, temperature, max_tokens } = req.body;

  // Validate early so malformed input yields a 400, not a misleading 500
  // from `messages.map` throwing inside the try block.
  if (!Array.isArray(messages) || messages.length === 0) {
    return res.status(400).json({ error: 'messages must be a non-empty array' });
  }

  try {
    // Flatten the message array into a single prompt string.
    const inputText = messages.map((msg) => msg.content).join('\n');

    const response = await axios.post(
      HF_MODEL_URL,
      {
        inputs: inputText,
        parameters: {
          // `??` instead of `||` so an explicit temperature of 0 is respected.
          temperature: temperature ?? 0.7,
          max_new_tokens: max_tokens ?? 100,
        },
      },
      {
        headers: {
          'Content-Type': 'application/json',
          Authorization: `Bearer ${apiToken}`,
        },
      },
    );

    // FIX: the HF Inference API returns an array of results,
    // e.g. [{ generated_text: "..." }] — the original read
    // `data.generated_text` directly and always got `undefined`.
    const data = response.data;
    const generatedText = Array.isArray(data)
      ? data[0]?.generated_text
      : data.generated_text;

    res.json({ generated_text: generatedText });
  } catch (error) {
    const detail = error.response ? error.response.data : error.message;
    console.error('Error during text generation:', detail);
    // Stringify object payloads so the client doesn't receive "[object Object]".
    const detailText = typeof detail === 'string' ? detail : JSON.stringify(detail);
    res
      .status(500)
      .json({ error: `Произошла ошибка при генерации текста: ${detailText}` });
  }
});

app.listen(PORT, () => {
  console.log(`Server is running on port ${PORT}`);
});