File size: 1,474 Bytes
abed4cc 38033f0 abed4cc 38033f0 3e67929 f7eb0d4 38033f0 7d9059f 38033f0 57d75b9 38033f0 57d75b9 f7eb0d4 57d75b9 38033f0 3e67929 57d75b9 38033f0 9a439f0 57d75b9 38033f0 57d75b9 38033f0 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 |
// Express proxy server that forwards chat requests to the Hugging Face
// Inference API (CodeLlama-34b-Instruct).
const express = require('express');
const axios = require('axios');
const app = express();
// Port is injected by the host (e.g. HF Spaces uses 7860 by default).
const PORT = process.env.PORT || 7860;
// Hugging Face API token; requests will fail with 401 if unset — TODO confirm it is provided in the environment.
const apiToken = process.env.API_KEY;
// Parse JSON request bodies for all routes.
app.use(express.json());
/**
 * POST /chat — proxies a chat-style request to the Hugging Face Inference API.
 *
 * Body: { messages: [{ content: string }, ...], temperature?: number, max_tokens?: number }
 * Responds with { generated_text: string } on success, 400 on bad input,
 * 500 with an error detail on upstream failure.
 */
app.post('/chat', async (req, res) => {
  const { messages, temperature, max_tokens } = req.body;
  // Reject malformed input instead of crashing on messages.map below.
  if (!Array.isArray(messages) || messages.length === 0) {
    return res.status(400).json({ error: 'messages must be a non-empty array' });
  }
  try {
    // Flatten the message array into a single prompt string.
    const inputText = messages.map(msg => msg.content).join('\n');
    const response = await axios.post('https://api-inference.huggingface.co/models/codellama/CodeLlama-34b-Instruct-hf', {
      inputs: inputText,
      parameters: {
        // ?? so that an explicit temperature of 0 is respected (|| would discard it).
        temperature: temperature ?? 0.7,
        max_new_tokens: max_tokens ?? 100
      }
    }, {
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${apiToken}`
      }
    });
    const data = response.data;
    // The HF Inference API returns an array: [{ generated_text: "..." }].
    // Handle both the array shape and a bare-object shape defensively.
    const generatedText = Array.isArray(data)
      ? data[0]?.generated_text
      : data?.generated_text;
    res.json({ generated_text: generatedText });
  } catch (error) {
    // Serialize upstream error payloads — interpolating an object directly
    // would yield "[object Object]" in the client-facing message.
    const detail = error.response
      ? (typeof error.response.data === 'string'
          ? error.response.data
          : JSON.stringify(error.response.data))
      : error.message;
    console.error('Error during text generation:', detail);
    res.status(500).json({ error: `Произошла ошибка при генерации текста: ${detail}` });
  }
});
// Bind the HTTP server and announce readiness on the configured port.
app.listen(PORT, () => console.log(`Server is running on port ${PORT}`));
|