Add Space configuration and server files
- .gitattributes +35 -0
- index.js +83 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
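These appear to be the stock Git LFS rules that Hugging Face generates for new repositories: any file matching these patterns (archives, serialized model weights, tensor dumps, TensorBoard event files) is stored through Git LFS rather than in plain Git, which the Hub requires for large binary files.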
index.js ADDED
@@ -0,0 +1,83 @@
+import express from 'express';
+import cors from 'cors';
+import axios from 'axios';
+import dotenv from 'dotenv';
+import { fileURLToPath } from 'url';
+import { dirname } from 'path';
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = dirname(__filename);
+
+dotenv.config();
+
+const app = express();
+
+// Enhanced CORS configuration
+app.use(cors({
+  origin: '*',
+  methods: ['GET', 'POST'],
+  allowedHeaders: ['Content-Type']
+}));
+
+app.use(express.json());
+
+const OLLAMA_URL = process.env.OLLAMA_URL || 'http://localhost:7860';
+
+// Model-specific configurations
+const MODEL_CONFIGS = {
+  'mxbai-embed-large': { temperature: 0.8, top_p: 0.9 },
+  'minicpm-v': { temperature: 0.8, top_p: 0.9 },
+  'qwen2.5-coder': { temperature: 0.6, top_p: 0.95 },
+  'codegemma': { temperature: 0.6, top_p: 0.95 },
+  'codellama': { temperature: 0.7, top_p: 0.95 },
+  'llama3.2-vision': { temperature: 0.8, top_p: 0.9 }
+};
+
+// Health check endpoint
+app.get('/health', (req, res) => {
+  res.status(200).json({
+    status: 'healthy',
+    ollama: 'running',
+    model: 'minicpm-v'
+  });
+});
+
+// Root endpoint
+app.get('/', (req, res) => {
+  res.status(200).json({
+    message: 'Aide AI Backend Service',
+    version: '1.0'
+  });
+});
+
+// Chat endpoint: forwards the user prompt to the Ollama model
+app.post('/api/chat', async (req, res) => {
+  const { message } = req.body;
+  try {
+    const response = await axios.post(`${OLLAMA_URL}/api/generate`, {
+      model: 'minicpm-v',
+      prompt: message,
+      stream: false,
+      options: {
+        num_ctx: 2048,
+        num_thread: 4
+      }
+    });
+    res.json({ response: response.data.response });
+  } catch (error) {
+    console.error('Error:', error);
+    res.status(500).json({ error: 'Model processing error' });
+  }
+});
+
+app.get('/api/models', async (req, res) => {
+  try {
+    const response = await axios.get(`${OLLAMA_URL}/api/tags`);
+    res.json(response.data);
+  } catch (error) {
+    res.status(500).json({ error: 'Failed to fetch models' });
+  }
+});
+
+const PORT = process.env.PORT || 7860;
+app.listen(PORT, '0.0.0.0', () => console.log(`Server running on port ${PORT}`));
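For reference, here is a minimal client sketch against the three endpoints this commit adds. It is illustrative rather than part of the commit: client.js is a hypothetical file name, the base URL is a placeholder, and it assumes Node 18+ so that the global fetch is available. The request and response shapes follow the server code above.

// client.js: a hypothetical smoke test for the endpoints above (not in the commit)
const BASE_URL = process.env.BASE_URL || 'http://localhost:7860'; // placeholder

async function main() {
  // GET /health: the liveness probe
  console.log(await (await fetch(`${BASE_URL}/health`)).json());

  // GET /api/models: proxied from Ollama's /api/tags
  console.log(await (await fetch(`${BASE_URL}/api/models`)).json());

  // POST /api/chat: sends { message }, receives { response }
  const res = await fetch(`${BASE_URL}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ message: 'Say hello in one sentence.' })
  });
  console.log((await res.json()).response);
}

main().catch(console.error);

Two details in the server code are worth flagging. First, MODEL_CONFIGS is declared but never read; the chat handler hardcodes its options object. If per-model sampling settings were the intent, spreading the matching entry into options (for example, options: { ...MODEL_CONFIGS['minicpm-v'], num_ctx: 2048, num_thread: 4 }) would apply them, though that is a guess at the intent rather than what the commit ships. Second, OLLAMA_URL and PORT both default to 7860, so with no environment variables set the server would proxy chat requests to itself; presumably the Space sets OLLAMA_URL explicitly (Ollama's own default port is 11434).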