github-actions[bot]
committed on
Commit
·
66c9372
1
Parent(s):
bf2cc97
Update from GitHub Actions
Browse files
server.js
CHANGED
@@ -1,17 +1,7 @@
|
|
1 |
import express from 'express';
|
2 |
import { fal } from '@fal-ai/client';
|
3 |
|
4 |
-
// 从环境变量读取 Fal AI API Key
|
5 |
-
const FAL_KEY = process.env.FAL_KEY;
|
6 |
-
if (!FAL_KEY) {
|
7 |
-
console.error("Error: FAL_KEY environment variable is not set.");
|
8 |
-
process.exit(1);
|
9 |
-
}
|
10 |
|
11 |
-
// 配置 fal 客户端
|
12 |
-
fal.config({
|
13 |
-
credentials: FAL_KEY,
|
14 |
-
});
|
15 |
|
16 |
const app = express();
|
17 |
app.use(express.json({ limit: '50mb' }));
|
@@ -204,6 +194,34 @@ function convertMessagesToFalPrompt(messages) {
|
|
204 |
|
205 |
// POST /v1/chat/completions endpoint (保持不变)
|
206 |
app.post('/v1/chat/completions', async (req, res) => {
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
207 |
const { model, messages, stream = false, reasoning = false, ...restOpenAIParams } = req.body;
|
208 |
|
209 |
console.log(`Received chat completion request for model: ${model}, stream: ${stream}`);
|
@@ -335,7 +353,6 @@ app.listen(PORT, () => {
|
|
335 |
console.log(` Fal OpenAI Proxy Server (System Top + Separator + Recency)`); // 更新策略名称
|
336 |
console.log(` Listening on port: ${PORT}`);
|
337 |
console.log(` Using Limits: System Prompt=${SYSTEM_PROMPT_LIMIT}, Prompt=${PROMPT_LIMIT}`);
|
338 |
-
console.log(` Fal AI Key Loaded: ${FAL_KEY ? 'Yes' : 'No'}`);
|
339 |
console.log(` Chat Completions Endpoint: POST http://localhost:${PORT}/v1/chat/completions`);
|
340 |
console.log(` Models Endpoint: GET http://localhost:${PORT}/v1/models`);
|
341 |
console.log(`===================================================`);
|
|
|
1 |
import express from 'express';
|
2 |
import { fal } from '@fal-ai/client';
|
3 |
|
|
|
|
|
|
|
|
|
|
|
|
|
4 |
|
|
|
|
|
|
|
|
|
5 |
|
6 |
const app = express();
|
7 |
app.use(express.json({ limit: '50mb' }));
|
|
|
194 |
|
195 |
// POST /v1/chat/completions endpoint (保持不变)
|
196 |
app.post('/v1/chat/completions', async (req, res) => {
|
197 |
+
|
198 |
+
let authKey = null;
|
199 |
+
const authHeader = req.headers.authorization;
|
200 |
+
|
201 |
+
if (authHeader) {
|
202 |
+
const parts = authHeader.split(' ');
|
203 |
+
if (parts.length === 2) {
|
204 |
+
const scheme = parts[0];
|
205 |
+
const credentials = parts[1];
|
206 |
+
|
207 |
+
if (scheme === 'Bearer') {
|
208 |
+
authKey = credentials; // JWT 或其他 token
|
209 |
+
} else if (scheme === 'Basic') {
|
210 |
+
// Basic 认证解码
|
211 |
+
const decoded = Buffer.from(credentials, 'base64').toString('utf8');
|
212 |
+
const [username, password] = decoded.split(':');
|
213 |
+
req.auth = { username, password };
|
214 |
+
authKey = decoded; // 或者只保存 username
|
215 |
+
} else if (scheme === 'ApiKey' || scheme === 'Key') {
|
216 |
+
authKey = credentials;
|
217 |
+
}
|
218 |
+
}
|
219 |
+
}
|
220 |
+
|
221 |
+
fal.config({
|
222 |
+
credentials: authKey,
|
223 |
+
});
|
224 |
+
|
225 |
const { model, messages, stream = false, reasoning = false, ...restOpenAIParams } = req.body;
|
226 |
|
227 |
console.log(`Received chat completion request for model: ${model}, stream: ${stream}`);
|
|
|
353 |
console.log(` Fal OpenAI Proxy Server (System Top + Separator + Recency)`); // 更新策略名称
|
354 |
console.log(` Listening on port: ${PORT}`);
|
355 |
console.log(` Using Limits: System Prompt=${SYSTEM_PROMPT_LIMIT}, Prompt=${PROMPT_LIMIT}`);
|
|
|
356 |
console.log(` Chat Completions Endpoint: POST http://localhost:${PORT}/v1/chat/completions`);
|
357 |
console.log(` Models Endpoint: GET http://localhost:${PORT}/v1/models`);
|
358 |
console.log(`===================================================`);
|