tetel commited on
Commit
7c5a29d
·
verified ·
1 Parent(s): c75cc7f

Upload 6 files

Browse files
Files changed (6) hide show
  1. .env.example +3 -0
  2. .gitignore +8 -0
  3. Dockerfile +16 -0
  4. docker-compose.yml +13 -0
  5. main.py +396 -0
  6. pyproject.toml +29 -0
.env.example ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
# Google Gemini web-session cookies (copy from your browser's cookies for gemini.google.com)
SECURE_1PSID=your_psid_value_here
SECURE_1PSIDTS=your_psidts_value_here
# Bearer token that API clients must send in the Authorization header
API_KEY=your_api_key_here
.gitignore ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
# Tooling, IDE, and local-environment artifacts (never commit secrets in .env)
.python-version
.idea
.venv
uv.lock
.env
__pycache__
.cursor
.ruff_cache
Dockerfile ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim

WORKDIR /app

# Install runtime dependencies only. `--no-dev` skips the dev dependency group
# (ruff) so it is not shipped in the production image. Copying just
# pyproject.toml first keeps this layer cached while main.py changes.
COPY pyproject.toml .
RUN uv sync --no-dev

# Copy application code
COPY main.py .

# Expose the port the app runs on
EXPOSE 8000

# Command to run the application
CMD ["uv", "run", "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
docker-compose.yml ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Dev/local deployment of the Gemini OpenAI-compatible API server.
# NOTE: the top-level `version` key is obsolete in the Compose specification
# and is intentionally omitted (modern docker compose warns on it).
services:
  gemini-api:
    build: .
    ports:
      - "8000:8000"
    # Bind-mount sources so local edits are picked up without rebuilding
    # (development convenience; remove for a production deployment).
    volumes:
      - ./main.py:/app/main.py
      - ./pyproject.toml:/app/pyproject.toml
    # Credentials (SECURE_1PSID, SECURE_1PSIDTS, API_KEY) come from .env.
    env_file:
      - .env
    restart: unless-stopped
main.py ADDED
@@ -0,0 +1,396 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import asyncio
import base64
import json
import logging
import os
import secrets
import tempfile
import time
import uuid
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional, Union

from fastapi import Depends, FastAPI, Header, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, StreamingResponse
from gemini_webapi import GeminiClient, set_log_level
from gemini_webapi.constants import Model
from pydantic import BaseModel
20
+
21
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
set_log_level("INFO")

app = FastAPI(title="Gemini API FastAPI Server")

# Add CORS middleware.
# NOTE(review): browsers reject credentialed requests when the server answers
# allow_origins=["*"] together with allow_credentials=True — confirm this
# combination is intended.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Global client, created lazily on first use (see get_gemini_client /
# create_chat_completion).
gemini_client = None

# Authentication credentials read from the environment (.env file in Docker)
SECURE_1PSID = os.environ.get("SECURE_1PSID", "")
SECURE_1PSIDTS = os.environ.get("SECURE_1PSIDTS", "")
API_KEY = os.environ.get("API_KEY", "")

# Print debug info at startup
if not SECURE_1PSID or not SECURE_1PSIDTS:
    logger.warning("⚠️ Gemini API credentials are not set or empty! Please check your environment variables.")
    logger.warning("Make sure SECURE_1PSID and SECURE_1PSIDTS are correctly set in your .env file or environment.")
    logger.warning("If using Docker, ensure the .env file is correctly mounted and formatted.")
    logger.warning("Example format in .env file (no quotes):")
    logger.warning("SECURE_1PSID=your_secure_1psid_value_here")
    logger.warning("SECURE_1PSIDTS=your_secure_1psidts_value_here")
else:
    # Only log the first few characters for security
    logger.info(f"Credentials found. SECURE_1PSID starts with: {SECURE_1PSID[:5]}...")
    logger.info(f"Credentials found. SECURE_1PSIDTS starts with: {SECURE_1PSIDTS[:5]}...")

if not API_KEY:
    logger.warning("⚠️ API_KEY is not set or empty! API authentication will not work.")
    logger.warning("Make sure API_KEY is correctly set in your .env file or environment.")
else:
    logger.info(f"API_KEY found. API_KEY starts with: {API_KEY[:5]}...")
63
+
64
+
65
+ # Pydantic models for API requests and responses
66
class ContentItem(BaseModel):
    """One element of an OpenAI-style multi-part message content list."""

    type: str  # "text" or "image_url" (see prepare_conversation)
    text: Optional[str] = None
    image_url: Optional[Dict[str, str]] = None  # e.g. {"url": "data:image/png;base64,..."}
70
+
71
+
72
class Message(BaseModel):
    """A single chat message in the OpenAI chat-completions format."""

    role: str  # "system", "user" or "assistant"
    content: Union[str, List[ContentItem]]  # plain text or multi-part content
    name: Optional[str] = None
76
+
77
+
78
class ChatCompletionRequest(BaseModel):
    """Request body for POST /v1/chat/completions (OpenAI-compatible).

    Only `model`, `messages` and `stream` are consumed by the handler; the
    remaining sampling fields are accepted for client compatibility.
    """

    model: str
    messages: List[Message]
    temperature: Optional[float] = 0.7
    top_p: Optional[float] = 1.0
    n: Optional[int] = 1
    stream: Optional[bool] = False
    max_tokens: Optional[int] = None
    presence_penalty: Optional[float] = 0
    frequency_penalty: Optional[float] = 0
    user: Optional[str] = None
89
+
90
+
91
class Choice(BaseModel):
    """One completion choice in an OpenAI-style response.

    NOTE: the handlers currently build response dicts directly; this model
    documents the intended schema.
    """

    index: int
    message: Message
    finish_reason: str
95
+
96
+
97
class Usage(BaseModel):
    """Token accounting block of an OpenAI-style response (word-count estimate here)."""

    prompt_tokens: int
    completion_tokens: int
    total_tokens: int
101
+
102
+
103
class ChatCompletionResponse(BaseModel):
    """Full OpenAI-style chat completion response (schema reference; handlers return dicts)."""

    id: str
    object: str = "chat.completion"
    created: int
    model: str
    choices: List[Choice]
    usage: Usage
110
+
111
+
112
class ModelData(BaseModel):
    """One entry of the /v1/models listing (OpenAI model object shape)."""

    id: str
    object: str = "model"
    created: int
    owned_by: str = "google"
117
+
118
+
119
class ModelList(BaseModel):
    """OpenAI-style list envelope for /v1/models."""

    object: str = "list"
    data: List[ModelData]
122
+
123
+
124
# Authentication dependency
async def verify_api_key(authorization: str = Header(None)):
    """FastAPI dependency validating the `Authorization: Bearer <key>` header.

    Returns the presented token on success, or None when no API_KEY is
    configured (validation disabled for development).
    Raises HTTPException(401) on any validation failure.
    """
    if not API_KEY:
        # If API_KEY is not set in environment, skip validation (for development)
        logger.warning("API key validation skipped - no API_KEY set in environment")
        return

    if not authorization:
        raise HTTPException(status_code=401, detail="Missing Authorization header")

    # Keep the try body minimal: only the unpack can raise ValueError.
    try:
        scheme, token = authorization.split()
    except ValueError:
        raise HTTPException(status_code=401, detail="Invalid authorization format. Use 'Bearer YOUR_API_KEY'")

    if scheme.lower() != "bearer":
        raise HTTPException(status_code=401, detail="Invalid authentication scheme. Use Bearer token")

    # Constant-time comparison to avoid leaking key material via timing
    # (encode first: compare_digest on str requires ASCII).
    if not secrets.compare_digest(token.encode("utf-8"), API_KEY.encode("utf-8")):
        raise HTTPException(status_code=401, detail="Invalid API key")

    return token
145
+
146
+
147
# Simple error handler middleware
@app.middleware("http")
async def error_handling(request: Request, call_next):
    """Catch any unhandled exception and return a JSON 500 error envelope."""
    try:
        response = await call_next(request)
    except Exception as e:
        logger.error(f"Request failed: {str(e)}")
        return JSONResponse(
            status_code=500,
            content={"error": {"message": str(e), "type": "internal_server_error"}},
        )
    return response
155
+
156
+
157
# Get list of available models
@app.get("/v1/models")
async def list_models():
    """Return the models declared by gemini_webapi in OpenAI list format."""
    now = int(datetime.now(tz=timezone.utc).timestamp())
    data = [
        {
            "id": m.model_name,  # e.g. "gemini-2.0-flash"
            "object": "model",
            "created": now,
            "owned_by": "google-gemini-web",
        }
        for m in Model
    ]
    # Use the module logger instead of a stray debug print().
    logger.debug("Model list: %s", data)
    return {"object": "list", "data": data}
173
+
174
+
175
# Helper to convert between Gemini and OpenAI model names
def map_model_name(openai_model_name: str) -> Model:
    """Find the Model enum member best matching an OpenAI-style model name."""
    # Log every declared model to help debug name mismatches.
    all_models = [m.model_name if hasattr(m, "model_name") else str(m) for m in Model]
    logger.info(f"Available models: {all_models}")

    requested = openai_model_name.lower()

    # 1) Direct substring match against each declared model name.
    for candidate in Model:
        candidate_name = candidate.model_name if hasattr(candidate, "model_name") else str(candidate)
        if requested in candidate_name.lower():
            return candidate

    # 2) Fall back to keyword matching for well-known OpenAI-style aliases.
    model_keywords = {
        "gemini-pro": ["pro", "2.0"],
        "gemini-pro-vision": ["vision", "pro"],
        "gemini-flash": ["flash", "2.0"],
        "gemini-1.5-pro": ["1.5", "pro"],
        "gemini-1.5-flash": ["1.5", "flash"],
    }
    keywords = model_keywords.get(openai_model_name, ["pro"])  # default to a "pro" model

    for candidate in Model:
        candidate_name = candidate.model_name if hasattr(candidate, "model_name") else str(candidate)
        if all(kw.lower() in candidate_name.lower() for kw in keywords):
            return candidate

    # 3) Nothing matched at all: return the first declared model.
    return next(iter(Model))
207
+
208
+
209
# Prepare conversation history from OpenAI messages format
def prepare_conversation(messages: List[Message]) -> tuple:
    """Flatten OpenAI-style messages into a single prompt string for Gemini.

    Returns (conversation_text, temp_file_paths). Base64 data-URL images are
    decoded into temporary files whose paths are collected for upload; the
    caller is responsible for deleting them.
    """
    role_prefixes = {"system": "System: ", "user": "Human: ", "assistant": "Assistant: "}
    conversation = ""
    temp_files = []

    for msg in messages:
        prefix = role_prefixes.get(msg.role, "")
        if isinstance(msg.content, str):
            # Plain-text content: known roles get "<Prefix><text>\n\n",
            # unknown roles are skipped (matching the original behavior).
            if prefix:
                conversation += f"{prefix}{msg.content}\n\n"
        else:
            # Multi-part content: emit the role prefix once, then each part.
            conversation += prefix
            for part in msg.content:
                if part.type == "text":
                    conversation += part.text or ""
                elif part.type == "image_url" and part.image_url:
                    url = part.image_url.get("url", "")
                    # Only inline base64 data URLs are supported; remote URLs
                    # are ignored.
                    if url.startswith("data:image/"):
                        try:
                            # Data URL layout: "data:image/<fmt>;base64,<payload>"
                            payload = base64.b64decode(url.split(",")[1])
                            with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp:
                                tmp.write(payload)
                                temp_files.append(tmp.name)
                        except Exception as e:
                            logger.error(f"Error processing base64 image: {str(e)}")
            conversation += "\n\n"

    # Trailing cue so Gemini answers as the assistant.
    conversation += "Assistant: "

    return conversation, temp_files
258
+
259
+
260
# Dependency to get the initialized Gemini client
async def get_gemini_client():
    """Return the shared GeminiClient, lazily creating it on first use.

    The global is only published after init() succeeds; previously a failed
    init() left a half-constructed client cached in the global, so every
    later request reused a broken client instead of retrying.

    Raises HTTPException(500) if initialization fails.
    """
    global gemini_client
    if gemini_client is None:
        try:
            client = GeminiClient(SECURE_1PSID, SECURE_1PSIDTS)
            await client.init(timeout=300)
        except Exception as e:
            logger.error(f"Failed to initialize Gemini client: {str(e)}")
            raise HTTPException(status_code=500, detail=f"Failed to initialize Gemini client: {str(e)}")
        gemini_client = client
    return gemini_client
271
+
272
+
273
@app.post("/v1/chat/completions")
async def create_chat_completion(request: ChatCompletionRequest, api_key: str = Depends(verify_api_key)):
    """OpenAI-compatible chat completions endpoint backed by the Gemini web API.

    Supports both plain JSON and SSE streaming responses (request.stream).
    Sampling parameters (temperature, top_p, ...) are accepted but not
    forwarded to Gemini. Raises HTTPException(500) on any failure.
    """
    try:
        # Lazily initialize the shared client; publish to the global only
        # after init() succeeds so a failure can be retried next request.
        global gemini_client
        if gemini_client is None:
            client = GeminiClient(SECURE_1PSID, SECURE_1PSIDTS)
            await client.init(timeout=300)
            gemini_client = client
            logger.info("Gemini client initialized successfully")

        # Convert the OpenAI message list into a single prompt (+ image files).
        conversation, temp_files = prepare_conversation(request.messages)
        logger.info(f"Prepared conversation: {conversation}")
        logger.info(f"Temp files: {temp_files}")

        # Resolve the requested model name to a gemini_webapi Model.
        model = map_model_name(request.model)
        logger.info(f"Using model: {model}")

        # Generate the response; clean up temp image files even on failure
        # (previously they leaked whenever generate_content raised).
        logger.info("Sending request to Gemini...")
        try:
            if temp_files:
                response = await gemini_client.generate_content(conversation, files=temp_files, model=model)
            else:
                response = await gemini_client.generate_content(conversation, model=model)
        finally:
            for temp_file in temp_files:
                try:
                    os.unlink(temp_file)
                except Exception as e:
                    logger.warning(f"Failed to delete temp file {temp_file}: {str(e)}")

        # Extract the text reply.
        reply_text = response.text if hasattr(response, "text") else str(response)
        logger.info(f"Response: {reply_text}")

        if not reply_text or reply_text.strip() == "":
            logger.warning("Empty response received from Gemini")
            reply_text = "服务器返回了空响应。请检查 Gemini API 凭据是否有效。"

        completion_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if request.stream:
            # SSE streaming: Gemini returns the full text at once, so we
            # re-emit it per character in OpenAI chat.completion.chunk format.
            async def generate_stream():
                # Opening chunk announcing the assistant role.
                data = {
                    "id": completion_id,
                    "object": "chat.completion.chunk",
                    "created": created_time,
                    "model": request.model,
                    "choices": [{"index": 0, "delta": {"role": "assistant"}, "finish_reason": None}],
                }
                yield f"data: {json.dumps(data)}\n\n"

                # Content chunks, one character at a time.
                for char in reply_text:
                    data = {
                        "id": completion_id,
                        "object": "chat.completion.chunk",
                        "created": created_time,
                        "model": request.model,
                        "choices": [{"index": 0, "delta": {"content": char}, "finish_reason": None}],
                    }
                    yield f"data: {json.dumps(data)}\n\n"
                    # Small delay to simulate incremental streaming.
                    await asyncio.sleep(0.01)

                # Terminal chunk plus the [DONE] sentinel.
                data = {
                    "id": completion_id,
                    "object": "chat.completion.chunk",
                    "created": created_time,
                    "model": request.model,
                    "choices": [{"index": 0, "delta": {}, "finish_reason": "stop"}],
                }
                yield f"data: {json.dumps(data)}\n\n"
                yield "data: [DONE]\n\n"

            return StreamingResponse(generate_stream(), media_type="text/event-stream")
        else:
            # Non-streaming response. Token usage is approximated by
            # whitespace word counts; Gemini web reports no real usage.
            prompt_tokens = len(conversation.split())
            completion_tokens = len(reply_text.split())
            result = {
                "id": completion_id,
                "object": "chat.completion",
                "created": created_time,
                "model": request.model,
                "choices": [{"index": 0, "message": {"role": "assistant", "content": reply_text}, "finish_reason": "stop"}],
                "usage": {
                    "prompt_tokens": prompt_tokens,
                    "completion_tokens": completion_tokens,
                    "total_tokens": prompt_tokens + completion_tokens,
                },
            }

            logger.info(f"Returning response: {result}")
            return result

    except Exception as e:
        logger.error(f"Error generating completion: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Error generating completion: {str(e)}")
386
+
387
+
388
@app.get("/")
async def root():
    """Health-check endpoint confirming the server is up."""
    status_payload = {"status": "online", "message": "Gemini API FastAPI Server is running"}
    return status_payload
391
+
392
+
393
if __name__ == "__main__":
    import uvicorn

    # Run a development server directly; in Docker the image CMD starts
    # uvicorn itself, so this path is only used for `python main.py`.
    uvicorn.run("main:app", host="0.0.0.0", port=8000, log_level="info")
pyproject.toml ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
[project]
name = "gemi2api-server"
version = "0.1.1"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.11"
dependencies = [
    "browser-cookie3>=0.20.1",
    "fastapi>=0.115.12",
    "gemini-webapi>=1.11.0",
    "uvicorn[standard]>=0.34.1",
]

[[tool.uv.index]]
url = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple"
default = true

[dependency-groups]
dev = [
    "ruff>=0.11.7",
]

[tool.ruff]
line-length = 150  # maximum line length

# Lint rule selection belongs under [tool.ruff.lint]; the previous top-level
# `select`/`ignore` keys are deprecated in current ruff releases.
[tool.ruff.lint]
select = ["E", "F", "W", "I"]  # E: pycodestyle, F: pyflakes, W: pycodestyle warnings, I: isort
ignore = ["E501"]  # line length is governed by line-length / the formatter

[tool.ruff.format]
quote-style = "double"  # use double quotes
indent-style = "tab"  # tabs (the old comment claimed "spaces" — value kept, comment fixed)