snackshell committed
Commit 383b059 · verified · Parent: 7779a28

Upload 5 files

Files changed (5)
  1. Dockerfile.txt +20 -0
  2. README.md +7 -6
  3. app.py +104 -0
  4. gitattributes.txt +35 -0
  5. requirements.txt +5 -0
Dockerfile.txt ADDED
@@ -0,0 +1,20 @@
+ # Use the official Python image from the Docker Hub
+ FROM python:3.11-slim
+
+ # Set the working directory in the container
+ WORKDIR /app
+
+ # Copy the requirements file first to leverage Docker cache
+ COPY requirements.txt .
+
+ # Install the required Python packages
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Copy the rest of the application code into the container
+ COPY . .
+
+ # Expose the port the app runs on
+ EXPOSE 7860
+
+ # Command to run the FastAPI application using uvicorn
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
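Note that the file is uploaded as Dockerfile.txt; a Docker Space (and a plain `docker build`) looks for a file named Dockerfile, so either rename it or point the build at it explicitly. A minimal local build-and-run sketch (the image name selamgpt-api is an arbitrary choice, not from the commit):

    docker build -f Dockerfile.txt -t selamgpt-api .
    docker run -p 7860:7860 selamgpt-api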
README.md CHANGED
@@ -1,12 +1,13 @@
  ---
- title: Selamgptmodels
- emoji: 🚀
- colorFrom: green
- colorTo: red
+ title: Marchupdate
+ emoji: 🏆
+ colorFrom: red
+ colorTo: indigo
  sdk: docker
  pinned: false
- license: mit
- short_description: Something cool
+ app_file: app.py # Replace with your actual app filename
+ app_port: 7860
+
  ---
 
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,104 @@
+ from fastapi import FastAPI, HTTPException, Depends, Header, Request
+ from fastapi.responses import StreamingResponse
+ from pydantic import BaseModel
+ from typing import List
+ from g4f import ChatCompletion
+ from slowapi import Limiter, _rate_limit_exceeded_handler
+ from slowapi.util import get_remote_address
+ from slowapi.errors import RateLimitExceeded
+
+ app = FastAPI()
+
+ # Initialize the rate limiter and register it with the app so that
+ # exceeded limits return HTTP 429 instead of an unhandled exception
+ limiter = Limiter(key_func=get_remote_address)
+ app.state.limiter = limiter
+ app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
+
+ # List of available models
+ models = [
+     "gpt-4o", "gpt-4o-mini", "gpt-4",
+     "gpt-4-turbo", "gpt-3.5-turbo",
+     "claude-3.7-sonnet", "o3-mini", "o1", "claude-3.5", "llama-3.1-405b",
+     "gemini-flash", "blackboxai-pro", "openchat-3.5", "glm-4-9B", "blackboxai"
+ ]
+
+ # Request and response models
+ class Message(BaseModel):
+     role: str
+     content: str
+
+ class ChatRequest(BaseModel):
+     model: str
+     messages: List[Message]
+     streaming: bool = True  # Stream the response by default
+
+ class ChatResponse(BaseModel):
+     role: str
+     content: str
+
+ # Dependency to check the API key sent in the X-API-Key header
+ async def verify_api_key(x_api_key: str = Header(...)):
+     if x_api_key != "vs-5wEvIw6vfLKIypGm7uiNoWuXrJcg4vAL":  # Replace with your actual API key
+         raise HTTPException(status_code=403, detail="Invalid API key")
+
+ @app.get("/v1/models", tags=["Models"])
+ async def get_models():
+     """Endpoint to get the list of available models."""
+     return {"models": models}
+
+ @app.post("/v1/chat/completions", tags=["Chat Completion"])
+ @limiter.limit("10/minute")  # Rate limit to 10 requests per minute
+ async def chat_completion(
+     request: Request,
+     chat_request: ChatRequest,
+     api_key: str = Depends(verify_api_key)
+ ):
+     # Validate model
+     if chat_request.model not in models:
+         raise HTTPException(status_code=400, detail="Invalid model selected.")
+
+     # Check that messages are provided
+     if not chat_request.messages:
+         raise HTTPException(status_code=400, detail="Messages cannot be empty.")
+
+     # Convert messages to the format expected by ChatCompletion
+     formatted_messages = [{"role": msg.role, "content": msg.content} for msg in chat_request.messages]
+
+     try:
+         if chat_request.streaming:
+             # Stream the response as server-sent events
+             def event_stream():
+                 response = ChatCompletion.create(
+                     model=chat_request.model,
+                     messages=formatted_messages,
+                     stream=True  # Enable streaming
+                 )
+                 for chunk in response:
+                     if isinstance(chunk, dict) and 'choices' in chunk:
+                         for choice in chunk['choices']:
+                             if 'message' in choice:
+                                 yield f"data: {choice['message']['content']}\n\n"
+                     else:
+                         yield f"data: {chunk}\n\n"  # Fallback if chunk is not as expected
+
+             return StreamingResponse(event_stream(), media_type="text/event-stream")
+         else:
+             # Non-streaming response
+             response = ChatCompletion.create(
+                 model=chat_request.model,
+                 messages=formatted_messages
+             )
+
+             if isinstance(response, str):
+                 response_content = response  # g4f may return a plain string
+             else:
+                 try:
+                     response_content = response['choices'][0]['message']['content']
+                 except (IndexError, KeyError, TypeError):
+                     raise HTTPException(status_code=500, detail="Unexpected response structure.")
+
+             return ChatResponse(role="assistant", content=response_content)
+
+     except HTTPException:
+         raise  # Propagate deliberate HTTP errors instead of wrapping them as 500s
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=str(e))
+
+ if __name__ == "__main__":
+     import uvicorn
+     uvicorn.run(app, host="0.0.0.0", port=7860)
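For reference, a minimal client sketch against this API, assuming the server is running locally on port 7860 and reusing the placeholder key from app.py (both assumptions to replace in a real deployment):

    import requests  # assumes the requests package is installed

    BASE_URL = "http://localhost:7860"  # assumption: local dev server
    API_KEY = "vs-5wEvIw6vfLKIypGm7uiNoWuXrJcg4vAL"  # placeholder key from app.py
    HEADERS = {"x-api-key": API_KEY}

    # List the available models
    print(requests.get(f"{BASE_URL}/v1/models", headers=HEADERS).json())

    # Non-streaming chat completion
    payload = {
        "model": "gpt-4o-mini",
        "messages": [{"role": "user", "content": "Say hello"}],
        "streaming": False,
    }
    resp = requests.post(f"{BASE_URL}/v1/chat/completions", headers=HEADERS, json=payload)
    print(resp.json()["content"])

    # Streaming: consume the text/event-stream line by line
    payload["streaming"] = True
    with requests.post(f"{BASE_URL}/v1/chat/completions", headers=HEADERS,
                       json=payload, stream=True) as resp:
        for line in resp.iter_lines(decode_unicode=True):
            if line and line.startswith("data: "):
                print(line[len("data: "):], end="", flush=True)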
gitattributes.txt ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ fastapi
+ uvicorn
+ pydantic
+ slowapi
+ g4f
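To reproduce the environment locally, the standard invocation applies; since the versions are unpinned, each install resolves to the latest releases at install time:

    pip install -r requirements.txt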