Update main.py
Browse files
main.py
CHANGED
@@ -1,55 +1,44 @@
|
|
|
|
|
|
1 |
import requests
|
|
|
|
|
|
|
|
|
2 |
|
3 |
API_URL = "https://api.typegpt.net/v1/chat/completions"
|
4 |
API_KEY = "sk-XzS5hhsa3vpIcRLz3prQirBQXOx2hPydPzSpzdRcE1YddnNm"
|
5 |
BACKEND_MODEL = "pixtral-large-latest"
|
6 |
|
7 |
-
|
8 |
-
|
9 |
-
"gpt-3.5": "You are ChatGPT, a helpful assistant.",
|
10 |
-
"claude": "You are Claude, known for being careful, ethical, and thoughtful.",
|
11 |
-
"mistral": "You are Mistral, known for giving concise and smart answers.",
|
12 |
-
"bard": "You are Bard, a friendly and knowledgeable Google AI assistant."
|
13 |
-
}
|
14 |
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
|
19 |
-
|
|
|
|
|
20 |
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
|
|
25 |
|
|
|
26 |
payload = {
|
27 |
"model": BACKEND_MODEL,
|
28 |
"messages": [
|
29 |
-
{"role": "system", "content":
|
30 |
-
|
31 |
-
]
|
32 |
}
|
33 |
|
34 |
-
|
|
|
|
|
|
|
35 |
|
36 |
-
|
37 |
-
|
38 |
-
return result["choices"][0]["message"]["content"]
|
39 |
-
else:
|
40 |
-
raise Exception(f"API error {response.status_code}: {response.text}")
|
41 |
-
|
42 |
-
if __name__ == "__main__":
|
43 |
-
print("🧠 Choose a virtual model:")
|
44 |
-
for name in SYSTEM_PROMPTS:
|
45 |
-
print(f" - {name}")
|
46 |
-
model = input("Model: ").strip()
|
47 |
-
|
48 |
-
print("\n👤 Enter your message:")
|
49 |
-
user_prompt = input("> ").strip()
|
50 |
-
|
51 |
-
try:
|
52 |
-
print("\n🤖 AI Response:")
|
53 |
-
print(generate_response(model, user_prompt))
|
54 |
-
except Exception as e:
|
55 |
-
print(f"❌ Error: {e}")
|
|
|
1 |
+
from fastapi import FastAPI, Request
|
2 |
+
from pydantic import BaseModel
|
3 |
import requests
|
4 |
+
import json
|
5 |
+
import os
|
6 |
+
|
7 |
+
app = FastAPI()
|
8 |
|
9 |
# Upstream (backend) endpoint configuration.
API_URL = "https://api.typegpt.net/v1/chat/completions"
# SECURITY: avoid committing secrets to source control — prefer supplying the
# key via the TYPEGPT_API_KEY environment variable. The hard-coded value is
# kept only as a backward-compatible fallback for existing deployments.
API_KEY = os.getenv("TYPEGPT_API_KEY", "sk-XzS5hhsa3vpIcRLz3prQirBQXOx2hPydPzSpzdRcE1YddnNm")
# Every virtual model is served by this single backend model.
BACKEND_MODEL = "pixtral-large-latest"

# Map of public-facing model names -> system prompts, loaded once at startup.
# Raises FileNotFoundError / json.JSONDecodeError early (at import) if the
# mapping file is missing or malformed, which is preferable to failing on the
# first request.
with open("model_map.json", "r", encoding="utf-8") as f:
    MODEL_PROMPTS = json.load(f)
|
|
|
|
|
|
|
|
|
|
|
15 |
|
16 |
+
class Message(BaseModel):
    """One chat turn: who spoke (``role``) and what was said (``content``)."""

    # e.g. "system", "user", or "assistant"
    role: str
    # the raw message text
    content: str
|
19 |
|
20 |
+
class ChatRequest(BaseModel):
    """Body of an OpenAI-style chat completion request."""

    # virtual model name; looked up in MODEL_PROMPTS by the endpoint
    model: str
    # full conversation history supplied by the client
    messages: list[Message]
|
23 |
|
24 |
+
@app.post("/v1/chat/completions")
async def openai_compatible(request: ChatRequest):
    """OpenAI-compatible chat endpoint.

    Maps the requested virtual model name to a system prompt via
    ``MODEL_PROMPTS`` (falling back to a generic assistant prompt), forwards
    the user turns to the backend model, and relays the backend's JSON
    response unchanged.

    Returns the upstream JSON body on success, or a ``{"error": ...}`` dict
    when the input is empty or the upstream call fails.
    """
    # Only user turns are forwarded; any client-supplied system prompt is
    # deliberately discarded in favour of the mapped one.
    user_messages = [m for m in request.messages if m.role == "user"]
    if not user_messages:
        return {"error": "No user message provided."}

    model_prompt = MODEL_PROMPTS.get(request.model, "You are a helpful AI assistant.")
    payload = {
        "model": BACKEND_MODEL,
        "messages": [
            {"role": "system", "content": model_prompt}
        ] + [{"role": "user", "content": m.content} for m in user_messages],
    }

    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json",
    }

    # NOTE(review): requests is a blocking client inside an async handler and
    # will stall the event loop under load — consider an async HTTP client.
    # A timeout is set so a hung backend cannot block this worker forever
    # (the original call could hang indefinitely).
    try:
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
    except requests.RequestException as exc:
        return {"error": f"Upstream request failed: {exc}"}

    # Relay the backend body as-is; guard against non-JSON error pages so a
    # 502/HTML response does not surface as an unhandled ValueError.
    try:
        return response.json()
    except ValueError:
        return {"error": f"Upstream returned non-JSON (HTTP {response.status_code})."}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|