Update app.py
app.py
CHANGED
@@ -1,21 +1,27 @@
-
+import os
+import litellm
+from fastapi import FastAPI
+from pydantic import BaseModel
 
 # Model configuration
 model_name = "gemini/gemini-1.5-pro"
 api_key = os.environ.get("GEMINI_API_KEY")
 
-#
-
+# LiteLLM configuration
+litellm.api_key = api_key
+
+app = FastAPI()
+
+class PromptRequest(BaseModel):
+    prompt: str
+
+@app.post("/")
+async def predict(request: PromptRequest):
     try:
-        response = completion(
+        response = litellm.completion(
             model=model_name,
-            messages=[{"role": "user", "content": prompt}]
-            api_key=api_key
+            messages=[{"role": "user", "content": request.prompt}]
         )
-        return response.choices[0].message.content
+        return {"response": response.choices[0].message.content}
     except Exception as e:
-        return
-
-# For Hugging Face Spaces
-def predict(prompt):
-    return get_completion(prompt)
+        return {"error": str(e)}
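As a quick smoke test of the new endpoint, here is a minimal client sketch. It assumes the app is served with uvicorn app:app --host 0.0.0.0 --port 7860 (7860 being the port Hugging Face Spaces conventionally exposes) and that GEMINI_API_KEY is set before startup; the URL, port, and prompt text below are illustrative assumptions, not part of this commit.

import requests

# POST to the "/" route defined by @app.post("/"); the JSON body must match
# the PromptRequest model, i.e. a single "prompt" string field.
resp = requests.post(
    "http://localhost:7860/",  # assumed local URL and port
    json={"prompt": "Say hello in one sentence."},
)

# On success the handler returns {"response": ...}; on failure, {"error": ...}.
print(resp.json())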