Gouzi Mohaled committed · Commit be2dee9 · 1 parent: 3d900a3
chatbot creation
Files changed:
- Dockerfile +5 -12
- app.py +31 -0
- requirements.txt +6 -0
Dockerfile
CHANGED
@@ -1,18 +1,11 @@
-FROM python:3.
-
-# Install dependencies
-RUN pip install litellm fastapi uvicorn pydantic
+FROM python:3.9
 
 WORKDIR /app
 
-
-COPY app.py .
+COPY requirements.txt .
 
-
-ENV GEMINI_API_KEY=${GEMINI_API_KEY}
+RUN pip install --no-cache-dir -r requirements.txt
 
-
-EXPOSE 7860
+COPY . .
 
-
-CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"]
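Note: the rewritten Dockerfile copies requirements.txt and installs dependencies before copying the rest of the source, so Docker's layer cache can skip the pip install step when only application code changes. The listening port also moves from 7860 to 8000; Docker-based Spaces serve port 7860 by default unless app_port is set in the Space README metadata, so the new port is worth double-checking. Below is a minimal smoke test against a locally built image; the "prompt" and "model" field names are assumptions, since PromptRequest is not shown in this diff, and "dev-key" stands in for whatever USER_API_KEY is set to.

# Hypothetical check against a container started with:
#   docker run -p 8000:8000 -e USER_API_KEY=dev-key <image>
import requests

resp = requests.post(
    "http://localhost:8000/",
    headers={"x-api-key": "dev-key"},  # compared against USER_API_KEY by verify_api_key
    json={"prompt": "Hello", "model": "gemini/gemini-1.5-pro"},  # assumed PromptRequest fields
)
print(resp.status_code, resp.json())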
app.py
CHANGED
@@ -2,6 +2,7 @@ import os
 import litellm
 from fastapi import FastAPI, HTTPException, Header, Depends
 from pydantic import BaseModel
+import gradio as gr
 
 # Model configuration
 model_name = "gemini/gemini-1.5-pro"
@@ -28,6 +29,30 @@ async def verify_api_key(x_api_key: str = Header(...)):
     if x_api_key != USER_API_KEY:
         raise HTTPException(status_code=401, detail="Invalid API key")
 
+# Conversation history
+conversation_history = []
+
+def chatbot_function(user_input):
+    conversation_history.append({"role": "user", "content": user_input})
+
+    response = litellm.completion(
+        model=model_name,  # use the default model for the chatbot
+        messages=conversation_history
+    )
+
+    conversation_history.append({"role": "assistant", "content": response.choices[0].message.content})
+
+    return conversation_history
+
+# Gradio interface
+iface = gr.Interface(
+    fn=chatbot_function,
+    inputs=gr.Textbox(lines=2, placeholder="Your message..."),
+    outputs="chatbot",
+    title="Chatbot",
+)
+
+
 @app.post("/", dependencies=[Depends(verify_api_key)])
 async def predict(request: PromptRequest):
     try:
@@ -41,3 +66,9 @@ async def predict(request: PromptRequest):
         return {"response": response.choices[0].message.content, "model": request.model}
     except Exception as e:
         return {"error": str(e)}
+
+# Launch the Gradio interface after starting FastAPI
+if __name__ == "__main__":
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=8000)  # start FastAPI
+    iface.launch(share=True)  # launch Gradio and share the interface
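Note: as committed, the last two lines cannot both run: uvicorn.run() blocks until the server exits, so iface.launch(share=True) is never reached. The module-level conversation_history is also shared by every visitor rather than kept per session. One hedged sketch of a fix is to mount the Gradio UI on the existing FastAPI app with gr.mount_gradio_app(); the /ui path is an arbitrary choice here, and serving role/content dicts to the chatbot output assumes a gradio release with "messages"-style chat history.

# Sketch: serve the API and the chat UI from one process instead of
# launching a second server that is never reached.
import gradio as gr

app = gr.mount_gradio_app(app, iface, path="/ui")  # UI at http://host:8000/ui

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)  # serves both the API and the UI

Per-user history (e.g. via gr.State) could then replace the global list, so two visitors' conversations don't interleave.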
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+fastapi
+uvicorn
+pydantic
+litellm
+python-multipart
+gradio
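Note: none of the six dependencies are pinned, so rebuilds can pull in incompatible new majors (gradio in particular changes its API often); capturing a known-good environment with pip freeze > requirements.txt is one way to keep the Docker build reproducible.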