Upload 3 files
- Dockerfile +16 -0
- app.py +97 -0
- requirements.txt +4 -0
Dockerfile
ADDED
@@ -0,0 +1,16 @@
+# Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
+# you will also find guides on how best to write your Dockerfile
+
+FROM python:3.9
+
+RUN useradd -m -u 1000 user
+USER user
+ENV PATH="/home/user/.local/bin:$PATH"
+
+WORKDIR /app
+
+COPY --chown=user ./requirements.txt requirements.txt
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+COPY --chown=user . /app
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
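The CMD line above is what the container runs inside the Space. For quick local testing without Docker, roughly the same server can be started from Python; this is a minimal sketch (the file name is hypothetical, and it assumes HF_TOKEN is already exported and app.py is in the working directory):

# run_local.py - hypothetical helper, not part of the uploaded files
import os

import uvicorn

# app.py reads HF_TOKEN at import time, so fail early with a clear message.
if not os.environ.get("HF_TOKEN"):
    raise SystemExit("Set HF_TOKEN before starting the server.")

# Serve app.py's FastAPI instance on the same host/port as the Dockerfile CMD.
uvicorn.run("app:app", host="0.0.0.0", port=7860)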
app.py
ADDED
@@ -0,0 +1,97 @@
+from fastapi import FastAPI, HTTPException
+from pydantic import BaseModel
+from huggingface_hub import InferenceClient
+import os
+
+app = FastAPI()
+
+# Get the token from the environment variable
+hf_token = os.environ.get("HF_TOKEN")
+
+if hf_token:
+    client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct", token=hf_token)
+else:
+    raise ValueError("HF_TOKEN environment variable not set. Please add it as a secret in your Hugging Face Space.")
+
+class ChatRequest(BaseModel):
+    message: str
+    system_message: str = "You are a friendly Chatbot."
+    max_tokens: int = 512
+    temperature: float = 0.7
+    top_p: float = 0.95
+
+class ChatResponse(BaseModel):
+    response: str
+
+@app.post("/chat", response_model=ChatResponse)
+async def chat(request: ChatRequest):
+    try:
+        messages = [
+            {"role": "system", "content": request.system_message},
+            {"role": "user", "content": request.message},
+        ]
+
+        response = client.chat_completion(
+            messages=messages,
+            max_tokens=request.max_tokens,
+            temperature=request.temperature,
+            top_p=request.top_p,
+        )
+
+        return {"response": response.choices[0].message.content}
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
+# from fastapi import FastAPI, HTTPException
+# from pydantic import BaseModel
+# from huggingface_hub import InferenceClient
+# import os
+
+# app = FastAPI()
+
+# # Get the token from the environment variable
+# hf_token = os.environ.get("HF_TOKEN")
+
+# if hf_token:
+#     client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=hf_token)
+# else:
+#     raise ValueError("HF_TOKEN environment variable not set. Please add it as a secret in your Hugging Face Space.")
+
+# # Rest of your code...
+
+# class ChatRequest(BaseModel):
+#     message: str
+#     history: list[tuple[str, str]] = []
+#     system_message: str = "You are a friendly Chatbot."
+#     max_tokens: int = 512
+#     temperature: float = 0.7
+#     top_p: float = 0.95
+
+# class ChatResponse(BaseModel):
+#     response: str
+
+# @app.post("/chat", response_model=ChatResponse)
+# async def chat(request: ChatRequest):
+#     try:
+#         messages = [{"role": "system", "content": request.system_message}]
+#         for val in request.history:
+#             if val[0]:
+#                 messages.append({"role": "user", "content": val[0]})
+#             if val[1]:
+#                 messages.append({"role": "assistant", "content": val[1]})
+#         messages.append({"role": "user", "content": request.message})
+
+#         response = ""
+#         for message in client.chat_completion(
+#             messages,
+#             max_tokens=request.max_tokens,
+#             stream=True,
+#             temperature=request.temperature,
+#             top_p=request.top_p,
+#         ):
+#             token = message.choices[0].delta.content
+#             response += token
+
+#         return {"response": response}
+#     except Exception as e:
+#         raise HTTPException(status_code=500, detail=str(e))
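Once the Space is up, the /chat endpoint can be exercised with a plain HTTP POST. Below is a minimal client sketch; the base URL is a placeholder for wherever this Space is actually hosted, and the payload fields mirror the ChatRequest model above:

# query_chat.py - hypothetical client, not part of the uploaded files
import requests

# Replace with the real URL of the deployed Space.
BASE_URL = "https://YOUR-SPACE.hf.space"

payload = {
    "message": "Hello! What can you do?",
    # The fields below are optional; defaults come from ChatRequest.
    "system_message": "You are a friendly Chatbot.",
    "max_tokens": 256,
    "temperature": 0.7,
    "top_p": 0.95,
}

resp = requests.post(f"{BASE_URL}/chat", json=payload, timeout=60)
resp.raise_for_status()
print(resp.json()["response"])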
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+fastapi
+uvicorn
+huggingface_hub
+datasets