Update app.py
app.py
CHANGED
@@ -1,146 +1,41 @@
-import
-import
-import
-
-
-
-
-#
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-""
-
-
-
-
-
-
-
-"""
-
-
-css = """
-h1 {
-  text-align: center;
-  display: block;
-}
-
-#duplicate-button {
-  margin: auto;
-  color: white;
-  background: #1565c0;
-  border-radius: 100vh;
-}
-"""
-
-# Load the tokenizer and model
-tokenizer = AutoTokenizer.from_pretrained("OnlyCheeini/greesychat-turbo")
-model = AutoModelForCausalLM.from_pretrained("OnlyCheeini/greesychat-turbo", device_map="auto")  # to("cuda:0")
-terminators = [
-    tokenizer.eos_token_id,
-    tokenizer.convert_tokens_to_ids("<|eot_id|>")
-]
-
-@spaces.GPU(duration=120)
-def chat_llama3_8b(message: str,
-                   history: list,
-                   temperature: float,
-                   max_new_tokens: int
-                   ) -> str:
-    """
-    Generate a streaming response using the llama3-8b model.
-    Args:
-        message (str): The input message.
-        history (list): The conversation history used by ChatInterface.
-        temperature (float): The temperature for generating the response.
-        max_new_tokens (int): The maximum number of new tokens to generate.
-    Returns:
-        str: The generated response.
-    """
-    conversation = []
-    for user, assistant in history:
-        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
-    conversation.append({"role": "user", "content": message})
-
-    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)
-
-    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
-
-    generate_kwargs = dict(
-        input_ids=input_ids,
-        streamer=streamer,
-        max_new_tokens=max_new_tokens,
-        do_sample=True,
-        temperature=temperature,
-        eos_token_id=terminators,
+from fastapi import FastAPI, HTTPException
+from pydantic import BaseModel
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+
+app = FastAPI()
+
+# Load your fine-tuned model and tokenizer
+model_name = "OnlyCheeini/greesychat-turbo"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16).to("cuda")
+
+class OpenAIRequest(BaseModel):
+    model: str
+    prompt: str
+    max_tokens: int = 64
+    temperature: float = 0.7
+    top_p: float = 0.9
+
+class OpenAIResponse(BaseModel):
+    choices: list
+
+@app.post("/v1/completions", response_model=OpenAIResponse)
+async def generate_text(request: OpenAIRequest):
+    if request.model != model_name:
+        raise HTTPException(status_code=400, detail="Model not found")
+
+    inputs = tokenizer(request.prompt, return_tensors="pt").to("cuda")
+    outputs = model.generate(
+        **inputs,
+        max_length=inputs['input_ids'].shape[1] + request.max_tokens,
+        temperature=request.temperature,
+        top_p=request.top_p,
     )
-    # This will enforce greedy generation (do_sample=False) when the temperature is passed 0, avoiding the crash.
-    if temperature == 0:
-        generate_kwargs['do_sample'] = False
-
-    t = Thread(target=model.generate, kwargs=generate_kwargs)
-    t.start()
-
-    outputs = []
-    for text in streamer:
-        outputs.append(text)
-        #print(outputs)
-        yield "".join(outputs)
-

-
-
+    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return OpenAIResponse(choices=[{"text": generated_text}])

-with gr.Blocks(fill_height=True, css=css) as demo:
-
-    gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
-    gr.ChatInterface(
-        fn=chat_llama3_8b,
-        chatbot=chatbot,
-        fill_height=True,
-        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
-        additional_inputs=[
-            gr.Slider(minimum=0,
-                      maximum=1,
-                      step=0.1,
-                      value=0.95,
-                      label="Temperature",
-                      render=False),
-            gr.Slider(minimum=128,
-                      maximum=4096,
-                      step=1,
-                      value=512,
-                      label="Max new tokens",
-                      render=False),
-        ],
-        examples=[
-            ['How to setup a human base on Mars? Give short answer.'],
-            ['Explain theory of relativity to me like I’m 8 years old.'],
-            ['What is 9,000 * 9,000?'],
-            ['Write a pun-filled happy birthday message to my friend Alex.'],
-            ['Justify why a penguin might make a good king of the jungle.']
-        ],
-        cache_examples=False,
-    )
-
-    gr.Markdown(LICENSE)
-
 if __name__ == "__main__":
-
-
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=8000)
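One caveat about the new generation call: transformers applies temperature and top_p only when sampling is enabled, and this commit leaves do_sample at its default of False (greedy decoding), so those request fields are effectively ignored and usually trigger a warning. A possible adjustment, shown here only as a sketch and not part of the commit, would be:

outputs = model.generate(
    **inputs,
    max_new_tokens=request.max_tokens,    # counts generated tokens only, unlike max_length
    do_sample=True,                       # required for temperature/top_p to take effect
    temperature=request.temperature,
    top_p=request.top_p,
    pad_token_id=tokenizer.eos_token_id,  # avoids the missing-pad-token warning on Llama-style models
)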
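For reference, a minimal sketch of how a client might call the new /v1/completions endpoint once the server is started with "python app.py" (the host, port, and prompt below are assumptions for illustration, not part of the commit):

import requests

# Assumes the server from app.py is running locally (uvicorn on 0.0.0.0:8000).
url = "http://localhost:8000/v1/completions"

payload = {
    "model": "OnlyCheeini/greesychat-turbo",  # must match model_name, otherwise the server returns 400
    "prompt": "Write a one-line greeting.",   # example prompt (hypothetical)
    "max_tokens": 64,
    "temperature": 0.7,
    "top_p": 0.9,
}

response = requests.post(url, json=payload)
response.raise_for_status()

# The server responds with {"choices": [{"text": ...}]}.
print(response.json()["choices"][0]["text"])

Note that, as written, the returned text includes the prompt, since outputs[0] contains the input tokens followed by the generated ones.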