Update app.py
app.py CHANGED

@@ -32,7 +32,7 @@ model, tokenizer = load_model_and_tokenizer()
 
 # Generate Response
 # =================
-def generate_response(prompt, chat_history):
+def generate_response(prompt, chat_history, max_new_tokens, temperature):
     """
     Generate a response from the model based on the user prompt and chat history.
     """
@@ -46,11 +46,11 @@ def generate_response(prompt, chat_history):
 
     generated_ids = model.generate(
         **model_inputs,
-        max_new_tokens=
+        max_new_tokens=max_new_tokens,
         do_sample=True,
         top_k=50,
         top_p=0.95,
-        temperature=
+        temperature=temperature,
         output_scores=True,
         return_dict_in_generate=True,
         return_legacy_cache=True  # Ensure legacy format is returned
@@ -76,19 +76,28 @@ def gradio_interface():
     Create and launch the Gradio interface.
     """
     with gr.Blocks() as demo:
-
-
-
+        with gr.Row():
+            with gr.Column(scale=3):
+                chatbot = gr.Chatbot(label="Chat with Qwen/Qwen2.5-Coder-0.5B-Instruct", type="messages")
+                msg = gr.Textbox(label="User Input")
+                with gr.Row():
+                    submit = gr.Button("Submit")
+                    clear = gr.Button("Clear Chat")
+            with gr.Column(scale=1):
+                with gr.Box():
+                    gr.Markdown("### Settings")
+                    max_new_tokens = gr.Slider(50, 1024, value=512, step=1, label="Max New Tokens")
+                    temperature = gr.Slider(0.1, 1.0, value=0.7, step=0.05, label="Temperature")
 
-        def respond(message, chat_history):
+        def respond(message, chat_history, max_new_tokens, temperature):
             chat_history.append({"role": "user", "content": message})
             response = ""
-            for chunk in generate_response(message, chat_history):
+            for chunk in generate_response(message, chat_history, max_new_tokens, temperature):
                 response = chunk
                 yield chat_history + [{"role": "assistant", "content": response}]
             chat_history.append({"role": "assistant", "content": response})
 
-
+        submit.click(respond, [msg, chatbot, max_new_tokens, temperature], [chatbot])
         clear.click(clear_chat, None, [chatbot])
 
     demo.launch()
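The hunks above show only the changed lines. For context, here is a minimal sketch of how the updated generate_response presumably threads the two new parameters into model.generate: the generate call mirrors the hunk, while the model setup, chat templating, and decoding are assumptions about the unchanged parts of app.py.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed setup; the real app obtains these via load_model_and_tokenizer().
model_name = "Qwen/Qwen2.5-Coder-0.5B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

def generate_response(prompt, chat_history, max_new_tokens, temperature):
    """
    Generate a response from the model based on the user prompt and chat history.
    """
    # chat_history already ends with the user message appended by respond().
    text = tokenizer.apply_chat_template(
        chat_history, tokenize=False, add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

    with torch.no_grad():
        generated_ids = model.generate(
            **model_inputs,
            max_new_tokens=max_new_tokens,  # now driven by the UI slider
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=temperature,        # now driven by the UI slider
            output_scores=True,
            return_dict_in_generate=True,
            return_legacy_cache=True        # Ensure legacy format is returned
        )

    # Decode only the tokens generated past the prompt.
    new_tokens = generated_ids.sequences[0][model_inputs.input_ids.shape[1]:]
    yield tokenizer.decode(new_tokens, skip_special_tokens=True)

Because respond() iterates over generate_response(), a single yield keeps the generator contract but delivers the whole reply in one chunk; token-by-token streaming would need something like transformers' TextIteratorStreamer instead.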
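Two smaller points on the new interface code. clear_chat is referenced by the hunk but defined elsewhere in app.py; with the Chatbot in type="messages" mode, a minimal version presumably just resets the message list:

def clear_chat():
    # Return an empty message list to reset the Chatbot component.
    return []

Note also that gr.Box exists only in Gradio 3.x; it was removed in Gradio 4.0, where gr.Group is the closest replacement, so the settings panel as written assumes a 3.x release.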