Update app.py
app.py CHANGED
```diff
@@ -1,3 +1,24 @@
 import gradio as gr
 
-gr.load("models/Qwen/Qwen2.5-Coder-32B-Instruct").launch()
+gr.load("models/Qwen/Qwen2.5-Coder-32B-Instruct").launch()
+
+# Define the custom function with a system prompt
+def chat_with_system_prompt(user_input):
+    # Example of a system prompt
+    system_prompt = "You are a helpful AI specialized in code-related tasks."
+    response = f"{system_prompt}\n\nUser: {user_input}\n\nAI:"
+    # Here, you would call your model's inference API with the full prompt
+    # For example:
+    # response = model_inference(f"{system_prompt}\nUser: {user_input}")
+    return response  # Placeholder for now
+
+# Build the Gradio interface
+with gr.Blocks() as demo:
+    with gr.Row():
+        user_input = gr.Textbox(label="Your Input", placeholder="Ask something...")
+    with gr.Row():
+        output = gr.Textbox(label="AI Response", lines=5, interactive=False)
+    user_input.submit(chat_with_system_prompt, inputs=user_input, outputs=output)
+
+# Launch the app
+demo.launch()
```
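The new `chat_with_system_prompt` function only returns the assembled prompt string; the commented-out `model_inference(...)` line marks where a real inference call would go. A minimal sketch of that step, assuming the `huggingface_hub` `InferenceClient` chat-completion API (not part of this commit) and folding everything into the single Blocks app so only one interface is launched, could look like this:

```python
# Sketch only, not the committed app.py: assumes huggingface_hub's InferenceClient
# and a valid HF token in the environment; model name taken from the gr.load() call above.
import gradio as gr
from huggingface_hub import InferenceClient

client = InferenceClient("Qwen/Qwen2.5-Coder-32B-Instruct")

def chat_with_system_prompt(user_input):
    # Send the system prompt and the user message as a chat-completion request
    response = client.chat_completion(
        messages=[
            {"role": "system", "content": "You are a helpful AI specialized in code-related tasks."},
            {"role": "user", "content": user_input},
        ],
        max_tokens=512,
    )
    return response.choices[0].message.content

with gr.Blocks() as demo:
    with gr.Row():
        user_input = gr.Textbox(label="Your Input", placeholder="Ask something...")
    with gr.Row():
        output = gr.Textbox(label="AI Response", lines=5, interactive=False)
    user_input.submit(chat_with_system_prompt, inputs=user_input, outputs=output)

demo.launch()
```

Whether to keep the separate `gr.load(...).launch()` call depends on whether the Space should still expose the stock model UI; the sketch above assumes it is replaced by the Blocks app so only `demo.launch()` runs.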