"""Gradio demo: a simple chat box that prepends a fixed system prompt.

The model call itself is still a placeholder — `chat_with_system_prompt`
only formats the prompt text. Swap in a real inference call where noted.
"""
import gradio as gr

# NOTE(review): the original pasted `gr.load("models/Qwen/Qwen2.5-Coder-32B-Instruct").launch()`
# on the same line as the import. That is a *second*, separate app whose
# `.launch()` blocks forever, so the Blocks UI below would never start.
# If you want the hosted model UI instead of this custom one, use:
#   gr.load("models/Qwen/Qwen2.5-Coder-32B-Instruct").launch()
# and delete everything below.


def chat_with_system_prompt(user_input):
    """Build the full prompt (system prompt + user input) and return it.

    Args:
        user_input: Raw text typed by the user.

    Returns:
        The assembled prompt string. This is a placeholder — replace the
        `return` with a real model-inference call when one is available.
    """
    # Example of a system prompt
    system_prompt = "You are a helpful AI specialized in code-related tasks."
    response = f"{system_prompt}\n\nUser: {user_input}\n\nAI:"
    # Here, you would call your model's inference API with the full prompt
    # For example:
    # response = model_inference(f"{system_prompt}\nUser: {user_input}")
    return response  # Placeholder for now


# Build the Gradio interface
with gr.Blocks() as demo:
    with gr.Row():
        user_input = gr.Textbox(label="Your Input", placeholder="Ask something...")
    with gr.Row():
        output = gr.Textbox(label="AI Response", lines=5, interactive=False)
    # Pressing Enter in the input box routes the text through the handler.
    user_input.submit(chat_with_system_prompt, inputs=user_input, outputs=output)

# Launch the app (guarded so importing this module doesn't start a server)
if __name__ == "__main__":
    demo.launch()