Manojajj commited on
Commit
4062a25
·
verified ·
1 Parent(s): 8a4c070

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -23
app.py CHANGED
@@ -1,33 +1,42 @@
1
  import gradio as gr
2
- from transformers import pipeline
3
 
4
- # Initialize the pipeline for code generation and assistance
5
- pipe = pipeline("text-generation", model="Qwen/Qwen2.5-Coder-32B-Instruct")
 
 
6
 
7
- # Function to interact with the model for code-related assistance
8
- def code_assistance(user_input):
9
- # Define the system message to set the context
10
- system_message = "You are Qwen, a code assistant created by Alibaba Cloud. You assist with code generation, debugging, and explanation tasks."
11
-
12
- # Format the prompt with the system message and user input (code-related query)
13
- prompt = f"{system_message}\nUser: {user_input}\nAssistant (Code Assistance):"
14
-
15
- # Use the pipeline to generate the response for code assistance
16
- response = pipe(prompt, max_length=512, num_return_sequences=1)
17
-
18
- # Extract and clean the response to return only the assistant's code suggestion or explanation
19
- generated_response = response[0]['generated_text'].split("Assistant (Code Assistance):")[1].strip()
20
 
21
- return generated_response
 
 
 
 
22
 
23
- # Create the Gradio interface for the code assistance chatbot
24
  iface = gr.Interface(
25
- fn=code_assistance,
26
- inputs=gr.Textbox(lines=5, placeholder="Ask for code help..."),
 
 
 
27
  outputs="text",
28
- title="Qwen2.5-Coder Chatbot",
29
- description="A chatbot using Qwen2.5-Coder for code generation, debugging, and explanation tasks."
30
  )
31
 
32
- # Launch the Gradio interface
33
  iface.launch()
 
1
  import gradio as gr
2
+ from huggingface_hub import InferenceClient
3
 
4
# Function to interact with the model using the Hugging Face Inference API
def chat_with_model(user_input, hf_api_key):
    """Send a code-assistance query to Qwen2.5-Coder and return the reply.

    Parameters:
        user_input: The user's coding question or request (plain text).
        hf_api_key: Hugging Face API token used to authenticate the call.

    Returns:
        The assistant's complete response text, assembled from the
        streamed chunks (or a short error message if no key was given).
    """
    # Fail fast with a readable message instead of an opaque HTTP 401
    # raised from inside the client later.
    if not hf_api_key:
        return "Please enter your Hugging Face API key."

    # Initialize the InferenceClient with the provided API key
    client = InferenceClient(api_key=hf_api_key)

    # Define the messages for the chat (system message tailored for a code assistant)
    messages = [
        {"role": "system", "content": "You are a code assistant that helps with code generation, debugging, and explanations."},
        {"role": "user", "content": user_input}
    ]

    # Create a stream for chat completions using the API
    stream = client.chat.completions.create(
        model="Qwen/Qwen2.5-Coder-32B-Instruct",
        messages=messages,
        max_tokens=500,
        stream=True
    )

    # Collect the generated response from the model.
    # NOTE: in OpenAI-style streaming the terminating chunk carries
    # delta.content == None, and some chunks may have an empty `choices`
    # list — the original `response += ...delta.content` raised TypeError
    # at the end of every stream; guard both cases.
    response = ""
    for chunk in stream:
        if chunk.choices:
            response += chunk.choices[0].delta.content or ""
    return response
28
 
29
# Assemble the Gradio UI for the code assistant and start serving it.
# Two inputs: the coding question, and the user's own HF token so the
# Space never needs a baked-in secret.
question_box = gr.Textbox(lines=5, placeholder="Ask me anything about coding...")
token_box = gr.Textbox(
    lines=1,
    placeholder="Enter your Hugging Face API key",
    type="password",  # API key input — masked on screen
)

iface = gr.Interface(
    fn=chat_with_model,
    inputs=[question_box, token_box],
    outputs="text",
    title="Code Assistant with Qwen2.5-Coder",
    description="A code assistant that helps you with code generation, debugging, and explanations using the Qwen2.5-Coder model via Hugging Face Inference API.",
)

# Launch the interface
iface.launch()