Update app.py
app.py CHANGED
@@ -1,57 +1,90 @@
import gradio as gr
from huggingface_hub import InferenceClient

-# Initialize
+# Initialize a list to store the conversation history
conversation_history = []

# Function to interact with the model using the Inference API
def chat_with_model(user_input, hf_api_key):
    global conversation_history

-    ],
-    outputs="text",
-    title="Code Assistant with Qwen2.5-Coder",
-    description="A code assistant that helps you with code generation, debugging, and explanations using the Qwen2.5-Coder model via Hugging Face Inference API."
-)
-# Launch the interface
-iface.launch()
+    if not hf_api_key:
+        return "Error: Please provide your Hugging Face API key."
+
+    try:
+        # Initialize the InferenceClient with the provided API key
+        client = InferenceClient(api_key=hf_api_key)
+
+        # Add the user's message to the conversation history
+        conversation_history.append({"role": "user", "content": user_input})
+
+        # Define the system message (defining the assistant role)
+        system_message = {
+            "role": "system",
+            "content": "You are a code assistant that helps with code generation, debugging, and explanations."
+        }
+
+        # Add system message to the conversation history
+        if len(conversation_history) == 1:  # Add system message only once
+            conversation_history.insert(0, system_message)
+
+        # Ensure the conversation history doesn't exceed token limits
+        if len(conversation_history) > 10:  # Keep the last 10 messages
+            conversation_history = [system_message] + conversation_history[-10:]
+
+        # Create a stream for chat completions using the API
+        stream = client.chat.completions.create(
+            model="Qwen/Qwen2.5-Coder-32B-Instruct",
+            messages=conversation_history,
+            max_tokens=500,
+            stream=True
+        )
+
+        # Collect the generated response from the model
+        response = ""
+        for chunk in stream:
+            response += chunk.choices[0].delta.content
+
+        # Add the assistant's response to the conversation history
+        conversation_history.append({"role": "assistant", "content": response})
+
+        return response
+
+    except Exception as e:
+        return f"Error: {e}"
+
+# AdSense HTML Code
+adsense_code = """
+<script async src="https://pagead2.googlesyndication.com/pagead/js/adsbygoogle.js?client=ca-pub-4124087181208916"
+     crossorigin="anonymous"></script>
+<ins class="adsbygoogle"
+     style="display:block"
+     data-ad-client="ca-pub-4124087181208916"
+     data-ad-slot="8617397243"
+     data-ad-format="auto"
+     data-full-width-responsive="true"></ins>
+<script>
+(adsbygoogle = window.adsbygoogle || []).push({});
+</script>
+"""
+
+# Create the Gradio interface
+with gr.Blocks() as demo:
+    gr.HTML(adsense_code)  # Add Google AdSense
+    gr.Markdown("### Code Assistant with Qwen2.5-Coder")
+    gr.Markdown("Ask me anything about coding! Enter your Hugging Face API key to start.")

+    # Create the input and output interface
+    with gr.Row():
+        user_input = gr.Textbox(lines=5, placeholder="Ask me anything about coding...")
+        api_key = gr.Textbox(lines=1, placeholder="Enter your Hugging Face API key", type="password")

+    # Create the output display
+    output = gr.Textbox(label="Response")

+    # Button for submitting queries
+    submit_button = gr.Button("Submit")
+    submit_button.click(chat_with_model, inputs=[user_input, api_key], outputs=output)
+
+# Launch the app
+demo.launch()
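For reference, the streaming chat-completion call that the new chat_with_model relies on can be exercised on its own, outside Gradio. A minimal sketch, assuming a valid Hugging Face token is exported as HF_TOKEN (the environment-variable name is only an illustration, not something the Space requires):

import os
from huggingface_hub import InferenceClient

# Same client and model as in app.py; reading the token from HF_TOKEN is an assumption of this sketch.
client = InferenceClient(api_key=os.environ["HF_TOKEN"])

stream = client.chat.completions.create(
    model="Qwen/Qwen2.5-Coder-32B-Instruct",
    messages=[
        {"role": "system", "content": "You are a code assistant."},
        {"role": "user", "content": "Write a Python function that reverses a string."},
    ],
    max_tokens=200,
    stream=True,
)

# Accumulate the streamed deltas into a single reply string.
reply = ""
for chunk in stream:
    piece = chunk.choices[0].delta.content
    if piece:  # skip empty deltas that final chunks may carry
        reply += piece

print(reply)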
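The history-trimming rule in the new chat_with_model pins the system message at the front and keeps only the most recent messages. A self-contained sketch of that behavior with dummy messages (no API call involved; the message strings are placeholders):

# Illustration of the trimming rule used in app.py: pin the system message,
# keep only the last 10 conversation messages.
system_message = {"role": "system", "content": "You are a code assistant."}
conversation_history = [system_message]

for i in range(15):  # simulate 15 incoming messages with dummy content
    conversation_history.append({"role": "user", "content": f"message {i}"})
    if len(conversation_history) > 10:
        conversation_history = [system_message] + conversation_history[-10:]

print(len(conversation_history))           # 11: system message + last 10 messages
print(conversation_history[1]["content"])  # "message 5"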