Ilya committed on
Commit
fb45bad
Β·
verified Β·
1 Parent(s): f5aff24

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +104 -33
app.py CHANGED
@@ -3,6 +3,7 @@ from huggingface_hub import InferenceClient
3
  import sys
4
  import io
5
  import traceback
 
6
 
7
  model_name = "Qwen/Qwen2.5-72B-Instruct"
8
  client = InferenceClient(model_name)
@@ -11,7 +12,7 @@ def llm_inference(user_sample):
11
  eos_token = "<|endoftext|>"
12
  output = client.chat.completions.create(
13
  messages=[
14
- {"role": "system", "content": "You are a Python language guide. Write code on the user topic. Make sure that the code is runnable and doesn't close the shell window, so end with input() if the user request is simple. If the input is code, correct it for mistakes."},
15
  {"role": "user", "content": f"Write only python code without any explanation: {user_sample}"},
16
  ],
17
  stream=False,
@@ -25,38 +26,84 @@ def llm_inference(user_sample):
25
  response += choice['message']['content']
26
  return response
27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
def chat(user_input, history):
    """Run one chat turn: query the model and record (user, reply) in history."""
    history.append((user_input, llm_inference(user_input)))
    return history, history
32
 
33
def execute_code(code, user_inputs):
    """Execute *code* with stdout captured, feeding scripted stdin lines.

    Args:
        code: Python source to run via exec() in a fresh namespace.
        user_inputs: newline-separated values returned by successive input()
            calls inside the executed code.

    Returns:
        Everything the code printed, or an "Error: ..." report with a full
        traceback if it raised (including running out of scripted inputs).

    NOTE(review): exec() of user-supplied code is inherently unsafe — only
    run this inside a sandboxed environment.
    """
    import builtins  # local import: keeps the file's top-level imports untouched

    inputs = iter(user_inputs.strip().split('\n'))

    def custom_input(prompt=''):
        # Serve the next scripted line; exhausting them is a user error.
        try:
            return next(inputs)
        except StopIteration:
            raise Exception("Not enough inputs provided.")

    # Redirect stdout so the executed code's prints are captured.
    old_stdout = sys.stdout
    redirected_output = sys.stdout = io.StringIO()
    # Patch input() via the builtins module: the implicit __builtins__ name is
    # a plain dict in imported modules, so attribute assignment on it (as the
    # original code did) can raise AttributeError.
    old_input = builtins.input
    builtins.input = custom_input
    try:
        exec(code, {})
        output = redirected_output.getvalue()
    except Exception as e:
        output = f"Error: {e}\n{traceback.format_exc()}"
    finally:
        # Always restore the real stdout and input(), even on error.
        sys.stdout = old_stdout
        builtins.input = old_input
    return output
59
-
60
  with gr.Blocks() as demo:
61
  gr.Markdown("# 🐍 Python Helper Chatbot")
62
  with gr.Tab("Chat"):
@@ -64,13 +111,37 @@ with gr.Blocks() as demo:
64
  msg = gr.Textbox(placeholder="Type your message here...")
65
  msg.submit(chat, inputs=[msg, chatbot], outputs=[chatbot, chatbot])
66
  with gr.Tab("Interpreter"):
67
- gr.Markdown("### πŸ–₯️ Test Your Code")
68
  code_input = gr.Code(language="python", lines=20)
69
- gr.Markdown("#### πŸ“ Provide Inputs (Each input on a new line):")
70
- user_inputs = gr.Textbox(lines=5, placeholder="Enter inputs for your code here...")
71
  run_button = gr.Button("Run Code")
72
- code_output = gr.Textbox(label="Output", lines=15)
73
- run_button.click(execute_code, inputs=[code_input, user_inputs], outputs=code_output)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
  with gr.Tab("Logs"):
75
  gr.Markdown("### πŸ“œ Logs")
76
  log_output = gr.Textbox(label="Logs", lines=10, interactive=False)
 
3
  import sys
4
  import io
5
  import traceback
6
+ import threading
7
 
8
  model_name = "Qwen/Qwen2.5-72B-Instruct"
9
  client = InferenceClient(model_name)
 
12
  eos_token = "<|endoftext|>"
13
  output = client.chat.completions.create(
14
  messages=[
15
+ {"role": "system", "content": "You are a Python language guide. Write code on the user topic. Make sure that the code is runnable and doesn't close the shell window. If the input is code, correct it for mistakes."},
16
  {"role": "user", "content": f"Write only python code without any explanation: {user_sample}"},
17
  ],
18
  stream=False,
 
26
  response += choice['message']['content']
27
  return response
28
 
29
# A class to handle the execution environment per user session
class CodeExecutor:
    """Runs user code on a worker thread, bridging input() to a UI buffer.

    One instance per session: execute_code() starts the run in the background,
    provide_input() feeds a line to a pending input() call, and get_output()
    returns everything captured so far.

    NOTE(review): sys.stdout is swapped process-wide while the worker runs, so
    concurrent sessions can interleave or lose output — confirm single-user use.
    NOTE(review): exec() of user code is unsafe outside a sandbox.
    """

    def __init__(self):
        self.local_vars = {}             # locals namespace passed to exec()
        self.output = ""                 # accumulated program output / errors
        self.input_buffer = []           # lines queued by provide_input()
        self.waiting_for_input = False   # True while the worker blocks on input()
        self.execution_thread = None
        self.execution_complete = False
        # Event replaces the original busy-wait spin loop ("while
        # self.waiting_for_input: pass"), which burned a CPU core; the worker
        # now sleeps until provide_input() signals.
        self._input_ready = threading.Event()

    def custom_input(self, prompt=''):
        """input() replacement: block until the UI supplies a line."""
        self.waiting_for_input = True
        self._input_ready.wait()
        self._input_ready.clear()
        self.waiting_for_input = False
        return self.input_buffer.pop(0)

    def execute_code(self, code):
        """Start executing *code* on a background thread and return at once."""
        def run_code():
            import builtins  # local import: avoids touching file-level imports
            # Redirect stdout so the program's prints are captured.
            old_stdout = sys.stdout
            sys.stdout = io.StringIO()
            # Patch input() via the builtins module: the implicit __builtins__
            # name is a plain dict in imported modules, so attribute
            # assignment on it (as the original code did) can fail.
            old_input = builtins.input
            builtins.input = self.custom_input
            try:
                exec(code, {}, self.local_vars)
            except Exception as e:
                self.output += f"\nError: {e}\n{traceback.format_exc()}"
            finally:
                # Collect whatever was printed, then restore globals.
                self.output += sys.stdout.getvalue()
                sys.stdout = old_stdout
                builtins.input = old_input
                self.execution_complete = True

        self.execution_thread = threading.Thread(target=run_code)
        self.execution_thread.start()

    def provide_input(self, user_input):
        """Queue a line for the pending input() call and wake the worker."""
        self.input_buffer.append(user_input)
        self.waiting_for_input = False
        self._input_ready.set()

    def get_output(self):
        """Return everything captured so far (complete only after the run)."""
        return self.output

    def is_execution_complete(self):
        return self.execution_complete
75
+
76
# Registry mapping session id -> CodeExecutor for runs in flight.
executors = {}

def start_execution(code, session_id):
    """Create an executor for *session_id*, launch *code* on it, and report."""
    session_executor = CodeExecutor()
    executors[session_id] = session_executor
    session_executor.execute_code(code)
    return "Execution started. Waiting for output..."
83
+
84
def provide_input(user_input, session_id):
    """Forward *user_input* to the session's running executor, if any."""
    executor = executors.get(session_id)
    # Guard: nothing to feed once the program finished (or never started).
    if executor is None or executor.is_execution_complete():
        return "No execution in progress or execution has completed."
    executor.provide_input(user_input)
    return "Input received."
91
+
92
def get_output(session_id):
    """Return captured output for the session; drop executors that finished."""
    executor = executors.get(session_id)
    if executor is None:
        return "No execution in progress."
    captured = executor.get_output()
    if executor.is_execution_complete():
        # The run is over: release the session's executor.
        del executors[session_id]
    return captured
101
+
102
def chat(user_input, history):
    """Answer *user_input* via the LLM and append the turn to *history*."""
    reply = llm_inference(user_input)
    history.append((user_input, reply))
    return history, history
106
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
107
  with gr.Blocks() as demo:
108
  gr.Markdown("# 🐍 Python Helper Chatbot")
109
  with gr.Tab("Chat"):
 
111
  msg = gr.Textbox(placeholder="Type your message here...")
112
  msg.submit(chat, inputs=[msg, chatbot], outputs=[chatbot, chatbot])
113
  with gr.Tab("Interpreter"):
114
+ gr.Markdown("### πŸ–₯️ Interactive Code Interpreter")
115
  code_input = gr.Code(language="python", lines=20)
 
 
116
  run_button = gr.Button("Run Code")
117
+ output_box = gr.Textbox(label="Output", lines=15)
118
+ input_box = gr.Textbox(label="Program Input", placeholder="Enter input for your program here...")
119
+ send_input_button = gr.Button("Send Input")
120
+ session_id = gr.State()
121
+
122
        def run_code(code):
            """Click handler: start a fresh interpreter session for *code*."""
            from uuid import uuid4
            session = str(uuid4())
            # NOTE(review): gr.State has no .set() method — this line raises
            # AttributeError at runtime. State is written by returning the
            # value through the event's outputs; confirm and rewire.
            session_id.set(session)
            start_execution(code, session)
            # Reveal the input box / send button while the program runs.
            return "Code is running...", gr.update(visible=True), gr.update(visible=True)
128
+
129
        def send_input(user_input):
            """Click handler: feed one stdin line to the running program."""
            # NOTE(review): gr.State has no .get() method — read state by
            # declaring session_id as an input of the event instead; as
            # written this raises AttributeError at runtime.
            session = session_id.get()
            provide_input(user_input, session)
            # Clear the input textbox after sending.
            return ""
133
+
134
        def update_output():
            """Poll the captured output of the current session."""
            # NOTE(review): gr.State has no .get() method; the session id
            # should be passed in via the event's inputs instead — as written
            # this raises AttributeError at runtime. Confirm and rewire.
            session = session_id.get()
            if session:
                output = get_output(session)
                return output
            else:
                return "No execution in progress."
141
+
142
+ run_button.click(run_code, inputs=code_input, outputs=[output_box, input_box, send_input_button])
143
+ send_input_button.click(send_input, inputs=input_box, outputs=input_box)
144
+ output_box.change(update_output, outputs=output_box).every(1)
145
  with gr.Tab("Logs"):
146
  gr.Markdown("### πŸ“œ Logs")
147
  log_output = gr.Textbox(label="Logs", lines=10, interactive=False)