SrijitMukherjee commited on
Commit
e3b04c8
·
verified ·
1 Parent(s): 778c7b2

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +242 -0
app.py ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # import gradio as gr
2
+ # from huggingface_hub import InferenceClient
3
+ # import pandas as pd
4
+
5
+ # """
6
+ # For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
7
+ # """
8
+ # client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
9
+
10
+ # ################################################################
11
+
12
+ # # Load your CSV file
13
+ # df = pd.read_csv("your_file.csv")
14
+
15
+ # # Create dropdowns for exam name, year, and problem number
16
+ # exam_names = df["exam name"].unique()
17
+ # year_options = df["year"].unique()
18
+ # problem_numbers = df["problem number"].unique()
19
+
20
+ # exam_dropdown = gr.Dropdown(exam_names, label="Exam Name")
21
+ # year_dropdown = gr.Dropdown(year_options, label="Year")
22
+ # problem_dropdown = gr.Dropdown(problem_numbers, label="Problem Number")
23
+
24
+ # # Define the functions for the three buttons
25
+ # def solve_problem(exam, year, problem):
26
+ # problem_statement = df[(df["exam name"] == exam) & (df["year"] == year) & (df["problem number"] == problem)]["problem statement"].values[0]
27
+ # prompt = f"Solve the following problem: {problem_statement}"
28
+ # response = client.chat_completion(prompt, max_tokens=512, temperature=0.7, top_p=0.95)
29
+ # return response.choices[0].text
30
+
31
+ # def give_hints(exam, year, problem):
32
+ # problem_statement = df[(df["exam name"] == exam) & (df["year"] == year) & (df["problem number"] == problem)]["problem statement"].values[0]
33
+ # prompt = f"Give hints for the following problem: {problem_statement}"
34
+ # response = client.chat_completion(prompt, max_tokens=512, temperature=0.7, top_p=0.95)
35
+ # return response.choices[0].text
36
+
37
+ # def create_similar_problem(exam, year, problem):
38
+ # problem_statement = df[(df["exam name"] == exam) & (df["year"] == year) & (df["problem number"] == problem)]["problem statement"].values[0]
39
+ # prompt = f"Create a similar problem to the following one: {problem_statement}"
40
+ # response = client.chat_completion(prompt, max_tokens=512, temperature=0.7, top_p=0.95)
41
+ # return response.choices[0].text
42
+
43
+ # ################################################################
44
+
45
+ # def respond(
46
+ # message,
47
+ # history: list[tuple[str, str]],
48
+ # system_message,
49
+ # max_tokens,
50
+ # temperature,
51
+ # top_p,
52
+ # ):
53
+ # messages = [{"role": "system", "content": system_message}]
54
+
55
+ # for val in history:
56
+ # if val[0]:
57
+ # messages.append({"role": "user", "content": val[0]})
58
+ # if val[1]:
59
+ # messages.append({"role": "assistant", "content": val[1]})
60
+
61
+ # messages.append({"role": "user", "content": message})
62
+
63
+ # response = ""
64
+
65
+ # for message in client.chat_completion(
66
+ # messages,
67
+ # max_tokens=max_tokens,
68
+ # stream=True,
69
+ # temperature=temperature,
70
+ # top_p=top_p,
71
+ # ):
72
+ # token = message.choices[0].delta.content
73
+
74
+ # response += token
75
+ # yield response
76
+
77
+ # """
78
+ # For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
79
+ # """
80
+ # demo = gr.ChatInterface(
81
+ # respond,
82
+ # additional_inputs=[
83
+ # gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
84
+ # gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
85
+ # gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
86
+ # gr.Slider(
87
+ # minimum=0.1,
88
+ # maximum=1.0,
89
+ # value=0.95,
90
+ # step=0.05,
91
+ # label="Top-p (nucleus sampling)",
92
+ # ),
93
+ # ],
94
+ # )
95
+
96
+ # ################################################################
97
+
98
+ # # Create Gradio interface with Blocks context
99
+ # with gr.Blocks() as dropdown_interface:
100
+ # with gr.Column():
101
+ # exam_dropdown.render()
102
+ # year_dropdown.render()
103
+ # problem_dropdown.render()
104
+
105
+ # solve_button = gr.Button("Solve Problem")
106
+ # hints_button = gr.Button("Give Hints")
107
+ # similar_problem_button = gr.Button("Create Similar Problem")
108
+
109
+ # output_text = gr.Textbox(label="Output")
110
+
111
+ # solve_button.click(solve_problem, inputs=[exam_dropdown, year_dropdown, problem_dropdown], outputs=output_text)
112
+ # hints_button.click(give_hints, inputs=[exam_dropdown, year_dropdown, problem_dropdown], outputs=output_text)
113
+ # similar_problem_button.click(create_similar_problem, inputs=[exam_dropdown, year_dropdown, problem_dropdown], outputs=output_text)
114
+
115
+ # ################################################################
116
+
117
+ # # Combine both interfaces into a tabbed layout
118
+ # tabbed_interface = gr.TabbedInterface(
119
+ # [dropdown_interface, demo],
120
+ # ["Problem Solver", "Chat Interface"]
121
+ # )
122
+
123
+ # ################################################################
124
+
125
+ # # Launch the app
126
+ # if __name__ == "__main__":
127
+ # tabbed_interface.launch()
128
+
129
+
import pandas as pd
import gradio as gr
from huggingface_hub import InferenceClient

# Inference client for the hosted Zephyr-7B chat model; shared by every
# generation feature in this app.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Problem bank: one row per (exam name, year, problem number) combination.
df = pd.read_csv("your_file.csv")

# Distinct dropdown choices derived from the problem bank columns.
exam_names = df["exam name"].unique()
year_options = df["year"].unique()
problem_numbers = df["problem number"].unique()

exam_dropdown = gr.Dropdown(exam_names, label="Exam Name")
year_dropdown = gr.Dropdown(year_options, label="Year")
problem_dropdown = gr.Dropdown(problem_numbers, label="Problem Number")
# Define the functions for the three buttons
def solve_problem(exam, year, problem):
    """Ask the model to solve the selected problem.

    Args:
        exam: Exam name chosen in the dropdown.
        year: Year chosen in the dropdown.
        problem: Problem number chosen in the dropdown.

    Returns:
        The model's generated solution text.

    Raises:
        IndexError: If no row in ``df`` matches the selection.
    """
    problem_statement = df[
        (df["exam name"] == exam)
        & (df["year"] == year)
        & (df["problem number"] == problem)
    ]["problem statement"].values[0]
    prompt = f"Solve the following problem: {problem_statement}"
    # text_generation() returns the generated text as a plain str (unless
    # details=True), so the original `response[0]['generated_text']` indexed
    # the first character and raised TypeError. Return the string directly.
    response = client.text_generation(prompt, max_new_tokens=512, temperature=0.7, top_p=0.95)
    return response
def give_hints(exam, year, problem):
    """Ask the model for hints on the selected problem.

    Args:
        exam: Exam name chosen in the dropdown.
        year: Year chosen in the dropdown.
        problem: Problem number chosen in the dropdown.

    Returns:
        The model's generated hint text.

    Raises:
        IndexError: If no row in ``df`` matches the selection.
    """
    problem_statement = df[
        (df["exam name"] == exam)
        & (df["year"] == year)
        & (df["problem number"] == problem)
    ]["problem statement"].values[0]
    prompt = f"Give hints for the following problem: {problem_statement}"
    # text_generation() returns a plain str by default; the original
    # `response[0]['generated_text']` would raise TypeError.
    response = client.text_generation(prompt, max_new_tokens=512, temperature=0.7, top_p=0.95)
    return response
def create_similar_problem(exam, year, problem):
    """Ask the model to invent a problem similar to the selected one.

    Args:
        exam: Exam name chosen in the dropdown.
        year: Year chosen in the dropdown.
        problem: Problem number chosen in the dropdown.

    Returns:
        The model's generated problem text.

    Raises:
        IndexError: If no row in ``df`` matches the selection.
    """
    problem_statement = df[
        (df["exam name"] == exam)
        & (df["year"] == year)
        & (df["problem number"] == problem)
    ]["problem statement"].values[0]
    prompt = f"Create a similar problem to the following one: {problem_statement}"
    # text_generation() returns a plain str by default; the original
    # `response[0]['generated_text']` would raise TypeError.
    response = client.text_generation(prompt, max_new_tokens=512, temperature=0.7, top_p=0.95)
    return response
# Define the chat response function
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for *message* given the conversation history.

    Builds an OpenAI-style message list (system prompt, prior turns, then the
    new user message) and yields the accumulated response text after each
    streamed token so the Gradio ChatInterface can render it incrementally.
    """
    messages = [{"role": "system", "content": system_message}]

    # Replay prior turns; skip empty user/assistant entries.
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    messages.append({"role": "user", "content": message})

    response = ""

    # Use a distinct loop variable: the original shadowed the `message`
    # parameter with each streamed chunk.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Streamed deltas (notably the final one) can carry content=None;
        # guard so we never do `str + None`.
        if token:
            response += token
        yield response
# Problem-solver tab: dropdown selectors, three action buttons, one output box.
with gr.Blocks() as dropdown_interface:
    with gr.Column():
        exam_dropdown.render()
        year_dropdown.render()
        problem_dropdown.render()

        solve_button = gr.Button("Solve Problem")
        hints_button = gr.Button("Give Hints")
        similar_problem_button = gr.Button("Create Similar Problem")

        output_text = gr.Textbox(label="Output")

        # All three buttons share the same selector inputs and output box.
        selector_inputs = [exam_dropdown, year_dropdown, problem_dropdown]
        solve_button.click(solve_problem, inputs=selector_inputs, outputs=output_text)
        hints_button.click(give_hints, inputs=selector_inputs, outputs=output_text)
        similar_problem_button.click(
            create_similar_problem, inputs=selector_inputs, outputs=output_text
        )

# Chat tab: streaming chatbot with user-tunable sampling controls.
chat_interface = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

# Combine both interfaces into a tabbed layout
tabbed_interface = gr.TabbedInterface(
    [dropdown_interface, chat_interface],
    ["Problem Solver", "Chat Interface"]
)

# Launch the app
if __name__ == "__main__":
    tabbed_interface.launch()