Vera-ZWY committed
Commit 1fe2261 · verified · 1 Parent(s): 9a76e3e

Update app.py

Files changed (1)
  1. app.py +23 -25
app.py CHANGED
@@ -59,19 +59,20 @@ def predict(message, history):
     return gpt_response.content
 
 def chat_function(message, history, year):
-    history_langchain_format = []
-    for msg in history:
-        if msg['role'] == "user":
-            history_langchain_format.append(HumanMessage(content=msg['content']))
-        elif msg['role'] == "assistant":
-            history_langchain_format.append(AIMessage(content=msg['content']))
-    history_langchain_format.append(HumanMessage(content=message))
-    rag_response = stream_chat_with_rag(history_langchain_format,year)[0]
+    # history_langchain_format = []
+    # for msg in history:
+    #     if msg['role'] == "user":
+    #         history_langchain_format.append(HumanMessage(content=msg['content']))
+    #     elif msg['role'] == "assistant":
+    #         history_langchain_format.append(AIMessage(content=msg['content']))
+    # history_langchain_format.append(HumanMessage(content=message))
+    # rag_response = stream_chat_with_rag(history_langchain_format,year)[0]
 
-    # response = f"Year selected: {year}. Here's a response."
-    # answer = stream_chat_with_rag(user_message, year)[0]
-    # chat_history.append((user_message, response +"\n"+ answer))
-    return rag_response
+    response = f"Year selected: {year}. Here's a response."
+    answer = stream_chat_with_rag(message, year)[0]
+    history.append((message, response +"\n"+ answer))
+
+    return answer
 
 chatbot_state = gr.State([])
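Note on this hunk: the rewritten chat_function returns the RAG answer but also appends a (message, ...) tuple to history. The ChatInterface that calls it (third hunk below) is built with type="messages", where history entries are {"role": ..., "content": ...} dicts, the same format the now commented-out loop was reading, and the interface appends the returned reply to the visible history on its own. A minimal sketch of the handler under those constraints, assuming stream_chat_with_rag(message, year) returns a tuple whose first element is the answer text, as the committed code implies:

    # Sketch only: with type="messages", ChatInterface manages the displayed history
    # itself, so there is no need to mutate `history`, and tuple entries would mix
    # formats with the role/content dicts it passes in.
    def chat_function(message, history, year):
        answer = stream_chat_with_rag(message, year)[0]
        return f"Year selected: {year}.\n{answer}"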
 
@@ -204,12 +205,14 @@ with gr.Blocks(title="Reddit Election Analysis") as demo:
     gr.Markdown("Ask questions about election-related comments and posts")
 
     with gr.Row():
-        # with gr.Column():
-        #     year_selector = gr.Radio(
-        #         choices=["2016 Election", "2024 Election", "Comparison two years"],
-        #         label="Select Election Year",
-        #         value="2016 Election"
-        #     )
+        with gr.Column():
+            year_selector = gr.Radio(
+                choices=["2016 Election", "2024 Election", "Comparison two years"],
+                label="Select Election Year",
+                value="2016 Election"
+            )
+            slider = gr.Slider(50, 500, render=False, label= "Tokens")
+
 
         # query_input = gr.Textbox(
         #     label="Your Question",
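Note on the new slider: render=False only constructs the component, a common pattern for inputs that gr.ChatInterface is expected to render itself, but it means the slider stays invisible until it is either rendered explicitly or listed in additional_inputs, and neither happens in this commit. The sketch below shows the usual wiring; treating the slider as a token cap that chat_function should receive is an assumption about intent, not something stated in the diff:

    # Hypothetical wiring, not part of this commit: pass the deferred slider as a
    # second additional input and give chat_function a matching parameter.
    slider = gr.Slider(50, 500, render=False, label="Tokens")

    def chat_function(message, history, year, max_tokens):
        # stream_chat_with_rag's full signature is not visible in this diff, so the
        # sketch only receives max_tokens instead of forwarding it to the RAG call.
        answer = stream_chat_with_rag(message, year)[0]
        return answer

    chatbot = gr.ChatInterface(
        chat_function,
        type="messages",
        additional_inputs=[year_selector, slider],  # ChatInterface renders both inputs
    )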
@@ -233,13 +236,8 @@ with gr.Blocks(title="Reddit Election Analysis") as demo:
         with gr.Column():
             chatbot = gr.ChatInterface(chat_function,
                             type="messages",
-                            addtional_inputs = [
-                                year_selector = gr.Radio(
-                                    choices=["2016 Election", "2024 Election", "Comparison two years"],
-                                    label="Select Election Year",
-                                    value="2016 Election"
-                                )
-                            ])
+                            addtional_inputs = [year_selector]
+                            )
 
         gr.Markdown("## Top works of the relevant Q&A")
         # with gr.Row():
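Note on this hunk: the keyword is still misspelled after the change. gr.ChatInterface accepts additional_inputs, not addtional_inputs, so this call would most likely be rejected as an unexpected keyword argument when the interface is constructed. With the spelling fixed, each listed component is passed to the chat function after message and history, which matches chat_function(message, history, year). Corrected call, as a sketch against the committed code:

    # Inside the existing `with gr.Column():` block; only the keyword spelling changes.
    chatbot = gr.ChatInterface(chat_function,
                               type="messages",
                               additional_inputs=[year_selector],
                               )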