Shreyas094 committed
Commit 9723c64 · verified · 1 Parent(s): 618cc2a

Update app.py

Files changed (1):
  1. app.py +22 -22
app.py CHANGED
@@ -78,16 +78,16 @@ def update_vectors(files, parser):
 
     return f"Vector store updated successfully. Processed {total_chunks} chunks from {len(files)} files using {parser}."
 
-def generate_chunked_response(prompt, model, max_tokens=1000, num_calls=3, temperature=0.2):
-    print(f"Starting generate_chunked_response with {num_calls} calls") # Debug log
+def generate_chunked_response(prompt, model, max_tokens=1000, num_calls=3, temperature=0.2, stop_clicked=False):
+    print(f"Starting generate_chunked_response with {num_calls} calls")
     client = InferenceClient(model, token=huggingface_token)
     full_responses = []
     messages = [{"role": "user", "content": prompt}]
 
     for i in range(num_calls):
-        print(f"Starting API call {i+1}") # Debug log
-        if stop_clicked:
-            print("Stop clicked, breaking loop") # Debug log
+        print(f"Starting API call {i+1}")
+        if stop_clicked():  # Assume stop_clicked is now a function that returns the current state
+            print("Stop clicked, breaking loop")
             break
         try:
             response = ""
@@ -97,25 +97,25 @@ def generate_chunked_response(prompt, model, max_tokens=1000, num_calls=3, tempe
                 temperature=temperature,
                 stream=True,
             ):
-                if stop_clicked:
-                    print("Stop clicked during streaming, breaking") # Debug log
+                if stop_clicked():
+                    print("Stop clicked during streaming, breaking")
                     break
                 if message.choices and message.choices[0].delta and message.choices[0].delta.content:
                     chunk = message.choices[0].delta.content
                     response += chunk
-            print(f"API call {i+1} response: {response[:100]}...") # Debug log
+            print(f"API call {i+1} response: {response[:100]}...")
             full_responses.append(response)
         except Exception as e:
             print(f"Error in generating response: {str(e)}")
 
     combined_response = " ".join(full_responses)
-    print(f"Combined response: {combined_response[:100]}...") # Debug log
+    print(f"Combined response: {combined_response[:100]}...")
 
     clean_response = re.sub(r'<s>\[INST\].*?\[/INST\]\s*', '', combined_response, flags=re.DOTALL)
     clean_response = clean_response.replace("Using the following context:", "").strip()
     clean_response = clean_response.replace("Using the following context from the PDF documents:", "").strip()
 
-    print(f"Final clean response: {clean_response[:100]}...") # Debug log
+    print(f"Final clean response: {clean_response[:100]}...")
     return clean_response
 
 def duckduckgo_search(query):
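
Moving the stop check inside the streaming loop means a stop takes effect between streamed chunks rather than only between the num_calls API calls. A self-contained sketch of that loop, assuming huggingface_hub's InferenceClient.chat_completion; stream_one_call is a hypothetical helper name, not from the commit:

from huggingface_hub import InferenceClient

def stream_one_call(model, token, messages, stop_clicked,
                    max_tokens=1000, temperature=0.2):
    # One streaming chat-completion call with a cooperative stop check.
    client = InferenceClient(model, token=token)
    response = ""
    for message in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        temperature=temperature,
        stream=True,
    ):
        if stop_clicked():  # polled once per streamed chunk
            break
        if message.choices and message.choices[0].delta.content:
            response += message.choices[0].delta.content
    return response
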
@@ -251,37 +251,37 @@ with gr.Blocks() as demo:
     clear_btn = gr.Button("Clear")
 
     def protected_generate_response(message, history, use_web_search, model, temperature, num_calls, is_generating, stop_clicked):
-        print("Starting protected_generate_response") # Debug log
+        print("Starting protected_generate_response")
         if is_generating:
-            print("Already generating, returning") # Debug log
+            print("Already generating, returning")
             return message, history, is_generating, stop_clicked
         is_generating = True
-        stop_clicked = False
+        stop_clicked = False # Reset stop_clicked at the start of generation
 
         try:
-            print(f"Generating response for: {message}") # Debug log
+            print(f"Generating response for: {message}")
             if use_web_search:
-                print("Using web search") # Debug log
-                main_content, sources = get_response_with_search(message, model, num_calls=num_calls, temperature=temperature)
+                print("Using web search")
+                main_content, sources = get_response_with_search(message, model, num_calls=num_calls, temperature=temperature, stop_clicked=stop_clicked)
                 formatted_response = f"{main_content}\n\nSources:\n{sources}"
             else:
-                print("Using PDF search") # Debug log
-                response = get_response_from_pdf(message, model, num_calls=num_calls, temperature=temperature)
+                print("Using PDF search")
+                response = get_response_from_pdf(message, model, num_calls=num_calls, temperature=temperature, stop_clicked=stop_clicked)
                 formatted_response = response
 
-            print(f"Generated response: {formatted_response[:100]}...") # Debug log
+            print(f"Generated response: {formatted_response[:100]}...")
 
             if not stop_clicked:
-                print("Appending to history") # Debug log
+                print("Appending to history")
                 history.append((message, formatted_response))
             else:
-                print("Stop clicked, not appending to history") # Debug log
+                print("Stop clicked, not appending to history")
         except Exception as e:
             print(f"Error generating response: {str(e)}")
             history.append((message, "I'm sorry, but I encountered an error while generating the response. Please try again."))
 
         is_generating = False
-        print(f"Returning history with {len(history)} items") # Debug log
+        print(f"Returning history with {len(history)} items")
         return "", history, is_generating, stop_clicked
 
     submit_btn.click(
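
Two caveats in this hunk. First, stop_clicked = False rebinds a local name: the boolean was passed in by value, so a Stop button pressed mid-generation cannot change it, and the later "if not stop_clicked" check can only ever see the reset value. Second, forwarding stop_clicked=stop_clicked into get_response_with_search and get_response_from_pdf passes that same frozen boolean, while generate_chunked_response now expects a callable. One way to supply a live flag is a shared threading.Event; this is a sketch under that assumption, and stop_event and request_stop are hypothetical names, not from the commit:

import threading

stop_event = threading.Event()

def request_stop():
    # Wired to a Stop button, e.g. stop_btn.click(request_stop); setting the
    # event is visible immediately to any loop polling stop_event.is_set.
    stop_event.set()

# Before starting a new generation:
#     stop_event.clear()
# Passed into the generator as the callable flag:
#     generate_chunked_response(prompt, model, stop_clicked=stop_event.is_set)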
 