Shreyas094 committed on
Commit
1f41265
·
verified ·
1 Parent(s): fb4793f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -72
app.py CHANGED
@@ -188,31 +188,15 @@ class CitingSources(BaseModel):
188
  ...,
189
  description="List of sources to cite. Should be an URL of the source."
190
  )
191
- def chatbot_interface(message, history, use_web_search, model, temperature, num_calls, continue_generation=False):
192
- if not message.strip() and not continue_generation:
193
  return "", history
194
 
195
- if continue_generation and history:
196
- last_message, last_response = history[-1]
197
- continuation_prompt = f"""
198
- Original query: {last_message}
199
-
200
- Previously generated response:
201
- {last_response}
202
-
203
- Please continue the response from where it left off, maintaining coherence and relevance to the original query.
204
- """
205
- history = history[:-1] # Remove the last response to replace it with the continued one
206
- message = continuation_prompt
207
- else:
208
- history = history + [(message, "")]
209
 
210
  try:
211
  for response in respond(message, history, model, temperature, num_calls, use_web_search):
212
- if continue_generation and history:
213
- history.append((history[-1][0], history[-1][1] + response))
214
- else:
215
- history[-1] = (message, response)
216
  yield history
217
  except gr.CancelledError:
218
  yield history
@@ -397,37 +381,6 @@ Write a detailed and complete response that answers the following user question:
397
  response += chunk
398
  yield response # Yield partial response
399
 
400
- def continue_generation(message, last_response, model, temperature, num_calls):
401
- continue_prompt = f"""
402
- Original query: {message}
403
-
404
- Previously generated response:
405
- {last_response}
406
-
407
- Please continue the response from where it left off, maintaining coherence and relevance to the original query.
408
- """
409
-
410
- if model == "@cf/meta/llama-3.1-8b-instruct":
411
- # Use Cloudflare API
412
- for response in get_response_from_cloudflare(prompt=continue_prompt, context="", query="", num_calls=num_calls, temperature=temperature, search_type="continue"):
413
- yield last_response + response
414
- else:
415
- # Use Hugging Face API
416
- client = InferenceClient(model, token=huggingface_token)
417
-
418
- continued_response = last_response
419
- for i in range(num_calls):
420
- for message in client.chat_completion(
421
- messages=[{"role": "user", "content": continue_prompt}],
422
- max_tokens=1000,
423
- temperature=temperature,
424
- stream=True,
425
- ):
426
- if message.choices and message.choices[0].delta and message.choices[0].delta.content:
427
- chunk = message.choices[0].delta.content
428
- continued_response += chunk
429
- yield continued_response
430
-
431
  def vote(data: gr.LikeData):
432
  if data.liked:
433
  print(f"You upvoted this response: {data.value}")
@@ -441,15 +394,13 @@ css = """
441
  # Define the checkbox outside the demo block
442
  use_web_search = gr.Checkbox(label="Use Web Search", value=False)
443
 
444
- # Modify your main chat interface to use the same function
445
  demo = gr.ChatInterface(
446
- fn=chatbot_interface,
447
  additional_inputs=[
448
- use_web_search,
449
  gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[0]),
450
  gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
451
  gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
452
- gr.State(False) # Flag for continue_generation, default False for normal chat
453
  ],
454
  title="AI-powered Web Search and PDF Chat Assistant",
455
  description="Chat with your PDFs or use web search to answer questions.",
@@ -482,23 +433,6 @@ demo = gr.ChatInterface(
482
  analytics_enabled=False,
483
  )
484
 
485
- # In your Gradio interface setup
486
- with demo:
487
- continue_button = gr.Button("Continue Generation")
488
- continue_button.click(
489
- chatbot_interface,
490
- inputs=[
491
- gr.Textbox(value="", visible=False), # Hidden textbox for the message
492
- gr.State([]), # Chat history
493
- use_web_search,
494
- gr.Dropdown(choices=MODELS, label="Select Model"),
495
- gr.Slider(label="Temperature"),
496
- gr.Slider(label="Number of API Calls"),
497
- gr.State(True) # Flag for continue_generation
498
- ],
499
- outputs=gr.Chatbot()
500
- )
501
-
502
  # Add file upload functionality
503
  with demo:
504
  gr.Markdown("## Upload PDF Documents")
 
188
  ...,
189
  description="List of sources to cite. Should be an URL of the source."
190
  )
191
+ def chatbot_interface(message, history, use_web_search, model, temperature, num_calls):
192
+ if not message.strip():
193
  return "", history
194
 
195
+ history = history + [(message, "")]
 
 
 
 
 
 
 
 
 
 
 
 
 
196
 
197
  try:
198
  for response in respond(message, history, model, temperature, num_calls, use_web_search):
199
+ history[-1] = (message, response)
 
 
 
200
  yield history
201
  except gr.CancelledError:
202
  yield history
 
381
  response += chunk
382
  yield response # Yield partial response
383
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
384
  def vote(data: gr.LikeData):
385
  if data.liked:
386
  print(f"You upvoted this response: {data.value}")
 
394
  # Define the checkbox outside the demo block
395
  use_web_search = gr.Checkbox(label="Use Web Search", value=False)
396
 
 
397
  demo = gr.ChatInterface(
398
+ respond,
399
  additional_inputs=[
 
400
  gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[0]),
401
  gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
402
  gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
403
+ use_web_search # Add this line to include the checkbox
404
  ],
405
  title="AI-powered Web Search and PDF Chat Assistant",
406
  description="Chat with your PDFs or use web search to answer questions.",
 
433
  analytics_enabled=False,
434
  )
435
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
436
  # Add file upload functionality
437
  with demo:
438
  gr.Markdown("## Upload PDF Documents")