jedick committed
Commit f53e324 · 1 Parent(s): 30fd51f

Update examples depending on compute mode

Files changed (2)
  1. app.py +68 -43
  2. prompts.py +1 -1
app.py CHANGED
@@ -50,7 +50,7 @@ def append_content(chunk_messages, history, thinking_about):
             gr.ChatMessage(
                 role="assistant",
                 content=think_text,
-                metadata={"title": f"🧠 Thinking about {thinking_about}"},
+                metadata={"title": f"🧠 Thinking about the {thinking_about}"},
             )
         )
     if not post_think and not chunk_messages.tool_calls:
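For reference, the collapsible "thinking" bubble that this title feeds comes from Gradio's messages-format chatbot: a ChatMessage whose metadata carries a title is rendered as an expandable thought panel. A minimal sketch of that mechanism, assuming a recent Gradio release (the content string is illustrative):

```python
import gradio as gr

# In gr.Chatbot(type="messages"), a ChatMessage with a metadata "title"
# renders as a collapsible thought panel instead of a normal bubble.
thought = gr.ChatMessage(
    role="assistant",
    content="Deciding which retrieval tool to call...",
    metadata={"title": "🧠 Thinking about the query"},
)
```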
@@ -218,7 +218,7 @@ def to_workflow(request: gr.Request, *args):
         yield value


-@spaces.GPU(duration=100)
+@spaces.GPU(duration=60)
 def run_workflow_local(*args):
     for value in run_workflow(*args):
         yield value
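The @spaces.GPU decorator reserves ZeroGPU hardware only while the decorated function runs, and duration caps the per-call reservation in seconds, so lowering it from 100 to 60 releases the GPU sooner and matches the "about one minute" response time promised in the status text. A minimal sketch, assuming Hugging Face's spaces package (the function body is illustrative):

```python
import spaces

@spaces.GPU(duration=60)  # request a ZeroGPU slice for up to ~60 s per call
def run_inference(prompt):
    # Model inference runs here on the temporarily allocated GPU;
    # the hardware is released when the function returns.
    ...
```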
@@ -287,17 +287,12 @@ with gr.Blocks(
         render=False,
     )
     data_error = gr.Textbox(
-        value="Email database is missing. Try reloading the page, then contact the maintainer if the problem persists.",
+        value="Email database is missing. Try reloading this page. If the problem persists, please contact the maintainer.",
         lines=1,
         label="Error downloading or extracting data",
         visible=False,
         render=False,
     )
-    show_examples = gr.Checkbox(
-        value=False,
-        label="💡 Example Questions",
-        render=False,
-    )
     chatbot = gr.Chatbot(
         type="messages",
         show_label=False,
@@ -382,8 +377,8 @@ with gr.Blocks(
         status_text = f"""
         📍 Now in **local** mode, using ZeroGPU hardware<br>
         ⌛ Response time is about one minute<br>
-        🧠 Add **/think** to enable thinking for answer</br>
-        &emsp;&nbsp; 🔍 Thinking is already enabled for query<br>
+        🔍 Thinking is enabled for the query<br>
+        &emsp;&nbsp; 🧠 Add **/think** to enable thinking for the answer</br>
         ✨ [nomic-embed-text-v1.5](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) and [{model_id.split("/")[-1]}](https://huggingface.co/{model_id})<br>
         🏠 See the project's [GitHub repository](https://github.com/jedick/R-help-chat)
         """
@@ -407,6 +402,35 @@ with gr.Blocks(
         """
         return info_text

+    def get_example_questions(compute_mode, as_dataset=True):
+        """Get example questions based on compute mode"""
+        questions = [
+            # "What is today's date?",
+            "Summarize emails from the last two months",
+            "Advice on using plotmath /think",
+            "When was has.HLC mentioned?",
+            "Who reported installation problems in 2023-2024?",
+        ]
+
+        if compute_mode == "remote":
+            # Remove "/think" from questions in remote mode
+            questions = [q.replace(" /think", "") for q in questions]
+
+        # cf. https://github.com/gradio-app/gradio/pull/8745 for updating examples
+        return gr.Dataset(samples=[[q] for q in questions]) if as_dataset else questions
+
+    def get_multi_tool_questions(compute_mode, as_dataset=True):
+        """Get multi-tool example questions based on compute mode"""
+        questions = [
+            "Differences between lapply and for loops /think",
+            "Compare usage of pipe operator between 2022 and 2024",
+        ]
+
+        if compute_mode == "remote":
+            questions = [q.replace(" /think", "") for q in questions]
+
+        return gr.Dataset(samples=[[q] for q in questions]) if as_dataset else questions
+
     with gr.Row():
         # Left column: Intro, Compute, Chat
         with gr.Column(scale=2):
@@ -429,38 +453,27 @@ with gr.Blocks(
             info = gr.Markdown(get_info_text())
             with gr.Accordion("💡 Examples", open=True):
                 # Add some helpful examples
-                example_questions = [
-                    # "What is today's date?",
-                    "Summarize emails from the last two months",
-                    "Advice on using plotmath /think",
-                    "When was has.HLC mentioned?",
-                    "Who reported installation problems in 2023-2024?",
-                ]
-                gr.Examples(
-                    examples=[[q] for q in example_questions],
+                example_questions = gr.Examples(
+                    examples=get_example_questions(
+                        compute_mode.value, as_dataset=False
+                    ),
                     inputs=[input],
                     label="Click an example to fill the message box",
-                    elem_id="example-questions",
                 )
-                multi_tool_questions = [
-                    "Differences between lapply and for loops /think",
-                    "Compare usage of pipe operator between 2022 and 2024",
-                ]
-                gr.Examples(
-                    examples=[[q] for q in multi_tool_questions],
+                multi_tool_questions = gr.Examples(
+                    examples=get_multi_tool_questions(
+                        compute_mode.value, as_dataset=False
+                    ),
                     inputs=[input],
                     label="Multiple retrievals",
-                    elem_id="example-questions",
                 )
-                multi_turn_questions = [
-                    "Lookup emails that reference bugs.r-project.org in 2025",
-                    "Did those authors report bugs before 2025?",
-                ]
-                gr.Examples(
-                    examples=[[q] for q in multi_turn_questions],
+                multi_turn_questions = gr.Examples(
+                    examples=[
+                        "Lookup emails that reference bugs.r-project.org in 2025",
+                        "Did those authors report bugs before 2025?",
+                    ],
                     inputs=[input],
                     label="Asking follow-up questions",
-                    elem_id="example-questions",
                 )

     # Bottom row: retrieved emails and citations
@@ -518,10 +531,9 @@ with gr.Blocks(
             return component.clear()

         compute_mode.change(
-            # Change the app status text
-            get_status_text,
-            [compute_mode],
-            [status],
+            # Start a new thread
+            generate_thread_id,
+            outputs=[thread_id],
             api_name=False,
         ).then(
             # Clear the chatbot history
@@ -535,17 +547,30 @@ with gr.Blocks(
             [compute_mode],
             [chatbot],
             api_name=False,
-        ).then(
-            # Start a new thread
-            generate_thread_id,
-            outputs=[thread_id],
-            api_name=False,
         ).then(
             # Focus textbox by updating the textbox with the current value
             lambda x: gr.update(value=x),
             [input],
             [input],
             api_name=False,
+        ).then(
+            # Change the app status text
+            get_status_text,
+            [compute_mode],
+            [status],
+            api_name=False,
+        ).then(
+            # Update examples based on compute mode
+            get_example_questions,
+            [compute_mode],
+            [example_questions.dataset],
+            api_name=False,
+        ).then(
+            # Update multi-tool examples based on compute mode
+            get_multi_tool_questions,
+            [compute_mode],
+            [multi_tool_questions.dataset],
+            api_name=False,
         )

         input.submit(
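Together, these hunks implement one pattern: each gr.Examples block is seeded once from a plain list, and a compute_mode change returns a fresh gr.Dataset into the .dataset component that backs it (the update mechanism referenced by the gradio PR link in the diff). A condensed, self-contained sketch of the same pattern, with illustrative component names:

```python
import gradio as gr

def make_examples(mode, as_dataset=True):
    # Drop "/think" when the remote backend does not support thinking
    questions = ["Summarize recent emails", "Advice on using plotmath /think"]
    if mode == "remote":
        questions = [q.replace(" /think", "") for q in questions]
    samples = [[q] for q in questions]
    # A gr.Dataset return value swaps the samples of an existing
    # gr.Examples; the plain list is only for the initial render.
    return gr.Dataset(samples=samples) if as_dataset else samples

with gr.Blocks() as demo:
    mode = gr.Radio(["local", "remote"], value="local", label="Compute mode")
    box = gr.Textbox(label="Message")
    examples = gr.Examples(
        examples=make_examples(mode.value, as_dataset=False), inputs=[box]
    )
    # Route mode changes into the Dataset component behind the Examples
    mode.change(make_examples, [mode], [examples.dataset], api_name=False)

demo.launch()
```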
 
prompts.py CHANGED
@@ -60,7 +60,7 @@ def generate_prompt(chat_model, think=False, with_tools=False):
         "Summarize the content of the emails rather than copying the headers. " # Qwen
         "You must include inline citations (email senders and dates) in each part of your response. "
         "Only answer general questions about R if the answer is in the retrieved emails. "
-        "Your response can include URLs, but make sure they are quoted verbatim from the retrieved emails. " # Qwen
+        "Your response can include URLs, but make sure they are unchanged from the retrieved emails. " # Qwen
         "Respond with 300 words maximum and 30 lines of code maximum. "
     )
     if with_tools:
 