zhiminy committed on
Commit 55a2fb1
1 Parent(s): 520df9c
Files changed (2)
  1. app.py +94 -66
  2. context_window.json +0 -1
app.py CHANGED
@@ -188,7 +188,7 @@ def fetch_url_content(url):
         return fetch_huggingface_content(url)
     except Exception as e:
         print(f"Error fetching URL content: {e}")
-        return ''
+        return ""
 
 
 # Truncate prompt
@@ -673,59 +673,58 @@ with gr.Blocks() as app:
     ):
         # Guardrail check first
         if not repo_url and not guardrail_check_se_relevance(user_input):
-            # Return updates to show the guardrail message,
-            # hide everything else or revert to original state
+            # Return updates to show the guardrail message and hide everything else.
             return (
-                # guardrail_message
+                # [0] guardrail_message: Show guardrail message
                 gr.update(
                     value="### Oops! Try asking something about software engineering. Thanks!",
                     visible=True,
                 ),
-                # shared_input
+                # [1] shared_input: clear and show
                 gr.update(value="", visible=True),
-                # repo_url
+                # [2] repo_url: clear and show
                 gr.update(value="", visible=True),
-                # user_prompt_md
+                # [3] user_prompt_md: clear and hide
                 gr.update(value="", visible=False),
-                # response_a_title
+                # [4] response_a_title: clear and hide
                 gr.update(value="", visible=False),
-                # response_b_title
+                # [5] response_b_title: clear and hide
                 gr.update(value="", visible=False),
-                # response_a
+                # [6] response_a: clear response
                 gr.update(value=""),
-                # response_b
+                # [7] response_b: clear response
                 gr.update(value=""),
-                # multi_round_inputs
+                # [8] multi_round_inputs: hide
                 gr.update(visible=False),
-                # vote_panel
+                # [9] vote_panel: hide
                 gr.update(visible=False),
-                # send_first
+                # [10] send_first: show and enable button
                 gr.update(visible=True, interactive=True),
-                # feedback
-                gr.update(interactive=False),
-                # models_state
+                # [11] feedback: enable the selection
+                gr.update(interactive=True),
+                # [12] models_state: pass state as-is
                 models_state,
-                # conversation_state
+                # [13] conversation_state: pass state as-is
                 conversation_state,
-                # timeout_popup
+                # [14] timeout_popup: hide
                 gr.update(visible=False),
-                # model_a_send
+                # [15] model_a_send: disable
                 gr.update(interactive=False),
-                # model_b_send
+                # [16] model_b_send: disable
                 gr.update(interactive=False),
-                # thanks_message
+                # [17] thanks_message: hide
                 gr.update(visible=False),
             )
-
+
         repo_info = fetch_url_content(repo_url)
-        # Combine repo-related information (if any) and user query into one prompt.
+        # Combine repository info (if available) with the user query.
         combined_user_input = (
            f"Repo-related Information: {repo_info}\n\n{user_input}"
            if repo_info
            else user_input
        )
 
-        # Dynamically select two random models
+        # Ensure that at least two models are available.
         if len(available_models) < 2:
             raise ValueError(
                 "Insufficient models in context_window.json. At least two are required."
@@ -747,26 +746,44 @@ with gr.Blocks() as app:
                 combined_user_input, "Model B", models_state, conversation_state
             )
         except TimeoutError as e:
-            # Handle the timeout by resetting components, showing a popup, and disabling inputs
+            # Handle timeout by resetting components and showing a popup.
             return (
-                gr.update(
-                    value="", interactive=False, visible=True
-                ),  # Disable shared_input
-                gr.update(
-                    value="", interactive=False, visible=True
-                ),  # Disable repo_url
-                gr.update(value="", visible=False),  # Hide user_prompt_md
-                gr.update(value="", visible=False),  # Hide Model A title
-                gr.update(value="", visible=False),  # Hide Model B title
-                gr.update(value=""),  # Clear response from Model A
-                gr.update(value=""),  # Clear response from Model B
-                gr.update(visible=False),  # Hide multi-round inputs
-                gr.update(visible=False),  # Hide vote panel
-                gr.update(visible=True, interactive=False),  # Disable submit button
-                gr.update(interactive=False),  # Disable feedback selection
+                # [0] guardrail_message: hide
+                gr.update(visible=False),
+                # [1] shared_input: disable and clear
+                gr.update(value="", interactive=False, visible=True),
+                # [2] repo_url: disable and clear
+                gr.update(value="", interactive=False, visible=True),
+                # [3] user_prompt_md: hide
+                gr.update(value="", visible=False),
+                # [4] response_a_title: hide
+                gr.update(value="", visible=False),
+                # [5] response_b_title: hide
+                gr.update(value="", visible=False),
+                # [6] response_a: clear
+                gr.update(value=""),
+                # [7] response_b: clear
+                gr.update(value=""),
+                # [8] multi_round_inputs: hide
+                gr.update(visible=False),
+                # [9] vote_panel: hide
+                gr.update(visible=False),
+                # [10] send_first: disable
+                gr.update(visible=True, interactive=False),
+                # [11] feedback: disable
+                gr.update(interactive=False),
+                # [12] models_state: pass state as-is
                 models_state,
+                # [13] conversation_state: pass state as-is
                 conversation_state,
-                gr.update(visible=True),  # Show the timeout popup
+                # [14] timeout_popup: show popup
+                gr.update(visible=True),
+                # [15] model_a_send: disable
+                gr.update(interactive=False),
+                # [16] model_b_send: disable
+                gr.update(interactive=False),
+                # [17] thanks_message: hide
+                gr.update(visible=False),
             )
         except Exception as e:
             raise gr.Error(str(e))
@@ -775,28 +792,44 @@ with gr.Blocks() as app:
         model_a_send_state = toggle_submit_button("")
         model_b_send_state = toggle_submit_button("")
 
+        # Return the updates for all 18 outputs.
         return (
-            gr.update(visible=False),  # Hide shared_input
-            gr.update(visible=False),  # Hide repo_url the same way
-            gr.update(
-                value=f"**Your Query:**\n\n{user_input}", visible=True
-            ),  # Show user_prompt_md
-            gr.update(value=f"### Model A:", visible=True),
-            gr.update(value=f"### Model B:", visible=True),
-            gr.update(value=response_a),  # Show Model A response
-            gr.update(value=response_b),  # Show Model B response
-            gr.update(visible=True),  # Show multi-round inputs
-            gr.update(visible=True),  # Show vote panel
-            gr.update(visible=False),  # Hide submit button
-            gr.update(interactive=True),  # Enable feedback selection
+            # [0] guardrail_message: hide (since no guardrail issue)
+            gr.update(visible=False),
+            # [1] shared_input: hide shared_input to prevent changes during the conversation
+            gr.update(visible=False),
+            # [2] repo_url: hide repository URL input similarly
+            gr.update(visible=False),
+            # [3] user_prompt_md: display the user's query
+            gr.update(value=f"**Your Query:**\n\n{user_input}", visible=True),
+            # [4] response_a_title: show title for Model A
+            gr.update(value="### Model A:", visible=True),
+            # [5] response_b_title: show title for Model B
+            gr.update(value="### Model B:", visible=True),
+            # [6] response_a: display Model A response
+            gr.update(value=response_a),
+            # [7] response_b: display Model B response
+            gr.update(value=response_b),
+            # [8] multi_round_inputs: show the input section for multi-round dialogues
+            gr.update(visible=True),
+            # [9] vote_panel: show vote panel
+            gr.update(visible=True),
+            # [10] send_first: hide the submit button
+            gr.update(visible=False),
+            # [11] feedback: enable the feedback selection
+            gr.update(interactive=True),
+            # [12] models_state: pass updated models_state
             models_state,
+            # [13] conversation_state: pass updated conversation_state
             conversation_state,
-            gr.update(visible=False),  # Hide the timeout popup if it was visible
-            model_a_send_state,  # Set model_a_send button state
-            model_b_send_state,  # Set model_b_send button state
-            gr.update(
-                visible=False
-            ),  # thanks_message - Make sure to return it as invisible here as well
+            # [14] timeout_popup: hide any timeout popup if visible
+            gr.update(visible=False),
+            # [15] model_a_send: set state of the model A send button
+            model_a_send_state,
+            # [16] model_b_send: set state of the model B send button
+            model_b_send_state,
+            # [17] thanks_message: hide the thank-you message
+            gr.update(visible=False),
        )
 
     # Feedback panel, initially hidden
@@ -824,11 +857,6 @@ with gr.Blocks() as app:
         try:
             # Use Hugging Face OAuth to initiate login
             HfApi()
-
-            # Wait for user authentication and get the token
-            print(
-                "Redirected to Hugging Face for authentication. Please complete the login."
-            )
             token = HfFolder.get_token()
             if not token:
                 raise Exception("Authentication token not found.")
context_window.json CHANGED
@@ -20,7 +20,6 @@
     "llama-3.1-8b": 128000,
     "llama-3.1-405b": 128000,
     "llama-3.3-70b": 128000,
-    "mistral-embed": 8000,
     "mistral-large-latest": 131000,
     "mistral-small-latest": 32000,
     "o1": 128000,