Cartinoe5930 committed on
Commit aac308a · 1 Parent(s): e71eda5

Update app.py

Files changed (1)
  1. app.py +33 -6
app.py CHANGED
@@ -1,4 +1,9 @@
 import gradio as gr
+import json
+import requests
+import openai
+
+
 
 def response_print(model_list, response_list):
     answer = ""
@@ -8,12 +13,34 @@ def response_print(model_list, response_list):
     answer = answer + f"# {model_list[idx]}'s response: {cot if response_list else None}\n"
     return answer
 
-demo = gr.Interface(
-    response_print,
-    [
+TITLE = """<h1 align="center">LLM Agora 🗣️🦙</h1>"""
+
+INTRODUCTION_TEXT = """
+The **LLM Agora 🗣️🦙** aims to improve the quality of open-source LMs' responses through the debate & revision process introduced in [Improving Factuality and Reasoning in Language Models through Multiagent Debate](https://arxiv.org/abs/2305.14325).
+
+Did you know? 🤔 **LLMs can also improve their responses by debating with other LLMs**! 😮 We applied this concept to several open-source LMs to verify that open-source models, not only proprietary ones, can sufficiently improve their responses through discussion. 🤗
+For more details, please refer to the GitHub Repository below.
+
+You can use LLM Agora with your own questions if an open-source LM's response is not satisfactory and you want to improve its quality!
+The Math, GSM8K, and MMLU tabs show the results of the experiment; for inference, please use the 'Inference' tab.
+
+Please see the [GitHub Repository](https://github.com/gauss5930/LLM-Agora) for more specific information!
+"""
+
+with gr.Blocks() as demo:
+    gr.HTML(TITLE)
+    gr.Markdown(INTRODUCTION_TEXT)
+
+    with gr.Tab("Inference"):
         gr.CheckboxGroup(["Llama2", "Alpaca", "Vicuna", "Koala", "Falcon", "Baize", "WizardLM", "Orca", "phi-1.5"], label="Model Selection", info="Choose 3 LMs to participate in LLM Agora."),
         gr.Checkbox(label="CoT", info="Do you want to use CoT for inference?")
-    ],
-    "text",
-)
+
+    with gr.Tab("Math"):
+        text
+
+    with gr.Tab("GSM8K"):
+
+    with gr.Tab("MMLU"):
+
+
 demo.launch()
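
The new Blocks layout in this commit is still a skeleton: the Inference tab's inputs are not assigned to variables or connected to a handler, the Math tab holds a bare `text` placeholder, and the GSM8K and MMLU tabs have empty bodies, which Python rejects as a SyntaxError. Below is a minimal sketch of one way the layout could be completed. It is not part of the commit: the submit button, the output `gr.Markdown` component, the placeholder tab text, and the simplified `response_print` body are assumptions; only the model list, the tab names, and the component labels come from the diff above.

```python
import gradio as gr

def response_print(model_list, use_cot):
    # Simplified stand-in for the commit's response_print: list each selected
    # model together with the requested CoT setting.
    answer = ""
    for idx in range(len(model_list)):
        answer += f"# {model_list[idx]}'s response: {'CoT' if use_cot else 'No CoT'}\n"
    return answer

with gr.Blocks() as demo:
    with gr.Tab("Inference"):
        models = gr.CheckboxGroup(
            ["Llama2", "Alpaca", "Vicuna", "Koala", "Falcon", "Baize", "WizardLM", "Orca", "phi-1.5"],
            label="Model Selection",
            info="Choose 3 LMs to participate in LLM Agora.",
        )
        use_cot = gr.Checkbox(label="CoT", info="Do you want to use CoT for inference?")
        submit = gr.Button("Submit")  # hypothetical trigger; the commit has none yet
        output = gr.Markdown()        # hypothetical output component
        submit.click(response_print, inputs=[models, use_cot], outputs=output)

    # The result tabs are empty in the commit; placeholder Markdown keeps the file runnable.
    with gr.Tab("Math"):
        gr.Markdown("Math experiment results will go here.")
    with gr.Tab("GSM8K"):
        gr.Markdown("GSM8K experiment results will go here.")
    with gr.Tab("MMLU"):
        gr.Markdown("MMLU experiment results will go here.")

demo.launch()
```

With a wiring like this, clicking Submit in the Inference tab passes the selected models and the CoT flag to `response_print` and renders its formatted string in the output area, which is the shape an eventual inference handler would need to fit.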