epochs-demos committed on
Commit
058bf5c
·
1 Parent(s): db4a040

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -57
app.py CHANGED
@@ -3,7 +3,6 @@ import base64
3
  from gpt_reader.pdf_reader import PaperReader
4
  from gpt_reader.prompt import BASE_POINTS
5
 
6
-
7
  class GUI:
8
  def __init__(self):
9
  self.api_key = ""
@@ -19,67 +18,56 @@ class GUI:
19
  return self.session.question(question)
20
 
21
 
22
-
23
- with open("./logo.png", "rb") as f:
24
- image_data = f.read()
25
- image_base64 = base64.b64encode(image_data).decode("utf-8")
26
-
27
- title = f"""
28
- <h2 style="background-image: linear-gradient(to right, #3A5FCD, #87CEFA); -webkit-background-clip: text;
29
- -webkit-text-fill-color: transparent; text-align: center;">
30
- Speech Emotion Recognition
31
- </h2>
32
  """
33
-
34
- description = f"""
35
- <div style="display: flex; align-items: center; justify-content: center; flex-direction: column;">
36
- <p style="font-size: 18px; color: #4AAAFF; text-align: center;">
37
  Extract the emotion and tone, Simply upload the audio or text!
38
- </p>
39
  <div style="display: flex; align-items: center; margin-bottom: 0px;">
40
- <img src='data:image/jpeg;base64,{image_base64}' width='50' height='30' style="margin-right: 5px;"/>
41
- <p style="font-size: 14px; color: #555;">
42
  Disclaimer: This web app is for demonstration purposes only and not intended for commercial use. Contact: [email protected] for full solution.
43
- </p>
 
44
  </div>
45
- </div>
46
- """
47
-
48
-
49
- with gr.Tab("Upload PDF File"):
50
- pdf_input = gr.File(label="PDF File")
51
- api_input = gr.Textbox(label="OpenAI API Key")
52
- result = gr.Textbox(label="PDF Summary")
53
- upload_button = gr.Button("Start Analyse")
54
- with gr.Tab("Ask question about your PDF"):
55
- question_input = gr.Textbox(label="Your Question", placeholder="Authors of this paper?")
56
- answer = gr.Textbox(label="Answer")
57
- ask_button = gr.Button("Ask")
58
- with gr.Accordion("About this project"):
59
  gr.Markdown(
60
- """## CHATGPT-PAPER-READER📝
61
- This repository provides a simple interface that utilizes the gpt-3.5-turbo
62
- model to read academic papers in PDF format locally. You can use it to help you summarize papers,
63
- create presentation slides, or simply fulfill tasks assigned by your supervisor.\n
64
- [Github](https://github.com/talkingwallace/ChatGPT-Paper-Reader)""")
65
-
66
-
67
- app = GUI()
68
- def start_analyse(api_key, pdf_file):
69
- return app.analyse(api_key, pdf_file)
70
-
71
- def start_ask(question):
72
- return app.ask_question(question)
73
-
74
- iface = gr.Interface(fn=start_analyse,inputs=["text", "file"],outputs="text",title=title,description=description)
75
- upload_button = iface.get_widget("file")
76
- upload_button.label = "Upload PDF File"
77
- iface.inputs[1].label = "Upload PDF File" # Set the label for the file input
78
-
79
- question_input = gr.inputs.Textbox(label="Your Question", placeholder="Authors of this paper?")
80
- ask_button = gr.Button("Ask")
81
 
82
- iface.add_input(question_input)
83
- iface.add_output(ask_button)
 
84
 
85
- iface.launch()
 
 
 
3
  from gpt_reader.pdf_reader import PaperReader
4
  from gpt_reader.prompt import BASE_POINTS
5
 
 
6
  class GUI:
7
  def __init__(self):
8
  self.api_key = ""
 
18
  return self.session.question(question)
19
 
20
 
21
# Gradio UI for the ChatGPT paper reader demo.
# Layout: markdown header + HTML banner, a summary tab and a Q&A tab,
# both wired to one shared GUI session created at build time.
with gr.Blocks(title="CHATGPT-PAPER-READER") as demo:  # pass title here instead of mutating demo.title later
    # Inline the logo as base64 so the HTML banner needs no static-file route.
    with open("./logo.png", "rb") as f:
        image_base64 = base64.b64encode(f.read()).decode("utf-8")

    # Plain string: nothing is interpolated, so no f-prefix (the old code used f""" needlessly).
    title = """
    <h2 style="background-image: linear-gradient(to right, #3A5FCD, #87CEFA); -webkit-background-clip: text;
    -webkit-text-fill-color: transparent; text-align: center;">
    Speech Emotion Recognition
    </h2>
    """

    # NOTE: logo.png is a PNG, so the data URI declares image/png
    # (the previous markup said image/jpeg — browsers sniffed past it, but it was wrong).
    description = f"""
    <div style="display: flex; align-items: center; justify-content: center; flex-direction: column;">
        <p style="font-size: 18px; color: #4AAAFF; text-align: center;">
            Extract the emotion and tone, Simply upload the audio or text!
        </p>
        <div style="display: flex; align-items: center; margin-bottom: 0px;">
            <img src='data:image/png;base64,{image_base64}' width='50' height='30' style="margin-right: 5px;"/>
            <p style="font-size: 14px; color: #555;">
                Disclaimer: This web app is for demonstration purposes only and not intended for commercial use. Contact: [email protected] for full solution.
            </p>
        </div>
    </div>
    """

    gr.Markdown(
        """
        # CHATGPT-PAPER-READER
        """)
    gr.HTML(title)
    gr.HTML(description)

    with gr.Tab("Upload PDF File"):
        pdf_input = gr.File(label="PDF File")
        api_input = gr.Textbox(label="OpenAI API Key")
        result = gr.Textbox(label="PDF Summary")
        upload_button = gr.Button("Start Analyse")
    with gr.Tab("Ask question about your PDF"):
        question_input = gr.Textbox(label="Your Question", placeholder="Authors of this paper?")
        answer = gr.Textbox(label="Answer")
        ask_button = gr.Button("Ask")
    with gr.Accordion("About this project"):
        gr.Markdown(
            """## CHATGPT-PAPER-READER📝
            This repository provides a simple interface that utilizes the gpt-3.5-turbo
            model to read academic papers in PDF format locally. You can use it to help you summarize papers,
            create presentation slides, or simply fulfill tasks assigned by your supervisor.\n
            [Github](https://github.com/talkingwallace/ChatGPT-Paper-Reader)""")

    # One shared session for both tabs; analyse() must run before
    # ask_question() has a paper loaded — presumably GUI enforces/errors on
    # that ordering (defined elsewhere in this file — TODO confirm).
    app = GUI()
    upload_button.click(fn=app.analyse, inputs=[api_input, pdf_input], outputs=result)
    ask_button.click(fn=app.ask_question, inputs=question_input, outputs=answer)

if __name__ == "__main__":
    demo.launch()