epochs-demos committed
Commit c791d03 · 1 Parent(s): 71de0be

Update app.py

Files changed (1)
  1. app.py  +48 -48
app.py CHANGED
@@ -20,62 +20,62 @@ class GUI:
-        with open("./logo.png", "rb") as f:
-            image_data = f.read()
-        image_base64 = base64.b64encode(image_data).decode("utf-8")

-        title = f"""
-        <h2 style="background-image: linear-gradient(to right, #3A5FCD, #87CEFA); -webkit-background-clip: text;
-        -webkit-text-fill-color: transparent; text-align: center;">
-        Speech Emotion Recognition
-        </h2>
         """

-        description = f"""
-        <div style="display: flex; align-items: center; justify-content: center; flex-direction: column;">
-            <p style="font-size: 18px; color: #4AAAFF; text-align: center;">
-                Extract the emotion and tone, Simply upload the audio or text!
             </p>
-            <div style="display: flex; align-items: center; margin-bottom: 0px;">
-                <img src='data:image/jpeg;base64,{image_base64}' width='50' height='30' style="margin-right: 5px;"/>
-                <p style="font-size: 14px; color: #555;">
-                    Disclaimer: This web app is for demonstration purposes only and not intended for commercial use. Contact: [email protected] for full solution.
-                </p>
-            </div>
         </div>
-        """


-        with gr.Tab("Upload PDF File"):
-            pdf_input = gr.File(label="PDF File")
-            api_input = gr.Textbox(label="OpenAI API Key")
-            result = gr.Textbox(label="PDF Summary")
-            upload_button = gr.Button("Start Analyse")
-        with gr.Tab("Ask question about your PDF"):
-            question_input = gr.Textbox(label="Your Question", placeholder="Authors of this paper?")
-            answer = gr.Textbox(label="Answer")
-            ask_button = gr.Button("Ask")
-        with gr.Accordion("About this project"):
-            gr.Markdown(
-                """## CHATGPT-PAPER-READER📝
-                This repository provides a simple interface that utilizes the gpt-3.5-turbo
-                model to read academic papers in PDF format locally. You can use it to help you summarize papers,
-                create presentation slides, or simply fulfill tasks assigned by your supervisor.\n
-                [Github](https://github.com/talkingwallace/ChatGPT-Paper-Reader)""")


-app = GUI()
-def start_analyse(api_key, pdf_file):
-    return app.analyse(api_key, pdf_file)

-def start_ask(question):
-    return app.ask_question(question)

-iface = gr.Interface(fn=start_analyse, inputs=["text", "file"], outputs="text", title=title, description=description)
-upload_button = iface.get_widget("file")
-upload_button.label = "Upload PDF File"
-question_input = gr.inputs.Textbox(label="Your Question", placeholder="Authors of this paper?")
-ask_button = gr.Button("Ask")
-iface.add_input(question_input)
-iface.add_output(ask_button)
-iface.launch()
+        with open("./logo.png", "rb") as f:
+            image_data = f.read()
+        image_base64 = base64.b64encode(image_data).decode("utf-8")

+        title = f"""
+        <h2 style="background-image: linear-gradient(to right, #3A5FCD, #87CEFA); -webkit-background-clip: text;
+        -webkit-text-fill-color: transparent; text-align: center;">
+        Speech Emotion Recognition
+        </h2>
         """

+        description = f"""
+        <div style="display: flex; align-items: center; justify-content: center; flex-direction: column;">
+            <p style="font-size: 18px; color: #4AAAFF; text-align: center;">
+                Extract the emotion and tone, Simply upload the audio or text!
+            </p>
+            <div style="display: flex; align-items: center; margin-bottom: 0px;">
+                <img src='data:image/jpeg;base64,{image_base64}' width='50' height='30' style="margin-right: 5px;"/>
+                <p style="font-size: 14px; color: #555;">
+                    Disclaimer: This web app is for demonstration purposes only and not intended for commercial use. Contact: [email protected] for full solution.
                 </p>
             </div>
+        </div>
+        """


+        with gr.Tab("Upload PDF File"):
+            pdf_input = gr.File(label="PDF File")
+            api_input = gr.Textbox(label="OpenAI API Key")
+            result = gr.Textbox(label="PDF Summary")
+            upload_button = gr.Button("Start Analyse")
+        with gr.Tab("Ask question about your PDF"):
+            question_input = gr.Textbox(label="Your Question", placeholder="Authors of this paper?")
+            answer = gr.Textbox(label="Answer")
+            ask_button = gr.Button("Ask")
+        with gr.Accordion("About this project"):
+            gr.Markdown(
+                """## CHATGPT-PAPER-READER📝
+                This repository provides a simple interface that utilizes the gpt-3.5-turbo
+                model to read academic papers in PDF format locally. You can use it to help you summarize papers,
+                create presentation slides, or simply fulfill tasks assigned by your supervisor.\n
+                [Github](https://github.com/talkingwallace/ChatGPT-Paper-Reader)""")


+app = GUI()
+def start_analyse(api_key, pdf_file):
+    return app.analyse(api_key, pdf_file)

+def start_ask(question):
+    return app.ask_question(question)

+iface = gr.Interface(fn=start_analyse, inputs=["text", "file"], outputs="text", title=title, description=description)
+upload_button = iface.get_widget("file")
+upload_button.label = "Upload PDF File"
+question_input = gr.inputs.Textbox(label="Your Question", placeholder="Authors of this paper?")
+ask_button = gr.Button("Ask")
+iface.add_input(question_input)
+iface.add_output(ask_button)
+iface.launch()
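
Note that the tail of both versions calls helpers that are not part of the Gradio API (`iface.get_widget`, `iface.add_input`, `iface.add_output`, and the legacy `gr.inputs` namespace), so the launch code as committed would not run. Below is a minimal sketch, not the repository's actual code, of how the same tabbed layout and button wiring could be expressed with `gr.Blocks`; the `GUI` class here is a stand-in stub for the one defined earlier in app.py, and the title/description HTML is abbreviated.

import gradio as gr


class GUI:
    # Stand-in for the GUI class in app.py; the real methods would call the
    # PDF-summarisation and question-answering logic shown in the repository.
    def analyse(self, api_key, pdf_file):
        return "summary placeholder"

    def ask_question(self, question):
        return "answer placeholder"


app = GUI()

with gr.Blocks() as demo:
    # Header HTML (the full gradient title and disclaimer strings from the diff
    # could be passed here instead of these placeholders).
    gr.HTML("<h2>Speech Emotion Recognition</h2>")

    with gr.Tab("Upload PDF File"):
        api_input = gr.Textbox(label="OpenAI API Key")
        pdf_input = gr.File(label="PDF File")
        result = gr.Textbox(label="PDF Summary")
        upload_button = gr.Button("Start Analyse")

    with gr.Tab("Ask question about your PDF"):
        question_input = gr.Textbox(label="Your Question", placeholder="Authors of this paper?")
        answer = gr.Textbox(label="Answer")
        ask_button = gr.Button("Ask")

    with gr.Accordion("About this project"):
        gr.Markdown("See the CHATGPT-PAPER-READER notes above.")

    # Wire each button directly to the corresponding GUI method instead of the
    # non-existent iface.add_input / iface.add_output calls.
    upload_button.click(app.analyse, inputs=[api_input, pdf_input], outputs=result)
    ask_button.click(app.ask_question, inputs=question_input, outputs=answer)

demo.launch()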