# NOTE(review): removed non-Python artifact lines that preceded the code
# (a "File size" header, stray git object hashes, and a run of bare line
# numbers 1-85 left over from a bad paste/merge) — they made the file
# unparseable and carried no content.
import gradio as gr
import base64
from gpt_reader.pdf_reader import PaperReader
from gpt_reader.prompt import BASE_POINTS


class GUI:
    """Holds the PaperReader session shared by the Gradio callbacks.

    A single instance lives for the lifetime of the app; ``analyse`` creates
    the reader session and ``ask_question`` queries it.
    """

    def __init__(self):
        # No reader exists until the user uploads a PDF. Use None as the
        # "not started" sentinel instead of the ambiguous empty string.
        self.api_key = ""
        self.session = None

    def analyse(self, api_key, pdf_file):
        """Create a PaperReader for ``pdf_file`` and return its summary.

        Also remembers ``api_key`` (previously it was accepted but never
        stored) so the instance reflects the key actually in use.
        """
        self.api_key = api_key
        self.session = PaperReader(api_key, points_to_focus=BASE_POINTS)
        return self.session.read_pdf_and_summarize(pdf_file)

    def ask_question(self, question):
        """Answer ``question`` about the last uploaded PDF.

        Returns a prompt string when no PDF has been analysed yet.
        """
        if self.session is None:
            return "Please upload PDF file first!"
        return self.session.question(question)



# Inline the logo as base64 so the HTML header needs no static-file route.
with open("./logo.png", "rb") as f:
    image_base64 = base64.b64encode(f.read()).decode("utf-8")

# Gradient page title. Plain string — the original used an f-string with no
# placeholders. The text previously read "Speech Emotion Recognition", a
# copy-paste from an unrelated app; it now matches this PDF-reader app.
title = """
    <h2 style="background-image: linear-gradient(to right, #3A5FCD, #87CEFA); -webkit-background-clip: text;
        -webkit-text-fill-color: transparent; text-align: center;">
        ChatGPT Paper Reader
    </h2>
    """

# Header blurb with the inlined logo. The MIME type is image/png to match
# the actual logo.png file (it previously claimed image/jpeg).
description = f"""
<div style="display: flex; align-items: center; justify-content: center; flex-direction: column;">
    <p style="font-size: 18px; color: #4AAAFF; text-align: center;">
        Summarize and ask questions about an academic paper — simply upload the PDF!
    </p>
    <div style="display: flex; align-items: center; margin-bottom: 0px;">
        <img src='data:image/png;base64,{image_base64}' width='50' height='30' style="margin-right: 5px;"/>
        <p style="font-size: 14px; color: #555;">
            Disclaimer: This web app is for demonstration purposes only and not intended for commercial use. Contact: [email protected] for full solution.
        </p>
    </div>
</div>
"""

app = GUI()


def start_analyse(api_key, pdf_file):
    """Gradio callback: summarize the uploaded PDF with the given API key."""
    return app.analyse(api_key, pdf_file)


def start_ask(question):
    """Gradio callback: answer a question about the last uploaded PDF."""
    return app.ask_question(question)


# Layout components (gr.Tab, gr.Accordion) must be created inside a
# gr.Blocks() context; the original created them at top level, which raises
# at runtime, and then built a second, unconnected gr.Interface using
# nonexistent APIs (Interface.get_widget / add_input / add_output) and the
# removed gr.inputs namespace. One Blocks app with explicit event wiring
# replaces all of that.
with gr.Blocks(title="ChatGPT Paper Reader") as demo:
    gr.HTML(title)
    gr.HTML(description)

    with gr.Tab("Upload PDF File"):
        pdf_input = gr.File(label="PDF File")
        api_input = gr.Textbox(label="OpenAI API Key")
        result = gr.Textbox(label="PDF Summary")
        upload_button = gr.Button("Start Analyse")

    with gr.Tab("Ask question about your PDF"):
        question_input = gr.Textbox(label="Your Question", placeholder="Authors of this paper?")
        answer = gr.Textbox(label="Answer")
        ask_button = gr.Button("Ask")

    with gr.Accordion("About this project"):
        gr.Markdown(
        """## CHATGPT-PAPER-READER📝 
        This repository provides a simple interface that utilizes the gpt-3.5-turbo 
        model to read academic papers in PDF format locally. You can use it to help you summarize papers, 
        create presentation slides, or simply fulfill tasks assigned by your supervisor.\n 
        [Github](https://github.com/talkingwallace/ChatGPT-Paper-Reader)""")

    # Connect the buttons to the callbacks — the components were previously
    # defined but never wired to anything.
    upload_button.click(start_analyse, inputs=[api_input, pdf_input], outputs=result)
    ask_button.click(start_ask, inputs=question_input, outputs=answer)

demo.launch()