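# Gradio front end for ChatGPT-Paper-Reader: upload a PDF and an OpenAI API key
# to get a summary, then ask follow-up questions about the paper.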
import gradio as gr
import base64
from gpt_reader.pdf_reader import PaperReader
from gpt_reader.prompt import BASE_POINTS

class GUI:
    """Holds the PaperReader session shared between the upload and question tabs."""

    def __init__(self):
        self.api_key = ""
        self.session = None

    def analyse(self, api_key, pdf_file):
        # Build a reader for the uploaded PDF and return its summary.
        self.session = PaperReader(api_key, points_to_focus=BASE_POINTS)
        return self.session.read_pdf_and_summarize(pdf_file)

    def ask_question(self, question):
        if self.session is None:
            return "Please upload a PDF file first!"
        return self.session.question(question)
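
# Headless usage sketch (assumes the same PaperReader API used above):
#   reader = PaperReader("sk-...", points_to_focus=BASE_POINTS)
#   print(reader.read_pdf_and_summarize("paper.pdf"))
#   print(reader.question("Which datasets do the authors use?"))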


with gr.Blocks() as demo:
    # Embed the logo as base64 so it renders without serving a separate static file.
    with open("./logo.png", "rb") as f:
        image_data = f.read()
        image_base64 = base64.b64encode(image_data).decode("utf-8")
    title = """
        <h2 style="background-image: linear-gradient(to right, #3A5FCD, #87CEFA); -webkit-background-clip: text;
            -webkit-text-fill-color: transparent; text-align: center;">
            ChatGPT Paper Reader
        </h2>
    """
    description = f"""
    <div style="display: flex; align-items: center; justify-content: center; flex-direction: column;">
        <p style="font-size: 18px; color: #4AAAFF; text-align: center;">
        Summarize an academic paper and ask questions about it. Simply upload the PDF!
        </p>
        <div style="display: flex; align-items: center; margin-bottom: 0px;">
            <img src='data:image/png;base64,{image_base64}' width='50' height='30' style="margin-right: 5px;"/>
            <p style="font-size: 14px; color: #555;">
            Disclaimer: This web app is for demonstration purposes only and is not intended for commercial use. Contact [email protected] for the full solution.
            </p>
        </div>
    </div>
    """
    gr.Markdown(
        """
        # CHATGPT-PAPER-READER
        """)
    gr.HTML(title)
    gr.HTML(description)
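    # Two tabs: one uploads and summarizes the PDF, the other asks follow-up questions.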
    with gr.Tab("Upload PDF File"):
        pdf_input = gr.File(label="PDF File")
        api_input = gr.Textbox(label="OpenAI API Key")
        result = gr.Textbox(label="PDF Summary")
        upload_button = gr.Button("Start Analysis")
    with gr.Tab("Ask question about your PDF"):
        question_input = gr.Textbox(label="Your Question", placeholder="Authors of this paper?")
        answer = gr.Textbox(label="Answer")
        ask_button = gr.Button("Ask")
    with gr.Accordion("About this project"):
        gr.Markdown(
            """## CHATGPT-PAPER-READER📝 
            This repository provides a simple interface that utilizes the gpt-3.5-turbo 
            model to read academic papers in PDF format locally. You can use it to help you summarize papers, 
            create presentation slides, or simply fulfill tasks assigned by your supervisor.\n 
            [Github](https://github.com/talkingwallace/ChatGPT-Paper-Reader)""")

    # Share one GUI instance so the question tab reuses the PDF parsed in the upload tab.
    app = GUI()
    upload_button.click(fn=app.analyse, inputs=[api_input, pdf_input], outputs=result)
    ask_button.click(fn=app.ask_question, inputs=question_input, outputs=answer)

if __name__ == "__main__":
    demo.title = "CHATGPT-PAPER-READER"
    demo.launch()
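
# Example local run (assumed file name and dependencies; adjust to the repo layout):
#   pip install gradio openai
#   python gui.py    # then open the printed local URL in a browser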