File size: 6,575 Bytes
2f0e211
 
 
d05ba12
2f0e211
3e93b01
2f0e211
3e93b01
2f0e211
 
4dcf9b3
 
 
 
 
 
2f0e211
e455307
 
895d964
 
d05ba12
2f0e211
41297e0
2f0e211
4dcf9b3
 
 
 
 
 
a7aa0eb
4659cc6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a7aa0eb
4659cc6
e455307
d8804c0
4659cc6
cfc65ef
4659cc6
 
 
 
cfc65ef
 
d8804c0
cfc65ef
 
d8804c0
9c04c52
a08bac4
4dcf9b3
a08bac4
4dcf9b3
a08bac4
 
2f0e211
d05ba12
 
2f0e211
 
 
 
4dcf9b3
 
 
 
 
 
 
d8804c0
 
895d964
 
00e09c1
4dcf9b3
 
 
 
 
 
 
 
 
 
 
 
 
 
2f0e211
d64fc58
cfc65ef
 
d64fc58
 
 
cfc65ef
d64fc58
cfc65ef
6360179
 
cfc65ef
d05ba12
cfc65ef
e455307
2f0e211
 
cfc65ef
2f0e211
 
b91cab8
2f0e211
4dcf9b3
 
2f0e211
 
cfc65ef
 
 
 
 
 
 
 
2f0e211
cfc65ef
 
2f0e211
cfc65ef
 
 
 
 
 
 
 
 
 
 
 
 
 
df75cae
cfc65ef
2f0e211
cfc65ef
 
 
 
4659cc6
cfc65ef
 
 
 
 
 
 
 
 
 
 
 
 
6360179
4659cc6
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
import gradio as gr
import os
import time
import threading
from langchain.document_loaders import OnlinePDFLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.llms import OpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import WebBaseLoader
from langchain.chains.summarize import load_summarize_chain
from langchain.chains.llm import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chains.combine_documents.stuff import StuffDocumentsChain

# Copy the deployment secret "Your_API_Key" into OPENAI_API_KEY, where the
# OpenAI/LangChain clients look for it.
# NOTE(review): if the "Your_API_Key" env var is unset, os.getenv returns
# None and this assignment raises TypeError at import time — confirm the
# secret name matches the hosting environment's configuration.
os.environ['OPENAI_API_KEY'] = os.getenv("Your_API_Key")

# Global variable for tracking last interaction time
# (epoch seconds of the most recent user message; 0 = no interaction yet).
last_interaction_time = 0

def loading_pdf():
    """Return the interim status message shown while a PDF upload is processed."""
    status_message = "Working on the upload. Also, pondering the usefulness of sporks..."
    return status_message

# Inside Chroma mod
def summary(self):
    """Return a one-line report on ``self.documents``.

    Reports the document count and the mean document length (in
    characters). Guards against an empty collection — the original
    raised ZeroDivisionError when ``self.documents`` was empty.
    """
    num_documents = len(self.documents)
    if num_documents == 0:
        return "Number of documents: 0, Average document length: 0"
    avg_doc_length = sum(len(doc) for doc in self.documents) / num_documents
    return f"Number of documents: {num_documents}, Average document length: {avg_doc_length}"

# Gradio state holding the most recent document summary.
# gr.State's constructor parameter is ``value``; ``initial_value`` is not
# a recognized keyword and raises TypeError on Gradio releases that
# validate kwargs.
summary_state = gr.State(value="")

# Initialize loader and load documents
def load_documents(pdf_doc):
    loader = OnlinePDFLoader(pdf_doc.name)
    return loader.load()

# Generate summary using StuffDocumentsChain
def generate_summary(documents):
    prompt_template = """Write a concise summary of the following:
    "{text}"
    CONCISE SUMMARY:"""
    prompt = PromptTemplate.from_template(prompt_template)
    llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo-16k")
    llm_chain = LLMChain(llm=llm, prompt=prompt)
    stuff_chain = StuffDocumentsChain(
        llm_chain=llm_chain, document_variable_name="text"
    )
    return stuff_chain.run(documents)

# Setup Chroma, embeddings, and retrieval
def setup_retrieval(documents):
    embeddings = OpenAIEmbeddings()
    db = Chroma.from_documents(documents, embeddings)
    retriever = db.as_retriever()
    qa = ConversationalRetrievalChain.from_llm(
        llm=OpenAI(temperature=0.2, model_name="gpt-3.5-turbo-16k", max_tokens=-1, n=2),
        retriever=retriever,
        return_source_documents=False
    )
    return db, qa

# Main function to handle PDF changes
def pdf_changes(pdf_doc):
    try:
        documents = load_documents(pdf_doc)
        global full_summary
        full_summary = generate_summary(documents)
        summary_state.value = full_summary
        global db, qa
        db, qa = setup_retrieval(documents)
        summary_box.set_value(full_summary)
        return f"Ready. Full Summary loaded."
    except Exception as e:
        return f"Error processing PDF: {str(e)}"



def clear_data():
    """Drop the QA chain and vector store so the next upload starts fresh."""
    global qa, db
    qa, db = None, None
    return "Data cleared"

def add_text(history, text):
    """Append the user's message to the chat history and clear the input box.

    Also stamps ``last_interaction_time`` so the idle sweeper knows the
    session is active. Returns ``(new_history, "")``.
    """
    global last_interaction_time
    last_interaction_time = time.time()
    return history + [(text, None)], ""

def bot(history):
    """Answer the latest user message in *history* and return the history.

    If the message mentions "summary", reply with the precomputed
    ``full_summary``; otherwise query the retrieval chain via ``infer``.
    The reply is formatted and written into ``history[-1][1]``. (The
    original returned the bare summary string in the summary branch,
    which skipped formatting and is not a valid Chatbot value.)
    """
    global full_summary
    if 'summary' in history[-1][0].lower():  # Check if the last question asks for a summary
        response = full_summary
    else:
        response = infer(history[-1][0], history)

    # Put each sentence on its own line for readability in the chat UI.
    sentences = '  \n'.join(response.split('. '))
    formatted_response = f"**Bot:**\n\n{sentences}"
    history[-1][1] = formatted_response
    return history


def infer(question, history):
    """Ask the conversational retrieval chain ``qa`` the given *question*.

    *history* is the chat transcript as (human, ai) pairs; every pair but
    the pending last one is forwarded as chat history. Failures are
    reported as an "Error querying chatbot: ..." string instead of raising.
    """
    try:
        chat_history = [(human, ai) for human, ai in history[:-1]]
        result = qa({
            "question": question,
            "chat_history": chat_history,
            "system": "This is a world-class summarizing AI, be helpful.",
        })
        return result["answer"]
    except Exception as e:
        return f"Error querying chatbot: {str(e)}"

def auto_clear_data():
    """Free the QA chain and vector store after >1000s without interaction."""
    global qa, db, last_interaction_time
    idle_seconds = time.time() - last_interaction_time
    if idle_seconds > 1000:
        qa = None
        db = None
        print("Data cleared successfully.")  # Logging

def periodic_clear():
    """Background loop: run the idle-data sweep every 1000 seconds, forever."""
    sweep_interval = 1000
    while True:
        auto_clear_data()
        time.sleep(sweep_interval)

# Run the idle-data sweeper in the background. daemon=True so this thread
# cannot keep the process alive after the Gradio app shuts down — the
# original non-daemon thread blocked interpreter exit indefinitely.
threading.Thread(target=periodic_clear, daemon=True).start()

# CSS applied to the Blocks app: center the main column and cap its width.
css = """
#col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
"""

# Static HTML header rendered at the top of the page.
title = """
<div style="text-align: center;max-width: 700px;">
    <h1>CauseWriter Chat with PDF • OpenAI</h1>
    <p style="text-align: center;">Upload a .PDF from your computer, click the "Load PDF to LangChain" button, <br />
    when everything is ready, you can start asking questions about the pdf. Limit ~11k words. <br />
    This version is set to erase chat history automatically after page timeout and uses OpenAI.</p>
</div>
"""
# Global variable for tracking last interaction time
# NOTE(review): re-initializes the same global already set near the top of
# the file — redundant but harmless.
last_interaction_time = 0
full_summary = ""  # Added global full_summary

def update_summary_box():
    """Return the latest document summary for display in ``summary_box``.

    Returns the plain string. (The original returned a dict keyed by the
    string "summary_box" — Gradio output dicts must be keyed by component
    objects, and the callback was wired with no outputs at all, so the
    summary never actually appeared in the UI.)
    """
    global full_summary
    return full_summary

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML(title)

        with gr.Column():
            pdf_doc = gr.File(label="Load a pdf", file_types=['.pdf'], type="file")
            with gr.Row():
                langchain_status = gr.Textbox(label="Status", placeholder="", interactive=False)
                load_pdf = gr.Button("Convert PDF to Magic AI language")
                clear_btn = gr.Button("Clear Data")

            # Read-only textbox that displays the generated document summary.
            summary_box = gr.Textbox(
              label="Document Summary",
              placeholder="Summary will appear here.",
              interactive=False,
              rows=5,
              elem_id="summary_box"
            )

        chatbot = gr.Chatbot([], elem_id="chatbot").style(height=450)
        question = gr.Textbox(label="Question", placeholder="Type your question and hit Enter")
        submit_btn = gr.Button("Send Message")

        # Show the interim status, process the PDF, then push the fresh
        # summary into the summary box (the .then() previously had no
        # outputs, so the summary never reached the UI).
        load_pdf.click(loading_pdf, None, langchain_status)
        load_pdf.click(pdf_changes, inputs=[pdf_doc], outputs=[langchain_status], queue=False).then(
            update_summary_box, None, [summary_box]
        )

    # Clear stored data and chat wiring: Enter key and Send button both
    # append the user message, then let the bot fill in the reply.
    clear_btn.click(clear_data, outputs=[langchain_status], queue=False)
    question.submit(add_text, [chatbot, question], [chatbot, question]).then(
        bot, chatbot, chatbot
    )
    submit_btn.click(add_text, [chatbot, question], [chatbot, question]).then(
        bot, chatbot, chatbot
    )

demo.launch()