wholewhale committed
Commit 895d964 · 1 Parent(s): 3e93b01
Files changed (1)
  1. app.py +13 -18
app.py CHANGED

@@ -1,5 +1,4 @@
 import gradio as gr
-from gradio import state
 import os
 import time
 import threading
@@ -12,11 +11,11 @@ from langchain.chains import ConversationalRetrievalChain
 
 os.environ['OPENAI_API_KEY'] = os.getenv("Your_API_Key")
 
-# Declare session state for tracking last interaction time
-last_interaction_time = state.declare("last_interaction_time", 0)
+# Global variable for tracking last interaction time
+last_interaction_time = 0
 
 def loading_pdf():
-    return "Working the upload. Also, pondering the usefulness of sporks..."
+    return "Working on the upload. Also, pondering the usefulness of sporks..."
 
 def pdf_changes(pdf_doc):
     loader = OnlinePDFLoader(pdf_doc.name)
@@ -28,10 +27,9 @@ def pdf_changes(pdf_doc):
     retriever = db.as_retriever()
     global qa
     qa = ConversationalRetrievalChain.from_llm(
-        llm=OpenAI(temperature=0.5),
-        retriever=retriever,
-        return_source_documents=False
-    )
+        llm=OpenAI(temperature=0.5),
+        retriever=retriever,
+        return_source_documents=False)
     return "Ready"
 
 def clear_data():
@@ -48,18 +46,15 @@ def add_text(history, text):
 def bot(history):
     response = infer(history[-1][0], history)
     formatted_response = "**Bot:** \n" + ' \n'.join(response.split('. '))
-    history[-1][1] = ""
-
-    for character in formatted_response:
-        history[-1][1] += character
-        time.sleep(0.05)
-        yield history
+    history[-1][1] = formatted_response
+    return history
 
 def infer(question, history):
     res = []
     for human, ai in history[:-1]:
         pair = (human, ai)
         res.append(pair)
+
     chat_history = res
     query = question
     result = qa({"question": query, "chat_history": chat_history})
@@ -67,13 +62,13 @@ def infer(question, history):
 
 def auto_clear_data():
     global qa, last_interaction_time
-    if time.time() - last_interaction_time > 600:  # 600 seconds = 10 minutes
+    if time.time() - last_interaction_time > 600:
         qa = None
 
 def periodic_clear():
     while True:
         auto_clear_data()
-        time.sleep(60)  # Check every minute
+        time.sleep(60)
 
 threading.Thread(target=periodic_clear).start()
 
@@ -85,8 +80,8 @@ title = """
 <div style="text-align: center;max-width: 700px;">
 <h1>CauseWriter Chat with PDF • OpenAI</h1>
 <p style="text-align: center;">Upload a .PDF from your computer, click the "Load PDF to LangChain" button, <br />
-when everything is ready, you can start asking questions about the pdf ;) <br />
-This version is set to store chat history, and uses OpenAI as LLM.</p>
+when everything is ready, you can start asking questions about the pdf. <br />
+This version is set to store chat history and uses OpenAI as LLM.</p>
 </div>
 """
 
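For context on the middle hunks: the diff itself only shows OnlinePDFLoader(pdf_doc.name), db.as_retriever(), the reformatted ConversationalRetrievalChain.from_llm(...) call, and the qa({"question": ..., "chat_history": ...}) invocation inside infer. The sketch below shows roughly how those pieces fit together end to end; the splitter, embeddings, and Chroma setup are assumptions about the unchanged parts of app.py (written against the legacy langchain 0.0.x imports the file appears to use), and the file name is a placeholder.

# Sketch only: how the pieces visible in this diff plug together.
# Assumes OPENAI_API_KEY is set in the environment, as app.py does above.
# Loader/splitter/vector-store details are assumptions, not taken from the commit.
from langchain.document_loaders import OnlinePDFLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.llms import OpenAI
from langchain.chains import ConversationalRetrievalChain

loader = OnlinePDFLoader("example.pdf")  # app.py passes pdf_doc.name from the Gradio upload
documents = loader.load()
texts = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(documents)
db = Chroma.from_documents(texts, OpenAIEmbeddings())
retriever = db.as_retriever()

# The construction this commit collapses onto fewer lines:
qa = ConversationalRetrievalChain.from_llm(
    llm=OpenAI(temperature=0.5),
    retriever=retriever,
    return_source_documents=False)

# infer() calls the chain the same way, passing prior (human, ai) turns as chat_history:
result = qa({"question": "What is this PDF about?", "chat_history": []})
print(result["answer"])

With return_source_documents=False the result dict carries just the answer key, which is presumably what infer returns for bot to format.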
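The other substantive change is dropping from gradio import state (Gradio has no state.declare API) in favour of a plain module-level timestamp polled by a background thread. Below is a minimal, self-contained sketch of that pattern; note that last_interaction_time starts at 0 and nothing in the hunks shown updates it, so the hypothetical touch() helper stands in for whatever the handlers would call to mark activity, and daemon=True is an assumption for clean shutdown rather than part of the commit.

import time
import threading

qa = object()              # stand-in for the ConversationalRetrievalChain held by app.py
last_interaction_time = 0  # module-level, as in the commit; no gradio "state" needed

def touch():
    """Hypothetical helper: each user-facing handler would call this to mark activity."""
    global last_interaction_time
    last_interaction_time = time.time()

def auto_clear_data():
    """Drop the expensive chain after 10 minutes (600 seconds) of inactivity."""
    global qa
    if time.time() - last_interaction_time > 600:
        qa = None

def periodic_clear():
    """Poll once a minute, mirroring the loop in app.py."""
    while True:
        auto_clear_data()
        time.sleep(60)

touch()  # treat startup as the first interaction so the chain is not cleared immediately
# The commit starts a plain Thread; daemon=True is an assumption here so the
# checker does not block interpreter shutdown.
threading.Thread(target=periodic_clear, daemon=True).start()

Because qa and the timestamp are module-level globals rather than per-session state, the timeout clears the loaded PDF for every connected user at once; that is the trade-off of replacing the removed gradio state calls with a plain global.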