awacke1 commited on
Commit
baff181
·
1 Parent(s): a6df275

Update backupapp.py

Browse files
Files changed (1) hide show
  1. backupapp.py +32 -18
backupapp.py CHANGED
@@ -18,7 +18,6 @@ from xml.etree import ElementTree as ET
18
  from bs4 import BeautifulSoup
19
  from collections import deque
20
  from audio_recorder_streamlit import audio_recorder
21
-
22
  from dotenv import load_dotenv
23
  from PyPDF2 import PdfReader
24
  from langchain.text_splitter import CharacterTextSplitter
@@ -29,7 +28,9 @@ from langchain.memory import ConversationBufferMemory
29
  from langchain.chains import ConversationalRetrievalChain
30
  from templates import css, bot_template, user_template
31
 
32
-
 
 
33
 
34
  def generate_filename(prompt, file_type):
35
  central = pytz.timezone('US/Central')
@@ -53,7 +54,10 @@ def transcribe_audio(openai_key, file_path, model):
53
  #st.write('Responses:')
54
  #st.write(chatResponse)
55
  filename = generate_filename(transcript, 'txt')
56
- create_file(filename, transcript, chatResponse)
 
 
 
57
  return transcript
58
  else:
59
  st.write(response.json())
@@ -70,7 +74,9 @@ def save_and_play_audio(audio_recorder):
70
  return filename
71
  return None
72
 
73
- def create_file(filename, prompt, response):
 
 
74
  if filename.endswith(".txt"):
75
  with open(filename, 'w') as file:
76
  file.write(f"{prompt}\n{response}")
@@ -273,8 +279,10 @@ def process_user_input(user_question):
273
  st.write(template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
274
  # Save file output from PDF query results
275
  filename = generate_filename(user_question, 'txt')
276
- create_file(filename, user_question, message.content)
277
-
 
 
278
  #st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
279
 
280
  def divide_prompt(prompt, max_length):
@@ -294,21 +302,19 @@ def divide_prompt(prompt, max_length):
294
  return chunks
295
 
296
  def main():
297
- # Sidebar and global
298
  openai.api_key = os.getenv('OPENAI_API_KEY')
299
- st.set_page_config(page_title="GPT Streamlit Document Reasoner",layout="wide")
300
 
301
  # File type for output, model choice
302
- menu = ["txt", "htm", "xlsx", "csv", "md", "py"] #619
303
  choice = st.sidebar.selectbox("Output File Type:", menu)
304
  model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
305
-
306
  # Audio, transcribe, GPT:
307
  filename = save_and_play_audio(audio_recorder)
308
  if filename is not None:
309
  transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
310
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
311
- filename=None # since transcription is finished next time just use the saved transcript
312
 
313
  # prompt interfaces
314
  user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
@@ -318,9 +324,11 @@ def main():
318
  with collength:
319
  max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
320
  with colupload:
321
- uploaded_file = st.file_uploader("Add a file for context:", type=["pdf", "xml", "json", "xlsx","csv","html", "htm", "md", "txt"])
 
322
 
323
  # Document section chat
 
324
  document_sections = deque()
325
  document_responses = {}
326
  if uploaded_file is not None:
@@ -343,7 +351,7 @@ def main():
343
  st.write(response)
344
  document_responses[i] = response
345
  filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
346
- create_file(filename, user_prompt, response)
347
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
348
 
349
  if st.button('💬 Chat'):
@@ -367,14 +375,20 @@ def main():
367
  st.write(response)
368
 
369
  filename = generate_filename(user_prompt, choice)
370
- create_file(filename, user_prompt, response)
371
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
372
 
373
  all_files = glob.glob("*.*")
374
  all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 20] # exclude files with short names
375
  all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by file type and file name in descending order
376
 
377
- # sidebar of files
 
 
 
 
 
 
378
  file_contents=''
379
  next_action=''
380
  for file in all_files:
@@ -411,7 +425,7 @@ def main():
411
  st.write('Reasoning with your inputs...')
412
  response = chat_with_model(user_prompt, file_contents, model_choice)
413
  filename = generate_filename(file_contents, choice)
414
- create_file(filename, file_contents, response)
415
 
416
  st.experimental_rerun()
417
  #st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
@@ -439,5 +453,5 @@ with st.sidebar:
439
  st.session_state.conversation = get_chain(vectorstore)
440
  st.markdown('# AI Search Index of Length:' + length + ' Created.') # add timing
441
  filename = generate_filename(raw, 'txt')
442
- create_file(filename, raw, '')
443
-
 
18
  from bs4 import BeautifulSoup
19
  from collections import deque
20
  from audio_recorder_streamlit import audio_recorder
 
21
  from dotenv import load_dotenv
22
  from PyPDF2 import PdfReader
23
  from langchain.text_splitter import CharacterTextSplitter
 
28
  from langchain.chains import ConversationalRetrievalChain
29
  from templates import css, bot_template, user_template
30
 
31
+ # page config and sidebar declares up front allow all other functions to see global class variables
32
+ st.set_page_config(page_title="GPT Streamlit Document Reasoner", layout="wide")
33
+ should_save = st.sidebar.checkbox("💾 Save")
34
 
35
  def generate_filename(prompt, file_type):
36
  central = pytz.timezone('US/Central')
 
54
  #st.write('Responses:')
55
  #st.write(chatResponse)
56
  filename = generate_filename(transcript, 'txt')
57
+ #create_file(filename, transcript, chatResponse)
58
+ response = chatResponse
59
+ user_prompt = transcript
60
+ create_file(filename, user_prompt, response, should_save)
61
  return transcript
62
  else:
63
  st.write(response.json())
 
74
  return filename
75
  return None
76
 
77
+ def create_file(filename, prompt, response, should_save=True):
78
+ if not should_save:
79
+ return
80
  if filename.endswith(".txt"):
81
  with open(filename, 'w') as file:
82
  file.write(f"{prompt}\n{response}")
 
279
  st.write(template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
280
  # Save file output from PDF query results
281
  filename = generate_filename(user_question, 'txt')
282
+ #create_file(filename, user_question, message.content)
283
+ response = message.content
284
+ user_prompt = user_question
285
+ create_file(filename, user_prompt, response, should_save)
286
  #st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
287
 
288
  def divide_prompt(prompt, max_length):
 
302
  return chunks
303
 
304
  def main():
 
305
  openai.api_key = os.getenv('OPENAI_API_KEY')
 
306
 
307
  # File type for output, model choice
308
+ menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
309
  choice = st.sidebar.selectbox("Output File Type:", menu)
310
  model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
311
+
312
  # Audio, transcribe, GPT:
313
  filename = save_and_play_audio(audio_recorder)
314
  if filename is not None:
315
  transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
316
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
317
+ filename = None
318
 
319
  # prompt interfaces
320
  user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
 
324
  with collength:
325
  max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
326
  with colupload:
327
+ uploaded_file = st.file_uploader("Add a file for context:", type=["pdf", "xml", "json", "xlsx", "csv", "html", "htm", "md", "txt"])
328
+
329
 
330
  # Document section chat
331
+
332
  document_sections = deque()
333
  document_responses = {}
334
  if uploaded_file is not None:
 
351
  st.write(response)
352
  document_responses[i] = response
353
  filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
354
+ create_file(filename, user_prompt, response, should_save)
355
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
356
 
357
  if st.button('💬 Chat'):
 
375
  st.write(response)
376
 
377
  filename = generate_filename(user_prompt, choice)
378
+ create_file(filename, user_prompt, response, should_save)
379
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
380
 
381
  all_files = glob.glob("*.*")
382
  all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 20] # exclude files with short names
383
  all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by file type and file name in descending order
384
 
385
+ # Sidebar of Files Saving History and surfacing files as context of prompts and responses
386
+ # Added "Delete All" button
387
+ if st.sidebar.button("🗑 Delete All"):
388
+ for file in all_files:
389
+ os.remove(file)
390
+ st.experimental_rerun()
391
+
392
  file_contents=''
393
  next_action=''
394
  for file in all_files:
 
425
  st.write('Reasoning with your inputs...')
426
  response = chat_with_model(user_prompt, file_contents, model_choice)
427
  filename = generate_filename(file_contents, choice)
428
+ create_file(filename, user_prompt, response, should_save)
429
 
430
  st.experimental_rerun()
431
  #st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
 
453
  st.session_state.conversation = get_chain(vectorstore)
454
  st.markdown('# AI Search Index of Length:' + length + ' Created.') # add timing
455
  filename = generate_filename(raw, 'txt')
456
+ create_file(filename, raw, '', should_save)
457
+ #create_file(filename, raw, '')