Hugging Face Spaces — status: Sleeping.
Commit: "Update app.py" (Browse files).
File: app.py — CHANGED
@@ -38,6 +38,61 @@ from templates import css, bot_template, user_template
|
|
38 |
# Configure the Streamlit page; must run before any other st.* call.
st.set_page_config(page_title="GPT Streamlit Document Reasoner", layout="wide")
# Sidebar toggle read by downstream handlers to decide whether outputs are
# persisted to disk. NOTE(review): the label "πΎ" looks like mojibake of a
# floppy-disk emoji — confirm the file's intended encoding.
should_save = st.sidebar.checkbox("πΎ Save", value=True)
|
40 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
41 |
|
42 |
def link_button_with_emoji(url, title, emoji_summary):
|
43 |
emojis = ["π", "π₯", "π‘οΈ", "π©Ί", "π‘οΈ", "π¬", "π", "π§ͺ", "π¨ββοΈ", "π©ββοΈ"]
|
@@ -361,57 +416,6 @@ def readitaloud(result):
|
|
361 |
components.html(documentHTML5, width=800, height=300)
|
362 |
#return result
|
363 |
|
364 |
-
def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'):
    """Stream a chat completion to the Streamlit UI and return the reply.

    Args:
        prompt: User question, sent as the 'user' message.
        document_section: Optional document context; appended as an
            'assistant' message when non-empty.
        model_choice: OpenAI chat model name (default 'gpt-3.5-turbo').

    Returns:
        The full assistant reply assembled from the streamed chunks.
    """
    model = model_choice
    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    conversation.append({'role': 'user', 'content': prompt})
    if len(document_section) > 0:
        conversation.append({'role': 'assistant', 'content': document_section})

    start_time = time.time()
    report = []
    res_box = st.empty()
    collected_messages = []

    openai.api_key = os.getenv('OPENAI_API_KEY')
    # BUG FIX: the original hard-coded model='gpt-3.5-turbo' here, silently
    # ignoring the caller's model_choice.
    for chunk in openai.ChatCompletion.create(
            model=model,
            messages=conversation,
            temperature=0.5,
            stream=True):
        chunk_message = chunk['choices'][0]['delta']  # incremental delta dict
        collected_messages.append(chunk_message)
        content = chunk_message.get('content')
        # BUG FIX: the first/last stream deltas carry no 'content'; the
        # original appended None to `report`, which made every subsequent
        # "".join(report) raise TypeError that a bare `except:` swallowed —
        # so the live display never updated. Skip empty deltas instead.
        if content:
            report.append(content)
            res_box.markdown(f'*{"".join(report).strip()}*')

    full_reply_content = ''.join([m.get('content', '') for m in collected_messages])
    st.write("Elapsed time:")
    st.write(time.time() - start_time)
    readitaloud(full_reply_content)
    return full_reply_content
|
406 |
-
|
407 |
-
def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'):
    """Ask the chosen model about *prompt*, optionally supplying
    *file_content* as prior assistant context, and return the reply text."""
    messages = [
        {'role': 'system', 'content': 'You are a helpful assistant.'},
        {'role': 'user', 'content': prompt},
    ]
    if len(file_content) > 0:
        messages.append({'role': 'assistant', 'content': file_content})
    response = openai.ChatCompletion.create(model=model_choice, messages=messages)
    return response['choices'][0]['message']['content']
|
414 |
-
|
415 |
def extract_mime_type(file):
|
416 |
# Check if the input is a string
|
417 |
if isinstance(file, str):
|
|
|
38 |
# Configure the Streamlit page; must run before any other st.* call.
st.set_page_config(page_title="GPT Streamlit Document Reasoner", layout="wide")
# Sidebar toggle read by downstream handlers to decide whether outputs are
# persisted to disk. NOTE(review): the label "πΎ" looks like mojibake of a
# floppy-disk emoji — confirm the file's intended encoding.
should_save = st.sidebar.checkbox("πΎ Save", value=True)
|
40 |
|
41 |
+
# LLM engines for ChatCompletion and Chat with files
def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'):
    """Stream a chat completion to the Streamlit UI and return the reply.

    Args:
        prompt: User question, sent as the 'user' message.
        document_section: Optional document context; appended as an
            'assistant' message when non-empty.
        model_choice: OpenAI chat model name (default 'gpt-3.5-turbo').

    Returns:
        The full assistant reply assembled from the streamed chunks.
    """
    model = model_choice
    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    conversation.append({'role': 'user', 'content': prompt})
    if len(document_section) > 0:
        conversation.append({'role': 'assistant', 'content': document_section})

    start_time = time.time()
    report = []
    res_box = st.empty()
    collected_messages = []

    openai.api_key = os.getenv('OPENAI_API_KEY')
    # BUG FIX: the original hard-coded model='gpt-3.5-turbo' here, silently
    # ignoring the caller's model_choice.
    for chunk in openai.ChatCompletion.create(
            model=model,
            messages=conversation,
            temperature=0.5,
            stream=True):
        chunk_message = chunk['choices'][0]['delta']  # incremental delta dict
        collected_messages.append(chunk_message)
        content = chunk_message.get('content')
        # BUG FIX: the first/last stream deltas carry no 'content'; the
        # original appended None to `report`, which made every subsequent
        # "".join(report) raise TypeError that a bare `except:` swallowed —
        # so the live display never updated. Skip empty deltas instead.
        if content:
            report.append(content)
            res_box.markdown(f'*{"".join(report).strip()}*')

    full_reply_content = ''.join([m.get('content', '') for m in collected_messages])
    st.write("Elapsed time:")
    st.write(time.time() - start_time)
    readitaloud(full_reply_content)
    return full_reply_content
|
84 |
+
|
85 |
+
def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'):
    """Ask the chosen model about *prompt*, optionally supplying
    *file_content* as prior assistant context, and return the reply text."""
    messages = [
        {'role': 'system', 'content': 'You are a helpful assistant.'},
        {'role': 'user', 'content': prompt},
    ]
    if len(file_content) > 0:
        messages.append({'role': 'assistant', 'content': file_content})
    response = openai.ChatCompletion.create(model=model_choice, messages=messages)
    return response['choices'][0]['message']['content']
|
92 |
+
|
93 |
+
|
94 |
+
|
95 |
+
|
96 |
|
97 |
def link_button_with_emoji(url, title, emoji_summary):
|
98 |
emojis = ["π", "π₯", "π‘οΈ", "π©Ί", "π‘οΈ", "π¬", "π", "π§ͺ", "π¨ββοΈ", "π©ββοΈ"]
|
|
|
416 |
components.html(documentHTML5, width=800, height=300)
|
417 |
#return result
|
418 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
419 |
def extract_mime_type(file):
|
420 |
# Check if the input is a string
|
421 |
if isinstance(file, str):
|