Spaces:
Running
on
CPU Upgrade
Update app.py
Browse files
app.py
CHANGED
@@ -207,9 +207,10 @@ def get_central_time():
|
|
207 |
return datetime.now(central)
|
208 |
|
209 |
def format_timestamp_prefix():
|
210 |
-
"""📅 Generate a timestamp prefix
|
211 |
ct = get_central_time()
|
212 |
-
return ct.strftime("%m_%d_%y_%I_%M_%p")
|
|
|
213 |
|
214 |
def initialize_marquee_settings():
|
215 |
"""🌈 Initialize marquee defaults if needed."""
|
@@ -751,7 +752,7 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
|
|
751 |
# --- 2) Arxiv RAG
|
752 |
#st.write("Arxiv's AI this Evening is Mixtral 8x7B...")
|
753 |
st.write('Running Arxiv RAG with Claude inputs.')
|
754 |
-
st.code(q, language="python", line_numbers=True, wrap_lines=True)
|
755 |
|
756 |
client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
|
757 |
refs = client.predict(
|
@@ -769,19 +770,6 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
|
|
769 |
# api_name="/ask_llm"
|
770 |
#)
|
771 |
|
772 |
-
# --- 3) Claude API with arxiv list of papers to app.py
|
773 |
-
client = anthropic.Anthropic(api_key=anthropic_key)
|
774 |
-
user_input = q + '\n\n' + 'Use the paper list below to answer the question thinking through step by step how to create a streamlit app.py and requirements.txt for the solution that answers the questions with a working app to demonstrate.'+ '\n\n'
|
775 |
-
response = client.messages.create(
|
776 |
-
model="claude-3-sonnet-20240229",
|
777 |
-
max_tokens=1000,
|
778 |
-
messages=[
|
779 |
-
{"role": "user", "content": user_input}
|
780 |
-
])
|
781 |
-
r2 = response.content[0].text
|
782 |
-
st.write("Claude's reply 🧠:")
|
783 |
-
st.markdown(r2)
|
784 |
-
|
785 |
#result = f"### 🔎 {q}\n\n{r2}\n\n{refs}"
|
786 |
result = f"🔎 {r2}\n\n{refs}"
|
787 |
md_file, audio_file = save_qa_with_audio(q, result)
|
@@ -797,12 +785,31 @@ def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
|
|
797 |
st.markdown(paper_links)
|
798 |
|
799 |
# Then create audio for each paper
|
800 |
-
|
801 |
-
|
802 |
-
|
|
|
|
|
|
|
803 |
else:
|
804 |
st.warning("No papers found in the response.")
|
805 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
806 |
elapsed = time.time() - start
|
807 |
st.write(f"**Total Elapsed:** {elapsed:.2f} s")
|
808 |
return result
|
|
|
207 |
return datetime.now(central)
|
208 |
|
209 |
def format_timestamp_prefix():
|
210 |
+
"""📅 Generate a timestamp prefix"""
|
211 |
ct = get_central_time()
|
212 |
+
#return ct.strftime("%m_%d_%y_%I_%M_%p")
|
213 |
+
return ct.strftime("%Y%m%d_%H%M%S")
|
214 |
|
215 |
def initialize_marquee_settings():
|
216 |
"""🌈 Initialize marquee defaults if needed."""
|
|
|
752 |
# --- 2) Arxiv RAG
|
753 |
#st.write("Arxiv's AI this Evening is Mixtral 8x7B...")
|
754 |
st.write('Running Arxiv RAG with Claude inputs.')
|
755 |
+
#st.code(q, language="python", line_numbers=True, wrap_lines=True)
|
756 |
|
757 |
client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
|
758 |
refs = client.predict(
|
|
|
770 |
# api_name="/ask_llm"
|
771 |
#)
|
772 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
773 |
#result = f"### 🔎 {q}\n\n{r2}\n\n{refs}"
|
774 |
result = f"🔎 {r2}\n\n{refs}"
|
775 |
md_file, audio_file = save_qa_with_audio(q, result)
|
|
|
785 |
st.markdown(paper_links)
|
786 |
|
787 |
# Then create audio for each paper
|
788 |
+
if st.button("Generate audio file reading aloud paper title and paper summary"):
|
789 |
+
create_paper_audio_files(papers, input_question=q)
|
790 |
+
|
791 |
+
display_papers(papers, get_marquee_settings()) # scrolling marquee per paper and summary
|
792 |
+
|
793 |
+
display_papers_in_sidebar(papers) # sidebar entry per paper and summary
|
794 |
else:
|
795 |
st.warning("No papers found in the response.")
|
796 |
|
797 |
+
|
798 |
+
# --- 4) Claude API with arxiv list of papers to app.py
|
799 |
+
client = anthropic.Anthropic(api_key=anthropic_key)
|
800 |
+
user_input = q + '\n\n' + 'Use the reference papers below to answer the question by creating a python streamlit app.py and requirements.txt with python libraries for creating a single app.py application that answers the questions with working code to demonstrate.'+ '\n\n'
|
801 |
+
response = client.messages.create(
|
802 |
+
model="claude-3-sonnet-20240229",
|
803 |
+
max_tokens=1000,
|
804 |
+
messages=[
|
805 |
+
{"role": "user", "content": user_input}
|
806 |
+
])
|
807 |
+
r2 = response.content[0].text
|
808 |
+
st.write("Claude's reply 🧠:")
|
809 |
+
st.markdown(r2)
|
810 |
+
|
811 |
+
|
812 |
+
|
813 |
elapsed = time.time() - start
|
814 |
st.write(f"**Total Elapsed:** {elapsed:.2f} s")
|
815 |
return result
|