Spaces: Running on CPU Upgrade
Update app.py
app.py CHANGED
@@ -650,7 +650,101 @@ def create_zip_of_files(md_files, mp3_files, wav_files, input_question):
 # 7. MAIN AI LOGIC: LOOKUP & TAB HANDLERS
 # ─────────────────────────────────────────────────────────
 
-def perform_ai_lookup(
+
+def perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
+                      titles_summary=True, full_audio=False, useArxiv=False):
+    """Main routine that uses Anthropic (Claude) + Gradio ArXiv RAG pipeline."""
+    start = time.time()
+    ai_constitution = """
+    You are a medical and machine learning review board expert and an expert in Streamlit, Python, and HTML5. Using the paper list below, think through step by step how to create a Streamlit app.py and requirements.txt that answer the question with a working demo app.
+    """
+
+    # --- 1) Claude API
+    client = anthropic.Anthropic(api_key=anthropic_key)
+    user_input = q
+    response = client.messages.create(
+        model="claude-3-sonnet-20240229",
+        max_tokens=1000,
+        messages=[
+            {"role": "user", "content": user_input}
+        ])
+    st.write("Claude's reply 🧠:")
+    st.markdown(response.content[0].text)
+
+    # Save & produce audio
+    result = response.content[0].text
+    create_file(q, result)
+    md_file, audio_file = save_qa_with_audio(q, result)
+    st.subheader("🔊 Main Response Audio")
+    play_and_download_audio(audio_file, st.session_state['audio_format'])
+
+
+    #useArxiv = st.checkbox("Search Arxiv for Research Answer", value=False)
+    if useArxiv:
+        # --- 2) Arxiv RAG
+        #st.write("Arxiv's AI this Evening is Mixtral 8x7B...")
+        st.write('Running Arxiv RAG with Claude inputs.')
+        client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+        refs = client.predict(
+            q,
+            10,
+            "Semantic Search",
+            "mistralai/Mixtral-8x7B-Instruct-v0.1",
+            api_name="/update_with_rag_md"
+        )[0]
+
+        #r2 = client.predict(
+        #    q,
+        #    "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        #    True,
+        #    api_name="/ask_llm"
+        #)
+
+        # --- 3) Claude API again, now with the arxiv paper list, to draft app.py
+        client = anthropic.Anthropic(api_key=anthropic_key)
+        user_input = q + '\n\n' + 'Use the paper list below to answer the question, thinking through step by step how to create a Streamlit app.py and requirements.txt for a working demo app.' + '\n\n' + refs  # append refs so the "paper list below" is actually present
+        response = client.messages.create(
+            model="claude-3-sonnet-20240229",
+            max_tokens=1000,
+            messages=[
+                {"role": "user", "content": user_input}
+            ])
+        r2 = response.content[0].text
+        st.write("Claude's reply 🧠:")
+        st.markdown(r2)
+
+        #result = f"### 🔎 {q}\n\n{r2}\n\n{refs}"
+        result = f"🔎 {r2}\n\n{refs}"
+        md_file, audio_file = save_qa_with_audio(q, result)
+        st.subheader("🔊 Main Response Audio")
+        play_and_download_audio(audio_file, st.session_state['audio_format'])
+
+        # --- 4) Parse + handle papers
+        papers = parse_arxiv_refs(refs)
+        if papers:
+            # Create minimal links page first
+            paper_links = create_paper_links_md(papers)
+            links_file = create_file(q, paper_links, "md")
+            st.markdown(paper_links)
+
+            # Then create audio for each paper
+            create_paper_audio_files(papers, input_question=q)
+            display_papers(papers, get_marquee_settings())
+            display_papers_in_sidebar(papers)
+        else:
+            st.warning("No papers found in the response.")
+
+    elapsed = time.time() - start
+    st.write(f"**Total Elapsed:** {elapsed:.2f} s")
+    return result
+
+
+
+
+
+
+
+def perform_ai_lookup_old(
     q,
     vocal_summary=True,
     extended_refs=False,
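For reviewers who want to try the new pipeline outside Streamlit, here is a minimal standalone sketch of the two-step flow this hunk adds. It assumes `anthropic` and `gradio_client` are installed, that `ANTHROPIC_API_KEY` is set in the environment, and that the Space's `/update_with_rag_md` endpoint takes the argument order shown in the diff; the file and audio helpers are omitted.

```python
# Minimal sketch of the Claude + ArXiv RAG pipeline added above.
import os

import anthropic
from gradio_client import Client


def lookup(q: str) -> str:
    # 1) Retrieve a markdown list of related papers from the RAG Space.
    rag = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    refs = rag.predict(
        q, 10, "Semantic Search",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        api_name="/update_with_rag_md",
    )[0]

    # 2) Ask Claude to answer with the retrieved paper list in the prompt,
    #    so the model can actually see the papers it is told to use.
    client = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
    response = client.messages.create(
        model="claude-3-sonnet-20240229",
        max_tokens=1000,
        messages=[{"role": "user",
                   "content": f"{q}\n\nPaper list:\n{refs}"}],
    )
    return response.content[0].text


if __name__ == "__main__":
    print(lookup("What is retrieval-augmented generation?"))
```

Like the `user_input` line in the hunk, the sketch appends `refs` to the prompt; without that, the prompt's "paper list below" refers to nothing.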
@@ -698,11 +792,6 @@ def perform_ai_lookup(
     end = time.time()
     st.write(f"**Elapsed:** {end - start:.2f}s")
 
-
-    # Try async run from top
-    st.markdown('# Async run:')
-    asyncio.run(process_voice_input(q + result_text))
-
     return result_text
 
 async def process_voice_input(text):
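The removed lines ran voice processing on every single lookup via a bare `asyncio.run(...)` at the end of the function. If that behavior is ever wanted back, a sketch that makes it opt-in; the `enable_voice` flag and the stub coroutine are illustrative, not part of the commit:

```python
import asyncio


async def process_voice_input(text: str) -> None:
    """Stub standing in for the real coroutine defined later in app.py."""
    await asyncio.sleep(0)


def maybe_process_voice(q: str, result_text: str,
                        enable_voice: bool = False) -> None:
    # Hypothetical opt-in flag; the commit simply removes the unconditional call.
    if enable_voice:
        # asyncio.run() is fine in a plain Streamlit script thread,
        # where no event loop is already running.
        asyncio.run(process_voice_input(q + result_text))
```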
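Finally, a sketch of how the new entry point might be wired to the UI, reinstating the checkbox the first hunk leaves commented out. Only the checkbox label comes from the diff; the surrounding widgets are assumptions:

```python
import streamlit as st

# perform_ai_lookup is the function added in the first hunk above.
q = st.text_input("Ask a question")
use_arxiv = st.checkbox("Search Arxiv for Research Answer", value=False)
if st.button("Run") and q:
    perform_ai_lookup(q, vocal_summary=True, extended_refs=False,
                      titles_summary=True, full_audio=False,
                      useArxiv=use_arxiv)
```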