kartheikiyer committed · e63256f
Parent(s): 780c323

require older version of chroma to fix default_tenant error
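For context: the "default_tenant" error typically surfaces when a newer chromadb release (which added a tenant/database layer to the client API) is run against code or a persisted store written for the older API; pinning chromadb==0.4.9 keeps the two in sync. A minimal sketch of the kind of client code affected, where the path and collection name are illustrative assumptions, not taken from this repo:

```python
# Minimal sketch, assuming a local persistent store; "chroma_db" and
# "papers" are illustrative names, not from this repository.
import chromadb

client = chromadb.PersistentClient(path="chroma_db")
collection = client.get_or_create_collection(name="papers")
# Works on 0.4.9; newer releases may instead raise an error that
# mentions "default_tenant" when opening an older store.
print(collection.count())
```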
.ipynb_checkpoints/app_gradio-checkpoint.py CHANGED

@@ -533,12 +533,16 @@ def compileinfo(query, atom_qns, atom_qn_ans, atom_qn_strs):
 def deep_research(question, top_k, ec):
 
     full_answer = '## ' + question
+    yield None, None
 
     gen_client = openai_llm(temperature=0,model_name='gpt-4o-mini', openai_api_key = openai_key)
     messages = [("system",df_atomic_prompt,),("human", question),]
     rscope_text = gen_client.invoke(messages).content
 
     full_answer = full_answer +' \n'+ rscope_text
+    rag_answer = {}
+    rag_answer['answer'] = full_answer
+    yield None, rag_answer
 
     rscope_messages = [("system","""In the given text, what are the main atomic questions being asked? Please answer as a concise list.""",),("human", rscope_text),]
     rscope_qns = gen_client.invoke(rscope_messages).content
@@ -563,6 +567,7 @@ def deep_research(question, top_k, ec):
         atom_qn_strs.append(linkstr)
         full_answer = full_answer +' \n### '+atom_qns[i]
         full_answer = full_answer +' \n'+smallans
+
 
     finalans, finallinks = compileinfo(question, atom_qns, atom_qn_ans, atom_qn_strs)
     full_answer = full_answer +' \n'+'### Summary:\n'+finalans
.ipynb_checkpoints/requirements-checkpoint.txt ADDED

@@ -0,0 +1,25 @@
+matplotlib
+bokeh==2.4.3
+cloudpickle
+scipy
+summa
+faiss-cpu
+langchain
+langchain_openai
+langchain_community
+langchain_core
+langchainhub
+openai
+instructor
+pydantic
+feedparser
+tiktoken
+chromadb==0.4.9
+streamlit==1.37.0
+streamlit-extras
+nltk
+cohere
+duckduckgo-search
+pytextrank
+spacy==3.7.5
+https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.7.1/en_core_web_sm-3.7.1-py3-none-any.whl
app_gradio.py CHANGED

@@ -533,12 +533,16 @@ def compileinfo(query, atom_qns, atom_qn_ans, atom_qn_strs):
 def deep_research(question, top_k, ec):
 
     full_answer = '## ' + question
+    yield None, None
 
     gen_client = openai_llm(temperature=0,model_name='gpt-4o-mini', openai_api_key = openai_key)
     messages = [("system",df_atomic_prompt,),("human", question),]
     rscope_text = gen_client.invoke(messages).content
 
     full_answer = full_answer +' \n'+ rscope_text
+    rag_answer = {}
+    rag_answer['answer'] = full_answer
+    yield None, rag_answer
 
     rscope_messages = [("system","""In the given text, what are the main atomic questions being asked? Please answer as a concise list.""",),("human", rscope_text),]
     rscope_qns = gen_client.invoke(rscope_messages).content
@@ -563,6 +567,7 @@ def deep_research(question, top_k, ec):
         atom_qn_strs.append(linkstr)
         full_answer = full_answer +' \n### '+atom_qns[i]
         full_answer = full_answer +' \n'+smallans
+
 
     finalans, finallinks = compileinfo(question, atom_qns, atom_qn_ans, atom_qn_strs)
     full_answer = full_answer +' \n'+'### Summary:\n'+finalans
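The added yield statements make deep_research a generator. Gradio treats generator callbacks as streaming functions: each yielded tuple updates the output components immediately, so the UI can show the research scope while the slower per-question retrieval is still running. A minimal sketch of that pattern, assuming a two-output layout with illustrative component names rather than this app's actual wiring:

```python
# Sketch of Gradio's generator/streaming pattern; the components and the
# fake delay are illustrative assumptions, not this app's code.
import time
import gradio as gr

def slow_answer(question):
    # First yield: give the user immediate feedback before any heavy work.
    yield None, {"answer": f"## {question}\n_working..._"}
    time.sleep(2)  # stand-in for the LLM and retrieval calls
    # Final yield: replace the placeholder with the finished answer.
    yield "done", {"answer": f"## {question}\nFinal summary goes here."}

with gr.Blocks() as demo:
    q = gr.Textbox(label="Question")
    status = gr.Textbox(label="Status")
    answer = gr.JSON(label="Answer")
    q.submit(slow_answer, inputs=q, outputs=[status, answer])

demo.launch()
```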
requirements.txt CHANGED

@@ -14,7 +14,7 @@ instructor
 pydantic
 feedparser
 tiktoken
-chromadb
+chromadb==0.4.9
 streamlit==1.37.0
 streamlit-extras
 nltk
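Assuming the app reaches chromadb through the usual langchain_community vector-store wrapper (both packages appear in requirements.txt), the pin also freezes the storage backend behind that wrapper. A sketch of that typical route, with illustrative names:

```python
# Sketch of the common langchain_community path to the pinned chromadb;
# the collection name, directory, and query are illustrative assumptions.
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma

vectorstore = Chroma(
    collection_name="arxiv_abstracts",
    persist_directory="chroma_db",
    embedding_function=OpenAIEmbeddings(),
)
docs = vectorstore.similarity_search("galaxy quenching", k=4)
```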