Joshua Sundance Bailey committed
Commit c2ef570 · 1 Parent(s): 8106321

remove commented blocks; fix parameter names
langchain-streamlit-demo/app.py
CHANGED
@@ -323,7 +323,7 @@ with sidebar:
     if provider_api_key:
         if st.session_state.provider == "OpenAI":
             st.session_state.llm = ChatOpenAI(
-
+                model_name="test",
                 openai_api_key=provider_api_key,
                 temperature=temperature,
                 streaming=True,
@@ -331,7 +331,7 @@ if provider_api_key:
             )
         elif st.session_state.provider == "Anthropic":
             st.session_state.llm = ChatAnthropic(
-
+                model=model,
                 anthropic_api_key=provider_api_key,
                 temperature=temperature,
                 streaming=True,
@@ -339,7 +339,7 @@ if provider_api_key:
             )
         elif st.session_state.provider == "Anyscale Endpoints":
             st.session_state.llm = ChatAnyscale(
-
+                model_name=model,
                 anyscale_api_key=provider_api_key,
                 temperature=temperature,
                 streaming=True,
@@ -365,7 +365,6 @@ if st.session_state.llm:
         if document_chat_chain_type == "Summarization":
             st.session_state.doc_chain = "summarization"
         elif document_chat_chain_type == "Q&A Generation":
-            # st.session_state.doc_chain = get_qa_gen_chain(st.session_state.llm)
             st.session_state.doc_chain = get_rag_qa_gen_chain(
                 st.session_state.retriever,
                 st.session_state.llm,
@@ -440,16 +439,7 @@ if st.session_state.llm:
             )
             if st.session_state.provider == "Anthropic":
                 config["max_concurrency"] = 5
-            # raw_results = st.session_state.doc_chain.batch(
-            #     [
-            #         {"context": doc.page_content, "prompt": prompt}
-            #         for doc in st.session_state.texts
-            #     ],
-            #     config,
-            # )
             raw_results = st.session_state.doc_chain.invoke(prompt, config)
-            # print(raw_results)
-            # results = combine_qa_pair_lists(raw_results).QuestionAnswerPairs
             results = raw_results.QuestionAnswerPairs

             def _to_str(idx, qap):
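The three constructor calls patched above take differently named model parameters: ChatOpenAI and ChatAnyscale expect `model_name`, while ChatAnthropic expects `model`, which is the mismatch behind "fix parameter names". A minimal standalone sketch of the same branch, assuming langchain 0.0.x import paths and a hypothetical `get_llm` helper not present in the repo (the diff pins `model_name="test"` for OpenAI; the sketch assumes the selected model id is passed for all three):

```python
# Hypothetical helper mirroring the provider branch in app.py.
from langchain.chat_models import ChatAnthropic, ChatAnyscale, ChatOpenAI


def get_llm(provider: str, model: str, api_key: str, temperature: float):
    # ChatOpenAI and ChatAnyscale name the parameter `model_name`;
    # ChatAnthropic names it `model` -- the mismatch this commit fixes.
    if provider == "OpenAI":
        return ChatOpenAI(
            model_name=model,  # the diff pins "test" here; a real model id is assumed
            openai_api_key=api_key,
            temperature=temperature,
            streaming=True,
        )
    if provider == "Anthropic":
        return ChatAnthropic(
            model=model,
            anthropic_api_key=api_key,
            temperature=temperature,
            streaming=True,
        )
    if provider == "Anyscale Endpoints":
        return ChatAnyscale(
            model_name=model,
            anyscale_api_key=api_key,
            temperature=temperature,
            streaming=True,
        )
    raise ValueError(f"Unsupported provider: {provider!r}")
```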
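The final app.py hunk drops the commented-out per-document `batch` call in favor of a single `invoke` on the retrieval-backed chain. A minimal sketch of that call shape, assuming `doc_chain` is the runnable returned by `get_rag_qa_gen_chain` and that `config` is a plain LCEL config dict (only the `max_concurrency` entry is attested by the diff):

```python
from typing import Any


def generate_qa_pairs(doc_chain: Any, provider: str, prompt: str):
    """Invoke the RAG Q&A-generation chain once, as app.py now does."""
    config: dict = {}  # LCEL accepts a plain dict as the run config
    if provider == "Anthropic":
        # Kept from the old batched version: caps parallel LLM calls
        # whenever the chain does run work concurrently.
        config["max_concurrency"] = 5
    # A single invoke replaces the commented-out per-document batch();
    # the retriever inside the chain supplies the document context.
    raw_results = doc_chain.invoke(prompt, config)
    return raw_results.QuestionAnswerPairs  # parsed pydantic output
```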
langchain-streamlit-demo/qagen.py
CHANGED
@@ -64,12 +64,6 @@ def combine_qa_pair_lists(
     )


-# def get_qa_gen_chain(llm: BaseLanguageModel) -> RunnableSequence:
-#     return (
-#         CHAT_PROMPT | llm | OutputFixingParser.from_llm(llm=llm, parser=PYDANTIC_PARSER)
-#     )
-
-
 def get_rag_qa_gen_chain(
     retriever: BaseRetriever,
     llm: BaseLanguageModel,
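The surviving `get_rag_qa_gen_chain` composes retrieval onto the same prompt → llm → parser pipeline the deleted `get_qa_gen_chain` used. A self-contained sketch under assumed definitions: the prompt wording and the pydantic models are illustrative stand-ins, since only the names `CHAT_PROMPT`, `PYDANTIC_PARSER`, and the `QuestionAnswerPairs` field are attested by the diffs:

```python
from typing import List

from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import ChatPromptTemplate
from langchain.pydantic_v1 import BaseModel  # langchain 0.0.x expects v1 models
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.retriever import BaseRetriever
from langchain.schema.runnable import RunnablePassthrough


class QuestionAnswerPair(BaseModel):
    question: str  # field names are assumptions
    answer: str


class QuestionAnswerPairList(BaseModel):
    # Field name matches app.py's `raw_results.QuestionAnswerPairs`
    QuestionAnswerPairs: List[QuestionAnswerPair]


PYDANTIC_PARSER = PydanticOutputParser(pydantic_object=QuestionAnswerPairList)

# Illustrative prompt; the real CHAT_PROMPT lives in qagen.py.
CHAT_PROMPT = ChatPromptTemplate.from_template(
    "Given this context:\n{context}\n\n"
    "and this request:\n{prompt}\n\n"
    "generate question/answer pairs.\n{format_instructions}"
).partial(format_instructions=PYDANTIC_PARSER.get_format_instructions())


def get_rag_qa_gen_chain(retriever: BaseRetriever, llm: BaseLanguageModel):
    # The retriever fills {context}, the user's raw input passes through
    # as {prompt}, and the parser turns the completion into pydantic objects.
    return (
        {"context": retriever, "prompt": RunnablePassthrough()}
        | CHAT_PROMPT
        | llm
        | PYDANTIC_PARSER
    )
```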