Update functions.py
functions.py CHANGED (+33 -1)
@@ -540,6 +540,38 @@ def format_google_results(google_results):
     return formatted_documents


+def QA_chain(llm):
+    """
+    Creates a question-answering chain using the provided language model.
+
+    Args:
+        llm: The language model to use for generating answers.
+
+    Returns:
+        An LLMChain configured with the question-answering prompt and the provided model.
+    """
+    # Define the prompt template
+    prompt = PromptTemplate(
+        template="""You are an assistant for question-answering tasks.
+
+        Use the following pieces of retrieved documents to answer the question. If you don't know the answer, just say that you don't know.
+        Do not repeat yourself!
+        Be informative and concise.
+
+        Question: {question}
+
+        Documents: {documents}
+
+        Answer:
+        """,
+        input_variables=["question", "documents"],
+    )
+
+    # Create and return the question-answering chain
+    return LLMChain(prompt=prompt, llm=llm, output_parser=StrOutputParser())
+
+
+
 def grade_generation_v_documents_and_question(state, hallucination_grader, answer_grader):
     """
     Determines whether the generation is grounded in the document and answers the question.

@@ -627,7 +659,7 @@ def generate(state):
     """
     question = state["question"]
     documents = state["documents"]
-    generation =
+    generation = QA_chain.invoke({"documents": documents, "question": question})
     steps = state["steps"]
     steps.append("generate_answer")
     generation_count = state["generation_count"]
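Note on the new call site: as committed, `QA_chain.invoke(...)` calls `.invoke` on the factory function object itself, which would raise AttributeError at runtime; `QA_chain(llm)` returns the chain, and that returned object is what exposes `.invoke`. A minimal sketch of the presumably intended wiring, assuming an `llm` object is configured elsewhere in the Space (the name is a placeholder):

    # Sketch only: QA_chain is the factory added in the diff above, and
    # `llm` stands in for whatever model the app configures elsewhere.
    qa_chain = QA_chain(llm)  # build the chain once, not on every call

    def generate(state):
        question = state["question"]
        documents = state["documents"]
        # LLMChain.invoke returns a dict of its inputs plus a "text" key
        # holding the model's answer, so the bare string lives under "text".
        generation = qa_chain.invoke({"documents": documents, "question": question})["text"]
        steps = state["steps"]
        steps.append("generate_answer")
        generation_count = state["generation_count"]
        ...  # remainder of generate() unchanged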
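Separately, `LLMChain` is deprecated in recent LangChain releases in favor of LCEL composition. A sketch of the same chain written as an LCEL pipeline, assuming current `langchain_core` imports; `invoke` then returns the answer string directly rather than a dict:

    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.prompts import PromptTemplate

    def QA_chain(llm):
        """Same prompt as in the diff, composed as prompt -> llm -> string parser."""
        prompt = PromptTemplate(
            template="""You are an assistant for question-answering tasks.

            Use the following pieces of retrieved documents to answer the question. If you don't know the answer, just say that you don't know.
            Do not repeat yourself!
            Be informative and concise.

            Question: {question}

            Documents: {documents}

            Answer:
            """,
            input_variables=["question", "documents"],
        )
        # The | operator chains runnables; StrOutputParser unwraps the
        # model output to a plain string, so invoke() returns the answer text.
        return prompt | llm | StrOutputParser()

    # usage: generation = QA_chain(llm).invoke({"documents": documents, "question": question})

The LCEL form also removes the dict-unwrapping step at the call site, which is why newer LangChain code tends to prefer it over LLMChain.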