Update app.py
app.py
CHANGED
@@ -12,7 +12,7 @@ from langchain.chains import LLMChain
 
 # https://colab.research.google.com/drive/1hrS6_g14EcOD4ezwSGlGX2zxJegX5uNX#scrollTo=NUwUR9U7qkld
 llm_hf = HuggingFaceHub(
-    repo_id="
+    repo_id="HenryJJ/vincua-13b",
     model_kwargs={"temperature":0.9 }
 )
 
@@ -32,7 +32,7 @@ fact_extraction_prompt = PromptTemplate(
     template="Extract the key facts out of this text. Don't include opinions. Give each fact a number and keep them short sentences. :\n\n {text_input}"
 )
 
-fact_extraction_chain = LLMChain(llm=
+fact_extraction_chain = LLMChain(llm=llm_hf, prompt=fact_extraction_prompt)
 
 facts = fact_extraction_chain.run(text + " " +output_question_1)
 
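For context, here is a minimal self-contained sketch of the fact-extraction pipeline this commit completes. It assumes the legacy LangChain API in use at the time (HuggingFaceHub from langchain.llms, LLMChain) and a HUGGINGFACEHUB_API_TOKEN environment variable; the sample_text placeholder is an assumption, since the diff's text and output_question_1 variables are defined elsewhere in app.py.

# Sketch of the completed pipeline, assuming legacy LangChain imports.
# HuggingFaceHub reads HUGGINGFACEHUB_API_TOKEN from the environment.
from langchain.llms import HuggingFaceHub
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# Remote Hugging Face model named in the commit; temperature as in app.py.
llm_hf = HuggingFaceHub(
    repo_id="HenryJJ/vincua-13b",
    model_kwargs={"temperature": 0.9},
)

# Prompt copied from the diff; {text_input} is the single input variable.
fact_extraction_prompt = PromptTemplate(
    input_variables=["text_input"],
    template=(
        "Extract the key facts out of this text. Don't include opinions. "
        "Give each fact a number and keep them short sentences. :\n\n {text_input}"
    ),
)

# The line this commit fixes: wire the LLM and prompt into a chain.
fact_extraction_chain = LLMChain(llm=llm_hf, prompt=fact_extraction_prompt)

# Hypothetical placeholder; app.py passes text + " " + output_question_1,
# both defined elsewhere in the file.
sample_text = "LangChain is a framework for building LLM applications."
facts = fact_extraction_chain.run(sample_text)
print(facts)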