Update app.py
app.py
CHANGED
@@ -62,14 +62,14 @@ tokenizer = AutoTokenizer.from_pretrained(llm_model)
 model = AutoModelForCausalLM.from_pretrained(llm_model)
 pipe = pipeline(model = llm_model, tokenizer = tokenizer,trust_remote_code=True, task = "question-answering", temperature=0.2)
 
-question = "How can I reverse diabetes?"
+#question = "How can I reverse diabetes?"
 #docs1 = retriever.invoke(question)
-docs1 = retriever.similarity_search(question)
-print(docs1[0].page_content)
+#docs1 = retriever.similarity_search(question)
+$print(docs1[0].page_content)
 
 import pandas as pd
-df = pd.DataFrame(docs1, columns=["text"])
-context = df.to_string()
+#df = pd.DataFrame(docs1, columns=["text"])
+#context = df.to_string()
 print(context)
 
 #print(docs1)[0]['generated_text'][-1]
@@ -81,12 +81,12 @@ print("check2")
 print("result")
 
 print("check3")
-chain = pipe(question = question,context = "Use the following information to answer the question. {context}.")
-
+#chain = pipe(question = question,context = "Use the following information to answer the question. {context}.")
+chain = pipe(question = question,context = "Use the following information to answer the question. Diabetes can be cured by eating apples.")
 
 
 print("check3A")
-print(chain)[0]['generated_text'][-1]
+#print(chain)[0]['generated_text'][-1]
 print("check3B")
 
 import gradio as gr
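For context, a minimal sketch (not part of the commit) of how the commented-out retrieval lines would normally feed the question-answering pipeline built above. It reuses the retriever, pipe, and question objects from the diff; joining the retrieved documents with newlines and prepending the instruction sentence are illustrative choices. Note that the removed pipe(...) call passed the literal placeholder "{context}" rather than the retrieved text, so this sketch concatenates the context explicitly.

# Sketch only: assumes the retriever and pipe objects defined earlier in app.py.
question = "How can I reverse diabetes?"

# similarity_search returns a list of Document objects; collect their text.
docs1 = retriever.similarity_search(question)
context = "\n".join(doc.page_content for doc in docs1)

# The question-answering pipeline takes plain strings for both arguments and
# returns a dict with 'answer', 'score', 'start', and 'end'.
result = pipe(
    question=question,
    context="Use the following information to answer the question. " + context,
)
print(result["answer"])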