Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -119,14 +119,24 @@ llm = HuggingFaceHub(repo_id=repo_id,
                      "top_k":50,
                      "top_p":0.95, "eos_token_id":49155})
 
-
+prompt_template = """You are a very helpful AI assistant. Please ONLY use {context} to answer the user's input question. If you don't know the answer, just say that you don't know. DON'T try to make up an answer and do NOT go beyond the given context without the user explicitly asking you to do so!
+Question: {question}
+Helpful AI Response:
+"""
+
+PROMPT = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
+
+chain = load_qa_chain(llm=llm, chain_type="stuff", prompt=PROMPT)
+
+#chain = load_qa_chain(llm=llm, chain_type="stuff")
 
 def run_chain(user_query):
     if user_query != "" and not user_query.strip().isspace() and not user_query.isspace():
         print("Your query:\n" + user_query)
         vector_db_from_index = Pinecone.from_existing_index(index_name, hf_embeddings, namespace=namespace)
         ss_results = vector_db_from_index.similarity_search(query=user_query, namespace=namespace, k=5)
-        initial_ai_response = chain.run(input_documents=ss_results, question=user_query)
+        #initial_ai_response = chain.run(input_documents=ss_results, question=user_query)
+        initial_ai_response = chain({"input_documents": ss_results, "question": user_query}, return_only_outputs=True)
         temp_ai_response = initial_ai_response.partition('<|end|>')[0]
         final_ai_response = temp_ai_response.replace('\n', '')
         print(final_ai_response)
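For orientation, here is a minimal, self-contained sketch of the retrieval-QA pipeline this commit assembles, written against a pre-0.1 LangChain API (HuggingFaceHub, PromptTemplate, load_qa_chain, and the Pinecone vector store wrapper), since those are the names the diff uses. Everything outside the hunk is an assumption here: repo_id, index_name, namespace, hf_embeddings, the Pinecone initialization, and any model_kwargs other than the three visible ones are placeholders. One detail worth noting: calling the chain as chain({...}, return_only_outputs=True) returns a dict such as {"output_text": "..."}, not a string, so the sketch reads "output_text" before stripping the <|end|> marker; the unchanged partition/replace lines in the committed hunk still treat the result as a plain string, which is a plausible cause of the Space's "Runtime error" status.

# Hypothetical, self-contained sketch of the QA flow in app.py (pre-0.1 LangChain).
# repo_id, index_name, namespace and the Pinecone credentials below are placeholders;
# the real values live in the parts of app.py outside this diff hunk.
import os

import pinecone
from langchain.llms import HuggingFaceHub
from langchain.prompts import PromptTemplate
from langchain.chains.question_answering import load_qa_chain
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Pinecone

repo_id = "HuggingFaceH4/starchat-beta"   # placeholder; the <|end|> stripping below suggests a StarChat-style model
index_name = "my-index"                   # placeholder Pinecone index
namespace = "my-namespace"                # placeholder Pinecone namespace

# app.py presumably initializes Pinecone and the embeddings earlier; mirrored here so the sketch runs on its own.
pinecone.init(api_key=os.environ["PINECONE_API_KEY"], environment=os.environ["PINECONE_ENV"])
hf_embeddings = HuggingFaceEmbeddings()

# Only top_k / top_p / eos_token_id are visible in the hunk; other model_kwargs are omitted.
# HuggingFaceHub also expects HUGGINGFACEHUB_API_TOKEN in the environment.
llm = HuggingFaceHub(repo_id=repo_id,
                     model_kwargs={"top_k": 50, "top_p": 0.95, "eos_token_id": 49155})

prompt_template = """You are a very helpful AI assistant. Please ONLY use {context} to answer the user's input question. If you don't know the answer, just say that you don't know.
Question: {question}
Helpful AI Response:
"""
PROMPT = PromptTemplate(template=prompt_template, input_variables=["context", "question"])

# "stuff" concatenates all retrieved documents into the {context} slot of the prompt.
chain = load_qa_chain(llm=llm, chain_type="stuff", prompt=PROMPT)

def run_chain(user_query: str) -> str:
    """Retrieve the 5 most similar chunks from Pinecone and answer from them only."""
    if not user_query.strip():
        return ""
    vector_db = Pinecone.from_existing_index(index_name, hf_embeddings, namespace=namespace)
    ss_results = vector_db.similarity_search(query=user_query, namespace=namespace, k=5)
    # With return_only_outputs=True the chain returns a dict, e.g. {"output_text": "..."}.
    result = chain({"input_documents": ss_results, "question": user_query},
                   return_only_outputs=True)
    answer = result["output_text"].partition("<|end|>")[0]
    return answer.replace("\n", "").strip()

With this layout, run_chain("your question") returns one cleaned answer string; reverting to the commented-out load_qa_chain call only swaps the prompt used by the stuff chain and leaves the Pinecone retrieval path unchanged.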