Spaces:
Sleeping
Sleeping
Commit
·
4f4b4ad
1
Parent(s):
fe99578
Update app.py
Browse files
app.py
CHANGED
@@ -29,7 +29,8 @@ with open(css_file) as f:
|
|
29 |
HUGGINGFACEHUB_API_TOKEN = os.getenv('HUGGINGFACEHUB_API_TOKEN')
|
30 |
model_id = os.getenv('model_id')
|
31 |
hf_token = os.getenv('hf_token')
|
32 |
-
repo_id
|
|
|
33 |
#HUGGINGFACEHUB_API_TOKEN = os.environ.get('HUGGINGFACEHUB_API_TOKEN')
|
34 |
#model_id = os.environ.get('model_id')
|
35 |
#hf_token = os.environ.get('hf_token')
|
@@ -50,8 +51,8 @@ llm = HuggingFaceHub(repo_id=repo_id,
|
|
50 |
"top_p":0.95, "eos_token_id":49155})
|
51 |
|
52 |
prompt_template = """
|
53 |
-
|
54 |
-
|
55 |
Question: {question}
|
56 |
Helpful AI Repsonse:
|
57 |
"""
|
@@ -161,8 +162,11 @@ with st.spinner("AI Thinking...Please wait a while to Cheers!"):
|
|
161 |
loader = TextLoader(i_file_path, encoding="utf-8")
|
162 |
loaded_documents = loader.load()
|
163 |
temp_ai_response=chain({"input_documents": loaded_documents, "question": user_question}, return_only_outputs=False)
|
164 |
-
|
165 |
-
|
166 |
-
|
|
|
|
|
|
|
167 |
st.write("AI Response:")
|
168 |
-
st.write(
|
|
|
29 |
HUGGINGFACEHUB_API_TOKEN = os.getenv('HUGGINGFACEHUB_API_TOKEN')
|
30 |
model_id = os.getenv('model_id')
|
31 |
hf_token = os.getenv('hf_token')
|
32 |
+
repo_id = os.getenv('LLM_RepoID')
|
33 |
+
|
34 |
#HUGGINGFACEHUB_API_TOKEN = os.environ.get('HUGGINGFACEHUB_API_TOKEN')
|
35 |
#model_id = os.environ.get('model_id')
|
36 |
#hf_token = os.environ.get('hf_token')
|
|
|
51 |
"top_p":0.95, "eos_token_id":49155})
|
52 |
|
53 |
prompt_template = """
|
54 |
+
You are a very helpful AI assistant. Please ONLY use {context} to answer the user's question. If you don't know the answer, just say that you don't know. DON'T try to make up an answer.
|
55 |
+
Your response should be full and detailed.
|
56 |
Question: {question}
|
57 |
Helpful AI Repsonse:
|
58 |
"""
|
|
|
162 |
loader = TextLoader(i_file_path, encoding="utf-8")
|
163 |
loaded_documents = loader.load()
|
164 |
temp_ai_response=chain({"input_documents": loaded_documents, "question": user_question}, return_only_outputs=False)
|
165 |
+
initial_ai_response=temp_ai_response['output_text']
|
166 |
+
cleaned_initial_ai_response = remove_context(initial_ai_response)
|
167 |
+
final_ai_response = cleaned_initial_ai_response.split('<|end|>\n<|system|>\n<|end|>\n<|user|>')[0].strip().replace('\n\n', '\n').replace('<|end|>', '').replace('<|user|>', '').replace('<|system|>', '').replace('<|assistant|>', '')
|
168 |
+
#temp_ai_response = temp_ai_response['output_text']
|
169 |
+
#final_ai_response=temp_ai_response.partition('<|end|>')[0]
|
170 |
+
#i_final_ai_response = final_ai_response.replace('\n', '')
|
171 |
st.write("AI Response:")
|
172 |
+
st.write(final_ai_response)
|