Update app.py
app.py CHANGED
@@ -6,7 +6,8 @@ hftoken = os.environ["hftoken"]
 
 from langchain_huggingface import HuggingFaceEndpoint
 
-repo_id = "mistralai/Mistral-7B-Instruct-v0.3"
+# repo_id = "mistralai/Mistral-7B-Instruct-v0.3"
+repo_id = "google/gemma-2-9b-it"
 llm = HuggingFaceEndpoint(repo_id = repo_id, max_new_tokens = 128, temperature = 0.7, huggingfacehub_api_token = hftoken)
 
 from langchain_core.output_parsers import StrOutputParser
@@ -37,11 +38,22 @@ retriever = vectorstore.as_retriever()
 
 from langchain_core.prompts import ChatPromptTemplate
 
-prompt = ChatPromptTemplate.from_template("""Given the following context and a question, generate an answer based on the context only.
+# prompt = ChatPromptTemplate.from_template("""Given the following context and a question, generate an answer based on the context only.
 
-In the answer try to provide as much text as possible from "response" section in the source document context without making much changes.
-If somebody asks "Who are you?" or a similar phrase, state "I am Rishi's assistant built using a Large Language Model!"
-If the answer is not found in the context, kindly state "I don't know. Please ask Rishi on Discord. Discord Invite Link: https://discord.gg/6ezpZGeCcM. Or email at [email protected]" Don't try to make up an answer.
+# In the answer try to provide as much text as possible from "response" section in the source document context without making much changes.
+# If somebody asks "Who are you?" or a similar phrase, state "I am Rishi's assistant built using a Large Language Model!"
+# If the answer is not found in the context, kindly state "I don't know. Please ask Rishi on Discord. Discord Invite Link: https://discord.gg/6ezpZGeCcM. Or email at [email protected]" Don't try to make up an answer.
+
+# CONTEXT: {context}
+
+# QUESTION: {question}""")
+
+prompt = ChatPromptTemplate.from_template("""As an AI assistant for AIoT SMART Labs, your task is to provide accurate answers based on the given context.
+
+1. **Use the context:** Generate an answer based only on the context provided. Try to use as much text as possible from the "response" section in the source document without making significant changes.
+2. **Identify yourself:** If someone asks "Who are you?" or a similar question, reply with "I am Rishi's assistant built using a Large Language Model!"
+3. **Handle unknowns:** If you cannot find the answer in the context, state "I don't know. Please ask Rishi on Discord at https://discord.gg/6ezpZGeCcM or email [email protected]." Do not make up an answer.
+4. **Clarity and brevity:** Ensure your answers are clear and concise.
 
 CONTEXT: {context}
 
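For context on how the two changed pieces fit together, below is a minimal sketch of the kind of retrieval chain app.py appears to build around them. Only the endpoint settings and the prompt text come from this commit; everything else (the `RunnableLambda` stand-in for the retriever, the LCEL wiring, and the `rag_chain` name) is an assumption, since the rest of app.py is not shown in the diff.

```python
# Hedged sketch, not the committed app.py: only the HuggingFaceEndpoint settings
# and the prompt text are taken from this commit; the retriever stub and the
# chain wiring below are illustrative assumptions.
import os

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_huggingface import HuggingFaceEndpoint

hftoken = os.environ["hftoken"]

# Model swapped by this commit: Mistral-7B-Instruct-v0.3 -> Gemma-2-9B-it.
repo_id = "google/gemma-2-9b-it"
llm = HuggingFaceEndpoint(
    repo_id=repo_id,
    max_new_tokens=128,
    temperature=0.7,
    huggingfacehub_api_token=hftoken,
)

# Shortened form of the new prompt; the full instruction list is in the diff above.
prompt = ChatPromptTemplate.from_template(
    """As an AI assistant for AIoT SMART Labs, your task is to provide accurate answers based on the given context.

CONTEXT: {context}

QUESTION: {question}"""
)

# Stand-in for the real retriever (app.py builds it from `vectorstore.as_retriever()`,
# which is outside this diff); here it just returns placeholder context text so the
# sketch is self-contained.
retriever = RunnableLambda(lambda question: "...retrieved document text would go here...")

# Assumed LCEL composition: fetch context, fill the prompt, call the endpoint,
# and parse the completion to a plain string.
rag_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)

print(rag_chain.invoke("Who are you?"))
```

One design note on the commit itself: because the prompt keeps the same `{context}` and `{question}` placeholders, the surrounding chain does not need to change when the template text or the `repo_id` is swapped, which is why the diff can comment out the old values in place.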