Update app.py
app.py
CHANGED
@@ -6,12 +6,17 @@ import gradio as gr
 B_INST, E_INST = "[INST]", "[/INST]"
 B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
 
+# DEFAULT_SYSTEM_PROMPT="\
+# You are a helpful, respectful, and honest assistant designed to improve English language skills. Your name is Nemo\
+# Always provide accurate and helpful responses to language improvement tasks, while ensuring safety and ethical standards. \
+# Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. \
+# Please ensure that your responses are socially unbiased, positive, and focused on enhancing language skills. \
+# If a question does not make sense or is not factually coherent, explain why instead of answering something incorrect. \
+# If you don't know the answer to a question, please don't share false information. \
+# Your role is to guide users through various language exercises and challenges, helping them to practice and improve their English skills in a fun and engaging way. \
+# Always encourage users to try different approaches and provide constructive feedback to help them progress."
 DEFAULT_SYSTEM_PROMPT="\
 You are a helpful, respectful, and honest assistant designed to improve English language skills. Your name is Nemo\
-Always provide accurate and helpful responses to language improvement tasks, while ensuring safety and ethical standards. \
-Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. \
-Please ensure that your responses are socially unbiased, positive, and focused on enhancing language skills. \
-If a question does not make sense or is not factually coherent, explain why instead of answering something incorrect. \
 If you don't know the answer to a question, please don't share false information. \
 Your role is to guide users through various language exercises and challenges, helping them to practice and improve their English skills in a fun and engaging way. \
 Always encourage users to try different approaches and provide constructive feedback to help them progress."
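For context, the markers defined at the top of this hunk are what the second hunk's context line (template = B_INST + SYSTEM_PROMPT + instruction + E_INST) builds on. Neither SYSTEM_PROMPT nor instruction is visible in this diff, so the sketch below assumes the usual Llama-2 chat wrapping and uses a hypothetical instruction string purely to illustrate how the template is likely assembled.

# Sketch only: how B_SYS/E_SYS and B_INST/E_INST typically wrap a Llama-2 chat prompt.
# SYSTEM_PROMPT and instruction are not shown in this diff; the values below are assumed.
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"

DEFAULT_SYSTEM_PROMPT = "You are a helpful assistant named Nemo..."  # trimmed stand-in for the prompt above

# Assumed: the system prompt is wrapped in <<SYS>> markers, as in the standard Llama-2 format
SYSTEM_PROMPT = B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS

# Hypothetical instruction with the {text} placeholder that PromptTemplate expects
instruction = "Improve the following text and explain your corrections:\n{text}"

# Matches the context line shown in the second hunk
template = B_INST + SYSTEM_PROMPT + instruction + E_INST
print(template)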
@@ -25,7 +30,8 @@ template = B_INST + SYSTEM_PROMPT + instruction + E_INST
 
 prompt = PromptTemplate(template=template, input_variables=["text"])
 
-llm = CTransformers(model="TheBloke/Llama-2-7B-Chat-GGUF", model_file="llama-2-7b-chat.Q3_K_S.gguf",
+# llm = CTransformers(model="TheBloke/Llama-2-7B-Chat-GGUF", model_file="llama-2-7b-chat.Q3_K_S.gguf",
+llm = CTransformers(model="NousResearch/Llama-2-7b-chat-hf",
 model_type='llama',
 config={'max_new_tokens': 128,
 'temperature': 0.01}
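The diff stops at the LLM constructor, so the chain and Gradio wiring are not shown. The sketch below is a minimal, assumed reconstruction of how the pieces above are typically connected in a Space like this; it reuses the GGUF checkpoint line from the earlier revision as the concrete model reference, and the handler name respond and the example template are hypothetical.

# Minimal sketch, assuming a standard LangChain + Gradio layout for app.py.
import gradio as gr
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import CTransformers

# Stand-in template; the real one is built from B_INST/B_SYS and the system prompt above
template = "[INST]<<SYS>>\nYou are a helpful assistant.\n<</SYS>>\n\nImprove this text:\n{text}[/INST]"
prompt = PromptTemplate(template=template, input_variables=["text"])

# GGUF checkpoint from the earlier revision; config keys match the diff
llm = CTransformers(
    model="TheBloke/Llama-2-7B-Chat-GGUF",
    model_file="llama-2-7b-chat.Q3_K_S.gguf",
    model_type="llama",
    config={"max_new_tokens": 128, "temperature": 0.01},
)

chain = LLMChain(prompt=prompt, llm=llm)  # assumed: the chain itself is not shown in the diff

def respond(text):
    # Hypothetical Gradio handler: fill the {text} slot and return the model's reply
    return chain.run(text=text)

gr.Interface(fn=respond, inputs="text", outputs="text").launch()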