Spaces: Runtime error
Commit 13680c9 · Parent(s): f20e25b
First commit

Files changed:
- app.py +67 -0
- requirements.txt +2 -0
app.py
ADDED
@@ -0,0 +1,67 @@
import torch
import transformers
from transformers import AutoTokenizer, pipeline
import gradio as gr


model = "meta-llama/Llama-2-7b-chat-hf"

# token=True reads the Hugging Face token stored on the machine; the Llama-2
# checkpoints are gated, so the account must have been granted access.
tokenizer = AutoTokenizer.from_pretrained(model, token=True)

llama_pipeline = pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)
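
# Note: torch.float16 halves memory versus float32, and device_map="auto"
# lets the accelerate library decide where to place the model's weights;
# on CPU-only hardware a 7B model is unlikely to load or respond quickly.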

# Special tokens and delimiters of the Llama-2 chat prompt template.
BOS = "<s>"
EOS = "</s>"
BINS = "[INST] "
EINS = " [/INST]"
BSYS = "<<SYS>>\n"
ESYS = "\n<</SYS>>\n\n"

SYSTEM_PROMPT = BOS + BINS + BSYS + """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.
Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct.
If you don't know the answer to a question, just say you don't know, please don't share false information.""" + ESYS

def message_format(msg: str, history: list, history_lim: int = 5):
    # Keep only the most recent history_lim turns.
    history = history[-history_lim:]

    if len(history) == 0:
        return SYSTEM_PROMPT + f"{msg} {EINS}"

    # history is a list of (user_query, model_response) pairs.
    query = SYSTEM_PROMPT + f"{history[0][0]} {EINS} {history[0][1]} {EOS}"

    for user_query, model_response in history[1:]:
        query += f"{BOS}{BINS} {user_query} {EINS} {model_response} {EOS}"

    query += f"{BOS}{BINS} {msg} {EINS}"

    return query
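
# For illustration, message_format("How are you?", [("Hi", "Hello!")])
# returns roughly (system text abbreviated, whitespace approximate):
#   <s>[INST] <<SYS>>
#   You are a helpful, respectful and honest assistant. ...
#   <</SYS>>
#
#   Hi  [/INST] Hello! </s><s>[INST]  How are you?  [/INST]
# i.e. each past turn is wrapped in <s>[INST] ... [/INST] ... </s> and the
# new message is left open for the model to complete.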

def response(msg: str, history: list):
    query = message_format(msg, history)

    sequences = llama_pipeline(
        query,
        do_sample=True,  # sample from the most likely tokens for diversity in the generated text
        top_k=10,  # consider only the 10 most likely tokens at each step
        num_return_sequences=1,  # return a single generated sequence
        eos_token_id=tokenizer.eos_token_id,  # stop generating at the end-of-sequence token
        max_length=1024,  # cap the total length in case answers run too long
    )

    generated_text = sequences[0]["generated_text"]
    response = generated_text[len(query):].strip()  # drop the echoed prompt, keep only the new text

    print(f"AI Agent: {response}")
    return response
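
# Note: max_length counts the prompt tokens as well, so long chat histories
# leave less room for the reply; max_new_tokens (also accepted by the
# text-generation pipeline) would bound only the newly generated tokens.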


gr.ChatInterface(response).launch()
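
gr.ChatInterface wires response into a chat UI: on each submission Gradio calls the function with the new message plus the accumulated history of (user, assistant) pairs, and displays the returned string as the reply. A minimal variant of the last line with a request queue, useful if several visitors hit the Space at once (queue() is a standard Gradio option, not something this commit uses):

gr.ChatInterface(response).queue().launch()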
requirements.txt
ADDED
@@ -0,0 +1,2 @@
torch
transformers
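
A note on dependencies: app.py also imports gradio (preinstalled on Gradio-SDK Spaces, but needed for a local run), and device_map="auto" requires the accelerate package, which this file does not list. A sketch of a requirements.txt covering everything the script imports or needs at runtime, assuming missing packages are indeed the failure mode behind the Space's "Runtime error" status:

torch
transformers
accelerate  # needed by device_map="auto"
gradio      # preinstalled on Gradio Spaces, required locally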