Create chat_qwen.py
chat_qwen.py (ADDED, +33 -0)
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "Qwen/Qwen2-7B-Instruct-GPTQ-Int4"

# Load the model and tokenizer once at module level so repeated calls to
# get_response() do not reload the weights. Note that GPTQ checkpoints
# generally need the optimum and auto-gptq packages plus a CUDA device.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype="auto",
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)


def get_response(prompt: str) -> str:
    # Use the caller's prompt as the user turn instead of shadowing the
    # argument with a hard-coded string.
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": prompt},
    ]

    # Render the conversation with the model's chat template, appending the
    # assistant header so generation starts a fresh reply.
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=512,
    )
    # Drop the prompt tokens so only the newly generated reply is decoded.
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]

    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return response
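Nothing in the commit actually calls get_response(), so below is a minimal sketch of how the Space's UI might wire it up. It assumes a Gradio app; the framework choice, component labels, and title are illustrative assumptions, not part of this commit:

import gradio as gr

# Assumed wiring for the Space: a single-turn text-in/text-out interface
# around get_response(). All names and labels here are illustrative.
demo = gr.Interface(
    fn=get_response,
    inputs=gr.Textbox(label="Prompt"),
    outputs=gr.Textbox(label="Response"),
    title="Qwen2-7B-Instruct (GPTQ Int4) chat",
)

if __name__ == "__main__":
    demo.launch()

For multi-turn conversation, gr.ChatInterface with a small history-aware wrapper that folds previous turns into the messages list would be the usual Gradio pattern.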