lrl-modelcloud committed on
Commit aa3c5ac
1 Parent(s): a56a098

Update README.md

Files changed (1)
  1. README.md +2 -2
README.md CHANGED
@@ -47,10 +47,10 @@ from gptqmodel import GPTQModel
 model_name = "ModelCloud/Qwen2.5-Coder-32B-Instruct-gptqmodel-4bit-vortex-v1"
 
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = GPTQModel.from_quantized(model_name)
+model = GPTQModel.load(model_name)
 
 messages = [
-    {"role": "system", "content": "You are an AI programming assistant, skilled in analyzing and generating code."},
+    {"role": "system", "content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."},
     {"role": "user", "content": "How can I design a data structure in C++ to store the top 5 largest integer numbers?"},
 ]
 input_tensor = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
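For context, the two changed lines sit inside the README's usage snippet. Below is a minimal sketch of the full updated example after this commit; the generation and decoding lines after `apply_chat_template` are not part of the hunk and are assumptions based on typical GPTQModel/Transformers usage.

```python
# Sketch of the README usage snippet as it reads after this commit.
from transformers import AutoTokenizer
from gptqmodel import GPTQModel

model_name = "ModelCloud/Qwen2.5-Coder-32B-Instruct-gptqmodel-4bit-vortex-v1"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = GPTQModel.load(model_name)  # replaces the older from_quantized() call

messages = [
    {"role": "system", "content": "You are Qwen, created by Alibaba Cloud. You are a helpful assistant."},
    {"role": "user", "content": "How can I design a data structure in C++ to store the top 5 largest integer numbers?"},
]
input_tensor = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")

# Assumed continuation (not shown in the diff): generate a reply and decode
# only the newly generated tokens after the prompt.
outputs = model.generate(input_ids=input_tensor.to(model.device), max_new_tokens=512)
result = tokenizer.decode(outputs[0][input_tensor.shape[1]:], skip_special_tokens=True)
print(result)
```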