mayank-mishra committed · Commit 8b152ee (verified) · Parent: 196a2fe

Update README.md

Files changed (1):
  1. README.md +3 -6
README.md CHANGED
@@ -130,18 +130,15 @@ This is a simple example of how to use **PowerMoE-3b** model.
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 device = "cuda" # or "cpu"
-model_path = "ibm/PowerMoE-3b"
+model_path = "ibm/PowerLM-3b"
 tokenizer = AutoTokenizer.from_pretrained(model_path)
 # drop device_map if running on CPU
 model = AutoModelForCausalLM.from_pretrained(model_path, device_map=device)
 model.eval()
 # change input text as desired
-chat = [
-    { "role": "user", "content": "Write a code to find the maximum value in a list of numbers." },
-]
-chat = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
+prompt = "Write a code to find the maximum value in a list of numbers."
 # tokenize the text
-input_tokens = tokenizer(chat, return_tensors="pt")
+input_tokens = tokenizer(prompt, return_tensors="pt")
 # transfer tokenized inputs to the device
 for i in input_tokens:
     input_tokens[i] = input_tokens[i].to(device)
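
For context, a minimal runnable sketch of the snippet as it reads after this commit. Everything through the device-transfer loop comes from the new side of the hunk; the `generate`/`batch_decode` tail is an assumed continuation (the hunk ends at the device transfer), and `max_new_tokens=128` is an illustrative setting, not taken from the README.

```python
# Sketch of the README example after this commit; the generation tail
# below is an assumed continuation with illustrative settings.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda"  # or "cpu"
model_path = "ibm/PowerLM-3b"
tokenizer = AutoTokenizer.from_pretrained(model_path)
# drop device_map if running on CPU
model = AutoModelForCausalLM.from_pretrained(model_path, device_map=device)
model.eval()

# change input text as desired
prompt = "Write a code to find the maximum value in a list of numbers."

# tokenize the text
input_tokens = tokenizer(prompt, return_tensors="pt")
# transfer tokenized inputs to the device
for i in input_tokens:
    input_tokens[i] = input_tokens[i].to(device)

# generate output tokens (max_new_tokens=128 is an illustrative choice)
with torch.no_grad():
    output = model.generate(**input_tokens, max_new_tokens=128)

# decode output tokens into text and print the result
print(tokenizer.batch_decode(output, skip_special_tokens=True)[0])
```

The switch from `apply_chat_template` to a plain string prompt is consistent with PowerLM-3b being a base (non-chat) model, which would have no chat template to apply.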