davidsi committed (verified)
Commit a059f57 · Parent(s): 25cd3f7

Update README.md

Files changed (1):
1. README.md (+30 -2)
README.md CHANGED
@@ -3,7 +3,7 @@ library_name: transformers
 language:
 - en
 pipeline_tag: text-generation
-license: llama3
+license: llama3.1
 ---
 <!-- Provide a quick summary of what the model is/does. -->

@@ -56,9 +56,37 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 model_name = 'davidsi/Llama3_1-8B-Instruct-AMD-python'
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 llm = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16)
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+messages = [
+    {"role": "system", "content": "You are a helpful assistant for AMD technologies and python."},
+    {"role": "user", "content": query}
+]
+
+terminators = [
+    tokenizer.eos_token_id,
+    tokenizer.convert_tokens_to_ids("<|eot_id|>")
+]
+
+input_ids = tokenizer.apply_chat_template(
+    messages,
+    add_generation_prompt=True,
+    return_tensors="pt"
+).to(device)
+
+outputs = model.generate(
+    input_ids,
+    max_new_tokens=16384,
+    eos_token_id=terminators,
+    pad_token_id=tokenizer.eos_token_id,
+    do_sample=True,
+    temperature=0.6,
+    top_p=0.9,
+)
+response = outputs[0][input_ids.shape[-1]:]
+print(tokenizer.decode(response, skip_special_tokens=True))
 ~~~

-[More Information Needed]

 ## Training Details
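
For reference, a minimal self-contained sketch of the updated usage example as it would run end to end, under a few assumptions: it adds the `import torch` the snippet relies on, defines the `query` variable that the committed code leaves unset (the example question below is hypothetical), uses a single `model` variable where the diff loads the weights as `llm` but then calls `model.generate`, and moves the model to the selected `device` so the generated inputs and weights end up on the same device.

~~~python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = 'davidsi/Llama3_1-8B-Instruct-AMD-python'

# Load tokenizer and model in bfloat16, as in the committed snippet.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16)

# Pick a device; the committed snippet computes this but never moves the model,
# so the sketch does both.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# `query` is undefined in the diff; any user prompt works here (hypothetical example).
query = "Write a Python function that checks whether a ROCm GPU is visible to PyTorch."

messages = [
    {"role": "system", "content": "You are a helpful assistant for AMD technologies and python."},
    {"role": "user", "content": query},
]

# Stop on either the regular EOS token or Llama 3's end-of-turn marker.
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]

# Build the chat-formatted prompt and move it to the same device as the model.
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt",
).to(device)

outputs = model.generate(
    input_ids,
    max_new_tokens=16384,
    eos_token_id=terminators,
    pad_token_id=tokenizer.eos_token_id,
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
)

# Decode only the newly generated tokens, dropping the echoed prompt.
response = outputs[0][input_ids.shape[-1]:]
print(tokenizer.decode(response, skip_special_tokens=True))
~~~

The sampling settings (`temperature=0.6`, `top_p=0.9`) are taken directly from the diff; for deterministic output one could instead pass `do_sample=False` and drop the temperature and top_p arguments.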