Eurdem committed on
Commit
4bcdd87
1 Parent(s): f0e03f4

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +6 -22
README.md CHANGED
@@ -28,20 +28,12 @@ model_id = "Eurdem/Defne_llama3_2x8B"
28
  tokenizer = AutoTokenizer.from_pretrained(model_id)
29
  model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto", load_in_8bit= True)
30
 
31
- messages = [
32
- {"role": "system", "content": "You are a helpful chatbot, named Defne, who always responds friendly."},
33
- {"role": "user", "content": "Answer the questions: 1) Who are you? 2) f(x)=3x^2+4x+12 so what is f(3)?"},
34
  ]
35
 
36
  input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt").to("cuda")
37
-
38
- outputs = model.generate(input_ids,
39
- max_new_tokens=1024,
40
- do_sample=True,
41
- temperature=0.7,
42
- top_p=0.7,
43
- top_k=500,
44
- )
45
  response = outputs[0][input_ids.shape[-1]:]
46
  print(tokenizer.decode(response, skip_special_tokens=True))
47
  ```
@@ -75,20 +67,12 @@ model_id = "Eurdem/Defne_llama3_2x8B"
75
  tokenizer = AutoTokenizer.from_pretrained(model_id)
76
  model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto", load_in_8bit= True)
77
 
78
- messages = [
79
- {"role": "system", "content": "Sen Defne isimli Türkçe konuşan bir chatbotsun."},
80
- {"role": "user", "content": "Sana 2 sorum var. 1) Sen kimsin? 2)f(x)=3x^2+4x+12 ise f(3) kaçtır?"}
81
  ]
82
 
83
  input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt").to("cuda")
84
-
85
- outputs = model.generate(input_ids,
86
- max_new_tokens=1024,
87
- do_sample=True,
88
- temperature=0.7,
89
- top_p=0.7,
90
- top_k=500,
91
- )
92
  response = outputs[0][input_ids.shape[-1]:]
93
  print(tokenizer.decode(response, skip_special_tokens=True))
94
  ```
 
28
  tokenizer = AutoTokenizer.from_pretrained(model_id)
29
  model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto", load_in_8bit= True)
30
 
31
+ messages = [{"role": "system", "content": "You are a helpful chatbot, named Defne, who always responds friendly."},
32
+ {"role": "user", "content": "Answer the questions: 1) Who are you? 2) f(x)=3x^2+4x+12 so what is f(3)?"},
 
33
  ]
34
 
35
  input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt").to("cuda")
36
+ outputs = model.generate(input_ids, max_new_tokens=1024, do_sample=True, temperature=0.7, top_p=0.7, top_k=500,)
 
 
 
 
 
 
 
37
  response = outputs[0][input_ids.shape[-1]:]
38
  print(tokenizer.decode(response, skip_special_tokens=True))
39
  ```
 
67
  tokenizer = AutoTokenizer.from_pretrained(model_id)
68
  model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto", load_in_8bit= True)
69
 
70
+ messages = [{"role": "system", "content": "Sen Defne isimli Türkçe konuşan bir chatbotsun."},
71
+ {"role": "user", "content": "Sana 2 sorum var. 1) Sen kimsin? 2)f(x)=3x^2+4x+12 ise f(3) kaçtır?"}
 
72
  ]
73
 
74
  input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt").to("cuda")
75
+ outputs = model.generate(input_ids, max_new_tokens=1024, do_sample=True, temperature=0.7, top_p=0.7, top_k=500,)
 
 
 
 
 
 
 
76
  response = outputs[0][input_ids.shape[-1]:]
77
  print(tokenizer.decode(response, skip_special_tokens=True))
78
  ```