yusufcakmak committed
Commit 740e144
1 Parent(s): 0a67ed6

Added usage

Files changed (1)
  1. README.md +49 -3
README.md CHANGED
@@ -1,3 +1,49 @@
- ---
- license: llama3
- ---
+ ---
+ license: llama3
+ ---
+
+ ## Usage
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+
+ model_id = "Trendyol/Trendyol-LLM-8b-chat-v2.0"
+
+ # Flash Attention 2 requires the flash-attn package to be installed.
+ pipe = pipeline(
+     "text-generation",
+     model=model_id,
+     model_kwargs={
+         "torch_dtype": torch.bfloat16,
+         "use_cache": True,
+         "use_flash_attention_2": True
+     },
+     device_map='auto',
+ )
+
+ # Stop at either the tokenizer's EOS token or Llama 3's <|eot_id|> end-of-turn token.
+ terminators = [
+     pipe.tokenizer.eos_token_id,
+     pipe.tokenizer.convert_tokens_to_ids("<|eot_id|>")
+ ]
+
+ sampling_params = dict(do_sample=True, temperature=0.3, top_k=50, top_p=0.9, repetition_penalty=1.1)
+ # Turkish: "You are a helpful assistant, and you will try to produce the best
+ # answer in line with the instructions given to you."
+ DEFAULT_SYSTEM_PROMPT = "Sen yardımsever bir asistansın ve sana verilen talimatlar doğrultusunda en iyi cevabı üretmeye çalışacaksın."
+
+ def generate_output(user_query, sys_prompt=DEFAULT_SYSTEM_PROMPT):
+     messages = [
+         {"role": "system", "content": sys_prompt},
+         {"role": "user", "content": user_query}
+     ]
+
+     outputs = pipe(
+         messages,
+         max_new_tokens=1024,
+         eos_token_id=terminators,
+         return_full_text=False,
+         **sampling_params
+     )
+
+     return outputs[0]["generated_text"]
+
+ response = generate_output("Türkiye'de kaç il var?")  # "How many provinces are there in Turkey?"
+ print(response)
+ ```
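
The same chat flow can also be run without the `pipeline` wrapper, which makes the individual steps visible. The following is a minimal sketch, assuming the checkpoint ships a Llama-3-style chat template (consistent with the `<|eot_id|>` terminator above); the model id and sampling settings are reused from the committed snippet.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Trendyol/Trendyol-LLM-8b-chat-v2.0"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

messages = [
    {"role": "system", "content": "Sen yardımsever bir asistansın ve sana verilen talimatlar doğrultusunda en iyi cevabı üretmeye çalışacaksın."},
    {"role": "user", "content": "Türkiye'de kaç il var?"},  # "How many provinces are there in Turkey?"
]

# Render the chat messages into the model's prompt format via the tokenizer's chat template.
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(
    input_ids,
    max_new_tokens=1024,
    eos_token_id=[tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|eot_id|>")],
    do_sample=True,
    temperature=0.3,
    top_k=50,
    top_p=0.9,
    repetition_penalty=1.1,
)

# Decode only the newly generated tokens, mirroring return_full_text=False above.
print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True))
```

This mirrors what the pipeline version does internally: format the messages through the chat template, generate until one of the terminators is produced, and decode only the newly generated tokens.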