mintaeng committed on
Commit f2729ad · verified · 1 Parent(s): a8fedd9

Update README.md

Files changed (1)
  1. README.md +54 -1
README.md CHANGED
@@ -21,7 +21,60 @@ language:
  -

  ## HOW TO USE
-
+ ```python
+
+ #!pip install transformers==4.40.0 accelerate
+ import os
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ model_id = 'Dongwookss/small_fut_final'
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     torch_dtype=torch.bfloat16,
+     device_map="auto",
+ )
+ model.eval()
+ ```
+
+ **Query**
+
+ ```python
+ from transformers import TextStreamer
+ PROMPT = '''Below is an instruction that describes a task. Write a response that appropriately completes the request.
+ 제시하는 context에서만 대답하고 context에 없는 내용은 모르겠다고 대답해'''
+
+ messages = [
+     {"role": "system", "content": f"{PROMPT}"},
+     {"role": "user", "content": f"{instruction}"}
+ ]
+
+ input_ids = tokenizer.apply_chat_template(
+     messages,
+     add_generation_prompt=True,
+     return_tensors="pt"
+ ).to(model.device)
+
+ terminators = [
+     tokenizer.eos_token_id,
+     tokenizer.convert_tokens_to_ids("<|eot_id|>")
+ ]
+
+ text_streamer = TextStreamer(tokenizer)
+ _ = model.generate(
+     input_ids,
+     max_new_tokens=4096,
+     eos_token_id=terminators,
+     do_sample=True,
+     streamer=text_streamer,
+     temperature=0.6,
+     top_p=0.9,
+     repetition_penalty=1.1
+ )
+ ```

  ## Model Details
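Note (not part of the commit): the **Query** block above references an `instruction` variable that the README never defines, and it only streams tokens to stdout through `TextStreamer`. Below is a minimal, self-contained sketch under stated assumptions: the question assigned to `instruction` is a made-up placeholder, `max_new_tokens` is lowered for a quick test, and the final decode is just one common way to recover the reply as a string; none of it should be read as the model author's recommended usage.

```python
# Standalone sketch: same model and sampling settings as the README snippet,
# but with a placeholder question and a plain decode instead of streaming.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = 'Dongwookss/small_fut_final'

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
model.eval()

# System prompt from the README; the Korean line tells the model to answer only
# from the provided context and to say it does not know otherwise.
PROMPT = '''Below is an instruction that describes a task. Write a response that appropriately completes the request.
제시하는 context에서만 대답하고 context에 없는 내용은 모르겠다고 대답해'''

# Placeholder user query; the README never defines `instruction`, so supply your own.
instruction = "여기에 질문을 입력하세요."  # "Type your question here."

messages = [
    {"role": "system", "content": PROMPT},
    {"role": "user", "content": instruction},
]

input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt",
).to(model.device)

terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]

with torch.no_grad():
    output = model.generate(
        input_ids,
        max_new_tokens=512,  # smaller than the README's 4096, just for a quick check
        eos_token_id=terminators,
        do_sample=True,
        temperature=0.6,
        top_p=0.9,
        repetition_penalty=1.1,
    )

# Drop the prompt tokens and keep only the newly generated reply.
reply = tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)
print(reply)
```

Everything else (model id, chat template call, `<|eot_id|>` terminator, and sampling settings) mirrors the snippet added by the commit.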