bhuvanmdev committed
Commit 0ce551f (1 parent: 7b0e091)

Update README.md

Files changed (1)
  1. README.md +5 -5
README.md CHANGED
````diff
@@ -44,8 +44,8 @@ pipeline_tag: text2text-generation
 <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
 
 ### Direct Use
-
-`from peft import PeftModel
+```python
+from peft import PeftModel
 
 model_id = 'google-t5/t5-base'
 
@@ -58,7 +58,6 @@ bnb_config = BitsAndBytesConfig(
 
 original_model = AutoModelForSeq2SeqLM.from_pretrained(model_id,quantization_config=bnb_config,device_map='auto')
 tokenizer = AutoTokenizer.from_pretrained(model_id)
-
 tokenizer.pad_token = tokenizer.eos_token
 
 peft_model = PeftModel.from_pretrained(original_model, "bhuvanmdev/t5-base-news-describer")
@@ -74,15 +73,16 @@ generation_config.eos_token_id = tokenizer.eos_token_id
 generation_config.use_cache = True
 
 prompt = f"""Title: A big accidient occurs in luxemberg.""".strip()
-
 encoding = tokenizer(prompt, return_tensors="pt").to(device)
+
 with torch.inference_mode():
     outputs = peft_model.generate(
         input_ids=encoding.input_ids,
         attention_mask=encoding.attention_mask,
         generation_config=generation_config,
     )
-print(tokenizer.decode(outputs[0], skip_special_tokens=True))`
+print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+```
 
 [More Information Needed]
 
````
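
For readers who want to run the snippet this commit fences in, below is a minimal self-contained sketch. The diff's hunks skip the README's `BitsAndBytesConfig` arguments and most of its `GenerationConfig` setup, so those values here (4-bit NF4 quantization, `max_new_tokens`, the `device` lookup) are illustrative assumptions rather than the repository's exact settings.

```python
# Minimal sketch of the README's "Direct Use" snippet.
# bnb_config and generation_config values are assumptions; the diff elides them.
import torch
from transformers import (
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    GenerationConfig,
)
from peft import PeftModel

model_id = 'google-t5/t5-base'

# Assumed 4-bit quantization settings (the actual arguments are not in the diff).
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# Load the quantized base model and its tokenizer.
original_model = AutoModelForSeq2SeqLM.from_pretrained(
    model_id, quantization_config=bnb_config, device_map='auto'
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token = tokenizer.eos_token

# Attach the fine-tuned LoRA adapter on top of the base model.
peft_model = PeftModel.from_pretrained(original_model, "bhuvanmdev/t5-base-news-describer")

# Assumed generation settings; only eos_token_id and use_cache appear in the diff.
generation_config = GenerationConfig.from_pretrained(model_id)
generation_config.max_new_tokens = 200
generation_config.pad_token_id = tokenizer.eos_token_id
generation_config.eos_token_id = tokenizer.eos_token_id
generation_config.use_cache = True

# device is undefined in the snippet; use wherever device_map='auto' placed the model.
device = original_model.device

prompt = """Title: A big accident occurs in Luxembourg.""".strip()
encoding = tokenizer(prompt, return_tensors="pt").to(device)

with torch.inference_mode():
    outputs = peft_model.generate(
        input_ids=encoding.input_ids,
        attention_mask=encoding.attention_mask,
        generation_config=generation_config,
    )
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```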