VictorSanh committed on
Commit
54c6ef4
1 Parent(s): 1ed46b5

Update README.md

Files changed (1)
  1. README.md +2 -10
README.md CHANGED
@@ -143,10 +143,6 @@ model = AutoModelForVision2Seq.from_pretrained(
     "HuggingFaceM4/idefics2-8b-base",
 ).to(DEVICE)
 
-BAD_WORDS_IDS = processor.tokenizer(["<image>", "<fake_token_around_image>"], add_special_tokens=False).input_ids
-EOS_WORDS_IDS = [processor.tokenizer.eos_token_id]
-
-
 # Create inputs
 prompts = [
   "<image>In this image, we can see the city of New York, and more specifically the Statue of Liberty.<image>In this image,",
@@ -158,7 +154,7 @@ inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
 
 
 # Generate
-generated_ids = model.generate(**inputs, bad_words_ids=BAD_WORDS_IDS, eos_token_id=EOS_WORDS_IDS, max_new_tokens=500)
+generated_ids = model.generate(**inputs, max_new_tokens=500)
 generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
 
 print(generated_texts)
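
For reference, the base-model snippet reads roughly as follows once this change is applied. This is a sketch only: the image loading and the processor call are assumed from the surrounding README rather than shown in this diff, and the image URLs are placeholders.

from transformers import AutoProcessor, AutoModelForVision2Seq
from transformers.image_utils import load_image

DEVICE = "cuda:0"

# Processor and model for the base checkpoint
processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b-base")
model = AutoModelForVision2Seq.from_pretrained(
    "HuggingFaceM4/idefics2-8b-base",
).to(DEVICE)

# Two example images; the URLs are placeholders, not part of this commit
image1 = load_image("https://example.com/new_york.jpg")
image2 = load_image("https://example.com/chicago.jpg")

# Create inputs (prompt taken from the snippet above)
prompts = [
  "<image>In this image, we can see the city of New York, and more specifically the Statue of Liberty.<image>In this image,",
]
inputs = processor(text=prompts, images=[[image1, image2]], return_tensors="pt")
inputs = {k: v.to(DEVICE) for k, v in inputs.items()}

# Generate without the bad_words_ids / eos_token_id overrides removed by this commit
generated_ids = model.generate(**inputs, max_new_tokens=500)
generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)

print(generated_texts)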
@@ -176,10 +172,6 @@ model = AutoModelForVision2Seq.from_pretrained(
     "HuggingFaceM4/idefics2-8b",
 ).to(DEVICE)
 
-BAD_WORDS_IDS = processor.tokenizer(["<image>", "<fake_token_around_image>"], add_special_tokens=False).input_ids
-EOS_WORDS_IDS = processor.tokenizer("<end_of_utterance>", add_special_tokens=False).input_ids + [processor.tokenizer.eos_token_id]
-
-
 # Create inputs
 messages = [
   {
@@ -209,7 +201,7 @@ inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
 
 
 # Generate
-generated_ids = model.generate(**inputs, bad_words_ids=BAD_WORDS_IDS, eos_token_id=EOS_WORDS_IDS, max_new_tokens=500)
+generated_ids = model.generate(**inputs, max_new_tokens=500)
 generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
 
 print(generated_texts)
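
The instruct checkpoint gets the same simplification. Below is a sketch of the resulting flow, assuming the chat-template usage (a messages list plus processor.apply_chat_template) shown elsewhere in the README; the conversation content and the image URL are placeholders.

from transformers import AutoProcessor, AutoModelForVision2Seq
from transformers.image_utils import load_image

DEVICE = "cuda:0"

processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b")
model = AutoModelForVision2Seq.from_pretrained(
    "HuggingFaceM4/idefics2-8b",
).to(DEVICE)

# Placeholder image; the URL is not part of this commit
image1 = load_image("https://example.com/new_york.jpg")

# Create inputs: a single-turn conversation with one image and one question
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "What do we see in this image?"},
        ],
    },
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(text=prompt, images=[image1], return_tensors="pt")
inputs = {k: v.to(DEVICE) for k, v in inputs.items()}

# Generate without the bad_words_ids / eos_token_id overrides removed by this commit
generated_ids = model.generate(**inputs, max_new_tokens=500)
generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)

print(generated_texts)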