Update README.md
Browse files
README.md
CHANGED
@@ -109,17 +109,22 @@ Describe the image.<|im_end|>
|
|
109 |
**Process inputs**
|
110 |
|
111 |
```python
|
112 |
-
|
|
|
113 |
|
114 |
inputs['input_ids'] = inputs['input_ids'].to(model.device)
|
115 |
inputs['attention_mask'] = inputs['attention_mask'].to(model.device)
|
|
|
|
|
|
|
|
|
116 |
```
|
117 |
|
118 |
**Generate the data**
|
119 |
|
120 |
```python
|
121 |
-
|
122 |
-
|
123 |
with torch.inference_mode():
|
124 |
-
output = model.generate(**inputs, max_new_tokens=200, do_sample=True,
|
|
|
125 |
```
|
|
|
109 |
**Process inputs**
|
110 |
|
111 |
```python
|
112 |
+
with torch.inference_mode():
|
113 |
+
inputs = processor(prompt, raw_image, return_tensors='pt')
|
114 |
|
115 |
inputs['input_ids'] = inputs['input_ids'].to(model.device)
|
116 |
inputs['attention_mask'] = inputs['attention_mask'].to(model.device)
|
117 |
+
|
118 |
+
from transformers import TextStreamer
|
119 |
+
|
120 |
+
streamer = TextStreamer(tokenizer)
|
121 |
```
|
122 |
|
123 |
**Generate the data**
|
124 |
|
125 |
```python
|
126 |
+
%%time
|
|
|
127 |
with torch.inference_mode():
|
128 |
+
output = model.generate(**inputs, max_new_tokens=200, do_sample=True, top_p=0.9, temperature=1.2, eos_token_id=tokenizer.eos_token_id, streamer=streamer)
|
129 |
+
print(tokenizer.decode(output[0]).replace(prompt, "").replace("<|im_end|>", ""))
|
130 |
```
|