Update README.md
README.md CHANGED

@@ -79,7 +79,7 @@ question = "how many dogs are in the picture?"
 inputs = processor(raw_image, question, return_tensors="pt")
 
 out = model.generate(**inputs)
-print(processor.decode(out[0], skip_special_tokens=True))
+print(processor.decode(out[0], skip_special_tokens=True).strip())
 ```
 </details>
 
@@ -106,7 +106,7 @@ question = "how many dogs are in the picture?"
 inputs = processor(raw_image, question, return_tensors="pt").to("cuda")
 
 out = model.generate(**inputs)
-print(processor.decode(out[0], skip_special_tokens=True))
+print(processor.decode(out[0], skip_special_tokens=True).strip())
 ```
 </details>
 
@@ -132,7 +132,7 @@ question = "how many dogs are in the picture?"
 inputs = processor(raw_image, question, return_tensors="pt").to("cuda", torch.float16)
 
 out = model.generate(**inputs)
-print(processor.decode(out[0], skip_special_tokens=True))
+print(processor.decode(out[0], skip_special_tokens=True).strip())
 ```
 </details>
 
@@ -158,6 +158,6 @@ question = "how many dogs are in the picture?"
 inputs = processor(raw_image, question, return_tensors="pt").to("cuda", torch.float16)
 
 out = model.generate(**inputs)
-print(processor.decode(out[0], skip_special_tokens=True))
+print(processor.decode(out[0], skip_special_tokens=True).strip())
 ```
 </details>
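For context, the snippet below is a self-contained sketch of the visual question answering example these README sections walk through, with the `.strip()` call this change introduces (it trims the stray whitespace the decoder can leave around the answer). The checkpoint name `Salesforce/blip-vqa-base` and the demo image URL are assumptions for illustration, not taken from the diff itself; substitute whatever the README actually uses.

```python
# Minimal VQA sketch reflecting this change (CPU variant of the README example).
# Assumed checkpoint and image URL; swap in the ones from the actual README.
import requests
from PIL import Image
from transformers import BlipProcessor, BlipForQuestionAnswering

processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")        # assumed checkpoint
model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")  # assumed checkpoint

img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"  # assumed demo image
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

question = "how many dogs are in the picture?"
inputs = processor(raw_image, question, return_tensors="pt")

out = model.generate(**inputs)
# .strip() removes leading/trailing whitespace around the decoded answer.
print(processor.decode(out[0], skip_special_tokens=True).strip())
```

The GPU and float16 variants in the other hunks differ only in moving `inputs` (and the model) with `.to("cuda")` or `.to("cuda", torch.float16)`; the decoding and `.strip()` step is the same in all four.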