patrickvonplaten committed
Commit 4b90ef4 • 1 Parent(s): 845d26d

Update README.md

README.md CHANGED
````diff
@@ -97,7 +97,7 @@ messages = [
     },
 ]
 
-outputs = llm.
+outputs = llm.generate(messages, sampling_params=sampling_params)
 
 print(outputs[0].outputs[0].text)
 ```
````
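For context, the completed call lives inside the README's vLLM example. Below is a minimal sketch of how that snippet plausibly reads after this commit; the model id, the `SamplingParams` settings, and the contents of `messages` are assumptions, since the diff only shows the `outputs = ...` line and its immediate context.

```python
# Sketch of the surrounding vLLM snippet; everything except the
# `outputs = llm.generate(...)` line is an assumption, not taken from the diff.
from vllm import LLM, SamplingParams

llm = LLM(model="mistral-community/pixtral-12b", tokenizer_mode="mistral")  # assumed model id
sampling_params = SamplingParams(max_tokens=256)  # assumed settings

messages = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe this image."},  # assumed prompt
            {"type": "image_url", "image_url": {"url": "https://..."}},  # placeholder URL
        ],
    },
]

outputs = llm.generate(messages, sampling_params=sampling_params)

print(outputs[0].outputs[0].text)
```

Depending on the vLLM version, chat-style message lists may instead go through `llm.chat`; the `llm.generate` call is kept here exactly as committed.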
````diff
@@ -136,26 +136,31 @@ from mistral_inference.transformer import Transformer
 from mistral_inference.generate import generate
 
 from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
-from mistral_common.protocol.instruct.messages import
-from mistral_common.protocol.instruct.request import CompletionRequest
+from mistral_common.protocol.instruct.messages import TextChunk, ImageURLChunk
 
+mistral_models_path = "/mnt/vast/shared/william/pixtral_pretrain_release"
 tokenizer = MistralTokenizer.from_file(f"{mistral_models_path}/tekken.json")
 model = Transformer.from_folder(mistral_models_path)
 
 url = "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/yosemite.png"
 prompt = "The above image presents a"
 
-
+user_content = [ImageURLChunk(image_url=url), TextChunk(text=prompt)]
 
-
+tokens, images = tokenizer.instruct_tokenizer.encode_user_content(user_content, False)
 
-
-tokens
-
-
+out_tokens, _ = generate(
+    [tokens],
+    model,
+    images=[images],
+    max_tokens=256,
+    temperature=0.35,
+    eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id,
+)
 result = tokenizer.decode(out_tokens[0])
 
-print(
+print("Prompt:", prompt)
+print("Completion:", result)
 ```
 
 ## Limitations
````
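Assembled in one place, the post-commit version of this example reads as follows. The code is taken from the additions and context lines above; only the comments are new, and the meaning of the trailing `False` argument to `encode_user_content` is an assumption.

```python
from mistral_inference.transformer import Transformer
from mistral_inference.generate import generate

from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
from mistral_common.protocol.instruct.messages import TextChunk, ImageURLChunk

# Hard-coded internal path as committed; in practice this would point at a
# local download of the Pixtral weights and tokenizer files.
mistral_models_path = "/mnt/vast/shared/william/pixtral_pretrain_release"
tokenizer = MistralTokenizer.from_file(f"{mistral_models_path}/tekken.json")
model = Transformer.from_folder(mistral_models_path)

url = "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/yosemite.png"
prompt = "The above image presents a"

# One user turn: the image chunk first, then the text prompt that continues it.
user_content = [ImageURLChunk(image_url=url), TextChunk(text=prompt)]

# Tokenize the multimodal content; the trailing `False` is kept exactly as
# committed (its meaning is not spelled out in the diff).
tokens, images = tokenizer.instruct_tokenizer.encode_user_content(user_content, False)

out_tokens, _ = generate(
    [tokens],
    model,
    images=[images],
    max_tokens=256,
    temperature=0.35,
    eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id,
)
result = tokenizer.decode(out_tokens[0])

print("Prompt:", prompt)
print("Completion:", result)
```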