vllm
patrickvonplaten committed on
Commit
4b90ef4
1 Parent(s): 845d26d

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +15 -10
README.md CHANGED
@@ -97,7 +97,7 @@ messages = [
97
  },
98
  ]
99
 
100
- outputs = llm.chat(messages, sampling_params=sampling_params)
101
 
102
  print(outputs[0].outputs[0].text)
103
  ```
@@ -136,26 +136,31 @@ from mistral_inference.transformer import Transformer
136
  from mistral_inference.generate import generate
137
 
138
  from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
139
- from mistral_common.protocol.instruct.messages import UserMessage, TextChunk, ImageURLChunk
140
- from mistral_common.protocol.instruct.request import CompletionRequest
141
 
 
142
  tokenizer = MistralTokenizer.from_file(f"{mistral_models_path}/tekken.json")
143
  model = Transformer.from_folder(mistral_models_path)
144
 
145
  url = "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/yosemite.png"
146
  prompt = "The above image presents a"
147
 
148
- completion_request = CompletionRequest(prompt=[UserMessage(content=[ImageURLChunk(image_url=url), TextChunk(text=prompt)])])
149
 
150
- encoded = tokenizer.encode_chat_completion(completion_request)
151
 
152
- images = encoded.images
153
- tokens = encoded.tokens
154
-
155
- out_tokens, _ = generate([tokens], model, images=[images], max_tokens=256, temperature=0.35, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)
 
 
 
 
156
  result = tokenizer.decode(out_tokens[0])
157
 
158
- print(result)
 
159
  ```
160
 
161
  ## Limitations
 
97
  },
98
  ]
99
 
100
+ outputs = llm.generate(messages, sampling_params=sampling_params)
101
 
102
  print(outputs[0].outputs[0].text)
103
  ```
 
136
  from mistral_inference.generate import generate
137
 
138
  from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
139
+ from mistral_common.protocol.instruct.messages import TextChunk, ImageURLChunk
 
140
 
141
+ mistral_models_path = "/mnt/vast/shared/william/pixtral_pretrain_release"
142
  tokenizer = MistralTokenizer.from_file(f"{mistral_models_path}/tekken.json")
143
  model = Transformer.from_folder(mistral_models_path)
144
 
145
  url = "https://huggingface.co/datasets/patrickvonplaten/random_img/resolve/main/yosemite.png"
146
  prompt = "The above image presents a"
147
 
148
+ user_content = [ImageURLChunk(image_url=url), TextChunk(text=prompt)]
149
 
150
+ tokens, images = tokenizer.instruct_tokenizer.encode_user_content(user_content, False)
151
 
152
+ out_tokens, _ = generate(
153
+ [tokens],
154
+ model,
155
+ images=[images],
156
+ max_tokens=256,
157
+ temperature=0.35,
158
+ eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id,
159
+ )
160
  result = tokenizer.decode(out_tokens[0])
161
 
162
+ print("Prompt:", prompt)
163
+ print("Completion:", result)
164
  ```
165
 
166
  ## Limitations