Commit: Upload moondream.py (moondream.py changed, +19 lines / -0 lines — the previous `answer_question` was commented out and a simplified replacement added)
@@ -281,6 +281,7 @@ class Moondream(PreTrainedModel):
|
|
281 |
|
282 |
#return tokenizer.batch_decode(output_ids, skip_special_tokens=True)
|
283 |
|
|
|
284 |
def answer_question(
|
285 |
self,
|
286 |
image_embeds,
|
@@ -305,6 +306,24 @@ class Moondream(PreTrainedModel):
|
|
305 |
result_queue.put(cleaned_answer)
|
306 |
else:
|
307 |
return cleaned_answer
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
308 |
|
309 |
def batch_answer(
|
310 |
self,
|
|
|
281 |
|
282 |
#return tokenizer.batch_decode(output_ids, skip_special_tokens=True)
|
283 |
|
284 |
+
"""
|
285 |
def answer_question(
|
286 |
self,
|
287 |
image_embeds,
|
|
|
306 |
result_queue.put(cleaned_answer)
|
307 |
else:
|
308 |
return cleaned_answer
|
309 |
+
"""
|
310 |
+
|
311 |
+
def answer_question(
    self,
    image_embeds,
    question,
    tokenizer,
    chat_history="",
    max_new_tokens=512,
    **kwargs,
):
    """Answer a free-form question about an encoded image.

    Builds the model's chat-style prompt and delegates to ``self.generate``.

    Args:
        image_embeds: Pre-computed image embeddings (produced elsewhere in
            this model; shape/dtype not visible here — passed through as-is).
        question: The user question, interpolated into the prompt.
        tokenizer: Tokenizer forwarded to ``self.generate``.
        chat_history: Prior dialogue, prepended verbatim before the question
            (defaults to empty — single-turn).
        max_new_tokens: Generation length cap. Previously hard-coded to 512;
            now a defaulted parameter so callers can override it without the
            ``TypeError`` that passing it via **kwargs used to raise.
        **kwargs: Additional generation options forwarded to ``self.generate``.

    Returns:
        Whatever ``self.generate`` returns (the decoded answer).
    """
    # Prompt format expected by the model: image placeholder, optional
    # history, then a "Question:/Answer:" pair the model completes.
    prompt = f"<image>\n\n{chat_history}Question: {question}\n\nAnswer:"
    return self.generate(
        image_embeds,
        prompt,
        tokenizer=tokenizer,
        max_new_tokens=max_new_tokens,
        **kwargs,
    )
|
327 |
|
328 |
def batch_answer(
|
329 |
self,
|