Ltorio committed on
Commit
a64397f
1 Parent(s): 4784163
Files changed (1) hide show
  1. app.py +8 -2
app.py CHANGED
@@ -15,6 +15,12 @@ model.load_adapter(model_id,device_map="auto")
15
 
16
  def infere(image):
17
  messages = [
 
 
 
 
 
 
18
  {
19
  "role": "user",
20
  "content": [
@@ -25,9 +31,9 @@ def infere(image):
25
  ]
26
  prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
27
  inputs = processor(text=prompt, images=[image], return_tensors="pt")
28
- print(f"inputs: {inputs}")
29
  inputs = {k: v.to(device) for k, v in inputs.items()}
30
- generated_ids = model.generate(**inputs, max_new_tokens=8192)
31
  generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
32
  return generated_texts
33
 
 
15
 
16
  def infere(image):
17
  messages = [
18
+ {
19
+ "role": "system",
20
+ "content": [
21
+ {"type": "text", "text": "You are a valuable medical doctor and you are looking at an image of your patient."},
22
+ ]
23
+ },
24
  {
25
  "role": "user",
26
  "content": [
 
31
  ]
32
  prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
33
  inputs = processor(text=prompt, images=[image], return_tensors="pt")
34
+ # print(f"inputs: {inputs}")
35
  inputs = {k: v.to(device) for k, v in inputs.items()}
36
+ generated_ids = model.generate(**inputs, max_new_tokens=100)
37
  generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
38
  return generated_texts
39