sounar committed
Commit c9c43bc · verified · 1 Parent(s): 65272a9

Update app.py

Files changed (1)
  1. app.py +19 -19
app.py CHANGED
@@ -37,31 +37,31 @@ def preprocess_image(image):
     transform = ToTensor()
     return transform(image).unsqueeze(0).to(model.device)
 
+# Handle queries
 def analyze_input(image, question):
     try:
-        # Prepare inputs
-        if image:
-            # Process image
+        # Process the image if provided
+        pixel_values = None
+        if image is not None:
             image = image.convert('RGB')
             pixel_values = preprocess_image(image)
-            prompt = f"Given the medical image and question: {question}\nPlease provide a detailed analysis."
-
-            # Model inputs for multimodal processing
-            model_inputs = {
-                "input_ids": tokenizer(prompt, return_tensors="pt").input_ids.to(model.device),
-                "pixel_values": pixel_values
-            }
-        else:
-            # Text-only processing
-            prompt = f"Medical question: {question}\nAnswer:"
-            model_inputs = {
-                "input_ids": tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
-            }
+
+        # Tokenize the question
+        input_ids = tokenizer(question, return_tensors="pt").input_ids.to(model.device)
 
-        # Generate response using model's custom method
-        outputs = model.generate(**model_inputs, max_new_tokens=256)
+        # Construct the model_inputs dictionary
+        model_inputs = {
+            "input_ids": input_ids,
+            "pixel_values": pixel_values
+        }
 
-        # Decode and clean response
+        # Generate the response using the model
+        outputs = model.generate(
+            model_inputs=model_inputs,
+            max_new_tokens=256
+        )
+
+        # Decode the response
         response = tokenizer.decode(outputs[0], skip_special_tokens=True)
         return {"status": "success", "response": response}
 
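
For context, a self-contained sketch of the handler as it reads after this commit. `model`, `tokenizer`, and `preprocess_image` are the globals defined earlier in app.py; the `except` body below is an assumption, since the hunk ends before the function's error path. One caveat worth flagging: the stock `transformers` `generate()` method takes its tensors as unpacked keyword arguments (`**model_inputs`, as the pre-commit code did), not a `model_inputs=` keyword, so the committed call only works if this model ships a custom `generate()` with that parameter. The sketch uses the unpacked form and drops `pixel_values` entirely for text-only queries so the model never receives `pixel_values=None`.

import torch

def analyze_input(image, question):
    try:
        # Encode the image only when one is supplied; text-only
        # queries leave pixel_values as None.
        pixel_values = None
        if image is not None:
            image = image.convert('RGB')
            pixel_values = preprocess_image(image)

        # Tokenize the question and move it onto the model's device.
        input_ids = tokenizer(question, return_tensors="pt").input_ids.to(model.device)

        # Omit the pixel_values entry for text-only queries.
        model_inputs = {"input_ids": input_ids}
        if pixel_values is not None:
            model_inputs["pixel_values"] = pixel_values

        # Standard transformers call: tensors unpacked as keyword arguments.
        with torch.no_grad():
            outputs = model.generate(**model_inputs, max_new_tokens=256)

        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return {"status": "success", "response": response}
    except Exception as e:
        # Assumed error path; the actual except block sits below the hunk.
        return {"status": "error", "response": str(e)}

The behavioral change in the commit itself: both paths now share one tokenized prompt (the raw `question`) instead of branching between the two f-string templates the old code used, so image context travels entirely through `pixel_values`.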