Tonic committed
Commit 4ebf1ce · 1 Parent(s): d181372

Update app.py

Files changed (1)
  1. app.py +13 -11
app.py CHANGED
@@ -6,7 +6,7 @@ import copy
  from pathlib import Path
  import secrets
  import torch
- from PIL import Image, ImageDraw
+ from PIL import Image, ImageDraw, UnidentifiedImageError

  model_name = "qwen/Qwen-VL-Chat"
  tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
@@ -34,22 +34,24 @@ def clean_response(response: str) -> str:
      return response

  def chat_with_model(image_path=None, text_query=None, history=None):
-
-     default_image_path = 'path/to/default/image.jpg'
-     default_text = 'No text provided'
+     default_image_path = None
+     default_text = ""
      image_input = image_path if image_path else default_image_path
      text_input = text_query if text_query else default_text
      query_elements = [
          {'image': image_input},
          {'text': text_input}
      ]
-     query = tokenizer.from_list_format(query_elements)
-     tokenized_inputs = tokenizer(query, return_tensors='pt').to(device)
-     output = model.generate(**tokenized_inputs)
-     response = tokenizer.decode(output[0], skip_special_tokens=True)
-     cleaned_response = clean_response(response)
-     return cleaned_response
-
+     try:
+         query = tokenizer.from_list_format(query_elements)
+         tokenized_inputs = tokenizer(query, return_tensors='pt').to(device)
+         output = model.generate(**tokenized_inputs)
+         response = tokenizer.decode(output[0], skip_special_tokens=True)
+         cleaned_response = clean_response(response)
+         return cleaned_response
+     except UnidentifiedImageError:
+         return "Error: Unable to process the image. Please try a different image."
+
  def draw_boxes(image_path, response):
      image = Image.open(image_path)
      draw = ImageDraw.Draw(image)
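
Note: the new except branch relies on Pillow's UnidentifiedImageError, the exception Image.open raises when a file cannot be identified as an image. Below is a minimal, self-contained sketch of that pattern in isolation, without the Qwen-VL-Chat model, tokenizer, and device objects that app.py sets up; the function name and file path are hypothetical and only illustrate the error-handling idea, not the app's actual flow.

from PIL import Image, UnidentifiedImageError

def load_image_safely(image_path):
    # Image.open raises UnidentifiedImageError when the file exists but is
    # not a recognizable image format (e.g. a text file renamed to .jpg).
    try:
        return Image.open(image_path)
    except (FileNotFoundError, UnidentifiedImageError):
        # Same recovery idea as the except branch added to chat_with_model:
        # report failure instead of letting the exception propagate.
        return None

# Hypothetical usage: prints None for a missing or unreadable file,
# a PIL.Image.Image instance for a valid image.
print(load_image_safely("not_an_image.txt"))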