mrdbourke committed (verified)
Commit d4a2b52 · Parent(s): a1d87e6

Create app.py

Files changed (1)
  1. app.py +139 -0
app.py ADDED
@@ -0,0 +1,139 @@
# Standard library imports
import os
import time
from datetime import datetime

# Third-party imports
import numpy as np
import torch
from PIL import Image
import accelerate  # must be installed for device_map="auto" below; imported to make the dependency explicit
import gradio as gr
import spaces
from transformers import (
    Qwen2_5_VLForConditionalGeneration,
    AutoProcessor,
)

# Local imports
from qwen_vl_utils import process_vision_info


def array_to_image_path(image_array):
    if image_array is None:
        raise ValueError("No image provided. Please upload an image before submitting.")

    # Convert the numpy array to a PIL Image
    img = Image.fromarray(np.uint8(image_array))

    # Generate a unique filename using a timestamp
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"image_{timestamp}.png"

    # Save the image
    img.save(filename)

    # Return the full path of the saved image
    full_path = os.path.abspath(filename)
    return full_path


# Load the model and processor once at startup
models = {
    "Qwen/Qwen2.5-VL-7B-Instruct": Qwen2_5_VLForConditionalGeneration.from_pretrained(
        "Qwen/Qwen2.5-VL-7B-Instruct",
        trust_remote_code=True,
        torch_dtype="auto",
        device_map="auto",
    ).eval()
}

processors = {
    "Qwen/Qwen2.5-VL-7B-Instruct": AutoProcessor.from_pretrained(
        "Qwen/Qwen2.5-VL-7B-Instruct", trust_remote_code=True
    )
}

DESCRIPTION = "[Qwen2.5-VL Demo](https://huggingface.co/collections/Qwen/qwen25-vl-6795ffac22b334a837c0f9a5)"


@spaces.GPU
def run_example(image, text_input=None, model_id=None):
    """Answer a text question about a single uploaded image and report the wall-clock time."""
    start_time = time.time()

    # Save the uploaded image so it can be referenced by path in the chat messages
    image_path = array_to_image_path(image)
    print(image_path)

    model = models[model_id]
    processor = processors[model_id]

    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": image_path,
                },
                {"type": "text", "text": text_input},
            ],
        }
    ]

    # Preparation for inference
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    inputs = inputs.to("cuda")

    # Inference: generate, then strip the prompt tokens from the output
    generated_ids = model.generate(**inputs, max_new_tokens=1024)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    end_time = time.time()
    total_time = round(end_time - start_time, 2)

    return output_text[0], total_time


css = """
#output {
    height: 500px;
    overflow: auto;
    border: 1px solid #ccc;
}
"""

with gr.Blocks(css=css) as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Tab(label="Qwen2.5-VL Input"):
        with gr.Row():
            with gr.Column():
                input_img = gr.Image(label="Input Picture")
                model_selector = gr.Dropdown(
                    choices=list(models.keys()),
                    label="Model",
                    value="Qwen/Qwen2.5-VL-7B-Instruct",
                )
                text_input = gr.Textbox(label="Question")
                submit_btn = gr.Button(value="Submit")
            with gr.Column():
                output_text = gr.Textbox(label="Output Text")
                time_taken = gr.Textbox(label="Time taken for processing + inference")

        submit_btn.click(run_example, [input_img, text_input, model_selector], [output_text, time_taken])

demo.queue(api_open=False)
demo.launch(debug=True)
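
Note that everything in app.py runs at import time, so importing it downloads the Qwen2.5-VL-7B weights and starts the Gradio server. The snippet below is a minimal local smoke test of run_example, not part of the committed file; it assumes the demo.queue()/demo.launch() calls are moved under an `if __name__ == "__main__":` guard (which this commit does not do), that a CUDA GPU with enough memory for the 7B model is available, and that "test.jpg" stands in for any local image.

# Hypothetical smoke test; not part of the committed app.py.
# Assumes app.py's launch calls sit behind a main guard, a CUDA GPU is
# available, and "test.jpg" is any local image file.
import numpy as np
from PIL import Image

from app import run_example  # loads the model at import time

image_array = np.array(Image.open("test.jpg").convert("RGB"))
answer, seconds = run_example(
    image_array,
    text_input="Describe this image in one sentence.",
    model_id="Qwen/Qwen2.5-VL-7B-Instruct",
)
print(f"{answer} ({seconds}s)")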