cyberandy committed
Commit c7dae24 · 1 Parent(s): 3f4b390
Files changed (2)
  1. app.py +144 -0
  2. requirements.txt +8 -0
app.py ADDED
@@ -0,0 +1,144 @@
+ import gradio as gr
+ import spaces
+ from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
+ from qwen_vl_utils import process_vision_info
+ import torch
+ from PIL import Image
+ import subprocess
+ from datetime import datetime
+ import numpy as np
+ import os
+
+
+ # subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
+
+ # models = {
+ #     "Qwen/Qwen2-VL-7B-Instruct": AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True, torch_dtype="auto", _attn_implementation="flash_attention_2").cuda().eval()
+
+
+ # }
+ def array_to_image_path(image_array):
+     if image_array is None:
+         raise ValueError("No image provided. Please upload an image before submitting.")
+     # Convert numpy array to PIL Image
+     img = Image.fromarray(np.uint8(image_array))
+
+     # Generate a unique filename using timestamp
+     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+     filename = f"image_{timestamp}.png"
+
+     # Save the image
+     img.save(filename)
+
+     # Get the full path of the saved image
+     full_path = os.path.abspath(filename)
+
+     return full_path
+
+
+ models = {
+     "Qwen/Qwen2-VL-7B-Instruct": Qwen2VLForConditionalGeneration.from_pretrained(
+         "Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True, torch_dtype="auto"
+     )
+     .cuda()
+     .eval()
+ }
+
+ processors = {
+     "Qwen/Qwen2-VL-7B-Instruct": AutoProcessor.from_pretrained(
+         "Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True
+     )
+ }
+
+ DESCRIPTION = "[Qwen2-VL-7B Demo](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct)"
+
+ kwargs = {}
+ kwargs["torch_dtype"] = torch.bfloat16
+
+ user_prompt = "<|user|>\n"
+ assistant_prompt = "<|assistant|>\n"
+ prompt_suffix = "<|end|>\n"
+
+
+ @spaces.GPU
+ def run_example(image, text_input=None, model_id="Qwen/Qwen2-VL-7B-Instruct"):
+     image_path = array_to_image_path(image)
+
+     print(image_path)
+     model = models[model_id]
+     processor = processors[model_id]
+
+     prompt = f"{user_prompt}<|image_1|>\n{text_input}{prompt_suffix}{assistant_prompt}"
+     image = Image.fromarray(image).convert("RGB")
+     messages = [
+         {
+             "role": "user",
+             "content": [
+                 {
+                     "type": "image",
+                     "image": image_path,
+                 },
+                 {"type": "text", "text": text_input},
+             ],
+         }
+     ]
+
+     # Preparation for inference
+     text = processor.apply_chat_template(
+         messages, tokenize=False, add_generation_prompt=True
+     )
+     image_inputs, video_inputs = process_vision_info(messages)
+     inputs = processor(
+         text=[text],
+         images=image_inputs,
+         videos=video_inputs,
+         padding=True,
+         return_tensors="pt",
+     )
+     inputs = inputs.to("cuda")
+
+     # Inference: Generation of the output
+     generated_ids = model.generate(**inputs, max_new_tokens=1024)
+     generated_ids_trimmed = [
+         out_ids[len(in_ids) :]
+         for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+     ]
+     output_text = processor.batch_decode(
+         generated_ids_trimmed,
+         skip_special_tokens=True,
+         clean_up_tokenization_spaces=False,
+     )
+
+     return output_text[0]
+
+
+ css = """
+ #output {
+     height: 500px;
+     overflow: auto;
+     border: 1px solid #ccc;
+ }
+ """
+
+ with gr.Blocks(css=css) as demo:
+     gr.Markdown(DESCRIPTION)
+     with gr.Tab(label="Qwen2-VL-7B Input"):
+         with gr.Row():
+             with gr.Column():
+                 input_img = gr.Image(label="Input Picture")
+                 model_selector = gr.Dropdown(
+                     choices=list(models.keys()),
+                     label="Model",
+                     value="Qwen/Qwen2-VL-7B-Instruct",
+                 )
+                 text_input = gr.Textbox(label="Question")
+                 submit_btn = gr.Button(value="Submit")
+             with gr.Column():
+                 output_text = gr.Textbox(label="Output Text")
+
+         submit_btn.click(
+             run_example, [input_img, text_input, model_selector], [output_text]
+         )
+
+ demo.queue(api_open=False)
+ demo.launch(debug=True)
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ numpy==1.24.4
+ Pillow==10.3.0
+ Requests==2.31.0
+ torch
+ torchvision
+ git+https://github.com/huggingface/transformers.git
+ accelerate
+ qwen-vl-utils
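
For reference, a minimal sketch (not part of this commit) of the preprocessing path that run_example uses, run in isolation; it assumes the transformers and qwen-vl-utils packages from requirements.txt are installed, and "sample.png" is a placeholder image path rather than a file in this repository.

from transformers import AutoProcessor
from qwen_vl_utils import process_vision_info

processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True)

# Same message structure that run_example builds from the uploaded image and question
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "image": "sample.png"},  # placeholder path, not from this repo
            {"type": "text", "text": "Describe this image."},
        ],
    }
]

# Build the chat-formatted prompt and extract the vision inputs, as in app.py
text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text], images=image_inputs, videos=video_inputs, padding=True, return_tensors="pt"
)
print(inputs.input_ids.shape)  # prompt token ids, including the image placeholder tokens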