import gradio as gr
import spaces
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info
import torch
from PIL import Image
import subprocess
from datetime import datetime
import numpy as np
import os
# subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
# models = {
# "Qwen/Qwen2-VL-2B-Instruct": AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", trust_remote_code=True, torch_dtype="auto", _attn_implementation="flash_attention_2").cuda().eval()
# }
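# If flash-attn were installed (e.g. via the commented command above), the models
# below could be loaded with FlashAttention-2 enabled. A sketch, not what this
# Space currently does:
#   Qwen2VLForConditionalGeneration.from_pretrained(
#       "fmb-quibdo/qwen2-vl-fmb",
#       torch_dtype="auto",
#       attn_implementation="flash_attention_2",
#   ).cuda().eval()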
def array_to_image_path(image_array):
    # Convert numpy array to PIL Image
    img = Image.fromarray(np.uint8(image_array))

    # Generate a unique filename using timestamp
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"image_{timestamp}.png"

    # Save the image
    img.save(filename)

    # Get the full path of the saved image
    full_path = os.path.abspath(filename)

    return full_path
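# Illustrative usage (assumption: a NumPy RGB image, as handed over by the Gradio
# Image component). The helper writes a timestamped PNG to the working directory
# and returns its absolute path:
#   path = array_to_image_path(np.zeros((64, 64, 3), dtype=np.uint8))
#   # e.g. "/home/user/app/image_20240101_120000.png"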
# Load both fine-tuned checkpoints onto the GPU once at startup.
models = {
    "qwen2-vl-fmb": Qwen2VLForConditionalGeneration.from_pretrained("fmb-quibdo/qwen2-vl-fmb", torch_dtype="auto").cuda().eval(),
    "qwen2-vl-fmb-7B": Qwen2VLForConditionalGeneration.from_pretrained("fmb-quibdo/fmb-qwen-vl-7b", torch_dtype="auto").cuda().eval(),
}
# Both checkpoints reuse the processor of the base Qwen/Qwen2-VL-7B-Instruct model.
processors = {
    "qwen2-vl-fmb": AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct"),
    "qwen2-vl-fmb-7B": AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct"),
}
DESCRIPTION = "[Qwen2-VL 2B or 7B Spanish Demo](https://huggingface.co/ajanco/qwen2-vl-fmb)"
kwargs = {}
kwargs['torch_dtype'] = torch.bfloat16
user_prompt = '<|user|>\n'
assistant_prompt = '<|assistant|>\n'
prompt_suffix = "<|end|>\n"
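# For reference, a prompt assembled from these tags has the shape:
#   <|user|>\n<|image_1|>\n{question}<|end|>\n<|assistant|>\n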
@spaces.GPU
def run_example(image, text_input=None, model_id="qwen2-vl-fmb"):
    image_path = array_to_image_path(image)
    print(image_path)
    model = models[model_id]
    processor = processors[model_id]
    # Manual prompt string; kept for reference, but the chat template below is
    # what is actually fed to the model.
    prompt = f"{user_prompt}<|image_1|>\n{text_input}{prompt_suffix}{assistant_prompt}"
    image = Image.fromarray(image).convert("RGB")
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image",
                    "image": image_path,
                },
                {"type": "text", "text": text_input},
            ],
        }
    ]

    # Preparation for inference
    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )
    inputs = inputs.to("cuda")

    # Inference: Generation of the output
    generated_ids = model.generate(**inputs, max_new_tokens=4096)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    output_text = processor.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )
    return output_text[0]
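# Illustrative direct call (a sketch; in the Space the Gradio button wires this up):
#   img = np.array(Image.open("assets/test0.png").convert("RGB"))
#   answer = run_example(img, "extract text in Spanish", model_id="qwen2-vl-fmb")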
css = """
#output {
height: 500px;
overflow: auto;
border: 1px solid #ccc;
}
"""
with gr.Blocks(css=css) as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Tab(label="Qwen2-VL-7B Input"):
        with gr.Row():
            with gr.Column():
                input_img = gr.Image(label="Input Picture")
                model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="qwen2-vl-fmb")
                text_input = gr.Textbox(label="Question", value="extract text in Spanish from the image")
                submit_btn = gr.Button(value="Submit")
            with gr.Column():
                output_text = gr.Textbox(label="Output Text")

        # Each example supplies one value per input component (image, question).
        gr.Examples(
            examples=[
                ["assets/test0.png", "extract text in Spanish"],
                ["assets/test1.png", "extract text in Spanish"],
                ["assets/test2.png", "extract text in Spanish"],
                ["assets/test3.png", "extract text in Spanish"],
                ["assets/test4.png", "extract text in Spanish"],
            ],
            inputs=[input_img, text_input],
            outputs=[output_text],
            fn=run_example,
            cache_examples=True,
            label="Try examples"
        )

        submit_btn.click(run_example, [input_img, text_input, model_selector], [output_text])
demo.queue(api_open=False)
demo.launch(debug=True)
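# To try the demo outside the Space (a sketch, assuming this file is saved as app.py,
# a CUDA GPU is available, and gradio, spaces, transformers, qwen-vl-utils, torch,
# and pillow are installed):
#   python app.py
# Gradio serves the interface on http://127.0.0.1:7860 by default.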