# caracal/app.py
import gradio as gr
import spaces
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info
import torch
from PIL import Image
import subprocess
from datetime import datetime
import numpy as np
import os
import tempfile
import zipfile
from gliner import GLiNER
import json
# Initialize GLiNER model
gliner_model = GLiNER.from_pretrained("knowledgator/modern-gliner-bi-large-v1.0")
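# GLiNER does zero-shot NER, so any comma-separated label set can be
# supplied at inference time; the labels below are only defaults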
DEFAULT_NER_LABELS = "person, organization, location, date, event"
# subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
# models = {
# "Qwen/Qwen2-VL-7B-Instruct": AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True, torch_dtype="auto", _attn_implementation="flash_attention_2").cuda().eval()
# }
def array_to_image_path(image_array):
# Convert numpy array to PIL Image
img = Image.fromarray(np.uint8(image_array))
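    # thumbnail() resizes in place, preserving aspect ratio and capping the
    # longest side at 1024 px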
img.thumbnail((1024, 1024))
# Generate a unique filename using timestamp
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
filename = f"image_{timestamp}.png"
# Save the image
img.save(filename)
# Get the full path of the saved image
full_path = os.path.abspath(filename)
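    # Note: saved images are never cleaned up, so they accumulate in the
    # working directory across requests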
return full_path
models = {
    "Qwen/Qwen2-VL-7B-Instruct": Qwen2VLForConditionalGeneration.from_pretrained(
        "Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True, torch_dtype="auto"
    ).cuda().eval()
}
processors = {
"Qwen/Qwen2-VL-7B-Instruct": AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True)
}
DESCRIPTION = "This demo uses [Qwen2-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct)"
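# On ZeroGPU Spaces, @spaces.GPU attaches a GPU to each call of the
# decorated function instead of reserving one for the whole process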
@spaces.GPU
def run_example(image, model_id="Qwen/Qwen2-VL-7B-Instruct", run_ner=False, ner_labels=DEFAULT_NER_LABELS):
# First get the OCR text
text_input = "Convert the image to text."
image_path = array_to_image_path(image)
model = models[model_id]
processor = processors[model_id]
messages = [
{
"role": "user",
"content": [
{
"type": "image",
"image": image_path,
},
{"type": "text", "text": text_input},
],
}
]
# Preparation for inference
text = processor.apply_chat_template(
messages, tokenize=False, add_generation_prompt=True
)
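    # process_vision_info collects the image/video references from the
    # messages so the processor can batch them alongside the text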
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
text=[text],
images=image_inputs,
videos=video_inputs,
padding=True,
return_tensors="pt",
)
inputs = inputs.to("cuda")
# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=1024)
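    # Drop the prompt portion of each sequence so only newly generated
    # tokens are decoded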
generated_ids_trimmed = [
out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
ocr_text = output_text[0]
    # If NER is enabled, run GLiNER over the OCR text
    if run_ner:
        # Strip whitespace so labels like " organization" match cleanly
        labels = [label.strip() for label in ner_labels.split(",")]
        ner_results = gliner_model.predict_entities(
            ocr_text,
            labels,
            threshold=0.3
        )
        # HighlightedText takes a dict of the full text plus
        # character-offset entities keyed by "entity"
        entities = [
            {"entity": entity["label"], "start": entity["start"], "end": entity["end"]}
            for entity in ner_results
        ]
        return {"text": ocr_text, "entities": entities}
    return {"text": ocr_text, "entities": []}
css = """
/* Overall app styling */
.gradio-container {
max-width: 1200px !important;
margin: 0 auto;
padding: 20px;
background-color: #f8f9fa;
}
/* Tabs styling */
.tabs {
border-radius: 8px;
background: white;
padding: 20px;
box-shadow: 0 2px 6px rgba(0, 0, 0, 0.1);
}
/* Input/Output containers */
.input-container, .output-container {
background: white;
border-radius: 8px;
padding: 15px;
margin: 10px 0;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05);
}
/* Button styling */
.submit-btn {
background-color: #2d31fa !important;
border: none !important;
padding: 8px 20px !important;
border-radius: 6px !important;
color: white !important;
transition: all 0.3s ease !important;
}
.submit-btn:hover {
background-color: #1f24c7 !important;
transform: translateY(-1px);
}
/* Output text area */
#output {
height: 500px;
overflow: auto;
border: 1px solid #e0e0e0;
border-radius: 6px;
padding: 15px;
background: #ffffff;
font-family: 'Arial', sans-serif;
}
/* Dropdown styling */
.gr-dropdown {
border-radius: 6px !important;
border: 1px solid #e0e0e0 !important;
}
/* Image upload area */
.gr-image-input {
border: 2px dashed #ccc;
border-radius: 8px;
padding: 20px;
transition: all 0.3s ease;
}
.gr-image-input:hover {
border-color: #2d31fa;
}
"""
with gr.Blocks(css=css) as demo:
gr.Image("Caracal.jpg", interactive=False)
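    # Show the model description under the banner image
    gr.Markdown(DESCRIPTION)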
with gr.Tab(label="Image Input", elem_classes="tabs"):
with gr.Row():
with gr.Column(elem_classes="input-container"):
input_img = gr.Image(label="Input Picture", elem_classes="gr-image-input")
model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="Qwen/Qwen2-VL-7B-Instruct", elem_classes="gr-dropdown")
# Add NER controls
with gr.Row():
ner_checkbox = gr.Checkbox(label="Run Named Entity Recognition", value=False)
ner_labels = gr.Textbox(
label="NER Labels (comma-separated)",
value=DEFAULT_NER_LABELS,
visible=False
)
submit_btn = gr.Button(value="Submit", elem_classes="submit-btn")
with gr.Column(elem_classes="output-container"):
output_text = gr.HighlightedText(label="Output Text", elem_id="output")
# Show/hide NER labels based on checkbox
ner_checkbox.change(
lambda x: gr.update(visible=x),
inputs=[ner_checkbox],
outputs=[ner_labels]
)
# Update submit button click handler
submit_btn.click(
run_example,
inputs=[input_img, model_selector, ner_checkbox, ner_labels],
outputs=[output_text]
)
with gr.Row():
filename = gr.Textbox(label="Save filename (without extension)", placeholder="Enter filename to save")
download_btn = gr.Button("Download Image & Text", elem_classes="submit-btn")
        def create_zip(image, text_data, fname):
            if not isinstance(image, (Image.Image, np.ndarray)) or not text_data or not fname:
                return None
            # Convert numpy array to PIL Image if needed
            if isinstance(image, np.ndarray):
                image = Image.fromarray(image)
            # Unpack the HighlightedText value: either the
            # {"text": ..., "entities": [...]} dict set by run_example or the
            # list of (span, label) tuples it round-trips to
            if isinstance(text_data, dict):
                text = text_data.get("text", "")
                entities = text_data.get("entities", [])
            else:
                text, entities = "", []
                for span, label in text_data:
                    if label is not None:
                        entities.append({"entity": label, "start": len(text), "end": len(text) + len(span)})
                    text += span
            # Build the files in a directory that outlives this function so
            # Gradio can still serve the zip; a TemporaryDirectory context
            # manager would delete it before the download starts
            temp_dir = tempfile.mkdtemp()
            # Save image; images created via fromarray have no format, so
            # default to png
            img_ext = (getattr(image, "format", None) or "png").lower()
            img_path = os.path.join(temp_dir, f"{fname}.{img_ext}")
            image.save(img_path)
            # Save plain text
            txt_path = os.path.join(temp_dir, f"{fname}.txt")
            with open(txt_path, 'w', encoding='utf-8') as f:
                f.write(text)
            # Create JSON with text, entities, and image info
            json_data = {
                "text": text,
                "entities": [
                    {
                        "start": entity["start"],
                        "end": entity["end"],
                        "label": entity.get("entity", entity.get("label")),
                        "text": text[entity["start"]:entity["end"]]
                    }
                    for entity in entities
                ],
                "image_file": f"{fname}.{img_ext}"
            }
            # Save JSON
            json_path = os.path.join(temp_dir, f"{fname}.json")
            with open(json_path, 'w', encoding='utf-8') as f:
                json.dump(json_data, f, indent=2, ensure_ascii=False)
            # Create zip file
            zip_path = os.path.join(temp_dir, f"{fname}.zip")
            with zipfile.ZipFile(zip_path, 'w') as zipf:
                zipf.write(img_path, os.path.basename(img_path))
                zipf.write(txt_path, os.path.basename(txt_path))
                zipf.write(json_path, os.path.basename(json_path))
            return zip_path
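        # Minimal sketch of the download wiring: a gr.File component is
        # assumed here as the target that serves the zip produced by
        # create_zip
        download_file = gr.File(label="Download zip")
        download_btn.click(
            create_zip,
            inputs=[input_img, output_text, filename],
            outputs=[download_file]
        )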
demo.queue(api_open=False)
demo.launch(debug=True)