import os
import re
import subprocess
import time
from threading import Thread

import gradio as gr
import numpy as np
import spaces
import torch
import torchvision.transforms as T
from PIL import Image, ExifTags
from torchvision.transforms.functional import InterpolationMode
from transformers import (AutoModel, AutoModelForCausalLM, AutoTokenizer,
                          StoppingCriteria, TextIteratorStreamer)

# Install flash-attn at startup; FLASH_ATTENTION_SKIP_CUDA_BUILD avoids compiling the CUDA kernels.
subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)

torch.set_default_device('cuda')

IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)
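
# Per-tile transform: convert to RGB, resize to a square tile of `input_size`
# pixels, and normalise with ImageNet statistics.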
def build_transform(input_size):
    MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
    transform = T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=MEAN, std=STD)
    ])
    return transform
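
# Pick the tiling grid (columns x rows) whose aspect ratio is closest to the
# input image; ties are broken in favour of the grid that covers more area.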
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    best_ratio_diff = float('inf')
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        if ratio_diff < best_ratio_diff:
            best_ratio_diff = ratio_diff
            best_ratio = ratio
        elif ratio_diff == best_ratio_diff:
            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                best_ratio = ratio
    return best_ratio
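
# Split an image into up to `max_num` square tiles of `image_size` pixels,
# optionally appending a full-image thumbnail as an extra tile.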
def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # enumerate every (columns, rows) grid whose tile count lies in [min_num, max_num]
    target_ratios = set(
        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
        i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    # find the grid whose aspect ratio is closest to the input image
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size)

    # calculate the target width and height
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    # resize the image, then crop it into `blocks` square tiles
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images
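
# Open an image and undo any camera rotation recorded in its EXIF Orientation
# tag, so that tiling and OCR run on an upright image.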
def correct_image_orientation(image_path):
    # Open the image
    image = Image.open(image_path)

    # Check for EXIF data (if present)
    try:
        exif = image._getexif()
        if exif is not None:
            for tag, value in exif.items():
                if ExifTags.TAGS.get(tag) == "Orientation":
                    # Rotate the image according to the Orientation value
                    if value == 3:
                        image = image.rotate(180, expand=True)
                    elif value == 6:
                        image = image.rotate(-90, expand=True)
                    elif value == 8:
                        image = image.rotate(90, expand=True)
                    break
    except Exception as e:
        print("Could not process EXIF data:", e)

    return image
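
# Load an image file, upscale it 2x, tile it with dynamic_preprocess, and return
# the stacked tensor of normalised tiles expected by model.chat().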
def load_image(image_file, input_size=448, max_num=12):
    image = correct_image_orientation(image_file).convert('RGB')
    width, height = image.size
    image = image.resize((width * 2, height * 2), Image.LANCZOS)
    print("Image size: ", image.size)
    transform = build_transform(input_size=input_size)
    images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
    pixel_values = [transform(image) for image in images]
    pixel_values = torch.stack(pixel_values)
    return pixel_values
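
# Helpers for splitting a "DeepThink" response into its reasoning trace and the
# final answer wrapped in <CONCLUSION>...</CONCLUSION> tags.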
def extract_conclusion(text):
    match = re.search(r"<CONCLUSION>(.*?)</CONCLUSION>", text, re.DOTALL)
    return match.group(1).strip() if match else ""

def extract_think(text):
    text = re.sub(r"<.*?>", "", text.split("<CONCLUSION>")[0])  # strip all <...> tags
    conclusion_part = extract_conclusion(text)
    return text.replace(conclusion_part, "").strip()
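
# Re-wrap long lines so the streamed "thinking" block stays readable in the chat UI.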
def wrap_text(text, max_words=20):
    lines = text.split('\n')  # split into lines first
    wrapped_lines = []
    for line in lines:
        words = line.split()
        if len(words) > max_words:
            wrapped_lines.extend([' '.join(words[i:i+max_words]) for i in range(0, len(words), max_words)])
        else:
            wrapped_lines.append(line)
    return '\n'.join(wrapped_lines)
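
# Load the Vintern-3B-R-beta vision-language model and its tokenizer onto the GPU.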
model = AutoModel.from_pretrained(
    "5CD-AI/Vintern-3B-R-beta",
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    trust_remote_code=True,
    use_flash_attn=True,
).eval().cuda()
tokenizer = AutoTokenizer.from_pretrained("5CD-AI/Vintern-3B-R-beta", trust_remote_code=True, use_fast=False)
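
# Flag toggled by the Think button: when True, chat() wraps the question in the
# long structured reasoning prompt below instead of answering directly.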
global_think_mode = False

think_prompt = """Bạn là người rất cẩn thận và đa nghi, vui lòng trả lời câu hỏi dưới đây bằng tiếng Việt. Khi suy luận bạn thường liệt kê ra các bằng chứng để chỉ ra các đáp án khả thi, suy luận và giải thích tại sao lại lựa chọn và loại bỏ trước khi đưa ra câu trả lời cuối cùng.
Câu hỏi:
{question_input}
Hãy trả lời rất dài theo định dạng sau:
<SUMMARY>...</SUMMARY>
<CAPTION>...</CAPTION>
<INFORMATION_EXTRACT>...</INFORMATION_EXTRACT>
<EXTERNAL_KNOWLEDGE_EXPANSION>...</EXTERNAL_KNOWLEDGE_EXPANSION>
<FIND_CANDIDATES_REASONING>...</FIND_CANDIDATES_REASONING>
<TOP3_CANDIDATES>...</TOP3_CANDIDATES>
<REASONING_PLAN>...</REASONING_PLAN>
<REASONING>...</REASONING>
<COUNTER_ARGUMENTS>...</COUNTER_ARGUMENTS>
<VALIDATION_REASONING>...</VALIDATION_REASONING>
<CONCLUSION>...</CONCLUSION>
"""
def chat(message, history):
    global global_think_mode
    print("------------------------> RUN with global_think_mode: ", global_think_mode)
    print("history", history)
    print("message", message)

    if len(history) != 0 and len(message["files"]) != 0:
        yield """Chúng tôi hiện chỉ hỗ trợ 1 ảnh ở đầu ngữ cảnh! Vui lòng tạo mới cuộc trò chuyện.
We currently only support one image at the start of the context! Please start a new conversation."""
        return

    if len(history) == 0 and len(message["files"]) != 0:
        if "path" in message["files"][0]:
            test_image = message["files"][0]["path"]
        else:
            test_image = message["files"][0]
        pixel_values = load_image(test_image, max_num=6).to(torch.bfloat16).cuda()
    elif len(history) == 0 and len(message["files"]) == 0:
        pixel_values = None
    elif history[0][0][0] is not None and os.path.isfile(history[0][0][0]):
        test_image = history[0][0][0]
        pixel_values = load_image(test_image, max_num=6).to(torch.bfloat16).cuda()
    else:
        pixel_values = None
    if not global_think_mode:
        generation_config = dict(max_new_tokens=700, do_sample=False, num_beams=3, repetition_penalty=2.5)
        if len(history) == 0:
            # First turn: ask directly, prefixing the <image> placeholder when an image was uploaded.
            if pixel_values is not None:
                question = '<image>\n' + message["text"]
            else:
                question = message["text"]
            response, conv_history = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True)
        else:
            # Follow-up turn: rebuild the (question, answer) history expected by model.chat().
            conv_history = []
            if history[0][0][0] is not None and os.path.isfile(history[0][0][0]):
                start_index = 1
            else:
                start_index = 0
            for i, chat_pair in enumerate(history[start_index:]):
                if i == 0 and start_index == 1:
                    conv_history.append(tuple(['<image>\n' + chat_pair[0], chat_pair[1]]))
                else:
                    conv_history.append(tuple(chat_pair))
            print("conv_history", conv_history)
            question = message["text"]
            response, conv_history = model.chat(tokenizer, pixel_values, question, generation_config, history=conv_history, return_history=True)

        print(f'User: {question}\nAssistant: {response}')

        # Stream the finished response character by character for a typing effect.
        buffer = ""
        for new_text in response:
            buffer += new_text
            generated_text_without_prompt = buffer[:]
            time.sleep(0.02)
            yield generated_text_without_prompt
    else:
        ####################################################### thinking #######################################################
        generation_config = dict(max_new_tokens=2000, do_sample=True, num_beams=2, repetition_penalty=2.5, temperature=0.5)
        if len(history) == 0:
            # First turn: wrap the user question in the structured reasoning prompt.
            if pixel_values is not None:
                question = '<image>\n' + think_prompt.format(question_input=message["text"])
            else:
                question = think_prompt.format(question_input=message["text"])
            response, conv_history = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True)
        else:
            conv_history = []
            if history[0][0][0] is not None and os.path.isfile(history[0][0][0]):
                start_index = 1
            else:
                start_index = 0
            for i, chat_pair in enumerate(history[start_index:]):
                if i == 0 and start_index == 1:
                    conv_history.append(tuple(['<image>\n' + chat_pair[0], chat_pair[1]]))
                else:
                    conv_history.append(tuple(chat_pair))
            print("conv_history", conv_history)
            question = message["text"]
            response, conv_history = model.chat(tokenizer, pixel_values, question, generation_config, history=conv_history, return_history=True)

        print(f'User: {question}\nAssistant: {response}')

        # Split the response into the reasoning trace and the final conclusion.
        think_part = wrap_text(extract_think(response))
        conclusion_part = extract_conclusion(response)
        if conclusion_part == "":
            conclusion_part = think_part

        # Stream the thinking process inside a <pre><code> block.
        accumulated_text = "💡 **Thinking process:**\n\n"
        accumulated_text += "<pre><code>\n"
        temp_text = ""
        for char in think_part:
            temp_text += char
            yield accumulated_text + temp_text + "\n</code></pre>\n"
            time.sleep(0.01)
        accumulated_text += temp_text + "\n</code></pre>\n"

        # Then stream the conclusion.
        accumulated_text += "🎯 **Conclusion:**\n\n"
        temp_text = ""
        for char in conclusion_part:
            temp_text += char
            yield accumulated_text + temp_text
            time.sleep(0.02)
        accumulated_text += temp_text
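
# Custom CSS: fixes the chatbot/component heights, styles image previews in the
# chat history, and sizes the Think button. The #component-* IDs are generated by
# Gradio for the current layout and may need updating if the layout changes.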
CSS =""" | |
#component-10 { | |
height: 70dvh !important; | |
transform-origin: top; /* Đảm bảo rằng phần tử mở rộng từ trên xuống */ | |
border-style: solid; | |
overflow: hidden; | |
flex-grow: 1; | |
min-width: min(160px, 100%); | |
border-width: var(--block-border-width); | |
} | |
#component-12 { | |
height: 50dvh !important; | |
border-style: solid; | |
overflow: auto; | |
flex-grow: 1; | |
min-width: min(160px, 100%); | |
border-width: var(--block-border-width); | |
} | |
#component-15 { | |
border-style: solid; | |
overflow: hidden; | |
flex-grow: 7; | |
min-width: min(160px, 100%); | |
border-width: var(--block-border-width); | |
height: 20dvh !important; | |
} | |
#think-button{ | |
width: 40% !important; | |
} | |
/* Đảm bảo ảnh bên trong nút hiển thị đúng cách cho các nút có aria-label chỉ định */ | |
button.svelte-1lcyrx4[aria-label="user's message: a file of type image/jpeg, "] img.svelte-1pijsyv { | |
width: 100%; | |
object-fit: contain; | |
height: 100%; | |
border-radius: 13px; /* Thêm bo góc cho ảnh */ | |
max-width: 50vw; /* Giới hạn chiều rộng ảnh */ | |
} | |
/* Đặt chiều cao cho nút và cho phép chọn văn bản chỉ cho các nút có aria-label chỉ định */ | |
button.svelte-1lcyrx4[aria-label="user's message: a file of type image/jpeg, "] { | |
user-select: text; | |
text-align: left; | |
height: 300px; | |
} | |
/* Thêm bo góc và giới hạn chiều rộng cho ảnh không thuộc avatar container */ | |
.message-wrap.svelte-1lcyrx4 > div.svelte-1lcyrx4 .svelte-1lcyrx4:not(.avatar-container) img { | |
border-radius: 13px; | |
max-width: 50vw; | |
} | |
.message-wrap.svelte-1lcyrx4 .message.svelte-1lcyrx4 img { | |
margin: var(--size-2); | |
max-height: 500px; | |
} | |
.image-preview-close-button { | |
position: relative; /* Nếu cần định vị trí */ | |
width: 5%; /* Chiều rộng nút */ | |
height: 5%; /* Chiều cao nút */ | |
display: flex; | |
justify-content: center; | |
align-items: center; | |
padding: 0; /* Để tránh ảnh hưởng từ padding mặc định */ | |
border: none; /* Tùy chọn để loại bỏ đường viền */ | |
background: none; /* Tùy chọn để loại bỏ nền */ | |
} | |
.example-image-container.svelte-9pi8y1 { | |
width: calc(var(--size-8) * 5); | |
height: calc(var(--size-8) * 5); | |
border-radius: var(--radius-lg); | |
overflow: hidden; | |
position: relative; | |
margin-bottom: var(--spacing-lg); | |
} | |
""" | |
js = """ | |
function forceLightTheme() { | |
const url = new URL(window.location); | |
// Cập nhật __theme thành light nếu giá trị không đúng | |
if (url.searchParams.get('__theme') !== 'light') { | |
url.searchParams.set('__theme', 'light'); | |
// Thay đổi URL mà không tải lại trang nếu cần | |
window.history.replaceState({}, '', url.href); | |
} | |
// Đảm bảo document luôn áp dụng theme light | |
document.documentElement.setAttribute('data-theme', 'light'); | |
} | |
""" | |
def toggle_think_mode(current_state):
    global global_think_mode
    new_state = not current_state
    global_think_mode = not global_think_mode
    print("global_think_mode: ", global_think_mode, "=" * 20)
    button_label = "🧠DeepThink💡1minute⏳" if global_think_mode else "🧠Think"
    return new_state, button_label

def reset_think_mode():
    return False, "🧠Think"  # back to the default state
demo = gr.Blocks(css=CSS, js=js, theme='NoCrypt/miku')

with demo:
    think_mode = gr.State(False)  # holds the Think Mode state
    chat_demo_interface = gr.ChatInterface(
        fn=chat,
        description="""**Vintern-3B-R-beta** This Gradio demo is not complete yet; I am still working on it. :) """,
        examples=[
            [{"text": "Trích xuất các thông tin từ ảnh trả về markdown.", "files": ["./demo_1.jpg"]}, False, False],
            [{"text": "Liệt kê toàn bộ văn bản.", "files": ["./demo_2.jpg"]}, False, False],
            [{"text": "Trích xuất thông tin kiện hàng trong ảnh và trả về dạng JSON.", "files": ["./demo_4.jpg"]}, False, False]
        ],
        # additional_inputs=[think_mode],
        title="❄️Vintern-3B-R-beta❄️",
        multimodal=True,
        css=CSS,
        js=js,
        theme='NoCrypt/miku'
    )
    think_button = gr.Button("🧠Think", elem_id="think-button", variant="secondary")

    # Clicking the button flips the think_mode state and updates the button label.
    think_button.click(toggle_think_mode, inputs=[think_mode], outputs=[think_mode, think_button])

    # Reset the Think button once the chat turn completes.
    # chat_demo_interface.submit(reset_think_mode, inputs=[], outputs=[think_mode, think_button])

demo.queue().launch()