import spaces  # required for the @spaces.GPU decorator used below
import os
import re
import time

import gradio as gr
import torch
from transformers import AutoModelForCausalLM
from transformers import TextIteratorStreamer
from threading import Thread

import importlib.metadata
from packaging import version
from transformers.utils.import_utils import (
    is_torch_available,
    _is_package_available,
    is_torch_mlu_available
)


def diagnose_flash_attn_2_availability():
    """Check whether this environment satisfies the requirements of Flash Attention 2."""
    if not is_torch_available():
        return "PyTorch is not available."
    if not _is_package_available("flash_attn"):
        return "flash_attn package is not installed."
    if not (torch.cuda.is_available() or is_torch_mlu_available()):
        return "Neither CUDA nor MLU is available."

    flash_attn_version = importlib.metadata.version("flash_attn")
    # Each backend requires a different minimum flash_attn version.
    if torch.version.cuda:
        required_version = "2.1.0"
        if version.parse(flash_attn_version) < version.parse(required_version):
            return f"CUDA is available, but flash_attn version {flash_attn_version} is installed. Version >= {required_version} is required."
    elif torch.version.hip:
        required_version = "2.0.4"
        if version.parse(flash_attn_version) < version.parse(required_version):
            return f"HIP is available, but flash_attn version {flash_attn_version} is installed. Version >= {required_version} is required."
    elif is_torch_mlu_available():
        required_version = "2.3.3"
        if version.parse(flash_attn_version) < version.parse(required_version):
            return f"MLU is available, but flash_attn version {flash_attn_version} is installed. Version >= {required_version} is required."
    else:
        return "Unknown PyTorch backend."

    return "All requirements for Flash Attention 2 are met."


# Run the diagnostic and report the result.
result = diagnose_flash_attn_2_availability()
if result != "All requirements for Flash Attention 2 are met.":
    print(f"Flash Attention 2 is not available: {result}")
    print("Using `flash_attention_2` requires having the correct version of `flash_attn` installed.")
else:
    print("Flash Attention 2 can be used.")

model_name = 'AIDC-AI/Ovis2-16B'

# Load the model. Note that the diagnostic above only reports availability;
# the model is loaded with its default attention implementation.
model = AutoModelForCausalLM.from_pretrained(model_name,
                                             torch_dtype=torch.bfloat16,
                                             multimodal_max_length=8192,
                                             trust_remote_code=True).to(device='cuda')
text_tokenizer = model.get_text_tokenizer()
visual_tokenizer = model.get_visual_tokenizer()
streamer = TextIteratorStreamer(text_tokenizer, skip_prompt=True, skip_special_tokens=True)
image_placeholder = '<image>'
cur_dir = os.path.dirname(os.path.abspath(__file__))


def submit_chat(chatbot, text_input):
    # Append the new turn as a list so ovis_chat can update the pending answer in place.
    response = ''
    chatbot.append([text_input, response])
    return chatbot, ''


@spaces.GPU
def ovis_chat(chatbot, image_input):
    # Rebuild the full conversation in the format expected by model.preprocess_inputs.
    conversations = [{
        "from": "system",
        "value": "You are Ovis, a multimodal large language model developed by Alibaba International, and your task is to provide reliable and structured responses to users. 你是Ovis,由阿里国际研发的多模态大模型,你的任务是为用户提供可靠、结构化的回复。"
    }]
    response = ""
    text_input = chatbot[-1][0]
    for query, response in chatbot[:-1]:
        conversations.append({
            "from": "human",
            "value": query
        })
        conversations.append({
            "from": "gpt",
            "value": response
        })
    # Strip any image placeholder the user typed; it is re-inserted below if an image is attached.
    text_input = text_input.replace(image_placeholder, '')
    conversations.append({
        "from": "human",
        "value": text_input
    })
    if image_input is not None:
        conversations[0]["value"] = image_placeholder + '\n' + conversations[0]["value"]

    prompt, input_ids, pixel_values = model.preprocess_inputs(conversations, [image_input])
    attention_mask = torch.ne(input_ids, text_tokenizer.pad_token_id)
    input_ids = input_ids.unsqueeze(0).to(device=model.device)
    attention_mask = attention_mask.unsqueeze(0).to(device=model.device)
    if image_input is None:
        pixel_values = [None]
    else:
        pixel_values = [pixel_values.to(dtype=visual_tokenizer.dtype, device=visual_tokenizer.device)]

    with torch.inference_mode():
        gen_kwargs = dict(
            max_new_tokens=1536,
            do_sample=False,
            top_p=None,
            top_k=None,
            temperature=None,
            repetition_penalty=None,
            eos_token_id=model.generation_config.eos_token_id,
            pad_token_id=text_tokenizer.pad_token_id,
            use_cache=True
        )
        response = ""
        # Run generation in a background thread; partial text arrives through the streamer.
        thread = Thread(target=model.generate,
                        kwargs={"inputs": input_ids,
                                "pixel_values": pixel_values,
                                "attention_mask": attention_mask,
                                "streamer": streamer,
                                **gen_kwargs})
        thread.start()
        for new_text in streamer:
            response += new_text
            chatbot[-1][1] = response
            yield chatbot
        thread.join()

    # debug
    print('*' * 60)
    print('*' * 60)
    print('OVIS_CONV_START')
    for i, (request, answer) in enumerate(chatbot[:-1], 1):
        print(f'Q{i}:\n {request}')
        print(f'A{i}:\n {answer}')
    print('New_Q:\n', text_input)
    print('New_A:\n', response)
    print('OVIS_CONV_END')


def clear_chat():
    return [], None, ""
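# --- Reference sketch (not called by the app) --------------------------------
# A minimal single-turn version of the preprocess/generate path that ovis_chat
# implements above, without Gradio or streaming. The function name and its
# parameters are hypothetical; it only reuses calls already present in this
# file (model.preprocess_inputs, model.generate). It assumes, per Ovis's
# reference usage, that the wrapped generate returns only the newly generated
# token ids.
def single_turn_example(image, question):
    conversations = [{
        "from": "human",
        "value": image_placeholder + '\n' + question
    }]
    prompt, input_ids, pixel_values = model.preprocess_inputs(conversations, [image])
    attention_mask = torch.ne(input_ids, text_tokenizer.pad_token_id)
    input_ids = input_ids.unsqueeze(0).to(device=model.device)
    attention_mask = attention_mask.unsqueeze(0).to(device=model.device)
    pixel_values = [pixel_values.to(dtype=visual_tokenizer.dtype, device=visual_tokenizer.device)]
    with torch.inference_mode():
        output_ids = model.generate(input_ids,
                                    pixel_values=pixel_values,
                                    attention_mask=attention_mask,
                                    max_new_tokens=1536,
                                    do_sample=False,
                                    eos_token_id=model.generation_config.eos_token_id,
                                    pad_token_id=text_tokenizer.pad_token_id,
                                    use_cache=True)
    return text_tokenizer.decode(output_ids[0], skip_special_tokens=True)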

# Load the logo and inject sizing attributes into the root <svg> tag so it
# lines up with the title text.
with open(f"{cur_dir}/resource/logo.svg", "r", encoding="utf-8") as svg_file:
    svg_content = svg_file.read()
font_size = "2.5em"
svg_content = re.sub(r'(<svg[^>]*)(>)',
                     rf'\1 height="{font_size}" style="vertical-align: middle; display: inline-block;"\2',
                     svg_content)

html = f"""<center>
    <h1 style="font-size: {font_size};">{svg_content} {model_name.split('/')[-1]}</h1>
</center>
<center>
    <font size=3><b>Ovis</b> has been open-sourced on
    <a href="https://huggingface.co/{model_name}">😊 Huggingface</a> and
    <a href="https://github.com/AIDC-AI/Ovis">🌟 GitHub</a>.
    If you find Ovis useful, a like❤️ or a star🌟 would be appreciated.</font>
</center>"""
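# The delimiter pairs below are handed to gr.Chatbot so that LaTeX spans in a
# reply are typeset instead of shown as raw text. For example, an answer
# containing "The shaded area is \(\pi r^2 - s^2\)" would be rendered as math
# (the sample answer is illustrative, not actual model output).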
""" latex_delimiters_set = [{ "left": "\\(", "right": "\\)", "display": True }, { "left": "\\begin{equation}", "right": "\\end{equation}", "display": True }, { "left": "\\begin{align}", "right": "\\end{align}", "display": True }, { "left": "\\begin{alignat}", "right": "\\end{alignat}", "display": True }, { "left": "\\begin{gather}", "right": "\\end{gather}", "display": True }, { "left": "\\begin{CD}", "right": "\\end{CD}", "display": True }, { "left": "\\[", "right": "\\]", "display": True }] text_input = gr.Textbox(label="prompt", placeholder="Enter your text here...", lines=1, container=False) with gr.Blocks(title=model_name.split('/')[-1], theme=gr.themes.Ocean()) as demo: gr.HTML(html) with gr.Row(): with gr.Column(scale=3): image_input = gr.Image(label="image", height=350, type="pil") gr.Examples( examples=[ [f"{cur_dir}/examples/case0.png", "Find the area of the shaded region."], [f"{cur_dir}/examples/case1.png", "explain this model to me."], [f"{cur_dir}/examples/case2.png", "What is net profit margin as a percentage of total revenue?"], ], inputs=[image_input, text_input] ) with gr.Column(scale=7): chatbot = gr.Chatbot(label="Ovis", layout="panel", height=600, show_copy_button=True, latex_delimiters=latex_delimiters_set) text_input.render() with gr.Row(): send_btn = gr.Button("Send", variant="primary") clear_btn = gr.Button("Clear", variant="secondary") send_click_event = send_btn.click(submit_chat, [chatbot, text_input], [chatbot, text_input]).then(ovis_chat,[chatbot, image_input],chatbot) submit_event = text_input.submit(submit_chat, [chatbot, text_input], [chatbot, text_input]).then(ovis_chat,[chatbot, image_input],chatbot) clear_btn.click(clear_chat, outputs=[chatbot, image_input, text_input]) demo.launch()