import os

# Install detectron2 from the pre-built wheel index matching the Space's CUDA/torch versions
os.system('pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.9/index.html')

credentials_kwargs = {
    "aws_access_key_id": os.environ["ACCESS_KEY"],
    "aws_secret_access_key": os.environ["SECRET_KEY"],
}

# work-around: https://discuss.huggingface.co/t/how-to-install-a-specific-version-of-gradio-in-spaces/13552
os.system("pip uninstall -y gradio")
os.system("pip install gradio==3.4.1")
os.system(os.environ["DD_ADDONS"])

import time
from os import getcwd, path

import deepdoctection as dd
from deepdoctection.dataflow.serialize import DataFromList
from deepdoctection.utils.settings import get_type
from dd_addons.analyzer.loader import get_loader
from dd_addons.extern.guidance import TOKEN_DEFAULT_INSTRUCTION
from dd_addons.utils.settings import register_llm_token_tag, register_string_categories_from_list
from dd_addons.extern.openai import OpenAiLmmTokenClassifier
import gradio as gr

analyzer = get_loader(reset_config_file=True)

demo = gr.Blocks(css="scrollbar.css")


def process_analyzer(openai_api_key, categories_str, instruction_str, img, pdf, max_datapoints):
    # Register the user-supplied, comma-separated token classes so they become
    # available in deepdoctection's object types registry.
    categories_list = categories_str.split(",")
    register_string_categories_from_list(categories_list, "custom_token_classes")
    custom_token_class = dd.object_types_registry.get("custom_token_classes")
    print([token_class for token_class in custom_token_class])
    register_llm_token_tag([token_class for token_class in custom_token_class])

    categories = {str(idx + 1): get_type(val) for idx, val in enumerate(categories_list)}

    # Build an LLM-based token classifier and plug it into the analyzer pipeline.
    gpt_token_classifier = OpenAiLmmTokenClassifier(
        model_name="gpt-3.5-turbo",
        categories=categories,
        api_key=openai_api_key,
        instruction=instruction_str if instruction_str else None,
    )
    analyzer.pipe_component_list[8].language_model = gpt_token_classifier

    if img is not None:
        # Wrap the uploaded numpy array in a dd.Image. Gradio delivers RGB,
        # while deepdoctection expects BGR, hence the channel flip.
        image = dd.Image(file_name=str(time.time()).replace(".", "") + ".png", location="")
        image.image = img[:, :, ::-1]
        df = DataFromList(lst=[image])
        df = analyzer.analyze(dataset_dataflow=df)
    elif pdf:
        df = analyzer.analyze(path=pdf.name, max_datapoints=max_datapoints)
    else:
        raise ValueError("Pass either an image or a PDF")
    df.reset_state()

    # Collect the processed pages and their token classification results.
    json_out = {}
    dpts = []
    for idx, dp in enumerate(df):
        dpts.append(dp)
        json_out[f"page_{idx}"] = dp.get_token()

    return [
        dp.viz(
            show_cells=False,
            show_layouts=False,
            show_tables=False,
            show_words=True,
            show_token_class=True,
            ignore_default_token_class=True,
        )
        for dp in dpts
    ], json_out


with demo:
    with gr.Box():
        gr.Markdown("