import gradio as gr
from transformers import DebertaV2Tokenizer, DebertaV2ForTokenClassification
import torch
from huggingface_hub import hf_hub_download
import json
from globe import title, description, joinus, model_name, placeholder, modelinfor1, modelinfor2, id2label

# Load the segmentation tokenizer and model once at startup
tokenizer = DebertaV2Tokenizer.from_pretrained(model_name)
model = DebertaV2ForTokenClassification.from_pretrained(model_name)
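# Optional sanity check (a sketch, assuming the checkpoint ships its label map in
# config.json): model.config.id2label should agree with the id2label imported from globe.
# print(model.config.id2label)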
# id2label (imported from globe) mirrors the label map in the model's config.json:
# id2label = {
#     0: "author", 1: "bibliography", 2: "caption", 3: "contact",
#     4: "date", 5: "dialog", 6: "footnote", 7: "keywords",
#     8: "math", 9: "paratext", 10: "separator", 11: "table",
#     12: "text", 13: "title"
# }
# Display color for each label, used by the HighlightedText component below
color_map = {
    "author": "blue", "bibliography": "purple", "caption": "orange",
    "contact": "cyan", "date": "green", "dialog": "yellow",
    "footnote": "pink", "keywords": "lightblue", "math": "red",
    "paratext": "lightgreen", "separator": "gray", "table": "brown",
    "text": "lightgray", "title": "gold"
}
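# Optional consistency check (a sketch): every label the model can emit should have a color.
# assert set(color_map) == set(id2label.values())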
def segment_text(input_text):
    # Tokenize the input and run the token-classification model
    tokens = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**tokens)
    logits = outputs.logits
    predictions = torch.argmax(logits, dim=-1).squeeze().tolist()
    tokens_decoded = tokenizer.convert_ids_to_tokens(tokens['input_ids'].squeeze().tolist())

    # Merge SentencePiece sub-tokens back into words, keeping the label of each word's first piece
    segments = []
    current_word = ""
    current_label = None
    for token, label_id in zip(tokens_decoded, predictions):
        if token in tokenizer.all_special_tokens:  # skip [CLS], [SEP], padding
            continue
        if token.startswith("▁"):  # SentencePiece marks the start of a new word with "▁"
            if current_word:
                segments.append((current_word, current_label))
            current_word = token.replace("▁", "")  # start a new word
            current_label = id2label[label_id]
        else:
            current_word += token  # append sub-word piece to the current word
    if current_word:
        segments.append((current_word, current_label))
    return segments
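# Illustrative usage (assumed output shape, not taken from the Space itself):
# segment_text("Figure 3: overview of results")
# -> [("Figure", "caption"), ("3:", "caption"), ("overview", "caption"), ("of", "caption"), ("results", "caption")]
# With combine_adjacent=True, HighlightedText merges these adjacent "caption" words into one span.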
with gr.Blocks() as demo:
    with gr.Row():
        gr.Markdown(title)
    with gr.Row():
        with gr.Column(scale=1):
            with gr.Group():
                gr.Markdown(description)
            with gr.Accordion(label="Join Us", open=False):
                gr.Markdown(joinus)
        with gr.Column(scale=1):
            with gr.Row():
                with gr.Group():
                    gr.Markdown(modelinfor1)
                with gr.Group():
                    gr.Markdown(modelinfor2)
    with gr.Row():
        input_text = gr.Textbox(label="Enter your text here", lines=5, placeholder=placeholder)
        output_text = gr.HighlightedText(label="PLeIAs Segment Text", color_map=color_map, combine_adjacent=True, show_inline_category=True, show_legend=True)

    def process(input_text):
        return segment_text(input_text)

    submit_button = gr.Button("Segment Text")
    submit_button.click(fn=process, inputs=input_text, outputs=output_text)
if __name__ == "__main__":
    demo.launch()