import datasets
from transformers import pipeline
from transformers.pipelines.pt_utils import KeyDataset
from tqdm.auto import tqdm

# A token-classification (NER) pipeline; the dataset is streamed through it
# one example at a time.
pipe = pipeline("token-classification", model="FacebookAI/xlm-roberta-large-finetuned-conll03-german")

dataset = datasets.load_dataset("superb", name="asr", split="test")

# KeyDataset selects a single column. "text" holds the transcriptions, which is
# what a text pipeline expects ("file" holds audio file paths, which would be
# fed to the model as raw path strings).
for out in tqdm(pipe(KeyDataset(dataset, "text"))):
    print(out)
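Iterating one example at a time keeps memory use low; if throughput matters, the pipeline call also accepts a `batch_size` argument that groups examples before each forward pass. The same loop, batched (8 is an arbitrary starting value here, not a tuned choice):

# Same streaming loop, with examples grouped into batches of 8.
for out in tqdm(pipe(KeyDataset(dataset, "text"), batch_size=8)):
    print(out)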
The same building blocks can also back a small interactive demo. The Gradio app below loads a question-answering checkpoint and a token-classification (NER) checkpoint, then routes each request to the selected pipeline:
import gradio as gr
from transformers import pipeline, AutoModelForQuestionAnswering, AutoTokenizer, AutoModelForTokenClassification

qa_model_name = "erdometo/xlm-roberta-base-finetuned-TQuad2"
token_classification_model_name = "FacebookAI/xlm-roberta-large-finetuned-conll03-german"

# Load both models and their tokenizers once at startup.
qa_model = AutoModelForQuestionAnswering.from_pretrained(qa_model_name)
qa_tokenizer = AutoTokenizer.from_pretrained(qa_model_name)

token_classification_model = AutoModelForTokenClassification.from_pretrained(token_classification_model_name)
token_classification_tokenizer = AutoTokenizer.from_pretrained(token_classification_model_name)

# Build each pipeline once, rather than rebuilding it on every request.
qa_pipeline = pipeline("question-answering", model=qa_model, tokenizer=qa_tokenizer)
token_classification_pipeline = pipeline(
    "token-classification",
    model=token_classification_model,
    tokenizer=token_classification_tokenizer,
)

def predict(pipeline_type, question, context):
    if pipeline_type == "question-answering":
        result = qa_pipeline(question=question, context=context)
        # HighlightedText accepts a list of (text, score) tuples.
        return [(result["answer"], result["score"])]
    elif pipeline_type == "token-classification":
        result = token_classification_pipeline(context)
        # HighlightedText also accepts {"text": ..., "entities": [...]},
        # which matches the pipeline's output format directly.
        return {"text": context, "entities": result}
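By default the token-classification pipeline labels each subword piece separately. Its `aggregation_strategy` argument can merge pieces into whole entity spans, which tends to highlight more cleanly. A sketch of that variant (the helper name is hypothetical; note that aggregated results use the key "entity_group" instead of "entity"):

def predict_token_classification_aggregated(context):
    # Hypothetical variant of the token-classification branch above.
    result = token_classification_pipeline(context, aggregation_strategy="simple")
    # Aggregated results carry "entity_group" rather than "entity"; remap the key
    # so the dict still matches the HighlightedText format used in predict().
    entities = [{**e, "entity": e["entity_group"]} for e in result]
    return {"text": context, "entities": entities}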
A single gr.Interface then exposes both tasks, with a dropdown choosing which pipeline handles the request:
iface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Dropdown(choices=["question-answering", "token-classification"], label="Choose Pipeline"),
        gr.Textbox(label="Question (used by question-answering only)"),
        gr.Textbox(label="Context"),
    ],
    outputs=gr.HighlightedText(),
)
iface.launch()
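For a quick smoke test without opening the browser UI, predict can be called directly before (or instead of) launch(). The strings below are made-up examples: Turkish for the TQuAD2-tuned QA model, German for the CoNLL03-German NER model:

# Hypothetical smoke test exercising both branches of predict().
print(predict(
    "question-answering",
    "Türkiye'nin başkenti neresidir?",    # "What is the capital of Turkey?"
    "Türkiye'nin başkenti Ankara'dır.",   # "The capital of Turkey is Ankara."
))
print(predict("token-classification", "", "Angela Merkel wohnt in Berlin."))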