from transformers import pipeline
import gradio as gr

# Token-level named entity recognition (NER) pipeline backed by dslim/bert-base-NER
get_completion = pipeline("ner", model="dslim/bert-base-NER")


def merge_tokens(tokens):
    merged_tokens = []
    for token in tokens:
        if merged_tokens and token['entity'].startswith('I-') and merged_tokens[-1]['entity'].endswith(token['entity'][2:]):
            # If the current token continues the entity of the last one, merge the two tokens
            last_token = merged_tokens[-1]
            if token['word'].startswith('##'):
                # WordPiece continuation of the same word: append without a space
                last_token['word'] += token['word'][2:]
            else:
                # A new word inside the same entity span (e.g. "Rio de Janeiro")
                last_token['word'] += ' ' + token['word']
            last_token['end'] = token['end']
            last_token['score'] = (last_token['score'] + token['score']) / 2
        else:
            # Otherwise, add the token to the list as a new entity
            merged_tokens.append(token)
    return merged_tokens
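
# Example (hypothetical values): for "My name is Raul", the raw pipeline may emit
# sub-word tokens such as {'entity': 'B-PER', 'word': 'Ra', 'start': 11, 'end': 13}
# and {'entity': 'I-PER', 'word': '##ul', 'start': 13, 'end': 15}; merge_tokens
# collapses them into one {'entity': 'B-PER', 'word': 'Raul', 'start': 11, 'end': 15} span.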


def ner(input):
    output = get_completion(input)
    merged_tokens = merge_tokens(output)
    # Return the format expected by gr.HighlightedText: the original text plus entity spans
    return {"text": input, "entities": merged_tokens}

gr.close_all()
demo = gr.Interface(
    fn=ner,
    inputs=[gr.Textbox(label="Text to find entities", lines=2)],
    outputs=[gr.HighlightedText(label="Text with entities")],
    title="NER with dslim/bert-base-NER",
    description="Find entities using the `BERT-base` model under the hood!",
    allow_flagging="never",
    examples=["My name is Raul and I live in Niterói, Rio de Janeiro, Brazil",
              "Lionel Messi is the greatest footballer of the new century",
              "Toronto is the hockey capital of the world",
              "S&P 500 has gained 400 points in the last 7 days",
              "Paris is one of the most visited cities in the world every year."])
demo.launch()
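
# To expose a temporary public URL (e.g. when running in a notebook), one could instead call:
# demo.launch(share=True)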