initial commit

- README.md +30 -5
- app.py +81 -0
- gitattributes +35 -0
- requirements.txt +3 -0
README.md CHANGED
@@ -1,12 +1,37 @@
 ---
-title: Bangla English
-emoji:
-colorFrom:
-colorTo:
+title: Bangla Banglish and English Bio-Medical Entity Recognition
+emoji: 🏷️
+colorFrom: blue
+colorTo: yellow
 sdk: gradio
 sdk_version: 4.41.0
 app_file: app.py
 pinned: false
+license: afl-3.0
 ---
 
-
+# Named Entity Recognition (NER) App
+
+This application provides a simple interface for performing Named Entity Recognition (NER) on text using a pre-trained model from Hugging Face's Transformers library. Under the hood it uses `kazalbrur/bangla-english-med-bert-ner`, a BERT-based model for recognizing biomedical entities in Bangla, Banglish, and English text.
+
+## Features
+
+- **Named Entity Recognition**: Automatically identify and highlight entities within a given text.
+- **User-Friendly Interface**: Built with Gradio for an easy-to-use web interface.
+
+## Model
+
+- **Model Used**: [kazalbrur/bangla-english-med-bert-ner](https://huggingface.co/kazalbrur/bangla-english-med-bert-ner)
+- **Framework**: Hugging Face Transformers
+
+## Software Packages
+
+- **Gradio**: Used to create the web interface.
+- **Transformers**: Used for model inference.
+- **Spaces**: Used for GPU acceleration during model execution.
+
+## How to Use
+
+1. Enter the text you want to analyze in the "Enter Your Text to Find Entities" textbox.
+2. Click "Submit" to run Named Entity Recognition.
+3. The identified entities are highlighted in the output box.
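The README's usage steps correspond to a single call into the `transformers` NER pipeline that app.py wraps. The sketch below runs that pipeline directly, outside Gradio, to show the raw token-level output the app later merges and highlights; only the model id is taken from app.py, while the sample sentence and the printed fields are illustrative assumptions.

```python
# Minimal sketch (not part of the commit): call the same "ner" pipeline that
# app.py wraps, using the model id from app.py. The sample text is illustrative.
from transformers import pipeline

ner_pipe = pipeline("ner", model="kazalbrur/bangla-english-med-bert-ner")

text = "The patient was prescribed paracetamol for fever."
for token in ner_pipe(text):
    # Each raw prediction carries the sub-word token, its BIO entity tag,
    # a confidence score, and character offsets into the input string.
    print(token["word"], token["entity"], round(float(token["score"]), 3),
          token["start"], token["end"])
```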
app.py ADDED
@@ -0,0 +1,81 @@
import gradio as gr
import spaces
from transformers import pipeline
from typing import List, Dict, Any

def merge_tokens(tokens: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Merges tokens that belong to the same entity into a single token.

    Args:
        tokens (List[Dict[str, Any]]): A list of token dictionaries, each containing information
                                       about the entity, word, start, end, and score.

    Returns:
        List[Dict[str, Any]]: A list of merged token dictionaries, where tokens that are part of
                              the same entity are combined into a single token with updated word,
                              end, and score values.
    """
    merged_tokens = []
    for token in tokens:
        if merged_tokens and token['entity'].startswith('I-') and merged_tokens[-1]['entity'].endswith(token['entity'][2:]):
            # If the current token continues the entity of the last one, merge them
            last_token = merged_tokens[-1]
            last_token['word'] += token['word'].replace('##', '')
            last_token['end'] = token['end']
            last_token['score'] = (last_token['score'] + token['score']) / 2
        else:
            # Otherwise, add the token to the list
            merged_tokens.append(token)

    return merged_tokens

# Initialize Model
get_completion = pipeline("ner", model="kazalbrur/bangla-english-med-bert-ner", device=0)

@spaces.GPU(duration=120)
def ner(input: str) -> Dict[str, Any]:
    """
    Performs Named Entity Recognition (NER) on the given input text and merges tokens that belong
    to the same entity into a single entity.

    Args:
        input (str): The input text to analyze for named entities.

    Returns:
        Dict[str, Any]: A dictionary containing the original text and a list of identified entities
                        with merged tokens.
                        - "text": The original input text.
                        - "entities": A list of dictionaries, where each dictionary contains
                          information about a recognized entity, including the word, entity type,
                          score, and positions.
    """
    output = get_completion(input)
    merged_tokens = merge_tokens(output)
    return {"text": input, "entities": merged_tokens}

####### GRADIO APP #######
title = """<h1 id="title"> Bangla Banglish and English Bio-Medical Entity Recognition </h1>"""

description = """
- The model used for recognizing entities is [bangla-english-med-bert-ner](https://huggingface.co/kazalbrur/bangla-english-med-bert-ner).
"""

css = '''
h1#title {
  text-align: center;
}
'''

theme = gr.themes.Soft()
demo = gr.Blocks(css=css, theme=theme)

with demo:
    gr.Markdown(title)
    gr.Markdown(description)
    interface = gr.Interface(fn=ner,
                             inputs=[gr.Textbox(label="Enter Your Text to Find Entities", lines=10)],
                             outputs=[gr.HighlightedText(label="Text with entities")],
                             allow_flagging="never",
                             )

demo.launch()
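To make the token-merging step concrete, here is a small hand-written example of how `merge_tokens` stitches WordPiece continuations (`I-` tags, `##` prefixes) back into whole entity spans. The token dictionaries and entity labels are invented for illustration and may not match the model's real label set; the snippet assumes it runs in the same module as (or imports `merge_tokens` from) app.py.

```python
# Hypothetical tokens in the shape the "ner" pipeline returns; the labels and
# offsets are made up for illustration only.
tokens = [
    {"entity": "B-Disease", "word": "dia",     "start": 12, "end": 15, "score": 0.91},
    {"entity": "I-Disease", "word": "##betes", "start": 15, "end": 20, "score": 0.89},
    {"entity": "B-Drug",    "word": "insulin", "start": 35, "end": 42, "score": 0.95},
]

merged = merge_tokens(tokens)  # merge_tokens as defined in app.py above
# The two "Disease" pieces are joined into one span, the end offset is extended,
# and the scores are averaged; the "Drug" token passes through unchanged.
print([(t["word"], t["entity"], t["end"], round(t["score"], 2)) for t in merged])
# -> [('diabetes', 'B-Disease', 20, 0.9), ('insulin', 'B-Drug', 42, 0.95)]
```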
gitattributes ADDED
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
requirements.txt ADDED
@@ -0,0 +1,3 @@
gradio
transformers
torch