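# Streamlit app: test Turkish NER models on pasted or uploaded text (TXT/PDF/DOCX),
# visualize the recognized entities with displaCy, and export a masked copy of the text.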
import streamlit as st
import pandas as pd
from spacy import displacy
from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer
import PyPDF2
import docx
import io
from fpdf import FPDF
def chunk_text(text, chunk_size=128):
    # Split text into chunks of at most chunk_size characters, breaking on word boundaries.
    # NOTE: words are re-joined with single spaces, so runs of whitespace in the
    # original text are collapsed.
    words = text.split()
    chunks = []
    current_chunk = []
    current_length = 0
    for word in words:
        if current_length + len(word) + 1 > chunk_size:
            chunks.append(' '.join(current_chunk))
            current_chunk = [word]
            current_length = len(word)
        else:
            current_chunk.append(word)
            current_length += len(word) + 1
    if current_chunk:
        chunks.append(' '.join(current_chunk))
    return chunks
st.set_page_config(layout="wide")

# Function to read text from uploaded file
def read_file(file):
    if file.type == "text/plain":
        return file.getvalue().decode("utf-8")
    elif file.type == "application/pdf":
        pdf_reader = PyPDF2.PdfReader(io.BytesIO(file.getvalue()))
        return " ".join(page.extract_text() for page in pdf_reader.pages)
    elif file.type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
        doc = docx.Document(io.BytesIO(file.getvalue()))
        return " ".join(paragraph.text for paragraph in doc.paragraphs)
    else:
        st.error("Unsupported file type")
        return None
st.title("Turkish NER Models Testing") | |
model_list = [ | |
'girayyagmur/bert-base-turkish-ner-cased', | |
'savasy/bert-base-turkish-ner-cased', | |
'xlm-roberta-large-finetuned-conll03-english', | |
'asahi417/tner-xlm-roberta-base-ontonotes5' | |
] | |
st.sidebar.header("Select NER Model") | |
model_checkpoint = st.sidebar.radio("", model_list) | |
st.sidebar.write("For details of models: 'https://huggingface.co/akdeniz27/") | |
st.sidebar.write("Only PDF, DOCX, and TXT files are supported.") | |
# Determine aggregation strategy
aggregation = "simple" if model_checkpoint in [
    "akdeniz27/xlm-roberta-base-turkish-ner",
    "xlm-roberta-large-finetuned-conll03-english",
    "asahi417/tner-xlm-roberta-base-ontonotes5"
] else "first"
st.subheader("Select Text Input Method") | |
input_method = st.radio("", ('Write or Paste New Text', 'Upload File')) | |
if input_method == "Write or Paste New Text": | |
input_text = st.text_area('Write or Paste Text Below', value="", height=128) | |
else: | |
uploaded_file = st.file_uploader("Choose a file", type=["txt", "pdf", "docx"]) | |
if uploaded_file is not None: | |
input_text = read_file(uploaded_file) | |
if input_text: | |
st.text_area("Extracted Text", input_text, height=128) | |
else: | |
input_text = "" | |
def setModel(model_checkpoint, aggregation):
    # Load the selected checkpoint and build a token-classification (NER) pipeline.
    model = AutoModelForTokenClassification.from_pretrained(model_checkpoint)
    tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
    return pipeline('ner', model=model, tokenizer=tokenizer, aggregation_strategy=aggregation)
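# Optionally, setModel could be wrapped with st.cache_resource so the model is not
# reloaded from the Hub on every Streamlit rerun (left unchanged here).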
def entity_comb(output):
    # Merge adjacent entities of the same group whose character spans are contiguous
    # (e.g. sub-word pieces emitted separately by the pipeline).
    output_comb = []
    for ind, entity in enumerate(output):
        if ind == 0:
            output_comb.append(entity)
        elif output[ind]["start"] == output[ind-1]["end"] and output[ind]["entity_group"] == output[ind-1]["entity_group"]:
            output_comb[-1]["word"] += output[ind]["word"]
            output_comb[-1]["end"] = output[ind]["end"]
        else:
            output_comb.append(entity)
    return output_comb
def create_mask_dict(entities):
    # Map each distinct entity word to a numbered mask label such as PER_1 or LOC_2.
    # CARDINAL and EVENT entities are left unmasked.
    mask_dict = {}
    entity_counters = {}
    for entity in entities:
        if entity['entity_group'] not in ['CARDINAL', 'EVENT']:
            if entity['word'] not in mask_dict:
                if entity['entity_group'] not in entity_counters:
                    entity_counters[entity['entity_group']] = 1
                else:
                    entity_counters[entity['entity_group']] += 1
                mask_dict[entity['word']] = f"{entity['entity_group']}_{entity_counters[entity['entity_group']]}"
    return mask_dict
def create_masked_text(input_text, entities, mask_dict):
    # Replace entity spans with their mask labels, working backwards from the end of
    # the text so that earlier character offsets remain valid.
    masked_text = input_text
    for entity in sorted(entities, key=lambda x: x['start'], reverse=True):
        if entity['entity_group'] not in ['CARDINAL', 'EVENT']:
            masked_text = masked_text[:entity['start']] + mask_dict[entity['word']] + masked_text[entity['end']:]
    return masked_text
def export_masked_text(masked_text, file_type):
    if file_type == "txt":
        return masked_text.encode("utf-8")
    elif file_type == "pdf":
        # NOTE: in-memory PDF output assumes the fpdf2 package, whose output() can
        # write to a file-like buffer.
        pdf_buffer = io.BytesIO()
        pdf = FPDF()
        pdf.add_page()
        # The built-in Arial font only covers Latin-1; Turkish characters may require
        # registering a Unicode font with pdf.add_font().
        pdf.set_font("Arial", size=12)
        pdf.multi_cell(0, 10, masked_text)
        pdf.output(pdf_buffer)
        pdf_buffer.seek(0)
        return pdf_buffer.getvalue()
    elif file_type == "docx":
        doc = docx.Document()
        doc.add_paragraph(masked_text)
        buffer = io.BytesIO()
        doc.save(buffer)
        buffer.seek(0)
        return buffer.getvalue()
    else:
        st.error("Unsupported file type for export")
        return None
Run_Button = st.button("Run") | |
if Run_Button and input_text: | |
ner_pipeline = setModel(model_checkpoint, aggregation) | |
# Chunk the input text | |
chunks = chunk_text(input_text) | |
# Process each chunk | |
all_outputs = [] | |
for i, chunk in enumerate(chunks): | |
output = ner_pipeline(chunk) | |
# Adjust start and end positions for entities in chunks after the first | |
if i > 0: | |
offset = len(' '.join(chunks[:i])) + 1 | |
for entity in output: | |
entity['start'] += offset | |
entity['end'] += offset | |
all_outputs.extend(output) | |
# Combine entities | |
output_comb = entity_comb(all_outputs) | |
# Create mask dictionary | |
mask_dict = create_mask_dict(output_comb) | |
masked_text = create_masked_text(input_text, output_comb, mask_dict) | |
    # Apply masking and add masked_word column
    for entity in output_comb:
        if entity['entity_group'] not in ['CARDINAL', 'EVENT']:
            entity['masked_word'] = mask_dict.get(entity['word'], entity['word'])
        else:
            entity['masked_word'] = entity['word']

    #df = pd.DataFrame.from_dict(output_comb)
    #cols_to_keep = ['word', 'entity_group', 'score', 'start', 'end']
    #df_final = df[cols_to_keep].loc[:,~df.columns.duplicated()].copy()
    #st.subheader("Recognized Entities")
    #st.dataframe(df_final)
    # Spacy display logic with entity numbering
    spacy_display = {"ents": [], "text": input_text, "title": None}
    for entity in output_comb:
        if entity['entity_group'] not in ['CARDINAL', 'EVENT']:
            # Use the last "_" segment so groups that contain underscores (e.g. WORK_OF_ART)
            # still pick up the counter from the mask label.
            label = f"{entity['entity_group']}_{mask_dict[entity['word']].split('_')[-1]}"
        else:
            label = entity['entity_group']
        spacy_display["ents"].append({"start": entity["start"], "end": entity["end"], "label": label})

    # Custom CSS to prevent label overlap
    custom_css = """
    <style>
    .entity-label {
        font-size: 0.7em;
        line-height: 1;
        padding: 0.25em;
        border-radius: 0.25em;
        top: -1.5em;
        position: relative;
    }
    </style>
    """
    html = custom_css + displacy.render(spacy_display, style="ent", minify=True, manual=True)
    st.write(html, unsafe_allow_html=True)
    # Download button: map the uploaded file's MIME type to a short extension so the
    # export and the file name work for DOCX uploads as well.
    mime_to_ext = {
        "text/plain": "txt",
        "application/pdf": "pdf",
        "application/vnd.openxmlformats-officedocument.wordprocessingml.document": "docx",
    }
    export_file_type = mime_to_ext.get(uploaded_file.type, "txt") if uploaded_file is not None else "txt"
    masked_file_content = export_masked_text(masked_text, export_file_type)
    if masked_file_content:
        st.download_button(
            label="Download Masked Text",
            data=masked_file_content,
            file_name=f"masked_output.{export_file_type}",
            mime=f"application/{export_file_type}" if export_file_type != "txt" else "text/plain"
        )

    st.subheader("Masking Dictionary")
    st.json(mask_dict)

    st.subheader("Masked Text Preview")
    st.text(masked_text)
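# To run this app locally, a minimal environment sketch (package names assumed from the
# imports above, versions not pinned): streamlit, pandas, spacy, transformers, torch,
# PyPDF2, python-docx, fpdf2. Then start it with:
#   streamlit run app.py   # file name assumed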