"""Streamlit demo for norygano/causalBERT: tags indicators, causes, and effects
of explicit causal attributions in German sentences."""

import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification
from annotated_text import annotated_text

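# Load the tokenizer and the fine-tuned causalBERT model from the Hugging Face Hub.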
model_directory = "norygano/causalBERT"
tokenizer = AutoTokenizer.from_pretrained(model_directory, add_prefix_space=True)
model = AutoModelForTokenClassification.from_pretrained(model_directory)

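# Put the model in evaluation mode (disables dropout); we only run inference.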
model.eval()

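# Class index to BIO tag: B-/I- mark the beginning/inside of a span, "O" is outside any span.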
label_map = {0: "O", 1: "B-INDICATOR", 2: "I-INDICATOR", 3: "B-CAUSE", 4: "I-CAUSE", 5: "B-EFFECT", 6: "I-EFFECT"}

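# App title, rendered as raw HTML: "CAUSEN" plus a rotated "V".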
st.markdown(
    """
    <div style="display: flex; align-items: center; justify-content: left; font-size: 60px; font-weight: bold;">
        <span>CAUSEN</span>
        <span style="transform: rotate(270deg); display: inline-block; margin-left: 5px;">V</span>
    </div>
    """,
    unsafe_allow_html=True
)
st.markdown("[Model](https://huggingface.co/norygano/causalBERT)")

st.write("Tags indicators and causes of explicit attributions of causality. GER only (atm)") |
|
|
|
|
|
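# Input area: one sentence per line, pre-filled with German example sentences.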
sentences_input = st.text_area("*Sentences (one per line)*", "\n".join([
    "Autos stehen im Verdacht, Waldsterben zu verursachen.",  # "Cars are suspected of causing forest dieback."
    "Fußball führt zu Waldschäden.",  # "Football leads to forest damage."
    "Haustüren tragen zum Betonsterben bei.",  # "Front doors contribute to concrete dieback."
]), placeholder="Your sentences here.")

# Keep non-empty lines only, stripped of surrounding whitespace.
sentences = [sentence.strip() for sentence in sentences_input.splitlines() if sentence.strip()]

if st.button("Analyze"): |
|
for sentence in sentences: |
|
|
|
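        # Tokenize into WordPiece subwords; return_tensors="pt" gives PyTorch tensors.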
        inputs = tokenizer(sentence, return_tensors="pt", truncation=True, padding=True)

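        # Forward pass without gradient tracking; we only need the predictions.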
        with torch.no_grad():
            outputs = model(**inputs)

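        # Greedy decoding: take the highest-scoring label at each token position.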
        logits = outputs.logits
        predicted_label_ids = torch.argmax(logits, dim=2)

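        # Convert input IDs back to token strings and label IDs to BIO tags.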
        tokens = tokenizer.convert_ids_to_tokens(inputs['input_ids'][0])
        predicted_labels = [label_map[label_id.item()] for label_id in predicted_label_ids[0]]

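        # Re-assemble WordPiece subwords ("##"-prefixed) into whole words,
        # keeping the label predicted for each word's first subword.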
        annotations = []
        current_word = ""
        current_label = "O"

        for token, label in zip(tokens, predicted_labels):
            # Skip the special tokens added by the tokenizer.
            if token in ['[CLS]', '[SEP]']:
                continue

if token.startswith("##"): |
|
|
|
current_word += token[2:] |
|
else: |
|
|
|
if current_word: |
|
if current_label != "O": |
|
annotations.append((current_word, current_label)) |
|
else: |
|
annotations.append(current_word) |
|
annotations.append(" ") |
|
|
|
|
|
current_word = token |
|
current_label = label |
|
|
|
|
|
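        # Flush the final word of the sentence.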
        if current_word:
            if current_label != "O":
                annotations.append((current_word, current_label))
            else:
                annotations.append(current_word)

st.write(f"**Sentence:** {sentence}") |
|
annotated_text(*annotations) |
|
st.write("---") |
|
|