# test_space / app.py
import streamlit as st
from transformers import AutoModelForSequenceClassification
from transformers import AutoTokenizer
import torch
import pandas as pd
import numpy as np
# import os
# os.environ['KMP_DUPLICATE_LIB_OK']='True'
st.markdown("### Some Model")
# st.markdown("<img width=200px src='https://rozetked.me/images/uploads/dwoilp3BVjlE.jpg'>", unsafe_allow_html=True)
# ^-- you can show the user text, images, and a limited subset of HTML -- just like in Jupyter
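# Load the tokenizer and the fine-tuned sequence-classification model from the "test_model" checkpoint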
loaded_tokenizer = AutoTokenizer.from_pretrained("test_model")
loaded_model = AutoModelForSequenceClassification.from_pretrained("test_model")
# title_text = st.text_area("TITLE HERE")
# ^-- shows a text area; the returned variable holds whatever string is currently entered in it
# from transformers import pipeline
# pipe = pipeline("ner", "Davlan/distilbert-base-multilingual-cased-ner-hrl")
# raw_predictions = pipe(text)
# # the familiar huggingface.transformers code -- it can be replaced with anything from fairseq to catboost
# st.markdown(f"{raw_predictions}")
# # show the model's output as text, for the user's amusement
# title_text = st.text_area("TITLE HERE", "input your title")
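# Input widgets: a single-line title field and a multi-line summary field; their concatenation is also scored below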
title_text = st.text_input("TITLE HERE")
summary_text = st.text_area("SUMMARY HERE")
text = title_text + " " + summary_text
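# Tokenize the title alone and turn the model's logits into class probabilities via softmax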
title_input = loaded_tokenizer(title_text, padding="max_length", truncation=True, return_tensors='pt')
with torch.no_grad():
    title_res = loaded_model(**title_input)
title_probs = torch.softmax(title_res.logits, dim=1).cpu().numpy()[0]
st.markdown(" ".join(str(x) for x in list(title_probs)))
summary_input = loaded_tokenizer(summary_text, padding="max_length", truncation=True, return_tensors='pt')
with torch.no_grad():
    summary_res = loaded_model(**summary_input)
summary_probs = torch.softmax(summary_res.logits, dim=1).cpu().numpy()[0]
st.markdown(" ".join(str(x) for x in list(summary_probs)))
text_input = loaded_tokenizer(text, padding="max_length", truncation=True, return_tensors='pt')
with torch.no_grad():
    text_res = loaded_model(**text_input)
text_probs = torch.softmax(text_res.logits, dim=1).cpu().numpy()[0]
st.markdown(" ".join(str(x) for x in list(text_probs)))
probs = np.stack([title_probs, summary_probs, text_probs], axis=1)
chart_data = pd.DataFrame(
    probs,
    columns=["title", "summary", "title + summary"])
st.bar_chart(chart_data)
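# The three tokenize -> predict -> softmax blocks above repeat the same logic; a minimal
# sketch of how they could be factored into a helper (not part of the original app):
# def predict_probs(s):
#     inputs = loaded_tokenizer(s, padding="max_length", truncation=True, return_tensors='pt')
#     with torch.no_grad():
#         logits = loaded_model(**inputs).logits
#     return torch.softmax(logits, dim=1).cpu().numpy()[0]
# title_probs, summary_probs, text_probs = map(predict_probs, (title_text, summary_text, text))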