import torch
import streamlit as st

from model import init_model, predict
from data import Tokenizer, load_config

# Load the saved configuration from the checkpoint and build the tokenizer
config = load_config('tj-fa-3dmzfi52.pt')
print('Config:', config)
tokenizer = Tokenizer(config)

# Load the model
model = init_model('tj-fa-3dmzfi52.pt')

# Create a text area box where the user can enter their text
user_input = st.text_area(
    "Enter some text here",
    value="Халқро тақлидашон барбод дод,\nЭй дусад лаънат бар он тақлид бод",
)

# Use the GPU if one is available, otherwise fall back to the CPU
device = "cuda" if torch.cuda.is_available() else "cpu"

# Run the model on the user's text and store the output
model_output = predict(model, tokenizer, user_input, device)

# Display the model's output in a text area box
st.text_area('The sentiment of the text is:', value=str(model_output))
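
# To launch the app locally, run the Streamlit CLI against this file
# (assuming the script is saved as app.py; adjust the name to match):
#   streamlit run app.py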