"""Streamlit demo: Tajik-to-Farsi transliteration.

Loads a trained model checkpoint plus its tokenizer config, takes free text
from the user, and displays the model's transliteration.
"""
import torch
import streamlit as st

from model import init_model, predict
from data import Tokenizer, load_config

# Checkpoint holding both the model weights and the tokenizer/config payload
# (load_config and init_model each read from this same file).
MODEL_PATH = 'tj-fa.pt'


@st.cache_resource
def _load_resources(path):
    """Load config, tokenizer, and model once; cached across Streamlit reruns.

    Without caching, every widget interaction re-executes the whole script and
    re-reads the checkpoint from disk, which makes the app needlessly slow.
    """
    config = load_config(path)
    print('Config:', config)  # NOTE(review): debug print — consider logging
    tokenizer = Tokenizer(config)
    model = init_model(path)
    return tokenizer, model


tokenizer, model = _load_resources(MODEL_PATH)

# Text input; default value is a Tajik couplet (preserved verbatim).
user_input = st.text_area(
    "Enter some text here",
    value="Он ҷо, ки висоли дӯстон аст,\nВ-оллоҳ, ки миёни хона саҳрост.",
)

# Prefer GPU when available. NOTE(review): the model is never explicitly moved
# to `device` here — presumably init_model/predict handle placement; confirm.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Run inference and show the result.
model_output = predict(model, tokenizer, user_input, device)
st.text_area('Transliteration:', value=str(model_output), max_chars=None, key=None)