import streamlit as st import torch from fairseq.models.transformer import TransformerModel import os import requests # Define the URLs of your model and dictionary files model_url = "https://huggingface.co/SLPG/English_to_Urdu_Unsupervised_MT/resolve/main/sent_iwslt-bt-enur_42.pt" dict_en_url = "https://huggingface.co/SLPG/English_to_Urdu_Unsupervised_MT/resolve/main/dict.en.txt" dict_ur_url = "https://huggingface.co/SLPG/English_to_Urdu_Unsupervised_MT/resolve/main/dict.ur.txt" # Define the paths to save the downloaded files model_path = "sent_iwslt-bt-enur_42.pt" dict_en_path = "dict.en.txt" dict_ur_path = "dict.ur.txt" # Define a function to download files def download_file(url, file_path): if not os.path.exists(file_path): with requests.get(url, stream=True) as r: r.raise_for_status() with open(file_path, 'wb') as f: for chunk in r.iter_content(chunk_size=8192): f.write(chunk) return file_path # Download the model and dictionary files download_file(model_url, model_path) download_file(dict_en_url, dict_en_path) download_file(dict_ur_url, dict_ur_path) # Load the model en_ur_model = TransformerModel.from_pretrained( '.', checkpoint_file=model_path, data_name_or_path='.' ) # Streamlit interface st.title("Translation Model Inference") input_text = st.text_area("Enter text to translate", "") if st.button("Translate"): if input_text: output_text = en_ur_model.translate(input_text) st.write(f"Translated Text: {output_text}") else: st.write("Please enter text to translate.")