File size: 1,622 Bytes
6db5958
 
3913596
6db5958
631c1d5
 
3913596
9ad115c
3913596
 
631c1d5
3913596
 
 
 
 
 
 
631c1d5
 
 
 
 
 
 
a95bd26
3913596
 
 
 
631c1d5
3913596
 
 
 
 
 
6db5958
 
 
 
 
 
 
3913596
 
6db5958
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
import streamlit as st
import torch
from fairseq.models.transformer import TransformerModel
import os
import requests

# Define the URLs of your model and dictionary files
# (hosted on the Hugging Face Hub; fetched once at startup and cached on disk)
model_url = "https://huggingface.co/SLPG/English_to_Urdu_Unsupervised_MT/resolve/main/checkpoint_8_96000.pt"
dict_en_url = "https://huggingface.co/SLPG/English_to_Urdu_Unsupervised_MT/resolve/main/dict.en.txt"
dict_ur_url = "https://huggingface.co/SLPG/English_to_Urdu_Unsupervised_MT/resolve/main/dict.ur.txt"

# Define the paths to save the downloaded files
# NOTE(review): the local name differs from the remote filename
# (checkpoint_8_96000.pt) — presumably intentional renaming; confirm.
model_path = "sent_iwslt-bt-enur_42.pt"
dict_en_path = "dict.en.txt"
dict_ur_path = "dict.ur.txt"

# Define a function to download files
def download_file(url, file_path):
    """Download *url* to *file_path* unless it already exists locally.

    Streams the response in chunks and writes to a temporary ".part"
    file first, renaming only on success, so an interrupted download
    never leaves a truncated file that the existence check would later
    mistake for a complete one.

    Args:
        url: Source URL to fetch.
        file_path: Local destination path.

    Returns:
        The destination path, for convenient chaining.

    Raises:
        requests.HTTPError: if the server responds with an error status.
        requests.Timeout: if connecting or reading stalls past the timeout.
    """
    if not os.path.exists(file_path):
        tmp_path = file_path + ".part"
        # timeout=(connect, read) prevents the app from hanging forever
        # on a stalled connection
        with requests.get(url, stream=True, timeout=(10, 60)) as r:
            r.raise_for_status()
            with open(tmp_path, 'wb') as f:
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)
        # atomic on both POSIX and Windows; only now does file_path exist
        os.replace(tmp_path, file_path)
    return file_path

# Download the model and dictionary files (no-ops when already on disk)
download_file(model_url, model_path)
download_file(dict_en_url, dict_en_path)
download_file(dict_ur_url, dict_ur_path)

# Load the model once and reuse it across Streamlit reruns: Streamlit
# re-executes this whole script on every widget interaction, so without
# caching the full transformer checkpoint would be reloaded from disk
# each time the user clicks a button.
@st.cache_resource
def _load_model():
    # Both the checkpoint and the dict.{en,ur}.txt files live in the
    # current working directory, hence '.' for both paths.
    return TransformerModel.from_pretrained(
        '.',
        checkpoint_file=model_path,
        data_name_or_path='.'
    )

en_ur_model = _load_model()

# Streamlit interface
st.title("Translation Model Inference")
input_text = st.text_area("Enter text to translate", "")

if st.button("Translate"):
    # strip() so whitespace-only input is treated as empty instead of
    # being passed to the model (bare "" default still counts as empty)
    text = input_text.strip()
    if text:
        output_text = en_ur_model.translate(text)
        st.write(f"Translated Text: {output_text}")
    else:
        st.write("Please enter text to translate.")