import streamlit as st
from transformers import T5Tokenizer, T5ForConditionalGeneration
from pdfminer.high_level import extract_text
import nltk
from nltk import sent_tokenize

# Download the punkt tokenizer for sentence segmentation
nltk.download('punkt')

def main():
    st.title("PDF Translation")    
    # Upload the pdf
    uploaded_file = st.file_uploader("Upload a PDF file and we will translate the text inside to German and French", type=["pdf"])

    if uploaded_file is not None:
        # Extract text from pdf
        text = extract_text(uploaded_file)
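        # Load the T5 tokenizer and model. Note that Streamlit reruns this
        # script on every interaction, so in a larger app these would typically
        # be wrapped in a cached loader (e.g. st.cache_resource) to avoid
        # reloading the weights on each click.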
        tokenizer = T5Tokenizer.from_pretrained("t5-base")
        model = T5ForConditionalGeneration.from_pretrained("t5-base")

        # Define translation prefixes for each language
        translation_prefixes = {
            "german": "translate English to German: ",
            "french": "translate English to French: "
        }
        # Buttons to trigger translation. Streamlit reruns this script from the
        # top on every interaction, so local flags cannot track translation
        # state across clicks; each button press is simply handled in the run
        # in which it occurs.
        translate_german = st.button("Translate to German")
        translate_french = st.button("Translate to French")

        # Translate and display for German
        if translate_german:
            translated_sentences_german = translate_text(text, translation_prefixes["german"], tokenizer, model)
            display_translation(translated_sentences_german, "German")

        # Translate and display for French
        if translate_french:
            translated_sentences_french = translate_text(text, translation_prefixes["french"], tokenizer, model)
            display_translation(translated_sentences_french, "French")

def translate_text(text, prefix, tokenizer, model):
    # Split text into sentences
    sentences = sent_tokenize(text)

    # Translate each sentence
    translated_sentences = []
    for sentence in sentences:
        text_to_translate = prefix + sentence
        # Truncate to T5's 512-token input limit so unusually long sentences do not overflow the encoder
        input_ids = tokenizer(text_to_translate, return_tensors="pt", truncation=True, max_length=512).input_ids
        outputs = model.generate(input_ids=input_ids, max_length=500, num_beams=4, no_repeat_ngram_size=2)
        translated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        translated_sentences.append(translated_text)

    return translated_sentences

def display_translation(translations, language):
    st.write(f"\nLanguage: {language}")
    st.write(f"Translation:\n {' '.join(translations)}")

if __name__ == "__main__":
    main()
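
# To run this app locally (a minimal sketch of the workflow, assuming the
# script is saved as app.py and that streamlit, transformers, sentencepiece,
# torch, pdfminer.six, and nltk are installed):
#
#     streamlit run app.py
#
# Streamlit then serves the uploader UI in the browser; each button click
# reruns the script and triggers the corresponding translation.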