Update app.py
app.py CHANGED
@@ -22,10 +22,24 @@ if audio_bytes:
 
 
 pipe_p=pipeline(model="ramsrigouthamg/t5_sentence_paraphraser")
+st.title("Paraphraser")
 import sentencepiece as spm
 sp=spm.SentencePieceProcessor(model_file='t5_sentence_paraphraser/spiece.model')
 st.title("Paraphraser")
 text_p=st.text_area('Input sentence:')
 if text_p:
     out_p=pipe_p(text_p)
-    st.text_area(label="Output sentence:", value=out_p)
+st.text_area(label="Output sentence:", value=out_p)
+
+from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
+st.title("Language Translator")
+model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
+tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
+tokenizer.src_lang = "en_XX"
+text_l=st.text_area('Input sentence:')
+encoded_en = tokenizer(text_l, return_tensors="pt")
+generated_tokens = model.generate(
+    **encoded_en,
+    forced_bos_token_id=tokenizer.lang_code_to_id["hi_IN"]
+)
+tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
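
The added translator block decodes the generated tokens but never stores or displays the result, and it runs even when the text box is empty. Below is a minimal sketch of how that section could be wired up, assuming the decoded Hindi text is meant to appear in an output text area; the model names and language codes come from the diff, while the st.cache_resource decorator, the widget key arguments, and the output st.text_area are illustrative assumptions, not part of this commit.

# Sketch only: a self-contained version of the translator section added in this commit,
# extended (as an assumption) to actually show the translation in the UI.
import streamlit as st
from transformers import MBartForConditionalGeneration, MBart50TokenizerFast

@st.cache_resource  # assumption: cache the large MBart checkpoint so it loads once per session
def load_translator():
    model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
    tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
    tokenizer.src_lang = "en_XX"  # source language is English, as in the diff
    return model, tokenizer

st.title("Language Translator")
model, tokenizer = load_translator()
text_l = st.text_area("Input sentence:", key="translate_input")

if text_l:  # only translate once the user has entered text
    encoded_en = tokenizer(text_l, return_tensors="pt")
    generated_tokens = model.generate(
        **encoded_en,
        forced_bos_token_id=tokenizer.lang_code_to_id["hi_IN"],  # force Hindi as the target language
    )
    out_l = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
    st.text_area(label="Output sentence:", value=out_l[0], key="translate_output")

Caching the from_pretrained calls matters on Spaces because Streamlit re-executes the whole script on every widget interaction, so an uncached mbart-large-50 load would otherwise be repeated on each rerun.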