import streamlit as st
from transformers import pipeline, MBartForConditionalGeneration, MBart50TokenizerFast
from audio_recorder_streamlit import audio_recorder

# --- Grammatical error correction: text input ---
pipe = pipeline(model="vennify/t5-base-grammar-correction")

st.title("Grammatical Error Checker")
st.header("Text Input:")
text = st.text_area("Input sentence:", key=1)
if text:
    # The text2text pipeline returns a list of dicts; keep only the corrected string.
    out = pipe(text)[0]["generated_text"]
    st.text_area(label="Output sentence:", value=out, key="text_out")

# --- Grammatical error correction: speech input ---
pipe_s = pipeline(model="openai/whisper-large-v3")

st.header("Speech Input:")
audio_bytes = audio_recorder(
    pause_threshold=2.0,
    sample_rate=41_000,
    recording_color="#e8b62c",
    neutral_color="#6aa36f",
    icon_name="user",
    icon_size="6x",
)
if audio_bytes:
    st.audio(audio_bytes, format="audio/wav")
    # Whisper transcribes the recorded audio; the ASR pipeline returns {"text": ...}.
    transcript = pipe_s(audio_bytes)["text"]
    st.text_area(label="Input sentence:", value=transcript, key="speech_in")
    # Feed the transcript through the same grammar-correction pipeline.
    out_s = pipe(transcript)[0]["generated_text"]
    st.text_area(label="Output sentence:", value=out_s, key="speech_out")

# --- English-to-Hindi translation with mBART-50 ---
st.title("Language Translator")
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
tokenizer.src_lang = "en_XX"

text_l = st.text_area("Input sentence:", key=2)
if text_l:
    encoded_en = tokenizer(text_l, return_tensors="pt")
    # Force the decoder to start with the Hindi language token.
    generated_tokens = model.generate(
        **encoded_en,
        forced_bos_token_id=tokenizer.lang_code_to_id["hi_IN"],
    )
    out_l = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
    st.text_area(label="Output sentence:", value=out_l, key="translate_out")

# --- Paraphrasing ---
pipe_p = pipeline(model="ramsrigouthamg/t5_sentence_paraphraser")

st.title("Paraphraser")
text_p = st.text_area("Input sentence:", key=3)
if text_p:
    out_p = pipe_p(text_p)[0]["generated_text"]
    st.text_area(label="Output sentence:", value=out_p, key="paraphrase_out")
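
# Optional: Streamlit reruns the whole script on every widget interaction, so the
# pipelines and mBART weights above are reloaded each time. A minimal sketch of how
# the loads could be cached with st.cache_resource (the helper name below is
# illustrative, not part of the original script):
@st.cache_resource
def load_grammar_pipeline():
    # Loaded once per server process and reused across reruns.
    return pipeline(model="vennify/t5-base-grammar-correction")

# pipe = load_grammar_pipeline()  # would replace the direct pipeline(...) call above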