import gradio as gr
from transformers import pipeline
import spacy
from textblob import TextBlob
from gradio_client import Client

# Initialize models
nlp = spacy.load("en_core_web_sm")
spell_checker = pipeline("text2text-generation", model="oliverguhr/spelling-correction-english-base")

def preprocess_text(text: str):
    """Process text and return spelling suggestions, named entities, and hashtag/mention tags."""
    result = {
        "spell_suggestions": [],
        "entities": [],
        "tags": []
    }

    doc = nlp(text)

    # TextBlob spell check
    blob = TextBlob(text)
    corrected = str(blob.correct())
    if corrected != text:
        result["spell_suggestions"].append({
            "original": text,
            "corrected": corrected
        })

    # Transformer-based spell check
    spell_checked = spell_checker(text, max_length=512)[0]["generated_text"]
    if spell_checked != text and spell_checked != corrected:
        result["spell_suggestions"].append({
            "original": text,
            "corrected": spell_checked
        })

    # Named entities and hashtag/mention tokens
    result["entities"] = [{"text": ent.text, "label": ent.label_} for ent in doc.ents]
    result["tags"] = [token.text for token in doc if token.text.startswith(("#", "@"))]

    return text, result

def preprocess_and_forward(text: str):
    """Preprocess text and forward the original text to the translation service."""
    original_text, preprocessing_result = preprocess_text(text)

    # Forward the original (uncorrected) text to the translation space
    client = Client("Frenchizer/space_17")
    try:
        translation = client.predict(original_text)
        return translation, preprocessing_result
    except Exception as e:
        return f"Error: {str(e)}", preprocessing_result

# Gradio interface
with gr.Blocks() as demo:
    input_text = gr.Textbox(label="Input Text")
    output_text = gr.Textbox(label="Output Text")
    preprocessing_output = gr.JSON(label="Preprocessing Result")
    preprocess_button = gr.Button("Process")
    # preprocess_and_forward returns two values, so wire two output components
    preprocess_button.click(
        fn=preprocess_and_forward,
        inputs=[input_text],
        outputs=[output_text, preprocessing_output],
    )

if __name__ == "__main__":
    demo.launch()
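
# Example client call (a minimal sketch, assuming the app is running locally on
# Gradio's default port 7860; the endpoint name below is an assumption -- use
# client.view_api() to list the endpoints this app actually exposes):
#
#     from gradio_client import Client
#
#     client = Client("http://127.0.0.1:7860")
#     print(client.view_api())  # confirm the endpoint name before calling it
#     translation, preprocessing = client.predict(
#         "Ths is a tst sentence #demo", api_name="/preprocess_and_forward"
#     )
#     print(translation)
#     print(preprocessing)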