import subprocess
import sys

import gradio as gr
import nltk
import spacy
from nltk.corpus import wordnet
from pattern.en import conjugate, lemma, pluralize, singularize
from textblob import TextBlob
from transformers import pipeline

from gector.gec_model import GecBERTModel  # GECToR model
from utils.helpers import read_lines, normalize  # GECToR utilities

# Initialize the English text-classification pipeline for AI detection
pipeline_en = pipeline(task="text-classification", model="Hello-SimpleAI/chatgpt-detector-roberta")


# Predict the label and score for English text (AI detection)
def predict_en(text):
    res = pipeline_en(text)[0]
    return res['label'], res['score']


# Ensure the NLTK data needed by the Humanifier is downloaded
nltk.download('wordnet')
nltk.download('omw-1.4')

# Ensure the SpaCy model is installed for the Humanifier
try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    # Use the current interpreter (sys.executable) so the model is installed
    # into the same environment the app is running in
    subprocess.run([sys.executable, "-m", "spacy", "download", "en_core_web_sm"])
    nlp = spacy.load("en_core_web_sm")


# Get synonyms using NLTK WordNet (Humanifier)
def get_synonyms_nltk(word, pos):
    synsets = wordnet.synsets(word, pos=pos)
    if synsets:
        # `lem` avoids shadowing pattern.en's `lemma` imported above
        return [lem.name() for lem in synsets[0].lemmas()]
    return []


# Capitalize the first letter of sentences and proper nouns (Humanifier)
def capitalize_sentences_and_nouns(text):
    doc = nlp(text)
    corrected_text = []
    for sent in doc.sents:
        sentence = []
        for token in sent:
            if token.i == sent.start:  # First word of the sentence
                sentence.append(token.text.capitalize())
            elif token.pos_ == "PROPN":  # Proper noun
                sentence.append(token.text.capitalize())
            else:
                sentence.append(token.text)
        corrected_text.append(' '.join(sentence))
    return ' '.join(corrected_text)


# Correct tense errors using Pattern
def correct_tense_errors(text):
    doc = nlp(text)
    corrected_text = []
    for token in doc:
        if token.pos_ == "VERB":
            # Re-inflect the verb with Pattern's conjugate().
            # Note: forcing every verb to the past tense is only an example,
            # not real tense-error detection.
            verb_form = conjugate(lemma(token.text), tense='past')
            corrected_text.append(verb_form)
        else:
            corrected_text.append(token.text)
    return ' '.join(corrected_text)


# Correct singular/plural errors using Pattern
def correct_singular_plural_errors(text):
    doc = nlp(text)
    corrected_text = []
    for token in doc:
        if token.pos_ == "NOUN":
            if token.tag_ == "NN":  # Singular noun
                corrected_text.append(singularize(token.text))
            elif token.tag_ == "NNS":  # Plural noun
                corrected_text.append(pluralize(token.text))
            else:  # Keep other noun tags unchanged rather than dropping them
                corrected_text.append(token.text)
        else:
            corrected_text.append(token.text)
    return ' '.join(corrected_text)


# Correct spelling (and, to a limited extent, grammar) using TextBlob
def correct_grammar_textblob(text):
    blob = TextBlob(text)
    return str(blob.correct())  # TextBlob's built-in correction is spelling-based


# Initialize the GECToR model for grammar correction
def load_gector_model():
    model_paths = ["gector/roberta_1_gector.th"]  # Ensure the model file is placed here
    vocab_path = "output_vocabulary"
    model = GecBERTModel(vocab_path=vocab_path,
                         model_paths=model_paths,
                         max_len=50,
                         min_len=3,
                         iterations=5,
                         min_error_probability=0.0,
                         lowercase_tokens=0,
                         model_name="roberta",
                         special_tokens_fix=1,
                         log=False,
                         confidence=0,
                         del_confidence=0,
                         is_ensemble=False,
                         weigths=None)  # "weigths" matches the (misspelled) parameter name in the GECToR source
    return model


# Load the GECToR model once at startup
gector_model = load_gector_model()


# Correct grammar using GECToR
def correct_grammar_gector(text):
    sentences = [text.split()]  # handle_batch expects a batch of token lists
    corrected_sentences, _ = gector_model.handle_batch(sentences)
    return " ".join(corrected_sentences[0])
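# Optional startup sanity check for the GECToR wrapper. The sample sentence and
# the expected correction are illustrative only; the actual output depends on
# the checkpoint in gector/roberta_1_gector.th. Uncomment to run once at launch:
#   print(correct_grammar_gector("she go to school every days"))
#   # -> something like "She goes to school every day"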
# Paraphrasing function using SpaCy and NLTK (Humanifier)
def paraphrase_with_spacy_nltk(text):
    doc = nlp(text)
    paraphrased_words = []
    for token in doc:
        # Map SpaCy POS tags to WordNet POS tags
        pos = None
        if token.pos_ == "NOUN":
            pos = wordnet.NOUN
        elif token.pos_ == "VERB":
            pos = wordnet.VERB
        elif token.pos_ == "ADJ":
            pos = wordnet.ADJ
        elif token.pos_ == "ADV":
            pos = wordnet.ADV

        synonyms = get_synonyms_nltk(token.text.lower(), pos) if pos else []

        # Replace an open-class word only when a distinct synonym exists
        if synonyms and token.pos_ in {"NOUN", "VERB", "ADJ", "ADV"} and synonyms[0] != token.text.lower():
            paraphrased_words.append(synonyms[0])
        else:
            paraphrased_words.append(token.text)
    return ' '.join(paraphrased_words)


# Combined Humanifier pipeline: paraphrase, then grammar-correct with GECToR
def paraphrase_and_correct(text):
    # Step 1: Paraphrase the text
    paraphrased_text = paraphrase_with_spacy_nltk(text)
    # Step 2: Apply grammatical corrections using GECToR
    corrected_text = correct_grammar_gector(paraphrased_text)
    return corrected_text


# Gradio app setup with two tabs
with gr.Blocks() as demo:
    with gr.Tab("AI Detection"):
        t1 = gr.Textbox(lines=5, label='Text')
        button1 = gr.Button("🤖 Predict!")
        label1 = gr.Textbox(lines=1, label='Predicted Label 🎃')
        score1 = gr.Textbox(lines=1, label='Prob')
        button1.click(predict_en, inputs=[t1], outputs=[label1, score1], api_name='predict_en')

    with gr.Tab("Humanifier"):
        text_input = gr.Textbox(lines=5, label="Input Text")
        paraphrase_button = gr.Button("Paraphrase & Correct")
        output_text = gr.Textbox(label="Paraphrased and Corrected Text")
        paraphrase_button.click(paraphrase_and_correct, inputs=text_input, outputs=output_text)

# Launch the app
demo.launch()
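# Because button1 registers api_name='predict_en', the AI-detection endpoint can
# also be called remotely with the Gradio client. Illustrative sketch only; the
# URL is a placeholder for wherever this app is actually hosted:
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860/")
#   label, score = client.predict("sample text", api_name="/predict_en")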