import re
import subprocess
import sys

import gradio as gr
import nltk
import spacy
from nltk.corpus import wordnet
from spellchecker import SpellChecker
from transformers import pipeline
# Initialize the English text-classification pipeline for AI detection
pipeline_en = pipeline(task="text-classification", model="Hello-SimpleAI/chatgpt-detector-roberta")

# Initialize the spell checker
spell = SpellChecker()

# Ensure the necessary NLTK data is downloaded
nltk.download('wordnet', quiet=True)
nltk.download('omw-1.4', quiet=True)

# Ensure the spaCy model is installed, downloading it on first run if needed
try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    subprocess.run([sys.executable, "-m", "spacy", "download", "en_core_web_sm"], check=True)
    nlp = spacy.load("en_core_web_sm")
# Predict the label and score for English text (AI detection)
def predict_en(text):
    res = pipeline_en(text)[0]
    return res['label'], res['score']
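# Illustrative output shape (assumed; the detector returns a label such as
# "Human" or "ChatGPT" together with a confidence score):
#   predict_en("Hello there!")  ->  ("Human", 0.98)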
# Remove redundant filler words that add no meaning
def remove_redundant_words(text):
    doc = nlp(text)
    meaningless_words = {"actually", "basically", "literally", "really", "very", "just"}
    filtered_text = [token.text for token in doc if token.text.lower() not in meaningless_words]
    return ' '.join(filtered_text)
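# Illustrative example (filler words are dropped):
#   remove_redundant_words("It is actually very simple")  ->  "It is simple"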
# Fix spacing before punctuation
def fix_punctuation_spacing(text):
    # Split the text on spaces, then glue punctuation tokens back onto the
    # preceding word
    words = text.split(' ')
    cleaned_words = []
    punctuation_marks = {',', '.', "'", '!', '?', ':'}
    for word in words:
        if cleaned_words and word and word[0] in punctuation_marks:
            cleaned_words[-1] += word
        else:
            cleaned_words.append(word)
    return ' '.join(cleaned_words).replace(' ,', ',').replace(' .', '.').replace(" '", "'") \
        .replace(' !', '!').replace(' ?', '?').replace(' :', ':')
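# Illustrative example (space-separated punctuation is reattached):
#   fix_punctuation_spacing("Hello , world !")  ->  "Hello, world!"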
# Fix possessives that were split apart by tokenization
def fix_possessives(text):
    text = re.sub(r'(\w)\s\'\s?s', r"\1's", text)
    return text
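# Illustrative example (a tokenized possessive is rejoined):
#   fix_possessives("the Earth ' s crust")  ->  "the Earth's crust"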
# Capitalize the first letter of sentences and proper nouns
def capitalize_sentences_and_nouns(text):
    doc = nlp(text)
    corrected_text = []
    for sent in doc.sents:
        sentence = []
        for token in sent:
            if token.i == sent.start or token.pos_ == "PROPN":
                # Uppercase only the first letter, so acronyms such as "NASA"
                # are not mangled the way str.capitalize() would
                sentence.append(token.text[0].upper() + token.text[1:])
            else:
                sentence.append(token.text)
        corrected_text.append(' '.join(sentence))
    return ' '.join(corrected_text)
# Force-capitalize the first letter of every sentence and ensure it ends with
# a full stop
def force_first_letter_capital(text):
    sentences = re.split(r'(?<=\w[.!?])\s+', text)
    capitalized_sentences = []
    for sentence in sentences:
        if sentence:
            capitalized_sentence = sentence[0].capitalize() + sentence[1:]
            if not re.search(r'[.!?]$', capitalized_sentence):
                capitalized_sentence += '.'
            capitalized_sentences.append(capitalized_sentence)
    return " ".join(capitalized_sentences)
# Normalize auxiliary verbs to their base form via WordNet's morphy
# (a rough heuristic rather than true tense correction)
def correct_tense_errors(text):
    doc = nlp(text)
    corrected_text = []
    for token in doc:
        if token.pos_ == "VERB" and token.dep_ in {"aux", "auxpass"}:
            lemma = wordnet.morphy(token.text, wordnet.VERB) or token.text
            corrected_text.append(lemma)
        else:
            corrected_text.append(token.text)
    return ' '.join(corrected_text)
# Check and correct article errors ("a" vs. "an")
def correct_article_errors(text):
    doc = nlp(text)
    corrected_text = []
    for token in doc:
        # Guard against the article being the last token, where nbor(1)
        # would raise an IndexError
        if token.text in ['a', 'an'] and token.i + 1 < len(doc):
            next_token = token.nbor(1)
            if token.text == "a" and next_token.text[0].lower() in "aeiou":
                corrected_text.append("an")
            elif token.text == "an" and next_token.text[0].lower() not in "aeiou":
                corrected_text.append("a")
            else:
                corrected_text.append(token.text)
        else:
            corrected_text.append(token.text)
    return ' '.join(corrected_text)
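# Illustrative example (the article is matched to the following sound):
#   correct_article_errors("she ate a apple")  ->  "she ate an apple"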
# Ensure simple subject-verb agreement between a noun subject and its verb
def ensure_subject_verb_agreement(text):
    doc = nlp(text)
    # First pass: map each verb's token index to a corrected form (the
    # original version inserted the fix next to the subject while keeping
    # the unchanged verb, which garbled the output)
    corrections = {}
    for token in doc:
        if token.dep_ == "nsubj" and token.head.pos_ == "VERB":
            if token.tag_ == "NN" and token.head.tag_ != "VBZ":
                # Singular subject with a non-third-person verb form
                corrections[token.head.i] = token.head.lemma_ + "s"
            elif token.tag_ == "NNS" and token.head.tag_ == "VBZ":
                # Plural subject with a third-person-singular verb form
                corrections[token.head.i] = token.head.lemma_
    # Second pass: emit the tokens, substituting corrected verbs in place
    return ' '.join(corrections.get(token.i, token.text) for token in doc)
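# Illustrative example (parse permitting, the verb is inflected to match):
#   ensure_subject_verb_agreement("the dog run fast")  ->  "the dog runs fast"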
# Correct spelling errors while preserving leading capitalization
def correct_spelling(text):
    words = text.split()
    corrected_words = []
    for word in words:
        corrected_word = spell.correction(word)
        if corrected_word is not None and corrected_word != word.lower():
            # pyspellchecker returns lowercase suggestions, so restore a
            # leading capital if the original word had one
            if word[0].isupper():
                corrected_word = corrected_word.capitalize()
            corrected_words.append(corrected_word)
        else:
            corrected_words.append(word)
    return ' '.join(corrected_words)
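# Illustrative example (assuming pyspellchecker's default English dictionary):
#   correct_spelling("speling mistake")  ->  "spelling mistake"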
# Replace words with their first WordNet synonym where one exists
def replace_with_synonyms(text):
    words = text.split()
    replaced_words = []
    for word in words:
        synonyms = wordnet.synsets(word)
        if synonyms:
            # Take the first lemma of the first synset; this ignores word
            # sense, so it can occasionally change the meaning
            synonym = synonyms[0].lemmas()[0].name()
            # Replace the word only if the synonym is actually different
            if synonym.lower() != word.lower():
                replaced_words.append(synonym.replace('_', ' '))
            else:
                replaced_words.append(word)
        else:
            replaced_words.append(word)
    return ' '.join(replaced_words)
# Main function for paraphrasing and grammar correction
def paraphrase_and_correct(text):
    cleaned_text = remove_redundant_words(text)
    paraphrased_text = capitalize_sentences_and_nouns(cleaned_text)
    paraphrased_text = force_first_letter_capital(paraphrased_text)
    paraphrased_text = correct_article_errors(paraphrased_text)
    paraphrased_text = correct_tense_errors(paraphrased_text)
    paraphrased_text = ensure_subject_verb_agreement(paraphrased_text)
    paraphrased_text = fix_possessives(paraphrased_text)
    paraphrased_text = correct_spelling(paraphrased_text)
    paraphrased_text = fix_punctuation_spacing(paraphrased_text)
    # Synonym replacement runs last, so it may override earlier fixes
    paraphrased_text = replace_with_synonyms(paraphrased_text)
    return paraphrased_text
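# Illustrative end-to-end run (actual output depends on the spaCy parse,
# WordNet data, and the spelling dictionary installed locally):
#   paraphrase_and_correct("this is a example of a sentence .")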
# Gradio app setup
with gr.Blocks() as demo:
    with gr.Tab("AI Detection"):
        t1 = gr.Textbox(lines=5, label='Text')
        button1 = gr.Button("Predict!")
        label1 = gr.Textbox(lines=1, label='Predicted Label')
        score1 = gr.Textbox(lines=1, label='Probability')
        button1.click(fn=predict_en, inputs=t1, outputs=[label1, score1])

    with gr.Tab("Paraphrasing & Grammar Correction"):
        t2 = gr.Textbox(lines=5, label='Enter text for paraphrasing and grammar correction')
        button2 = gr.Button("Paraphrase and Correct")
        result2 = gr.Textbox(lines=5, label='Corrected Text')
        button2.click(fn=paraphrase_and_correct, inputs=t2, outputs=result2)

demo.launch(share=True)