# LTP / app.py
import os
import gradio as gr
from transformers import pipeline
import spacy
import subprocess
import sys
import nltk
from nltk.corpus import wordnet
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from spellchecker import SpellChecker
import re
import string
import random
import language_tool_python  # module-level import needed for utils.correct in grammar_check
from language_tool_python import LanguageTool
# Download necessary NLTK data
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
nltk.download('averaged_perceptron_tagger_eng')
nltk.download('wordnet')
nltk.download('omw-1.4')
nltk.download('punkt_tab')
# Initialize stopwords
stop_words = set(stopwords.words("english"))
# Words we don't want to replace
exclude_tags = {'PRP', 'PRP$', 'MD', 'VBZ', 'VBP', 'VBD', 'VBG', 'VBN', 'TO', 'IN', 'DT', 'CC'}
exclude_words = {'is', 'am', 'are', 'was', 'were', 'have', 'has', 'do', 'does', 'did', 'will', 'shall', 'should', 'would', 'could', 'can', 'may', 'might'}
# Initialize the English text classification pipeline for AI detection
pipeline_en = pipeline(task="text-classification", model="Hello-SimpleAI/chatgpt-detector-roberta")
# Initialize the spell checker
spell = SpellChecker()
# Initialize LanguageTool
tool = LanguageTool('en-US')
# Ensure the SpaCy model is installed
try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    # Use the running interpreter so the model installs into the active environment
    subprocess.run([sys.executable, "-m", "spacy", "download", "en_core_web_sm"], check=True)
    nlp = spacy.load("en_core_web_sm")
def plagiarism_removal(text):
    def plagiarism_remover(word):
        # Leave stopwords, protected words, and punctuation untouched
        if word.lower() in stop_words or word.lower() in exclude_words or word in string.punctuation:
            return word
        # Collect single-word, purely alphabetic WordNet synonyms that differ from the original
        synonyms = set()
        for syn in wordnet.synsets(word):
            for lemma in syn.lemmas():
                if "_" not in lemma.name() and lemma.name().isalpha() and lemma.name().lower() != word.lower():
                    synonyms.add(lemma.name())
        pos_tag_word = nltk.pos_tag([word])[0]
        if pos_tag_word[1] in exclude_tags:
            return word
        # Keep only synonyms whose POS tag matches the original word's tag
        filtered_synonyms = [syn for syn in synonyms if nltk.pos_tag([syn])[0][1] == pos_tag_word[1]]
        if not filtered_synonyms:
            return word
        synonym_choice = random.choice(filtered_synonyms)
        # Preserve the original word's title case
        if word.istitle():
            return synonym_choice.title()
        return synonym_choice

    para_split = word_tokenize(text)
    final_text = [plagiarism_remover(word) for word in para_split]
    # Re-attach punctuation to the preceding token so the join does not insert spaces before it
    corrected_text = []
    for i in range(len(final_text)):
        if final_text[i] in string.punctuation and i > 0:
            corrected_text[-1] += final_text[i]
        else:
            corrected_text.append(final_text[i])
    return " ".join(corrected_text)
def predict_en(text):
    res = pipeline_en(text)[0]
    return res['label'], res['score']
def remove_redundant_words(text):
    doc = nlp(text)
    meaningless_words = {"actually", "basically", "literally", "really", "very", "just"}
    filtered_text = [token.text for token in doc if token.text.lower() not in meaningless_words]
    return ' '.join(filtered_text)
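# Example: remove_redundant_words("I just really think it's basically fine")
# -> "I think it 's fine"; spaCy splits "it's" into "it" + "'s", and the stray
# space is repaired later by fix_punctuation_spacing.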
def fix_punctuation_spacing(text):
    words = text.split(' ')
    cleaned_words = []
    punctuation_marks = {',', '.', "'", '!', '?', ':'}
    for word in words:
        # Glue a leading punctuation mark onto the previous word
        if cleaned_words and word and word[0] in punctuation_marks:
            cleaned_words[-1] += word
        else:
            cleaned_words.append(word)
    return ' '.join(cleaned_words).replace(' ,', ',').replace(' .', '.').replace(" '", "'") \
        .replace(' !', '!').replace(' ?', '?').replace(' :', ':')
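# Example: fix_punctuation_spacing("Hello , world !") -> "Hello, world!"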
def fix_possessives(text):
    # Collapse tokenization artifacts around possessive 's, e.g. "dog ' s" or "dog 's"
    text = re.sub(r'(\w)\s\'\s?s', r"\1's", text)
    return text
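# Example: fix_possessives("the dog ' s bone") -> "the dog's bone"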
def capitalize_sentences_and_nouns(text):
    doc = nlp(text)
    corrected_text = []
    for sent in doc.sents:
        sentence = []
        for token in sent:
            if token.i == sent.start:
                sentence.append(token.text.capitalize())
            elif token.pos_ == "PROPN":
                sentence.append(token.text.capitalize())
            else:
                sentence.append(token.text)
        corrected_text.append(' '.join(sentence))
    return ' '.join(corrected_text)
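# Example (assuming spaCy tags the names as PROPN, which is not guaranteed for
# lowercased input): capitalize_sentences_and_nouns("i met alice in paris")
# -> "I met Alice in Paris".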
def force_first_letter_capital(text):
    sentences = re.split(r'(?<=\w[.!?])\s+', text)
    capitalized_sentences = []
    for sentence in sentences:
        if sentence:
            capitalized_sentence = sentence[0].capitalize() + sentence[1:]
            # Append a period when the sentence lacks terminal punctuation
            if not re.search(r'[.!?]$', capitalized_sentence):
                capitalized_sentence += '.'
            capitalized_sentences.append(capitalized_sentence)
    return " ".join(capitalized_sentences)
def correct_tense_errors(text):
    doc = nlp(text)
    corrected_text = []
    for token in doc:
        # spaCy tags auxiliaries as AUX rather than VERB, so check both;
        # the original VERB-only check never matched aux/auxpass tokens
        if token.pos_ in {"VERB", "AUX"} and token.dep_ in {"aux", "auxpass"}:
            lemma = wordnet.morphy(token.text, wordnet.VERB) or token.text
            corrected_text.append(lemma)
        else:
            corrected_text.append(token.text)
    return ' '.join(corrected_text)
def correct_article_errors(text):
    doc = nlp(text)
    corrected_text = []
    for token in doc:
        # Guard the lookahead: token.nbor(1) raises IndexError on the last token
        if token.text in ['a', 'an'] and token.i + 1 < len(doc):
            next_token = token.nbor(1)
            if token.text == "a" and next_token.text[0].lower() in "aeiou":
                corrected_text.append("an")
            elif token.text == "an" and next_token.text[0].lower() not in "aeiou":
                corrected_text.append("a")
            else:
                corrected_text.append(token.text)
        else:
            corrected_text.append(token.text)
    return ' '.join(corrected_text)
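# Example: correct_article_errors("I ate a apple and an banana")
# -> "I ate an apple and a banana". The check looks only at the next token's
# first letter, so exceptions like "an hour" or "a university" are mishandled.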
def ensure_subject_verb_agreement(text):
    doc = nlp(text)
    # Work on a positional copy so the corrected verb replaces the original in
    # place (the original version appended the fixed verb before the subject)
    corrected_text = [token.text for token in doc]
    for token in doc:
        if token.dep_ == "nsubj" and token.head.pos_ == "VERB":
            # Singular noun subject but verb is not 3rd-person singular
            if token.tag_ == "NN" and token.head.tag_ != "VBZ":
                corrected_text[token.head.i] = token.head.lemma_ + "s"
            # Plural noun subject but verb is 3rd-person singular
            elif token.tag_ == "NNS" and token.head.tag_ == "VBZ":
                corrected_text[token.head.i] = token.head.lemma_
    return ' '.join(corrected_text)
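# Example (assuming the parse finds "dog" as nsubj of "run"):
# ensure_subject_verb_agreement("The dog run fast") -> "The dog runs fast".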
def correct_spelling(text):
    words = text.split()
    corrected_words = []
    for word in words:
        corrected_word = spell.correction(word)
        # correction() returns None when it has no candidate; keep the original word
        if corrected_word is not None:
            corrected_words.append(corrected_word)
        else:
            corrected_words.append(word)
    return ' '.join(corrected_words)
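# Caveat: pyspellchecker's correction() returns lowercase suggestions by
# default, so this step can strip capitalization applied by the earlier passes.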
def grammar_check(text):
    matches = tool.check(text)
    corrected_text = language_tool_python.utils.correct(text, matches)
    return corrected_text
def paraphrase_and_correct(text):
    # Order matters: redundancy removal and synonym substitution run first,
    # then casing, grammar, spelling, and spacing repairs over the perturbed text
    cleaned_text = remove_redundant_words(text)
    plag_removed = plagiarism_removal(cleaned_text)
    paraphrased_text = capitalize_sentences_and_nouns(plag_removed)
    paraphrased_text = force_first_letter_capital(paraphrased_text)
    paraphrased_text = correct_article_errors(paraphrased_text)
    paraphrased_text = correct_tense_errors(paraphrased_text)
    paraphrased_text = ensure_subject_verb_agreement(paraphrased_text)
    paraphrased_text = fix_possessives(paraphrased_text)
    paraphrased_text = correct_spelling(paraphrased_text)
    paraphrased_text = fix_punctuation_spacing(paraphrased_text)
    paraphrased_text = grammar_check(paraphrased_text)
    return paraphrased_text
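# Example: paraphrase_and_correct("i basically met a old friend ' s brother")
# drops "basically", swaps synonyms where WordNet allows, fixes "a old" ->
# "an old", repairs the possessive, and finishes with a LanguageTool pass.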
with gr.Blocks() as demo:
    with gr.Tab("AI Detection"):
        t1 = gr.Textbox(lines=5, label='Text')
        button1 = gr.Button("🤖 Predict!")
        label1 = gr.Textbox(lines=1, label='Predicted Label 🎃')
        score1 = gr.Textbox(lines=1, label='Prob')
        button1.click(fn=predict_en, inputs=t1, outputs=[label1, score1])
    with gr.Tab("Paraphrasing & Grammar Correction"):
        t2 = gr.Textbox(lines=5, label='Enter text for paraphrasing and grammar correction')
        button2 = gr.Button("🔄 Paraphrase and Correct")
        result2 = gr.Textbox(lines=5, label='Corrected Text')
        button2.click(fn=paraphrase_and_correct, inputs=t2, outputs=result2)
if __name__ == "__main__":
    # LanguageTool runs on a local Java server; fail fast with a clear message if Java is missing
    try:
        subprocess.run(["java", "-version"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    except FileNotFoundError:
        print("Java is not installed. Please install Java to use LanguageTool.")
        sys.exit(1)
    demo.launch(share=True)