import os
import gradio as gr
from transformers import pipeline
import spacy
import subprocess
import nltk
from nltk.corpus import wordnet
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from spellchecker import SpellChecker
import re
import string
import random
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
nltk.download('averaged_perceptron_tagger_eng')
nltk.download('wordnet')
nltk.download('omw-1.4')
nltk.download('punkt_tab')
# Initialize stopwords
stop_words = set(stopwords.words("english"))
# Words we don't want to replace
exclude_tags = {'PRP', 'PRP$', 'MD', 'VBZ', 'VBP', 'VBD', 'VBG', 'VBN', 'TO', 'IN', 'DT', 'CC'}
exclude_words = {'is', 'am', 'are', 'was', 'were', 'have', 'has', 'do', 'does', 'did', 'will', 'shall', 'should', 'would', 'could', 'can', 'may', 'might'}
# Initialize the English text classification pipeline for AI detection
pipeline_en = pipeline(task="text-classification", model="Hello-SimpleAI/chatgpt-detector-roberta")
# Initialize the spell checker
spell = SpellChecker()
# Ensure the SpaCy model is installed
try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"])
    nlp = spacy.load("en_core_web_sm")
# Filter out overly formal or archaic words
def is_formal_or_rare(word):
    formal_words = {"homo", "satellite", "futurity", "contemporaries"}
    return word in formal_words
# Adjust synonym replacement logic
def plagiarism_removal(text):
    def plagiarism_remover(word):
        if word.lower() in stop_words or word.lower() in exclude_words or word in string.punctuation:
            return word

        # Find synonyms
        synonyms = set()
        for syn in wordnet.synsets(word):
            for lemma in syn.lemmas():
                synonym = lemma.name()
                if "_" not in synonym and synonym.isalpha() and synonym.lower() != word.lower():
                    synonyms.add(synonym)

        # Avoid replacing words based on certain POS tags
        pos_tag_word = nltk.pos_tag([word])[0]
        if pos_tag_word[1] in exclude_tags:
            return word

        # Filter synonyms to match the same part of speech
        filtered_synonyms = [syn for syn in synonyms if nltk.pos_tag([syn])[0][1] == pos_tag_word[1]]

        # Avoid formal/rare words; return the original word if no good synonym is found
        filtered_synonyms = [syn for syn in filtered_synonyms if not is_formal_or_rare(syn)]
        if not filtered_synonyms:
            return word

        synonym_choice = random.choice(filtered_synonyms)
        if word.istitle():
            return synonym_choice.title()
        return synonym_choice

    # Tokenize and process the text
    para_split = word_tokenize(text)
    final_text = [plagiarism_remover(word) for word in para_split]

    # Fix spacing issues after token replacement
    corrected_text = []
    for i in range(len(final_text)):
        if final_text[i] in string.punctuation and i > 0:
            corrected_text[-1] += final_text[i]  # Attach punctuation to the previous word
        else:
            corrected_text.append(final_text[i])

    return " ".join(corrected_text)
# Auxiliary functions
def predict_en(text):
    res = pipeline_en(text)[0]
    return res['label'], res['score']
def remove_redundant_words(text):
    doc = nlp(text)
    meaningless_words = {"actually", "basically", "literally", "really", "very", "just"}
    filtered_text = [token.text for token in doc if token.text.lower() not in meaningless_words]
    return ' '.join(filtered_text)
def fix_punctuation_spacing(text):
    words = text.split(' ')
    cleaned_words = []
    punctuation_marks = {',', '.', "'", '!', '?', ':'}
    for word in words:
        if cleaned_words and word and word[0] in punctuation_marks:
            cleaned_words[-1] += word
        else:
            cleaned_words.append(word)
    return ' '.join(cleaned_words).replace(' ,', ',').replace(' .', '.').replace(" '", "'") \
        .replace(' !', '!').replace(' ?', '?').replace(' :', ':')
def fix_possessives(text):
    # Rejoin possessive 's split off during tokenization (e.g. "dog ' s" -> "dog's")
    text = re.sub(r'(\w)\s\'\s?s', r"\1's", text)
    return text
def capitalize_sentences_and_nouns(text):
    doc = nlp(text)
    corrected_text = []
    for sent in doc.sents:
        sentence = []
        for token in sent:
            if token.i == sent.start:
                sentence.append(token.text.capitalize())
            elif token.pos_ == "PROPN":
                sentence.append(token.text.capitalize())
            else:
                sentence.append(token.text)
        corrected_text.append(' '.join(sentence))
    return ' '.join(corrected_text)
# Additional auxiliary functions (spelling correction, etc.)
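# correct_spelling() is called in paraphrase_and_correct() below but its definition
# was elided from this listing; the following is a minimal sketch built on the
# SpellChecker instance created above, not necessarily the original implementation.
def correct_spelling(text):
    words = word_tokenize(text)
    corrected = []
    for word in words:
        # Leave punctuation and other non-alphabetic tokens untouched
        if not word.isalpha():
            corrected.append(word)
            continue
        correction = spell.correction(word)
        # spell.correction() may return None for unknown words; keep the original
        # token then, and preserve title case so earlier capitalization survives
        if correction is None or correction.lower() == word.lower():
            corrected.append(word)
        elif word.istitle():
            corrected.append(correction.title())
        else:
            corrected.append(correction)
    return ' '.join(corrected)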
# Main paraphrasing and correction function
def paraphrase_and_correct(text):
    paragraphs = text.split("\n\n")  # Split by paragraphs

    # Process each paragraph separately
    processed_paragraphs = []
    for paragraph in paragraphs:
        cleaned_text = remove_redundant_words(paragraph)
        plag_removed = plagiarism_removal(cleaned_text)
        paraphrased_text = capitalize_sentences_and_nouns(plag_removed)
        paraphrased_text = fix_possessives(paraphrased_text)
        paraphrased_text = correct_spelling(paraphrased_text)
        paraphrased_text = fix_punctuation_spacing(paraphrased_text)
        processed_paragraphs.append(paraphrased_text)

    return "\n\n".join(processed_paragraphs)  # Reassemble the text with paragraphs
# Gradio app setup
with gr.Blocks() as demo:
    with gr.Tab("AI Detection"):
        t1 = gr.Textbox(lines=5, label='Text')
        button1 = gr.Button("🤖 Predict!")
        label1 = gr.Textbox(lines=1, label='Predicted Label 🎃')
        score1 = gr.Textbox(lines=1, label='Prob')
        button1.click(fn=predict_en, inputs=t1, outputs=[label1, score1])

    with gr.Tab("Paraphrasing & Grammar Correction"):
        t2 = gr.Textbox(lines=5, label='Enter text for paraphrasing and grammar correction')
        button2 = gr.Button("🔄 Paraphrase and Correct")
        result2 = gr.Textbox(lines=5, label='Corrected Text')
        button2.click(fn=paraphrase_and_correct, inputs=t2, outputs=result2)

demo.launch(share=True)