Spaces:
Sleeping
Sleeping
File size: 5,047 Bytes
04919b2 c5d2e49 04919b2 c5d2e49 04919b2 c5d2e49 04919b2 c5d2e49 04919b2 c5d2e49 04919b2 c5d2e49 486bbd6 cf3f184 c5d2e49 cf3f184 c5d2e49 cf3f184 c5d2e49 cf3f184 c5d2e49 cf3f184 c5d2e49 cf3f184 c5d2e49 486bbd6 cf3f184 c5d2e49 486bbd6 c5d2e49 cf3f184 c5d2e49 cf3f184 486bbd6 c5d2e49 353216c c5d2e49 a2b6ad0 c5d2e49 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 |
import os
import subprocess
import sys

import gradio as gr
import language_tool_python
import nltk
import spacy
from nltk.corpus import wordnet
from spellchecker import SpellChecker
from transformers import pipeline
# --- One-time model / resource initialisation ---------------------------

# English text-classification pipeline used for AI detection.
pipeline_en = pipeline(task="text-classification", model="Hello-SimpleAI/chatgpt-detector-roberta")
# Spell checker used for the spelling-correction pass.
spell = SpellChecker()
# LanguageTool instance for English grammar correction.
tool = language_tool_python.LanguageTool('en-US')
# WordNet corpora required by the synonym-rephrasing step.
nltk.download('wordnet')
nltk.download('omw-1.4')
# Load the small English spaCy model, installing it on first run.
try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    # Use the running interpreter (sys.executable) rather than whatever
    # "python" resolves to on PATH, so the model is installed into the
    # same environment this process runs in; check=True surfaces a failed
    # download instead of letting the subsequent load fail confusingly.
    subprocess.run([sys.executable, "-m", "spacy", "download", "en_core_web_sm"], check=True)
    nlp = spacy.load("en_core_web_sm")
# Function to predict the label and score for English text (AI Detection)
def predict_en(text):
    """Run the AI-detection classifier on *text* and return (label, score)."""
    top = pipeline_en(text)[0]
    return top['label'], top['score']
# Function to remove redundant and meaningless words
def remove_redundant_words(text):
    """Drop common filler words ("actually", "very", ...) from *text*."""
    fillers = {"actually", "basically", "literally", "really", "very", "just"}
    kept = []
    for tok in nlp(text):
        if tok.text.lower() not in fillers:
            kept.append(tok.text)
    return ' '.join(kept)
# Function to apply grammatical corrections using LanguageTool
def correct_grammar(text):
    """Apply LanguageTool's automatic grammar corrections to *text*."""
    return tool.correct(text)
# Function to correct spelling errors
def correct_spelling(text):
    """Spell-correct each whitespace-separated word, keeping words the
    checker has no suggestion for."""
    fixed = []
    for original in text.split():
        suggestion = spell.correction(original)
        # Falsy suggestion (None) means no correction found — keep the word.
        fixed.append(suggestion if suggestion else original)
    return ' '.join(fixed)
# Function to capitalize the first letter of each sentence and proper nouns
def _upper_first(word):
    """Upper-case only the first character of *word*, preserving the rest."""
    return word[:1].upper() + word[1:]


def capitalize_sentences_and_nouns(text):
    """Capitalize the first word of each sentence and all proper nouns.

    Uses _upper_first instead of str.capitalize(): capitalize() also
    lowercases the remainder of the token, which would mangle acronyms
    and mixed-case names (e.g. "NASA" -> "Nasa", "iPhone" -> "Iphone").
    """
    doc = nlp(text)
    sentences = []
    for sent in doc.sents:
        words = []
        for token in sent:
            # Sentence-initial token or proper noun gets an upper-cased
            # first letter; everything else passes through unchanged.
            if token.i == sent.start or token.pos_ == "PROPN":
                words.append(_upper_first(token.text))
            else:
                words.append(token.text)
        sentences.append(' '.join(words))
    return ' '.join(sentences)
# Function to rephrase with contextually appropriate synonyms
# spaCy coarse POS tags mapped to WordNet POS constants; only these four
# word classes are considered for synonym substitution.
_SPACY_TO_WORDNET_POS = {
    "NOUN": wordnet.NOUN,
    "VERB": wordnet.VERB,
    "ADJ": wordnet.ADJ,
    "ADV": wordnet.ADV,
}


def rephrase_with_synonyms(text):
    """Replace content words (nouns/verbs/adjectives/adverbs) in *text*
    with the first WordNet synonym of matching part of speech.

    Tokens with no matching synset, and all other word classes, are kept
    as-is.
    """
    rephrased = []
    for token in nlp(text):
        pos_tag = _SPACY_TO_WORDNET_POS.get(token.pos_)
        if pos_tag:
            synsets = wordnet.synsets(token.text, pos=pos_tag)
            if synsets:
                # WordNet lemma names join multi-word expressions with
                # underscores (e.g. "look_up"); convert them back to
                # spaces so they don't leak into the output text.
                lemma = synsets[0].lemmas()[0].name().replace('_', ' ')
                rephrased.append(lemma)
                continue
        rephrased.append(token.text)
    return ' '.join(rephrased)
# Comprehensive function for paraphrasing and grammar correction
def paraphrase_and_correct(text):
    """Run the full cleanup pipeline over *text* and return the result.

    Stages, in order: strip filler words, capitalize sentences and proper
    nouns, fix grammar, swap in synonyms, fix spelling, then a final
    grammar pass to clean up anything the rephrasing introduced.
    """
    stages = (
        remove_redundant_words,
        capitalize_sentences_and_nouns,
        correct_grammar,
        rephrase_with_synonyms,
        correct_spelling,
        correct_grammar,
    )
    result = text
    for stage in stages:
        result = stage(result)
    return result
# Gradio UI: one tab for AI detection, one for paraphrasing/correction.
with gr.Blocks() as demo:
    with gr.Tab("AI Detection"):
        detect_input = gr.Textbox(lines=5, label='Text')
        detect_button = gr.Button("π€ Predict!")
        detect_label = gr.Textbox(lines=1, label='Predicted Label π')
        detect_score = gr.Textbox(lines=1, label='Prob')
        # Detector takes one text box, fills the label and score boxes.
        detect_button.click(fn=predict_en, inputs=detect_input, outputs=[detect_label, detect_score])
    with gr.Tab("Paraphrasing & Grammar Correction"):
        rewrite_input = gr.Textbox(lines=5, label='Enter text for paraphrasing and grammar correction')
        rewrite_button = gr.Button("π Paraphrase and Correct")
        rewrite_output = gr.Textbox(lines=5, label='Corrected Text')
        # Full cleanup pipeline: one input box, one output box.
        rewrite_button.click(fn=paraphrase_and_correct, inputs=rewrite_input, outputs=rewrite_output)
demo.launch(share=True)
|