import gradio as gr
import spacy
import subprocess
import sys
import nltk
from nltk.corpus import wordnet
from spellchecker import SpellChecker
from transformers import pipeline  # Required for the AI-detection classifier below
from ginger import get_ginger_result  # Local module providing the grammar correction function

# Initialize the English text classification pipeline for AI detection
pipeline_en = pipeline(task="text-classification", model="Hello-SimpleAI/chatgpt-detector-roberta")

# Initialize the spell checker
spell = SpellChecker()

# Ensure necessary NLTK data is downloaded
nltk.download('wordnet')
nltk.download('omw-1.4')

# Ensure the SpaCy model is installed; use sys.executable so the download
# runs under the same interpreter as this script
try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    subprocess.run([sys.executable, "-m", "spacy", "download", "en_core_web_sm"], check=True)
    nlp = spacy.load("en_core_web_sm")

# Function to predict the label and score for English text (AI Detection)
def predict_en(text):
    res = pipeline_en(text)[0]
    return res['label'], res['score']
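
# Example usage (hypothetical output; the exact label strings and scores
# depend on the Hello-SimpleAI/chatgpt-detector-roberta model):
#   label, score = predict_en("The quick brown fox jumps over the lazy dog.")
#   print(label, score)  # e.g. "Human" 0.97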

# Function to get synonyms using NLTK WordNet
def get_synonyms_nltk(word, pos):
    synsets = wordnet.synsets(word, pos=pos)
    if synsets:
        lemmas = synsets[0].lemmas()
        # WordNet multi-word lemmas use underscores (e.g. "look_up"); convert to spaces
        return [lemma.name().replace('_', ' ') for lemma in lemmas]
    return []
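
# Example usage (a sketch; results depend on the installed WordNet data):
#   get_synonyms_nltk("quick", wordnet.ADJ)
#   -> e.g. ['quick', 'speedy']  (lemmas of the first matching synset)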

# Function to remove redundant and meaningless words
def remove_redundant_words(text):
    doc = nlp(text)
    meaningless_words = {"actually", "basically", "literally", "really", "very", "just"}
    filtered_text = [token.text for token in doc if token.text.lower() not in meaningless_words]
    return ' '.join(filtered_text)
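
# Example usage (note that ' '.join re-spaces punctuation):
#   remove_redundant_words("It was really very simple, actually.")
#   -> "It was simple , ."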

# Function to capitalize the first letter of sentences and proper nouns
def capitalize_sentences_and_nouns(text):
    doc = nlp(text)
    corrected_text = []

    for sent in doc.sents:
        sentence = []
        for token in sent:
            if token.i == sent.start:  # First word of the sentence
                sentence.append(token.text.capitalize())
            elif token.pos_ == "PROPN":  # Proper noun
                sentence.append(token.text.capitalize())
            else:
                sentence.append(token.text)
        corrected_text.append(' '.join(sentence))

    return ' '.join(corrected_text)
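
# Example usage (illustrative; proper-noun detection depends on the spaCy model):
#   capitalize_sentences_and_nouns("paris is lovely. john agrees.")
#   -> "Paris is lovely . John agrees ."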

# Function to force capitalization of the first letter of every sentence
def force_first_letter_capital(text):
    sentences = text.split(". ")  # Split by period to get each sentence
    capitalized_sentences = [sentence[0].capitalize() + sentence[1:] if sentence else "" for sentence in sentences]
    return ". ".join(capitalized_sentences)

# Function to correct tense errors by normalizing auxiliary verbs to their base form
def correct_tense_errors(text):
    doc = nlp(text)
    corrected_text = []
    for token in doc:
        # spaCy tags auxiliary verbs with pos_ "AUX", so check both POS values
        if token.pos_ in {"VERB", "AUX"} and token.dep_ in {"aux", "auxpass"}:
            lemma = wordnet.morphy(token.text, wordnet.VERB) or token.text
            corrected_text.append(lemma)
        else:
            corrected_text.append(token.text)
    return ' '.join(corrected_text)

# Function to correct singular/plural errors
def correct_singular_plural_errors(text):
    doc = nlp(text)
    corrected_text = []

    for token in doc:
        if token.pos_ == "NOUN":
            # Quantifiers and articles attach to the noun itself in the
            # dependency parse, so inspect token.children, not token.head.children
            if token.tag_ == "NN":  # Singular noun
                if any(child.text.lower() in ['many', 'several', 'few'] for child in token.children):
                    corrected_text.append(token.lemma_ + 's')
                else:
                    corrected_text.append(token.text)
            elif token.tag_ == "NNS":  # Plural noun
                if any(child.text.lower() in ['a', 'one'] for child in token.children):
                    corrected_text.append(token.lemma_)
                else:
                    corrected_text.append(token.text)
            else:
                # Keep nouns with any other tag instead of silently dropping them
                corrected_text.append(token.text)
        else:
            corrected_text.append(token.text)

    return ' '.join(corrected_text)
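
# Example usage (illustrative; assumes spaCy attaches the quantifier to the
# noun as a child in the dependency parse):
#   correct_singular_plural_errors("many cat sat on one mats")
#   -> e.g. "many cats sat on one mat"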

# Function to check and correct article errors ("a" vs. "an")
def correct_article_errors(text):
    doc = nlp(text)
    corrected_text = []
    for token in doc:
        # Guard against the article being the last token, where nbor(1) would raise
        if token.text in ['a', 'an'] and token.i + 1 < len(doc):
            next_token = token.nbor(1)
            if token.text == "a" and next_token.text[0].lower() in "aeiou":
                corrected_text.append("an")
            elif token.text == "an" and next_token.text[0].lower() not in "aeiou":
                corrected_text.append("a")
            else:
                corrected_text.append(token.text)
        else:
            corrected_text.append(token.text)
    return ' '.join(corrected_text)
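
# Example usage (illustrative; assumes spaCy tokenizes the article and the
# following word separately):
#   correct_article_errors("She ate a apple and an banana.")
#   -> "She ate an apple and a banana ."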

# Function to get the correct synonym while maintaining verb form
def replace_with_synonym(token):
    pos = None
    if token.pos_ == "VERB":
        pos = wordnet.VERB
    elif token.pos_ == "NOUN":
        pos = wordnet.NOUN
    elif token.pos_ == "ADJ":
        pos = wordnet.ADJ
    elif token.pos_ == "ADV":
        pos = wordnet.ADV
    
    synonyms = get_synonyms_nltk(token.text, pos)
    if synonyms:
        return synonyms[0]
    return token.text

# Function to use the Ginger API for grammar correction
def correct_grammar_with_ginger(text):
    result = get_ginger_result(text)
    corrected_text = text
    # Apply suggestions from right to left so earlier replacements don't
    # shift the From/To offsets of the suggestions that follow
    suggestions = sorted(result["LightGingerTheTextResult"], key=lambda s: s["From"], reverse=True)
    for suggestion in suggestions:
        if suggestion["Suggestions"]:
            from_index = suggestion["From"]
            to_index = suggestion["To"] + 1
            suggested_text = suggestion["Suggestions"][0]["Text"]
            corrected_text = corrected_text[:from_index] + suggested_text + corrected_text[to_index:]
    return corrected_text
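
# Example usage (a sketch; assumes the local `ginger` module returns the
# LightGingerTheTextResult structure used above):
#   correct_grammar_with_ginger("He go to school yesterday")
#   -> e.g. "He went to school yesterday"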

# Gradio interface: run each cleanup pass in sequence
def process_text(text):
    # Remove filler words before capitalization so the first remaining word
    # still receives its capital letter
    text = remove_redundant_words(text)
    text = correct_article_errors(text)
    text = correct_singular_plural_errors(text)
    text = correct_tense_errors(text)
    text = capitalize_sentences_and_nouns(text)
    text = force_first_letter_capital(text)
    text = correct_grammar_with_ginger(text)  # Final pass: grammar correction via the Ginger API
    return text
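
# Example end-to-end run (hypothetical; the final output also depends on the
# spaCy parse and the Ginger API response):
#   process_text("actually she has a apple")
#   -> e.g. "She has an apple"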

iface = gr.Interface(fn=process_text, inputs="text", outputs="text")
iface.launch()