sashdev committed on
Commit
8911544
·
verified ·
1 Parent(s): 150c539

Update app.py

Files changed (1)
  1. app.py +233 -18
app.py CHANGED
@@ -1,23 +1,238 @@
- from fastapi import FastAPI
- import language_tool_python
-
- app = FastAPI()
-
- # Initialize the LanguageTool instance for English
- tool = language_tool_python.LanguageTool('en-US')
-
- @app.get("/")
- def greet():
-     return {"message": "Hello, World!"}
-
- @app.post("/check")
- def check_text(text: str):
-     """
-     This endpoint checks grammar and spelling of the input text.
-     :param text: The text to be checked
-     :return: A list of grammar and spelling mistakes
-     """
      matches = tool.check(text)
-     # Convert matches to a more readable format
-     errors = [{"message": match.message, "context": match.context, "suggestions": match.replacements} for match in matches]
-     return {"errors": errors}
+ import os
+ import gradio as gr
+ from transformers import pipeline
+ import spacy
+ import subprocess
+ import nltk
+ from nltk.corpus import wordnet
+ from nltk.corpus import stopwords
+ from nltk.tokenize import word_tokenize
+ from spellchecker import SpellChecker
+ import re
+ import string
+ import random
+ import language_tool_python  # module import, so language_tool_python.utils.correct is available below
+
+ # Download necessary NLTK data
+ nltk.download('punkt')
+ nltk.download('stopwords')
+ nltk.download('averaged_perceptron_tagger')
+ nltk.download('averaged_perceptron_tagger_eng')
+ nltk.download('wordnet')
+ nltk.download('omw-1.4')
+ nltk.download('punkt_tab')
+
+ # Initialize stopwords
+ stop_words = set(stopwords.words("english"))
+
+ # Words we don't want to replace
+ exclude_tags = {'PRP', 'PRP$', 'MD', 'VBZ', 'VBP', 'VBD', 'VBG', 'VBN', 'TO', 'IN', 'DT', 'CC'}
+ exclude_words = {'is', 'am', 'are', 'was', 'were', 'have', 'has', 'do', 'does', 'did', 'will', 'shall', 'should', 'would', 'could', 'can', 'may', 'might'}
+
+ # Initialize the English text classification pipeline for AI detection
+ pipeline_en = pipeline(task="text-classification", model="Hello-SimpleAI/chatgpt-detector-roberta")
+
+ # Initialize the spell checker
+ spell = SpellChecker()
+
+ # Initialize LanguageTool
+ tool = language_tool_python.LanguageTool('en-US')
+
+ # Ensure the SpaCy model is installed
+ try:
+     nlp = spacy.load("en_core_web_sm")
+ except OSError:
+     subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"])
+     nlp = spacy.load("en_core_web_sm")
+
+ def plagiarism_removal(text):
+     def plagiarism_remover(word):
+         # Leave stopwords, auxiliaries, and punctuation untouched
+         if word.lower() in stop_words or word.lower() in exclude_words or word in string.punctuation:
+             return word
+
+         synonyms = set()
+         for syn in wordnet.synsets(word):
+             for lemma in syn.lemmas():
+                 if "_" not in lemma.name() and lemma.name().isalpha() and lemma.name().lower() != word.lower():
+                     synonyms.add(lemma.name())
+
+         pos_tag_word = nltk.pos_tag([word])[0]
+
+         if pos_tag_word[1] in exclude_tags:
+             return word
+
+         filtered_synonyms = [syn for syn in synonyms if nltk.pos_tag([syn])[0][1] == pos_tag_word[1]]
+
+         if not filtered_synonyms:
+             return word
+
+         synonym_choice = random.choice(filtered_synonyms)
+
+         if word.istitle():
+             return synonym_choice.title()
+         return synonym_choice
+
+     para_split = word_tokenize(text)
+     final_text = [plagiarism_remover(word) for word in para_split]
+
+     # Re-attach punctuation tokens to the preceding word
+     corrected_text = []
+     for i in range(len(final_text)):
+         if final_text[i] in string.punctuation and i > 0:
+             corrected_text[-1] += final_text[i]
+         else:
+             corrected_text.append(final_text[i])
+
+     return " ".join(corrected_text)
+
+ def predict_en(text):
+     res = pipeline_en(text)[0]
+     return res['label'], res['score']
+
+ def remove_redundant_words(text):
+     doc = nlp(text)
+     meaningless_words = {"actually", "basically", "literally", "really", "very", "just"}
+     filtered_text = [token.text for token in doc if token.text.lower() not in meaningless_words]
+     return ' '.join(filtered_text)
+
+ def fix_punctuation_spacing(text):
+     words = text.split(' ')
+     cleaned_words = []
+     punctuation_marks = {',', '.', "'", '!', '?', ':'}
+
+     for word in words:
+         if cleaned_words and word and word[0] in punctuation_marks:
+             cleaned_words[-1] += word
+         else:
+             cleaned_words.append(word)
+
+     return ' '.join(cleaned_words).replace(' ,', ',').replace(' .', '.').replace(" '", "'") \
+                                   .replace(' !', '!').replace(' ?', '?').replace(' :', ':')
+
+ def fix_possessives(text):
+     text = re.sub(r'(\w)\s\'\s?s', r"\1's", text)
+     return text
+
+ def capitalize_sentences_and_nouns(text):
+     doc = nlp(text)
+     corrected_text = []
+
+     for sent in doc.sents:
+         sentence = []
+         for token in sent:
+             if token.i == sent.start:
+                 sentence.append(token.text.capitalize())
+             elif token.pos_ == "PROPN":
+                 sentence.append(token.text.capitalize())
+             else:
+                 sentence.append(token.text)
+         corrected_text.append(' '.join(sentence))
+
+     return ' '.join(corrected_text)
+
+ def force_first_letter_capital(text):
+     sentences = re.split(r'(?<=\w[.!?])\s+', text)
+     capitalized_sentences = []
+
+     for sentence in sentences:
+         if sentence:
+             capitalized_sentence = sentence[0].capitalize() + sentence[1:]
+             if not re.search(r'[.!?]$', capitalized_sentence):
+                 capitalized_sentence += '.'
+             capitalized_sentences.append(capitalized_sentence)
+
+     return " ".join(capitalized_sentences)
+
+ def correct_tense_errors(text):
+     doc = nlp(text)
+     corrected_text = []
+     for token in doc:
+         if token.pos_ == "VERB" and token.dep_ in {"aux", "auxpass"}:
+             lemma = wordnet.morphy(token.text, wordnet.VERB) or token.text
+             corrected_text.append(lemma)
+         else:
+             corrected_text.append(token.text)
+     return ' '.join(corrected_text)
+
+ def correct_article_errors(text):
+     doc = nlp(text)
+     corrected_text = []
+     for token in doc:
+         # guard against 'a'/'an' as the final token before calling nbor(1)
+         if token.text in ['a', 'an'] and token.i + 1 < len(doc):
+             next_token = token.nbor(1)
+             if token.text == "a" and next_token.text[0].lower() in "aeiou":
+                 corrected_text.append("an")
+             elif token.text == "an" and next_token.text[0].lower() not in "aeiou":
+                 corrected_text.append("a")
+             else:
+                 corrected_text.append(token.text)
+         else:
+             corrected_text.append(token.text)
+     return ' '.join(corrected_text)
+
+ def ensure_subject_verb_agreement(text):
+     doc = nlp(text)
+     corrected_text = []
+     for token in doc:
+         if token.dep_ == "nsubj" and token.head.pos_ == "VERB":
+             if token.tag_ == "NN" and token.head.tag_ != "VBZ":
+                 corrected_text.append(token.head.lemma_ + "s")
+             elif token.tag_ == "NNS" and token.head.tag_ == "VBZ":
+                 corrected_text.append(token.head.lemma_)
+         corrected_text.append(token.text)
+     return ' '.join(corrected_text)
+
+ def correct_spelling(text):
+     words = text.split()
+     corrected_words = []
+     for word in words:
+         corrected_word = spell.correction(word)
+         if corrected_word is not None:
+             corrected_words.append(corrected_word)
+         else:
+             # keep the original token when the spell checker has no suggestion
+             corrected_words.append(word)
+     return ' '.join(corrected_words)
+
+ def grammar_check(text):
      matches = tool.check(text)
+     # apply LanguageTool's suggested replacements to produce corrected text
+     corrected_text = language_tool_python.utils.correct(text, matches)
+     return corrected_text
+
+ def paraphrase_and_correct(text):
+     cleaned_text = remove_redundant_words(text)
+     plag_removed = plagiarism_removal(cleaned_text)
+     paraphrased_text = capitalize_sentences_and_nouns(plag_removed)
+     paraphrased_text = force_first_letter_capital(paraphrased_text)
+     paraphrased_text = correct_article_errors(paraphrased_text)
+     paraphrased_text = correct_tense_errors(paraphrased_text)
+     paraphrased_text = ensure_subject_verb_agreement(paraphrased_text)
+     paraphrased_text = fix_possessives(paraphrased_text)
+     paraphrased_text = correct_spelling(paraphrased_text)
+     paraphrased_text = fix_punctuation_spacing(paraphrased_text)
+     paraphrased_text = grammar_check(paraphrased_text)
+
+     return paraphrased_text
+
+ with gr.Blocks() as demo:
+     with gr.Tab("AI Detection"):
+         t1 = gr.Textbox(lines=5, label='Text')
+         button1 = gr.Button("🤖 Predict!")
+         label1 = gr.Textbox(lines=1, label='Predicted Label 🎃')
+         score1 = gr.Textbox(lines=1, label='Prob')
+
+         button1.click(fn=predict_en, inputs=t1, outputs=[label1, score1])
+
+     with gr.Tab("Paraphrasing & Grammar Correction"):
+         t2 = gr.Textbox(lines=5, label='Enter text for paraphrasing and grammar correction')
+         button2 = gr.Button("🔄 Paraphrase and Correct")
+         result2 = gr.Textbox(lines=5, label='Corrected Text')
+
+         button2.click(fn=paraphrase_and_correct, inputs=t2, outputs=result2)
+
+ if __name__ == "__main__":
+     # Fail fast if Java is missing, since LanguageTool runs on the JVM
+     try:
+         subprocess.run(["java", "-version"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+     except FileNotFoundError:
+         print("Java is not installed. Please install Java to use LanguageTool.")
+         exit(1)
+
+     demo.launch(share=True)
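A quick way to sanity-check the new pipeline is a minimal smoke test, sketched below. This is not part of the commit: it assumes `app.py` is on the import path and that Java, the spaCy model, and the NLTK corpora are already installed, since the downloads at the top of the file run at import time.

```python
# Hypothetical smoke test for the functions added in this commit (not in the diff).
from app import paraphrase_and_correct, predict_en

sample = "she dont really has a umbrella , its raining !"
print(paraphrase_and_correct(sample))  # cleaned, paraphrased, grammar-checked text

label, score = predict_en("This passage was generated by a language model.")
print(label, score)  # the detector's label (e.g. human vs. AI) and its confidence score
```

Note that `demo.launch(share=True)` asks Gradio for a public share link; when the app runs as a Hugging Face Space it is already publicly served, so the flag is typically unnecessary there.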