Commit cb77053
Parent(s): d38b838
Update app.py
app.py CHANGED
@@ -1,125 +1,61 @@
-
-from
-
-
-import textblob
-from polyglot.detect import Detector
+import re
+from textblob import TextBlob
+import nltk
+import emopy
 import numpy as np
-
+import pandas as pd
+import matplotlib.pyplot as plt
+import seaborn as sns
+import torch
+import spacy
+from flask import Flask, request, render_template
 
-
-
-
-
-def index():
-    return render_template("index.html")
-
-@app.route("/paraphrase", methods=["POST"])
-def paraphrase():
-    input_text = request.form["input_text"]
-    options = request.form.getlist("options")
-
-    # Remove special characters
-    if "remove_special_characters" in options:
-        input_text = remove_special_characters(input_text)
-
-    # Correct grammar
-    if "correct_grammar" in options:
-        input_text = correct_grammar(input_text)
-
-    # Summarize text
-    if "summarize_text" in options:
-        input_text = summarize_text(input_text)
-
-    # Multilingual support
-    target_language = request.form.get("target_language")
-    if target_language:
-        input_text = translate(input_text, target_language)
-
-    # Custom synonyms
-    custom_synonyms = request.form.getlist("custom_synonyms")
-    for word, synonym in custom_synonyms:
-        input_text = replace_word(input_text, word, synonym)
-
-    # Output customization
-    input_text = customise_output(input_text, options)
-
-    # Integration with other NLP tools
-    named_entities = get_named_entities(input_text)
-    part_of_speech = get_part_of_speech(input_text)
-    sentiment = get_sentiment(input_text)
-
-    # Emotion detector
-    emotion = detect_emotion(input_text)
-    input_text = adjust_tone(input_text, emotion)
-
-    return render_template("index.html", paraphrased_text=input_text, named_entities=named_entities, part_of_speech=part_of_speech, sentiment=sentiment)
-
-def remove_special_characters(input_text):
-    # Code to remove special characters
-    return input_text
+nltk.download('averaged_perceptron_tagger')
+nltk.download('punkt')
+nltk.download('maxent_ne_chunker')
+nltk.download('words')
 
-
-    # Code to summarize the text
-    return input_text
+nlp = spacy.load("en_core_web_sm")
 
-
-    detector = Detector(input_text)
-    language = detector.language.code
-    return language
-
-def translate(input_text, target_language):
-    blob = textblob.TextBlob(input_text)
-    translated_text = blob.translate(to=target_language)
-    return translated_text
-
-def get_synonyms(word):
-    synonyms = []
-    for syn in wordnet.synsets(word):
-        for lemma in syn.lemmas():
-            synonyms.append(lemma.name())
-    return synonyms
-
-def replace_word(input_text, word, synonym):
-    words = word_tokenize(input_text)
-    words = [synonym if w == word else w for w in words]
-    input_text = " ".join(words)
-    return input_text
-
-def customise_output(input_text, options):
-    # Code to customise output based on options
-    return input_text
-
-def get_named_entities(input_text):
-    named_entities = ne_chunk(pos_tag(word_tokenize(input_text)))
-    return named_entities
-
-def get_part_of_speech(input_text):
-    pos = pos_tag(word_tokenize(input_text))
-    return pos
-
-def get_sentiment(input_text):
-    blob = textblob.TextBlob(input_text)
-    sentiment = blob.sentiment.polarity
-    return sentiment
-
-def correct_grammar(input_text):
-    blob = textblob.TextBlob(input_text)
-    corrected_text = str(blob.correct())
-    return corrected_text
-
-def detect_emotion(input_text):
-    words = word_tokenize(input_text)
-    words = [w.lower() for w in words]
-    words = [w for w in words if w.isalpha()]
-    input_text = " ".join(words)
-    input_text = np.array([input_text])
-    sentiment = model.predict(input_text, batch_size=1, verbose=0)[0]
-    return sentiment
-
-def adjust_tone(input_text, emotion):
-    # Code to adjust tone based on emotion
-    return input_text
+app = Flask(__name__)
 
-
-
+@app.route('/', methods=['GET', 'POST'])
+def index():
+    if request.method == 'POST':
+        text = request.form['text']
+        paraphrase_option = request.form['paraphrase_option']
+        remove_special_chars = request.form.get('remove_special_chars')
+        summarize = request.form.get('summarize')
+        correct_grammar = request.form.get('correct_grammar')
+
+        if correct_grammar:
+            text = str(TextBlob(text).correct())
+
+        if remove_special_chars:
+            text = re.sub(r'[^\w\s]', '', text)
+
+        if summarize:
+            doc = nlp(text)
+            sentences = [sent.text for sent in doc.sents]
+            text = " ".join(sentences[:3]) + "..."
+
+        if paraphrase_option == 'repeat':
+            text = re.sub(r'\b(\w+)\b', r'\1', text)
+        elif paraphrase_option == 'emotion_detector':
+            emotion = emopy.EmotionDetector()
+            emotions = emotion.detect_emotion(text)
+            emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
+            index = np.argmax(emotions)
+            emotion = emotion_labels[index]
+            if emotion == 'happy':
+                text = text.upper()
+            elif emotion == 'sad':
+                text = text.lower()
+            else:
+                text = text.capitalize()
+
+        return render_template('index.html', text=text)
+    return render_template('index.html')
+
+if __name__ == '__main__':
+    app.run(host="0.0.0.0",port=7860,debug=True)
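
A minimal client sketch for exercising the new index() route, assuming the app is reachable on the port passed to app.run (7860) and that index.html posts back to "/"; the field names mirror what the handler reads from request.form, and the sample values are illustrative only:

# Hypothetical client for the "/" route; the response body is the rendered index.html.
import requests

resp = requests.post(
    "http://localhost:7860/",
    data={
        "text": "the quick brown fox jumps over the lazy dog!!",
        "paraphrase_option": "repeat",    # required: index() uses request.form['paraphrase_option']
        "remove_special_chars": "on",     # any non-empty value enables the re.sub cleanup
        "correct_grammar": "on",          # runs TextBlob(text).correct()
        # "summarize" omitted, so the spaCy sentence-truncation step is skipped
    },
)
print(resp.status_code)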
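Note that the 'repeat' branch, re.sub(r'\b(\w+)\b', r'\1', text), replaces each word with itself and so returns the text unchanged. A sketch of a WordNet-based substitution in the spirit of the removed get_synonyms/replace_word helpers is below; synonym_paraphrase is a hypothetical name, and it assumes nltk's wordnet and punkt data are available:

# Sketch only: swap each token for its first differing WordNet lemma,
# falling back to the original word when no synonym exists.
import nltk
from nltk.corpus import wordnet
from nltk.tokenize import word_tokenize

nltk.download('wordnet')
nltk.download('punkt')

def synonym_paraphrase(text):
    out = []
    for w in word_tokenize(text):
        lemmas = [
            lemma.name().replace('_', ' ')
            for syn in wordnet.synsets(w)
            for lemma in syn.lemmas()
            if lemma.name().lower() != w.lower()
        ]
        out.append(lemmas[0] if lemmas else w)
    return " ".join(out)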