imseldrith committed on
Commit
49be6a8
·
1 Parent(s): 167ca4c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -62
app.py CHANGED
@@ -1,71 +1,68 @@
1
  from flask import Flask, render_template, request
2
- from textblob import TextBlob
3
- import re
4
- import nltk
5
- from nltk.translate.bleu_score import sentence_bleu
6
  from nltk.corpus import wordnet
7
- from nltk.corpus import sentiwordnet as swn
8
- from nltk.sentiment import SentimentIntensityAnalyzer
 
 
 
 
9
 
10
app = Flask(__name__)


@app.route('/')
def index():
    """Render the landing page containing the text-input form."""
    return render_template('index.html')
15
 
16
@app.route('/paraphrase', methods=['POST'])
def paraphrase():
    """Handle the form POST: clean, grammar-correct, summarize, paraphrase
    and run NER / POS-tagging / sentiment analysis on the submitted text.

    Reads ``input_text`` from the POSTed form and renders ``index.html``
    with all derived artifacts.
    """
    input_text = request.form['input_text']
    input_text = re.sub(r'[^\w\s]', '', input_text)  # remove special characters

    # Correct grammar using TextBlob
    input_text = str(TextBlob(input_text).correct())

    # BUG FIX: TextBlob has no .summarize() method, so the original call
    # raised AttributeError on every request.  Use a simple extractive
    # summary instead: keep the first two sentences of the corrected text.
    sentences = TextBlob(input_text).sentences
    summarized_text = ' '.join(str(s) for s in sentences[:2])

    # Paraphrase the text
    paraphrased_text = generate_paraphrase(input_text)

    # Emotion detection
    emotion = detect_emotion(input_text)

    # Named Entity Recognition
    entities = nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(input_text)))

    # Part-of-Speech Tagging
    pos_tags = nltk.pos_tag(nltk.word_tokenize(input_text))

    # Sentiment Analysis (VADER compound/pos/neg/neu scores)
    sentiment = SentimentIntensityAnalyzer().polarity_scores(input_text)

    return render_template('index.html', input_text=input_text, summarized_text=summarized_text, paraphrased_text=paraphrased_text, entities=entities, pos_tags=pos_tags, sentiment=sentiment, emotion=emotion)
43
-
44
def generate_paraphrase(text):
    """Return *text* re-tokenized with a few hard-coded synonym substitutions.

    NOTE(review): this is word substitution, not true paraphrasing — the
    name oversells what the function does.
    """
    # BUG FIX: str(TextBlob(text).words) produced the *repr* of a WordList,
    # e.g. "['hello', 'world']".  Join the tokens with spaces instead so the
    # result is readable text.
    paraphrased_text = ' '.join(TextBlob(text).words)

    # Custom synonyms (plain substring replacement; will also hit substrings
    # inside longer words — acceptable for this toy substitution table).
    custom_synonyms = [('happy', 'joyful'), ('sad', 'unhappy')]
    for original, replacement in custom_synonyms:
        paraphrased_text = paraphrased_text.replace(original, replacement)

    return paraphrased_text
54
-
55
def detect_emotion(text):
    """Classify *text* as 'positive' / 'negative' / 'neutral'.

    Uses the first SentiWordNet synset of each token and the sign of the
    maximum (pos_score - neg_score) across tokens.
    """
    emotions = []
    for word in nltk.word_tokenize(text):
        # BUG FIX: senti_synsets() returns a generator, so the original
        # len(synset) / synset[0] calls raised TypeError.  Materialize the
        # results before testing emptiness and indexing.
        synsets = list(swn.senti_synsets(word))
        if synsets:
            emotions.append(synsets[0].pos_score() - synsets[0].neg_score())

    # Empty input (or no words known to SentiWordNet) falls through to 0,
    # i.e. 'neutral'.
    emotion = max(emotions) if emotions else 0

    return 'positive' if emotion > 0 else 'negative' if emotion < 0 else 'neutral'
69
-
70
if __name__ == '__main__':
    # Listen on all interfaces on port 7860 (the Hugging Face Spaces default).
    app.run(host="0.0.0.0", port=7860, debug=True)
 
from flask import Flask, render_template, request

# `import nltk` restored: this revision removed it while get_wordnet_pos()
# still calls nltk.pos_tag, which would raise NameError at runtime.
import nltk
from nltk.corpus import stopwords, wordnet
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
import spacy
from summa.summarizer import summarize
from textblob import TextBlob
 
10
app = Flask(__name__)


@app.route("/")
def index():
    """Serve the main page with the text-input form."""
    return render_template("index.html")
15
 
16
def _get_nlp():
    """Return spaCy's small English pipeline, loaded once and cached.

    The original code called spacy.load() inside the request handler,
    reloading the model on every POST — it is expensive, so load lazily
    and memoize on the function object.
    """
    if not hasattr(_get_nlp, "_nlp"):
        _get_nlp._nlp = spacy.load("en_core_web_sm")
    return _get_nlp._nlp


@app.route("/paraphrase", methods=["POST"])
def paraphrase():
    """Handle the form POST: correct, clean, summarize and analyze the text.

    Reads ``input_text`` from the POSTed form; renders ``paraphrase.html``
    with the lemmatized summary plus entities, POS tags and sentiment.
    """
    input_text = request.form["input_text"]

    # Option to correct grammar using TextBlob
    corrected_text = str(TextBlob(input_text).correct())

    # Option to remove special characters
    clean_text = ''.join(e for e in corrected_text if e.isalnum() or e.isspace())

    # Perform text summarization.  ROBUSTNESS FIX: summa returns "" for
    # inputs too short to summarize, which emptied the whole downstream
    # pipeline; fall back to the cleaned text in that case.
    summary = summarize(clean_text) or clean_text

    # Perform word tokenization and remove stopwords
    stop_words = set(stopwords.words("english"))
    words = [word for word in word_tokenize(summary) if word.lower() not in stop_words]

    # Perform lemmatization on the words
    lemmatizer = WordNetLemmatizer()
    lemmatized_words = [lemmatizer.lemmatize(word, pos=get_wordnet_pos(word)) for word in words]

    # Use spaCy's NER and POS tagging on the summarized text (cached pipeline)
    doc = _get_nlp()(summary)
    entities = [(ent.text, ent.label_) for ent in doc.ents]
    pos_tags = [(token.text, token.pos_) for token in doc]

    # Use TextBlob to perform sentiment analysis (polarity in [-1, 1])
    sentiment = TextBlob(summary).sentiment.polarity

    return render_template("paraphrase.html", input_text=input_text, output_text=' '.join(lemmatized_words), entities=entities, pos_tags=pos_tags, sentiment=sentiment)
56
+
57
def get_wordnet_pos(word):
    """Map the NLTK POS tag of *word* to the tag constants WordNetLemmatizer expects.

    Tags outside adjective/noun/verb/adverb fall back to NOUN, which is also
    the lemmatizer's own default.  Relies on the module-level ``import nltk``
    (this revision had dropped it, making ``nltk.pos_tag`` a NameError).
    """
    # pos_tag on a single-word list; [0][1] is the Penn Treebank tag,
    # whose first letter identifies the coarse word class.
    tag = nltk.pos_tag([word])[0][1][0].upper()
    tag_dict = {
        "J": wordnet.ADJ,
        "N": wordnet.NOUN,
        "V": wordnet.VERB,
        "R": wordnet.ADV,
    }
    return tag_dict.get(tag, wordnet.NOUN)
66
+
67
if __name__ == "__main__":
    # Bind on all interfaces on port 7860 (the Hugging Face Spaces default);
    # debug mode enabled for development.
    app.run(host="0.0.0.0", port=7860, debug=True)