ValadisCERTH committed on
Commit
6ae15ed
·
1 Parent(s): 1faec1a

Create app.py

Files changed (1)
  1. app.py +593 -0
app.py ADDED
import spacy
import re
import nltk
from nltk.corpus import wordnet
import numpy as np

from sklearn.metrics.pairwise import cosine_similarity

nltk.download('wordnet')
nltk.download('omw-1.4')
nltk.download('punkt')

# download the spacy models
spacy.cli.download("en_core_web_sm")
spacy.cli.download("en_core_web_lg")

# use the small spacy model: since we only compare words, this keeps us close to a bag-of-words model, which is what we care about here
nlp = spacy.load('en_core_web_sm', disable=["parser", "ner"])


def find_comptives_symbols(sentence):
    """
    Capture single occurrences of comparison symbols like <, >, =
    (multi-character sequences such as <= or => are not matched)
    """

    pattern = r"(?<![<=>])[%s](?![<=>])" % (re.escape("<=>"))
    matches = re.findall(pattern, sentence)

    found_symbols = []
    for matching in matches:
        # add the symbol to the list for each occurrence found
        found_symbols.append({'comparative': ['symbol', matching]})

    # return the found symbols
    return found_symbols
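
# Quick illustration of the regex above (expected behavior, not executed as part of the app):
#   find_comptives_symbols("magnitude < 6.2")   ->  [{'comparative': ['symbol', '<']}]
#   find_comptives_symbols("magnitude <= 6.2")  ->  []   (the lookarounds reject multi-character sequences)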


def find_comptives_straight_patterns(sentence):
    """
    Identify mentions of comparatives. The forms covered are: comparative adverbs/adjectives followed by "than",
    the words "more"/"less" followed by "than", and "equal" followed by "to"
    """

    doc = nlp(sentence)
    comparatives = []

    for token in doc:

        # every pattern below needs to look at the next token, so skip the last one
        if token.i + 1 >= len(doc):
            continue

        # find mentions of "equal" followed by "to"
        if token.text.lower() == "equal":
            next_token = token.nbor()

            if next_token.text.lower() == "to" and token.i > 0:
                prev_token = token.nbor(-1)

                if prev_token.pos_ == "NOUN":
                    comparatives.append({'comparative': ["equal to", "="]})

        # find mentions of "more"/"less" followed by "than"
        elif token.text.lower() in ["more", "less"]:

            next_token = token.nbor()

            if next_token.text.lower() == "than":

                # here we could also check what comes before more/less: we could require a NOUN
                # (e.g. magnitude) or even specifically the word "magnitude".
                # For the moment we have disabled it:
                # prev_token = token.nbor(-1)
                # if prev_token.pos_ == "NOUN":

                if token.text.lower() == 'more':
                    comparatives.append({'comparative': [token.text + " " + next_token.text, '>']})
                elif token.text.lower() == 'less':
                    comparatives.append({'comparative': [token.text + " " + next_token.text, '<']})

        # find mentions of comparative adjectives or comparative adverbs followed by "than"
        elif token.tag_ == "JJR" or token.tag_ == "RBR":
            next_token = token.nbor()

            if next_token.text.lower() == "than" and next_token.i + 1 < len(doc) and next_token.nbor().pos_ != "NOUN":

                # check if the token is a synonym of "bigger"

                # retrieve a set of synonyms for the concepts of 'big' and 'bigger'
                big_synonyms = set(wordnet.synsets('big') + wordnet.synsets('large') + wordnet.synsets('great') + wordnet.synsets('huge') + wordnet.synsets('enormous') + wordnet.synsets('heavy') + wordnet.synsets('strong') + wordnet.synsets('massive') + wordnet.synsets('immense') + wordnet.synsets('substantial'))
                bigger_synonyms = set(wordnet.synsets('bigger') + wordnet.synsets('larger') + wordnet.synsets('greater') + wordnet.synsets('higher') + wordnet.synsets('taller') + wordnet.synsets('heavier') + wordnet.synsets('stronger'))

                bigger_related_words = big_synonyms.union(bigger_synonyms)
                bigger_rel_words = [word.name().split('.')[0] for word in bigger_related_words]

                flag_bigger = 0

                if token.text.lower() in bigger_rel_words:
                    flag_bigger = 1
                    comparatives.append({'comparative': [token.text + " " + next_token.text, '>']})

                # if no synonym of bigger was found, check the smaller synsets
                if not flag_bigger:

                    # retrieve a set of synonyms for the concepts of 'small' and 'smaller'
                    small_synonyms = set(wordnet.synsets('small') + wordnet.synsets('little') + wordnet.synsets('tiny') + wordnet.synsets('petite') + wordnet.synsets('miniature') + wordnet.synsets('slight') + wordnet.synsets('meager') + wordnet.synsets('inconsequential') + wordnet.synsets('minor'))
                    smaller_synonyms = set(wordnet.synsets('smaller') + wordnet.synsets('lesser') + wordnet.synsets('lower') + wordnet.synsets('shorter') + wordnet.synsets('lighter') + wordnet.synsets('weaker'))

                    smaller_related_words = small_synonyms.union(smaller_synonyms)
                    smaller_rel_words = [word.name().split('.')[0] for word in smaller_related_words]

                    if token.text.lower() in smaller_rel_words:
                        comparatives.append({'comparative': [token.text + " " + next_token.text, '<']})

    return comparatives
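
# Expected behavior of the matcher above (illustrative; the JJR/RBR branch depends on spaCy's tagger and on WordNet):
#   find_comptives_straight_patterns("magnitude less than 6.2")     ->  [{'comparative': ['less than', '<']}]
#   find_comptives_straight_patterns("magnitude greater than 6.2")  ->  [{'comparative': ['greater than', '>']}]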


# helper functions for 'identify_bigger_smaller_advanced'

def identify_comparison(sentence):
    """
    Capture patterns of 'word-er' followed by 'than' (e.g. 'better than', 'lesser than', etc.)
    """

    pattern = r'\b(\w+er than)\b'
    matches = re.findall(pattern, sentence)

    if matches:
        return matches
    else:
        return 0


def find_more_than_reference(sentence):
    """
    Capture patterns of 'more' followed by a word followed by 'than' (e.g. 'more advanced than').
    Note that re.findall returns only the captured groups, so the joined result does not include 'than'
    """

    pattern = r"(more) (\w+) than"
    matches = re.findall(pattern, sentence)

    if matches:
        return [' '.join(match) for match in matches]
    else:
        return 0


def find_less_than_reference(sentence):
    """
    Capture patterns of 'less' followed by a word followed by 'than' (e.g. 'less advanced than').
    Note that re.findall returns only the captured groups, so the joined result does not include 'than'
    """

    pattern = r"(less) (\w+) than"
    matches = re.findall(pattern, sentence)

    if matches:
        return [' '.join(match) for match in matches]
    else:
        return 0
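
# Illustrative behavior of the three regex helpers (only the captured groups are returned,
# which is why 'than' is missing from the more/less results):
#   identify_comparison("a is stronger than b")            ->  ['stronger than']
#   find_more_than_reference("a is more advanced than b")  ->  ['more advanced']
#   find_less_than_reference("a is less advanced than b")  ->  ['less advanced']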


def is_related_to(word, target_word):
    """
    Returns True if the input 'word' is semantically related to the 'target_word' (i.e. the two words
    share at least one WordNet synset), otherwise False.
    """

    target_synsets = set(wordnet.synsets(target_word))
    word_synsets = set(wordnet.synsets(word))

    if word_synsets.intersection(target_synsets):
        return True
    else:
        return False
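
# "Related" here simply means sharing at least one synset, e.g.:
#   is_related_to('big', 'large')  ->  True   ('big' and 'large' are lemmas of a common synset)
#   is_related_to('big', 'small')  ->  False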


def is_related_to_bigger(word):
    """
    Returns True if the input 'word' is semantically related to the concept 'bigger', otherwise False.
    """

    if word.lower() == "more" or word.lower().startswith("more "):
        return True

    # retrieve a set of synonyms for the concepts of 'big' and 'bigger'
    big_synonyms = set(wordnet.synsets('big') + wordnet.synsets('large') + wordnet.synsets('great') + wordnet.synsets('huge') + wordnet.synsets('enormous') + wordnet.synsets('heavy') + wordnet.synsets('strong') + wordnet.synsets('massive') + wordnet.synsets('immense') + wordnet.synsets('substantial'))
    bigger_synonyms = set(wordnet.synsets('bigger') + wordnet.synsets('larger') + wordnet.synsets('greater') + wordnet.synsets('higher') + wordnet.synsets('taller') + wordnet.synsets('heavier') + wordnet.synsets('stronger'))

    related_words = big_synonyms.union(bigger_synonyms)

    # Check if the input word is semantically related to any of those 'big'/'bigger' synonyms
    for related_word in related_words:
        if is_related_to(word, related_word.name().split('.')[0]):
            return True
    return False


def is_related_to_smaller(word):
    """
    Returns True if the input 'word' is semantically related to the concept 'smaller', otherwise False.
    """

    if word.lower() == "less" or word.lower().startswith("less "):
        return True

    # retrieve a set of synonyms for the concepts of 'small' and 'smaller'
    small_synonyms = set(wordnet.synsets('small') + wordnet.synsets('little') + wordnet.synsets('tiny') + wordnet.synsets('petite') + wordnet.synsets('miniature') + wordnet.synsets('slight') + wordnet.synsets('meager') + wordnet.synsets('inconsequential') + wordnet.synsets('minor'))
    smaller_synonyms = set(wordnet.synsets('smaller') + wordnet.synsets('lesser') + wordnet.synsets('lower') + wordnet.synsets('shorter') + wordnet.synsets('lighter') + wordnet.synsets('weaker'))

    related_words = small_synonyms.union(smaller_synonyms)

    # Check if the input word is semantically related to any of those 'small'/'smaller' synonyms
    for related_word in related_words:
        if is_related_to(word, related_word.name().split('.')[0]):
            return True
    return False


def identify_bigger_smaller_advanced(sentence):
    """
    This is a complementary function to capture cases of words ending in '-er' followed by 'than',
    and cases of 'more'/'less' followed by a word followed by 'than'
    """

    # pattern 'word ending in -er' followed by 'than' (pattern1)
    word_er_than = identify_comparison(sentence)

    # pattern 'more' followed by a word followed by 'than' (pattern2)
    more_word_than = find_more_than_reference(sentence)

    # pattern 'less' followed by a word followed by 'than' (pattern3)
    less_word_than = find_less_than_reference(sentence)

    bigger_list = []
    smaller_list = []

    # in case any pattern is captured
    if word_er_than or more_word_than or less_word_than:

        # in case of pattern1
        if word_er_than:
            for word in word_er_than:

                # perform the relevant substitutions
                target_word = word.replace("than", "").strip()

                # examine if it is a bigger-related or a smaller-related word
                bigger_word = is_related_to_bigger(target_word)
                smaller_word = is_related_to_smaller(target_word)

                # case of a bigger word
                if bigger_word and not smaller_word:
                    bigger_list.append({"comparative": [word, ">"]})

                # case of a smaller word
                elif smaller_word and not bigger_word:
                    smaller_list.append({"comparative": [word, "<"]})

        # in case of pattern2
        if more_word_than:
            for word in more_word_than:

                # perform the relevant substitutions
                target_word = word.replace("than", "").replace("more", "").strip()

                # in this case it must be a bigger-related word
                bigger_word = is_related_to_bigger(target_word)

                # case of a bigger word
                if bigger_word:
                    bigger_list.append({"comparative": [word, ">"]})

        # in case of pattern3
        if less_word_than:
            for word in less_word_than:

                # perform the relevant substitutions
                target_word = word.replace("than", "").replace("less", "").strip()

                # in this case it must be a smaller-related word
                lesser_word = is_related_to_smaller(target_word)

                # case of a smaller word
                if lesser_word:
                    smaller_list.append({"comparative": [word, "<"]})

    # return the combined list
    return bigger_list + smaller_list
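
# Illustrative expectation for the combined patterns above (the outcome depends on the WordNet lookups):
#   identify_bigger_smaller_advanced("this quake was stronger than the last one")
#   ->  [{'comparative': ['stronger than', '>']}]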


def find_equal_to_comptives_ngrams(sentence):
    """
    This function takes a sentence as input and returns a reference phrase based on semantic similarity using n-grams.
    The possible reference phrases are provided as a list.
    """

    # This is a reference list for the concept of 'equal to'. It has many references on which to perform the semantic similarity examination
    possible_references = ["equal to", "same as", "similar to", "identical to", "equivalent to", "tantamount to", "corresponding to", "comparable to", "akin to", "commensurate with", "in line with", "on a par with", "indistinguishable from", "corresponding with", "congruent with"]

    # empirically, this threshold is enough
    similarity_threshold = 0.85

    possible_reference_list = []

    # parse each reference with the spacy model to get its embedding
    # (note: en_core_web_sm ships no static word vectors, so Doc.similarity falls back to context-sensitive tensors and should be treated as approximate)
    embedding_references = []
    for reference in possible_references:
        reference_doc = nlp(reference)
        embedding_references.append(reference_doc)

    # Check 2-grams, 3-grams, and 4-grams
    for n in range(2, 5):

        # get the n-grams
        sentence_ngrams = list(nltk.ngrams(sentence.split(), n))

        for sent_ngram in sentence_ngrams:
            sentence_ngram_str = ' '.join(sent_ngram)
            sentence_ngram_doc = nlp(sentence_ngram_str)

            for emb_ref in embedding_references:
                similarity = sentence_ngram_doc.similarity(emb_ref)

                if similarity >= similarity_threshold:
                    possible_reference_list.append({'comparative': [sentence_ngram_str, emb_ref.text, similarity, "="]})
                    break

    # if we have found a possible reference that is similar enough to an n-gram of the input sentence, return the comparative '=', otherwise return an empty list
    if possible_reference_list:
        return possible_reference_list
    else:
        return []
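
# Illustrative expectation; with en_core_web_sm the similarity scores are approximate, so exact values may vary:
#   find_equal_to_comptives_ngrams("magnitude similar to 6.2")
#   ->  should contain an entry like {'comparative': ['similar to', 'similar to', 1.0, '=']}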


def single_verb_comptives(sentence):
    """
    This function takes a sentence and identifies any mention of bigger than, smaller than, equal to, expressed
    as a single-word verb. It uses WordNet synsets to check the verb's lemma against synonyms of the reference verbs
    """

    # base references
    bigger_references_sg = ["surpass", "exceed", "outstrip", "outdo", "outmatch", "outclass", "eclipse", "overshadow", "outrank", "overtake", "top", "beat", "transcend", "dominate", "prevail", "trump", "vanquish", "outperform", "outgun", "outdistance", "outshine"]
    lesser_references_sg = ["lag", "trail", "lose", "underperform", "yield", "surrender", "submit", "succumb", "straggle", "dawdle", "lollygag", "loiter", "delay", "defer", "postpone", "procrastinate", "linger", "hesitate", "prolong", "drag"]
    equal_references_sg = ["match", "equal", "tie", "correspond", "conform", "agree", "harmonize", "coordinate", "comply", "fit", "parallel", "resemble", "mirror", "emulate", "equilibrate", "balance", "counterbalance", "offset", "compensate"]

    doc = nlp(sentence)

    bigger_list = []
    smaller_list = []
    equal_list = []

    # search for all verbs and compare their lemmas with all the synonyms of each of the previous references. Assign a label accordingly
    for token in doc:
        if token.pos_ == "VERB":

            for lemma in token.lemma_.split('|'):
                synsets = wordnet.synsets(lemma, pos='v')

                for syn in synsets:
                    if any(syn_lemma in bigger_references_sg for syn_lemma in syn.lemma_names()):
                        bigger_list.append({'comparative': [token.text, ">"]})
                        break

                    elif any(syn_lemma in lesser_references_sg for syn_lemma in syn.lemma_names()):
                        smaller_list.append({'comparative': [token.text, "<"]})
                        break

                    elif any(syn_lemma in equal_references_sg for syn_lemma in syn.lemma_names()):
                        equal_list.append({'comparative': [token.text, "="]})
                        break

    final_list = bigger_list + smaller_list + equal_list

    if final_list:
        return final_list
    else:
        return []
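
# Illustrative expectation (depends on spaCy's POS tagging and on WordNet's lemma inventory):
#   single_verb_comptives("the quake surpassed the magnitude of 5")
#   ->  [{'comparative': ['surpassed', '>']}]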


# helper definitions for 'multiword_verb_comptives'

# Define the multi-word verb lists
bigger_list = ["is a cut above", "is ahead of", "is superior to", "is greater than", "raise the bar", "climb the ladder", "set the standard", "set the pace", "break the mold", "push the envelope", "raise the game", "is a class apart"]
smaller_list = ["fall behind", "is inferior to", "is smaller than", "lag behind", "trail behind", "is second to", "bring up the rear", "lose ground", "bring up the tail end", "fall short", "fall beneath", "fail to measure up", "put off"]
equal_list = ["is in line with", "is equal to", "is on a par with", "is on par with", "is the same as", "is comparable to", "is in sync with", "is in harmony with", "is in step with", "is in tune with", "is in accord with", "is consistent with", "is consonant with", "keep pace with", "keep up with", "is equivalent to", "balance out", "even out"]

# Pre-calculate an embedding for each multi-word verb as the mean of its token vectors
bigger_embeddings = [np.mean([token.vector for token in nlp(verb)], axis=0) for verb in bigger_list]
smaller_embeddings = [np.mean([token.vector for token in nlp(verb)], axis=0) for verb in smaller_list]
equal_embeddings = [np.mean([token.vector for token in nlp(verb)], axis=0) for verb in equal_list]


# Define a function to check if an n-gram is in a multi-word verb list
def check_list(ngram, verb_list):
    """
    Check whether the given n-gram appears in the given multi-word verb list
    """

    return ngram in verb_list


def cosine_sim(a, b):
    """
    Calculate the cosine similarity between two vectors
    """

    return cosine_similarity(a.reshape(1, -1), b.reshape(1, -1))[0][0]
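
# Quick sanity check of the helper:
#   cosine_sim(np.array([1.0, 0.0]), np.array([0.0, 1.0]))  ->  0.0  (orthogonal vectors)
#   cosine_sim(np.array([1.0, 2.0]), np.array([2.0, 4.0]))  ->  1.0  (parallel vectors)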


# we examine the n-grams in decreasing size and, any time we find a match, we "mark" its words, so that smaller
# n-grams will not be matched again (e.g. "is on a par with" would otherwise also match "on a par with", "par with", etc.)

def multiword_verb_comptives(sentence):
    """
    This function takes a sentence and identifies any mention of bigger than, smaller than, equal to, expressed
    as multi-word verbs. Based on three reference lists, it first performs a simple string comparison between each
    of their elements and the n-grams of the input sentence. If there is no match there, it performs the same
    procedure with cosine similarity to identify any similar n-grams.
    """

    # Split the sentence into tokens
    tokens = sentence.split()

    # Initialize a variable to store the running maximum similarity
    max_sim = 0

    # these lists are used to capture any possible reference
    bigger_l = []
    smaller_l = []
    equal_l = []

    # Define a set to keep track of the words of already matched n-grams
    matched_ngrams = set()

    # Iterate through the n-grams of the sentence, starting with the largest n-grams
    for n in range(5, 1, -1):
        for i in range(len(tokens) - n + 1):
            ngram = ' '.join(tokens[i:i+n])

            # Skip n-grams that overlap with an already matched n-gram
            if any(word in matched_ngrams for word in ngram.split()):
                continue

            # Check if the n-gram is in bigger_list
            if check_list(ngram, bigger_list):
                matched_ngrams.update(set(ngram.split()))
                bigger_l.append({"comparative": [ngram, '>']})

            # Check if the n-gram is in smaller_list
            elif check_list(ngram, smaller_list):
                matched_ngrams.update(set(ngram.split()))
                smaller_l.append({"comparative": [ngram, '<']})

            # Check if the n-gram is in equal_list
            elif check_list(ngram, equal_list):
                matched_ngrams.update(set(ngram.split()))
                equal_l.append({"comparative": [ngram, '=']})

            # Otherwise, check if the n-gram is similar to any verb of the three lists using the pre-calculated embeddings
            else:
                ngram_emb = np.mean([token.vector for token in nlp(ngram)], axis=0)

                similarities_bigger = [cosine_sim(ngram_emb, verb_emb) for verb_emb in bigger_embeddings]
                max_sim_bigger = max(similarities_bigger)

                similarities_smaller = [cosine_sim(ngram_emb, verb_emb) for verb_emb in smaller_embeddings]
                max_sim_smaller = max(similarities_smaller)

                similarities_equal = [cosine_sim(ngram_emb, verb_emb) for verb_emb in equal_embeddings]
                max_sim_equal = max(similarities_equal)

                # Determine which list has the maximum similarity value; only n-grams that clear the 0.9
                # threshold are recorded as matches and have their words marked
                if max_sim_bigger > max_sim_smaller and max_sim_bigger > max_sim_equal and max_sim_bigger > max_sim:
                    max_sim = max_sim_bigger
                    if max_sim > 0.9:
                        matched_ngrams.update(set(ngram.split()))
                        bigger_l.append({"comparative": [ngram, '>']})

                elif max_sim_smaller > max_sim_bigger and max_sim_smaller > max_sim_equal and max_sim_smaller > max_sim:
                    max_sim = max_sim_smaller
                    if max_sim > 0.9:
                        matched_ngrams.update(set(ngram.split()))
                        smaller_l.append({"comparative": [ngram, '<']})

                elif max_sim_equal > max_sim_bigger and max_sim_equal > max_sim_smaller and max_sim_equal > max_sim:
                    max_sim = max_sim_equal
                    if max_sim > 0.9:
                        matched_ngrams.update(set(ngram.split()))
                        equal_l.append({"comparative": [ngram, '=']})

    return bigger_l + smaller_l + equal_l
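
# Illustrative expectation for the exact-match path of the matcher above:
#   multiword_verb_comptives("this event is on a par with the 2016 quake")
#   ->  expected to include {'comparative': ['is on a par with', '=']}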


def identify_comparatives(sentence):
    """
    This function combines the results of all the aforementioned techniques (simple and advanced) to identify the bigger than, smaller than, equal to patterns
    """

    # Identify straightforward patterns
    straight_comptives = find_comptives_straight_patterns(sentence)

    # Identify advanced bigger/smaller comparatives
    bigger_smaller_comparatives = identify_bigger_smaller_advanced(sentence)

    # Identify advanced equal-to comparatives
    equal_to_comparatives = find_equal_to_comptives_ngrams(sentence)

    single_verb = single_verb_comptives(sentence)
    multi_verb = multiword_verb_comptives(sentence)

    # gather all the patterns that were captured
    comparatives = straight_comptives + bigger_smaller_comparatives + equal_to_comparatives + single_verb + multi_verb

    # since the different techniques might capture similar patterns, we keep only unique references; more precisely, we keep the first captured item for each distinct phrase
    unique_comparatives = {}

    for item in comparatives:
        if item['comparative'][0] not in unique_comparatives:
            unique_comparatives[item['comparative'][0]] = item

    unique_output = list(unique_comparatives.values())

    return unique_output
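
# Illustrative expectation for the combined pipeline (the detectors may overlap, hence the deduplication by phrase):
#   identify_comparatives("magnitude less than 6.2")
#   ->  typically [{'comparative': ['less than', '<']}]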


def magnitude_binding(sentence):
    """
    Combine the symbol-based and the text-based detectors; the input is accepted only if exactly one
    comparative reference was found in total, otherwise 0 is returned
    """

    comparative_symbols = find_comptives_symbols(sentence)
    comparative_mentions = identify_comparatives(sentence)

    # starting with the symbols, if exactly one was captured
    if len(comparative_symbols) == 1:

        # the rest of the functions must be empty (meaning that there are no other references)
        if len(comparative_mentions) == 0:
            return comparative_symbols

        # a symbol combined with a textual reference is ambiguous
        else:
            return 0

    # in case there is no symbol
    elif len(comparative_symbols) == 0:

        # we need exactly one mention of comparatives
        if len(comparative_mentions) == 1:
            return comparative_mentions

        else:
            return 0

    # case of multiple symbol references
    else:
        return 0
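
# Illustrative expectation: exactly one comparative reference is accepted, anything else yields 0:
#   magnitude_binding("magnitude < 6.2")                     ->  expected [{'comparative': ['symbol', '<']}]
#   magnitude_binding("magnitude < 6.2 and greater than 5")  ->  0  (a symbol plus a textual mention)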


import gradio as gr

title = "Natural Language module Demo for Comparatives identification"
description = "This is a simple demo for demonstration purposes, so that the Serco team has the chance to validate the results of the Natural Language module concerning the comparatives identification while it is in progress"

examples = [
    ["earthquake located in Ishkoshim higher than 5, Tajikistan in May the ninth with magnitude equal to 6.2"],
    ["earthquake located in Ishkoshim, Tajikistan in May the ninth with magnitude < 6.2"],
    ["earthquake located in Ishkoshim that is > than the one in Rome, and < than 8.2"],
    ["earthquake located in Ishkoshim, Tajikistan in May the ninth with magnitude lesser than 6.2"],
    ["earthquake located in Ishkoshim, Tajikistan in May the ninth with magnitude same with 6.2"],
    ["I want an earthquake that happened in Rome during 2016 with a magnitude dallying of 5."],
    ["I want an earthquake that happened in Rome during 2016 and surpassed the magnitude of 5."],
    ["I want an earthquake that happened in Rome during 2016 with a magnitude similar to 5."],
    ["I want an earthquake event that happened in Italy, Rome during 2016 February with a magnitude that was in a par with 5."]
]


gr.Interface(
    fn=magnitude_binding,
    inputs="text",
    outputs="text",
    title=title,
    description=description,
    examples=examples,
    enable_queue=True,
).launch()