namb0010 committed on
Commit cdcef37 · 1 Parent(s): 1aa2306

Upload [uma_namboothiripad]assignment_2 (1).py

[uma_namboothiripad]assignment_2 (1).py ADDED
# -*- coding: utf-8 -*-
"""[Uma Namboothiripad]Assignment_2.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1_sofOjXRDnId49NOup4sdiVS1E_51T-b

Load the dataset below
"""

!pip install -U spacy
# First, install the library that gives us an easy-to-use interface to BERT:
# https://github.com/UKPLab/sentence-transformers/tree/master/sentence_transformers
!pip install -U sentence-transformers

"""I was having issues connecting my CSV file to the Colab notebook, so I ended up connecting it to my Drive."""
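
# The cell above says the data lives on Drive, but the exported script never
# mounts it; a minimal sketch of the standard Colab mount step (an addition,
# not part of the original notebook):
from google.colab import drive
drive.mount('/content/drive')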

import spacy
from spacy.lang.en.stop_words import STOP_WORDS
from string import punctuation
from collections import Counter
from heapq import nlargest

import pandas as pd
from tqdm import tqdm
from sentence_transformers import SentenceTransformer, util
print(pd.__version__)

!pip install -q kaggle

!pip install lightgbm  # installed here but not used later in this notebook

"""Set up Kaggle JSON credentials"""

from google.colab import files
files.upload()  # upload the kaggle.json API token from your Kaggle account page

!mkdir -p ~/.kaggle/
!cp kaggle.json ~/.kaggle/

!chmod 600 ~/.kaggle/kaggle.json

!kaggle datasets list

# Grab the listings and reviews files from the Kaggle dataset. The original
# used the owner/dataset/file form; the CLI expects the file name via -f.
!kaggle datasets download -d hamzafarooq50/hotel-listings-and-reviews -f HotelListInBarcelona__en2019100120191005.csv

!ls

!python -m spacy download en_core_web_sm

!kaggle datasets download --force -d hamzafarooq50/hotel-listings-and-reviews -f hotelReviewsInBarcelona__en2019100120191005.csv

!ls
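
# Note (an addition, not in the original): the Kaggle CLI saves zip archives;
# passing --unzip extracts them in place, e.g.
# !kaggle datasets download --unzip -d hamzafarooq50/hotel-listings-and-reviews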
nlp = spacy.load("en_core_web_sm")

import re

import nltk
nltk.download('punkt')
from nltk.tokenize import word_tokenize

nltk.download('stopwords')
from nltk.corpus import stopwords

nltk.download('wordnet')
nltk.download('omw-1.4')
from nltk.stem import WordNetLemmatizer

import os
from spacy import displacy

text = """Example text"""
#text = "I really hope that France does not win the World Cup and Morocco makes it to the finals"
doc = nlp(text)
sentence_spans = list(doc.sents)
displacy.render(doc, jupyter=True, style="ent")
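
# Optional check (an addition, not in the original notebook): list the
# entities displacy just rendered, as (text, label) pairs.
for ent in doc.ents:
    print(ent.text, ent.label_)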

stopwords = list(STOP_WORDS)  # note: this shadows nltk.corpus.stopwords imported above
punctuation = punctuation + '\n'

import scipy.spatial
import pickle as pkl

!pip install -U sentence-transformers

from sentence_transformers import SentenceTransformer

# The original defined the embedder twice; the later assignment wins, so
# bert-base-nli-mean-tokens is what actually encodes the corpus below.
#embedder = SentenceTransformer('all-MiniLM-L6-v2')
embedder = SentenceTransformer('bert-base-nli-mean-tokens')
df = pd.read_csv('/content/drive/My Drive/Colab Notebooks/Assignment#2/HotelListInBarcelona__en2019100120191005.csv',
                 sep=",", encoding='cp1252')

!kaggle datasets download --force -d hamzafarooq50/hotel-listings-and-reviews

df.head()

df['hotel_name'].value_counts()

df['hotel_name'].drop_duplicates()

# Collapse the per-row feature strings into one string per hotel. Joining on
# a space (the original used '') keeps adjacent words from fusing together.
df_combined = df.sort_values(['hotel_name']).groupby('hotel_name', sort=False).hotel_features.apply(' '.join).reset_index(name='hotel_features')

df_combined.head().T

# Keep only alphanumerics and whitespace. The original pattern used A-z,
# which also matches a few punctuation characters between 'Z' and 'a'.
df_combined['hotel_features'] = df_combined['hotel_features'].apply(lambda x: re.sub(r'[^a-zA-Z0-9\s]', '', x))

def lower_case(input_str):
    return input_str.lower()

df_combined['hotel_features'] = df_combined['hotel_features'].apply(lambda x: lower_case(x))

df = df_combined

# Map each hotel's combined feature text back to its hotel name.
df_sentences = df_combined.set_index("hotel_features")
df_sentences = df_sentences["hotel_name"].to_dict()
df_sentences_list = list(df_sentences.keys())
len(df_sentences_list)

list(df_sentences.keys())[:5]

df_sentences_list = [str(d) for d in tqdm(df_sentences_list)]

# Corpus with example sentences
corpus = df_sentences_list
corpus_embeddings = embedder.encode(corpus, show_progress_bar=True)

corpus_embeddings[0]
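
# Quick sanity check (an addition, not in the original): one fixed-length
# vector per corpus entry.
print(len(corpus), corpus_embeddings.shape)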

queries = ['Hotel near tourist locations and with free WIFI']
query_embeddings = embedder.encode(queries, show_progress_bar=True)

import torch

# Query sentences:
queries = ['Hotel at least 10 minutes away from sagrada familia']

# Find the closest 3 sentences of the corpus for each query sentence based on cosine similarity
top_k = min(3, len(corpus))
for query in queries:
    query_embedding = embedder.encode(query, convert_to_tensor=True)

    # Use cosine similarity and torch.topk to find the top_k highest scores
    cos_scores = util.pytorch_cos_sim(query_embedding, corpus_embeddings)[0]
    top_results = torch.topk(cos_scores, k=top_k)

    print("\n\n======================\n\n")
    print("Query:", query)
    print("\nTop 3 most similar sentences in corpus:")

    for score, idx in zip(top_results[0], top_results[1]):
        print(corpus[idx], "(Score: {:.4f})".format(score))
        # Look up which hotel this feature text belongs to.
        row_dict = df.loc[df['hotel_features'] == corpus[idx]]
        print("hotel_name: ", row_dict['hotel_name'], "\n")
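
# Note (an addition, not in the original): util.cos_sim is the current name
# for util.pytorch_cos_sim and returns the same score matrix:
# cos_scores = util.cos_sim(query_embedding, corpus_embeddings)[0]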

# Re-encode the corpus with a multilingual paraphrase model for comparison.
model = SentenceTransformer('sentence-transformers/paraphrase-xlm-r-multilingual-v1')
embeddings = model.encode(corpus)
#print(embeddings)

query_embedding.shape

# Query sentences:
queries = ['Hotel at least 10 minutes away from good food',
           'quiet']

# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity
top_k = min(5, len(corpus))
for query in queries:
    query_embedding = model.encode(query, convert_to_tensor=True)

    # Use cosine similarity and torch.topk to find the 5 highest scores
    cos_scores = util.pytorch_cos_sim(query_embedding, embeddings)[0]
    top_results = torch.topk(cos_scores, k=top_k)

    print("\n\n======================\n\n")
    print("Query:", query)
    print("\nTop 5 most similar sentences in corpus:")

    for score, idx in zip(top_results[0], top_results[1]):
        print(corpus[idx], "(Score: {:.4f})".format(score))
        row_dict = df.loc[df['hotel_features'] == corpus[idx]]
        print("hotel_name: ", row_dict['hotel_name'], "\n")

df

# util.semantic_search wraps the cosine-similarity + top-k logic above;
# query_embedding here is left over from the last query in the loop.
hits = util.semantic_search(query_embedding, embeddings, top_k=5)
hits = hits[0]  # Get the hits for the first query
for hit in hits:
    print(hit)
    print("(Score: {:.4f})".format(hit['score']))
    print(corpus[hit['corpus_id']])
    row_dict = df.loc[df['hotel_features'] == corpus[hit['corpus_id']]]
    print("hotel_name: ", row_dict['hotel_name'], "\n")
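
# pickle was imported above but never used; a minimal sketch (an addition, not
# in the original) of caching the corpus embeddings so they can be reloaded
# later without re-encoding:
with open('corpus_embeddings.pkl', 'wb') as f:
    pkl.dump(corpus_embeddings, f)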

!pip freeze > requirements.txt