namb0010 committed on
Commit 1aa2306 · 1 Parent(s): efe701d

Delete [uma_namboothiripad]assignment_2.py

Files changed (1)
  1. [uma_namboothiripad]assignment_2.py +0 -226
[uma_namboothiripad]assignment_2.py DELETED
@@ -1,226 +0,0 @@
- # -*- coding: utf-8 -*-
- """[Uma Namboothiripad]Assignment_2.ipynb
-
- Automatically generated by Colaboratory.
-
- Original file is located at
-     https://colab.research.google.com/drive/1_sofOjXRDnId49NOup4sdiVS1E_51T-b
-
- Load the dataset below
- """
-
- !pip install -U spacy
- # Install the library that provides an easy-to-use interface to BERT-style
- # sentence embeddings:
- # https://github.com/UKPLab/sentence-transformers/tree/master/sentence_transformers
- !pip install -U sentence-transformers
-
- """I was having issues uploading my CSV file to the Colab notebook, so I mounted my Google Drive instead."""
-
- import spacy
- from spacy.lang.en.stop_words import STOP_WORDS
- from string import punctuation
- from collections import Counter
- from heapq import nlargest
- from google.colab import drive
- drive.mount('/content/drive')
-
- import pandas as pd
- from tqdm import tqdm
- from sentence_transformers import SentenceTransformer, util
-
- !pip install -q kaggle
-
- # lightgbm is installed here but never used later in the notebook.
- !pip install lightgbm
-
- """Set up Kaggle JSON credentials"""
-
- # kaggle.json is the API token downloaded from the Kaggle account page;
- # the CLI reads it from ~/.kaggle/kaggle.json.
- from google.colab import files
- files.upload()
-
- !mkdir -p ~/.kaggle/
- !cp kaggle.json ~/.kaggle/
-
- # Restrict permissions so the Kaggle CLI does not warn about a world-readable token.
- !chmod 600 ~/.kaggle/kaggle.json
-
- !kaggle datasets list
-
- # The dataset slug is owner/dataset; a single file is selected with -f.
- !kaggle datasets download -d hamzafarooq50/hotel-listings-and-reviews -f HotelListInBarcelona__en2019100120191005.csv
-
- !ls
-
- !python -m spacy download en_core_web_sm
-
- !kaggle datasets download --force -d hamzafarooq50/hotel-listings-and-reviews -f hotelReviewsInBarcelona__en2019100120191005.csv
-
- !ls
-
- nlp = spacy.load("en_core_web_sm")
-
- import re
-
- import nltk
- nltk.download('punkt')
-
- from nltk.tokenize import word_tokenize
-
- nltk.download('stopwords')
-
- from nltk.corpus import stopwords
-
- nltk.download('wordnet')
- nltk.download('omw-1.4')
- from nltk.stem import WordNetLemmatizer
-
- import os
- import spacy
- nlp = spacy.load("en_core_web_sm")
- from spacy import displacy
-
- text = """Example text"""
- #text = "I really hope that France does not win the World Cup and Morocco makes it to the finals"
- doc = nlp(text)
- sentence_spans = list(doc.sents)
- displacy.render(doc, jupyter=True, style="ent")
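-
- # displacy's "ent" style visualizes doc.ents; the same entities can also be
- # read programmatically (illustrative one-liner, not in the original notebook):
- [(ent.text, ent.label_) for ent in doc.ents]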
-
- # Note: this rebinds the name `stopwords`, shadowing the nltk.corpus module
- # imported earlier.
- stopwords = list(STOP_WORDS)
- from string import punctuation
- punctuation = punctuation + '\n'
-
- import pandas as pd
-
- import scipy.spatial
- import pickle as pkl
-
- !pip install -U sentence-transformers
-
- from sentence_transformers import SentenceTransformer
-
- # Two embedders were tried; the later assignment wins, so
- # 'bert-base-nli-mean-tokens' is the one used for the corpus below.
- #embedder = SentenceTransformer('all-MiniLM-L6-v2')
- embedder = SentenceTransformer('bert-base-nli-mean-tokens')
-
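- # The choice matters downstream (illustrative check, not in the original
- # notebook): all-MiniLM-L6-v2 produces 384-dimensional vectors and
- # bert-base-nli-mean-tokens 768-dimensional ones, so corpus and query
- # embeddings must come from the same embedder.
- embedder.get_sentence_embedding_dimension()
-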
- df = pd.read_csv('/content/drive/My Drive/Colab Notebooks/Assignment#2/HotelListInBarcelona__en2019100120191005.csv', sep=",", encoding='cp1252')
-
- !kaggle datasets download --force -d hamzafarooq50/hotel-listings-and-reviews
-
- df.head()
-
- df['hotel_name'].value_counts()
-
- df['hotel_name'].drop_duplicates()
-
- # Combine all feature strings for each hotel into a single document.
- df_combined = df.sort_values(['hotel_name']).groupby('hotel_name', sort=False).hotel_features.apply(''.join).reset_index(name='hotel_features')
-
- df_combined.head().T
-
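- # Minimal illustration of that groupby/apply with made-up data (not from the
- # dataset): every row belonging to a hotel collapses into one document.
- pd.DataFrame({'hotel_name': ['A', 'A', 'B'],
-               'hotel_features': ['wifi ', 'pool ', 'spa ']}) \
-     .groupby('hotel_name', sort=False).hotel_features.apply(''.join) \
-     .reset_index(name='hotel_features')  # rows: ('A', 'wifi pool '), ('B', 'spa ')
-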
- import re
-
- # Strip everything except letters, digits, and whitespace.
- df_combined['hotel_features'] = df_combined['hotel_features'].apply(lambda x: re.sub(r'[^a-zA-Z0-9\s]', '', x))
-
- def lower_case(input_str):
-     input_str = input_str.lower()
-     return input_str
-
- df_combined['hotel_features'] = df_combined['hotel_features'].apply(lambda x: lower_case(x))
-
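- # Quick sanity check of the two cleaning steps on a made-up string
- # (illustrative, not from the dataset):
- lower_case(re.sub(r'[^a-zA-Z0-9\s]', '', 'Free Wi-Fi, 24h desk!'))  # -> 'free wifi 24h desk'
-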
- df = df_combined
-
- # Build a lookup from each combined feature document to its hotel name.
- df_sentences = df_combined.set_index("hotel_features")
- df_sentences = df_sentences["hotel_name"].to_dict()
- df_sentences_list = list(df_sentences.keys())
- len(df_sentences_list)
-
- list(df_sentences.keys())[:5]
-
- df_sentences_list = [str(d) for d in tqdm(df_sentences_list)]
-
- # Corpus: one combined feature document per hotel
- corpus = df_sentences_list
- corpus_embeddings = embedder.encode(corpus, show_progress_bar=True)
-
- corpus_embeddings[0]
-
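- # encode() returns one fixed-length vector per document: an array of shape
- # (len(corpus), dim), where dim is set by the embedder chosen above.
- corpus_embeddings.shape
-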
- queries = ['Hotel near tourist locations and with free WIFI']
- # (This batch-encoded result is not reused below; the search loop re-encodes
- # each query individually.)
- query_embeddings = embedder.encode(queries, show_progress_bar=True)
-
- import torch
-
- # Query sentences:
- queries = ['Hotel at least 10 minutes away from sagrada familia']
-
- # Find the closest 3 sentences of the corpus for each query sentence
- # based on cosine similarity.
- top_k = min(3, len(corpus))
- for query in queries:
-     query_embedding = embedder.encode(query, convert_to_tensor=True)
-
-     # Use cosine similarity and torch.topk to find the top_k highest scores.
-     cos_scores = util.pytorch_cos_sim(query_embedding, corpus_embeddings)[0]
-     top_results = torch.topk(cos_scores, k=top_k)
-
-     print("\n\n======================\n\n")
-     print("Query:", query)
-     print("\nTop 3 most similar sentences in corpus:")
-
-     for score, idx in zip(top_results[0], top_results[1]):
-         print(corpus[idx], "(Score: {:.4f})".format(score))
-         row_dict = df.loc[df['hotel_features'] == corpus[idx]]
-         print("hotel_name:", row_dict['hotel_name'], "\n")
-
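- # What util.pytorch_cos_sim computes, spelled out for two corpus vectors
- # (a sketch, assuming the corpus has at least two entries):
- # cos(a, b) = a.b / (||a|| * ||b||)
- a = torch.tensor(corpus_embeddings[0])
- b = torch.tensor(corpus_embeddings[1])
- torch.dot(a, b) / (a.norm() * b.norm())
-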
- # Re-encode the corpus with a multilingual paraphrase model for comparison.
- model = SentenceTransformer('sentence-transformers/paraphrase-xlm-r-multilingual-v1')
- embeddings = model.encode(corpus)
- #print(embeddings)
-
- query_embedding.shape
-
- # Query sentences:
- queries = ['Hotel at least 10 minutes away from good food',
-            'quiet'
-            ]
-
- # Find the closest 5 sentences of the corpus for each query sentence
- # based on cosine similarity.
- top_k = min(5, len(corpus))
- for query in queries:
-     query_embedding = model.encode(query, convert_to_tensor=True)
-
-     # Use cosine similarity and torch.topk to find the top_k highest scores.
-     cos_scores = util.pytorch_cos_sim(query_embedding, embeddings)[0]
-     top_results = torch.topk(cos_scores, k=top_k)
-
-     print("\n\n======================\n\n")
-     print("Query:", query)
-     print("\nTop 5 most similar sentences in corpus:")
-
-     for score, idx in zip(top_results[0], top_results[1]):
-         print(corpus[idx], "(Score: {:.4f})".format(score))
-         row_dict = df.loc[df['hotel_features'] == corpus[idx]]
-         print("hotel_name:", row_dict['hotel_name'], "\n")
-
- df
-
- hits = util.semantic_search(query_embedding, embeddings, top_k=5)
- hits = hits[0]  # hits for the first query
- for hit in hits:
-     print("(Score: {:.4f})".format(hit['score']))
-     print(corpus[hit['corpus_id']])
-     row_dict = df.loc[df['hotel_features'] == corpus[hit['corpus_id']]]
-     print("hotel_name:", row_dict['hotel_name'], "\n")
-
- !pip freeze > requirements.txt
-