path: string (length 13-17)
screenshot_names: sequence (length 1-873)
code: string (length 0-40.4k)
cell_type: string (1 class)
73069645/cell_11
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/airbnbnewyork/listings.csv') data.shape data.isnull().sum() data.drop(['id', 'host_name', 'last_review'], axis=1, inplace=True) data.fillna({'reviews_per_month': 0}, inplace=True) data.reviews_per_month.isnull().sum()
code
73069645/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/airbnbnewyork/listings.csv') data.shape data.isnull().sum() data.drop(['id', 'host_name', 'last_review'], axis=1, inplace=True) data.fillna({'reviews_per_month': 0}, inplace=True) data.reviews_per_month.isnull().sum() data.neighbourhood_group.unique() len(data.neighbourhood.unique()) data.room_type.unique() #we will skip first column for now and begin from host_id #let's see what hosts (IDs) have the most listings on Airbnb platform and taking advantage of this service top_host=data.host_id.value_counts().head(10) top_host top_host_check = data.calculated_host_listings_count.max() top_host_check
code
73069645/cell_32
[ "text_plain_output_1.png" ]
import imageio import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns data = pd.read_csv('../input/airbnbnewyork/listings.csv') data.shape data.isnull().sum() data.drop(['id', 'host_name', 'last_review'], axis=1, inplace=True) data.fillna({'reviews_per_month': 0}, inplace=True) data.reviews_per_month.isnull().sum() data.neighbourhood_group.unique() len(data.neighbourhood.unique()) data.room_type.unique() #we will skip first column for now and begin from host_id #let's see what hosts (IDs) have the most listings on Airbnb platform and taking advantage of this service top_host=data.host_id.value_counts().head(10) top_host top_host_check = data.calculated_host_listings_count.max() top_host_check sns.set(rc={'figure.figsize': (10, 8)}) sns.set_style('white') top_host_df = pd.DataFrame(top_host) top_host_df.reset_index(inplace=True) top_host_df.rename(columns={'index': 'Host_ID', 'host_id': 'P_Count'}, inplace=True) top_host_df viz_1=sns.barplot(x="Host_ID", y="P_Count", data=top_host_df, palette='Blues_d') viz_1.set_title('Hosts with the most listings in NYC') viz_1.set_ylabel('Count of listings') viz_1.set_xlabel('Host IDs') viz_1.set_xticklabels(viz_1.get_xticklabels(), rotation=45) sub_1 = data.loc[data['neighbourhood_group'] == 'Brooklyn'] price_sub1 = sub_1[['price']] sub_2 = data.loc[data['neighbourhood_group'] == 'Manhattan'] price_sub2 = sub_2[['price']] sub_3 = data.loc[data['neighbourhood_group'] == 'Queens'] price_sub3 = sub_3[['price']] sub_4 = data.loc[data['neighbourhood_group'] == 'Staten Island'] price_sub4 = sub_4[['price']] sub_5 = data.loc[data['neighbourhood_group'] == 'Bronx'] price_sub5 = sub_5[['price']] price_list_by_n = [price_sub1, price_sub2, price_sub3, price_sub4, price_sub5] #we can see from our statistical table that we have some extreme values, therefore we need to remove them for the sake of a better visualization #creating a sub-dataframe with no extreme values / less than 500 sub_6=data[data.price < 500] #using violinplot to showcase density and distribtuion of prices viz_2=sns.violinplot(data=sub_6, x='neighbourhood_group', y='price') viz_2.set_title('Density and distribution of prices for each neighberhood_group') #let's what we can do with our given longtitude and latitude columns #let's see how scatterplot will come out viz_4=sub_6.plot(kind='scatter', x='longitude', y='latitude', label='availability_365', c='price', cmap=plt.get_cmap('jet'), colorbar=True, alpha=0.4, figsize=(10,8)) viz_4.legend() import urllib plt.figure(figsize=(10, 8)) import imageio nyc_img = imageio.imread('https://upload.wikimedia.org/wikipedia/commons/e/ec/Neighbourhoods_New_York_City_Map.PNG') plt.imshow(nyc_img, zorder=0, extent=[-74.258, -73.7, 40.49, 40.92]) ax = plt.gca() sub_6.plot(kind='scatter', x='longitude', y='latitude', label='availability_365', c='price', ax=ax, cmap=plt.get_cmap('jet'), colorbar=True, alpha=0.4, zorder=5) plt.legend() plt.show()
code
73069645/cell_28
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns data = pd.read_csv('../input/airbnbnewyork/listings.csv') data.shape data.isnull().sum() data.drop(['id', 'host_name', 'last_review'], axis=1, inplace=True) data.fillna({'reviews_per_month': 0}, inplace=True) data.reviews_per_month.isnull().sum() data.neighbourhood_group.unique() len(data.neighbourhood.unique()) data.room_type.unique() #we will skip first column for now and begin from host_id #let's see what hosts (IDs) have the most listings on Airbnb platform and taking advantage of this service top_host=data.host_id.value_counts().head(10) top_host top_host_check = data.calculated_host_listings_count.max() top_host_check sns.set(rc={'figure.figsize': (10, 8)}) sns.set_style('white') top_host_df = pd.DataFrame(top_host) top_host_df.reset_index(inplace=True) top_host_df.rename(columns={'index': 'Host_ID', 'host_id': 'P_Count'}, inplace=True) top_host_df viz_1=sns.barplot(x="Host_ID", y="P_Count", data=top_host_df, palette='Blues_d') viz_1.set_title('Hosts with the most listings in NYC') viz_1.set_ylabel('Count of listings') viz_1.set_xlabel('Host IDs') viz_1.set_xticklabels(viz_1.get_xticklabels(), rotation=45) sub_1 = data.loc[data['neighbourhood_group'] == 'Brooklyn'] price_sub1 = sub_1[['price']] sub_2 = data.loc[data['neighbourhood_group'] == 'Manhattan'] price_sub2 = sub_2[['price']] sub_3 = data.loc[data['neighbourhood_group'] == 'Queens'] price_sub3 = sub_3[['price']] sub_4 = data.loc[data['neighbourhood_group'] == 'Staten Island'] price_sub4 = sub_4[['price']] sub_5 = data.loc[data['neighbourhood_group'] == 'Bronx'] price_sub5 = sub_5[['price']] price_list_by_n = [price_sub1, price_sub2, price_sub3, price_sub4, price_sub5] #we can see from our statistical table that we have some extreme values, therefore we need to remove them for the sake of a better visualization #creating a sub-dataframe with no extreme values / less than 500 sub_6=data[data.price < 500] #using violinplot to showcase density and distribtuion of prices viz_2=sns.violinplot(data=sub_6, x='neighbourhood_group', y='price') viz_2.set_title('Density and distribution of prices for each neighberhood_group') sub_7 = data.loc[data['neighbourhood'].isin(['Williamsburg', 'Bedford-Stuyvesant', 'Harlem', 'Bushwick', 'Upper West Side', "Hell's Kitchen", 'East Village', 'Upper East Side', 'Crown Heights', 'Midtown'])] viz_3 = sns.catplot(x='neighbourhood', hue='neighbourhood_group', col='room_type', data=sub_7, kind='count') viz_3.set_xticklabels(rotation=90)
code
73069645/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/airbnbnewyork/listings.csv') data.shape data.isnull().sum()
code
73069645/cell_15
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/airbnbnewyork/listings.csv') data.shape data.isnull().sum() data.drop(['id', 'host_name', 'last_review'], axis=1, inplace=True) data.fillna({'reviews_per_month': 0}, inplace=True) data.reviews_per_month.isnull().sum() data.neighbourhood_group.unique() len(data.neighbourhood.unique()) data.room_type.unique()
code
73069645/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/airbnbnewyork/listings.csv') data.shape data.isnull().sum() data.drop(['id', 'host_name', 'last_review'], axis=1, inplace=True) data.fillna({'reviews_per_month': 0}, inplace=True) data.reviews_per_month.isnull().sum() data.neighbourhood_group.unique() len(data.neighbourhood.unique()) data.room_type.unique() top_host = data.host_id.value_counts().head(10) top_host
code
73069645/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/airbnbnewyork/listings.csv') data.shape data.isnull().sum() data.drop(['id', 'host_name', 'last_review'], axis=1, inplace=True) data.fillna({'reviews_per_month': 0}, inplace=True) data.reviews_per_month.isnull().sum() data.neighbourhood_group.unique() len(data.neighbourhood.unique()) data.room_type.unique() #we will skip first column for now and begin from host_id #let's see what hosts (IDs) have the most listings on Airbnb platform and taking advantage of this service top_host=data.host_id.value_counts().head(10) top_host top_host_check = data.calculated_host_listings_count.max() top_host_check sub_1 = data.loc[data['neighbourhood_group'] == 'Brooklyn'] price_sub1 = sub_1[['price']] sub_2 = data.loc[data['neighbourhood_group'] == 'Manhattan'] price_sub2 = sub_2[['price']] sub_3 = data.loc[data['neighbourhood_group'] == 'Queens'] price_sub3 = sub_3[['price']] sub_4 = data.loc[data['neighbourhood_group'] == 'Staten Island'] price_sub4 = sub_4[['price']] sub_5 = data.loc[data['neighbourhood_group'] == 'Bronx'] price_sub5 = sub_5[['price']] price_list_by_n = [price_sub1, price_sub2, price_sub3, price_sub4, price_sub5] p_l_b_n_2 = [] nei_list = ['Brooklyn', 'Manhattan', 'Queens', 'Staten Island', 'Bronx'] for x in price_list_by_n: i = x.describe(percentiles=[0.25, 0.5, 0.75]) i = i.iloc[3:] i.reset_index(inplace=True) i.rename(columns={'index': 'Stats'}, inplace=True) p_l_b_n_2.append(i) p_l_b_n_2[0].rename(columns={'price': nei_list[0]}, inplace=True) p_l_b_n_2[1].rename(columns={'price': nei_list[1]}, inplace=True) p_l_b_n_2[2].rename(columns={'price': nei_list[2]}, inplace=True) p_l_b_n_2[3].rename(columns={'price': nei_list[3]}, inplace=True) p_l_b_n_2[4].rename(columns={'price': nei_list[4]}, inplace=True) stat_df = p_l_b_n_2 stat_df = [df.set_index('Stats') for df in stat_df] stat_df = stat_df[0].join(stat_df[1:]) stat_df
code
73069645/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/airbnbnewyork/listings.csv') data.shape data.isnull().sum() data.drop(['id', 'host_name', 'last_review'], axis=1, inplace=True) data.fillna({'reviews_per_month': 0}, inplace=True) data.reviews_per_month.isnull().sum() data.neighbourhood_group.unique() len(data.neighbourhood.unique())
code
73069645/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/airbnbnewyork/listings.csv') data.shape data.isnull().sum() data.drop(['id', 'host_name', 'last_review'], axis=1, inplace=True) data.head(3)
code
73069645/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns data = pd.read_csv('../input/airbnbnewyork/listings.csv') data.shape data.isnull().sum() data.drop(['id', 'host_name', 'last_review'], axis=1, inplace=True) data.fillna({'reviews_per_month': 0}, inplace=True) data.reviews_per_month.isnull().sum() data.neighbourhood_group.unique() len(data.neighbourhood.unique()) data.room_type.unique() #we will skip first column for now and begin from host_id #let's see what hosts (IDs) have the most listings on Airbnb platform and taking advantage of this service top_host=data.host_id.value_counts().head(10) top_host top_host_check = data.calculated_host_listings_count.max() top_host_check sns.set(rc={'figure.figsize': (10, 8)}) sns.set_style('white') top_host_df = pd.DataFrame(top_host) top_host_df.reset_index(inplace=True) top_host_df.rename(columns={'index': 'Host_ID', 'host_id': 'P_Count'}, inplace=True) top_host_df viz_1=sns.barplot(x="Host_ID", y="P_Count", data=top_host_df, palette='Blues_d') viz_1.set_title('Hosts with the most listings in NYC') viz_1.set_ylabel('Count of listings') viz_1.set_xlabel('Host IDs') viz_1.set_xticklabels(viz_1.get_xticklabels(), rotation=45) sub_1 = data.loc[data['neighbourhood_group'] == 'Brooklyn'] price_sub1 = sub_1[['price']] sub_2 = data.loc[data['neighbourhood_group'] == 'Manhattan'] price_sub2 = sub_2[['price']] sub_3 = data.loc[data['neighbourhood_group'] == 'Queens'] price_sub3 = sub_3[['price']] sub_4 = data.loc[data['neighbourhood_group'] == 'Staten Island'] price_sub4 = sub_4[['price']] sub_5 = data.loc[data['neighbourhood_group'] == 'Bronx'] price_sub5 = sub_5[['price']] price_list_by_n = [price_sub1, price_sub2, price_sub3, price_sub4, price_sub5] #we can see from our statistical table that we have some extreme values, therefore we need to remove them for the sake of a better visualization #creating a sub-dataframe with no extreme values / less than 500 sub_6=data[data.price < 500] #using violinplot to showcase density and distribtuion of prices viz_2=sns.violinplot(data=sub_6, x='neighbourhood_group', y='price') viz_2.set_title('Density and distribution of prices for each neighberhood_group') data.neighbourhood.value_counts().head(10)
code
73069645/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/airbnbnewyork/listings.csv') data.head()
code
90147386/cell_21
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import re import seaborn as sns import string df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() analysis_df = df[['text', 'label']] comment_len = pd.Series([len(comment.split()) for comment in df['text']]) df['Length'] = df.text.str.split().apply(len) df['Length'] = df.text.str.split().apply(len) comments = analysis_df.text def removeURL(comments): url_pattern = re.compile('https?://\\S+|www\\.\\S+') comments = url_pattern.sub('', comments) return comments comments = comments.apply(lambda x: removeURL(x)) def removePunctuation(comments): punctuationFree = ''.join([i for i in comments if i not in string.punctuation]) return punctuationFree comments = comments.apply(lambda x: removePunctuation(x)) comments = comments.str.lower() print('Hasil Case Folding : \n') print(comments) print('\n\n\n')
code
90147386/cell_13
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from PIL import Image from wordcloud import WordCloud,STOPWORDS,ImageColorGenerator import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() analysis_df = df[['text', 'label']] comment_len = pd.Series([len(comment.split()) for comment in df['text']]) df['Length'] = df.text.str.split().apply(len) df['Length'] = df.text.str.split().apply(len) from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator from PIL import Image def create_wordcloud(text, path): stopwords = set(STOPWORDS) wc = WordCloud(background_color='white', max_words=3000, stopwords=stopwords, random_state=42, width=900, height=500, repeat=True) wc.generate(str(text)) wc.to_file(path) print('Word Cloud Saved Successfully') path = path display(Image.open(path)) plt.figure(figsize=(16, 5), dpi=80) create_wordcloud(analysis_df['text'].values, 'all.png')
code
90147386/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() comment_len = pd.Series([len(comment.split()) for comment in df['text']]) plt.figure(figsize=(8, 5)) comment_len.plot(kind='box') plt.ylabel('Comment Length')
code
90147386/cell_25
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.text import Tokenizer import matplotlib.pyplot as plt import pandas as pd import re import seaborn as sns import string df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() analysis_df = df[['text', 'label']] comment_len = pd.Series([len(comment.split()) for comment in df['text']]) df['Length'] = df.text.str.split().apply(len) df['Length'] = df.text.str.split().apply(len) comments = analysis_df.text def removeURL(comments): url_pattern = re.compile('https?://\\S+|www\\.\\S+') comments = url_pattern.sub('', comments) return comments comments = comments.apply(lambda x: removeURL(x)) def removePunctuation(comments): punctuationFree = ''.join([i for i in comments if i not in string.punctuation]) return punctuationFree comments = comments.apply(lambda x: removePunctuation(x)) comments = comments.str.lower() tokenizer = Tokenizer() tokenizer.fit_on_texts(comments) encoded_comments = tokenizer.texts_to_sequences(comments) print(encoded_comments)
code
90147386/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts()
code
90147386/cell_20
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.preprocessing import LabelEncoder from tensorflow.keras.utils import to_categorical import matplotlib.pyplot as plt import pandas as pd import re import seaborn as sns import string df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() analysis_df = df[['text', 'label']] comment_len = pd.Series([len(comment.split()) for comment in df['text']]) df['Length'] = df.text.str.split().apply(len) df['Length'] = df.text.str.split().apply(len) comments = analysis_df.text def removeURL(comments): url_pattern = re.compile('https?://\\S+|www\\.\\S+') comments = url_pattern.sub('', comments) return comments comments = comments.apply(lambda x: removeURL(x)) def removePunctuation(comments): punctuationFree = ''.join([i for i in comments if i not in string.punctuation]) return punctuationFree comments = comments.apply(lambda x: removePunctuation(x)) y = df['label'] label_encoder = LabelEncoder() y = label_encoder.fit_transform(y) y = to_categorical(y) y
code
90147386/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() analysis_df = df[['text', 'label']] print(analysis_df.shape) analysis_df.head(5)
code
90147386/cell_29
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.utils import to_categorical import matplotlib.pyplot as plt import pandas as pd import re import seaborn as sns import string df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() analysis_df = df[['text', 'label']] comment_len = pd.Series([len(comment.split()) for comment in df['text']]) df['Length'] = df.text.str.split().apply(len) df['Length'] = df.text.str.split().apply(len) comments = analysis_df.text def removeURL(comments): url_pattern = re.compile('https?://\\S+|www\\.\\S+') comments = url_pattern.sub('', comments) return comments comments = comments.apply(lambda x: removeURL(x)) def removePunctuation(comments): punctuationFree = ''.join([i for i in comments if i not in string.punctuation]) return punctuationFree comments = comments.apply(lambda x: removePunctuation(x)) y = df['label'] label_encoder = LabelEncoder() y = label_encoder.fit_transform(y) y = to_categorical(y) y comments = comments.str.lower() tokenizer = Tokenizer() tokenizer.fit_on_texts(comments) encoded_comments = tokenizer.texts_to_sequences(comments) padded_sequence = pad_sequences(encoded_comments, maxlen=200, padding='post') X = padded_sequence print('Shape of X is ', X.shape) print('Shape of y is', y.shape)
code
90147386/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() comment_len = pd.Series([len(comment.split()) for comment in df['text']]) df['Length'] = df.text.str.split().apply(len) plt.figure(figsize=(10, 5)) sns.histplot(df[df['label'] == 'Non_CB']['Length'], color='g') plt.title('Distribution of Tweet Length for not_cyberbullying') display(df.Length[df['label'] == 'Non_CB'].describe())
code
90147386/cell_19
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import re import seaborn as sns df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() analysis_df = df[['text', 'label']] comment_len = pd.Series([len(comment.split()) for comment in df['text']]) df['Length'] = df.text.str.split().apply(len) df['Length'] = df.text.str.split().apply(len) comments = analysis_df.text def removeURL(comments): url_pattern = re.compile('https?://\\S+|www\\.\\S+') comments = url_pattern.sub('', comments) return comments comments = comments.apply(lambda x: removeURL(x)) analysis_df.columns analysis_df['label'].value_counts()
code
90147386/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import re import seaborn as sns df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() analysis_df = df[['text', 'label']] comment_len = pd.Series([len(comment.split()) for comment in df['text']]) df['Length'] = df.text.str.split().apply(len) df['Length'] = df.text.str.split().apply(len) comments = analysis_df.text def removeURL(comments): url_pattern = re.compile('https?://\\S+|www\\.\\S+') comments = url_pattern.sub('', comments) return comments comments = comments.apply(lambda x: removeURL(x)) analysis_df.columns
code
90147386/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() plt.figure(figsize=(10, 10)) sns.countplot(df.label, palette='mako')
code
90147386/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
from PIL import Image from wordcloud import WordCloud,STOPWORDS,ImageColorGenerator import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() analysis_df = df[['text', 'label']] analysis_df_negative = analysis_df[analysis_df['label'] == 'CB'] analysis_df_positive = analysis_df[analysis_df['label'] == 'Non_CB'] comment_len = pd.Series([len(comment.split()) for comment in df['text']]) df['Length'] = df.text.str.split().apply(len) df['Length'] = df.text.str.split().apply(len) from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator from PIL import Image def create_wordcloud(text, path): stopwords = set(STOPWORDS) wc = WordCloud(background_color='white', max_words=3000, stopwords=stopwords, random_state=42, width=900, height=500, repeat=True) wc.generate(str(text)) wc.to_file(path) path = path create_wordcloud(analysis_df['text'].values, 'all.png') create_wordcloud(analysis_df_negative['text'].values, 'negative.png') plt.figure(figsize=(15, 8), dpi=80) create_wordcloud(analysis_df_positive['text'].values, 'positive.png')
code
90147386/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import re import seaborn as sns df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() analysis_df = df[['text', 'label']] comment_len = pd.Series([len(comment.split()) for comment in df['text']]) df['Length'] = df.text.str.split().apply(len) df['Length'] = df.text.str.split().apply(len) comments = analysis_df.text def removeURL(comments): url_pattern = re.compile('https?://\\S+|www\\.\\S+') comments = url_pattern.sub('', comments) return comments comments = comments.apply(lambda x: removeURL(x)) df.head(20)
code
90147386/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df
code
90147386/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import re import seaborn as sns import string df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() analysis_df = df[['text', 'label']] comment_len = pd.Series([len(comment.split()) for comment in df['text']]) df['Length'] = df.text.str.split().apply(len) df['Length'] = df.text.str.split().apply(len) comments = analysis_df.text def removeURL(comments): url_pattern = re.compile('https?://\\S+|www\\.\\S+') comments = url_pattern.sub('', comments) return comments comments = comments.apply(lambda x: removeURL(x)) def removePunctuation(comments): punctuationFree = ''.join([i for i in comments if i not in string.punctuation]) return punctuationFree comments = comments.apply(lambda x: removePunctuation(x)) df.head(20)
code
90147386/cell_24
[ "text_html_output_1.png" ]
from tensorflow.keras.preprocessing.text import Tokenizer import matplotlib.pyplot as plt import pandas as pd import re import seaborn as sns import string df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() analysis_df = df[['text', 'label']] comment_len = pd.Series([len(comment.split()) for comment in df['text']]) df['Length'] = df.text.str.split().apply(len) df['Length'] = df.text.str.split().apply(len) comments = analysis_df.text def removeURL(comments): url_pattern = re.compile('https?://\\S+|www\\.\\S+') comments = url_pattern.sub('', comments) return comments comments = comments.apply(lambda x: removeURL(x)) def removePunctuation(comments): punctuationFree = ''.join([i for i in comments if i not in string.punctuation]) return punctuationFree comments = comments.apply(lambda x: removePunctuation(x)) comments = comments.str.lower() tokenizer = Tokenizer() tokenizer.fit_on_texts(comments) print(tokenizer.index_word)
code
90147386/cell_14
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from PIL import Image from wordcloud import WordCloud,STOPWORDS,ImageColorGenerator import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() analysis_df = df[['text', 'label']] analysis_df_negative = analysis_df[analysis_df['label'] == 'CB'] analysis_df_positive = analysis_df[analysis_df['label'] == 'Non_CB'] comment_len = pd.Series([len(comment.split()) for comment in df['text']]) df['Length'] = df.text.str.split().apply(len) df['Length'] = df.text.str.split().apply(len) from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator from PIL import Image def create_wordcloud(text, path): stopwords = set(STOPWORDS) wc = WordCloud(background_color='white', max_words=3000, stopwords=stopwords, random_state=42, width=900, height=500, repeat=True) wc.generate(str(text)) wc.to_file(path) path = path create_wordcloud(analysis_df['text'].values, 'all.png') plt.figure(figsize=(15, 8), dpi=80) create_wordcloud(analysis_df_negative['text'].values, 'negative.png')
code
90147386/cell_22
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import re import seaborn as sns import string df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() analysis_df = df[['text', 'label']] comment_len = pd.Series([len(comment.split()) for comment in df['text']]) df['Length'] = df.text.str.split().apply(len) df['Length'] = df.text.str.split().apply(len) comments = analysis_df.text def removeURL(comments): url_pattern = re.compile('https?://\\S+|www\\.\\S+') comments = url_pattern.sub('', comments) return comments comments = comments.apply(lambda x: removeURL(x)) def removePunctuation(comments): punctuationFree = ''.join([i for i in comments if i not in string.punctuation]) return punctuationFree comments = comments.apply(lambda x: removePunctuation(x)) df['label'].value_counts()
code
90147386/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() comment_len = pd.Series([len(comment.split()) for comment in df['text']]) plt.figure(figsize=(10, 5)) sns.histplot(comment_len, palette='deep')
code
90147386/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() comment_len = pd.Series([len(comment.split()) for comment in df['text']]) df['Length'] = df.text.str.split().apply(len) df['Length'] = df.text.str.split().apply(len) plt.figure(figsize=(10, 5)) sns.histplot(df[df['label'] == 'CB']['Length'], color='r') plt.title('Distribution of Tweet Length for cyberbullying') display(df.Length[df['label'] == 'CB'].describe())
code
90147386/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/idcyberbullying/id-cyberbullying-instagram.tsv', sep='\t', encoding='ISO-8859-1') df df.label.value_counts() df.info()
code
74067974/cell_21
[ "text_plain_output_1.png" ]
from prophet import Prophet import pandas as pd import seaborn as sns df = pd.read_csv('../input/crypto-historical-price/data/BTC-USD.csv') df.isnull().sum() df.dropna() data = df[['Date', 'Adj Close']] data = data.rename(columns={'Date': 'ds', 'Adj Close': 'y'}) bitcoin_halving = pd.DataFrame({'holiday': 'Bitcoin Halving', 'ds': pd.to_datetime(['2012-11-28', '2016-07-09', '2020-05-11']), 'lower_window': 0, 'upper_window': 1}) holidays = bitcoin_halving model = Prophet(yearly_seasonality=True, daily_seasonality=True, holidays=holidays) model.add_country_holidays(country_name='US') model.fit(data) future = model.make_future_dataframe(periods=365) predict = model.predict(future) model.train_holiday_names
code
74067974/cell_9
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/crypto-historical-price/data/BTC-USD.csv') df.info()
code
74067974/cell_25
[ "text_plain_output_1.png" ]
from prophet import Prophet from prophet.plot import plot_yearly import pandas as pd import seaborn as sns df = pd.read_csv('../input/crypto-historical-price/data/BTC-USD.csv') df.isnull().sum() df.dropna() data = df[['Date', 'Adj Close']] data = data.rename(columns={'Date': 'ds', 'Adj Close': 'y'}) bitcoin_halving = pd.DataFrame({'holiday': 'Bitcoin Halving', 'ds': pd.to_datetime(['2012-11-28', '2016-07-09', '2020-05-11']), 'lower_window': 0, 'upper_window': 1}) holidays = bitcoin_halving model = Prophet(yearly_seasonality=True, daily_seasonality=True, holidays=holidays) model.add_country_holidays(country_name='US') model.fit(data) future = model.make_future_dataframe(periods=365) predict = model.predict(future) model.train_holiday_names fig = model.plot(predict) fig = model.plot_components(predict) from prophet.plot import plot_yearly fig = plot_yearly(model)
code
74067974/cell_23
[ "text_html_output_1.png" ]
from prophet import Prophet import pandas as pd import seaborn as sns df = pd.read_csv('../input/crypto-historical-price/data/BTC-USD.csv') df.isnull().sum() df.dropna() data = df[['Date', 'Adj Close']] data = data.rename(columns={'Date': 'ds', 'Adj Close': 'y'}) bitcoin_halving = pd.DataFrame({'holiday': 'Bitcoin Halving', 'ds': pd.to_datetime(['2012-11-28', '2016-07-09', '2020-05-11']), 'lower_window': 0, 'upper_window': 1}) holidays = bitcoin_halving model = Prophet(yearly_seasonality=True, daily_seasonality=True, holidays=holidays) model.add_country_holidays(country_name='US') model.fit(data) future = model.make_future_dataframe(periods=365) predict = model.predict(future) model.train_holiday_names fig = model.plot(predict)
code
74067974/cell_6
[ "image_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/crypto-historical-price/data/BTC-USD.csv') df.head()
code
74067974/cell_11
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/crypto-historical-price/data/BTC-USD.csv') df.isnull().sum()
code
74067974/cell_19
[ "text_plain_output_1.png" ]
from prophet import Prophet import pandas as pd import seaborn as sns df = pd.read_csv('../input/crypto-historical-price/data/BTC-USD.csv') df.isnull().sum() df.dropna() data = df[['Date', 'Adj Close']] data = data.rename(columns={'Date': 'ds', 'Adj Close': 'y'}) bitcoin_halving = pd.DataFrame({'holiday': 'Bitcoin Halving', 'ds': pd.to_datetime(['2012-11-28', '2016-07-09', '2020-05-11']), 'lower_window': 0, 'upper_window': 1}) holidays = bitcoin_halving model = Prophet(yearly_seasonality=True, daily_seasonality=True, holidays=holidays) model.add_country_holidays(country_name='US') model.fit(data)
code
74067974/cell_1
[ "text_plain_output_1.png" ]
!pip install prophet
code
74067974/cell_7
[ "image_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/crypto-historical-price/data/BTC-USD.csv') df.tail()
code
74067974/cell_28
[ "image_output_1.png" ]
from prophet import Prophet from prophet.plot import plot_plotly, plot_components_plotly import pandas as pd import seaborn as sns df = pd.read_csv('../input/crypto-historical-price/data/BTC-USD.csv') df.isnull().sum() df.dropna() data = df[['Date', 'Adj Close']] data = data.rename(columns={'Date': 'ds', 'Adj Close': 'y'}) bitcoin_halving = pd.DataFrame({'holiday': 'Bitcoin Halving', 'ds': pd.to_datetime(['2012-11-28', '2016-07-09', '2020-05-11']), 'lower_window': 0, 'upper_window': 1}) holidays = bitcoin_halving model = Prophet(yearly_seasonality=True, daily_seasonality=True, holidays=holidays) model.add_country_holidays(country_name='US') model.fit(data) future = model.make_future_dataframe(periods=365) predict = model.predict(future) model.train_holiday_names fig = model.plot(predict) fig = model.plot_components(predict) plot_components_plotly(model, predict)
code
74067974/cell_8
[ "text_html_output_2.png" ]
import pandas as pd df = pd.read_csv('../input/crypto-historical-price/data/BTC-USD.csv') df.describe()
code
74067974/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
from prophet import Prophet import pandas as pd import seaborn as sns df = pd.read_csv('../input/crypto-historical-price/data/BTC-USD.csv') df.isnull().sum() df.dropna() data = df[['Date', 'Adj Close']] data = data.rename(columns={'Date': 'ds', 'Adj Close': 'y'}) bitcoin_halving = pd.DataFrame({'holiday': 'Bitcoin Halving', 'ds': pd.to_datetime(['2012-11-28', '2016-07-09', '2020-05-11']), 'lower_window': 0, 'upper_window': 1}) holidays = bitcoin_halving model = Prophet(yearly_seasonality=True, daily_seasonality=True, holidays=holidays) model.add_country_holidays(country_name='US') model.fit(data) future = model.make_future_dataframe(periods=365) predict = model.predict(future) model.train_holiday_names fig = model.plot(predict) fig = model.plot_components(predict)
code
74067974/cell_14
[ "text_html_output_1.png" ]
import pandas as pd import seaborn as sns df = pd.read_csv('../input/crypto-historical-price/data/BTC-USD.csv') df.isnull().sum() df.dropna() sns.heatmap(df.isnull(), yticklabels=False, cbar=False, cmap='viridis')
code
74067974/cell_27
[ "text_plain_output_1.png" ]
from prophet import Prophet from prophet.plot import plot_plotly, plot_components_plotly import pandas as pd import seaborn as sns df = pd.read_csv('../input/crypto-historical-price/data/BTC-USD.csv') df.isnull().sum() df.dropna() data = df[['Date', 'Adj Close']] data = data.rename(columns={'Date': 'ds', 'Adj Close': 'y'}) bitcoin_halving = pd.DataFrame({'holiday': 'Bitcoin Halving', 'ds': pd.to_datetime(['2012-11-28', '2016-07-09', '2020-05-11']), 'lower_window': 0, 'upper_window': 1}) holidays = bitcoin_halving model = Prophet(yearly_seasonality=True, daily_seasonality=True, holidays=holidays) model.add_country_holidays(country_name='US') model.fit(data) future = model.make_future_dataframe(periods=365) predict = model.predict(future) model.train_holiday_names fig = model.plot(predict) fig = model.plot_components(predict) plot_plotly(model, predict)
code
74067974/cell_12
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/crypto-historical-price/data/BTC-USD.csv') df.isnull().sum() df.dropna()
code
90106473/cell_13
[ "text_html_output_1.png" ]
import pandas as pd import seaborn as sns df_test = pd.read_csv('/kaggle/input/ml-for-exploration-geophysics-2022-regression/test.csv') df_train = pd.read_csv('/kaggle/input/ml-for-exploration-geophysics-2022-regression/train.csv') feature_names = df_train.columns[:-1].tolist() label_names = df_train.columns[-1] fig = sns.pairplot(df_train, diag_kind="kde") fig = sns.pairplot(df_train[['CAL', 'DTC']])
code
90106473/cell_9
[ "image_output_1.png" ]
import pandas as pd df_test = pd.read_csv('/kaggle/input/ml-for-exploration-geophysics-2022-regression/test.csv') df_train = pd.read_csv('/kaggle/input/ml-for-exploration-geophysics-2022-regression/train.csv') df_test.describe()
code
90106473/cell_19
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np import pandas as pd df_test = pd.read_csv('/kaggle/input/ml-for-exploration-geophysics-2022-regression/test.csv') df_train = pd.read_csv('/kaggle/input/ml-for-exploration-geophysics-2022-regression/train.csv') feature_names = df_train.columns[:-1].tolist() label_names = df_train.columns[-1] def make_log_plot(df): color_list = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D'] feature_names = df.columns.tolist() feature_num = len(feature_names) Depth = np.linspace(0,len(df[feature_names[0]]),len(df[feature_names[0]])) f, ax = plt.subplots(nrows=1, ncols=feature_num, figsize=(12, 12)) for i in range(len(ax)): log = df[feature_names[i]] ax[i].plot(log, Depth, '-', color=color_list[i]) ax[i].set_ylim(Depth.min(),Depth.max()) ax[i].invert_yaxis() ax[i].grid() ax[i].locator_params(axis='x', nbins=3) ax[i].set_xlabel(feature_names[i]) ax[i].set_xlim(log.min(),log.max()) if i > 0: ax[i].set_yticklabels([]); f.suptitle('Well logs', fontsize=14,y=0.94) X_train_orig = df_train[feature_names].values X_test_orig = df_test[feature_names].values Y_train_orig = df_train['DTC'].values scaler = StandardScaler() scaler.fit(X_train_orig) X_train_norm = scaler.transform(X_train_orig) X_test_norm = scaler.transform(X_test_orig) X_train, X_val, y_train, y_val = train_test_split(X_train_norm, Y_train_orig, test_size=0.2, random_state=1, shuffle=True) print('Size of the X_train dataset: ' + str(X_train.shape)) print('Size of the y_train dataset: ' + str(y_train.shape)) print('Size of the X_val dataset: ' + str(X_val.shape)) print('Size of the y_val dataset: ' + str(y_val.shape))
code
90106473/cell_28
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np import pandas as pd df_test = pd.read_csv('/kaggle/input/ml-for-exploration-geophysics-2022-regression/test.csv') df_train = pd.read_csv('/kaggle/input/ml-for-exploration-geophysics-2022-regression/train.csv') feature_names = df_train.columns[:-1].tolist() label_names = df_train.columns[-1] def make_log_plot(df): color_list = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D'] feature_names = df.columns.tolist() feature_num = len(feature_names) Depth = np.linspace(0,len(df[feature_names[0]]),len(df[feature_names[0]])) f, ax = plt.subplots(nrows=1, ncols=feature_num, figsize=(12, 12)) for i in range(len(ax)): log = df[feature_names[i]] ax[i].plot(log, Depth, '-', color=color_list[i]) ax[i].set_ylim(Depth.min(),Depth.max()) ax[i].invert_yaxis() ax[i].grid() ax[i].locator_params(axis='x', nbins=3) ax[i].set_xlabel(feature_names[i]) ax[i].set_xlim(log.min(),log.max()) if i > 0: ax[i].set_yticklabels([]); f.suptitle('Well logs', fontsize=14,y=0.94) X_train_orig = df_train[feature_names].values X_test_orig = df_test[feature_names].values Y_train_orig = df_train['DTC'].values scaler = StandardScaler() scaler.fit(X_train_orig) X_train_norm = scaler.transform(X_train_orig) X_test_norm = scaler.transform(X_test_orig) X_train, X_val, y_train, y_val = train_test_split(X_train_norm, Y_train_orig, test_size=0.2, random_state=1, shuffle=True) def evaluate_regression(reg, X_test, y_test): R2 = reg.score(X_test, y_test) y_pred = reg.predict(X_test) RMSE = mean_squared_error(y_test, y_pred, squared=False) plt.figure(figsize=(15, 8)) f, (ax1, ax2) = plt.subplots(1, 2) ax1.scatter( y_test, y_pred) ax1.set_xlabel('True') ax1.set_ylabel('Predicted') ax2.plot(y_test, linewidth=2, label="True") ax2.plot(y_pred, linewidth=2, label="Predicted") ax2.legend(loc='lower right') ax2.set_xlabel('Sample') ax2.set_ylabel('DTC') plt.show() print(f'R2 = {R2}') print(f'RMSE = {RMSE}') reg = LinearRegression() reg.fit(X_train, y_train) evaluate_regression(reg, X_val, y_val) predictions = reg.predict(X_test_norm) plt.plot(predictions, label='Predicted') plt.xlabel('Sample') plt.ylabel('DTC') plt.title('DTC Prediction Comparison') plt.legend(loc='lower right') plt.show()
code
90106473/cell_8
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd df_test = pd.read_csv('/kaggle/input/ml-for-exploration-geophysics-2022-regression/test.csv') df_train = pd.read_csv('/kaggle/input/ml-for-exploration-geophysics-2022-regression/train.csv') df_train.describe()
code
90106473/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import pandas as pd df_test = pd.read_csv('/kaggle/input/ml-for-exploration-geophysics-2022-regression/test.csv') df_train = pd.read_csv('/kaggle/input/ml-for-exploration-geophysics-2022-regression/train.csv') feature_names = df_train.columns[:-1].tolist() label_names = df_train.columns[-1] def make_log_plot(df): color_list = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D'] feature_names = df.columns.tolist() feature_num = len(feature_names) Depth = np.linspace(0,len(df[feature_names[0]]),len(df[feature_names[0]])) f, ax = plt.subplots(nrows=1, ncols=feature_num, figsize=(12, 12)) for i in range(len(ax)): log = df[feature_names[i]] ax[i].plot(log, Depth, '-', color=color_list[i]) ax[i].set_ylim(Depth.min(),Depth.max()) ax[i].invert_yaxis() ax[i].grid() ax[i].locator_params(axis='x', nbins=3) ax[i].set_xlabel(feature_names[i]) ax[i].set_xlim(log.min(),log.max()) if i > 0: ax[i].set_yticklabels([]); f.suptitle('Well logs', fontsize=14,y=0.94) make_log_plot(df_train)
code
90106473/cell_22
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler import matplotlib.pyplot as plt import numpy as np import pandas as pd df_test = pd.read_csv('/kaggle/input/ml-for-exploration-geophysics-2022-regression/test.csv') df_train = pd.read_csv('/kaggle/input/ml-for-exploration-geophysics-2022-regression/train.csv') feature_names = df_train.columns[:-1].tolist() label_names = df_train.columns[-1] def make_log_plot(df): color_list = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D'] feature_names = df.columns.tolist() feature_num = len(feature_names) Depth = np.linspace(0,len(df[feature_names[0]]),len(df[feature_names[0]])) f, ax = plt.subplots(nrows=1, ncols=feature_num, figsize=(12, 12)) for i in range(len(ax)): log = df[feature_names[i]] ax[i].plot(log, Depth, '-', color=color_list[i]) ax[i].set_ylim(Depth.min(),Depth.max()) ax[i].invert_yaxis() ax[i].grid() ax[i].locator_params(axis='x', nbins=3) ax[i].set_xlabel(feature_names[i]) ax[i].set_xlim(log.min(),log.max()) if i > 0: ax[i].set_yticklabels([]); f.suptitle('Well logs', fontsize=14,y=0.94) X_train_orig = df_train[feature_names].values X_test_orig = df_test[feature_names].values Y_train_orig = df_train['DTC'].values scaler = StandardScaler() scaler.fit(X_train_orig) X_train_norm = scaler.transform(X_train_orig) X_test_norm = scaler.transform(X_test_orig) X_train, X_val, y_train, y_val = train_test_split(X_train_norm, Y_train_orig, test_size=0.2, random_state=1, shuffle=True) def evaluate_regression(reg, X_test, y_test): R2 = reg.score(X_test, y_test) y_pred = reg.predict(X_test) RMSE = mean_squared_error(y_test, y_pred, squared=False) plt.figure(figsize=(15, 8)) f, (ax1, ax2) = plt.subplots(1, 2) ax1.scatter( y_test, y_pred) ax1.set_xlabel('True') ax1.set_ylabel('Predicted') ax2.plot(y_test, linewidth=2, label="True") ax2.plot(y_pred, linewidth=2, label="Predicted") ax2.legend(loc='lower right') ax2.set_xlabel('Sample') ax2.set_ylabel('DTC') plt.show() print(f'R2 = {R2}') print(f'RMSE = {RMSE}') reg = LinearRegression() reg.fit(X_train, y_train) evaluate_regression(reg, X_val, y_val)
code
90106473/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd df_test = pd.read_csv('/kaggle/input/ml-for-exploration-geophysics-2022-regression/test.csv') df_train = pd.read_csv('/kaggle/input/ml-for-exploration-geophysics-2022-regression/train.csv') feature_names = df_train.columns[:-1].tolist() print(f'Features: {feature_names}') label_names = df_train.columns[-1] print(f'Label: {label_names}')
code
90106473/cell_12
[ "text_html_output_1.png" ]
import pandas as pd import seaborn as sns df_test = pd.read_csv('/kaggle/input/ml-for-exploration-geophysics-2022-regression/test.csv') df_train = pd.read_csv('/kaggle/input/ml-for-exploration-geophysics-2022-regression/train.csv') feature_names = df_train.columns[:-1].tolist() label_names = df_train.columns[-1] fig = sns.pairplot(df_train, diag_kind='kde')
code
90106473/cell_5
[ "text_plain_output_1.png" ]
import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
73078642/cell_19
[ "image_png_output_1.png" ]
from IPython.display import display import ipywidgets as widgets import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd pd.set_option('display.max_columns', None) from colorama import Fore, Style from sklearn.tree import plot_tree import matplotlib.pyplot as plt import seaborn as sns sns.set() from ipywidgets import HBox, Output, VBox, Layout from IPython.display import display import ipywidgets as widgets from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.feature_selection import f_classif, mutual_info_classif from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier from xgboost import XGBClassifier, XGBRegressor from catboost import CatBoostClassifier from sklearn.model_selection import GridSearchCV from sklearn.metrics import roc_auc_score, accuracy_score, recall_score, precision_score, f1_score, classification_report, plot_confusion_matrix, plot_roc_curve, plot_precision_recall_curve, log_loss, brier_score_loss, hamming_loss import warnings warnings.filterwarnings('ignore') data = pd.read_csv('../input/heart-attack-analysis-prediction-dataset/heart.csv') data.sort_values('age').head() data_eda = data.copy() data_eda['sex'] = data['sex'].map({1: 'Male', 0: 'Female'}) data_eda['cp'] = data['cp'].map({1: 'Typical\nangina', 2: 'Atypical\nangina', 3: 'Non-anginal\npain', 0: 'Asymptomatic'}) data_eda['fbs'] = data['fbs'].map({1: 'True', 0: 'False'}) data_eda['restecg'] = data['restecg'].map({1: 'Normal', 2: 'Having\nST-T wave\nabnormality', 0: 'Hypertrophy'}) data_eda['exng'] = data['exng'].map({1: 'Yes', 0: 'No'}) data_eda['slp'] = data['slp'].map({2: 'Upsloping', 1: 'Flat', 0: 'Downsloping'}) data_eda['caa'] = data['caa'].map({0: '0', 1: '1', 2: '2', 3: '3', 4: '3'}) data_eda['thall'] = data['thall'].map({0: 'Normal', 2: 'Normal', 1: 'Fixed defect', 3: 'Reversible\ndefect'}) data_eda['output'] = data['output'].map({1: 'Yes', 0: 'No'}) data_eda.isnull().mean() def age_cohort(age): if age <= 45: return '0-45' elif age > 45 and age <= 55: return '45-55' elif age > 55 and age <= 60: return '55-60' elif age > 60: return '60+' data_eda['age group'] = data_eda['age'].apply(age_cohort) data_eda.sort_values('age group', inplace=True) def q(n): def q_(x): return x.quantile(n) q_.__name__ = '{:2.0f}%'.format(n * 100) return q_ def ctab(var): f, a = plt.subplots(1, 2, figsize=(8, 3), dpi=200) background_color = '#F0F6FC' color = '#000000' hue_colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue'] f.patch.set_facecolor(background_color) temp = data_eda[var].value_counts() a[1].pie(x=temp, labels=temp.index, autopct='%1.1f%%', textprops={'fontsize': 9}, colors=hue_colors) a[1].set_xlabel(var) a[0].axis('off') a[0].text(0.55, 0.5, ' Percentage( % ) \n', color=color, horizontalalignment='center', verticalalignment='center', fontsize=20, fontfamily='serif') a[0].text(0.55, 0.4, "of Categorical Feature '" + var + "'\n_________________________________", color=color, horizontalalignment='center', verticalalignment='center', fontsize=10, fontfamily='serif') plt.show() ci = ['Categorical Features', 'Numeric Features'] cat_var = [var for var in data_eda.columns if data_eda[var].dtype == 'O' and var != 'output'] num_var = [var for var in 
data_eda.columns if data_eda[var].dtype != 'O'] def create_inner_tabs(): inner_tabs_list = [widgets.Output() for i in range(len(cat_var))] inner_tab = widgets.Tab(inner_tabs_list) for i in range(len(cat_var)): inner_tab.set_title(i, cat_var[i]) with inner_tabs_list[i]: ctab(cat_var[i]) display(inner_tab) outer_sub_tabs_list = [widgets.Output() for i in range(len(ci))] tab = widgets.Tab(children=outer_sub_tabs_list) for i in range(len(ci)): tab.set_title(i, ci[i]) with outer_sub_tabs_list[i]: if i == 0: create_inner_tabs() else: plt.figure(figsize=(11, 4), dpi=200) df_n = data_eda[num_var].agg(['count', 'mean', 'std', 'skew', 'kurt', 'min', q(0.25), 'median', q(0.75), 'max']).T df_n['range'] = df_n['max'] - df_n['min'] df_n['IQR'] = df_n['75%'] - df_n['25%'] sns.heatmap(df_n, annot=True, cmap='Blues', fmt='.2f', linewidths=5, cbar=False, annot_kws={'size': 10}) plt.xticks(size=12) plt.yticks(size=12, rotation=0) plt.title('Descriptive Statistics', size=16) plt.show() display(tab)
code
73078642/cell_15
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd pd.set_option('display.max_columns', None) from colorama import Fore, Style from sklearn.tree import plot_tree import matplotlib.pyplot as plt import seaborn as sns sns.set() from ipywidgets import HBox, Output, VBox, Layout from IPython.display import display import ipywidgets as widgets from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.feature_selection import f_classif, mutual_info_classif from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier from xgboost import XGBClassifier, XGBRegressor from catboost import CatBoostClassifier from sklearn.model_selection import GridSearchCV from sklearn.metrics import roc_auc_score, accuracy_score, recall_score, precision_score, f1_score, classification_report, plot_confusion_matrix, plot_roc_curve, plot_precision_recall_curve, log_loss, brier_score_loss, hamming_loss import warnings warnings.filterwarnings('ignore') data = pd.read_csv('../input/heart-attack-analysis-prediction-dataset/heart.csv') data.sort_values('age').head() data_eda = data.copy() data_eda['sex'] = data['sex'].map({1: 'Male', 0: 'Female'}) data_eda['cp'] = data['cp'].map({1: 'Typical\nangina', 2: 'Atypical\nangina', 3: 'Non-anginal\npain', 0: 'Asymptomatic'}) data_eda['fbs'] = data['fbs'].map({1: 'True', 0: 'False'}) data_eda['restecg'] = data['restecg'].map({1: 'Normal', 2: 'Having\nST-T wave\nabnormality', 0: 'Hypertrophy'}) data_eda['exng'] = data['exng'].map({1: 'Yes', 0: 'No'}) data_eda['slp'] = data['slp'].map({2: 'Upsloping', 1: 'Flat', 0: 'Downsloping'}) data_eda['caa'] = data['caa'].map({0: '0', 1: '1', 2: '2', 3: '3', 4: '3'}) data_eda['thall'] = data['thall'].map({0: 'Normal', 2: 'Normal', 1: 'Fixed defect', 3: 'Reversible\ndefect'}) data_eda['output'] = data['output'].map({1: 'Yes', 0: 'No'}) print('Imbalance Ratio :', data_eda['output'].value_counts()[1] / data_eda['output'].value_counts()[0]) print('\n') print('Heart Attack\n____________') data_eda['output'].value_counts()
code
73078642/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd pd.set_option('display.max_columns', None) from colorama import Fore, Style from sklearn.tree import plot_tree import matplotlib.pyplot as plt import seaborn as sns sns.set() from ipywidgets import HBox, Output, VBox, Layout from IPython.display import display import ipywidgets as widgets from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler, MinMaxScaler from sklearn.feature_selection import f_classif, mutual_info_classif from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier from xgboost import XGBClassifier, XGBRegressor from catboost import CatBoostClassifier from sklearn.model_selection import GridSearchCV from sklearn.metrics import roc_auc_score, accuracy_score, recall_score, precision_score, f1_score, classification_report, plot_confusion_matrix, plot_roc_curve, plot_precision_recall_curve, log_loss, brier_score_loss, hamming_loss import warnings warnings.filterwarnings('ignore') data = pd.read_csv('../input/heart-attack-analysis-prediction-dataset/heart.csv') data.sort_values('age').head() data_eda = data.copy() data_eda['sex'] = data['sex'].map({1: 'Male', 0: 'Female'}) data_eda['cp'] = data['cp'].map({1: 'Typical\nangina', 2: 'Atypical\nangina', 3: 'Non-anginal\npain', 0: 'Asymptomatic'}) data_eda['fbs'] = data['fbs'].map({1: 'True', 0: 'False'}) data_eda['restecg'] = data['restecg'].map({1: 'Normal', 2: 'Having\nST-T wave\nabnormality', 0: 'Hypertrophy'}) data_eda['exng'] = data['exng'].map({1: 'Yes', 0: 'No'}) data_eda['slp'] = data['slp'].map({2: 'Upsloping', 1: 'Flat', 0: 'Downsloping'}) data_eda['caa'] = data['caa'].map({0: '0', 1: '1', 2: '2', 3: '3', 4: '3'}) data_eda['thall'] = data['thall'].map({0: 'Normal', 2: 'Normal', 1: 'Fixed defect', 3: 'Reversible\ndefect'}) data_eda['output'] = data['output'].map({1: 'Yes', 0: 'No'}) data_eda.isnull().mean()
code
73078642/cell_22
[ "image_output_1.png" ]
from IPython.display import display
import ipywidgets as widgets
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
from colorama import Fore, Style
from sklearn.tree import plot_tree
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from ipywidgets import HBox, Output, VBox, Layout
from IPython.display import display
import ipywidgets as widgets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.feature_selection import f_classif, mutual_info_classif
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from xgboost import XGBClassifier, XGBRegressor
from catboost import CatBoostClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import roc_auc_score, accuracy_score, recall_score, precision_score, f1_score, classification_report, plot_confusion_matrix, plot_roc_curve, plot_precision_recall_curve, log_loss, brier_score_loss, hamming_loss
import warnings
warnings.filterwarnings('ignore')
data = pd.read_csv('../input/heart-attack-analysis-prediction-dataset/heart.csv')
data.sort_values('age').head()
data_eda = data.copy()
data_eda['sex'] = data['sex'].map({1: 'Male', 0: 'Female'})
data_eda['cp'] = data['cp'].map({1: 'Typical\nangina', 2: 'Atypical\nangina', 3: 'Non-anginal\npain', 0: 'Asymptomatic'})
data_eda['fbs'] = data['fbs'].map({1: 'True', 0: 'False'})
data_eda['restecg'] = data['restecg'].map({1: 'Normal', 2: 'Having\nST-T wave\nabnormality', 0: 'Hypertrophy'})
data_eda['exng'] = data['exng'].map({1: 'Yes', 0: 'No'})
data_eda['slp'] = data['slp'].map({2: 'Upsloping', 1: 'Flat', 0: 'Downsloping'})
data_eda['caa'] = data['caa'].map({0: '0', 1: '1', 2: '2', 3: '3', 4: '3'})
data_eda['thall'] = data['thall'].map({0: 'Normal', 2: 'Normal', 1: 'Fixed defect', 3: 'Reversible\ndefect'})
data_eda['output'] = data['output'].map({1: 'Yes', 0: 'No'})
data_eda.isnull().mean()
## cohort analysis of age with output
def age_cohort(age):
    if age <= 45:
        return "0-45"
    elif age > 45 and age <= 55:
        return "45-55"
    elif age > 55 and age <= 60:
        return "55-60"
    elif age > 60:
        return "60+"
data_eda['age group'] = data_eda['age'].apply(age_cohort)
data_eda.sort_values("age group",inplace=True)
def q(n):
    def q_(x):
        return x.quantile(n)
    q_.__name__ = '{:2.0f}%'.format(n*100)
    return q_
def ctab(var):
    f, a = plt.subplots(1, 2,figsize=(8,3),dpi=200)
    background_color = "#F0F6FC"
    color = "#000000"
    hue_colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue']
    f.patch.set_facecolor(background_color)
    temp=data_eda[var].value_counts()
    a[1].pie(x=temp,labels=temp.index,autopct='%1.1f%%',textprops={'fontsize':9},colors=hue_colors);
    a[1].set_xlabel(var)
    a[0].axis("off")
    a[0].text(.55,0.5," Percentage( % ) \n", color =color, horizontalalignment = 'center',verticalalignment = 'center' , fontsize = 20, fontfamily='serif');
    a[0].text(.55,0.4,"of Categorical Feature '"+var+"'\n_________________________________", color = color, horizontalalignment = 'center',verticalalignment = 'center' , fontsize = 10, fontfamily='serif');
    plt.show()
ci = ["Categorical Features", "Numeric Features"]
cat_var = [var for var in data_eda.columns if data_eda[var].dtype=='O' and var !='output']
num_var = [var for var in data_eda.columns if data_eda[var].dtype!='O']
# Create inner Tabs
def create_inner_tabs():
    inner_tabs_list=[widgets.Output() for i in range(len(cat_var))]
    inner_tab = widgets.Tab(inner_tabs_list)
    for i in range (len(cat_var)):
        inner_tab.set_title(i,cat_var[i])
        with inner_tabs_list[i]:
            ctab(cat_var[i]);
    display(inner_tab)
# Create outer Tabs
outer_sub_tabs_list=[widgets.Output() for i in range(len(ci))]
tab = widgets.Tab(children=outer_sub_tabs_list)
for i in range (len(ci)):
    tab.set_title(i,ci[i])
    with outer_sub_tabs_list[i]:
        if i ==0:
            create_inner_tabs();
        else:
            plt.figure(figsize=(11,4),dpi=200)
            df_n=data_eda[num_var].agg(["count","mean","std","skew","kurt","min",q(.25),"median",q(.75),"max"]).T
            df_n["range"]=df_n["max"]-df_n["min"]
            df_n["IQR"] =df_n["75%"]-df_n["25%"]
            sns.heatmap(df_n , annot=True,cmap = "Blues", fmt= '.2f',linewidths = 5, cbar = False,annot_kws={"size": 10})
            plt.xticks(size = 12);
            plt.yticks(size = 12, rotation = 0);
            plt.title("Descriptive Statistics", size = 16)
            plt.show()
display(tab)
f, a = plt.subplots(5, 2, figsize=(12, 15), dpi=200)
background_color = '#F0F6FC'
color = '#000000'
hue_color = ['#FF5003', '#428bca']
hue_order = ['Yes', 'No']
alpha = 0.8
f.patch.set_facecolor(background_color)
ax = sum(a.tolist(), [])[1:]
a[0][0].axis('off')
a[0][0].text(0.5, 0.4, 'Bar Plots (%)\n', color=color, horizontalalignment='center', verticalalignment='center', fontsize=31, fontfamily='serif')
a[0][0].text(0.5, 0.3, 'of Categorical Features\n________________________', color=color, horizontalalignment='center', verticalalignment='center', fontsize=16.9, fontfamily='serif')
for i in range(len(cat_var)):
    new_df = data_eda.groupby(cat_var[i])['output'].value_counts(normalize=True).mul(100).rename('Percent ( % )').reset_index()
    g = sns.barplot(data=new_df, x=cat_var[i], y='Percent ( % )', hue='output', ax=ax[i], palette=hue_color, alpha=alpha, edgecolor=background_color, hue_order=hue_order)
    ax[i].set_facecolor(background_color)
    ax[i].grid(color=background_color)
    ax[i].grid(color=color, linestyle=':', axis='y', zorder=0, dashes=(1, 5))
    ax[i].spines['top'].set_visible(False)
    ax[i].spines['right'].set_visible(False)
    ax[i].spines['left'].set_visible(False)
    ax[i].spines['bottom'].set_color(color)
    ax[i].xaxis.label.set_color(color)
    ax[i].yaxis.label.set_color(color)
    ax[i].tick_params(axis='x', colors=color)
    ax[i].tick_params(axis='y', colors=color)
    if i == 1:
        l = ax[i].legend(loc=(0.4, 1.1), facecolor=background_color, edgecolor=background_color, title='Heart Attack', fontsize=10, title_fontsize=14)
        for text in l.get_texts():
            text.set_color(color)
        l._legend_title_box._text.set_color(color)
    else:
        ax[i].legend().set_visible(False)
plt.tight_layout()
code
73078642/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
from colorama import Fore, Style
from sklearn.tree import plot_tree
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from ipywidgets import HBox, Output, VBox, Layout
from IPython.display import display
import ipywidgets as widgets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.feature_selection import f_classif, mutual_info_classif
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from xgboost import XGBClassifier, XGBRegressor
from catboost import CatBoostClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import roc_auc_score, accuracy_score, recall_score, precision_score, f1_score, classification_report, plot_confusion_matrix, plot_roc_curve, plot_precision_recall_curve, log_loss, brier_score_loss, hamming_loss
import warnings
warnings.filterwarnings('ignore')
data = pd.read_csv('../input/heart-attack-analysis-prediction-dataset/heart.csv')
data.sort_values('age').head()
code
33103199/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.shape
train.isnull().sum()
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
train = train.drop(missing_data[missing_data['Total'] > 1].index, 1)
train = train.drop(train.loc[train['Electrical'].isnull()].index)
train.isnull().sum().max()
train.isnull().sum()
train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
train['Skewed_SP'] = np.log(train['SalePrice'] + 1)
numerical_features = train.select_dtypes(include=[np.number])
numerical_features
code
33103199/cell_4
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.shape
train.info()
code
33103199/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.shape
train.isnull().sum()
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
train = train.drop(missing_data[missing_data['Total'] > 1].index, 1)
train = train.drop(train.loc[train['Electrical'].isnull()].index)
train.isnull().sum().max()
code
33103199/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.shape
train.isnull().sum()
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
train = train.drop(missing_data[missing_data['Total'] > 1].index, 1)
train = train.drop(train.loc[train['Electrical'].isnull()].index)
train.isnull().sum().max()
train.isnull().sum()
train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
plt.hist(train.SalePrice)
plt.show()
code
33103199/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score, cross_val_predict, StratifiedKFold
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn import linear_model, svm
from sklearn.ensemble import GradientBoostingRegressor as xgb
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score
from scipy import stats
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn import linear_model, svm
import datetime
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
import time
from sklearn import preprocessing
from scipy.stats import skew
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33103199/cell_8
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.shape
train.isnull().sum()
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
train = train.drop(missing_data[missing_data['Total'] > 1].index, 1)
train = train.drop(train.loc[train['Electrical'].isnull()].index)
train.isnull().sum().max()
train.isnull().sum()
code
33103199/cell_15
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
import seaborn as sns
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.shape
train.isnull().sum()
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
train = train.drop(missing_data[missing_data['Total'] > 1].index, 1)
train = train.drop(train.loc[train['Electrical'].isnull()].index)
train.isnull().sum().max()
train.isnull().sum()
train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
train['Skewed_SP'] = np.log(train['SalePrice'] + 1)
numerical_features = train.select_dtypes(include=[np.number])
numerical_features
corr = numerical_features.corr()
sns.set(font_scale=1)
corr_list = corr['SalePrice'].sort_values(axis=0, ascending=False).iloc[1:]
print(corr_list)
code
33103199/cell_3
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.shape
code
33103199/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
import seaborn as sns
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.shape
train.isnull().sum()
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
train = train.drop(missing_data[missing_data['Total'] > 1].index, 1)
train = train.drop(train.loc[train['Electrical'].isnull()].index)
train.isnull().sum().max()
train.isnull().sum()
train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
train['Skewed_SP'] = np.log(train['SalePrice'] + 1)
numerical_features = train.select_dtypes(include=[np.number])
numerical_features
corr = numerical_features.corr()
plt.figure(figsize=(16, 16))
sns.set(font_scale=1)
sns.heatmap(corr, vmax=1, square=True)
code
33103199/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.shape
train.isnull().sum()
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
train = train.drop(missing_data[missing_data['Total'] > 1].index, 1)
train = train.drop(train.loc[train['Electrical'].isnull()].index)
train.isnull().sum().max()
train.isnull().sum()
train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
train.head()
code
33103199/cell_12
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.shape
train.isnull().sum()
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
train = train.drop(missing_data[missing_data['Total'] > 1].index, 1)
train = train.drop(train.loc[train['Electrical'].isnull()].index)
train.isnull().sum().max()
train.isnull().sum()
train.sort_values(by='GrLivArea', ascending=False)[:2]
train = train.drop(train[train['Id'] == 1299].index)
train = train.drop(train[train['Id'] == 524].index)
train['Skewed_SP'] = np.log(train['SalePrice'] + 1)
plt.hist(train['Skewed_SP'])
plt.show()
code
33103199/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
train.shape
train.isnull().sum()
code
129020971/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
import pickle
import torch
import torch
from torch.utils.data import Dataset
from torchvision import datasets
from torchvision.transforms import ToTensor
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from tqdm import tqdm
import pickle
device = 'cuda' if torch.cuda.is_available() else 'cpu'
import pandas as pd
import numpy as np
valid_ratio = 0.2
X = pickle.load(open('/kaggle/input/embedder/train_embedding', 'rb'))
y = pd.read_csv('/kaggle/input/embedder/preprocessed_train.csv').target_relabeled.values
valid_size = int(y.shape[0] * valid_ratio)
train_X_tensor = torch.tensor(X[:-valid_size]).to(device)
train_y_tensor = torch.LongTensor(y[:-valid_size]).to(device)
valid_X_tensor = torch.tensor(X[-valid_size:]).to(device)
valid_y_tensor = torch.LongTensor(y[-valid_size:]).to(device)
in_features = X.shape[1]
mlp = torch.nn.Sequential(torch.nn.modules.linear.Linear(in_features=in_features, out_features=in_features), torch.nn.ReLU(), torch.nn.Dropout(), torch.nn.modules.linear.Linear(in_features=in_features, out_features=in_features), torch.nn.ReLU(), torch.nn.Dropout(), torch.nn.modules.linear.Linear(in_features=in_features, out_features=2), torch.nn.Softmax()).to(device)
Loss = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(mlp.parameters(), lr=0.0001)
epoch = 400
max_patience = 5
patience = max_patience
train_loss_history = []
valid_loss_history = []
mlp.train()
for e in range(epoch):
    train_loss = 0
    valid_loss = 0
    outputs = mlp(train_X_tensor)
    loss = Loss(outputs, train_y_tensor)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    train_loss += loss.item()
    train_loss_history.append(train_loss)
    outputs = mlp(valid_X_tensor)
    loss = Loss(outputs, valid_y_tensor)
    valid_loss += loss.item()
    valid_loss_history.append(valid_loss)
    if e % 20 == 0:
        print(f'[{e:3d}/{epoch}] trainloss: {train_loss:.8f} validloss: {valid_loss:.8f} patience: {patience}')
    if len(valid_loss_history) > 1 and valid_loss_history[-1] > valid_loss_history[-2]:
        patience -= 1
        if patience < 0:
            break
    else:
        patience = max_patience
code
129020971/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pickle
import torch
import torch
from torch.utils.data import Dataset
from torchvision import datasets
from torchvision.transforms import ToTensor
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from tqdm import tqdm
import pickle
device = 'cuda' if torch.cuda.is_available() else 'cpu'
import pandas as pd
import numpy as np
valid_ratio = 0.2
X = pickle.load(open('/kaggle/input/embedder/train_embedding', 'rb'))
y = pd.read_csv('/kaggle/input/embedder/preprocessed_train.csv').target_relabeled.values
valid_size = int(y.shape[0] * valid_ratio)
train_X_tensor = torch.tensor(X[:-valid_size]).to(device)
train_y_tensor = torch.LongTensor(y[:-valid_size]).to(device)
valid_X_tensor = torch.tensor(X[-valid_size:]).to(device)
valid_y_tensor = torch.LongTensor(y[-valid_size:]).to(device)
in_features = X.shape[1]
mlp = torch.nn.Sequential(torch.nn.modules.linear.Linear(in_features=in_features, out_features=in_features), torch.nn.ReLU(), torch.nn.Dropout(), torch.nn.modules.linear.Linear(in_features=in_features, out_features=in_features), torch.nn.ReLU(), torch.nn.Dropout(), torch.nn.modules.linear.Linear(in_features=in_features, out_features=2), torch.nn.Softmax()).to(device)
Loss = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(mlp.parameters(), lr=0.0001)
epoch = 400
max_patience = 5
patience = max_patience
train_loss_history = []
valid_loss_history = []
mlp.train()
for e in range(epoch):
    train_loss = 0
    valid_loss = 0
    outputs = mlp(train_X_tensor)
    loss = Loss(outputs, train_y_tensor)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    train_loss += loss.item()
    train_loss_history.append(train_loss)
    outputs = mlp(valid_X_tensor)
    loss = Loss(outputs, valid_y_tensor)
    valid_loss += loss.item()
    valid_loss_history.append(valid_loss)
    if len(valid_loss_history) > 1 and valid_loss_history[-1] > valid_loss_history[-2]:
        patience -= 1
        if patience < 0:
            break
    else:
        patience = max_patience
(plt.plot(train_loss_history), plt.plot(valid_loss_history))
plt.legend(['train loss', 'valid loss'])
plt.show()
code
129020971/cell_1
[ "text_plain_output_1.png" ]
import torch
import torch
from torch.utils.data import Dataset
from torchvision import datasets
from torchvision.transforms import ToTensor
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from tqdm import tqdm
import pickle
device = 'cuda' if torch.cuda.is_available() else 'cpu'
import pandas as pd
import numpy as np
print('torch.cuda.is_available:', torch.cuda.is_available())
code
129020971/cell_15
[ "text_plain_output_1.png" ]
from catboost import CatBoostClassifier
import pandas as pd
import pickle
valid_ratio = 0.2
X = pickle.load(open('/kaggle/input/embedder/train_embedding', 'rb'))
y = pd.read_csv('/kaggle/input/embedder/preprocessed_train.csv').target_relabeled.values
valid_size = int(y.shape[0] * valid_ratio)
in_features = X.shape[1]
from catboost import CatBoostClassifier
catboostclassifier = CatBoostClassifier(iterations=20)
catboostclassifier.fit(X[:-valid_size], y[:-valid_size], eval_set=(X[-valid_size:], y[-valid_size:]), logging_level='Silent')
catboostprediction = catboostclassifier.predict(X)
print('accuracy: \t', end='')
print((catboostprediction == y).mean())
code
129020971/cell_14
[ "image_output_1.png" ]
from catboost import CatBoostClassifier
import pandas as pd
import pickle
valid_ratio = 0.2
X = pickle.load(open('/kaggle/input/embedder/train_embedding', 'rb'))
y = pd.read_csv('/kaggle/input/embedder/preprocessed_train.csv').target_relabeled.values
valid_size = int(y.shape[0] * valid_ratio)
in_features = X.shape[1]
from catboost import CatBoostClassifier
catboostclassifier = CatBoostClassifier(iterations=20)
catboostclassifier.fit(X[:-valid_size], y[:-valid_size], eval_set=(X[-valid_size:], y[-valid_size:]), logging_level='Silent')
code
129020971/cell_10
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import pickle
import torch
import torch
from torch.utils.data import Dataset
from torchvision import datasets
from torchvision.transforms import ToTensor
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from tqdm import tqdm
import pickle
device = 'cuda' if torch.cuda.is_available() else 'cpu'
import pandas as pd
import numpy as np
valid_ratio = 0.2
X = pickle.load(open('/kaggle/input/embedder/train_embedding', 'rb'))
y = pd.read_csv('/kaggle/input/embedder/preprocessed_train.csv').target_relabeled.values
valid_size = int(y.shape[0] * valid_ratio)
train_X_tensor = torch.tensor(X[:-valid_size]).to(device)
train_y_tensor = torch.LongTensor(y[:-valid_size]).to(device)
valid_X_tensor = torch.tensor(X[-valid_size:]).to(device)
valid_y_tensor = torch.LongTensor(y[-valid_size:]).to(device)
in_features = X.shape[1]
mlp = torch.nn.Sequential(torch.nn.modules.linear.Linear(in_features=in_features, out_features=in_features), torch.nn.ReLU(), torch.nn.Dropout(), torch.nn.modules.linear.Linear(in_features=in_features, out_features=in_features), torch.nn.ReLU(), torch.nn.Dropout(), torch.nn.modules.linear.Linear(in_features=in_features, out_features=2), torch.nn.Softmax()).to(device)
Loss = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(mlp.parameters(), lr=0.0001)
epoch = 400
max_patience = 5
patience = max_patience
train_loss_history = []
valid_loss_history = []
mlp.train()
for e in range(epoch):
    train_loss = 0
    valid_loss = 0
    outputs = mlp(train_X_tensor)
    loss = Loss(outputs, train_y_tensor)
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    train_loss += loss.item()
    train_loss_history.append(train_loss)
    outputs = mlp(valid_X_tensor)
    loss = Loss(outputs, valid_y_tensor)
    valid_loss += loss.item()
    valid_loss_history.append(valid_loss)
    if len(valid_loss_history) > 1 and valid_loss_history[-1] > valid_loss_history[-2]:
        patience -= 1
        if patience < 0:
            break
    else:
        patience = max_patience
outputs = mlp(train_X_tensor)
outputs = torch.argmax(outputs, 1)
torch.mean((outputs == train_y_tensor).to(torch.float))
print('train_accuracy: \t', end='')
print(torch.mean((outputs == train_y_tensor).to(torch.float)).item())
outputs = mlp(valid_X_tensor)
outputs = torch.argmax(outputs, 1)
print('valid_accuracy: \t', end='')
print(torch.mean((outputs == valid_y_tensor).to(torch.float)).item())
code
128009107/cell_9
[ "text_plain_output_1.png" ]
import numpy as np
import os
import os
train_images_dir = 'train_images'
train_maps_dir = 'train_labels'
val_images_dir = 'valid_images'
val_maps_dir = 'valid_labels'
test_images_dir = 'test_images'
test_maps_dir = 'test_labels'
train_images = np.array(os.listdir(train_images_dir), dtype=object)
train_maps = np.array(os.listdir(train_maps_dir), dtype=object)
val_images = np.array(os.listdir(val_images_dir), dtype=object)
val_maps = np.array(os.listdir(val_maps_dir), dtype=object)
def remove_folder_contents(folder):
    for the_file in os.listdir(folder):
        file_path = os.path.join(folder, the_file)
        try:
            if os.path.isfile(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                remove_folder_contents(file_path)
                os.rmdir(file_path)
        except Exception as e:
            pass
remove_folder_contents('./test_images')
remove_folder_contents('./test_labels')
split_data(test_images_dir, test_maps_dir, 0.2)
test_images = np.array(os.listdir(test_images_dir), dtype=object)
test_maps = np.array(os.listdir(test_maps_dir), dtype=object)
print('****TRAIN****')
print(f'There are {len(train_images)} images')
print(f'There are {len(train_maps)} masks')
print('****TEST****')
print(f'There are {len(test_images)} images')
print(f'There are {len(test_maps)} masks')
code
128009107/cell_6
[ "text_plain_output_1.png" ]
import numpy as np
import os
import os
train_images_dir = 'train_images'
train_maps_dir = 'train_labels'
val_images_dir = 'valid_images'
val_maps_dir = 'valid_labels'
test_images_dir = 'test_images'
test_maps_dir = 'test_labels'
train_images = np.array(os.listdir(train_images_dir), dtype=object)
train_maps = np.array(os.listdir(train_maps_dir), dtype=object)
print('****TRAIN****')
print(f'There are {len(train_images)} images')
print(f'There are {len(train_maps)} masks')
val_images = np.array(os.listdir(val_images_dir), dtype=object)
val_maps = np.array(os.listdir(val_maps_dir), dtype=object)
print('****VALID****')
print(f'There are {len(val_images)} images')
print(f'There are {len(val_maps)} masks')
code
128009107/cell_26
[ "application_vnd.jupyter.stderr_output_1.png" ]
""" counter = 1 for backbone in BACKBONES: for loss in losses: for freeze_boolean, freeze_tag in zip([True,False], ['frozen','nonFrozen']): for pretrained_state, pretrained_tag in zip(['imagenet',None], ['pretrained','nonPretrained']): model.tag = backbone + '_' + loss.name + '_' + freeze_tag + '_' + pretrained_tag + '_' + str(counter) print("******************************* ", model.tag) preprocess_input = sm.get_preprocessing(backbone) # Applies the proper preprocessing for the chosen backbone model = sm.Unet(backbone, classes=NUM_CLASSES, activation=ACTIVATION, encoder_freeze=freeze_boolean, encoder_weights=pretrained_state) # Variar entre true y false, imagenet y none model.compile(optim, loss, metrics) # Defining callbacks for learning rate scheduling and best checkpoints saving checkpoint_filename = './best_' + backbone + '_' + loss.name + '_' + freeze_tag + '_' + pretrained_tag + '.h5' callbacks = [ tf.keras.callbacks.ModelCheckpoint(checkpoint_filename, save_weights_only=True, save_best_only=True, mode='min'), tf.keras.callbacks.ReduceLROnPlateau() ] # TRAINING history = model.fit( train_yarn_dataloader, validation_data=valid_yarn_dataloader, epochs=EPOCHS, callbacks=callbacks, ) # SAVING THE MODEL df_history = pd.DataFrame(history.history) df_history.to_csv(model.tag + '.csv') model.save(model.tag + '.h5') counter += 1 """
code
128009107/cell_11
[ "text_plain_output_1.png" ]
import cv2
from PIL import Image
import albumentations as A
import matplotlib.pyplot as plt
from tensorflow.keras.utils import Sequence
code
128009107/cell_1
[ "text_plain_output_1.png" ]
!pip install segmentation_models
!yes | pip install tensorflow==2.10
!yes | apt install --allow-change-held-packages libcudnn8=8.1.0.77-1+cuda11.2
code
128009107/cell_18
[ "text_plain_output_1.png" ]
import segmentation_models as sm
import tensorflow as tf
import tensorflow.keras as K
code
128009107/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import os
train_images_dir = 'train_images'
train_maps_dir = 'train_labels'
val_images_dir = 'valid_images'
val_maps_dir = 'valid_labels'
test_images_dir = 'test_images'
test_maps_dir = 'test_labels'
train_images = np.array(os.listdir(train_images_dir), dtype=object)
train_maps = np.array(os.listdir(train_maps_dir), dtype=object)
val_images = np.array(os.listdir(val_images_dir), dtype=object)
val_maps = np.array(os.listdir(val_maps_dir), dtype=object)
train_yarn_dataset = Dataset(train_images_dir, train_maps_dir)
valid_yarn_dataset = Dataset(val_images_dir, val_maps_dir)
sample_img, sample_map = train_yarn_dataset[8]
fig, ax = plt.subplots(1, 2, figsize=(15, 10))
ax[0].imshow(sample_img)
ax[1].imshow(sample_map * 255)
code
128009107/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
!cp -r /kaggle/input/crochet-samples-v3 /kaggle/working
os.chdir('/kaggle/working/crochet-samples-v3')
# train_images = np.array(os.listdir(train_images_dir), dtype = object)
retval = os.getcwd()
print("Current working directory %s" % retval)
code
105178983/cell_2
[ "text_plain_output_1.png" ]
def my_first_function():
    print('success')
my_first_function()
code
105178983/cell_11
[ "text_plain_output_1.png" ]
def calci(a, b, c):
    """
    fdgfd
    """
    if c == '+':
        return a + b
    elif c == '-':
        return a - b
    elif c == '/':
        return a / b
    elif c == '*':
        return a * b
cal = calci(5, 6, '/')
print(cal)
code
105178983/cell_18
[ "text_plain_output_1.png" ]
def upper_count(name):
    count = 0
    for i in name:
        if i.isupper():
            count = count + 1
    return count
def avg(*marks):
    count, total = (0, 0)
    for i in marks:
        total = total + i
        count = count + 1
    return total / count
a = avg(45, 43, 35, 67)
print(a)
code
105178983/cell_8
[ "text_plain_output_1.png" ]
def upper_count(name):
    count = 0
    for i in name:
        if i.isupper():
            count = count + 1
    return count
upper_count('My Name Is Adnan')
code
105178983/cell_15
[ "text_plain_output_1.png" ]
def my_salary(weekly_hrs, week, pay_per_hour=500):
    salary = weekly_hrs * week * pay_per_hour
    return salary
salary = my_salary(pay_per_hour=600, week=6, weekly_hrs=40)
salary = my_salary(4, 5)
print(salary)
code
105178983/cell_14
[ "text_plain_output_1.png" ]
def my_salary(weekly_hrs, week, pay_per_hour=500):
    salary = weekly_hrs * week * pay_per_hour
    return salary
salary = my_salary(pay_per_hour=600, week=6, weekly_hrs=40)
print(salary)
code
105178983/cell_5
[ "text_plain_output_1.png" ]
def add(a, b):
    return a + b
result = add(4, 5)
print(result)
code
106210927/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
INPUT_DIR = '/kaggle/input/sf-booking/'
df_train = pd.read_csv(INPUT_DIR + '/hotels_train.csv')
df_test = pd.read_csv(INPUT_DIR + 'hotels_test.csv')
sample_submission = pd.read_csv(INPUT_DIR + '/submission.csv')
df_train['sample'] = 1
df_test['sample'] = 0
df_test['reviewer_score'] = 0
hotels = pd.concat([df_train, df_test], ignore_index=True)
hotels.head(3)
code
106210927/cell_9
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
INPUT_DIR = '/kaggle/input/sf-booking/'
df_train = pd.read_csv(INPUT_DIR + '/hotels_train.csv')
df_test = pd.read_csv(INPUT_DIR + 'hotels_test.csv')
sample_submission = pd.read_csv(INPUT_DIR + '/submission.csv')
df_test.info()
code
106210927/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
INPUT_DIR = '/kaggle/input/sf-booking/'
df_train = pd.read_csv(INPUT_DIR + '/hotels_train.csv')
df_test = pd.read_csv(INPUT_DIR + 'hotels_test.csv')
sample_submission = pd.read_csv(INPUT_DIR + '/submission.csv')
sample_submission.head()
code