path stringlengths 13-17 | screenshot_names sequencelengths 1-873 | code stringlengths 0-40.4k | cell_type stringclasses 1 value |
---|---|---|---|
16124829/cell_21 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import seaborn as sns
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
sns.barplot(y=base['radius_mean'], x=base['diagnosis']) | code |
16124829/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
base['radius_mean'].mean() | code |
16124829/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns) | code |
16124829/cell_25 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import seaborn as sns
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
cor_base = base[['diagnosis', 'radius_mean', 'texture_mean', 'area_mean']]
cor_base
cor_base = base[['diagnosis', 'radius_mean', 'texture_mean', 'area_mean']]
sns.pairplot(cor_base, hue='diagnosis') | code |
16124829/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import seaborn as sns
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
cor_base = base[['diagnosis', 'radius_mean', 'texture_mean', 'area_mean']]
cor_base | code |
16124829/cell_30 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import seaborn as sns
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
base.isnull().sum()
from sklearn.preprocessing import LabelEncoder
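# Encode the categorical diagnosis column (B = benign, M = malignant) as integer labels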
lb = LabelEncoder()
base['diagnosis'] = lb.fit_transform(base['diagnosis'])
base['diagnosis'].head() | code |
16124829/cell_33 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import seaborn as sns
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
base.isnull().sum()
base.diagnosis.std()
base.diagnosis.mean() | code |
16124829/cell_20 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import seaborn as sns
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
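# Boxplot of each feature against diagnosis, one figure per column (diagnosis itself is skipped)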
for i in list(base.columns):
if i != 'diagnosis':
sns.boxplot(x=i, y='diagnosis', data=base)
plt.show() | code |
16124829/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base.head() | code |
16124829/cell_29 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import seaborn as sns
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
base.isnull().sum()
base['diagnosis'].value_counts() | code |
16124829/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
for i in a:
print('-', i) | code |
16124829/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import seaborn as sns
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
sns.boxplot(x='radius_mean', y='diagnosis', data=base) | code |
16124829/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import seaborn as sns
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
sns.distplot(base['texture_mean']) | code |
16124829/cell_32 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import seaborn as sns
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
base.isnull().sum()
base.diagnosis.std() | code |
16124829/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns | code |
16124829/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
base.describe() | code |
16124829/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
base.describe(include=['O']) | code |
16124829/cell_3 | [
"text_html_output_1.png"
] | import os
import os
print(os.listdir('../input')) | code |
16124829/cell_31 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import seaborn as sns
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
base.isnull().sum()
base['diagnosis'].value_counts() | code |
16124829/cell_24 | [
"image_output_11.png",
"image_output_24.png",
"image_output_25.png",
"image_output_17.png",
"image_output_30.png",
"image_output_14.png",
"image_output_28.png",
"image_output_23.png",
"image_output_13.png",
"image_output_5.png",
"image_output_18.png",
"image_output_21.png",
"image_output_7.png",
"image_output_31.png",
"image_output_20.png",
"image_output_4.png",
"image_output_8.png",
"image_output_16.png",
"image_output_27.png",
"image_output_6.png",
"image_output_12.png",
"image_output_22.png",
"image_output_3.png",
"image_output_29.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_15.png",
"image_output_9.png",
"image_output_19.png",
"image_output_26.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import seaborn as sns
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
cor_base = base[['diagnosis', 'radius_mean', 'texture_mean', 'area_mean']]
cor_base
cor_base = base[['diagnosis', 'radius_mean', 'texture_mean', 'area_mean']]
cor_base.head(5) | code |
16124829/cell_22 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import seaborn as sns
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
sns.scatterplot(x=base['area_mean'], y=base['perimeter_mean']) | code |
16124829/cell_27 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import seaborn as sns
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
base.isnull().sum() | code |
16124829/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import pandas as pd
base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
base['radius_mean'] | code |
88100273/cell_2 | [
"text_plain_output_1.png"
] | !pip install scanpy | code |
88100273/cell_5 | [
"text_plain_output_1.png",
"image_output_2.png",
"image_output_1.png"
] | from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
import scanpy as sc
import scipy
import seaborn as sns
import time
import time
dict_datasets_info = {'krumsiek11': 'Simulated myeloid progenitors [Krumsiek11].', 'moignard15': 'Hematopoiesis in early mouse embryos [Moignard15].', 'pbmc3k': '3k PBMCs from 10x Genomics', 'pbmc3k_processed': 'Processed 3k PBMCs from 10x Genomics.', 'pbmc68k_reduced': 'Subsampled and processed 68k PBMCs.', 'paul15': 'Development of Myeloid Progenitors [Paul15].'}
import time
from sklearn.decomposition import PCA
import scipy
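# For each selected scanpy dataset: load it, project the expression matrix onto 2 PCA components, and scatter-plot the cells (colored by n_counts when available); if cell-cycle scores exist, also plot S_score vs G2M_score colored by phase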
for dataset in ['pbmc68k_reduced']:
print(dataset, dict_datasets_info[dataset])
t0 = time.time()
adata = getattr(sc.datasets, dataset)()
print(np.round(time.time() - t0, 0), 'Seconds passed for loading')
print(adata)
print()
reducer = PCA(n_components=2)
if not scipy.sparse.issparse(adata.X):
r = reducer.fit_transform(adata.X)
else:
r = reducer.fit_transform(adata.X.toarray())
plt.figure(figsize=(20, 8))
if not 'n_counts' in adata.obs.columns:
sns.scatterplot(x=r[:, 0], y=r[:, 1])
else:
sns.scatterplot(x=r[:, 0], y=r[:, 1], hue=adata.obs['n_counts'])
plt.title(dataset + ' ' + str(len(adata)) + 'cells ' + '\n' + dict_datasets_info[dataset], fontsize=20)
plt.xlabel('PCA1', fontsize=20)
plt.ylabel('PCA2', fontsize=20)
plt.show()
if 'S_score' in adata.obs.columns and 'G2M_score' in adata.obs.columns and ('phase' in adata.obs.columns):
plt.figure(figsize=(20, 8))
sns.scatterplot(x=adata.obs['S_score'], y=adata.obs['G2M_score'], hue=adata.obs['phase'])
plt.title(dataset + ' ' + str(len(adata)) + 'cells ' + '\n' + dict_datasets_info[dataset], fontsize=20)
plt.xlabel('S_score', fontsize=20)
plt.ylabel('G2M_score', fontsize=20)
plt.show() | code |
32069383/cell_13 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly_express as px
df = pd.read_csv('../input/2019-world-happiness-report-csv-file/2019.csv')
df.shape
top_10 = df.iloc[0:10, 0:3]
top_10
fig = px.pie(top_10, values='Score', names='Country or region', color_discrete_sequence=px.colors.sequential.RdBu,
title='Top 10 Country and their score',
hover_data=['Overall rank'], labels={'Overall rank':'Overall rank'})
fig.update_traces(textposition='inside', textinfo='percent+label')
fig.show()
bottom_10 = df.iloc[142:152, 0:3]
bottom_10
bottom_10.shape
fig = px.pie(bottom_10, values='Score', names='Country or region', color_discrete_sequence=px.colors.sequential.RdBu, title='Bottom 10 Country and their score', hover_data=['Overall rank'], labels={'Overall rank': 'Overall rank'})
fig.update_traces(textposition='inside', textinfo='percent+label')
fig.show() | code |
1007542/cell_6 | [
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fivethirtyeight_ncaa_forecasts (2).csv')
matchups = [[str(x + 1), str(16 - x)] for x in range(8)]
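# First-round seed pairings: 1 vs 16, 2 vs 15, ..., 8 vs 9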
df = df[df.gender == 'mens']
pre = df[df.playin_flag == 1]
data = []
for region in pre.team_region.unique():
for seed in range(2, 17):
res = pre[(pre.team_region == region) & pre.team_seed.isin([str(seed) + 'a', str(seed) + 'b'])]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_rating, row.rd1_win])
post = df[df.playin_flag == 0]
for region in post.team_region.unique():
for matchup in matchups:
res = post[(post.team_region == region) & post.team_seed.isin(matchup)]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_rating, row.rd2_win])
match = pd.DataFrame(data, columns=['Team1_Rating', 'Team1_Prob', 'Team2_Rating', 'Team2_Prob'])
match['delta'] = match.Team1_Rating - match.Team2_Rating
match['win_extra'] = match.Team1_Prob - 0.5
sns.regplot('delta', 'win_extra', data=match, order=2) | code |
1007542/cell_7 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/fivethirtyeight_ncaa_forecasts (2).csv')
matchups = [[str(x + 1), str(16 - x)] for x in range(8)]
df = df[df.gender == 'mens']
pre = df[df.playin_flag == 1]
data = []
for region in pre.team_region.unique():
for seed in range(2, 17):
res = pre[(pre.team_region == region) & pre.team_seed.isin([str(seed) + 'a', str(seed) + 'b'])]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_rating, row.rd1_win])
post = df[df.playin_flag == 0]
for region in post.team_region.unique():
for matchup in matchups:
res = post[(post.team_region == region) & post.team_seed.isin(matchup)]
if res.shape[0] > 1:
data.append([])
for _, row in res.iterrows():
data[-1].extend([row.team_rating, row.rd2_win])
match = pd.DataFrame(data, columns=['Team1_Rating', 'Team1_Prob', 'Team2_Rating', 'Team2_Prob'])
match['delta'] = match.Team1_Rating - match.Team2_Rating
match['win_extra'] = match.Team1_Prob - 0.5
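# Fit a quadratic polynomial mapping the rating difference to the win probability above 50%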
poly = np.polyfit(match.delta, match.win_extra, 2)
poly | code |
1007542/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/fivethirtyeight_ncaa_forecasts (2).csv')
df.head() | code |
88101963/cell_23 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/scrabble-point-value/turns_train.csv')
tests = pd.read_csv('../input/scrabble-point-value/turns_test.csv')
games = pd.read_csv('../input/scrabble-point-value/games.csv')
sample_submission = pd.read_csv('../input/scrabble-point-value/sample_submission.csv')
scores = pd.read_csv('../input/scrabble-point-value/scores.csv')
betterBot2 = train[(train.nickname == 'BetterBot') & (train.turn_number < 21) & (train.game_id < 5000) & (train.turn_type == 'Play')]
betterBot2 = betterBot2.drop(['nickname', 'game_id', 'turn_type'], 1)
betterBot2
scrabble_points = {'A': 1, 'B': 3, 'C': 3, 'D': 2, 'E': 1, 'F': 4, 'G': 2, 'H': 4, 'I': 1, 'J': 8, 'K': 5, 'L': 1, 'M': 3, 'N': 1, 'O': 1, 'P': 3, 'Q': 10, 'R': 1, 'S': 1, 'T': 1, 'U': 1, 'V': 4, 'W': 4, 'X': 8, 'Y': 4, 'Z': 5, '?': 0, '(': 0, ')': 0, '.': 0}
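# Replace each rack and move string with the sum of its letters' point values; '?', '(', ')' and '.' count as 0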
for index in betterBot2.index:
code = 0
codeMove = 0
if isinstance(betterBot2.loc[index, 'rack'], str):
for letter in betterBot2.loc[index, 'rack']:
code += scrabble_points[letter]
betterBot2.loc[index, 'rack'] = int(code)
if isinstance(betterBot2.loc[index, 'move'], str):
for letter in betterBot2.loc[index, 'move']:
codeMove += scrabble_points[letter.upper()]
betterBot2.loc[index, 'move'] = int(codeMove)
betterBot2 = betterBot2[betterBot2.move.isnull() == False]
betterBot2.rack = betterBot2.rack.astype(int)
betterBot2.move = betterBot2.move.astype(int)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
le.fit(betterBot2.location.astype(str).sort_values())
betterBot2.location = le.transform(betterBot2.location)
corr_bb = betterBot2.corr()
plt.scatter(betterBot2.rack, betterBot2.points) | code |
88101963/cell_6 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/scrabble-point-value/turns_train.csv')
tests = pd.read_csv('../input/scrabble-point-value/turns_test.csv')
games = pd.read_csv('../input/scrabble-point-value/games.csv')
sample_submission = pd.read_csv('../input/scrabble-point-value/sample_submission.csv')
scores = pd.read_csv('../input/scrabble-point-value/scores.csv')
tests.head() | code |
88101963/cell_19 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/scrabble-point-value/turns_train.csv')
tests = pd.read_csv('../input/scrabble-point-value/turns_test.csv')
games = pd.read_csv('../input/scrabble-point-value/games.csv')
sample_submission = pd.read_csv('../input/scrabble-point-value/sample_submission.csv')
scores = pd.read_csv('../input/scrabble-point-value/scores.csv')
betterBot2 = train[(train.nickname == 'BetterBot') & (train.turn_number < 21) & (train.game_id < 5000) & (train.turn_type == 'Play')]
betterBot2 = betterBot2.drop(['nickname', 'game_id', 'turn_type'], 1)
betterBot2
scrabble_points = {'A': 1, 'B': 3, 'C': 3, 'D': 2, 'E': 1, 'F': 4, 'G': 2, 'H': 4, 'I': 1, 'J': 8, 'K': 5, 'L': 1, 'M': 3, 'N': 1, 'O': 1, 'P': 3, 'Q': 10, 'R': 1, 'S': 1, 'T': 1, 'U': 1, 'V': 4, 'W': 4, 'X': 8, 'Y': 4, 'Z': 5, '?': 0, '(': 0, ')': 0, '.': 0}
for index in betterBot2.index:
code = 0
codeMove = 0
if isinstance(betterBot2.loc[index, 'rack'], str):
for letter in betterBot2.loc[index, 'rack']:
code += scrabble_points[letter]
betterBot2.loc[index, 'rack'] = int(code)
if isinstance(betterBot2.loc[index, 'move'], str):
for letter in betterBot2.loc[index, 'move']:
codeMove += scrabble_points[letter.upper()]
betterBot2.loc[index, 'move'] = int(codeMove)
betterBot2 = betterBot2[betterBot2.move.isnull() == False]
betterBot2.rack = betterBot2.rack.astype(int)
betterBot2.move = betterBot2.move.astype(int)
betterBot2.info() | code |
88101963/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from keras.models import Sequential
from keras.layers import Dense, Dropout, BatchNormalization
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.metrics import mean_squared_error
from sklearn.svm import SVR
import tensorflow as tf
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
88101963/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/scrabble-point-value/turns_train.csv')
tests = pd.read_csv('../input/scrabble-point-value/turns_test.csv')
games = pd.read_csv('../input/scrabble-point-value/games.csv')
sample_submission = pd.read_csv('../input/scrabble-point-value/sample_submission.csv')
scores = pd.read_csv('../input/scrabble-point-value/scores.csv')
sample_submission.head() | code |
88101963/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/scrabble-point-value/turns_train.csv')
tests = pd.read_csv('../input/scrabble-point-value/turns_test.csv')
games = pd.read_csv('../input/scrabble-point-value/games.csv')
sample_submission = pd.read_csv('../input/scrabble-point-value/sample_submission.csv')
scores = pd.read_csv('../input/scrabble-point-value/scores.csv')
betterBot2 = train[(train.nickname == 'BetterBot') & (train.turn_number < 21) & (train.game_id < 5000) & (train.turn_type == 'Play')]
betterBot2 = betterBot2.drop(['nickname', 'game_id', 'turn_type'], 1)
betterBot2
scrabble_points = {'A': 1, 'B': 3, 'C': 3, 'D': 2, 'E': 1, 'F': 4, 'G': 2, 'H': 4, 'I': 1, 'J': 8, 'K': 5, 'L': 1, 'M': 3, 'N': 1, 'O': 1, 'P': 3, 'Q': 10, 'R': 1, 'S': 1, 'T': 1, 'U': 1, 'V': 4, 'W': 4, 'X': 8, 'Y': 4, 'Z': 5, '?': 0, '(': 0, ')': 0, '.': 0}
for index in betterBot2.index:
code = 0
codeMove = 0
if isinstance(betterBot2.loc[index, 'rack'], str):
for letter in betterBot2.loc[index, 'rack']:
code += scrabble_points[letter]
betterBot2.loc[index, 'rack'] = int(code)
if isinstance(betterBot2.loc[index, 'move'], str):
for letter in betterBot2.loc[index, 'move']:
codeMove += scrabble_points[letter.upper()]
betterBot2.loc[index, 'move'] = int(codeMove)
betterBot2 = betterBot2[betterBot2.move.isnull() == False]
betterBot2.rack = betterBot2.rack.astype(int)
betterBot2.move = betterBot2.move.astype(int) | code |
88101963/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/scrabble-point-value/turns_train.csv')
tests = pd.read_csv('../input/scrabble-point-value/turns_test.csv')
games = pd.read_csv('../input/scrabble-point-value/games.csv')
sample_submission = pd.read_csv('../input/scrabble-point-value/sample_submission.csv')
scores = pd.read_csv('../input/scrabble-point-value/scores.csv')
train.info() | code |
88101963/cell_22 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/scrabble-point-value/turns_train.csv')
tests = pd.read_csv('../input/scrabble-point-value/turns_test.csv')
games = pd.read_csv('../input/scrabble-point-value/games.csv')
sample_submission = pd.read_csv('../input/scrabble-point-value/sample_submission.csv')
scores = pd.read_csv('../input/scrabble-point-value/scores.csv')
betterBot2 = train[(train.nickname == 'BetterBot') & (train.turn_number < 21) & (train.game_id < 5000) & (train.turn_type == 'Play')]
betterBot2 = betterBot2.drop(['nickname', 'game_id', 'turn_type'], 1)
betterBot2
scrabble_points = {'A': 1, 'B': 3, 'C': 3, 'D': 2, 'E': 1, 'F': 4, 'G': 2, 'H': 4, 'I': 1, 'J': 8, 'K': 5, 'L': 1, 'M': 3, 'N': 1, 'O': 1, 'P': 3, 'Q': 10, 'R': 1, 'S': 1, 'T': 1, 'U': 1, 'V': 4, 'W': 4, 'X': 8, 'Y': 4, 'Z': 5, '?': 0, '(': 0, ')': 0, '.': 0}
for index in betterBot2.index:
code = 0
codeMove = 0
if isinstance(betterBot2.loc[index, 'rack'], str):
for letter in betterBot2.loc[index, 'rack']:
code += scrabble_points[letter]
betterBot2.loc[index, 'rack'] = int(code)
if isinstance(betterBot2.loc[index, 'move'], str):
for letter in betterBot2.loc[index, 'move']:
codeMove += scrabble_points[letter.upper()]
betterBot2.loc[index, 'move'] = int(codeMove)
betterBot2 = betterBot2[betterBot2.move.isnull() == False]
betterBot2.rack = betterBot2.rack.astype(int)
betterBot2.move = betterBot2.move.astype(int)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
le.fit(betterBot2.location.astype(str).sort_values())
betterBot2.location = le.transform(betterBot2.location)
corr_bb = betterBot2.corr()
plt.figure(figsize=(14, 12))
sns.heatmap(corr_bb, annot=True)
plt.show() | code |
88101963/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/scrabble-point-value/turns_train.csv')
tests = pd.read_csv('../input/scrabble-point-value/turns_test.csv')
games = pd.read_csv('../input/scrabble-point-value/games.csv')
sample_submission = pd.read_csv('../input/scrabble-point-value/sample_submission.csv')
scores = pd.read_csv('../input/scrabble-point-value/scores.csv')
betterBot2 = train[(train.nickname == 'BetterBot') & (train.turn_number < 21) & (train.game_id < 5000) & (train.turn_type == 'Play')]
betterBot2 = betterBot2.drop(['nickname', 'game_id', 'turn_type'], 1)
betterBot2 | code |
88101963/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/scrabble-point-value/turns_train.csv')
tests = pd.read_csv('../input/scrabble-point-value/turns_test.csv')
games = pd.read_csv('../input/scrabble-point-value/games.csv')
sample_submission = pd.read_csv('../input/scrabble-point-value/sample_submission.csv')
scores = pd.read_csv('../input/scrabble-point-value/scores.csv')
train.head() | code |
330925/cell_13 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/NationalNames.csv')
df['Decade'] = df['Year'].apply(lambda x: x - x % 10)
df_pivot = df.pivot_table(values='Count', index=['Decade', 'Name', 'Gender'], aggfunc='sum')
new_df = pd.DataFrame()
new_df['Decade'] = df_pivot.index.get_level_values('Decade')
new_df['Name'] = df_pivot.index.get_level_values('Name')
new_df['Gender'] = df_pivot.index.get_level_values('Gender')
new_df['Count'] = df_pivot.values
decadeList = list(new_df['Decade'].unique())
boys_percentileList = []
girls_percentileList = []
boys_df = new_df[new_df['Gender'] == 'M'].copy()
girls_df = new_df[new_df['Gender'] == 'F'].copy()
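# Min-max scale name counts within each decade, separately for boys and girls; scaled to 0-1 here and converted to 0-100 below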
for i in decadeList:
scaler = MinMaxScaler()
boys_percentileList.extend(scaler.fit_transform(boys_df[boys_df['Decade'] == i][['Count']]))
girls_percentileList.extend(scaler.fit_transform(girls_df[girls_df['Decade'] == i][['Count']]))
boys_df['decade_percentile'] = boys_percentileList
girls_df['decade_percentile'] = girls_percentileList
new_df = boys_df.append(girls_df)
new_df['decade_percentile'] = new_df['decade_percentile'].apply(lambda x: float(x) * 100)
new_df.sort_index(inplace=True)
del boys_df
del girls_df
plt.plot(new_df[(new_df['Name'] == 'John') & (new_df['Gender'] == 'M')]['Decade'], new_df[(new_df['Name'] == 'John') & (new_df['Gender'] == 'M')]['decade_percentile']) | code |
330925/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/NationalNames.csv')
df['Decade'] = df['Year'].apply(lambda x: x - x % 10)
df_pivot = df.pivot_table(values='Count', index=['Decade', 'Name', 'Gender'], aggfunc='sum')
new_df = pd.DataFrame()
new_df['Decade'] = df_pivot.index.get_level_values('Decade')
new_df['Name'] = df_pivot.index.get_level_values('Name')
new_df['Gender'] = df_pivot.index.get_level_values('Gender')
new_df['Count'] = df_pivot.values
new_df.head() | code |
330925/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/NationalNames.csv')
df.head() | code |
330925/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import pandas as pd
df = pd.read_csv('../input/NationalNames.csv')
df['Decade'] = df['Year'].apply(lambda x: x - x % 10)
df_pivot = df.pivot_table(values='Count', index=['Decade', 'Name', 'Gender'], aggfunc='sum')
new_df = pd.DataFrame()
new_df['Decade'] = df_pivot.index.get_level_values('Decade')
new_df['Name'] = df_pivot.index.get_level_values('Name')
new_df['Gender'] = df_pivot.index.get_level_values('Gender')
new_df['Count'] = df_pivot.values
decadeList = list(new_df['Decade'].unique())
boys_percentileList = []
girls_percentileList = []
boys_df = new_df[new_df['Gender'] == 'M'].copy()
girls_df = new_df[new_df['Gender'] == 'F'].copy()
for i in decadeList:
scaler = MinMaxScaler()
boys_percentileList.extend(scaler.fit_transform(boys_df[boys_df['Decade'] == i][['Count']]))
girls_percentileList.extend(scaler.fit_transform(girls_df[girls_df['Decade'] == i][['Count']]))
boys_df['decade_percentile'] = boys_percentileList
girls_df['decade_percentile'] = girls_percentileList
new_df = boys_df.append(girls_df)
new_df['decade_percentile'] = new_df['decade_percentile'].apply(lambda x: float(x) * 100)
new_df.sort_index(inplace=True)
del boys_df
del girls_df
def nameFilter(decade, gender, lowerBound, upperBound, startsWith=None):
"""
This function helps you find rare/common baby names!
Inputs:
decade : integer = Decade as a 4 digit number, e.g. 1980.
gender : string = Gender as a single letter string, e.g. 'M' for Male
lowerBound: float = Lower percentage of the names you want to query, e.g. 25 for 25%, NOT 0.25
upperBound: float = Upper percentage of the names you want to query
startsWith: str = (Optional) Single letter representing the starting letter of a name
Returns:
A dataframe slice fitting your parameters.
"""
if upperBound < lowerBound:
raise ValueError('lowerBound needs to be less than upperBound')
if startsWith != None:
result_df = new_df[(new_df['Decade'] == decade) & (new_df['Gender'] == gender) & (new_df['decade_percentile'] >= lowerBound) & (new_df['decade_percentile'] <= upperBound) & (new_df['Name'].str[0] == startsWith.upper())]
else:
result_df = new_df[(new_df['Decade'] == decade) & (new_df['Gender'] == gender) & (new_df['decade_percentile'] >= lowerBound) & (new_df['decade_percentile'] <= upperBound)]
return result_df
nameFilter(decade=1980, gender='M', lowerBound=50, upperBound=100, startsWith='C') | code |
330925/cell_19 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/NationalNames.csv')
df['Decade'] = df['Year'].apply(lambda x: x - x % 10)
df_pivot = df.pivot_table(values='Count', index=['Decade', 'Name', 'Gender'], aggfunc='sum')
new_df = pd.DataFrame()
new_df['Decade'] = df_pivot.index.get_level_values('Decade')
new_df['Name'] = df_pivot.index.get_level_values('Name')
new_df['Gender'] = df_pivot.index.get_level_values('Gender')
new_df['Count'] = df_pivot.values
decadeList = list(new_df['Decade'].unique())
boys_percentileList = []
girls_percentileList = []
boys_df = new_df[new_df['Gender'] == 'M'].copy()
girls_df = new_df[new_df['Gender'] == 'F'].copy()
for i in decadeList:
scaler = MinMaxScaler()
boys_percentileList.extend(scaler.fit_transform(boys_df[boys_df['Decade'] == i][['Count']]))
girls_percentileList.extend(scaler.fit_transform(girls_df[girls_df['Decade'] == i][['Count']]))
boys_df['decade_percentile'] = boys_percentileList
girls_df['decade_percentile'] = girls_percentileList
new_df = boys_df.append(girls_df)
new_df['decade_percentile'] = new_df['decade_percentile'].apply(lambda x: float(x) * 100)
new_df.sort_index(inplace=True)
del boys_df
del girls_df
plt.figure()
sns.distplot(new_df[new_df['Gender'] == 'M']['decade_percentile'], bins=100)
plt.xlim(xmin=0, xmax=100)
plt.title('Boys Name Popularity Distribution')
plt.figure()
sns.distplot(new_df[new_df['Gender'] == 'F']['decade_percentile'], bins=100)
plt.xlim(xmin=0, xmax=100)
plt.title('Girls Name Popularity Distribution')
plt.show() | code |
330925/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/NationalNames.csv')
df['Decade'] = df['Year'].apply(lambda x: x - x % 10)
df.tail() | code |
330925/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import pandas as pd
df = pd.read_csv('../input/NationalNames.csv')
df['Decade'] = df['Year'].apply(lambda x: x - x % 10)
df_pivot = df.pivot_table(values='Count', index=['Decade', 'Name', 'Gender'], aggfunc='sum')
new_df = pd.DataFrame()
new_df['Decade'] = df_pivot.index.get_level_values('Decade')
new_df['Name'] = df_pivot.index.get_level_values('Name')
new_df['Gender'] = df_pivot.index.get_level_values('Gender')
new_df['Count'] = df_pivot.values
decadeList = list(new_df['Decade'].unique())
boys_percentileList = []
girls_percentileList = []
boys_df = new_df[new_df['Gender'] == 'M'].copy()
girls_df = new_df[new_df['Gender'] == 'F'].copy()
for i in decadeList:
scaler = MinMaxScaler()
boys_percentileList.extend(scaler.fit_transform(boys_df[boys_df['Decade'] == i][['Count']]))
girls_percentileList.extend(scaler.fit_transform(girls_df[girls_df['Decade'] == i][['Count']]))
boys_df['decade_percentile'] = boys_percentileList
girls_df['decade_percentile'] = girls_percentileList
new_df = boys_df.append(girls_df)
new_df['decade_percentile'] = new_df['decade_percentile'].apply(lambda x: float(x) * 100)
new_df.sort_index(inplace=True)
del boys_df
del girls_df
new_df[new_df['decade_percentile'] >= 99.0] | code |
330925/cell_17 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import pandas as pd
df = pd.read_csv('../input/NationalNames.csv')
df['Decade'] = df['Year'].apply(lambda x: x - x % 10)
df_pivot = df.pivot_table(values='Count', index=['Decade', 'Name', 'Gender'], aggfunc='sum')
new_df = pd.DataFrame()
new_df['Decade'] = df_pivot.index.get_level_values('Decade')
new_df['Name'] = df_pivot.index.get_level_values('Name')
new_df['Gender'] = df_pivot.index.get_level_values('Gender')
new_df['Count'] = df_pivot.values
decadeList = list(new_df['Decade'].unique())
boys_percentileList = []
girls_percentileList = []
boys_df = new_df[new_df['Gender'] == 'M'].copy()
girls_df = new_df[new_df['Gender'] == 'F'].copy()
for i in decadeList:
scaler = MinMaxScaler()
boys_percentileList.extend(scaler.fit_transform(boys_df[boys_df['Decade'] == i][['Count']]))
girls_percentileList.extend(scaler.fit_transform(girls_df[girls_df['Decade'] == i][['Count']]))
boys_df['decade_percentile'] = boys_percentileList
girls_df['decade_percentile'] = girls_percentileList
new_df = boys_df.append(girls_df)
new_df['decade_percentile'] = new_df['decade_percentile'].apply(lambda x: float(x) * 100)
new_df.sort_index(inplace=True)
del boys_df
del girls_df
new_df[new_df['decade_percentile'] < 1] | code |
330925/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/NationalNames.csv')
print('Data year ranges from {} to {}'.format(min(df['Year']), max(df['Year']))) | code |
130004949/cell_13 | [
"text_plain_output_1.png"
] | from collections import ChainMap
from esm.model.esm2 import ESM2
from multiprocess import Pool
from tqdm import tqdm
from tqdm import tqdm
from tqdm import tqdm
import esm
import gc
import gc
import numpy as np
import numpy as np
import os
import os
import os
import pandas as pd
import pandas as pd
import re
import tensorflow as tf
import torch
import torch.distributed as dist
import warnings
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import gc
from tqdm import tqdm
import pickle
import os
import warnings
warnings.filterwarnings('ignore')
train_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
train_peptides = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_peptides.csv')
train_proteins = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
supplemental_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/supplemental_clinical_data.csv')
peptideIndexLabels = ['visit_id', 'patient_id', 'UniProt']
train_peptides.set_index(peptideIndexLabels, drop=True)
peptideData = train_peptides.merge(train_proteins, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='inner').drop_duplicates()
peptides_per_patient = peptideData.groupby('patient_id')['Peptide'].nunique()
import numpy as np
import pandas as pd
from tqdm import tqdm
from multiprocess import Pool
import os
import tensorflow as tf
peptideData['Peptide oxidation score'] = 0
peptideData['Protein oxidation score'] = 0
peptideData['Peptide carbamidomethylation score'] = 0
peptideData['Protein carbamidomethylation score'] = 0
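# For each (patient, protein) group: strip UniMod tags from the peptide strings, score oxidation (UniMod_35) and carbamidomethylation (UniMod_4) per peptide, and average those scores over the group as protein-level scores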
def preprocessPeptides(args):
(patientID, proteinID), currentPeptides = args
peptideCount = len(currentPeptides)
if peptideCount == 0:
return currentPeptides
def deserializeUniMod(peptide, scoreAndRemove=['UniMod_35', 'UniMod_4']):
oxidationScore, carbamidomethylationScore = (0, 0)
for string in scoreAndRemove:
while string in peptide:
peptide = peptide.replace(f'({string})', '', 1)
if string == 'UniMod_35':
oxidationScore += np.log(len(peptide))
elif string == 'UniMod_4':
carbamidomethylationScore += np.log(len(peptide))
return pd.Series([peptide, oxidationScore, carbamidomethylationScore])
currentPeptides[['Peptide', 'Peptide oxidation score', 'Peptide carbamidomethylation score']] = currentPeptides['Peptide'].apply(deserializeUniMod)
normalized_carbamidomethylation_score = currentPeptides['Peptide carbamidomethylation score'].sum() / peptideCount
normalized_oxidation_score = currentPeptides['Peptide oxidation score'].sum() / peptideCount
currentPeptides['Protein oxidation score'] = normalized_oxidation_score
currentPeptides['Protein carbamidomethylation score'] = normalized_carbamidomethylation_score
return currentPeptides
grouped_peptides = peptideData.groupby(['patient_id', 'UniProt'])
with Pool(os.cpu_count()) as pool:
preprocessed_peptides = pool.map(preprocessPeptides, [(group, data) for group, data in grouped_peptides])
peptideData = pd.concat(preprocessed_peptides).reset_index(drop=True)
len([(row[0], row[0]) for row in peptideData.groupby(['Peptide'])])
import multiprocess
import gc
import os
import torch
import torch.distributed as dist
import esm
from esm.model.esm2 import ESM2
from tqdm import tqdm
import re
def upgrade_state_dict(state_dict):
"""Removes prefixes 'model.encoder.sentence_encoder.' and 'model.encoder.'."""
prefixes = ['encoder.sentence_encoder.', 'encoder.']
pattern = re.compile('^' + '|'.join(prefixes))
state_dict = {pattern.sub('', name): param for name, param in state_dict.items()}
return state_dict
model_path = '/kaggle/input/fair-esm/esm2_t33_650M_UR50D.pt'
token_layer = 33
url = 'tcp://localhost:23456'
dist.init_process_group(backend='gloo', init_method=url, world_size=1, rank=0)
model_data = torch.load(str(model_path), map_location='cpu')
cfg = model_data['cfg']['model']
state_dict = model_data['model']
state_dict = upgrade_state_dict(state_dict)
vocab = esm.data.Alphabet.from_architecture('ESM-1b')
model = ESM2(num_layers=cfg.encoder_layers, embed_dim=cfg.encoder_embed_dim, attention_heads=cfg.encoder_attention_heads, alphabet=vocab, token_dropout=cfg.token_dropout)
batch_converter = vocab.get_batch_converter()
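# Each unique peptide string is paired with itself as a (label, sequence) tuple for the ESM batch converter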
data = [(row[0], row[0]) for row in peptideData.groupby(['Peptide'])]
batch_labels, batch_strs, batch_tokens = batch_converter(data)
batch_lens = (batch_tokens != vocab.padding_idx).sum(1)
if torch.cuda.is_available():
model.cuda().eval()
batch_tokens = batch_tokens.cuda()
else:
model.eval()
with torch.no_grad():
results = model(batch_tokens, repr_layers=[token_layer], return_contacts=False)
token_representations = results['representations'][token_layer].to('cpu')
if torch.cuda.is_available():
torch.cuda.empty_cache()
gc.collect()
dist.destroy_process_group()
sequence_representations = {}
for (i, tokens_len), peptide, label in tqdm(zip(enumerate(batch_lens), token_representations, data), unit='peptide', total=len(batch_lens)):
sequence_representations[label[0]] = tf.convert_to_tensor(token_representations[i, 1:tokens_len - 1].numpy())
del token_representations
del batch_lens
del model
del batch_strs
del batch_converter
del batch_tokens
del results
del state_dict
del cfg
del model_data
del data
gc.collect()
from collections import ChainMap
stringColumns = ['Peptide', 'UniProt', 'visit_id']
stringToManuallyEncodeColumns = ['upd23b_clinical_state_on_medication']
numericColumns = ['visit_month', 'patient_id', 'PeptideAbundance', 'NPX', 'Peptide oxidation score', 'Protein oxidation score', 'Peptide carbamidomethylation score', 'Protein carbamidomethylation score', 'updrs_1', 'updrs_2', 'updrs_3', 'updrs_4']
target = ['updrs_1', 'updrs_2', 'updrs_3', 'updrs_4']
lookupDict = {}
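# Character-level StringLookup per string column: the vocabulary is the set of characters seen in that column's unique values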
for name in stringColumns:
vocab = list({char for item in peptideData[name].unique() for char in item})
lookupDict[name] = tf.keras.layers.StringLookup(vocabulary=vocab, output_mode='int')
def generate_embedding(column, embeddingMethod, dataframe=peptideData):
grouped = dataframe.groupby(column)
for value, _ in tqdm(grouped.groups.items(), unit='embedding'):
yield {value: [embeddingMethod(char) for char in value]}
print('Generating embeddings...')
peptide_embeddings = dict(ChainMap(*generate_embedding('Peptide', lookupDict['Peptide'])))
uniprot_embeddings = dict(ChainMap(*generate_embedding('UniProt', lookupDict['UniProt'])))
visit_id_embeddings = dict(ChainMap(*generate_embedding('visit_id', lookupDict['visit_id']))) | code |
130004949/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from multiprocess import Pool
import numpy as np
import numpy as np
import os
import os
import pandas as pd
import pandas as pd
import warnings
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import gc
from tqdm import tqdm
import pickle
import os
import warnings
warnings.filterwarnings('ignore')
train_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
train_peptides = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_peptides.csv')
train_proteins = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
supplemental_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/supplemental_clinical_data.csv')
peptideIndexLabels = ['visit_id', 'patient_id', 'UniProt']
train_peptides.set_index(peptideIndexLabels, drop=True)
peptideData = train_peptides.merge(train_proteins, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='inner').drop_duplicates()
peptides_per_patient = peptideData.groupby('patient_id')['Peptide'].nunique()
import numpy as np
import pandas as pd
from tqdm import tqdm
from multiprocess import Pool
import os
import tensorflow as tf
peptideData['Peptide oxidation score'] = 0
peptideData['Protein oxidation score'] = 0
peptideData['Peptide carbamidomethylation score'] = 0
peptideData['Protein carbamidomethylation score'] = 0
def preprocessPeptides(args):
(patientID, proteinID), currentPeptides = args
peptideCount = len(currentPeptides)
if peptideCount == 0:
return currentPeptides
def deserializeUniMod(peptide, scoreAndRemove=['UniMod_35', 'UniMod_4']):
oxidationScore, carbamidomethylationScore = (0, 0)
for string in scoreAndRemove:
while string in peptide:
peptide = peptide.replace(f'({string})', '', 1)
if string == 'UniMod_35':
oxidationScore += np.log(len(peptide))
elif string == 'UniMod_4':
carbamidomethylationScore += np.log(len(peptide))
return pd.Series([peptide, oxidationScore, carbamidomethylationScore])
currentPeptides[['Peptide', 'Peptide oxidation score', 'Peptide carbamidomethylation score']] = currentPeptides['Peptide'].apply(deserializeUniMod)
normalized_carbamidomethylation_score = currentPeptides['Peptide carbamidomethylation score'].sum() / peptideCount
normalized_oxidation_score = currentPeptides['Peptide oxidation score'].sum() / peptideCount
currentPeptides['Protein oxidation score'] = normalized_oxidation_score
currentPeptides['Protein carbamidomethylation score'] = normalized_carbamidomethylation_score
return currentPeptides
print('Preprocessing peptides...')
grouped_peptides = peptideData.groupby(['patient_id', 'UniProt'])
with Pool(os.cpu_count()) as pool:
preprocessed_peptides = pool.map(preprocessPeptides, [(group, data) for group, data in grouped_peptides])
print('Concatenating...')
peptideData = pd.concat(preprocessed_peptides).reset_index(drop=True)
print('Done!') | code |
130004949/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import warnings
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import gc
from tqdm import tqdm
import pickle
import os
import warnings
warnings.filterwarnings('ignore')
train_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
train_peptides = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_peptides.csv')
train_proteins = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
supplemental_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/supplemental_clinical_data.csv')
peptideIndexLabels = ['visit_id', 'patient_id', 'UniProt']
train_peptides.set_index(peptideIndexLabels, drop=True)
peptideData = train_peptides.merge(train_proteins, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='inner').drop_duplicates()
peptideData | code |
130004949/cell_11 | [
"text_html_output_1.png"
] | from multiprocess import Pool
import numpy as np
import numpy as np
import os
import os
import pandas as pd
import pandas as pd
import warnings
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import gc
from tqdm import tqdm
import pickle
import os
import warnings
warnings.filterwarnings('ignore')
train_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
train_peptides = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_peptides.csv')
train_proteins = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
supplemental_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/supplemental_clinical_data.csv')
peptideIndexLabels = ['visit_id', 'patient_id', 'UniProt']
train_peptides.set_index(peptideIndexLabels, drop=True)
peptideData = train_peptides.merge(train_proteins, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='inner').drop_duplicates()
peptides_per_patient = peptideData.groupby('patient_id')['Peptide'].nunique()
import numpy as np
import pandas as pd
from tqdm import tqdm
from multiprocess import Pool
import os
import tensorflow as tf
peptideData['Peptide oxidation score'] = 0
peptideData['Protein oxidation score'] = 0
peptideData['Peptide carbamidomethylation score'] = 0
peptideData['Protein carbamidomethylation score'] = 0
def preprocessPeptides(args):
(patientID, proteinID), currentPeptides = args
peptideCount = len(currentPeptides)
if peptideCount == 0:
return currentPeptides
def deserializeUniMod(peptide, scoreAndRemove=['UniMod_35', 'UniMod_4']):
oxidationScore, carbamidomethylationScore = (0, 0)
for string in scoreAndRemove:
while string in peptide:
peptide = peptide.replace(f'({string})', '', 1)
if string == 'UniMod_35':
oxidationScore += np.log(len(peptide))
elif string == 'UniMod_4':
carbamidomethylationScore += np.log(len(peptide))
return pd.Series([peptide, oxidationScore, carbamidomethylationScore])
currentPeptides[['Peptide', 'Peptide oxidation score', 'Peptide carbamidomethylation score']] = currentPeptides['Peptide'].apply(deserializeUniMod)
normalized_carbamidomethylation_score = currentPeptides['Peptide carbamidomethylation score'].sum() / peptideCount
normalized_oxidation_score = currentPeptides['Peptide oxidation score'].sum() / peptideCount
currentPeptides['Protein oxidation score'] = normalized_oxidation_score
currentPeptides['Protein carbamidomethylation score'] = normalized_carbamidomethylation_score
return currentPeptides
grouped_peptides = peptideData.groupby(['patient_id', 'UniProt'])
with Pool(os.cpu_count()) as pool:
preprocessed_peptides = pool.map(preprocessPeptides, [(group, data) for group, data in grouped_peptides])
peptideData = pd.concat(preprocessed_peptides).reset_index(drop=True)
len([(row[0], row[0]) for row in peptideData.groupby(['Peptide'])]) | code |
130004949/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
import warnings
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import gc
from tqdm import tqdm
import pickle
import os
import warnings
warnings.filterwarnings('ignore')
train_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
train_peptides = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_peptides.csv')
train_proteins = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
supplemental_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/supplemental_clinical_data.csv')
peptideIndexLabels = ['visit_id', 'patient_id', 'UniProt']
train_peptides.set_index(peptideIndexLabels, drop=True)
peptideData = train_peptides.merge(train_proteins, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='inner').drop_duplicates()
len(peptideData['Peptide'].unique()) | code |
130004949/cell_8 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
import warnings
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import gc
from tqdm import tqdm
import pickle
import os
import warnings
warnings.filterwarnings('ignore')
train_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
train_peptides = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_peptides.csv')
train_proteins = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
supplemental_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/supplemental_clinical_data.csv')
peptideIndexLabels = ['visit_id', 'patient_id', 'UniProt']
train_peptides.set_index(peptideIndexLabels, drop=True)
peptideData = train_peptides.merge(train_proteins, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='inner').drop_duplicates()
peptides_per_patient = peptideData.groupby('patient_id')['Peptide'].nunique()
print(peptides_per_patient) | code |
130004949/cell_16 | [
"text_plain_output_1.png"
] | from esm.model.esm2 import ESM2
from multiprocess import Pool
from tqdm import tqdm
from tqdm import tqdm
from tqdm import tqdm
import esm
import gc
import gc
import numpy as np
import numpy as np
import os
import os
import os
import pandas as pd
import pandas as pd
import re
import tensorflow as tf
import torch
import torch.distributed as dist
import warnings
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import gc
from tqdm import tqdm
import pickle
import os
import warnings
warnings.filterwarnings('ignore')
train_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
train_peptides = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_peptides.csv')
train_proteins = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
supplemental_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/supplemental_clinical_data.csv')
peptideIndexLabels = ['visit_id', 'patient_id', 'UniProt']
train_peptides.set_index(peptideIndexLabels, drop=True)
peptideData = train_peptides.merge(train_proteins, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='inner').drop_duplicates()
peptides_per_patient = peptideData.groupby('patient_id')['Peptide'].nunique()
import numpy as np
import pandas as pd
from tqdm import tqdm
from multiprocess import Pool
import os
import tensorflow as tf
peptideData['Peptide oxidation score'] = 0
peptideData['Protein oxidation score'] = 0
peptideData['Peptide carbamidomethylation score'] = 0
peptideData['Protein carbamidomethylation score'] = 0
def preprocessPeptides(args):
(patientID, proteinID), currentPeptides = args
peptideCount = len(currentPeptides)
if peptideCount == 0:
return currentPeptides
def deserializeUniMod(peptide, scoreAndRemove=['UniMod_35', 'UniMod_4']):
oxidationScore, carbamidomethylationScore = (0, 0)
for string in scoreAndRemove:
while string in peptide:
peptide = peptide.replace(f'({string})', '', 1)
if string == 'UniMod_35':
oxidationScore += np.log(len(peptide))
elif string == 'UniMod_4':
carbamidomethylationScore += np.log(len(peptide))
return pd.Series([peptide, oxidationScore, carbamidomethylationScore])
currentPeptides[['Peptide', 'Peptide oxidation score', 'Peptide carbamidomethylation score']] = currentPeptides['Peptide'].apply(deserializeUniMod)
normalized_carbamidomethylation_score = currentPeptides['Peptide carbamidomethylation score'].sum() / peptideCount
normalized_oxidation_score = currentPeptides['Peptide oxidation score'].sum() / peptideCount
currentPeptides['Protein oxidation score'] = normalized_oxidation_score
currentPeptides['Protein carbamidomethylation score'] = normalized_carbamidomethylation_score
return currentPeptides
grouped_peptides = peptideData.groupby(['patient_id', 'UniProt'])
with Pool(os.cpu_count()) as pool:
preprocessed_peptides = pool.map(preprocessPeptides, [(group, data) for group, data in grouped_peptides])
peptideData = pd.concat(preprocessed_peptides).reset_index(drop=True)
len([(row[0], row[0]) for row in peptideData.groupby(['Peptide'])])
import multiprocess
import gc
import os
import torch
import torch.distributed as dist
import esm
from esm.model.esm2 import ESM2
from tqdm import tqdm
import re
def upgrade_state_dict(state_dict):
"""Removes prefixes 'model.encoder.sentence_encoder.' and 'model.encoder.'."""
prefixes = ['encoder.sentence_encoder.', 'encoder.']
pattern = re.compile('^' + '|'.join(prefixes))
state_dict = {pattern.sub('', name): param for name, param in state_dict.items()}
return state_dict
model_path = '/kaggle/input/fair-esm/esm2_t33_650M_UR50D.pt'
token_layer = 33
url = 'tcp://localhost:23456'
dist.init_process_group(backend='gloo', init_method=url, world_size=1, rank=0)
model_data = torch.load(str(model_path), map_location='cpu')
cfg = model_data['cfg']['model']
state_dict = model_data['model']
state_dict = upgrade_state_dict(state_dict)
vocab = esm.data.Alphabet.from_architecture('ESM-1b')
model = ESM2(num_layers=cfg.encoder_layers, embed_dim=cfg.encoder_embed_dim, attention_heads=cfg.encoder_attention_heads, alphabet=vocab, token_dropout=cfg.token_dropout)
batch_converter = vocab.get_batch_converter()
data = [(row[0], row[0]) for row in peptideData.groupby(['Peptide'])]
batch_labels, batch_strs, batch_tokens = batch_converter(data)
batch_lens = (batch_tokens != vocab.padding_idx).sum(1)
if torch.cuda.is_available():
model.cuda().eval()
batch_tokens = batch_tokens.cuda()
else:
model.eval()
with torch.no_grad():
results = model(batch_tokens, repr_layers=[token_layer], return_contacts=False)
token_representations = results['representations'][token_layer].to('cpu')
if torch.cuda.is_available():
torch.cuda.empty_cache()
gc.collect()
dist.destroy_process_group()
sequence_representations = {}
for (i, tokens_len), peptide, label in tqdm(zip(enumerate(batch_lens), token_representations, data), unit='peptide', total=len(batch_lens)):
sequence_representations[label[0]] = tf.convert_to_tensor(token_representations[i, 1:tokens_len - 1].numpy())
del token_representations
del batch_lens
del model
del batch_strs
del batch_converter
del batch_tokens
del results
del state_dict
del cfg
del model_data
del data
gc.collect()
indexLabels = ['visit_id', 'visit_month', 'patient_id']
clinicalData = train_clinical_data
allData = peptideData.merge(clinicalData, on=indexLabels)
peptidesByRowIndex = allData['Peptide'].copy()
allData | code |
130004949/cell_3 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import missingno as msno
import pandas as pd
import warnings
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import gc
from tqdm import tqdm
import pickle
import os
import warnings
warnings.filterwarnings('ignore')
train_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
train_peptides = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_peptides.csv')
train_proteins = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
supplemental_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/supplemental_clinical_data.csv')
import missingno as msno
msno.matrix(train_clinical_data)
msno.matrix(supplemental_clinical_data)
msno.matrix(train_peptides)
msno.matrix(train_proteins) | code |
130004949/cell_17 | [
"text_plain_output_1.png"
] | from collections import ChainMap
from esm.model.esm2 import ESM2
from multiprocess import Pool
from sklearn.preprocessing import MinMaxScaler
from tqdm import tqdm
from tqdm import tqdm
from tqdm import tqdm
import esm
import gc
import gc
import networkx as nx
import numpy as np
import numpy as np
import os
import os
import os
import pandas as pd
import pandas as pd
import re
import tensorflow as tf
import torch
import torch.distributed as dist
import warnings
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import gc
from tqdm import tqdm
import pickle
import os
import warnings
warnings.filterwarnings('ignore')
train_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
train_peptides = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_peptides.csv')
train_proteins = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
supplemental_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/supplemental_clinical_data.csv')
peptideIndexLabels = ['visit_id', 'patient_id', 'UniProt']
train_peptides.set_index(peptideIndexLabels, drop=True)
peptideData = train_peptides.merge(train_proteins, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='inner').drop_duplicates()
peptides_per_patient = peptideData.groupby('patient_id')['Peptide'].nunique()
import numpy as np
import pandas as pd
from tqdm import tqdm
from multiprocess import Pool
import os
import tensorflow as tf
peptideData['Peptide oxidation score'] = 0
peptideData['Protein oxidation score'] = 0
peptideData['Peptide carbamidomethylation score'] = 0
peptideData['Protein carbamidomethylation score'] = 0
def preprocessPeptides(args):
(patientID, proteinID), currentPeptides = args
peptideCount = len(currentPeptides)
if peptideCount == 0:
return currentPeptides
def deserializeUniMod(peptide, scoreAndRemove=['UniMod_35', 'UniMod_4']):
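# UniMod_35 tags oxidation and UniMod_4 tags carbamidomethylation; each tag found adds log(len(peptide)) to the matching score and is stripped from the sequence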
oxidationScore, carbamidomethylationScore = (0, 0)
for string in scoreAndRemove:
while string in peptide:
peptide = peptide.replace(f'({string})', '', 1)
if string == 'UniMod_35':
oxidationScore += np.log(len(peptide))
elif string == 'UniMod_4':
carbamidomethylationScore += np.log(len(peptide))
return pd.Series([peptide, oxidationScore, carbamidomethylationScore])
currentPeptides[['Peptide', 'Peptide oxidation score', 'Peptide carbamidomethylation score']] = currentPeptides['Peptide'].apply(deserializeUniMod)
normalized_carbamidomethylation_score = currentPeptides['Peptide carbamidomethylation score'].sum() / peptideCount
normalized_oxidation_score = currentPeptides['Peptide oxidation score'].sum() / peptideCount
currentPeptides['Protein oxidation score'] = normalized_oxidation_score
currentPeptides['Protein carbamidomethylation score'] = normalized_carbamidomethylation_score
return currentPeptides
grouped_peptides = peptideData.groupby(['patient_id', 'UniProt'])
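# Clean every (patient_id, UniProt) peptide group in parallel across all available CPU cores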
with Pool(os.cpu_count()) as pool:
preprocessed_peptides = pool.map(preprocessPeptides, [(group, data) for group, data in grouped_peptides])
peptideData = pd.concat(preprocessed_peptides).reset_index(drop=True)
len([(row[0], row[0]) for row in peptideData.groupby(['Peptide'])])
import multiprocess
import gc
import os
import torch
import torch.distributed as dist
import esm
from esm.model.esm2 import ESM2
from tqdm import tqdm
import re
def upgrade_state_dict(state_dict):
"""Removes prefixes 'model.encoder.sentence_encoder.' and 'model.encoder.'."""
prefixes = ['encoder.sentence_encoder.', 'encoder.']
pattern = re.compile('^' + '|'.join(prefixes))
state_dict = {pattern.sub('', name): param for name, param in state_dict.items()}
return state_dict
model_path = '/kaggle/input/fair-esm/esm2_t33_650M_UR50D.pt'
token_layer = 33
url = 'tcp://localhost:23456'
dist.init_process_group(backend='gloo', init_method=url, world_size=1, rank=0)
model_data = torch.load(str(model_path), map_location='cpu')
cfg = model_data['cfg']['model']
state_dict = model_data['model']
state_dict = upgrade_state_dict(state_dict)
vocab = esm.data.Alphabet.from_architecture('ESM-1b')
model = ESM2(num_layers=cfg.encoder_layers, embed_dim=cfg.encoder_embed_dim, attention_heads=cfg.encoder_attention_heads, alphabet=vocab, token_dropout=cfg.token_dropout)
batch_converter = vocab.get_batch_converter()
data = [(row[0], row[0]) for row in peptideData.groupby(['Peptide'])]
batch_labels, batch_strs, batch_tokens = batch_converter(data)
batch_lens = (batch_tokens != vocab.padding_idx).sum(1)
if torch.cuda.is_available():
model.cuda().eval()
batch_tokens = batch_tokens.cuda()
else:
model.eval()
with torch.no_grad():
results = model(batch_tokens, repr_layers=[token_layer], return_contacts=False)
token_representations = results['representations'][token_layer].to('cpu')
if torch.cuda.is_available():
torch.cuda.empty_cache()
gc.collect()
dist.destroy_process_group()
sequence_representations = {}
for (i, tokens_len), peptide, label in tqdm(zip(enumerate(batch_lens), token_representations, data), unit='peptide', total=len(batch_lens)):
sequence_representations[label[0]] = tf.convert_to_tensor(token_representations[i, 1:tokens_len - 1].numpy())
del token_representations
del batch_lens
del model
del batch_strs
del batch_converter
del batch_tokens
del results
del state_dict
del cfg
del model_data
del data
gc.collect()
from collections import ChainMap
stringColumns = ['Peptide', 'UniProt', 'visit_id']
stringToManuallyEncodeColumns = ['upd23b_clinical_state_on_medication']
numericColumns = ['visit_month', 'patient_id', 'PeptideAbundance', 'NPX', 'Peptide oxidation score', 'Protein oxidation score', 'Peptide carbamidomethylation score', 'Protein carbamidomethylation score', 'updrs_1', 'updrs_2', 'updrs_3', 'updrs_4']
target = ['updrs_1', 'updrs_2', 'updrs_3', 'updrs_4']
lookupDict = {}
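# Build a character-level StringLookup vocabulary for each string column so identifiers and sequences can be encoded as integer ids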
for name in stringColumns:
vocab = list({char for item in peptideData[name].unique() for char in item})
lookupDict[name] = tf.keras.layers.StringLookup(vocabulary=vocab, output_mode='int')
def generate_embedding(column, embeddingMethod, dataframe=peptideData):
grouped = dataframe.groupby(column)
for value, _ in tqdm(grouped.groups.items(), unit='embedding'):
yield {value: [embeddingMethod(char) for char in value]}
peptide_embeddings = dict(ChainMap(*generate_embedding('Peptide', lookupDict['Peptide'])))
uniprot_embeddings = dict(ChainMap(*generate_embedding('UniProt', lookupDict['UniProt'])))
visit_id_embeddings = dict(ChainMap(*generate_embedding('visit_id', lookupDict['visit_id'])))
indexLabels = ['visit_id', 'visit_month', 'patient_id']
clinicalData = train_clinical_data
allData = peptideData.merge(clinicalData, on=indexLabels)
peptidesByRowIndex = allData['Peptide'].copy()
import networkx as nx
from sklearn.preprocessing import MinMaxScaler
G = nx.Graph()
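# Heterogeneous graph: visit, protein, and peptide nodes; edge attributes carry expression levels and modification scores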
print('Normalizing numeric columns...')
scaler = MinMaxScaler()
numericColumns = ['PeptideAbundance', 'NPX', 'Peptide oxidation score', 'Protein oxidation score', 'Peptide carbamidomethylation score', 'Protein carbamidomethylation score']
allData[numericColumns] = scaler.fit_transform(allData[numericColumns])
visit_to_index = {}
index_to_visit = {}
current_index = 0
target = ['updrs_1', 'updrs_2', 'updrs_3', 'updrs_4']
num_targets = len(target)
labels = np.zeros((len(allData['visit_id'].unique()), num_targets), dtype=np.float32)
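# One row per unique visit holding the four UPDRS targets, filled in below via visit_to_index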
protein_to_index = {}
index_to_protein = {}
peptide_to_index = {}
index_to_peptide = {}
next_protein_index = 0
next_peptide_index = 0
for index, row in tqdm(allData.iterrows(), total=len(allData), unit='row'):
visit_month = row['visit_month']
patient_id = row['patient_id']
visit_id = str(row['visit_id'])
if not G.has_node(visit_id):
G.add_node(visit_id, node_type='visit')
visit_to_index[str(visit_id)] = current_index
index_to_visit[current_index] = visit_id
current_index += 1
protein_id = row['UniProt']
if protein_id not in protein_to_index:
G.add_node(protein_id, node_type='protein')
protein_to_index[protein_id] = next_protein_index
index_to_protein[next_protein_index] = protein_id
next_protein_index += 1
peptide_id = row['Peptide']
if peptide_id not in peptide_to_index:
G.add_node(peptide_id, peptide_embedding=sequence_representations[peptide_id], node_type='peptide')
peptide_to_index[peptide_id] = next_peptide_index
index_to_peptide[next_peptide_index] = peptide_id
next_peptide_index += 1
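# Wire up this row's triangle: visit-protein and visit-peptide edges weighted by expression and modification scores, plus a plain protein-peptide edge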
G.add_edge(visit_id, protein_id, edge_type='visit_protein', protein_carb_weight=row['Protein carbamidomethylation score'], protein_oxi_weight=row['Protein oxidation score'], protein_expression_weight=row['NPX'])
G.add_edge(visit_id, peptide_id, edge_type='visit_peptide', peptide_carb_weight=row['Peptide carbamidomethylation score'], peptide_oxi_weight=row['Peptide oxidation score'], peptide_expression_weight=row['PeptideAbundance'])
G.add_edge(protein_id, peptide_id, edge_type='protein_peptide')
index = visit_to_index[str(visit_id)]
for i, updrs in enumerate(target):
labels[index, i] = row[updrs] | code |
130004949/cell_14 | [
"text_plain_output_1.png"
] | from esm.model.esm2 import ESM2
from multiprocess import Pool
from tqdm import tqdm
from tqdm import tqdm
from tqdm import tqdm
import esm
import gc
import gc
import numpy as np
import numpy as np
import os
import os
import os
import pandas as pd
import pandas as pd
import re
import tensorflow as tf
import torch
import torch.distributed as dist
import warnings
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import gc
from tqdm import tqdm
import pickle
import os
import warnings
warnings.filterwarnings('ignore')
train_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
train_peptides = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_peptides.csv')
train_proteins = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
supplemental_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/supplemental_clinical_data.csv')
peptideIndexLabels = ['visit_id', 'patient_id', 'UniProt']
train_peptides.set_index(peptideIndexLabels, drop=True)
peptideData = train_peptides.merge(train_proteins, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='inner').drop_duplicates()
peptides_per_patient = peptideData.groupby('patient_id')['Peptide'].nunique()
import numpy as np
import pandas as pd
from tqdm import tqdm
from multiprocess import Pool
import os
import tensorflow as tf
peptideData['Peptide oxidation score'] = 0
peptideData['Protein oxidation score'] = 0
peptideData['Peptide carbamidomethylation score'] = 0
peptideData['Protein carbamidomethylation score'] = 0
def preprocessPeptides(args):
(patientID, proteinID), currentPeptides = args
peptideCount = len(currentPeptides)
if peptideCount == 0:
return currentPeptides
def deserializeUniMod(peptide, scoreAndRemove=['UniMod_35', 'UniMod_4']):
oxidationScore, carbamidomethylationScore = (0, 0)
for string in scoreAndRemove:
while string in peptide:
peptide = peptide.replace(f'({string})', '', 1)
if string == 'UniMod_35':
oxidationScore += np.log(len(peptide))
elif string == 'UniMod_4':
carbamidomethylationScore += np.log(len(peptide))
return pd.Series([peptide, oxidationScore, carbamidomethylationScore])
currentPeptides[['Peptide', 'Peptide oxidation score', 'Peptide carbamidomethylation score']] = currentPeptides['Peptide'].apply(deserializeUniMod)
normalized_carbamidomethylation_score = currentPeptides['Peptide carbamidomethylation score'].sum() / peptideCount
normalized_oxidation_score = currentPeptides['Peptide oxidation score'].sum() / peptideCount
currentPeptides['Protein oxidation score'] = normalized_oxidation_score
currentPeptides['Protein carbamidomethylation score'] = normalized_carbamidomethylation_score
return currentPeptides
grouped_peptides = peptideData.groupby(['patient_id', 'UniProt'])
with Pool(os.cpu_count()) as pool:
preprocessed_peptides = pool.map(preprocessPeptides, [(group, data) for group, data in grouped_peptides])
peptideData = pd.concat(preprocessed_peptides).reset_index(drop=True)
len([(row[0], row[0]) for row in peptideData.groupby(['Peptide'])])
import multiprocess
import gc
import os
import torch
import torch.distributed as dist
import esm
from esm.model.esm2 import ESM2
from tqdm import tqdm
import re
def upgrade_state_dict(state_dict):
"""Removes prefixes 'model.encoder.sentence_encoder.' and 'model.encoder.'."""
prefixes = ['encoder.sentence_encoder.', 'encoder.']
pattern = re.compile('^' + '|'.join(prefixes))
state_dict = {pattern.sub('', name): param for name, param in state_dict.items()}
return state_dict
model_path = '/kaggle/input/fair-esm/esm2_t33_650M_UR50D.pt'
token_layer = 33
url = 'tcp://localhost:23456'
dist.init_process_group(backend='gloo', init_method=url, world_size=1, rank=0)
model_data = torch.load(str(model_path), map_location='cpu')
cfg = model_data['cfg']['model']
state_dict = model_data['model']
state_dict = upgrade_state_dict(state_dict)
vocab = esm.data.Alphabet.from_architecture('ESM-1b')
model = ESM2(num_layers=cfg.encoder_layers, embed_dim=cfg.encoder_embed_dim, attention_heads=cfg.encoder_attention_heads, alphabet=vocab, token_dropout=cfg.token_dropout)
batch_converter = vocab.get_batch_converter()
data = [(row[0], row[0]) for row in peptideData.groupby(['Peptide'])]
batch_labels, batch_strs, batch_tokens = batch_converter(data)
batch_lens = (batch_tokens != vocab.padding_idx).sum(1)
if torch.cuda.is_available():
model.cuda().eval()
batch_tokens = batch_tokens.cuda()
else:
model.eval()
with torch.no_grad():
results = model(batch_tokens, repr_layers=[token_layer], return_contacts=False)
token_representations = results['representations'][token_layer].to('cpu')
if torch.cuda.is_available():
torch.cuda.empty_cache()
gc.collect()
dist.destroy_process_group()
sequence_representations = {}
for (i, tokens_len), peptide, label in tqdm(zip(enumerate(batch_lens), token_representations, data), unit='peptide', total=len(batch_lens)):
sequence_representations[label[0]] = tf.convert_to_tensor(token_representations[i, 1:tokens_len - 1].numpy())
del token_representations
del batch_lens
del model
del batch_strs
del batch_converter
del batch_tokens
del results
del state_dict
del cfg
del model_data
del data
gc.collect()
len(peptideData['Peptide'].unique()) | code |
130004949/cell_10 | [
"image_output_4.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | !pip install --no-index --no-deps /kaggle/input/fair-esm/fair_esm-2.0.0-py3-none-any.whl | code |
130004949/cell_12 | [
"text_plain_output_1.png"
] | from esm.model.esm2 import ESM2
from multiprocess import Pool
from tqdm import tqdm
from tqdm import tqdm
from tqdm import tqdm
import esm
import gc
import gc
import numpy as np
import numpy as np
import os
import os
import os
import pandas as pd
import pandas as pd
import re
import tensorflow as tf
import torch
import torch.distributed as dist
import warnings
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import gc
from tqdm import tqdm
import pickle
import os
import warnings
warnings.filterwarnings('ignore')
train_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv')
train_peptides = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_peptides.csv')
train_proteins = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/train_proteins.csv')
supplemental_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-progression-prediction/supplemental_clinical_data.csv')
peptideIndexLabels = ['visit_id', 'patient_id', 'UniProt']
train_peptides.set_index(peptideIndexLabels, drop=True)
peptideData = train_peptides.merge(train_proteins, on=['visit_id', 'visit_month', 'patient_id', 'UniProt'], how='inner').drop_duplicates()
peptides_per_patient = peptideData.groupby('patient_id')['Peptide'].nunique()
import numpy as np
import pandas as pd
from tqdm import tqdm
from multiprocess import Pool
import os
import tensorflow as tf
peptideData['Peptide oxidation score'] = 0
peptideData['Protein oxidation score'] = 0
peptideData['Peptide carbamidomethylation score'] = 0
peptideData['Protein carbamidomethylation score'] = 0
def preprocessPeptides(args):
(patientID, proteinID), currentPeptides = args
peptideCount = len(currentPeptides)
if peptideCount == 0:
return currentPeptides
def deserializeUniMod(peptide, scoreAndRemove=['UniMod_35', 'UniMod_4']):
oxidationScore, carbamidomethylationScore = (0, 0)
for string in scoreAndRemove:
while string in peptide:
peptide = peptide.replace(f'({string})', '', 1)
if string == 'UniMod_35':
oxidationScore += np.log(len(peptide))
elif string == 'UniMod_4':
carbamidomethylationScore += np.log(len(peptide))
return pd.Series([peptide, oxidationScore, carbamidomethylationScore])
currentPeptides[['Peptide', 'Peptide oxidation score', 'Peptide carbamidomethylation score']] = currentPeptides['Peptide'].apply(deserializeUniMod)
normalized_carbamidomethylation_score = currentPeptides['Peptide carbamidomethylation score'].sum() / peptideCount
normalized_oxidation_score = currentPeptides['Peptide oxidation score'].sum() / peptideCount
currentPeptides['Protein oxidation score'] = normalized_oxidation_score
currentPeptides['Protein carbamidomethylation score'] = normalized_carbamidomethylation_score
return currentPeptides
grouped_peptides = peptideData.groupby(['patient_id', 'UniProt'])
with Pool(os.cpu_count()) as pool:
preprocessed_peptides = pool.map(preprocessPeptides, [(group, data) for group, data in grouped_peptides])
peptideData = pd.concat(preprocessed_peptides).reset_index(drop=True)
len([(row[0], row[0]) for row in peptideData.groupby(['Peptide'])])
import multiprocess
import gc
import os
import torch
import torch.distributed as dist
import esm
from esm.model.esm2 import ESM2
from tqdm import tqdm
import re
def upgrade_state_dict(state_dict):
"""Removes prefixes 'model.encoder.sentence_encoder.' and 'model.encoder.'."""
prefixes = ['encoder.sentence_encoder.', 'encoder.']
pattern = re.compile('^' + '|'.join(prefixes))
state_dict = {pattern.sub('', name): param for name, param in state_dict.items()}
return state_dict
model_path = '/kaggle/input/fair-esm/esm2_t33_650M_UR50D.pt'
token_layer = 33
url = 'tcp://localhost:23456'
dist.init_process_group(backend='gloo', init_method=url, world_size=1, rank=0)
print('Loading model...')
model_data = torch.load(str(model_path), map_location='cpu')
cfg = model_data['cfg']['model']
state_dict = model_data['model']
state_dict = upgrade_state_dict(state_dict)
vocab = esm.data.Alphabet.from_architecture('ESM-1b')
model = ESM2(num_layers=cfg.encoder_layers, embed_dim=cfg.encoder_embed_dim, attention_heads=cfg.encoder_attention_heads, alphabet=vocab, token_dropout=cfg.token_dropout)
batch_converter = vocab.get_batch_converter()
print('Loading peptides...')
data = [(row[0], row[0]) for row in peptideData.groupby(['Peptide'])]
batch_labels, batch_strs, batch_tokens = batch_converter(data)
batch_lens = (batch_tokens != vocab.padding_idx).sum(1)
if torch.cuda.is_available():
model.cuda().eval()
batch_tokens = batch_tokens.cuda()
else:
model.eval()
with torch.no_grad():
results = model(batch_tokens, repr_layers=[token_layer], return_contacts=False)
token_representations = results['representations'][token_layer].to('cpu')
if torch.cuda.is_available():
torch.cuda.empty_cache()
gc.collect()
dist.destroy_process_group()
sequence_representations = {}
for (i, tokens_len), peptide, label in tqdm(zip(enumerate(batch_lens), token_representations, data), unit='peptide', total=len(batch_lens)):
sequence_representations[label[0]] = tf.convert_to_tensor(token_representations[i, 1:tokens_len - 1].numpy())
del token_representations
del batch_lens
del model
del batch_strs
del batch_converter
del batch_tokens
del results
del state_dict
del cfg
del model_data
del data
gc.collect()
print('Done!') | code |
33112981/cell_6 | [
"text_plain_output_1.png"
] | import json
import re # Regular expressions
testDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/test/'
def readTaskFile(filename):
f = open(filename, 'r')
data = json.loads(f.read())
data['id'] = re.sub('(.*/)|(\\.json)', '', filename)
f.close()
return data
filename = testDirectory + '3b4c2228.json'
readTaskFile(filename) | code |
33112981/cell_11 | [
"text_plain_output_1.png"
] | f2 = open('submission.csv', 'r')
print(f2.read())
f2.close() | code |
33112981/cell_8 | [
"text_plain_output_1.png"
] | import json
import re # Regular expressions
def flattener(pred):
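# Serialize a 2-D grid into the ARC submission string, e.g. [[1, 2], [3, 4]] -> '|12|34|'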
str_pred = str([row for row in pred])
str_pred = str_pred.replace(', ', '')
str_pred = str_pred.replace('[[', '|')
str_pred = str_pred.replace('][', '|')
str_pred = str_pred.replace(']]', '|')
return str_pred
testDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/test/'
def readTaskFile(filename):
f = open(filename, 'r')
data = json.loads(f.read())
data['id'] = re.sub('(.*/)|(\\.json)', '', filename)
f.close()
return data
filename = testDirectory + '3b4c2228.json'
readTaskFile(filename)
def getNoopAnswer(filename):
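# Identity baseline: predict each test input grid unchanged, keyed as '<task_id>_<test_index>'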
data = readTaskFile(filename)
testSection = data['test']
ident = data['id']
numTests = len(testSection)
answer = {}
for i in range(numTests):
answer[ident + '_' + str(i)] = flattener(testSection[i]['input'])
return answer
filename = testDirectory + '3b4c2228.json'
getNoopAnswer(filename) | code |
32070001/cell_2 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
16168087/cell_4 | [
"image_output_1.png"
] | from fastai.vision import *  # fastai v1 star import; provides Path, ImageDataBunch, create_cnn, and related helpers
base_dir = '../input/cell_images/cell_images/'
base_path = Path(base_dir)
base_path | code |
16168087/cell_20 | [
"image_output_1.png"
] | from fastai.vision import *  # fastai v1 star import; provides Path, ImageDataBunch, create_cnn, and related helpers
base_dir = '../input/cell_images/cell_images/'
base_path = Path(base_dir)
base_path
data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats)
learner = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learner.lr_find()
learner.fit_one_cycle(10, max_lr=slice(0.0001, 0.001))
learner.save('stage-1')
learner.unfreeze()
learner.fit_one_cycle(2)
learner.lr_find()
learner.recorder.plot() | code |
16168087/cell_6 | [
"image_output_1.png"
] | from fastai.vision import *  # fastai v1 star import; provides Path, ImageDataBunch, create_cnn, and related helpers
base_dir = '../input/cell_images/cell_images/'
base_path = Path(base_dir)
base_path
data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats)
print(f'Classes to classify: \n {data.classes}')
data.show_batch(rows=5, figsize=(7, 7)) | code |
16168087/cell_26 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from fastai.vision import *  # fastai v1 star import; provides Path, ImageDataBunch, create_cnn, and related helpers
base_dir = '../input/cell_images/cell_images/'
base_path = Path(base_dir)
base_path
data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats)
learner = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learner.lr_find()
learner.fit_one_cycle(10, max_lr=slice(0.0001, 0.001))
learner.save('stage-1')
inter = ClassificationInterpretation.from_learner(learner)
learner.unfreeze()
learner.fit_one_cycle(2)
learner.lr_find()
learner.fit_one_cycle(5, max_lr=slice(1e-06, 0.001))
inter = ClassificationInterpretation.from_learner(learner)
inter.plot_top_losses(9, figsize=(20, 20)) | code |
16168087/cell_18 | [
"image_output_1.png"
] | from fastai.vision import *  # fastai v1 star import; provides Path, ImageDataBunch, create_cnn, and related helpers
base_dir = '../input/cell_images/cell_images/'
base_path = Path(base_dir)
base_path
data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats)
learner = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learner.lr_find()
learner.fit_one_cycle(10, max_lr=slice(0.0001, 0.001))
learner.save('stage-1')
learner.unfreeze()
learner.fit_one_cycle(2) | code |
16168087/cell_28 | [
"text_html_output_1.png"
] | from fastai.vision import *  # fastai v1 star import; provides Path, ImageDataBunch, create_cnn, and related helpers
base_dir = '../input/cell_images/cell_images/'
base_path = Path(base_dir)
base_path
data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats)
learner = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learner.lr_find()
learner.fit_one_cycle(10, max_lr=slice(0.0001, 0.001))
learner.save('stage-1')
inter = ClassificationInterpretation.from_learner(learner)
learner.unfreeze()
learner.fit_one_cycle(2)
learner.lr_find()
learner.fit_one_cycle(5, max_lr=slice(1e-06, 0.001))
inter = ClassificationInterpretation.from_learner(learner)
inter.plot_confusion_matrix(figsize=(10, 10), dpi=75)
learner.save('malaria-fastai-V1') | code |
16168087/cell_8 | [
"image_output_1.png"
] | from fastai.vision import *  # fastai v1 star import; provides Path, ImageDataBunch, create_cnn, and related helpers
base_dir = '../input/cell_images/cell_images/'
base_path = Path(base_dir)
base_path
data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats)
learner = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learner.lr_find()
learner.recorder.plot() | code |
16168087/cell_16 | [
"text_html_output_1.png"
] | from fastai.vision import *  # fastai v1 star import; provides Path, ImageDataBunch, create_cnn, and related helpers
base_dir = '../input/cell_images/cell_images/'
base_path = Path(base_dir)
base_path
data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats)
learner = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learner.lr_find()
learner.fit_one_cycle(10, max_lr=slice(0.0001, 0.001))
learner.save('stage-1')
inter = ClassificationInterpretation.from_learner(learner)
inter.plot_confusion_matrix(figsize=(10, 10), dpi=75) | code |
16168087/cell_24 | [
"text_html_output_1.png"
] | from fastai.vision import *  # fastai v1 star import; provides Path, ImageDataBunch, create_cnn, and related helpers
base_dir = '../input/cell_images/cell_images/'
base_path = Path(base_dir)
base_path
data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats)
learner = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learner.lr_find()
learner.fit_one_cycle(10, max_lr=slice(0.0001, 0.001))
learner.save('stage-1')
learner.unfreeze()
learner.fit_one_cycle(2)
learner.lr_find()
learner.fit_one_cycle(5, max_lr=slice(1e-06, 0.001))
learner.recorder.plot_losses() | code |
16168087/cell_14 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from fastai.vision import *  # fastai v1 star import; provides Path, ImageDataBunch, create_cnn, and related helpers
base_dir = '../input/cell_images/cell_images/'
base_path = Path(base_dir)
base_path
data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats)
learner = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learner.lr_find()
learner.fit_one_cycle(10, max_lr=slice(0.0001, 0.001))
learner.save('stage-1')
inter = ClassificationInterpretation.from_learner(learner)
inter.plot_top_losses(9, figsize=(20, 20)) | code |
16168087/cell_22 | [
"image_output_1.png"
] | from fastai.vision import *  # fastai v1 star import; provides Path, ImageDataBunch, create_cnn, and related helpers
base_dir = '../input/cell_images/cell_images/'
base_path = Path(base_dir)
base_path
data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats)
learner = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learner.lr_find()
learner.fit_one_cycle(10, max_lr=slice(0.0001, 0.001))
learner.save('stage-1')
learner.unfreeze()
learner.fit_one_cycle(2)
learner.lr_find()
learner.fit_one_cycle(5, max_lr=slice(1e-06, 0.001)) | code |
16168087/cell_10 | [
"text_plain_output_1.png"
] | from fastai.vision import *  # fastai v1 star import; provides Path, ImageDataBunch, create_cnn, and related helpers
base_dir = '../input/cell_images/cell_images/'
base_path = Path(base_dir)
base_path
data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats)
learner = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learner.lr_find()
learner.fit_one_cycle(10, max_lr=slice(0.0001, 0.001))
learner.save('stage-1') | code |
16168087/cell_12 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from fastai.vision import *  # fastai v1 star import; provides Path, ImageDataBunch, create_cnn, and related helpers
base_dir = '../input/cell_images/cell_images/'
base_path = Path(base_dir)
base_path
data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats)
learner = create_cnn(data, models.resnet50, metrics=accuracy, model_dir='/tmp/model/')
learner.lr_find()
learner.fit_one_cycle(10, max_lr=slice(0.0001, 0.001))
learner.save('stage-1')
learner.recorder.plot_losses() | code |
88105225/cell_9 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
tweets = pd.read_csv('/kaggle/input/twitter-sentiment-analysis-10/Sentiment140.tenPercent.sample.tweets.tsv', delimiter='\t')
tweets.isnull().values.any().sum()
print('Shape before removing duplicate rows:', tweets.shape)
tweets = tweets.drop_duplicates()
print('Shape after removing duplicate rows:', tweets.shape) | code |
88105225/cell_33 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import emoji
import re # regular expression operations
import wordninja
stop_words = stopwords.words('english')
abbreviations = {'a.m.': 'before midday', 'acct': 'account', 'afaik': 'as far as i know', 'afk': 'away from keyboard', 'app': 'application', 'approx': 'approximately', 'apps': 'applications', 'asap': 'as soon as possible', 'asl': 'age, sex, location', 'ave.': 'avenue', 'b.c': 'before christ', 'b2b': 'business to business', 'b4': 'before', 'bae': 'before anyone else', 'bak': 'back at keyboard', 'bbl': 'be back later', 'bbs': 'be back soon', 'be4': 'before', 'blvd': 'boulevard', 'bout': 'about', 'brb': 'be right back', 'btw': 'by the way', 'c/o': 'care of', 'cf': 'compare', 'cia': 'central intelligence agency', 'cu': 'see you', 'cya': 'see you', 'dae': 'does anyone else', 'diy': 'do it yourself', 'dm': 'direct message', 'eet': 'eastern european time', 'eg': 'example', 'encl.': 'enclosed', 'etc': 'and so on', 'fb': 'facebook', 'fc': 'fingers crossed', 'fig': 'figure', 'fwiw': 'for what it is worth', 'fyi': 'for your information', 'ft.': 'feet', 'ft': 'featuring', 'ftl': 'for the loss', 'ftw': 'for the win', 'gal': 'get a life', 'gcse': 'general certificate of secondary education', 'gg': 'good game', 'gl': 'good luck', 'gmt': 'greenwich mean time', 'gn': 'good night', 'goat': 'greatest of all time', 'gps': 'global positioning system', 'gr8': 'great', 'gratz': 'congratulations', 'gyal': 'girl', 'irl': 'in real life', 'hp': 'horsepower', 'hr': 'hour', 'ht': 'height', 'ic': 'i see', 'idc': 'i do not care', 'idgaf': 'i do not give a fuck', 'idk': 'i do not know', 'ie': 'that is', 'iirc': 'if i remember correctly', 'ilu': 'i love you', 'ily': 'i love you', 'imho': 'in my humble opinion', 'imo': 'in my opinion', 'iow': 'in other words', 'jk': 'just kidding', 'l8r': 'later', 'lb': 'pound', 'lbs': 'pounds', 'lmao': 'laugh my ass off', 'lmfao': 'laugh my fucking ass off', 'lol': 'laughing out loud', 'ltd': 'limited', 'm8': 'mate', 'mf': 'motherfucker', 'mofo': 'motherfucker', 'mph': 'miles per hour', 'mr': 'mister', 'ms': 'miss', 'mte': 'my thoughts exactly', 'nbc': 'national broadcasting company', 'nfs': 'not for sale', 'ngl': 'not going to lie', 'nhs': 'national health service', 'nsfw': 'not safe for work', 'rt': 'retweet', 'nth': 'nice to have', 'nvr': 'never', 'nyc': 'new york city', 'oc': 'original content', 'og': 'original', 'oic': 'oh i see', 'omg': 'oh my god', 'omw': 'on my way', 'p.m': 'after midday', 'poc': 'people of color', 'pov': 'point of view', 'pp': 'pages', 'ppl': 'people', 'ps': 'postscript', 'pt': 'point', 'pto': 'please turn over', 'ratchet': 'rude', 'roflol': 'rolling on the floor laughing out loud', 'sk8': 'skate', 'smh': 'shake my head', 'sq': 'square', 'tbh': 'to be honest', 'tbs': 'tablespooful', 'thks': 'thank you', 'tho': 'though', 'thx': 'thank you', 'tia': 'thanks in advance', 'til': 'today i learned', 'tl;dr': 'too long i did not read', 'u2': 'you too', 'w/': 'with', 'tmb': 'tweet me back', 'ttyl': 'talk to you later', 'u': 'you', 'w/o': 'without', 'w8': 'wait', 'wassup': 'what is up', 'wb': 'welcome back', 'wtf': 'what the fuck', 'wtg': 'way to go', 'some1': 'someone', 'yrs': 'years', 'hrs': 'hours', '2morow': 'tomorrow', '2moro': 'tomorrow', '2day': 'today', '4got': 'forget', 'bday': 'birthday', 'b-day': 'birthday'}
def preprocess(text, wordLemm):
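# Cleaning pipeline applied to every tweet: lowercase, replace URLs with 'url', turn emoji into text,
# replace @mentions with 'user', split concatenated words with wordninja, expand common abbreviations,
# strip non-letter characters, collapse runs of three or more repeated characters, and lemmatize words
# longer than one character.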
processedText = []
for tweet in text:
tweet = tweet.lower()
tweet = re.sub('((http://)[^ ]*|(https://)[^ ]*|( www\\.)[^ ]*)', 'url', tweet)
tweet = emoji.demojize(tweet)
tweet = re.sub('@[^\\s]+', 'user', tweet)
splitted = wordninja.split(tweet)
tweet = ' '.join((word for word in splitted))
for s in abbreviations.keys():
tweet = tweet.replace(' ' + s, ' ' + abbreviations[s])
tweet = re.sub('[^a-z\\s]', '', tweet)
tweet = re.sub('(.)\\1\\1+', '\\1', tweet)
tweetwords = ''
for word in tweet.split():
if len(word) > 1:
word = wordLemm.lemmatize(word)
tweetwords += word + ' '
processedText.append(tweetwords)
return processedText
stop = stopwords.words('english')
vect = TfidfVectorizer(ngram_range=(1, 2), max_features=1000000, stop_words=stop)
vect.fit(X_train)
X_train_tfid = vect.transform(X_train)
X_test_tfid = vect.transform(X_test)
log_clf = LogisticRegression(C=2, max_iter=1000, n_jobs=-1)
log_clf.fit(X_train_tfid, y_train)
log_pred = log_clf.predict(X_test_tfid)
print(classification_report(y_test, log_pred)) | code |
88105225/cell_20 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from wordcloud import WordCloud
import emoji
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re # regular expression operations
import wordninja
tweets = pd.read_csv('/kaggle/input/twitter-sentiment-analysis-10/Sentiment140.tenPercent.sample.tweets.tsv', delimiter='\t')
tweets.isnull().values.any().sum()
tweets = tweets.drop_duplicates()
stop_words = stopwords.words('english')
abbreviations = {'a.m.': 'before midday', 'acct': 'account', 'afaik': 'as far as i know', 'afk': 'away from keyboard', 'app': 'application', 'approx': 'approximately', 'apps': 'applications', 'asap': 'as soon as possible', 'asl': 'age, sex, location', 'ave.': 'avenue', 'b.c': 'before christ', 'b2b': 'business to business', 'b4': 'before', 'bae': 'before anyone else', 'bak': 'back at keyboard', 'bbl': 'be back later', 'bbs': 'be back soon', 'be4': 'before', 'blvd': 'boulevard', 'bout': 'about', 'brb': 'be right back', 'btw': 'by the way', 'c/o': 'care of', 'cf': 'compare', 'cia': 'central intelligence agency', 'cu': 'see you', 'cya': 'see you', 'dae': 'does anyone else', 'diy': 'do it yourself', 'dm': 'direct message', 'eet': 'eastern european time', 'eg': 'example', 'encl.': 'enclosed', 'etc': 'and so on', 'fb': 'facebook', 'fc': 'fingers crossed', 'fig': 'figure', 'fwiw': 'for what it is worth', 'fyi': 'for your information', 'ft.': 'feet', 'ft': 'featuring', 'ftl': 'for the loss', 'ftw': 'for the win', 'gal': 'get a life', 'gcse': 'general certificate of secondary education', 'gg': 'good game', 'gl': 'good luck', 'gmt': 'greenwich mean time', 'gn': 'good night', 'goat': 'greatest of all time', 'gps': 'global positioning system', 'gr8': 'great', 'gratz': 'congratulations', 'gyal': 'girl', 'irl': 'in real life', 'hp': 'horsepower', 'hr': 'hour', 'ht': 'height', 'ic': 'i see', 'idc': 'i do not care', 'idgaf': 'i do not give a fuck', 'idk': 'i do not know', 'ie': 'that is', 'iirc': 'if i remember correctly', 'ilu': 'i love you', 'ily': 'i love you', 'imho': 'in my humble opinion', 'imo': 'in my opinion', 'iow': 'in other words', 'jk': 'just kidding', 'l8r': 'later', 'lb': 'pound', 'lbs': 'pounds', 'lmao': 'laugh my ass off', 'lmfao': 'laugh my fucking ass off', 'lol': 'laughing out loud', 'ltd': 'limited', 'm8': 'mate', 'mf': 'motherfucker', 'mofo': 'motherfucker', 'mph': 'miles per hour', 'mr': 'mister', 'ms': 'miss', 'mte': 'my thoughts exactly', 'nbc': 'national broadcasting company', 'nfs': 'not for sale', 'ngl': 'not going to lie', 'nhs': 'national health service', 'nsfw': 'not safe for work', 'rt': 'retweet', 'nth': 'nice to have', 'nvr': 'never', 'nyc': 'new york city', 'oc': 'original content', 'og': 'original', 'oic': 'oh i see', 'omg': 'oh my god', 'omw': 'on my way', 'p.m': 'after midday', 'poc': 'people of color', 'pov': 'point of view', 'pp': 'pages', 'ppl': 'people', 'ps': 'postscript', 'pt': 'point', 'pto': 'please turn over', 'ratchet': 'rude', 'roflol': 'rolling on the floor laughing out loud', 'sk8': 'skate', 'smh': 'shake my head', 'sq': 'square', 'tbh': 'to be honest', 'tbs': 'tablespooful', 'thks': 'thank you', 'tho': 'though', 'thx': 'thank you', 'tia': 'thanks in advance', 'til': 'today i learned', 'tl;dr': 'too long i did not read', 'u2': 'you too', 'w/': 'with', 'tmb': 'tweet me back', 'ttyl': 'talk to you later', 'u': 'you', 'w/o': 'without', 'w8': 'wait', 'wassup': 'what is up', 'wb': 'welcome back', 'wtf': 'what the fuck', 'wtg': 'way to go', 'some1': 'someone', 'yrs': 'years', 'hrs': 'hours', '2morow': 'tomorrow', '2moro': 'tomorrow', '2day': 'today', '4got': 'forget', 'bday': 'birthday', 'b-day': 'birthday'}
def preprocess(text, wordLemm):
processedText = []
for tweet in text:
tweet = tweet.lower()
tweet = re.sub('((http://)[^ ]*|(https://)[^ ]*|( www\\.)[^ ]*)', 'url', tweet)
tweet = emoji.demojize(tweet)
tweet = re.sub('@[^\\s]+', 'user', tweet)
splitted = wordninja.split(tweet)
tweet = ' '.join((word for word in splitted))
for s in abbreviations.keys():
tweet = tweet.replace(' ' + s, ' ' + abbreviations[s])
tweet = re.sub('[^a-z\\s]', '', tweet)
tweet = re.sub('(.)\\1\\1+', '\\1', tweet)
tweetwords = ''
for word in tweet.split():
if len(word) > 1:
word = wordLemm.lemmatize(word)
tweetwords += word + ' '
processedText.append(tweetwords)
return processedText
dataset = tweets.copy()
wordLemm = WordNetLemmatizer()
dataset['processed_text'] = preprocess(list(tweets['tweet_text']), wordLemm)
wc = WordCloud(max_words=2000, width=1600, height=800).generate(' '.join(dataset[dataset.sentiment_label == 0].processed_text))
plt.figure(figsize=(20, 20))
wc = WordCloud(max_words=2000, width=1600, height=800).generate(' '.join(dataset[dataset.sentiment_label == 1].processed_text))
plt.imshow(wc, interpolation='bilinear') | code |
88105225/cell_6 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | !pip install wordninja
!pip install emoji
!pip install catboost
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from wordcloud import WordCloud
import nltk # used commonly for NLP tasks
nltk.download('stopwords')
from nltk.corpus import stopwords
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
import pickle
import re # regular expression operations
import string
import emoji
import spacy
import wordninja
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.naive_bayes import MultinomialNB
import xgboost as xgb
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import BernoulliNB
from keras.preprocessing.text import Tokenizer
from keras.layers.embeddings import Embedding
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers.core import Activation, Dropout, Dense
from keras.layers import LSTM
from tensorflow.keras.optimizers import Adam | code |
88105225/cell_29 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import emoji
import re # regular expression operations
import wordninja
stop_words = stopwords.words('english')
abbreviations = {'a.m.': 'before midday', 'acct': 'account', 'afaik': 'as far as i know', 'afk': 'away from keyboard', 'app': 'application', 'approx': 'approximately', 'apps': 'applications', 'asap': 'as soon as possible', 'asl': 'age, sex, location', 'ave.': 'avenue', 'b.c': 'before christ', 'b2b': 'business to business', 'b4': 'before', 'bae': 'before anyone else', 'bak': 'back at keyboard', 'bbl': 'be back later', 'bbs': 'be back soon', 'be4': 'before', 'blvd': 'boulevard', 'bout': 'about', 'brb': 'be right back', 'btw': 'by the way', 'c/o': 'care of', 'cf': 'compare', 'cia': 'central intelligence agency', 'cu': 'see you', 'cya': 'see you', 'dae': 'does anyone else', 'diy': 'do it yourself', 'dm': 'direct message', 'eet': 'eastern european time', 'eg': 'example', 'encl.': 'enclosed', 'etc': 'and so on', 'fb': 'facebook', 'fc': 'fingers crossed', 'fig': 'figure', 'fwiw': 'for what it is worth', 'fyi': 'for your information', 'ft.': 'feet', 'ft': 'featuring', 'ftl': 'for the loss', 'ftw': 'for the win', 'gal': 'get a life', 'gcse': 'general certificate of secondary education', 'gg': 'good game', 'gl': 'good luck', 'gmt': 'greenwich mean time', 'gn': 'good night', 'goat': 'greatest of all time', 'gps': 'global positioning system', 'gr8': 'great', 'gratz': 'congratulations', 'gyal': 'girl', 'irl': 'in real life', 'hp': 'horsepower', 'hr': 'hour', 'ht': 'height', 'ic': 'i see', 'idc': 'i do not care', 'idgaf': 'i do not give a fuck', 'idk': 'i do not know', 'ie': 'that is', 'iirc': 'if i remember correctly', 'ilu': 'i love you', 'ily': 'i love you', 'imho': 'in my humble opinion', 'imo': 'in my opinion', 'iow': 'in other words', 'jk': 'just kidding', 'l8r': 'later', 'lb': 'pound', 'lbs': 'pounds', 'lmao': 'laugh my ass off', 'lmfao': 'laugh my fucking ass off', 'lol': 'laughing out loud', 'ltd': 'limited', 'm8': 'mate', 'mf': 'motherfucker', 'mofo': 'motherfucker', 'mph': 'miles per hour', 'mr': 'mister', 'ms': 'miss', 'mte': 'my thoughts exactly', 'nbc': 'national broadcasting company', 'nfs': 'not for sale', 'ngl': 'not going to lie', 'nhs': 'national health service', 'nsfw': 'not safe for work', 'rt': 'retweet', 'nth': 'nice to have', 'nvr': 'never', 'nyc': 'new york city', 'oc': 'original content', 'og': 'original', 'oic': 'oh i see', 'omg': 'oh my god', 'omw': 'on my way', 'p.m': 'after midday', 'poc': 'people of color', 'pov': 'point of view', 'pp': 'pages', 'ppl': 'people', 'ps': 'postscript', 'pt': 'point', 'pto': 'please turn over', 'ratchet': 'rude', 'roflol': 'rolling on the floor laughing out loud', 'sk8': 'skate', 'smh': 'shake my head', 'sq': 'square', 'tbh': 'to be honest', 'tbs': 'tablespooful', 'thks': 'thank you', 'tho': 'though', 'thx': 'thank you', 'tia': 'thanks in advance', 'til': 'today i learned', 'tl;dr': 'too long i did not read', 'u2': 'you too', 'w/': 'with', 'tmb': 'tweet me back', 'ttyl': 'talk to you later', 'u': 'you', 'w/o': 'without', 'w8': 'wait', 'wassup': 'what is up', 'wb': 'welcome back', 'wtf': 'what the fuck', 'wtg': 'way to go', 'some1': 'someone', 'yrs': 'years', 'hrs': 'hours', '2morow': 'tomorrow', '2moro': 'tomorrow', '2day': 'today', '4got': 'forget', 'bday': 'birthday', 'b-day': 'birthday'}
def preprocess(text, wordLemm):
processedText = []
for tweet in text:
tweet = tweet.lower()
tweet = re.sub('((http://)[^ ]*|(https://)[^ ]*|( www\\.)[^ ]*)', 'url', tweet)
tweet = emoji.demojize(tweet)
tweet = re.sub('@[^\\s]+', 'user', tweet)
splitted = wordninja.split(tweet)
tweet = ' '.join((word for word in splitted))
for s in abbreviations.keys():
tweet = tweet.replace(' ' + s, ' ' + abbreviations[s])
tweet = re.sub('[^a-z\\s]', '', tweet)
tweet = re.sub('(.)\\1\\1+', '\\1', tweet)
tweetwords = ''
for word in tweet.split():
if len(word) > 1:
word = wordLemm.lemmatize(word)
tweetwords += word + ' '
processedText.append(tweetwords)
return processedText
stop = stopwords.words('english')
vect = TfidfVectorizer(ngram_range=(1, 2), max_features=1000000, stop_words=stop)
vect.fit(X_train)
X_train_tfid = vect.transform(X_train)
X_test_tfid = vect.transform(X_test)
log_clf = LogisticRegression(C=2, max_iter=1000, n_jobs=-1)
log_clf.fit(X_train_tfid, y_train)
log_pred = log_clf.predict(X_test_tfid)
print('Logistic Regression Accuracy :', accuracy_score(y_test, log_pred)) | code |
88105225/cell_39 | [
"text_plain_output_1.png",
"image_output_1.png"
] | # GloVe provides pretrained word-embedding vectors; download the 6B-token release and unzip it
!wget http://nlp.stanford.edu/data/glove.6B.zip
!unzip glove.6B.zip | code |
88105225/cell_48 | [
"text_plain_output_1.png"
] | from keras.layers import LSTM
from keras.layers.core import Activation, Dropout, Dense
from keras.layers.embeddings import Embedding
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from nltk.corpus import stopwords
from tensorflow.keras.optimizers import Adam
import emoji
import numpy as np
import numpy as np # linear algebra
import re # regular expression operations
import wordninja
stop_words = stopwords.words('english')
abbreviations = {'a.m.': 'before midday', 'acct': 'account', 'afaik': 'as far as i know', 'afk': 'away from keyboard', 'app': 'application', 'approx': 'approximately', 'apps': 'applications', 'asap': 'as soon as possible', 'asl': 'age, sex, location', 'ave.': 'avenue', 'b.c': 'before christ', 'b2b': 'business to business', 'b4': 'before', 'bae': 'before anyone else', 'bak': 'back at keyboard', 'bbl': 'be back later', 'bbs': 'be back soon', 'be4': 'before', 'blvd': 'boulevard', 'bout': 'about', 'brb': 'be right back', 'btw': 'by the way', 'c/o': 'care of', 'cf': 'compare', 'cia': 'central intelligence agency', 'cu': 'see you', 'cya': 'see you', 'dae': 'does anyone else', 'diy': 'do it yourself', 'dm': 'direct message', 'eet': 'eastern european time', 'eg': 'example', 'encl.': 'enclosed', 'etc': 'and so on', 'fb': 'facebook', 'fc': 'fingers crossed', 'fig': 'figure', 'fwiw': 'for what it is worth', 'fyi': 'for your information', 'ft.': 'feet', 'ft': 'featuring', 'ftl': 'for the loss', 'ftw': 'for the win', 'gal': 'get a life', 'gcse': 'general certificate of secondary education', 'gg': 'good game', 'gl': 'good luck', 'gmt': 'greenwich mean time', 'gn': 'good night', 'goat': 'greatest of all time', 'gps': 'global positioning system', 'gr8': 'great', 'gratz': 'congratulations', 'gyal': 'girl', 'irl': 'in real life', 'hp': 'horsepower', 'hr': 'hour', 'ht': 'height', 'ic': 'i see', 'idc': 'i do not care', 'idgaf': 'i do not give a fuck', 'idk': 'i do not know', 'ie': 'that is', 'iirc': 'if i remember correctly', 'ilu': 'i love you', 'ily': 'i love you', 'imho': 'in my humble opinion', 'imo': 'in my opinion', 'iow': 'in other words', 'jk': 'just kidding', 'l8r': 'later', 'lb': 'pound', 'lbs': 'pounds', 'lmao': 'laugh my ass off', 'lmfao': 'laugh my fucking ass off', 'lol': 'laughing out loud', 'ltd': 'limited', 'm8': 'mate', 'mf': 'motherfucker', 'mofo': 'motherfucker', 'mph': 'miles per hour', 'mr': 'mister', 'ms': 'miss', 'mte': 'my thoughts exactly', 'nbc': 'national broadcasting company', 'nfs': 'not for sale', 'ngl': 'not going to lie', 'nhs': 'national health service', 'nsfw': 'not safe for work', 'rt': 'retweet', 'nth': 'nice to have', 'nvr': 'never', 'nyc': 'new york city', 'oc': 'original content', 'og': 'original', 'oic': 'oh i see', 'omg': 'oh my god', 'omw': 'on my way', 'p.m': 'after midday', 'poc': 'people of color', 'pov': 'point of view', 'pp': 'pages', 'ppl': 'people', 'ps': 'postscript', 'pt': 'point', 'pto': 'please turn over', 'ratchet': 'rude', 'roflol': 'rolling on the floor laughing out loud', 'sk8': 'skate', 'smh': 'shake my head', 'sq': 'square', 'tbh': 'to be honest', 'tbs': 'tablespooful', 'thks': 'thank you', 'tho': 'though', 'thx': 'thank you', 'tia': 'thanks in advance', 'til': 'today i learned', 'tl;dr': 'too long i did not read', 'u2': 'you too', 'w/': 'with', 'tmb': 'tweet me back', 'ttyl': 'talk to you later', 'u': 'you', 'w/o': 'without', 'w8': 'wait', 'wassup': 'what is up', 'wb': 'welcome back', 'wtf': 'what the fuck', 'wtg': 'way to go', 'some1': 'someone', 'yrs': 'years', 'hrs': 'hours', '2morow': 'tomorrow', '2moro': 'tomorrow', '2day': 'today', '4got': 'forget', 'bday': 'birthday', 'b-day': 'birthday'}
def preprocess(text, wordLemm):
processedText = []
for tweet in text:
tweet = tweet.lower()
tweet = re.sub('((http://)[^ ]*|(https://)[^ ]*|( www\\.)[^ ]*)', 'url', tweet)
tweet = emoji.demojize(tweet)
tweet = re.sub('@[^\\s]+', 'user', tweet)
splitted = wordninja.split(tweet)
tweet = ' '.join((word for word in splitted))
for s in abbreviations.keys():
tweet = tweet.replace(' ' + s, ' ' + abbreviations[s])
tweet = re.sub('[^a-z\\s]', '', tweet)
tweet = re.sub('(.)\\1\\1+', '\\1', tweet)
tweetwords = ''
for word in tweet.split():
if len(word) > 1:
word = wordLemm.lemmatize(word)
tweetwords += word + ' '
processedText.append(tweetwords)
return processedText
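# Fit a word-level tokenizer on the training tweets, map each tweet to an integer sequence, and pad to a fixed length of 30 tokens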
tokenizer = Tokenizer()
tokenizer.fit_on_texts(X_train)
word_index = tokenizer.word_index
X_train_tok = tokenizer.texts_to_sequences(X_train)
X_test_tok = tokenizer.texts_to_sequences(X_test)
vocab_size = len(tokenizer.word_index) + 1
X_train_tok = pad_sequences(X_train_tok, padding='post', maxlen=30)
X_test_tok = pad_sequences(X_test_tok, padding='post', maxlen=30)
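# Parse the 300-dimensional GloVe vectors into a word -> vector dictionary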
embeddings_dict = {}
glove_file = open('glove.6B.300d.txt', encoding='utf8')
for line in glove_file:
records = line.split()
word = records[0]
vector_dimensions = np.asarray(records[1:], dtype='float32')
embeddings_dict[word] = vector_dimensions
glove_file.close()
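# Build the embedding matrix from the tokenizer's word index; words missing from GloVe keep all-zero rows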
embedding_matrix = np.zeros((vocab_size, 300))
for word, i in word_index.items():
embedding_vector = embeddings_dict.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
embedding_layer = Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=30, trainable=False)
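# trainable=False keeps the pretrained GloVe weights frozen; the LSTM and dense layers below are trained for binary sentiment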
model2 = Sequential()
model2.add(embedding_layer)
model2.add(Dropout(0.5))
model2.add(LSTM(128))
model2.add(Dropout(0.5))
model2.add(Dense(512, activation='relu'))
model2.add(Dropout(0.5))
model2.add(Dense(1, activation='sigmoid'))
model2.compile(optimizer=Adam(learning_rate=0.01), loss='binary_crossentropy', metrics=['accuracy'])
history = model2.fit(X_train_tok, y_train, batch_size=128, epochs=10, verbose=1, validation_split=0.2) | code |
88105225/cell_41 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
import emoji
import numpy as np
import numpy as np # linear algebra
import re # regular expression operations
import wordninja
stop_words = stopwords.words('english')
abbreviations = {'a.m.': 'before midday', 'acct': 'account', 'afaik': 'as far as i know', 'afk': 'away from keyboard', 'app': 'application', 'approx': 'approximately', 'apps': 'applications', 'asap': 'as soon as possible', 'asl': 'age, sex, location', 'ave.': 'avenue', 'b.c': 'before christ', 'b2b': 'business to business', 'b4': 'before', 'bae': 'before anyone else', 'bak': 'back at keyboard', 'bbl': 'be back later', 'bbs': 'be back soon', 'be4': 'before', 'blvd': 'boulevard', 'bout': 'about', 'brb': 'be right back', 'btw': 'by the way', 'c/o': 'care of', 'cf': 'compare', 'cia': 'central intelligence agency', 'cu': 'see you', 'cya': 'see you', 'dae': 'does anyone else', 'diy': 'do it yourself', 'dm': 'direct message', 'eet': 'eastern european time', 'eg': 'example', 'encl.': 'enclosed', 'etc': 'and so on', 'fb': 'facebook', 'fc': 'fingers crossed', 'fig': 'figure', 'fwiw': 'for what it is worth', 'fyi': 'for your information', 'ft.': 'feet', 'ft': 'featuring', 'ftl': 'for the loss', 'ftw': 'for the win', 'gal': 'get a life', 'gcse': 'general certificate of secondary education', 'gg': 'good game', 'gl': 'good luck', 'gmt': 'greenwich mean time', 'gn': 'good night', 'goat': 'greatest of all time', 'gps': 'global positioning system', 'gr8': 'great', 'gratz': 'congratulations', 'gyal': 'girl', 'irl': 'in real life', 'hp': 'horsepower', 'hr': 'hour', 'ht': 'height', 'ic': 'i see', 'idc': 'i do not care', 'idgaf': 'i do not give a fuck', 'idk': 'i do not know', 'ie': 'that is', 'iirc': 'if i remember correctly', 'ilu': 'i love you', 'ily': 'i love you', 'imho': 'in my humble opinion', 'imo': 'in my opinion', 'iow': 'in other words', 'jk': 'just kidding', 'l8r': 'later', 'lb': 'pound', 'lbs': 'pounds', 'lmao': 'laugh my ass off', 'lmfao': 'laugh my fucking ass off', 'lol': 'laughing out loud', 'ltd': 'limited', 'm8': 'mate', 'mf': 'motherfucker', 'mofo': 'motherfucker', 'mph': 'miles per hour', 'mr': 'mister', 'ms': 'miss', 'mte': 'my thoughts exactly', 'nbc': 'national broadcasting company', 'nfs': 'not for sale', 'ngl': 'not going to lie', 'nhs': 'national health service', 'nsfw': 'not safe for work', 'rt': 'retweet', 'nth': 'nice to have', 'nvr': 'never', 'nyc': 'new york city', 'oc': 'original content', 'og': 'original', 'oic': 'oh i see', 'omg': 'oh my god', 'omw': 'on my way', 'p.m': 'after midday', 'poc': 'people of color', 'pov': 'point of view', 'pp': 'pages', 'ppl': 'people', 'ps': 'postscript', 'pt': 'point', 'pto': 'please turn over', 'ratchet': 'rude', 'roflol': 'rolling on the floor laughing out loud', 'sk8': 'skate', 'smh': 'shake my head', 'sq': 'square', 'tbh': 'to be honest', 'tbs': 'tablespooful', 'thks': 'thank you', 'tho': 'though', 'thx': 'thank you', 'tia': 'thanks in advance', 'til': 'today i learned', 'tl;dr': 'too long i did not read', 'u2': 'you too', 'w/': 'with', 'tmb': 'tweet me back', 'ttyl': 'talk to you later', 'u': 'you', 'w/o': 'without', 'w8': 'wait', 'wassup': 'what is up', 'wb': 'welcome back', 'wtf': 'what the fuck', 'wtg': 'way to go', 'some1': 'someone', 'yrs': 'years', 'hrs': 'hours', '2morow': 'tomorrow', '2moro': 'tomorrow', '2day': 'today', '4got': 'forget', 'bday': 'birthday', 'b-day': 'birthday'}
def preprocess(text, wordLemm):
processedText = []
for tweet in text:
tweet = tweet.lower()
tweet = re.sub('((http://)[^ ]*|(https://)[^ ]*|( www\\.)[^ ]*)', 'url', tweet)
tweet = emoji.demojize(tweet)
tweet = re.sub('@[^\\s]+', 'user', tweet)
splitted = wordninja.split(tweet)
tweet = ' '.join((word for word in splitted))
for s in abbreviations.keys():
tweet = tweet.replace(' ' + s, ' ' + abbreviations[s])
tweet = re.sub('[^a-z\\s]', '', tweet)
tweet = re.sub('(.)\\1\\1+', '\\1', tweet)
tweetwords = ''
for word in tweet.split():
if len(word) > 1:
word = wordLemm.lemmatize(word)
tweetwords += word + ' '
processedText.append(tweetwords)
return processedText
embeddings_dict = {}
glove_file = open('glove.6B.300d.txt', encoding='utf8')
for line in glove_file:
records = line.split()
word = records[0]
vector_dimensions = np.asarray(records[1:], dtype='float32')
embeddings_dict[word] = vector_dimensions
glove_file.close()
print('Found %s word vectors.' % len(embeddings_dict)) | code |
88105225/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
tweets = pd.read_csv('/kaggle/input/twitter-sentiment-analysis-10/Sentiment140.tenPercent.sample.tweets.tsv', delimiter='\t')
tweets.isnull().values.any().sum()
tweets = tweets.drop_duplicates()
tweets['tweet_text'][0] | code |
88105225/cell_19 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from wordcloud import WordCloud
import emoji
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re # regular expression operations
import wordninja
tweets = pd.read_csv('/kaggle/input/twitter-sentiment-analysis-10/Sentiment140.tenPercent.sample.tweets.tsv', delimiter='\t')
tweets.isnull().values.any().sum()
tweets = tweets.drop_duplicates()
stop_words = stopwords.words('english')
abbreviations = {'a.m.': 'before midday', 'acct': 'account', 'afaik': 'as far as i know', 'afk': 'away from keyboard', 'app': 'application', 'approx': 'approximately', 'apps': 'applications', 'asap': 'as soon as possible', 'asl': 'age, sex, location', 'ave.': 'avenue', 'b.c': 'before christ', 'b2b': 'business to business', 'b4': 'before', 'bae': 'before anyone else', 'bak': 'back at keyboard', 'bbl': 'be back later', 'bbs': 'be back soon', 'be4': 'before', 'blvd': 'boulevard', 'bout': 'about', 'brb': 'be right back', 'btw': 'by the way', 'c/o': 'care of', 'cf': 'compare', 'cia': 'central intelligence agency', 'cu': 'see you', 'cya': 'see you', 'dae': 'does anyone else', 'diy': 'do it yourself', 'dm': 'direct message', 'eet': 'eastern european time', 'eg': 'example', 'encl.': 'enclosed', 'etc': 'and so on', 'fb': 'facebook', 'fc': 'fingers crossed', 'fig': 'figure', 'fwiw': 'for what it is worth', 'fyi': 'for your information', 'ft.': 'feet', 'ft': 'featuring', 'ftl': 'for the loss', 'ftw': 'for the win', 'gal': 'get a life', 'gcse': 'general certificate of secondary education', 'gg': 'good game', 'gl': 'good luck', 'gmt': 'greenwich mean time', 'gn': 'good night', 'goat': 'greatest of all time', 'gps': 'global positioning system', 'gr8': 'great', 'gratz': 'congratulations', 'gyal': 'girl', 'irl': 'in real life', 'hp': 'horsepower', 'hr': 'hour', 'ht': 'height', 'ic': 'i see', 'idc': 'i do not care', 'idgaf': 'i do not give a fuck', 'idk': 'i do not know', 'ie': 'that is', 'iirc': 'if i remember correctly', 'ilu': 'i love you', 'ily': 'i love you', 'imho': 'in my humble opinion', 'imo': 'in my opinion', 'iow': 'in other words', 'jk': 'just kidding', 'l8r': 'later', 'lb': 'pound', 'lbs': 'pounds', 'lmao': 'laugh my ass off', 'lmfao': 'laugh my fucking ass off', 'lol': 'laughing out loud', 'ltd': 'limited', 'm8': 'mate', 'mf': 'motherfucker', 'mofo': 'motherfucker', 'mph': 'miles per hour', 'mr': 'mister', 'ms': 'miss', 'mte': 'my thoughts exactly', 'nbc': 'national broadcasting company', 'nfs': 'not for sale', 'ngl': 'not going to lie', 'nhs': 'national health service', 'nsfw': 'not safe for work', 'rt': 'retweet', 'nth': 'nice to have', 'nvr': 'never', 'nyc': 'new york city', 'oc': 'original content', 'og': 'original', 'oic': 'oh i see', 'omg': 'oh my god', 'omw': 'on my way', 'p.m': 'after midday', 'poc': 'people of color', 'pov': 'point of view', 'pp': 'pages', 'ppl': 'people', 'ps': 'postscript', 'pt': 'point', 'pto': 'please turn over', 'ratchet': 'rude', 'roflol': 'rolling on the floor laughing out loud', 'sk8': 'skate', 'smh': 'shake my head', 'sq': 'square', 'tbh': 'to be honest', 'tbs': 'tablespooful', 'thks': 'thank you', 'tho': 'though', 'thx': 'thank you', 'tia': 'thanks in advance', 'til': 'today i learned', 'tl;dr': 'too long i did not read', 'u2': 'you too', 'w/': 'with', 'tmb': 'tweet me back', 'ttyl': 'talk to you later', 'u': 'you', 'w/o': 'without', 'w8': 'wait', 'wassup': 'what is up', 'wb': 'welcome back', 'wtf': 'what the fuck', 'wtg': 'way to go', 'some1': 'someone', 'yrs': 'years', 'hrs': 'hours', '2morow': 'tomorrow', '2moro': 'tomorrow', '2day': 'today', '4got': 'forget', 'bday': 'birthday', 'b-day': 'birthday'}
def preprocess(text, wordLemm):
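    # Clean and normalize each tweet: lowercase, mask URLs and @mentions, demojize,
    # re-split run-together words with wordninja, expand common abbreviations,
    # strip non-letters, collapse characters repeated 3+ times, and lemmatize
    # every word longer than one character.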
processedText = []
for tweet in text:
tweet = tweet.lower()
tweet = re.sub('((http://)[^ ]*|(https://)[^ ]*|( www\\.)[^ ]*)', 'url', tweet)
tweet = emoji.demojize(tweet)
tweet = re.sub('@[^\\s]+', 'user', tweet)
splitted = wordninja.split(tweet)
tweet = ' '.join((word for word in splitted))
for s in abbreviations.keys():
tweet = tweet.replace(' ' + s, ' ' + abbreviations[s])
tweet = re.sub('[^a-z\\s]', '', tweet)
tweet = re.sub('(.)\\1\\1+', '\\1', tweet)
tweetwords = ''
for word in tweet.split():
if len(word) > 1:
word = wordLemm.lemmatize(word)
tweetwords += word + ' '
processedText.append(tweetwords)
return processedText
dataset = tweets.copy()
wordLemm = WordNetLemmatizer()
dataset['processed_text'] = preprocess(list(tweets['tweet_text']), wordLemm)
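# Word cloud of the most frequent terms in tweets with sentiment_label == 0
# (presumably the negative class in this 0/1-labelled sample).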
plt.figure(figsize=(20, 20))
wc = WordCloud(max_words=2000, width=1600, height=800).generate(' '.join(dataset[dataset.sentiment_label == 0].processed_text))
plt.imshow(wc, interpolation='bilinear') | code |
88105225/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
88105225/cell_7 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
tweets = pd.read_csv('/kaggle/input/twitter-sentiment-analysis-10/Sentiment140.tenPercent.sample.tweets.tsv', delimiter='\t')
tweets.head() | code |
88105225/cell_45 | [
"text_plain_output_1.png"
] | from keras.layers import LSTM
from keras.layers.core import Activation, Dropout, Dense
from keras.layers.embeddings import Embedding
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from nltk.corpus import stopwords
import emoji
import numpy as np
import numpy as np # linear algebra
import re # regular expression operations
import wordninja
stop_words = stopwords.words('english')
abbreviations = {'a.m.': 'before midday', 'acct': 'account', 'afaik': 'as far as i know', 'afk': 'away from keyboard', 'app': 'application', 'approx': 'approximately', 'apps': 'applications', 'asap': 'as soon as possible', 'asl': 'age, sex, location', 'ave.': 'avenue', 'b.c': 'before christ', 'b2b': 'business to business', 'b4': 'before', 'bae': 'before anyone else', 'bak': 'back at keyboard', 'bbl': 'be back later', 'bbs': 'be back soon', 'be4': 'before', 'blvd': 'boulevard', 'bout': 'about', 'brb': 'be right back', 'btw': 'by the way', 'c/o': 'care of', 'cf': 'compare', 'cia': 'central intelligence agency', 'cu': 'see you', 'cya': 'see you', 'dae': 'does anyone else', 'diy': 'do it yourself', 'dm': 'direct message', 'eet': 'eastern european time', 'eg': 'example', 'encl.': 'enclosed', 'etc': 'and so on', 'fb': 'facebook', 'fc': 'fingers crossed', 'fig': 'figure', 'fwiw': 'for what it is worth', 'fyi': 'for your information', 'ft.': 'feet', 'ft': 'featuring', 'ftl': 'for the loss', 'ftw': 'for the win', 'gal': 'get a life', 'gcse': 'general certificate of secondary education', 'gg': 'good game', 'gl': 'good luck', 'gmt': 'greenwich mean time', 'gn': 'good night', 'goat': 'greatest of all time', 'gps': 'global positioning system', 'gr8': 'great', 'gratz': 'congratulations', 'gyal': 'girl', 'irl': 'in real life', 'hp': 'horsepower', 'hr': 'hour', 'ht': 'height', 'ic': 'i see', 'idc': 'i do not care', 'idgaf': 'i do not give a fuck', 'idk': 'i do not know', 'ie': 'that is', 'iirc': 'if i remember correctly', 'ilu': 'i love you', 'ily': 'i love you', 'imho': 'in my humble opinion', 'imo': 'in my opinion', 'iow': 'in other words', 'jk': 'just kidding', 'l8r': 'later', 'lb': 'pound', 'lbs': 'pounds', 'lmao': 'laugh my ass off', 'lmfao': 'laugh my fucking ass off', 'lol': 'laughing out loud', 'ltd': 'limited', 'm8': 'mate', 'mf': 'motherfucker', 'mofo': 'motherfucker', 'mph': 'miles per hour', 'mr': 'mister', 'ms': 'miss', 'mte': 'my thoughts exactly', 'nbc': 'national broadcasting company', 'nfs': 'not for sale', 'ngl': 'not going to lie', 'nhs': 'national health service', 'nsfw': 'not safe for work', 'rt': 'retweet', 'nth': 'nice to have', 'nvr': 'never', 'nyc': 'new york city', 'oc': 'original content', 'og': 'original', 'oic': 'oh i see', 'omg': 'oh my god', 'omw': 'on my way', 'p.m': 'after midday', 'poc': 'people of color', 'pov': 'point of view', 'pp': 'pages', 'ppl': 'people', 'ps': 'postscript', 'pt': 'point', 'pto': 'please turn over', 'ratchet': 'rude', 'roflol': 'rolling on the floor laughing out loud', 'sk8': 'skate', 'smh': 'shake my head', 'sq': 'square', 'tbh': 'to be honest', 'tbs': 'tablespooful', 'thks': 'thank you', 'tho': 'though', 'thx': 'thank you', 'tia': 'thanks in advance', 'til': 'today i learned', 'tl;dr': 'too long i did not read', 'u2': 'you too', 'w/': 'with', 'tmb': 'tweet me back', 'ttyl': 'talk to you later', 'u': 'you', 'w/o': 'without', 'w8': 'wait', 'wassup': 'what is up', 'wb': 'welcome back', 'wtf': 'what the fuck', 'wtg': 'way to go', 'some1': 'someone', 'yrs': 'years', 'hrs': 'hours', '2morow': 'tomorrow', '2moro': 'tomorrow', '2day': 'today', '4got': 'forget', 'bday': 'birthday', 'b-day': 'birthday'}
def preprocess(text, wordLemm):
processedText = []
for tweet in text:
tweet = tweet.lower()
tweet = re.sub('((http://)[^ ]*|(https://)[^ ]*|( www\\.)[^ ]*)', 'url', tweet)
tweet = emoji.demojize(tweet)
tweet = re.sub('@[^\\s]+', 'user', tweet)
splitted = wordninja.split(tweet)
tweet = ' '.join((word for word in splitted))
for s in abbreviations.keys():
tweet = tweet.replace(' ' + s, ' ' + abbreviations[s])
tweet = re.sub('[^a-z\\s]', '', tweet)
tweet = re.sub('(.)\\1\\1+', '\\1', tweet)
tweetwords = ''
for word in tweet.split():
if len(word) > 1:
word = wordLemm.lemmatize(word)
tweetwords += word + ' '
processedText.append(tweetwords)
return processedText
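# NOTE: X_train / X_test are not defined in this cell; they are assumed to come
# from the earlier 80/20 train_test_split over dataset['processed_text'], e.g. (sketch):
# X_train, X_test, y_train, y_test = train_test_split(dataset['processed_text'], tweets['sentiment_label'], test_size=0.2, random_state=6)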
tokenizer = Tokenizer()
tokenizer.fit_on_texts(X_train)
word_index = tokenizer.word_index
X_train_tok = tokenizer.texts_to_sequences(X_train)
X_test_tok = tokenizer.texts_to_sequences(X_test)
vocab_size = len(tokenizer.word_index) + 1
X_train_tok = pad_sequences(X_train_tok, padding='post', maxlen=30)
X_test_tok = pad_sequences(X_test_tok, padding='post', maxlen=30)
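# Load pre-trained 300-dimensional GloVe vectors into a word -> vector dict.
# Assumes glove.6B.300d.txt has already been downloaded to the working directory.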
embeddings_dict = {}
glove_file = open('glove.6B.300d.txt', encoding='utf8')
for line in glove_file:
records = line.split()
word = records[0]
vector_dimensions = np.asarray(records[1:], dtype='float32')
embeddings_dict[word] = vector_dimensions
glove_file.close()
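# Build the embedding matrix: row i holds the GloVe vector for the word with
# tokenizer index i; words missing from GloVe remain all-zero rows.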
embedding_matrix = np.zeros((vocab_size, 300))
for word, i in word_index.items():
embedding_vector = embeddings_dict.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
embedding_layer = Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=30, trainable=False)
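# LSTM classifier on top of the frozen GloVe embedding layer:
# Embedding -> Dropout(0.5) -> LSTM(128) -> Dropout(0.5) -> Dense(512, relu) -> Dropout(0.5) -> Dense(1, sigmoid).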
model2 = Sequential()
model2.add(embedding_layer)
model2.add(Dropout(0.5))
model2.add(LSTM(128))
model2.add(Dropout(0.5))
model2.add(Dense(512, activation='relu'))
model2.add(Dropout(0.5))
model2.add(Dense(1, activation='sigmoid'))
print(model2.summary()) | code |
88105225/cell_49 | [
"text_plain_output_1.png"
] | from keras.layers import LSTM
from keras.layers.core import Activation, Dropout, Dense
from keras.layers.embeddings import Embedding
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from nltk.corpus import stopwords
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from tensorflow.keras.optimizers import Adam
import emoji
import numpy as np
import numpy as np # linear algebra
import re # regular expression operations
import wordninja
stop_words = stopwords.words('english')
abbreviations = {'a.m.': 'before midday', 'acct': 'account', 'afaik': 'as far as i know', 'afk': 'away from keyboard', 'app': 'application', 'approx': 'approximately', 'apps': 'applications', 'asap': 'as soon as possible', 'asl': 'age, sex, location', 'ave.': 'avenue', 'b.c': 'before christ', 'b2b': 'business to business', 'b4': 'before', 'bae': 'before anyone else', 'bak': 'back at keyboard', 'bbl': 'be back later', 'bbs': 'be back soon', 'be4': 'before', 'blvd': 'boulevard', 'bout': 'about', 'brb': 'be right back', 'btw': 'by the way', 'c/o': 'care of', 'cf': 'compare', 'cia': 'central intelligence agency', 'cu': 'see you', 'cya': 'see you', 'dae': 'does anyone else', 'diy': 'do it yourself', 'dm': 'direct message', 'eet': 'eastern european time', 'eg': 'example', 'encl.': 'enclosed', 'etc': 'and so on', 'fb': 'facebook', 'fc': 'fingers crossed', 'fig': 'figure', 'fwiw': 'for what it is worth', 'fyi': 'for your information', 'ft.': 'feet', 'ft': 'featuring', 'ftl': 'for the loss', 'ftw': 'for the win', 'gal': 'get a life', 'gcse': 'general certificate of secondary education', 'gg': 'good game', 'gl': 'good luck', 'gmt': 'greenwich mean time', 'gn': 'good night', 'goat': 'greatest of all time', 'gps': 'global positioning system', 'gr8': 'great', 'gratz': 'congratulations', 'gyal': 'girl', 'irl': 'in real life', 'hp': 'horsepower', 'hr': 'hour', 'ht': 'height', 'ic': 'i see', 'idc': 'i do not care', 'idgaf': 'i do not give a fuck', 'idk': 'i do not know', 'ie': 'that is', 'iirc': 'if i remember correctly', 'ilu': 'i love you', 'ily': 'i love you', 'imho': 'in my humble opinion', 'imo': 'in my opinion', 'iow': 'in other words', 'jk': 'just kidding', 'l8r': 'later', 'lb': 'pound', 'lbs': 'pounds', 'lmao': 'laugh my ass off', 'lmfao': 'laugh my fucking ass off', 'lol': 'laughing out loud', 'ltd': 'limited', 'm8': 'mate', 'mf': 'motherfucker', 'mofo': 'motherfucker', 'mph': 'miles per hour', 'mr': 'mister', 'ms': 'miss', 'mte': 'my thoughts exactly', 'nbc': 'national broadcasting company', 'nfs': 'not for sale', 'ngl': 'not going to lie', 'nhs': 'national health service', 'nsfw': 'not safe for work', 'rt': 'retweet', 'nth': 'nice to have', 'nvr': 'never', 'nyc': 'new york city', 'oc': 'original content', 'og': 'original', 'oic': 'oh i see', 'omg': 'oh my god', 'omw': 'on my way', 'p.m': 'after midday', 'poc': 'people of color', 'pov': 'point of view', 'pp': 'pages', 'ppl': 'people', 'ps': 'postscript', 'pt': 'point', 'pto': 'please turn over', 'ratchet': 'rude', 'roflol': 'rolling on the floor laughing out loud', 'sk8': 'skate', 'smh': 'shake my head', 'sq': 'square', 'tbh': 'to be honest', 'tbs': 'tablespooful', 'thks': 'thank you', 'tho': 'though', 'thx': 'thank you', 'tia': 'thanks in advance', 'til': 'today i learned', 'tl;dr': 'too long i did not read', 'u2': 'you too', 'w/': 'with', 'tmb': 'tweet me back', 'ttyl': 'talk to you later', 'u': 'you', 'w/o': 'without', 'w8': 'wait', 'wassup': 'what is up', 'wb': 'welcome back', 'wtf': 'what the fuck', 'wtg': 'way to go', 'some1': 'someone', 'yrs': 'years', 'hrs': 'hours', '2morow': 'tomorrow', '2moro': 'tomorrow', '2day': 'today', '4got': 'forget', 'bday': 'birthday', 'b-day': 'birthday'}
def preprocess(text, wordLemm):
processedText = []
for tweet in text:
tweet = tweet.lower()
tweet = re.sub('((http://)[^ ]*|(https://)[^ ]*|( www\\.)[^ ]*)', 'url', tweet)
tweet = emoji.demojize(tweet)
tweet = re.sub('@[^\\s]+', 'user', tweet)
splitted = wordninja.split(tweet)
tweet = ' '.join((word for word in splitted))
for s in abbreviations.keys():
tweet = tweet.replace(' ' + s, ' ' + abbreviations[s])
tweet = re.sub('[^a-z\\s]', '', tweet)
tweet = re.sub('(.)\\1\\1+', '\\1', tweet)
tweetwords = ''
for word in tweet.split():
if len(word) > 1:
word = wordLemm.lemmatize(word)
tweetwords += word + ' '
processedText.append(tweetwords)
return processedText
tokenizer = Tokenizer()
tokenizer.fit_on_texts(X_train)
word_index = tokenizer.word_index
X_train_tok = tokenizer.texts_to_sequences(X_train)
X_test_tok = tokenizer.texts_to_sequences(X_test)
vocab_size = len(tokenizer.word_index) + 1
X_train_tok = pad_sequences(X_train_tok, padding='post', maxlen=30)
X_test_tok = pad_sequences(X_test_tok, padding='post', maxlen=30)
embeddings_dict = {}
glove_file = open('glove.6B.300d.txt', encoding='utf8')
for line in glove_file:
records = line.split()
word = records[0]
vector_dimensions = np.asarray(records[1:], dtype='float32')
embeddings_dict[word] = vector_dimensions
glove_file.close()
embedding_matrix = np.zeros((vocab_size, 300))
for word, i in word_index.items():
embedding_vector = embeddings_dict.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
embedding_layer = Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=30, trainable=False)
model2 = Sequential()
model2.add(embedding_layer)
model2.add(Dropout(0.5))
model2.add(LSTM(128))
model2.add(Dropout(0.5))
model2.add(Dense(512, activation='relu'))
model2.add(Dropout(0.5))
model2.add(Dense(1, activation='sigmoid'))
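# Train with Adam (learning rate 0.01) and binary cross-entropy for 10 epochs,
# batch size 128, holding out 20% of the training data as a validation split.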
model2.compile(optimizer=Adam(learning_rate=0.01), loss='binary_crossentropy', metrics=['accuracy'])
history = model2.fit(X_train_tok, y_train, batch_size=128, epochs=10, verbose=1, validation_split=0.2)
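# Evaluate on the held-out test set: round the sigmoid outputs at 0.5 to get hard labels.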
y_pred = model2.predict(X_test_tok)
y_pred_new = np.round(y_pred)
print('Accuracy: ', metrics.accuracy_score(y_test, y_pred_new))
cm = metrics.confusion_matrix(y_test, y_pred_new)
print('Confusion Matrix:\n', cm)
print('Classification Report:\n', classification_report(y_test, y_pred_new)) | code |
88105225/cell_18 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import emoji
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re # regular expression operations
import wordninja
tweets = pd.read_csv('/kaggle/input/twitter-sentiment-analysis-10/Sentiment140.tenPercent.sample.tweets.tsv', delimiter='\t')
tweets.isnull().values.any().sum()
tweets = tweets.drop_duplicates()
stop_words = stopwords.words('english')
abbreviations = {'a.m.': 'before midday', 'acct': 'account', 'afaik': 'as far as i know', 'afk': 'away from keyboard', 'app': 'application', 'approx': 'approximately', 'apps': 'applications', 'asap': 'as soon as possible', 'asl': 'age, sex, location', 'ave.': 'avenue', 'b.c': 'before christ', 'b2b': 'business to business', 'b4': 'before', 'bae': 'before anyone else', 'bak': 'back at keyboard', 'bbl': 'be back later', 'bbs': 'be back soon', 'be4': 'before', 'blvd': 'boulevard', 'bout': 'about', 'brb': 'be right back', 'btw': 'by the way', 'c/o': 'care of', 'cf': 'compare', 'cia': 'central intelligence agency', 'cu': 'see you', 'cya': 'see you', 'dae': 'does anyone else', 'diy': 'do it yourself', 'dm': 'direct message', 'eet': 'eastern european time', 'eg': 'example', 'encl.': 'enclosed', 'etc': 'and so on', 'fb': 'facebook', 'fc': 'fingers crossed', 'fig': 'figure', 'fwiw': 'for what it is worth', 'fyi': 'for your information', 'ft.': 'feet', 'ft': 'featuring', 'ftl': 'for the loss', 'ftw': 'for the win', 'gal': 'get a life', 'gcse': 'general certificate of secondary education', 'gg': 'good game', 'gl': 'good luck', 'gmt': 'greenwich mean time', 'gn': 'good night', 'goat': 'greatest of all time', 'gps': 'global positioning system', 'gr8': 'great', 'gratz': 'congratulations', 'gyal': 'girl', 'irl': 'in real life', 'hp': 'horsepower', 'hr': 'hour', 'ht': 'height', 'ic': 'i see', 'idc': 'i do not care', 'idgaf': 'i do not give a fuck', 'idk': 'i do not know', 'ie': 'that is', 'iirc': 'if i remember correctly', 'ilu': 'i love you', 'ily': 'i love you', 'imho': 'in my humble opinion', 'imo': 'in my opinion', 'iow': 'in other words', 'jk': 'just kidding', 'l8r': 'later', 'lb': 'pound', 'lbs': 'pounds', 'lmao': 'laugh my ass off', 'lmfao': 'laugh my fucking ass off', 'lol': 'laughing out loud', 'ltd': 'limited', 'm8': 'mate', 'mf': 'motherfucker', 'mofo': 'motherfucker', 'mph': 'miles per hour', 'mr': 'mister', 'ms': 'miss', 'mte': 'my thoughts exactly', 'nbc': 'national broadcasting company', 'nfs': 'not for sale', 'ngl': 'not going to lie', 'nhs': 'national health service', 'nsfw': 'not safe for work', 'rt': 'retweet', 'nth': 'nice to have', 'nvr': 'never', 'nyc': 'new york city', 'oc': 'original content', 'og': 'original', 'oic': 'oh i see', 'omg': 'oh my god', 'omw': 'on my way', 'p.m': 'after midday', 'poc': 'people of color', 'pov': 'point of view', 'pp': 'pages', 'ppl': 'people', 'ps': 'postscript', 'pt': 'point', 'pto': 'please turn over', 'ratchet': 'rude', 'roflol': 'rolling on the floor laughing out loud', 'sk8': 'skate', 'smh': 'shake my head', 'sq': 'square', 'tbh': 'to be honest', 'tbs': 'tablespooful', 'thks': 'thank you', 'tho': 'though', 'thx': 'thank you', 'tia': 'thanks in advance', 'til': 'today i learned', 'tl;dr': 'too long i did not read', 'u2': 'you too', 'w/': 'with', 'tmb': 'tweet me back', 'ttyl': 'talk to you later', 'u': 'you', 'w/o': 'without', 'w8': 'wait', 'wassup': 'what is up', 'wb': 'welcome back', 'wtf': 'what the fuck', 'wtg': 'way to go', 'some1': 'someone', 'yrs': 'years', 'hrs': 'hours', '2morow': 'tomorrow', '2moro': 'tomorrow', '2day': 'today', '4got': 'forget', 'bday': 'birthday', 'b-day': 'birthday'}
def preprocess(text, wordLemm):
processedText = []
for tweet in text:
tweet = tweet.lower()
tweet = re.sub('((http://)[^ ]*|(https://)[^ ]*|( www\\.)[^ ]*)', 'url', tweet)
tweet = emoji.demojize(tweet)
tweet = re.sub('@[^\\s]+', 'user', tweet)
splitted = wordninja.split(tweet)
tweet = ' '.join((word for word in splitted))
for s in abbreviations.keys():
tweet = tweet.replace(' ' + s, ' ' + abbreviations[s])
tweet = re.sub('[^a-z\\s]', '', tweet)
tweet = re.sub('(.)\\1\\1+', '\\1', tweet)
tweetwords = ''
for word in tweet.split():
if len(word) > 1:
word = wordLemm.lemmatize(word)
tweetwords += word + ' '
processedText.append(tweetwords)
return processedText
dataset = tweets.copy()
wordLemm = WordNetLemmatizer()
dataset['processed_text'] = preprocess(list(tweets['tweet_text']), wordLemm)
dataset.head() | code |
88105225/cell_32 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from wordcloud import WordCloud
import emoji
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re # regular expression operations
import seaborn as sns
import wordninja
tweets = pd.read_csv('/kaggle/input/twitter-sentiment-analysis-10/Sentiment140.tenPercent.sample.tweets.tsv', delimiter='\t')
tweets.isnull().values.any().sum()
tweets = tweets.drop_duplicates()
stop_words = stopwords.words('english')
abbreviations = {'a.m.': 'before midday', 'acct': 'account', 'afaik': 'as far as i know', 'afk': 'away from keyboard', 'app': 'application', 'approx': 'approximately', 'apps': 'applications', 'asap': 'as soon as possible', 'asl': 'age, sex, location', 'ave.': 'avenue', 'b.c': 'before christ', 'b2b': 'business to business', 'b4': 'before', 'bae': 'before anyone else', 'bak': 'back at keyboard', 'bbl': 'be back later', 'bbs': 'be back soon', 'be4': 'before', 'blvd': 'boulevard', 'bout': 'about', 'brb': 'be right back', 'btw': 'by the way', 'c/o': 'care of', 'cf': 'compare', 'cia': 'central intelligence agency', 'cu': 'see you', 'cya': 'see you', 'dae': 'does anyone else', 'diy': 'do it yourself', 'dm': 'direct message', 'eet': 'eastern european time', 'eg': 'example', 'encl.': 'enclosed', 'etc': 'and so on', 'fb': 'facebook', 'fc': 'fingers crossed', 'fig': 'figure', 'fwiw': 'for what it is worth', 'fyi': 'for your information', 'ft.': 'feet', 'ft': 'featuring', 'ftl': 'for the loss', 'ftw': 'for the win', 'gal': 'get a life', 'gcse': 'general certificate of secondary education', 'gg': 'good game', 'gl': 'good luck', 'gmt': 'greenwich mean time', 'gn': 'good night', 'goat': 'greatest of all time', 'gps': 'global positioning system', 'gr8': 'great', 'gratz': 'congratulations', 'gyal': 'girl', 'irl': 'in real life', 'hp': 'horsepower', 'hr': 'hour', 'ht': 'height', 'ic': 'i see', 'idc': 'i do not care', 'idgaf': 'i do not give a fuck', 'idk': 'i do not know', 'ie': 'that is', 'iirc': 'if i remember correctly', 'ilu': 'i love you', 'ily': 'i love you', 'imho': 'in my humble opinion', 'imo': 'in my opinion', 'iow': 'in other words', 'jk': 'just kidding', 'l8r': 'later', 'lb': 'pound', 'lbs': 'pounds', 'lmao': 'laugh my ass off', 'lmfao': 'laugh my fucking ass off', 'lol': 'laughing out loud', 'ltd': 'limited', 'm8': 'mate', 'mf': 'motherfucker', 'mofo': 'motherfucker', 'mph': 'miles per hour', 'mr': 'mister', 'ms': 'miss', 'mte': 'my thoughts exactly', 'nbc': 'national broadcasting company', 'nfs': 'not for sale', 'ngl': 'not going to lie', 'nhs': 'national health service', 'nsfw': 'not safe for work', 'rt': 'retweet', 'nth': 'nice to have', 'nvr': 'never', 'nyc': 'new york city', 'oc': 'original content', 'og': 'original', 'oic': 'oh i see', 'omg': 'oh my god', 'omw': 'on my way', 'p.m': 'after midday', 'poc': 'people of color', 'pov': 'point of view', 'pp': 'pages', 'ppl': 'people', 'ps': 'postscript', 'pt': 'point', 'pto': 'please turn over', 'ratchet': 'rude', 'roflol': 'rolling on the floor laughing out loud', 'sk8': 'skate', 'smh': 'shake my head', 'sq': 'square', 'tbh': 'to be honest', 'tbs': 'tablespooful', 'thks': 'thank you', 'tho': 'though', 'thx': 'thank you', 'tia': 'thanks in advance', 'til': 'today i learned', 'tl;dr': 'too long i did not read', 'u2': 'you too', 'w/': 'with', 'tmb': 'tweet me back', 'ttyl': 'talk to you later', 'u': 'you', 'w/o': 'without', 'w8': 'wait', 'wassup': 'what is up', 'wb': 'welcome back', 'wtf': 'what the fuck', 'wtg': 'way to go', 'some1': 'someone', 'yrs': 'years', 'hrs': 'hours', '2morow': 'tomorrow', '2moro': 'tomorrow', '2day': 'today', '4got': 'forget', 'bday': 'birthday', 'b-day': 'birthday'}
def preprocess(text, wordLemm):
processedText = []
for tweet in text:
tweet = tweet.lower()
tweet = re.sub('((http://)[^ ]*|(https://)[^ ]*|( www\\.)[^ ]*)', 'url', tweet)
tweet = emoji.demojize(tweet)
tweet = re.sub('@[^\\s]+', 'user', tweet)
splitted = wordninja.split(tweet)
tweet = ' '.join((word for word in splitted))
for s in abbreviations.keys():
tweet = tweet.replace(' ' + s, ' ' + abbreviations[s])
tweet = re.sub('[^a-z\\s]', '', tweet)
tweet = re.sub('(.)\\1\\1+', '\\1', tweet)
tweetwords = ''
for word in tweet.split():
if len(word) > 1:
word = wordLemm.lemmatize(word)
tweetwords += word + ' '
processedText.append(tweetwords)
return processedText
dataset = tweets.copy()
wordLemm = WordNetLemmatizer()
dataset['processed_text'] = preprocess(list(tweets['tweet_text']), wordLemm)
wc = WordCloud(max_words=2000, width=1600, height=800).generate(' '.join(dataset[dataset.sentiment_label == 0].processed_text))
wc = WordCloud(max_words=2000, width=1600, height=800).generate(' '.join(dataset[dataset.sentiment_label == 1].processed_text))
stop = stopwords.words('english')
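# TF-IDF features over unigrams and bigrams (up to 1,000,000 features), with English
# stop words removed; X_train / X_test are assumed from the earlier train/test split.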
vect = TfidfVectorizer(ngram_range=(1, 2), max_features=1000000, stop_words=stop)
vect.fit(X_train)
X_train_tfid = vect.transform(X_train)
X_test_tfid = vect.transform(X_test)
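# Logistic regression baseline on the TF-IDF features (C=2, up to 1000 iterations).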
log_clf = LogisticRegression(C=2, max_iter=1000, n_jobs=-1)
log_clf.fit(X_train_tfid, y_train)
log_pred = log_clf.predict(X_test_tfid)
cm = confusion_matrix(y_test, log_pred)
print(cm)
plt.figure(figsize=(5, 5))
sns.heatmap(cm, annot=True)
plt.show() | code |
88105225/cell_51 | [
"text_plain_output_1.png"
] | from keras.layers import LSTM
from keras.layers.core import Activation, Dropout, Dense
from keras.layers.embeddings import Embedding
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from tensorflow.keras.optimizers import Adam
from wordcloud import WordCloud
import emoji
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re # regular expression operations
import seaborn as sns
import wordninja
tweets = pd.read_csv('/kaggle/input/twitter-sentiment-analysis-10/Sentiment140.tenPercent.sample.tweets.tsv', delimiter='\t')
tweets.isnull().values.any().sum()
tweets = tweets.drop_duplicates()
stop_words = stopwords.words('english')
abbreviations = {'a.m.': 'before midday', 'acct': 'account', 'afaik': 'as far as i know', 'afk': 'away from keyboard', 'app': 'application', 'approx': 'approximately', 'apps': 'applications', 'asap': 'as soon as possible', 'asl': 'age, sex, location', 'ave.': 'avenue', 'b.c': 'before christ', 'b2b': 'business to business', 'b4': 'before', 'bae': 'before anyone else', 'bak': 'back at keyboard', 'bbl': 'be back later', 'bbs': 'be back soon', 'be4': 'before', 'blvd': 'boulevard', 'bout': 'about', 'brb': 'be right back', 'btw': 'by the way', 'c/o': 'care of', 'cf': 'compare', 'cia': 'central intelligence agency', 'cu': 'see you', 'cya': 'see you', 'dae': 'does anyone else', 'diy': 'do it yourself', 'dm': 'direct message', 'eet': 'eastern european time', 'eg': 'example', 'encl.': 'enclosed', 'etc': 'and so on', 'fb': 'facebook', 'fc': 'fingers crossed', 'fig': 'figure', 'fwiw': 'for what it is worth', 'fyi': 'for your information', 'ft.': 'feet', 'ft': 'featuring', 'ftl': 'for the loss', 'ftw': 'for the win', 'gal': 'get a life', 'gcse': 'general certificate of secondary education', 'gg': 'good game', 'gl': 'good luck', 'gmt': 'greenwich mean time', 'gn': 'good night', 'goat': 'greatest of all time', 'gps': 'global positioning system', 'gr8': 'great', 'gratz': 'congratulations', 'gyal': 'girl', 'irl': 'in real life', 'hp': 'horsepower', 'hr': 'hour', 'ht': 'height', 'ic': 'i see', 'idc': 'i do not care', 'idgaf': 'i do not give a fuck', 'idk': 'i do not know', 'ie': 'that is', 'iirc': 'if i remember correctly', 'ilu': 'i love you', 'ily': 'i love you', 'imho': 'in my humble opinion', 'imo': 'in my opinion', 'iow': 'in other words', 'jk': 'just kidding', 'l8r': 'later', 'lb': 'pound', 'lbs': 'pounds', 'lmao': 'laugh my ass off', 'lmfao': 'laugh my fucking ass off', 'lol': 'laughing out loud', 'ltd': 'limited', 'm8': 'mate', 'mf': 'motherfucker', 'mofo': 'motherfucker', 'mph': 'miles per hour', 'mr': 'mister', 'ms': 'miss', 'mte': 'my thoughts exactly', 'nbc': 'national broadcasting company', 'nfs': 'not for sale', 'ngl': 'not going to lie', 'nhs': 'national health service', 'nsfw': 'not safe for work', 'rt': 'retweet', 'nth': 'nice to have', 'nvr': 'never', 'nyc': 'new york city', 'oc': 'original content', 'og': 'original', 'oic': 'oh i see', 'omg': 'oh my god', 'omw': 'on my way', 'p.m': 'after midday', 'poc': 'people of color', 'pov': 'point of view', 'pp': 'pages', 'ppl': 'people', 'ps': 'postscript', 'pt': 'point', 'pto': 'please turn over', 'ratchet': 'rude', 'roflol': 'rolling on the floor laughing out loud', 'sk8': 'skate', 'smh': 'shake my head', 'sq': 'square', 'tbh': 'to be honest', 'tbs': 'tablespooful', 'thks': 'thank you', 'tho': 'though', 'thx': 'thank you', 'tia': 'thanks in advance', 'til': 'today i learned', 'tl;dr': 'too long i did not read', 'u2': 'you too', 'w/': 'with', 'tmb': 'tweet me back', 'ttyl': 'talk to you later', 'u': 'you', 'w/o': 'without', 'w8': 'wait', 'wassup': 'what is up', 'wb': 'welcome back', 'wtf': 'what the fuck', 'wtg': 'way to go', 'some1': 'someone', 'yrs': 'years', 'hrs': 'hours', '2morow': 'tomorrow', '2moro': 'tomorrow', '2day': 'today', '4got': 'forget', 'bday': 'birthday', 'b-day': 'birthday'}
def preprocess(text, wordLemm):
processedText = []
for tweet in text:
tweet = tweet.lower()
tweet = re.sub('((http://)[^ ]*|(https://)[^ ]*|( www\\.)[^ ]*)', 'url', tweet)
tweet = emoji.demojize(tweet)
tweet = re.sub('@[^\\s]+', 'user', tweet)
splitted = wordninja.split(tweet)
tweet = ' '.join((word for word in splitted))
for s in abbreviations.keys():
tweet = tweet.replace(' ' + s, ' ' + abbreviations[s])
tweet = re.sub('[^a-z\\s]', '', tweet)
tweet = re.sub('(.)\\1\\1+', '\\1', tweet)
tweetwords = ''
for word in tweet.split():
if len(word) > 1:
word = wordLemm.lemmatize(word)
tweetwords += word + ' '
processedText.append(tweetwords)
return processedText
dataset = tweets.copy()
wordLemm = WordNetLemmatizer()
dataset['processed_text'] = preprocess(list(tweets['tweet_text']), wordLemm)
wc = WordCloud(max_words=2000, width=1600, height=800).generate(' '.join(dataset[dataset.sentiment_label == 0].processed_text))
wc = WordCloud(max_words=2000, width=1600, height=800).generate(' '.join(dataset[dataset.sentiment_label == 1].processed_text))
stop = stopwords.words('english')
vect = TfidfVectorizer(ngram_range=(1, 2), max_features=1000000, stop_words=stop)
vect.fit(X_train)
X_train_tfid = vect.transform(X_train)
X_test_tfid = vect.transform(X_test)
log_clf = LogisticRegression(C=2, max_iter=1000, n_jobs=-1)
log_clf.fit(X_train_tfid, y_train)
log_pred = log_clf.predict(X_test_tfid)
cm = confusion_matrix(y_test, log_pred)
tokenizer = Tokenizer()
tokenizer.fit_on_texts(X_train)
word_index = tokenizer.word_index
X_train_tok = tokenizer.texts_to_sequences(X_train)
X_test_tok = tokenizer.texts_to_sequences(X_test)
vocab_size = len(tokenizer.word_index) + 1
X_train_tok = pad_sequences(X_train_tok, padding='post', maxlen=30)
X_test_tok = pad_sequences(X_test_tok, padding='post', maxlen=30)
embeddings_dict = {}
glove_file = open('glove.6B.300d.txt', encoding='utf8')
for line in glove_file:
records = line.split()
word = records[0]
vector_dimensions = np.asarray(records[1:], dtype='float32')
embeddings_dict[word] = vector_dimensions
glove_file.close()
embedding_matrix = np.zeros((vocab_size, 300))
for word, i in word_index.items():
embedding_vector = embeddings_dict.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
embedding_layer = Embedding(vocab_size, 300, weights=[embedding_matrix], input_length=30, trainable=False)
model2 = Sequential()
model2.add(embedding_layer)
model2.add(Dropout(0.5))
model2.add(LSTM(128))
model2.add(Dropout(0.5))
model2.add(Dense(512, activation='relu'))
model2.add(Dropout(0.5))
model2.add(Dense(1, activation='sigmoid'))
model2.compile(optimizer=Adam(learning_rate=0.01), loss='binary_crossentropy', metrics=['accuracy'])
history = model2.fit(X_train_tok, y_train, batch_size=128, epochs=10, verbose=1, validation_split=0.2)
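# Plot training vs. validation accuracy per epoch from the Keras History object
# (the validation curve comes from the 20% validation_split, not the test set).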
training_accuracy = history.history['accuracy']
validation_accuracy = history.history['val_accuracy']
epoch_count = range(1, len(training_accuracy) + 1)
plt.plot(epoch_count, training_accuracy, 'r--')
plt.plot(epoch_count, validation_accuracy, 'b-')
plt.legend(['Training accuracy', 'Validation accuracy'])
plt.xlabel('epoch')
plt.ylabel('accuracy score')
plt.show() | code |
88105225/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
tweets = pd.read_csv('/kaggle/input/twitter-sentiment-analysis-10/Sentiment140.tenPercent.sample.tweets.tsv', delimiter='\t')
tweets.isnull().values.any().sum() | code |
88105225/cell_38 | [
"text_plain_output_1.png"
] | from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
tokenizer = Tokenizer()
tokenizer.fit_on_texts(X_train)
word_index = tokenizer.word_index
X_train_tok = tokenizer.texts_to_sequences(X_train)
X_test_tok = tokenizer.texts_to_sequences(X_test)
vocab_size = len(tokenizer.word_index) + 1
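# Pad (or truncate) every tokenized tweet to a fixed length of 30 tokens, padding with zeros at the end.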
X_train_tok = pad_sequences(X_train_tok, padding='post', maxlen=30)
X_test_tok = pad_sequences(X_test_tok, padding='post', maxlen=30)
print('X_train shape: ', X_train_tok.shape)
print('X_test shape: ', X_test_tok.shape) | code |
88105225/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.model_selection import train_test_split
from wordcloud import WordCloud
import emoji
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re # regular expression operations
import wordninja
tweets = pd.read_csv('/kaggle/input/twitter-sentiment-analysis-10/Sentiment140.tenPercent.sample.tweets.tsv', delimiter='\t')
tweets.isnull().values.any().sum()
tweets = tweets.drop_duplicates()
stop_words = stopwords.words('english')
abbreviations = {'a.m.': 'before midday', 'acct': 'account', 'afaik': 'as far as i know', 'afk': 'away from keyboard', 'app': 'application', 'approx': 'approximately', 'apps': 'applications', 'asap': 'as soon as possible', 'asl': 'age, sex, location', 'ave.': 'avenue', 'b.c': 'before christ', 'b2b': 'business to business', 'b4': 'before', 'bae': 'before anyone else', 'bak': 'back at keyboard', 'bbl': 'be back later', 'bbs': 'be back soon', 'be4': 'before', 'blvd': 'boulevard', 'bout': 'about', 'brb': 'be right back', 'btw': 'by the way', 'c/o': 'care of', 'cf': 'compare', 'cia': 'central intelligence agency', 'cu': 'see you', 'cya': 'see you', 'dae': 'does anyone else', 'diy': 'do it yourself', 'dm': 'direct message', 'eet': 'eastern european time', 'eg': 'example', 'encl.': 'enclosed', 'etc': 'and so on', 'fb': 'facebook', 'fc': 'fingers crossed', 'fig': 'figure', 'fwiw': 'for what it is worth', 'fyi': 'for your information', 'ft.': 'feet', 'ft': 'featuring', 'ftl': 'for the loss', 'ftw': 'for the win', 'gal': 'get a life', 'gcse': 'general certificate of secondary education', 'gg': 'good game', 'gl': 'good luck', 'gmt': 'greenwich mean time', 'gn': 'good night', 'goat': 'greatest of all time', 'gps': 'global positioning system', 'gr8': 'great', 'gratz': 'congratulations', 'gyal': 'girl', 'irl': 'in real life', 'hp': 'horsepower', 'hr': 'hour', 'ht': 'height', 'ic': 'i see', 'idc': 'i do not care', 'idgaf': 'i do not give a fuck', 'idk': 'i do not know', 'ie': 'that is', 'iirc': 'if i remember correctly', 'ilu': 'i love you', 'ily': 'i love you', 'imho': 'in my humble opinion', 'imo': 'in my opinion', 'iow': 'in other words', 'jk': 'just kidding', 'l8r': 'later', 'lb': 'pound', 'lbs': 'pounds', 'lmao': 'laugh my ass off', 'lmfao': 'laugh my fucking ass off', 'lol': 'laughing out loud', 'ltd': 'limited', 'm8': 'mate', 'mf': 'motherfucker', 'mofo': 'motherfucker', 'mph': 'miles per hour', 'mr': 'mister', 'ms': 'miss', 'mte': 'my thoughts exactly', 'nbc': 'national broadcasting company', 'nfs': 'not for sale', 'ngl': 'not going to lie', 'nhs': 'national health service', 'nsfw': 'not safe for work', 'rt': 'retweet', 'nth': 'nice to have', 'nvr': 'never', 'nyc': 'new york city', 'oc': 'original content', 'og': 'original', 'oic': 'oh i see', 'omg': 'oh my god', 'omw': 'on my way', 'p.m': 'after midday', 'poc': 'people of color', 'pov': 'point of view', 'pp': 'pages', 'ppl': 'people', 'ps': 'postscript', 'pt': 'point', 'pto': 'please turn over', 'ratchet': 'rude', 'roflol': 'rolling on the floor laughing out loud', 'sk8': 'skate', 'smh': 'shake my head', 'sq': 'square', 'tbh': 'to be honest', 'tbs': 'tablespooful', 'thks': 'thank you', 'tho': 'though', 'thx': 'thank you', 'tia': 'thanks in advance', 'til': 'today i learned', 'tl;dr': 'too long i did not read', 'u2': 'you too', 'w/': 'with', 'tmb': 'tweet me back', 'ttyl': 'talk to you later', 'u': 'you', 'w/o': 'without', 'w8': 'wait', 'wassup': 'what is up', 'wb': 'welcome back', 'wtf': 'what the fuck', 'wtg': 'way to go', 'some1': 'someone', 'yrs': 'years', 'hrs': 'hours', '2morow': 'tomorrow', '2moro': 'tomorrow', '2day': 'today', '4got': 'forget', 'bday': 'birthday', 'b-day': 'birthday'}
def preprocess(text, wordLemm):
processedText = []
for tweet in text:
tweet = tweet.lower()
tweet = re.sub('((http://)[^ ]*|(https://)[^ ]*|( www\\.)[^ ]*)', 'url', tweet)
tweet = emoji.demojize(tweet)
tweet = re.sub('@[^\\s]+', 'user', tweet)
splitted = wordninja.split(tweet)
tweet = ' '.join((word for word in splitted))
for s in abbreviations.keys():
tweet = tweet.replace(' ' + s, ' ' + abbreviations[s])
tweet = re.sub('[^a-z\\s]', '', tweet)
tweet = re.sub('(.)\\1\\1+', '\\1', tweet)
tweetwords = ''
for word in tweet.split():
if len(word) > 1:
word = wordLemm.lemmatize(word)
tweetwords += word + ' '
processedText.append(tweetwords)
return processedText
dataset = tweets.copy()
wordLemm = WordNetLemmatizer()
dataset['processed_text'] = preprocess(list(tweets['tweet_text']), wordLemm)
wc = WordCloud(max_words=2000, width=1600, height=800).generate(' '.join(dataset[dataset.sentiment_label == 0].processed_text))
wc = WordCloud(max_words=2000, width=1600, height=800).generate(' '.join(dataset[dataset.sentiment_label == 1].processed_text))
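# 80/20 train/test split on the preprocessed text, with a fixed random_state for reproducibility.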
X = dataset['processed_text']
y = tweets['sentiment_label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=6)
print('Train Data size: ', len(X_train))
print('Test Data size: ', len(X_test)) | code |
88105225/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
tweets = pd.read_csv('/kaggle/input/twitter-sentiment-analysis-10/Sentiment140.tenPercent.sample.tweets.tsv', delimiter='\t')
tweets.isnull().values.any().sum()
tweets = tweets.drop_duplicates()
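# Class balance of sentiment_label after dropping duplicate rows.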
sns.countplot(x='sentiment_label', data=tweets) | code |
88105225/cell_27 | [
"text_html_output_1.png"
] | from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
import emoji
import re # regular expression operations
import wordninja
stop_words = stopwords.words('english')
abbreviations = {'a.m.': 'before midday', 'acct': 'account', 'afaik': 'as far as i know', 'afk': 'away from keyboard', 'app': 'application', 'approx': 'approximately', 'apps': 'applications', 'asap': 'as soon as possible', 'asl': 'age, sex, location', 'ave.': 'avenue', 'b.c': 'before christ', 'b2b': 'business to business', 'b4': 'before', 'bae': 'before anyone else', 'bak': 'back at keyboard', 'bbl': 'be back later', 'bbs': 'be back soon', 'be4': 'before', 'blvd': 'boulevard', 'bout': 'about', 'brb': 'be right back', 'btw': 'by the way', 'c/o': 'care of', 'cf': 'compare', 'cia': 'central intelligence agency', 'cu': 'see you', 'cya': 'see you', 'dae': 'does anyone else', 'diy': 'do it yourself', 'dm': 'direct message', 'eet': 'eastern european time', 'eg': 'example', 'encl.': 'enclosed', 'etc': 'and so on', 'fb': 'facebook', 'fc': 'fingers crossed', 'fig': 'figure', 'fwiw': 'for what it is worth', 'fyi': 'for your information', 'ft.': 'feet', 'ft': 'featuring', 'ftl': 'for the loss', 'ftw': 'for the win', 'gal': 'get a life', 'gcse': 'general certificate of secondary education', 'gg': 'good game', 'gl': 'good luck', 'gmt': 'greenwich mean time', 'gn': 'good night', 'goat': 'greatest of all time', 'gps': 'global positioning system', 'gr8': 'great', 'gratz': 'congratulations', 'gyal': 'girl', 'irl': 'in real life', 'hp': 'horsepower', 'hr': 'hour', 'ht': 'height', 'ic': 'i see', 'idc': 'i do not care', 'idgaf': 'i do not give a fuck', 'idk': 'i do not know', 'ie': 'that is', 'iirc': 'if i remember correctly', 'ilu': 'i love you', 'ily': 'i love you', 'imho': 'in my humble opinion', 'imo': 'in my opinion', 'iow': 'in other words', 'jk': 'just kidding', 'l8r': 'later', 'lb': 'pound', 'lbs': 'pounds', 'lmao': 'laugh my ass off', 'lmfao': 'laugh my fucking ass off', 'lol': 'laughing out loud', 'ltd': 'limited', 'm8': 'mate', 'mf': 'motherfucker', 'mofo': 'motherfucker', 'mph': 'miles per hour', 'mr': 'mister', 'ms': 'miss', 'mte': 'my thoughts exactly', 'nbc': 'national broadcasting company', 'nfs': 'not for sale', 'ngl': 'not going to lie', 'nhs': 'national health service', 'nsfw': 'not safe for work', 'rt': 'retweet', 'nth': 'nice to have', 'nvr': 'never', 'nyc': 'new york city', 'oc': 'original content', 'og': 'original', 'oic': 'oh i see', 'omg': 'oh my god', 'omw': 'on my way', 'p.m': 'after midday', 'poc': 'people of color', 'pov': 'point of view', 'pp': 'pages', 'ppl': 'people', 'ps': 'postscript', 'pt': 'point', 'pto': 'please turn over', 'ratchet': 'rude', 'roflol': 'rolling on the floor laughing out loud', 'sk8': 'skate', 'smh': 'shake my head', 'sq': 'square', 'tbh': 'to be honest', 'tbs': 'tablespooful', 'thks': 'thank you', 'tho': 'though', 'thx': 'thank you', 'tia': 'thanks in advance', 'til': 'today i learned', 'tl;dr': 'too long i did not read', 'u2': 'you too', 'w/': 'with', 'tmb': 'tweet me back', 'ttyl': 'talk to you later', 'u': 'you', 'w/o': 'without', 'w8': 'wait', 'wassup': 'what is up', 'wb': 'welcome back', 'wtf': 'what the fuck', 'wtg': 'way to go', 'some1': 'someone', 'yrs': 'years', 'hrs': 'hours', '2morow': 'tomorrow', '2moro': 'tomorrow', '2day': 'today', '4got': 'forget', 'bday': 'birthday', 'b-day': 'birthday'}
def preprocess(text, wordLemm):
processedText = []
for tweet in text:
tweet = tweet.lower()
tweet = re.sub('((http://)[^ ]*|(https://)[^ ]*|( www\\.)[^ ]*)', 'url', tweet)
tweet = emoji.demojize(tweet)
tweet = re.sub('@[^\\s]+', 'user', tweet)
splitted = wordninja.split(tweet)
tweet = ' '.join((word for word in splitted))
for s in abbreviations.keys():
tweet = tweet.replace(' ' + s, ' ' + abbreviations[s])
tweet = re.sub('[^a-z\\s]', '', tweet)
tweet = re.sub('(.)\\1\\1+', '\\1', tweet)
tweetwords = ''
for word in tweet.split():
if len(word) > 1:
word = wordLemm.lemmatize(word)
tweetwords += word + ' '
processedText.append(tweetwords)
return processedText
stop = stopwords.words('english')
vect = TfidfVectorizer(ngram_range=(1, 2), max_features=1000000, stop_words=stop)
vect.fit(X_train) | code |
88105225/cell_37 | [
"text_plain_output_1.png"
] | from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
tokenizer = Tokenizer()
tokenizer.fit_on_texts(X_train)
word_index = tokenizer.word_index
X_train_tok = tokenizer.texts_to_sequences(X_train)
X_test_tok = tokenizer.texts_to_sequences(X_test)
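# +1 because Keras Tokenizer indices start at 1; index 0 is reserved for padding.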
vocab_size = len(tokenizer.word_index) + 1
print('Vocabulary size: ', vocab_size)
X_train_tok = pad_sequences(X_train_tok, padding='post', maxlen=30)
X_test_tok = pad_sequences(X_test_tok, padding='post', maxlen=30) | code |