markdown | code | output | license | path | repo_name
---|---|---|---|---|---|
4. All together | ns1 = NS('n', dim=2).truncate(2.0, 0.8, lambda m: np.sum(np.abs(m), axis=1)) + 4
ns2 = 2 * NS('u', dim=2).truncate(1, expr=lambda m: np.sum(m, axis=1)) - (1, 1)
ns3 = NS('n', dim=2).truncate(1.5, expr=lambda m: np.sum(np.square(m), axis=1)) + (4, 0)
ns4 = ((NS('n', dim=2).truncate(2.5, expr=lambda m: np.sum(np.square(m), axis=1)) * 4)
.apply(lambda m: m.astype(int)) / 4 + (0, 3))
ns = 0.4 & ns1 | 0.2 & ns2 | 0.39 & ns3 | 0.01 & ns4
plt.imshow(np.histogramdd(ns.sample(int(1e6)), bins=100, density=True)[0]) | _____no_output_____ | Apache-2.0 | examples/tutorials/07_sampler.ipynb | abrikoseg/batchflow |
5. Notes * Parallelism: `Sampler` objects allow for parallelism with `multiprocessing`. Just make sure to use explicitly defined functions (not `lambda`s) when running `Sampler.apply` or `Sampler.truncate`: | def transform(m):
return np.sum(np.abs(m), axis=1)
ns = NS('n', dim=2).truncate(2.0, 0.8, expr=transform) + 4
from multiprocessing import Pool
def test_func(s):
return s.sample(2)
p = Pool(5)
p.map(test_func, [ns, ns, ns]) | _____no_output_____ | Apache-2.0 | examples/tutorials/07_sampler.ipynb | abrikoseg/batchflow |
Example usage. To use `pyspark_delta_utility` in a project: | import pyspark_delta_utility
print(pyspark_delta_utility.__version__) | _____no_output_____ | MIT | docs/example.ipynb | AraiYuno/pyspark-delta-utility |
MLP train on K=2,3,4. Train a generic MLP as a binary classifier of protein-coding/non-coding RNA. Set aside a 20% test set, stratified shuffled by length. On the non-test portion, use a random shuffle to partition the train and validation sets. Train on the 80% and evaluate on the 20% validation set. | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
tf.keras.backend.set_floatx('float64') | _____no_output_____ | MIT | Length_Study/MLP_01.ipynb | ShepherdCode/ShepherdML |
K-mer frequency, K=2 | def read_features(nc_file,pc_file):
nc = pd.read_csv (nc_file)
pc = pd.read_csv (pc_file)
nc['class']=0
pc['class']=1
rna_mer=pd.concat((nc,pc),axis=0)
return rna_mer
rna_mer = read_features('ncRNA.2mer.features.csv','pcRNA.2mer.features.csv')
rna_mer
# Split into train/test stratified by sequence length.
def sizebin(df):
return pd.cut(df["seqlen"],
bins=[0,1000,2000,4000,8000,16000,np.inf],
labels=[0,1,2,3,4,5])
def make_train_test(data):
bin_labels= sizebin(data)
from sklearn.model_selection import StratifiedShuffleSplit
splitter = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=37863)
# split(x,y) expects that y is the labels.
# Trick: instead of y, give it the bin labels that we generated.
for train_index,test_index in splitter.split(data,bin_labels):
train_set = data.iloc[train_index]
test_set = data.iloc[test_index]
return (train_set,test_set)
(train_set,test_set)=make_train_test(rna_mer)
print("train,test")
train_set.shape,test_set.shape
def prepare_test_set(test_set):
y_test= test_set[['class']].copy()
X_test= test_set.div(test_set['seqlen'],axis=0)
X_test= X_test.drop(columns=['class','seqnum','seqlen'])
return (X_test,y_test)
(X_test,y_test)=prepare_test_set(test_set)
def prepare_train_set(train_set):
y_train_all= train_set[['class']].copy()
X_train_all= train_set.div(train_set['seqlen'],axis=0)
X_train_all= X_train_all.drop(columns=['class','seqnum','seqlen'])
from sklearn.model_selection import ShuffleSplit
splitter = ShuffleSplit(n_splits=1, test_size=0.2, random_state=37863)
for train_index,valid_index in splitter.split(X_train_all):
X_train=X_train_all.iloc[train_index]
y_train=y_train_all.iloc[train_index]
X_valid=X_train_all.iloc[valid_index]
y_valid=y_train_all.iloc[valid_index]
return (X_train,y_train,X_valid,y_valid)
(X_train,y_train,X_valid,y_valid)=prepare_train_set(train_set)
print("train")
print(X_train.shape,y_train.shape)
print("validate")
print(X_valid.shape,y_valid.shape)
# We tried all these. No difference.
act="relu"
act="tanh"
act="sigmoid"
# Adding non-trained Layer Normalization improved accuracy a tiny bit sometimes.
# Adding multiple dense layers only hurt.
mlp2mer = keras.models.Sequential([
keras.layers.LayerNormalization(trainable=False),
keras.layers.Dense(32, activation=act,dtype='float32'),
keras.layers.Dense(32, activation=act,dtype='float32'),
keras.layers.Dense(1, activation=act,dtype='float32')
])
# Error:
# ValueError: logits and labels must have the same shape ((None, 2) vs (None, 1))
# This was because the output layer had 2 nodes (0 and 1) not 1 (binary).
# See page 302 for explanation of these parameters.
# See also the keras docs e.g.
# https://www.tensorflow.org/api_docs/python/tf/keras/losses/sparse_categorical_crossentropy
# Note keras can take parameters for % train vs % validation.
# It seems the BinaryCrossentropy assumes labels are probabilities.
# Instead of loss="binary_crossentropy",
bc=tf.keras.losses.BinaryCrossentropy(from_logits=False)
# Tried optimizers SGD, Adam
mlp2mer.compile(loss=bc, optimizer="Adam",metrics=["accuracy"])
# With one dense layer and Adam optimizer, accuracy increases slowly.
history2mer = mlp2mer.fit(X_train,y_train,epochs=100,validation_data=(X_valid,y_valid))
pd.DataFrame(history2mer.history).plot(figsize=(8,5))
plt.grid(True)
plt.gca().set_ylim(0,1)
plt.show() | _____no_output_____ | MIT | Length_Study/MLP_01.ipynb | ShepherdCode/ShepherdML |
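The held-out test set is never scored in this notebook; a minimal sketch of evaluating the trained K=2 model on it (assumes `mlp2mer` and `prepare_test_set` from the cells above; the variable names here are new): | (X_test2, y_test2) = prepare_test_set(test_set)
# evaluate() returns the loss and the metrics declared at compile time
loss, acc = mlp2mer.evaluate(X_test2, y_test2)
print('Test loss {:.3f}, test accuracy {:.3f}'.format(loss, acc))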
K-mer frequency, K=3 | rna_mer = read_features('ncRNA.3mer.features.csv','pcRNA.3mer.features.csv')
(train_set,test_set)=make_train_test(rna_mer)
(X_train,y_train,X_valid,y_valid)=prepare_train_set(train_set)
act="sigmoid"
mlp3mer = keras.models.Sequential([
keras.layers.LayerNormalization(trainable=False),
keras.layers.Dense(32, activation=act,dtype='float32'),
keras.layers.Dense(32, activation=act,dtype='float32'),
keras.layers.Dense(1, activation=act,dtype='float32')
])
bc=tf.keras.losses.BinaryCrossentropy(from_logits=False)
mlp3mer.compile(loss=bc, optimizer="Adam",metrics=["accuracy"])
history3mer = mlp3mer.fit(X_train,y_train,epochs=100,validation_data=(X_valid,y_valid))
pd.DataFrame(history3mer.history).plot(figsize=(8,5))
plt.grid(True)
plt.gca().set_ylim(0,1)
plt.show() | _____no_output_____ | MIT | Length_Study/MLP_01.ipynb | ShepherdCode/ShepherdML |
K-mer frequency, K=4 | rna_mer = read_features('ncRNA.4mer.features.csv','pcRNA.4mer.features.csv')
(train_set,test_set)=make_train_test(rna_mer)
(X_train,y_train,X_valid,y_valid)=prepare_train_set(train_set)
act="sigmoid"
mlp4mer = keras.models.Sequential([
keras.layers.LayerNormalization(trainable=False),
keras.layers.Dense(32, activation=act,dtype='float32'),
keras.layers.Dense(32, activation=act,dtype='float32'),
keras.layers.Dense(1, activation=act,dtype='float32')
])
bc=tf.keras.losses.BinaryCrossentropy(from_logits=False)
mlp4mer.compile(loss=bc, optimizer="Adam",metrics=["accuracy"])
history4mer = mlp4mer.fit(X_train,y_train,epochs=100,validation_data=(X_valid,y_valid))
pd.DataFrame(history4mer.history).plot(figsize=(8,5))
plt.grid(True)
plt.gca().set_ylim(0,1)
plt.show() | _____no_output_____ | MIT | Length_Study/MLP_01.ipynb | ShepherdCode/ShepherdML |
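A brief comparison sketch across the three values of K (assumes the three history objects from the cells above; 'val_accuracy' is the TF2 key for the validation metric): | for k, h in [(2, history2mer), (3, history3mer), (4, history4mer)]:
    print("K={}: final val_accuracy {:.3f}".format(k, h.history['val_accuracy'][-1]))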
Analysis of Games from the Apple Store

> This dataset was taken from [Kaggle](https://www.kaggle.com/tristan581/17k-apple-app-store-strategy-games) and it was collected on the 3rd of August 2019 using the [iTunes API](https://affiliate.itunes.apple.com/resources/documentation/itunes-store-web-service-search-api/) and the [App Store sitemap](https://apps.apple.com/us/genre/ios-games/id6014). The dataset contains 18 columns:

- **URL**: _URL of the app._
- **ID**: _ID of the game._
- **Name**: _Name of the game._
- **Subtitle**: _Advertisement text of the game._
- **Icon URL**: _Icon of the game, 512x512 pixels jpg._
- **Average User Rating**: _Rounded to the nearest .5. Requires at least 5 ratings._
- **User Rating Count**: _Total of user ratings. Null values mean it is below 5._
- **Price**: _Price in USD._
- **In-app Purchases**: _Prices of available in-app purchases._
- **Description**: _Game description._
- **Developer**: _Game developer._
- **Age Rating**: _Age to play the game. Either 4+, 9+, 12+ or 17+._
- **Languages**: _Languages the game supports, in ISO Alpha-2 codes._
- **Size**: _Size in bytes._
- **Genre**: _Main genre of the game._
- **Primary Genre**: _All genres the game fits in._
- **Original Release Date**: _Date the game was released._
- **Current Version Release Date**: _Date of the last update._

The questions we are going to answer are:

1. Does the advance in technology impact the size of the apps?
2. Does the advance in technology impact the amount of apps being produced?
3. Are most apps free or paid, and which category is more popular?
4. Is there a better one between free and paid apps?
5. How is the distribution of the age restriction?
6. Do most games offer more than one language?

Below is the sequence I will be following:

1. Reading and Understanding the Data
2. Exploratory analysis -> Missing data -> Data types in the dataframe -> Sorting by a desired column -> Saving a new file after this job is done
3. Graphics and insights

Important note

> **This notebook is intended exclusively for practicing and learning purposes. Any corrections, comments and suggestions are more than welcome and I would really appreciate them. Feel free to get in touch if you liked it or if you want to collaborate somehow.**

1. Reading and Understanding the Data | # Important imports for the analysis of the dataset
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
# Show the plot in the same window as the notebook
%matplotlib inline
# Create the dataframe and check the first 8 rows
app_df = pd.read_csv("appstore_games.csv")
app_df.head()
# Dropping columns that I will not use for this analysis
app_df_cut = app_df.drop(columns=['URL', 'Subtitle', 'Icon URL']) | _____no_output_____ | MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
2. Exploratory Analysis | app_df_cut.info() | <class 'pandas.core.frame.DataFrame'>
RangeIndex: 17007 entries, 0 to 17006
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 ID 17007 non-null int64
1 Name 17007 non-null object
2 Average User Rating 7561 non-null float64
3 User Rating Count 7561 non-null float64
4 Price 16983 non-null float64
5 In-app Purchases 7683 non-null object
6 Description 17007 non-null object
7 Developer 17007 non-null object
8 Age Rating 17007 non-null object
9 Languages 16947 non-null object
10 Size 17006 non-null float64
11 Primary Genre 17007 non-null object
12 Genres 17007 non-null object
13 Original Release Date 17007 non-null object
14 Current Version Release Date 17007 non-null object
dtypes: float64(4), int64(1), object(10)
memory usage: 1.9+ MB
| MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
***From the above cell I understand that I should take a closer look into the columns listed below, because they have some missing values:

- Average User Rating
- User Rating Count
- Price
- Languages

Another important thing to check is if there are any **duplicate ID's** and, if so, remove them. Also, the last two columns are not *datetime* type, which they should be. The dataframe will be sorted by the "User Rating Count" column. This column will be our guide to conclude whether a game is successful or not. | # Most reviewed app
#app_df_cut.iloc[app_df_cut["User Rating Count"].idxmax()]
# A better way of seeing the most reviewed apps
app_df_cut = app_df_cut.sort_values(by="User Rating Count", ascending=False)
app_df_cut.head(5) | _____no_output_____ | MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
Rating columns > I'm going to consider that all the NaN values in the "User Rating Count" column mean that the game received no ratings, and therefore the count is 0. If the app received no ratings, then the "Average User Rating" will also be zero for these games. | # Get the columns "User Rating Count" and "Average User Rating" where they are both equal to NaN and set the
# values to 0.
app_df_cut.loc[(app_df_cut["User Rating Count"].isnull()) | (app_df_cut["Average User Rating"].isnull()),
["Average User Rating", "User Rating Count"]] = 0 | _____no_output_____ | MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
In-app Purchases column > I'm considering that the null values within the "In-app Purchases" column mean that there are no in-app purchases available. **Different considerations could have been made, but I will continue with this one for now.** | # Get the column "In-app Purchases" where the value is NaN and set it to zero
app_df_cut.loc[app_df_cut["In-app Purchases"].isnull(),
"In-app Purchases"] = 0 | _____no_output_____ | MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
ID column> Let's check if there are missing or duplicate ID's in the dataset: | # Check if there are missing or 0 ID's
app_df_cut.loc[(app_df_cut["ID"] == 0) | (app_df_cut["ID"].isnull()),
"ID"]
# Check for duplicates in the ID column
len(app_df_cut["ID"]) - len(app_df_cut["ID"].unique())
# The number of unique values is lower than the total amount of ID's, therefore there are duplicates among them.
# Drop every duplicate ID row
app_df_cut.drop_duplicates(subset="ID", inplace=True)
app_df_cut.shape | _____no_output_____ | MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
Size column > I will check if there are any missing or 0 values in the size column. If so, they will be removed from the data, since we cannot know their value. | # Check if there are null values in the Size column
app_df_cut[(app_df_cut["Size"].isnull()) | (app_df_cut['Size'] == 0)]
# Drop the only row in which the game has no size
app_df_cut.drop([16782], axis=0, inplace=True)
# Convert the size to MB
app_df_cut["Size"] = round(app_df_cut["Size"]/1000000)
app_df_cut.head(5) | _____no_output_____ | MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
Price column > Games with a missing value in the price column will be dropped | # Drop the row with NaN values in the "Price" column
app_df_cut = app_df_cut.drop(app_df_cut.loc[app_df_cut["Price"].isnull()].index) | _____no_output_____ | MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
Languages column> Games with a missing value in the "Languages" column will be dropped | # Drop the rows with NaN values in the "Languages" column
app_df_cut = app_df_cut.drop(app_df_cut.loc[app_df_cut["Languages"].isnull()].index)
app_df_cut.info() | <class 'pandas.core.frame.DataFrame'>
Int64Index: 16763 entries, 1378 to 17006
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 ID 16763 non-null int64
1 Name 16763 non-null object
2 Average User Rating 16763 non-null float64
3 User Rating Count 16763 non-null float64
4 Price 16763 non-null float64
5 In-app Purchases 16763 non-null object
6 Description 16763 non-null object
7 Developer 16763 non-null object
8 Age Rating 16763 non-null object
9 Languages 16763 non-null object
10 Size 16763 non-null float64
11 Primary Genre 16763 non-null object
12 Genres 16763 non-null object
13 Original Release Date 16763 non-null object
14 Current Version Release Date 16763 non-null object
dtypes: float64(4), int64(1), object(10)
memory usage: 2.0+ MB
| MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
Age Rating column > I will pad the Age Rating column with a 0 to make it easier to sort the values later. | # Put a 0 in front of every value in the 'Age Rating' column
app_df_cut['Age Rating'] = app_df_cut['Age Rating'].str.pad(width=3, fillchar='0') | _____no_output_____ | MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
Now that the dataset is organized, let's save it into a csv file so that we do not have to redo all the steps above | app_df_cut.to_csv("app_df_clean.csv", index=False)
app_df_clean = pd.read_csv("app_df_clean.csv")
app_df_clean.head()
# Transform the string dates into datetime objects
app_df_clean["Original Release Date"] = pd.to_datetime(app_df_clean["Original Release Date"])
app_df_clean["Current Version Release Date"] = pd.to_datetime(app_df_clean["Current Version Release Date"])
app_df_clean.info() | <class 'pandas.core.frame.DataFrame'>
RangeIndex: 16763 entries, 0 to 16762
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 ID 16763 non-null int64
1 Name 16763 non-null object
2 Average User Rating 16763 non-null float64
3 User Rating Count 16763 non-null float64
4 Price 16763 non-null float64
5 In-app Purchases 16763 non-null object
6 Description 16763 non-null object
7 Developer 16763 non-null object
8 Age Rating 16763 non-null object
9 Languages 16763 non-null object
10 Size 16763 non-null float64
11 Primary Genre 16763 non-null object
12 Genres 16763 non-null object
13 Original Release Date 16763 non-null datetime64[ns]
14 Current Version Release Date 16763 non-null datetime64[ns]
dtypes: datetime64[ns](2), float64(4), int64(1), object(8)
memory usage: 1.9+ MB
| MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
3. Graphics and Insights Evolution of the Apps' Size> Do the apps get bigger with time? | # Make the figure
plt.figure(figsize=(16,10))
# Variables
years = app_df_clean["Original Release Date"].apply(lambda date: date.year)
size = app_df_clean["Size"]
# Plot a swarmplot
palette = sns.color_palette("muted")
size = sns.swarmplot(x=years, y=size, palette=palette)
size.set_ylabel("Size (in MB)", fontsize=16)
size.set_xlabel("Original Release Date", fontsize=16)
size.set_title("Time Evolution of the Apps' Sizes", fontsize=20)
# Save the image. Has to be called before plt.show()
#plt.savefig("Time_Evol_App_Size.png", dpi=300)
plt.show() | _____no_output_____ | MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
> **With the advance in technology and the internet becoming cheaper and cheaper more people have access to faster networks. As the years go by, it can be seen in the graph above that the games' size gets bigger. Some games that have more than 2GB can be noted, reaching a maximum value of 4GB, but they are not the most common ones. As each game is represented by a different tiny ball in the graph above, the quantity of games seems to grow as well. Let's investigate the number of apps per year to be sure.** How does the Amount of Apps Released Change Over Time? | # Make the figure
plt.figure(figsize=(16,10))
# Plot a countplot
palette1 = sns.color_palette("inferno_r")
apps_per_year = sns.countplot(x=years, data=app_df_clean, palette=palette1)
apps_per_year.set_xlabel("Year of Release", fontsize=16)
apps_per_year.set_ylabel("Amount of Games", fontsize=16)
apps_per_year.set_title("Quantity of Apps per Year", fontsize=20)
# Write the height of each bar on top of them
for p in apps_per_year.patches:
apps_per_year.annotate("{}".format(p.get_height()),
(p.get_x() + p.get_width() / 2, p.get_height() + 40),
va="center", ha="center", fontsize=16)
# Save the figure
#plt.savefig("Quantity_Apps_Per_Year.png", dpi=300) | _____no_output_____ | MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
> **From 2008 to 2016 we can identify a drastic increase in the number of games released each year, with the highest increase occurring between 2015 and 2016. After 2016 the amount of games released per year starts to drop almost linearly for 2 years (2019 cannot be considered yet, because the data was collected in August, so 4 months of data for the current year are missing).**>> **Without further analysis, I would argue that after a boom in the production of apps it gets harder to come up with new ideas that are not out there already, making the production and release of new games slow down; but it is important to keep in mind that, without further research, this cannot be taken as the right explanation.** | #Make a list of years from 2014 to 2018
years_lst = [year for year in range(2014,2019)]
#For loop to get a picture of the amount of games produced from August to December
for year in years_lst:
from_August = app_df_clean["Original Release Date"].apply(lambda date: (date.year == year) & (date.month >= 8)).sum()
total = app_df_clean["Original Release Date"].apply(lambda date: date.year == year).sum()
print("In {year}, {percentage}% games were produced from August to December."
.format(year=year,
percentage=round((from_August/total)*100, 1))) | In 2014, 44.1% games were produced from August to December.
In 2015, 42.2% games were produced from August to December.
In 2016, 39.9% games were produced from August to December.
In 2017, 40.8% games were produced from August to December.
In 2018, 42.4% games were produced from August to December.
| MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
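A quick sketch of the extrapolation used in the next paragraph (assuming the August-December share holds at the ~42% average computed above; `app_df_clean` comes from the earlier cells): | # Games released in 2019 so far (data collected on the 3rd of August)
games_2019_so_far = app_df_clean["Original Release Date"].apply(lambda date: date.year == 2019).sum()
est_total_2019 = round(games_2019_so_far / (1 - 0.42))
print("Estimated total games released in 2019:", est_total_2019)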
> **Having checked the previous five years, we can see that the amount of games released from August to December represents a significant portion of the whole and can be considered roughly constant at 42%. Nevertheless, the last two years show a tendency for a linear decrease in the quantity of games released per year, and taking into account that we still have 42% of this year's games to be released, the total amount in the present year (2019) would be 2617. This is bigger than 2018, but this was not an elaborate calculation, as we took the average share of games produced between months 8-12 to be 42%.** Now, can we observe a trend in the age restriction of games released each year? | # Make the figure
plt.figure(figsize=(16,10))
# Variables. Sort by age to put the legend in order.
data = app_df_clean.sort_values(by='Age Rating')
# Plot a countplot
palette1 = sns.color_palette("viridis")
apps_per_year = sns.countplot(x=years, data=data, palette=palette1, hue='Age Rating')
apps_per_year.set_xlabel("Year of Release per Age", fontsize=16)
apps_per_year.set_ylabel("Amount of Games", fontsize=16)
apps_per_year.set_title("Quantity of Apps per Year & Age", fontsize=20)
plt.legend(title='Age Restrictions', fontsize=13, title_fontsize=14, loc='upper left')
# Save the figure
#plt.savefig("Quantity_Apps_Per_Year_&_Age.png", dpi=300)
plt.show() | _____no_output_____ | MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
> **As shown above, most apps tend to target all ages.** The amount of apps had a considerable increase in the past years indicating that producing an app has been a trend and possibly a lucrative market. That being said, it is important to analyse if there is a preference for free or paid games and the range of prices they are in. | # Make the figure
plt.figure(figsize=(16,10))
# Variables
price = app_df_clean["Price"]
# Plot a Countplot
palette2 = sns.light_palette("green", reverse=True)
price_vis = sns.countplot(x=price, palette=palette2)
price_vis.set_xlabel("Price (in US dollars)", fontsize=16)
price_vis.set_xticklabels(price_vis.get_xticklabels(), fontsize=12, rotation=45)
price_vis.set_ylabel("Amount of Games", fontsize=16)
price_vis.set_title("Quantity of Apps per Price", fontsize=20)
# Write the height of the bars on top
for p in price_vis.patches:
price_vis.annotate("{:.0f}".format(p.get_height()), # Text that will appear on the screen
(p.get_x() + p.get_width() / 2 + 0.1, p.get_height()), # (x, y) has to be a tuple
ha='center', va='center', fontsize=14, color='black', xytext=(0, 10), # Customizations
textcoords='offset points')
# Save the figure
#plt.savefig("Quantity_Each_App_Per_Price.png", dpi=300) | _____no_output_____ | MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
> **We can see that the majority of the games are free. That leads me to analyse whether the free apps have more in-app purchases than the paid ones, meaning that this might be their source of income.** | # Make the figure
plt.figure(figsize=(16,10))
# Variables
in_app_purchases = app_df_clean["In-app Purchases"].str.split(",").apply(lambda lst: len(lst))
# Plot a stripplot
palette3 = sns.color_palette("BuGn_r", 23)
in_app_purchases_vis = sns.stripplot(x=price, y=in_app_purchases, palette=palette3)
in_app_purchases_vis.set_xlabel("Game Price (in US dollars)", fontsize=16)
in_app_purchases_vis.set_xticklabels(in_app_purchases_vis.get_xticklabels(), fontsize=12, rotation=45)
in_app_purchases_vis.set_ylabel("In-app Purchases Available", fontsize=16)
in_app_purchases_vis.set_title("Quantity of In-app Purchases per Game Price", fontsize=20)
# Save the image. Has to be called before plt.show()
#plt.savefig("Quantity_In_App_Purchase.png", dpi=300)
plt.show() | _____no_output_____ | MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
> **As expected, free and lower-priced apps provide more items to be purchased than expensive games. Two reasons can be named:**>> **1. The developers have to invest money into making the games and updating them, therefore they need a source of income. In the case of free games, this comes from the in-app purchases available.**>> **2. People who have spent a lot of money on an app would not be happy or willing to spend more, given that they have already made a high initial investment.** We know that most of the apps are free. Let's see if there are any links between an app being paid and being better than the free ones: | # Plot a distribution of the top 200 apps by their price
# Make the figure
plt.figure(figsize=(16,10))
# Plot a Countplot
palette4 = sns.color_palette("BuPu_r")
top_prices = sns.countplot(app_df_clean.iloc[:200]["Price"], palette=palette4)
top_prices.set_xlabel("Price (in US dollars)", fontsize=16)
top_prices.set_xticklabels(top_prices.get_xticklabels(), fontsize=12)
top_prices.set_ylabel("Amount of Games", fontsize=16)
top_prices.set_title("Quantity of Apps per Price for the Top 200", fontsize=20)
# Write the height of the bars on top
for p in top_prices.patches:
top_prices.annotate("{:.0f}".format(p.get_height()),
(p.get_x() + p.get_width() / 2., p.get_height()),
ha='center', va='center', fontsize=14, color='black', xytext=(0, 8),
textcoords='offset points')
# Save the image.
#plt.savefig("Quantity_App_Per_Price.png", dpi=300) | _____no_output_____ | MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
> **The graph above shows that among the top 200 games, the vast majority are free. This result makes sense considering you don't have to invest any money to start playing and can spend afterward if you would like to invest in it.** Even though most games are free, we should check whether one type of app (paid or free) is better. Let's do that by looking at the average user rating: | # Create the DataFrames needed
paid = app_df_clean[app_df_clean["Price"] > 0]
total_paid = len(paid)
free = app_df_clean[app_df_clean["Price"] == 0]
total_free = len(free)
# Make the figure and the axes (1 row, 2 columns)
fig, axes = plt.subplots(1, 2, figsize=(16,10))
palette5 = sns.color_palette("gist_yarg", 10)
# Free apps countplot
free_vis = sns.countplot(x="Average User Rating", data=free, ax=axes[0], palette=palette5)
free_vis.set_xlabel("Average User Rating", fontsize=16)
free_vis.set_ylabel("Amount of Games", fontsize=16)
free_vis.set_title("Free Apps", fontsize=20)
# Display the percentages on top of the bars
for p in free_vis.patches:
free_vis.annotate("{:.1f}%".format(100 * (p.get_height()/total_free)),
(p.get_x() + p.get_width() / 2 + 0.1, p.get_height()),
ha='center', va='center', fontsize=14, color='black', xytext=(0, 8),
textcoords='offset points')
# Paid apps countplot
paid_vis = sns.countplot(x="Average User Rating", data=paid, ax=axes[1], palette=palette5)
paid_vis.set_xlabel("Average User Rating", fontsize=16)
paid_vis.set_ylabel(" ", fontsize=16)
paid_vis.set_title("Paid Apps", fontsize=20)
# Display the percentages on top of the bars
for p in paid_vis.patches:
paid_vis.annotate("{:.1f}%".format(100 * (p.get_height()/total_paid)),
(p.get_x() + p.get_width() / 2 + 0.1, p.get_height()),
ha='center', va='center', fontsize=14, color='black', xytext=(0, 8),
textcoords='offset points')
# Save the image.
#plt.savefig("Free_VS_Paid.png", dpi=300) | _____no_output_____ | MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
> **There are no indications of whether a paid or a free game is better. Actually, the pattern of user ratings is pretty much equal for both types of games. The graph above shows that both categories seem to deliver a good service and mostly satisfy their customers, as most of the ratings are between 4-5 stars. We can also identify that the majority of the users do not rate the games.** Age Rating > Is there a preference for the permitted age of the games? | # Make the figure
plt.figure(figsize=(16,10))
# Make a countplot
palette6 = sns.color_palette("BuGn_r")
age_vis = sns.countplot(x=app_df_clean["Age Rating"], order=["04+", "09+", "12+", "17+"], palette=palette6)
age_vis.set_xlabel("Age Rating", fontsize=16)
age_vis.set_ylabel("Amount of Games", fontsize=16)
age_vis.set_title("Amount of Games per Age Restriction", fontsize=20)
# Write the height of the bars on top
for p in age_vis.patches:
age_vis.annotate("{:.0f}".format(p.get_height()),
(p.get_x() + p.get_width() / 2., p.get_height()),
ha='center', va='center', fontsize=14, color='black', xytext=(0, 8),
textcoords='offset points')
# Save the image.
#plt.savefig("Amount_Games_Per_Age.png", dpi=300) | _____no_output_____ | MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
> **Most of the apps are in the 4+ age category, which can be translated as "everyone can play". This ensures that the developers are targeting a much broader audience with their games.** Languages > Do most games have various choices of languages? | # Create a new column that contains the amount of languages each app has available
app_df_clean["numLang"] = app_df_clean["Languages"].apply(lambda x: len(x.split(",")))
#Make the figure
plt.figure(figsize=(16,10))
#Variables
lang = app_df_clean.loc[app_df_clean["numLang"] <= 25, "numLang"]
#Plot a countplot
palette7 = sns.color_palette("PuBuGn_r")
numLang_vis = sns.countplot(x=lang, data=app_df_clean, palette=palette7)
numLang_vis.set_xlabel("Quantity of Languages", fontsize=16)
numLang_vis.set_ylabel("Amount of Games", fontsize=16)
numLang_vis.set_title("Quantity of Languages Available per Game", fontsize=20)
# Write the height of the bars on top
for p in numLang_vis.patches:
numLang_vis.annotate("{:.0f}".format(p.get_height()),
(p.get_x() + p.get_width() / 2. + .1, p.get_height()),
ha='center', va='center', fontsize=12, color='black', xytext=(0, 12),
textcoords='offset points')
# Save the image.
#plt.savefig("Quantity_Lang_Per_Game.png", dpi=300)
#Amount of games that have only the English language
len(app_df_clean[(app_df_clean["numLang"] == 1) & (app_df_clean["Languages"] == "EN")])
#Amount of games that have only one language and is not English
len(app_df_clean[(app_df_clean["numLang"] == 1) & (app_df_clean["Languages"] != "EN")]) | _____no_output_____ | MIT | App Store Strategy Game Analysis.ipynb | MarceloFischer/App-Store-Dataset-Analysis |
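A quick follow-up sketch: the share of English-only games in the whole catalog (assumes `app_df_clean` and the `numLang` column from above): | english_only = len(app_df_clean[(app_df_clean["numLang"] == 1) & (app_df_clean["Languages"] == "EN")])
print("{:.1f}% of the games are English-only".format(100 * english_only / len(app_df_clean)))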
Logistic Regression 3-class Classifier. Shown below are the decision boundaries of a logistic-regression classifier on the `iris` dataset. The data points are colored according to their labels. | print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
import pandas as pd
mydata = pd.read_csv("dataset.csv")
dt = mydata.values
X = dt[:, :2]
Y = dt[:, 3]
Y = Y.astype('int')
# import some data to play with
#iris = datasets.load_iris()
#X = iris.data[:, :2] # we only take the first two features.
#Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of Neighbours Classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Length_Data')
plt.ylabel('Width_Data')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show() | Automatically created module for IPython interactive environment
| CC-BY-3.0 | Assignments/hw3/HW3_Generalized_Linear_Model_finished/plot_iris_logistic1.ipynb | Leon23N/Leon23N.github.io |
ga_sim. This notebook is intended to create simulations of dwarf galaxies and globular clusters, using the DES catalog as field stars. These simulations are later copied to the gawa notebook, a pipeline to detect stellar systems among field stars. In principle this pipeline reads a table in a database with g and r magnitudes, subtracts the extinction in each band, and randomizes the positions in RA and DEC in order to avoid stellar systems in the FoV. The star clusters are inserted later, centered on each HP pixel with a specific nside. To complete all the steps you just have to run all the cells below in sequence. Firstly, install the packages not available in the image via the terminal. Restart the kernel so that you can run the cell below. | import numpy as np
from astropy.coordinates import SkyCoord
from astropy import units as u
import healpy as hp
import astropy.io.fits as fits
from astropy.table import Table
from astropy.io.fits import getdata
import sqlalchemy
import json
from pathlib import Path
import os
import sys
import parsl
from parsl.app.app import python_app, bash_app
from parsl.configs.local_threads import config
from time import sleep
from tqdm import tqdm
from ga_sim import (
make_footprint,
faker,
join_cat,
write_sim_clus_features,
download_iso,
read_cat,
gen_clus_file,
read_error,
clus_file_results,
join_cats_clean,
split_files,
clean_input_cat,
clean_input_cat_dist
)
parsl.clear()
parsl.load(config)
| _____no_output_____ | MIT | ga_sim.ipynb | linea-it/ga_sim |
Below are the items of the configuration for field stars and simulations. A small description follows as a comment. | # Main settings:
confg = "ga_sim.json"
# read config file
with open(confg) as fstream:
param = json.load(fstream)
age_simulation = 1.0e10 # in years
Z_simulation = 0.001 # Assuming Z_sun = 0.0152
# Directory for the results
os.system("mkdir -p " + param['results_path'])
# Reading reddening files
hdu_ngp = fits.open("sample_data/SFD_dust_4096_ngp.fits", memmap=True)
ngp = hdu_ngp[0].data
hdu_sgp = fits.open("sample_data/SFD_dust_4096_sgp.fits", memmap=True)
sgp = hdu_sgp[0].data
| _____no_output_____ | MIT | ga_sim.ipynb | linea-it/ga_sim |
Downloading the isochrone table with the latest improvements from Padova, and printing the age and metallicity of the downloaded isochrone. Try one more time in case of problems; sometimes there is a problem with the connection to Padova. | download_iso(param['padova_version_code'], param['survey'], Z_simulation,
age_simulation, param['av_simulation'], param['file_iso'])
| _____no_output_____ | MIT | ga_sim.ipynb | linea-it/ga_sim |
Checking the age and metallicity of the isochrone: | # Reading [M/H], log_age, mini, g
iso_info = np.loadtxt(param['file_iso'], usecols=(1, 2, 3, 26), unpack=True)
FeH_iso = iso_info[0][0]
logAge_iso = iso_info[1][0]
m_ini_iso = iso_info[2]
g_iso = iso_info[3]
print('[Fe/H]={:.2f}, Age={:.2f} Gyr'.format(FeH_iso, 10**(logAge_iso-9)))
mM_mean = (param['mM_max'] + param['mM_min']) / 2.
print(np.max(m_ini_iso[g_iso + mM_mean < param['mmax']]))
mean_mass = (np.min(m_ini_iso[g_iso + mM_mean < param['mmax']]) +
np.max(m_ini_iso[g_iso + mM_mean < param['mmax']])) / 2.
print('Mean mass (M_sun): {:.2f}'.format(mean_mass))
hpx_ftp = make_footprint(param['ra_min'], param['ra_max'], param['dec_min'], param['dec_max'],
param['nside_ftp'], output_path=param['results_path'])
print(len(hpx_ftp))
| _____no_output_____ | MIT | ga_sim.ipynb | linea-it/ga_sim |
Reading the catalog and writing as a fits file (to avoid read from the DB many times in the case the same catalog will be used multiple times). | RA, DEC, MAG_G, MAGERR_G, MAG_R, MAGERR_R = read_cat(
param['vac_ga'], param['ra_min'], param['ra_max'], param['dec_min'], param['dec_max'],
param['mmin'], param['mmax'], param['cmin'], param['cmax'],
"DES_Y6_Gold_v1_derred.fits", 1.19863, 0.83734, ngp, sgp, param['results_path'])
| _____no_output_____ | MIT | ga_sim.ipynb | linea-it/ga_sim |
The cells below read the positions, calculate the extinction using the previous function, correct the apparent magnitudes (to the top of the Galaxy), filter the stars by magnitude and color ranges, and write a file with the original positions of the stars and the corrected magnitudes. Simulation of dwarf galaxies and globular clusters. In fact, dwarf galaxies and globular clusters are very similar in terms of stellar populations. Dwarf galaxies have a half-light radius larger than that of globular clusters (given the amount of dark matter) at the same absolute magnitude. The code below simulates stars using a Kroupa or Salpeter IMF and an exponential profile for the 2D distribution of stars. Generating the properties of clusters based on the properties stated above, and writing them to the file 'objects.dat'. | RA_pix, DEC_pix, r_exp, ell, pa, dist, mass, mM, hp_sample_un = gen_clus_file(
param['ra_min'],
param['ra_max'],
param['dec_min'],
param['dec_max'],
param['nside_ini'],
param['border_extract'],
param['mM_min'],
param['mM_max'],
param['log10_rexp_min'],
param['log10_rexp_max'],
param['log10_mass_min'],
param['log10_mass_max'],
param['ell_min'],
param['ell_max'],
param['pa_min'],
param['pa_max'],
param['results_path']
)
| _____no_output_____ | MIT | ga_sim.ipynb | linea-it/ga_sim |
Dist stars. Reading the magnitude and error data. | mag1_, err1_, err2_ = read_error(param['file_error'], 0.015, 0.015)
| _____no_output_____ | MIT | ga_sim.ipynb | linea-it/ga_sim |
Now simulating the clusters using the 'faker' function. | @python_app
def faker_app(N_stars_cmd, frac_bin, IMF_author, x0, y0, rexp, ell_, pa, dist, hpx, output_path):
global param
faker(
N_stars_cmd,
frac_bin,
IMF_author,
x0,
y0,
rexp,
ell_,
pa,
dist,
hpx,
param['cmin'],
param['cmax'],
param['mmin'],
param['mmax'],
mag1_,
err1_,
err2_,
param['file_iso'],
output_path
)
# Directory of the _clus.dat files generated by faker.
fake_clus_path = param['results_path'] + '/fake_clus'
futures = list()
# Create a progress bar (optional)
with tqdm(total=len(hp_sample_un), file=sys.stdout) as pbar:
pbar.set_description("Submit Parsls Tasks")
# Submissão dos Jobs Parsl
for i in range(len(hp_sample_un)):
# Estimating the number of stars in cmd dividing mass by mean mass
N_stars_cmd = int(mass[i] / mean_mass)
# os.register_at_fork(after_in_child=lambda: _get_font.cache_clear())
futures.append(
faker_app(
N_stars_cmd,
param['frac_bin'],
param['IMF_author'],
RA_pix[i],
DEC_pix[i],
r_exp[i],
ell[i],
pa[i],
dist[i],
hp_sample_un[i],
output_path=fake_clus_path
)
)
pbar.update()
# Progress bar to track the Parsl tasks.
print("Tasks Done:")
with tqdm(total=len(futures), file=sys.stdout) as pbar2:
# is_done is a list containing True or False for each task;
# is_done.count(True) returns the number of tasks that have already finished.
is_done = list()
done_count = 0
while is_done.count(True) != len(futures):
is_done = list()
for f in futures:
is_done.append(f.done())
# Only update the pbar if the value has changed.
if is_done.count(True) != done_count:
done_count = is_done.count(True)
# A reset is needed because the number of iterations
# is larger than the number of jobs.
pbar2.reset(total=len(futures))
# Update the pbar
pbar2.update(done_count)
if done_count < len(futures):
sleep(3)
| _____no_output_____ | MIT | ga_sim.ipynb | linea-it/ga_sim |
Now functions to join catalogs of simulated clusters and field stars, and to estimate signal-to-noise ratio. | # Le os arquivos _clus.dat do diretório "result/fake_clus"
# Gera o arquivo "result/<survey>_mockcat_for_detection.fits"
mockcat = join_cat(
param['ra_min'],
param['ra_max'],
param['dec_min'],
param['dec_max'],
hp_sample_un,
param['survey'],
RA,
DEC,
MAG_G,
MAG_R,
MAGERR_G,
MAGERR_R,
param['nside_ini'],
param['mmax'],
param['mmin'],
param['cmin'],
param['cmax'],
input_path=fake_clus_path,
output_path=param['results_path'])
print(mockcat)
| _____no_output_____ | MIT | ga_sim.ipynb | linea-it/ga_sim |
If necessary, split the catalog with simulated clusters into many files according to the HP schema. | os.makedirs(param['hpx_cats_path'], exist_ok=True)
ipix_cats = split_files(mockcat, 'ra', 'dec',
param['nside_ini'], param['hpx_cats_path'])
sim_clus_feat = write_sim_clus_features(
mockcat, hp_sample_un, param['nside_ini'], mM, output_path=param['results_path'])
| _____no_output_____ | MIT | ga_sim.ipynb | linea-it/ga_sim |
Merge both files in a single file. | clus_file_results(param['results_path'], "star_clusters_simulated.dat",
sim_clus_feat, 'results/objects.dat')
| _____no_output_____ | MIT | ga_sim.ipynb | linea-it/ga_sim |
Plots. A few plots to characterize the simulated clusters. | from ga_sim.plot import (
general_plots,
plot_ftp,
plots_ang_size,
plots_ref,
plot_err,
plot_clusters_clean
)
general_plots(param['star_clusters_simulated'])
| _____no_output_____ | MIT | ga_sim.ipynb | linea-it/ga_sim |
Plot footprint map to check area. | hpx_ftp = param['results_path'] + "/ftp_4096_nest.fits"
plot_ftp(hpx_ftp, param['star_clusters_simulated'],
mockcat, param['ra_max'], param['ra_min'], param['dec_min'], param['dec_max'])
# Directory where the _clus.dat files are located
plots_ang_size(param['star_clusters_simulated'], param['results_path'],
param['mmin'], param['mmax'], param['cmin'], param['cmax'],
param['output_plots'])
plots_ref(FeH_iso, param['star_clusters_simulated'], param['output_plots'])
| _____no_output_____ | MIT | ga_sim.ipynb | linea-it/ga_sim |
Plotting errors in main magnitude band. | # Plots to analyze the simulated clusters.
plot_err(mockcat, param['output_plots'])
| _____no_output_____ | MIT | ga_sim.ipynb | linea-it/ga_sim |
Removing stars close to each other. Now we have to remove stars that would not be detected by the survey's detection pipeline. In principle, the software used to detect sources is SExtractor, whose deblend parameter is set so that sources very close to each other are blended. To remove such sources, the function below reads the catalogs from ipixels (HealPixels). For each star, the distances to all sources are calculated. If the second minimum distance (the first one is zero, since it is the star matched with itself) is less than the distance defined as a parameter of the function, the star is not listed in the filtered catalog. The function runs in parallel in order to run faster, using all the cores of the node. Firstly, setting up the coordinate strings used to read the star positions. | @python_app
def clean_input_cat_dist_app(file_name, ra_str, dec_str, min_dist_arcsec):
clean_input_cat_dist(
file_name,
ra_str,
dec_str,
min_dist_arcsec
)
futures = list()
# Create a progress bar (optional)
with tqdm(total=len(ipix_cats), file=sys.stdout) as pbar:
pbar.set_description("Submit Parsls Tasks")
# Submissão dos Jobs Parsl
for i in ipix_cats:
futures.append(
clean_input_cat_dist_app(
i, param['ra_str'], param['dec_str'], param['min_dist_arcsec'])
)
pbar.update()
# Wait for all the Parsl tasks to finish.
# This loop keeps monitoring the parsl futures
# until all of them have status done.
# This whole block is optional.
print("Tasks Done:")
with tqdm(total=len(futures), file=sys.stdout) as pbar2:
# is_done is a list containing True or False for each task;
# is_done.count(True) returns the number of tasks that have already finished.
is_done = list()
done_count = 0
while is_done.count(True) != len(futures):
is_done = list()
for f in futures:
is_done.append(f.done())
# Só atualiza a pbar se o valor for diferente.
if is_done.count(True) != done_count:
done_count = is_done.count(True)
# A reset is needed because the number of iterations
# is larger than the number of jobs.
pbar2.reset(total=len(futures))
# Update the pbar
pbar2.update(done_count)
if done_count < len(futures):
sleep(3)
| _____no_output_____ | MIT | ga_sim.ipynb | linea-it/ga_sim |
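For intuition, a minimal sketch of the second-minimum-distance criterion described above, written with astropy (an illustration only, not the library's own implementation): | from astropy.coordinates import SkyCoord
from astropy import units as u

def keep_isolated(ra_deg, dec_deg, min_dist_arcsec):
    # nthneighbor=2 skips the trivial zero-distance match of each star with itself
    coords = SkyCoord(ra=ra_deg * u.deg, dec=dec_deg * u.deg)
    idx, sep2d, dist3d = coords.match_to_catalog_sky(coords, nthneighbor=2)
    return sep2d.arcsec > min_dist_arcsec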
After filtering the stars in HealPixels, join all the HP pixel files into a single catalog, the final cat. | ipix_clean_cats = [i.split('.')[0] + '_clean_dist.fits' for i in ipix_cats]
join_cats_clean(ipix_clean_cats,
param['final_cat'], param['ra_str'], param['dec_str'])
| _____no_output_____ | MIT | ga_sim.ipynb | linea-it/ga_sim |
Plot clusters comparing filtered and unfiltered stars in each cluster. The region sampled is the center of the cluster, where the crowding is most intense. Below are the clusters whose stars were filtered by maximum distance. | plot_clusters_clean(ipix_cats, ipix_clean_cats,
param['nside_ini'], param['ra_str'], param['dec_str'], 0.01)
| _____no_output_____ | MIT | ga_sim.ipynb | linea-it/ga_sim |
Let's see the simple code for Linear Regression. We will be creating a model to predict the weight of a person based on the independent variable height, using simple linear regression. The weight-height dataset is downloaded from Kaggle: https://www.kaggle.com/sonalisingh1411/linear-regression-using-weight-height/data | from google.colab import drive
drive.mount('/content/drive')
%cd /content/drive/My\ Drive
%cd 'Colab Notebooks' | /content/drive/My Drive/Colab Notebooks
| MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
1. Simple Linear Regression with one independent variable. We will read the data file and do some data exploration. | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv('weight-height.csv')
df.head()
df.shape
df.columns
df['Gender'].unique()
df.corr() | _____no_output_____ | MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
We can see that there is a high correlation between the height and weight columns. We will use the LinearRegression model from the sklearn library. | x = df['Height']
y = df['Weight'] | _____no_output_____ | MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
We will split the data into train and test datasets using the sklearn model_selection library. | from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x,y,test_size=0.2, random_state=42)
X_train.shape
X_train = X_train.to_numpy()
X_train = X_train.reshape(-1,1) | _____no_output_____ | MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
reshape() is called to make X_train 2-dimensional, that is, in row-and-column format. | X_train.shape
X_test = X_test.to_numpy()
X_test = X_test.reshape(-1,1)
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(X_train,y_train) | _____no_output_____ | MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
The model is created as an instance of LinearRegression. With the .fit() method, the optimal values of the coefficients (b0, b1) are calculated from the inputs X_train and y_train. | model.score(X_train,y_train)
The arguments to .score() are also X_train and y_train and it returns the R2 (coefficient of determination). | Intercept,coef = model.intercept_,model.coef_
print("Intercept is :",Intercept, sep='\n')
print("Coefficient/slope is :",coef , sep='\n') | Intercept is :
-349.7878205824451
Coefficient/slope is :
[7.70218561]
| MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
The model attributes model.intercept_ and model.coef_ give the values of (b0, b1). Now we will use the trained model to predict on the test data. | y_pred = model.predict(X_test)
y_pred | _____no_output_____ | MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
We can also use the slope-intercept form of a line, y = intercept + slope * x, to predict the values on the test data. We will use the model.intercept_ and model.coef_ values for the prediction. | y_pred1 = Intercept + coef * X_test
y_pred1 | _____no_output_____ | MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
We can see that the output of y_pred and y_pred1 is the same. We will plot the graph of predicted and actual weight values using the seaborn and matplotlib libraries. | import seaborn as sns
ax = sns.regplot(x=y_pred, y=y_test,
x_estimator=np.mean) | _____no_output_____ | MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
To see the plot clearly, let's draw 20 samples from the training dataset with actual weight values and plot them against the predicted weight values for the training dataset. The red dots represent the actual weight values (20 samples drawn) and the green line represents the weight values predicted by the model. The vertical distance between a red dot and the green line is the error, which we have to minimize to best fit the model. | plt.scatter(X_train[0:20], y_train[0:20], color = "red")
plt.plot(X_train, model.predict(X_train), color = "green")
plt.title("Weight vs Height")
plt.xlabel("Height")
plt.ylabel("Weight")
plt.show() | _____no_output_____ | MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
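The notebook scores the model only on the training set; a small sketch of test-set metrics for the simple model (assumes `sklearn.metrics`; `y_pred` comes from the cells above): | from sklearn.metrics import mean_squared_error, r2_score

print("Test RMSE:", np.sqrt(mean_squared_error(y_test, y_pred)))
print("Test R^2:", r2_score(y_test, y_pred))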
2. Multiple Linear Regression. A regression with 2 or more independent variables is multiple linear regression. We will use the same dataset to implement multiple linear regression. The 2 independent variables will be gender and height, which will be used to predict the weight. | x = df.drop(columns = 'Weight')
y = df['Weight']
x.columns | _____no_output_____ | MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
The Gender column is categorical. We cannot use it directly, as the model can work only with numbers. We have to convert it to a one-hot encoding using the pandas get_dummies() method. A new column is created and the earlier column is dropped. The new column contains the values 1 and 0 for male and female respectively. | x = pd.get_dummies(x, columns = ['Gender'], drop_first = True)
x
print(x.shape)
print(y.shape) | (10000, 2)
(10000,)
| MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
The rest of the steps are the same as for simple linear regression. | from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x,y,test_size=0.2, random_state=42)
X_train = X_train.to_numpy()
X_train = X_train.reshape(-1,2)
X_test = X_test.to_numpy()
X_test = X_test.reshape(-1,2)
X_train.shape
mulLR = LinearRegression()
mulLR.fit(X_train,y_train)
mulLR.score(X_train,y_train)
Intercept,coef = mulLR.intercept_,mulLR.coef_
print("Intercept is :",Intercept, sep='\n')
print("Coefficient/slope is :",coef , sep='\n') | Intercept is :
-244.69356793639193
Coefficient/slope is :
[ 5.97314123 19.34720343]
| MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
The coefficient array has two values, corresponding to the height and gender columns respectively. | y_pred = mulLR.predict(X_test)
y_pred | _____no_output_____ | MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
Alternate method : Predicting weight using coefficient and intercept values in equation | y_pred1 = Intercept + np.sum(coef * X_test, axis = 1)
y_pred1 | _____no_output_____ | MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
y_pred and y_pred1 both contain the same predicted values. | import seaborn as sns
ax = sns.regplot(x=y_pred, y=y_test,
x_estimator=np.mean) | _____no_output_____ | MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
The plot above shows the predicted versus actual weight values on the test dataset. 3. Polynomial Regression. We will use polynomial regression to find the weight using the same dataset. Note that polynomial regression is a special case of linear regression. Import the class PolynomialFeatures from sklearn.preprocessing. | from sklearn.preprocessing import PolynomialFeatures
x = df['Height']
y = df['Weight']
transformer = PolynomialFeatures(degree = 2, include_bias = False) | _____no_output_____ | MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
We have to include terms like x² (x squared) as additional features when using polynomial regression, so we have to transform the input. For that, a transformer is defined with degree (which defines the degree of the polynomial regression function) and include_bias (which decides whether to include a bias column or not). | from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x,y,test_size=0.2, random_state=42)
X_train = X_train.to_numpy()
X_train = X_train.reshape(-1,1)
X_test = X_test.to_numpy()
X_test = X_test.reshape(-1,1)
transformer.fit(X_train)
X_trans = transformer.transform(X_train) | _____no_output_____ | MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
The two lines of code above can be combined into one line, as below; both give the same output. | x_trans = PolynomialFeatures(degree=2, include_bias=False).fit_transform(X_train)
X_transtest = PolynomialFeatures(degree=2, include_bias=False).fit_transform(X_test) | _____no_output_____ | MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
Each value in the first column is squared and stored in the second column as an additional feature. | print(x_trans)
[ 74.6976372 5579.7370037 ]
[ 68.50781491 4693.32070353]
...
[ 64.3254058 4137.75783102]
[ 69.07449203 4771.28544943]
[ 67.58883983 4568.25126988]]
| MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
Create and fit the model | poly_LR = LinearRegression().fit(x_trans,y_train)
poly_LR.score(x_trans,y_train)
y_pred = poly_LR.predict(X_transtest)
y_pred
Intercept,coef = poly_LR.intercept_,poly_LR.coef_
print("Intercept is :",Intercept, sep='\n')
print("Coefficient/slope is :",coef , sep='\n') | Intercept is :
-244.69356793639193
Coefficient/slope is :
[ 5.97314123 19.34720343]
| MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
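For completeness, a sketch of writing out the fitted quadratic by hand (assumes `poly_LR` from above; its two coefficients correspond to the x and x² features): | b1, b2 = poly_LR.coef_
# y = intercept + b1*x + b2*x**2, evaluated on the raw (untransformed) heights
y_pred2 = poly_LR.intercept_ + b1 * X_test.ravel() + b2 * X_test.ravel() ** 2
y_pred2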
The score of polynomial regression can be slightly better than linear regression due to the added complexity, but a high R2 score does not always mean a good model. Sometimes polynomial regression can lead to overfitting due to the complexity of its regression equation. | _____no_output_____ | MIT | Linear_Regression.ipynb | tejashrigadre/Linear_Regression |
|
Cleaning | beer.head()
beer.shape
beer = beer[beer['r_text'].str.contains('No Review') == False]
beer.shape
beer[beer['breakdown'].str.contains('look:') != True]['name'].value_counts()
beer = beer[beer['breakdown'].str.contains('look:') == True]
beer.shape
beer.isnull().sum()
beer['username'].fillna('Missing Username', inplace=True)
beer['ibu'].value_counts()
beer['ibu'].fillna('No IBU', inplace=True)
beer.isnull().sum()
whiskey.isnull().sum()
whiskey['age'].value_counts()
whiskey[whiskey['age'].isnull()].head()
whiskey['age'].fillna('No Age', inplace=True)
whiskey[whiskey['vint'].isnull()].head()
whiskey['vint'].fillna('No Vint', inplace=True)
whiskey[whiskey['region'].isnull()].head()
whiskey['region'].fillna('No Region', inplace=True)
# Strip the ' yrs' suffix so that only the number remains
whiskey['age'] = whiskey['age'].map(lambda x : x.split(' yrs')[0])
# Sanity check: print the cleaned values
for i in whiskey['age']:
print(i)
whiskey['age'].value_counts()
whiskey.isnull().sum()
whiskey2.isnull().sum()
whiskey = whiskey.dropna().copy()
whiskey.shape
whiskey['review'].dropna(inplace=True)
whiskey.shape
whiskey.head()
beer.head()
beer.rename({'r_text':'review', 'score_y':'user_rating'}, axis=1, inplace=True)
whiskey.rename({'w_name':'name', 'grade':'user_rating', 'w_type':'style'}, axis=1, inplace=True)
all_reviews[all_reviews['username'] == 'rodbeermunch']
all_reviews = beer[['name', 'review', 'username', 'user_rating', 'abv', 'style', 'id']].append(whiskey[['name', 'review', 'username', 'user_rating', 'abv', 'style', 'id']])
all_reviews.head()
all_reviews['review'].isnull().sum()
all_reviews.shape
all_reviews = all_reviews.dropna().copy() | _____no_output_____ | MIT | Model.ipynb | markorland/markorland |
Modeling | from sklearn.metrics.pairwise import cosine_similarity
from sklearn.decomposition import TruncatedSVD
vect = CountVectorizer(ngram_range=(2,2), stop_words=stop, min_df=2)
X = vect.fit_transform(all_reviews['review'])
X.shape
svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
X_svd = svd.fit_transform(X)
# import pickle
# with open('C:/Users/Mark/Personal_GitHub/Portfolio/Capstone_What_Should_I_Drink/Pickle/svd.pkl', 'wb') as f:
# pickle.dump(X_svd, f)
X_svd.shape
cosine_similarity(X_svd)
import sys
sys.getsizeof(X_svd) / 1000000000 | _____no_output_____ | MIT | Model.ipynb | markorland/markorland |
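A rough back-of-the-envelope check before moving on (a sketch; the only notebook name it relies on is `all_reviews`): a dense float64 cosine-similarity matrix over every individual review costs n**2 * 8 bytes, which is what motivates grouping reviews by drink in the next section. | n_reviews = all_reviews.shape[0]
print(n_reviews ** 2 * 8 / 1e9, 'GB for a full review-by-review similarity matrix')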
Model with grouped reviews | grouped_reviews = all_reviews.groupby('name')['review'].sum()
grouped_reviews.head()
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.decomposition import TruncatedSVD
vect = CountVectorizer(ngram_range=(2,2), stop_words=stop, min_df=2)
X = vect.fit_transform(grouped_reviews)
X.shape
svd = TruncatedSVD(n_components=100, n_iter=7, random_state=42)
X_svd = svd.fit_transform(X)
X_svd.shape
# import pickle
# with open('C:/Users/Mark/Personal_GitHub/Portfolio/Capstone_What_Should_I_Drink/Pickle/svd.pkl', 'wb') as f:
# pickle.dump(X_svd, f)
cos_sim = cosine_similarity(X_svd, X_svd)
cos_sim
cos_sim.shape
df_grouped_reviews = pd.DataFrame(grouped_reviews)
df_grouped_reviews.head()
df_grouped_reviews.index
pd.DataFrame(cos_sim)
df_cos_sim = pd.DataFrame(cos_sim, index=df_grouped_reviews.index)
df_cos_sim
df_cos_reviews = pd.concat([df_grouped_reviews, df_cos_sim], axis=1)
df_cos_reviews.head()
df_cos_reviews = df_cos_reviews.drop('review', axis=1)
df_cos_reviews.head()
df_cos_reviews.columns = df_cos_reviews.index
df_cos_reviews
all_reviews.head()
all_reviews_cosine = all_reviews.merge(df_cos_reviews, left_on='name', right_index=True)
all_reviews_cosine.head()
# all_reviews_cosine.to_csv('./Scraping/Scraped_Data/Data/all_reviews_cosine.csv', index=False) | _____no_output_____ | MIT | Model.ipynb | markorland/markorland |
Recommender | all_reviews_cosine = pd.read_csv('./Scraping/Scraped_Data/Data/all_reviews_cosine.csv')
beer.head()
whiskey.head()
all_reviews_cosine.head() | _____no_output_____ | MIT | Model.ipynb | markorland/markorland |
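A minimal sketch of how these cosine columns could drive recommendations — `recommend` is a hypothetical helper, not part of the original notebook, and it assumes the drink-by-drink similarity matrix `df_cos_reviews` built above is still in memory: | def recommend(drink_name, sim_df=df_cos_reviews, top_n=5):
    # Rows and columns of sim_df are both drink names, so one column holds
    # the similarity of every other drink to `drink_name`
    sims = sim_df[drink_name].drop(index=drink_name)  # drop the self-similarity of 1.0
    return sims.sort_values(ascending=False).head(top_n)
# e.g. recommend('<some drink name>', top_n=5)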
1) aisles.csv : aisle_id, aisle - subcategory 2) departments.csv : department_id, department - major category 3) order_products.csv : order_id, product_id, add_to_cart_order, reordered : train, prior - order id, product id, order in which the item was added to the cart, whether it was reordered 4) orders.csv : order_id, user_id, eval_set, order_number, order_dow, order_hour_of_day, days_since_prior_order - order id, user id, data split, running order count, day of week of the order, hour of the order, days since the prior order 5) products.csv : product_id, product_name, aisle_id, department_id - product id, product name, subcategory id, major category id > Criteria for recommending whether a user would buy a product: reordered at least a certain number of times, and a short gap until the reorder > Other recommendation ideas: items in the same subcategory as the ones the user buys, and items the user has not bought taken from the purchase lists of users whose purchases overlap heavily with theirs | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
color = sns.color_palette()
%matplotlib inline
# data
train = pd.read_csv("order_products__train.csv")
prior = pd.read_csv("order_products__prior.csv")
orders_df = pd.read_csv("orders.csv")
products_df = pd.read_csv("products.csv")
aisles_df = pd.read_csv("aisles.csv")
departments_df = pd.read_csv("departments.csv")
submit = pd.read_csv("sample_submission.csv")
submit
# check orders.csv
# pass a number in the parentheses for how many rows to show (default = 5)
orders_df.head(3)
train
prior
# check order_products__train.csv
# information about the products in each order
train.head(3)
# data split
# eval_set is the key: the data is split into 3 chunks; check how many rows fall into each
cnt_srs = orders_df.eval_set.value_counts()
print (cnt_srs)
def get_unique_count(x):
return len(np.unique(x))
# group rows that share the same eval_set
cnt_srs = orders_df.groupby("eval_set")["user_id"].aggregate(get_unique_count)
print (cnt_srs)
# there are 206,209 customers in total
# order_number has duplicates: confirm it is not a unique ID, just a running "purchase count"
cnt_srs = orders_df.groupby("user_id")["order_number"].aggregate(np.max).reset_index()
cnt_srs
# userid - ordernumber
cnt_srs = cnt_srs.order_number.value_counts()
cnt_srs
# by day of week: number of orders per weekday
cnt_day = orders_df.order_dow.value_counts()
print (cnt_day)
plt.figure(figsize=(12,8))
sns.countplot(x="order_dow", data=orders_df, color=color[4])
plt.ylabel('Count', fontsize=12)
plt.xlabel('Day of week', fontsize=12)
plt.xticks(rotation='vertical')
plt.title("Frequency of order by week day", fontsize=15)
plt.show()
# at which hour of the day are the most items ordered?
cnt_hour = orders_df.order_hour_of_day.value_counts()
print (cnt_hour)
plt.figure(figsize=(12,8))
sns.countplot(x="order_hour_of_day", data=orders_df, color=color[1])
plt.ylabel('Count', fontsize=12)
plt.xlabel('Hour of day', fontsize=12)
plt.xticks(rotation='vertical')
plt.title("Frequency of order by hour of day", fontsize=15)
plt.show()
# for each day of the week, which hour has the most orders? (heatmap sketch below)
grouped_df = orders_df.groupby(["order_dow", "order_hour_of_day"])["order_number"].aggregate("count").reset_index()
print (grouped_df)
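# A small follow-up sketch (assumes the seaborn/matplotlib imports above):
# pivot the day-by-hour counts into a table and draw it as a heatmap, which
# answers at a glance which hour is busiest on each weekday
pivot_df = grouped_df.pivot(index='order_dow', columns='order_hour_of_day', values='order_number')
plt.figure(figsize=(12,6))
sns.heatmap(pivot_df)
plt.title("Order counts by day of week vs. hour of day", fontsize=15)
plt.show()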
# order volume by days since the prior order
# a 30-day gap is the most common and a 26-day gap the least
cnt_prior_order = orders_df.days_since_prior_order.value_counts()
print (cnt_prior_order)
train.info()
# check the training data > same structure as orders
train.head()
# summary: reordered is only a yes/no flag, not a count, so it is not very helpful on its own > maybe compute the average reorder interval per product instead? (see the sketch after this cell)
pd.set_option('display.float_format', lambda x: '%.3f' % x)
train.describe() | _____no_output_____ | MIT | src/recommendation/data_analysis/data_analysis_1.ipynb | odobenuskr/2019_capstone_FlexAds |
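A hypothetical sketch of the recommendation criterion described at the top of this section — rank products by how often they are reordered and by how short the average gap before the reorder is. The merge below is one plausible way to join the tables, not something the original notebook does: | merged = prior.merge(orders_df[['order_id', 'days_since_prior_order']], on='order_id')
product_stats = merged.groupby('product_id').agg(
    reorder_rate=('reordered', 'mean'),
    avg_days_between=('days_since_prior_order', 'mean'))
# a high reorder rate and a short average gap are the signals suggested above
candidates = product_stats.sort_values(['reorder_rate', 'avg_days_between'],
                                       ascending=[False, True]).head(10)
print(candidates)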
TensorFlow Neural Network Lab In this lab, you'll use all the tools you learned from *Introduction to TensorFlow* to label images of English letters! The data you are using, notMNIST, consists of images of a letter from A to J in different fonts. The above images are a few examples of the data you'll be training on. After training the network, you will compare your prediction model against test data. Your goal, by the end of this lab, is to make predictions against that test set with at least an 80% accuracy. Let's jump in! To start this lab, you first need to import all the necessary modules. Run the code below. If it runs successfully, it will print "`All modules imported`". | import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.') | All modules imported.
| Apache-2.0 | intro-to-tensorflow/intro_to_tensorflow.ipynb | ajmaradiaga/tf-examples |
The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J). | def download(url, file):
"""
Download file from <url>
:param url: URL to file
:param file: Local file path
"""
if not os.path.isfile(file):
print('Downloading ' + file + '...')
urlretrieve(url, file)
print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
"""
Uncompress features and labels from a zip file
:param file: The zip file to extract the data from
"""
features = []
labels = []
with ZipFile(file) as zipf:
# Progress Bar
filenames_pbar = tqdm(zipf.namelist(), unit='files')
# Get features and labels from all files
for filename in filenames_pbar:
# Check if the file is a directory
if not filename.endswith('/'):
with zipf.open(filename) as image_file:
image = Image.open(image_file)
image.load()
# Load image data as 1 dimensional array
# We're using float32 to save on memory space
feature = np.array(image, dtype=np.float32).flatten()
# Get the the letter from the filename. This is the letter of the image.
label = os.path.split(filename)[1][0]
features.append(feature)
labels.append(label)
return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.') | 100%|██████████| 210001/210001 [00:21<00:00, 9650.17files/s]
100%|██████████| 10001/10001 [00:01<00:00, 9583.57files/s]
| Apache-2.0 | intro-to-tensorflow/intro_to_tensorflow.ipynb | ajmaradiaga/tf-examples |
Problem 1 The first problem involves normalizing the features for your training and test data. Implement Min-Max scaling in the `normalize_grayscale()` function to a range of `a=0.1` and `b=0.9`. After scaling, the values of the pixels in the input data should range from 0.1 to 0.9. Since the raw notMNIST image data is in [grayscale](https://en.wikipedia.org/wiki/Grayscale), the current values range from a min of 0 to a max of 255. Min-Max Scaling: $X'=a+{\frac {\left(X-X_{\min }\right)\left(b-a\right)}{X_{\max }-X_{\min }}}$ *If you're having trouble solving problem 1, you can view the solution [here](https://github.com/udacity/deep-learning/blob/master/intro-to-tensorFlow/intro_to_tensorflow_solution.ipynb).* | # Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
"""
Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
:param image_data: The image data to be normalized
:return: Normalized image data
"""
# Implement Min-Max scaling for grayscale image data
x_min = 0
x_max = 255
a = 0.1
b = 0.9
return a + (((image_data - x_min) * (b - a)) / (x_max-x_min))
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.') | Data cached in pickle file.
| Apache-2.0 | intro-to-tensorflow/intro_to_tensorflow.ipynb | ajmaradiaga/tf-examples |
Checkpoint All your progress is now saved to the pickle file. If you need to leave and come back to this lab, you no longer have to start from the beginning. Just run the code block below and it will load all the data and modules required to proceed. | %matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.') | Data and modules loaded.
| Apache-2.0 | intro-to-tensorflow/intro_to_tensorflow.ipynb | ajmaradiaga/tf-examples |
Problem 2 Now it's time to build a simple neural network using TensorFlow. Here, your network will be just an input layer and an output layer. For the input here the images have been flattened into a vector of $28 \times 28 = 784$ features. Then, we're trying to predict the image digit so there are 10 output units, one for each label. Of course, feel free to add hidden layers if you want, but this notebook is built to guide you through a single layer network. For the neural network to train on your data, you need the following float32 tensors: - `features` - Placeholder tensor for feature data (`train_features`/`valid_features`/`test_features`) - `labels` - Placeholder tensor for label data (`train_labels`/`valid_labels`/`test_labels`) - `weights` - Variable Tensor with random numbers from a truncated normal distribution. - See `tf.truncated_normal()` documentation for help. - `biases` - Variable Tensor with all zeros. - See `tf.zeros()` documentation for help. *If you're having trouble solving problem 2, review "TensorFlow Linear Function" section of the class. If that doesn't help, the solution for this problem is available [here](intro_to_tensorflow_solution.ipynb).* | # All the pixels in the image (28 * 28 = 784)
features_count = 784
# All the labels
labels_count = 10
# Set the features and labels tensors
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
# Set the weights and biases tensors
weights = tf.Variable(tf.truncated_normal([features_count, labels_count]))
biases = tf.Variable(tf.zeros([labels_count]))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.') | Accuracy function created.
| Apache-2.0 | intro-to-tensorflow/intro_to_tensorflow.ipynb | ajmaradiaga/tf-examples |
Problem 3 Below are 2 parameter configurations for training the neural network. In each configuration, one of the parameters has multiple options. For each configuration, choose the option that gives the best accuracy. Parameter configurations: Configuration 1 * **Epochs:** 1 * **Learning Rate:** * 0.8 * 0.5 * 0.1 * 0.05 * 0.01 Configuration 2 * **Epochs:** * 1 * 2 * 3 * 4 * 5 * **Learning Rate:** 0.2 The code will print out a Loss and Accuracy graph, so you can see how well the neural network performed. *If you're having trouble solving problem 3, you can view the solution [here](intro_to_tensorflow_solution.ipynb).* | # Change if you have memory restrictions
batch_size = 256
# Find the best parameters for each configuration
#When epochs = 1, the best learning_rate is 0.5 with an accuracy of 0.7526666522026062
#When multiple epochs
#2 = 0.7515999674797058
#3 = 0.7605332732200623
#4 = 0.771733283996582
#5 = 0.7671999335289001
epochs = 4
learning_rate = 0.2
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy)) | Epoch 1/4: 100%|██████████| 557/557 [00:03<00:00, 175.65batches/s]
Epoch 2/4: 100%|██████████| 557/557 [00:03<00:00, 180.07batches/s]
Epoch 3/4: 100%|██████████| 557/557 [00:03<00:00, 179.54batches/s]
Epoch 4/4: 100%|██████████| 557/557 [00:03<00:00, 179.55batches/s]
| Apache-2.0 | intro-to-tensorflow/intro_to_tensorflow.ipynb | ajmaradiaga/tf-examples |
Test You're going to test your model against your held-out dataset/testing data. This will give you a good indicator of how well the model will do in the real world. You should have a test accuracy of at least 80%. | ### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy)) | Epoch 1/4: 100%|██████████| 557/557 [00:00<00:00, 1230.28batches/s]
Epoch 2/4: 100%|██████████| 557/557 [00:00<00:00, 1277.16batches/s]
Epoch 3/4: 100%|██████████| 557/557 [00:00<00:00, 1209.55batches/s]
Epoch 4/4: 100%|██████████| 557/557 [00:00<00:00, 1254.66batches/s] | Apache-2.0 | intro-to-tensorflow/intro_to_tensorflow.ipynb | ajmaradiaga/tf-examples |
Dependencies | import numpy as np
import pandas as pd
import pingouin as pg
import seaborn as sns
import scipy.stats
import sklearn
import matplotlib.pyplot as plt
from tqdm import tqdm | _____no_output_____ | MIT | src/Analysis/bible_bitexts_analysis.ipynb | AlexJonesNLP/crosslingual-analysis-101 |
Loading dataframes containing variables | # Loading the dataframes we'll be using
# Contains the DEPENDENT variables relating to language PAIRS
lang_pair_dv = pd.read_csv('/Data/Bible experimental vars/bible_dependent_vars_LANGUAGE_PAIR.csv')
# Contains the INDEPENDENT variables relating to language PAIRS
lang_pair_iv = pd.read_csv('/Data/bible_predictors_LANGUAGE_PAIR.csv')
# Contains ALL variables relating to INDIVIDUAL languages
indiv_lang_vars = pd.read_csv('/Data/bible_all_features_LANGUAGE.csv')
# Tallying zero-shot sub-cases
print('Simple zero-shot languages (LaBSE): {}'.format(sum(np.array(indiv_lang_vars['Total sentences (LaBSE)']==0))))
print('Simple zero-shot languages (LASER): {}'.format(sum(np.array(indiv_lang_vars['Total sentences (LASER)']==0))))
print('Double zero-shot language pairs (LaBSE): {}'.format(sum(np.array(lang_pair_iv['Combined sentences (LaBSE)']==0))))
print('Double zero-shot language pairs (LASER): {}'.format(sum(np.array(lang_pair_iv['Combined sentences (LASER)']==0))))
# It's pretty helpful to combine the IVs and DVs for language pairs, as Pingouin prefers to work with
# single dataframe objects
master_pair = pd.concat([lang_pair_iv, lang_pair_dv], axis=1)
master_pair.corr()
pg.ancova(data=master_pair,
dv='F1-score (LASER, average)',
between='Same Genus?',
covar=['Combined sentences (LASER)',
'Combined in-family sentences (LASER)',
'Combined in-genus sentences (LASER)'])
pg.partial_corr(data=master_pair,
x='Phonological Distance (lang2vec)',
y='Average margin score (LASER, average)',
covar=['Combined sentences (LASER)',
'Combined in-family sentences (LASER)',
'Combined in-genus sentences (LASER)'])
double_zero_shot_labse = master_pair[np.array(master_pair['Combined sentences (LaBSE)'])==0]
double_zero_shot_laser = master_pair[np.array(master_pair['Combined sentences (LASER)'])==0]
double_zero_shot_labse['Gromov-Hausdorff dist. (LaBSE, average)'] = -double_zero_shot_labse['Gromov-Hausdorff dist. (LaBSE, average)']
double_zero_shot_laser['Gromov-Hausdorff dist. (LASER, average)'] = -double_zero_shot_laser['Gromov-Hausdorff dist. (LASER, average)']
double_zero_shot_labse['Singular value gap (LaBSE, average)'] = -double_zero_shot_labse['Singular value gap (LaBSE, average)']
double_zero_shot_laser['Singular value gap (LASER, average)'] = -double_zero_shot_laser['Singular value gap (LASER, average)']
double_zero_shot_labse = double_zero_shot_labse[['Same Family?', 'Same Genus?',
'Character-level Overlap (multiset Jaccard coefficient, Book of Matthew)',
'Token-level Overlap (multiset Jaccard coefficient, Book of John)',
'Same Word Order?', 'Same Polysynthesis Status?', 'Geographic Distance (lang2vec)',
'Inventory Distance (lang2vec)', 'Syntactic Distance (lang2vec)',
'Phonological Distance (lang2vec)', 'F1-score (LaBSE, average)',
'Gromov-Hausdorff dist. (LaBSE, average)',
'Singular value gap (LaBSE, average)',
'ECOND-HM (LaBSE, average)',
'Average margin score (LaBSE, average)', 'Language pair']]
double_zero_shot_laser = double_zero_shot_laser[['Same Family?', 'Same Genus?',
'Character-level Overlap (multiset Jaccard coefficient, Book of Matthew)',
'Token-level Overlap (multiset Jaccard coefficient, Book of John)',
'Same Word Order?', 'Same Polysynthesis Status?', 'Geographic Distance (lang2vec)',
'Inventory Distance (lang2vec)', 'Syntactic Distance (lang2vec)',
'Phonological Distance (lang2vec)', 'F1-score (LASER, average)',
'Gromov-Hausdorff dist. (LASER, average)',
'Singular value gap (LASER, average)',
'ECOND-HM (LASER, average)',
'Average margin score (LASER, average)', 'Language pair']]
print(pg.anova(data=double_zero_shot_labse, dv='F1-score (LaBSE, average)', between='Same Word Order?'))
print(pg.anova(data=double_zero_shot_labse, dv='F1-score (LaBSE, average)', between='Same Polysynthesis Status?'))
print(pg.anova(data=double_zero_shot_labse, dv='F1-score (LaBSE, average)', between='Same Family?'))
print(pg.anova(data=double_zero_shot_labse, dv='F1-score (LaBSE, average)', between='Same Genus?'))
print(scipy.stats.pearsonr(double_zero_shot_labse['F1-score (LaBSE, average)'],
double_zero_shot_labse['Syntactic Distance (lang2vec)']))
def corrUtilIO(corr: tuple, s1:str, s2:str):
r, p = corr
out = 'Correlation between {} and {}: {} | p-value: {}'.format(s1, s2, r, p)
return out
print('Examining double-zero shot language pairs (LaBSE)')
print('--------------------------------------------------')
print(corrUtilIO(scipy.stats.pearsonr(double_zero_shot_labse['F1-score (LaBSE, average)'],
double_zero_shot_labse['Inventory Distance (lang2vec)']),
'F1-score', 'inventory distance'))
print(corrUtilIO(scipy.stats.pearsonr(double_zero_shot_labse['Gromov-Hausdorff dist. (LaBSE, average)'],
double_zero_shot_labse['Inventory Distance (lang2vec)']),
'Gromov-Hausdorff distance', 'inventory distance'))
print(corrUtilIO(scipy.stats.pearsonr(double_zero_shot_labse['Singular value gap (LaBSE, average)'],
double_zero_shot_labse['Inventory Distance (lang2vec)']),
'singular value gap', 'inventory distance'))
print(corrUtilIO(scipy.stats.pearsonr(double_zero_shot_labse['ECOND-HM (LaBSE, average)'],
double_zero_shot_labse['Inventory Distance (lang2vec)']),
'ECOND-HM', 'inventory distance'))
print(corrUtilIO(scipy.stats.pearsonr(double_zero_shot_labse['Average margin score (LaBSE, average)'],
double_zero_shot_labse['Inventory Distance (lang2vec)']),
'average margin score', 'inventory distance'))
X_to_regress_1 = ['Inventory Distance (lang2vec)', 'Token-level Overlap (multiset Jaccard coefficient, Book of John)']
X_to_regress_2 = ['Inventory Distance (lang2vec)', 'Character-level Overlap (multiset Jaccard coefficient, Book of Matthew)']
pg.linear_regression(X=double_zero_shot_labse[X_to_regress_2], y=double_zero_shot_labse['F1-score (LaBSE, average)'])
print('Examining double-zero shot language pairs (LASER)')
print('--------------------------------------------------')
print(corrUtilIO(scipy.stats.pearsonr(double_zero_shot_laser['F1-score (LASER, average)'],
double_zero_shot_laser['Inventory Distance (lang2vec)']),
'F1-score', 'inventory distance'))
print(corrUtilIO(scipy.stats.pearsonr(double_zero_shot_laser['Gromov-Hausdorff dist. (LASER, average)'],
double_zero_shot_laser['Inventory Distance (lang2vec)']),
'Gromov-Hausdorff distance', 'inventory distance'))
print(corrUtilIO(scipy.stats.pearsonr(double_zero_shot_laser['Singular value gap (LASER, average)'],
double_zero_shot_laser['Inventory Distance (lang2vec)']),
'singular value gap', 'inventory distance'))
print(corrUtilIO(scipy.stats.pearsonr(double_zero_shot_laser['ECOND-HM (LASER, average)'],
double_zero_shot_laser['Inventory Distance (lang2vec)']),
'ECOND-HM', 'inventory distance'))
print(corrUtilIO(scipy.stats.pearsonr(double_zero_shot_laser['Average margin score (LASER, average)'],
double_zero_shot_laser['Inventory Distance (lang2vec)']),
'average margin score', 'inventory distance'))
simple_zero_shot_labse = indiv_lang_vars[np.array(indiv_lang_vars['Total sentences (LaBSE)'])==0]
simple_zero_shot_laser = indiv_lang_vars[np.array(indiv_lang_vars['Total sentences (LASER)'])==0]
simple_zero_shot_labse = simple_zero_shot_labse.drop(['Total sentences (LaBSE)', 'Total in-family sentences (LaBSE)',
'Total in-genus sentences (LaBSE)', 'Total sentences (LASER)',
'Total in-family sentences (LASER)', 'Total in-genus sentences (LASER)',
'Average F1 (LASER)', 'Average G-H dist. (LASER)', 'Average SVG (LASER)',
'Average ECOND-HM (LASER)', 'Grand mean margin score (LASER)'], axis=1)
simple_zero_shot_laser = simple_zero_shot_laser.drop(['Total sentences (LaBSE)', 'Total in-family sentences (LaBSE)',
'Total in-genus sentences (LaBSE)', 'Total sentences (LASER)',
'Total in-family sentences (LASER)', 'Total in-genus sentences (LASER)',
'Average F1 (LaBSE)', 'Average G-H dist. (LaBSE)', 'Average SVG (LaBSE)',
'Average ECOND-HM (LaBSE)', 'Grand mean margin score (LaBSE)'], axis=1)
print('Running ANOVAs to check for omnibus group mean differences in the DVs for basic word order')
print(pg.anova(data=simple_zero_shot_labse, dv='Average F1 (LaBSE)', between='Basic Word Order', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Average G-H dist. (LaBSE)', between='Basic Word Order', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Average SVG (LaBSE)', between='Basic Word Order', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Average ECOND-HM (LaBSE)', between='Basic Word Order', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Grand mean margin score (LaBSE)', between='Basic Word Order', ss_type=3))
print('\n')
print('Running ANOVAs to check for omnibus group mean differences in the DVs for polysyntheticity')
print(pg.anova(data=simple_zero_shot_labse, dv='Average F1 (LaBSE)', between='Polysynthetic?', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Average G-H dist. (LaBSE)', between='Polysynthetic?', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Average SVG (LaBSE)', between='Polysynthetic?', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Average ECOND-HM (LaBSE)', between='Polysynthetic?', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Grand mean margin score (LaBSE)', between='Polysynthetic?', ss_type=3))
print('\n')
print('Running ANOVAs to check for omnibus group mean differences in the DVs for family')
print(pg.anova(data=simple_zero_shot_labse, dv='Average F1 (LaBSE)', between='Family', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Average G-H dist. (LaBSE)', between='Family', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Average SVG (LaBSE)', between='Family', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Average ECOND-HM (LaBSE)', between='Family', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Grand mean margin score (LaBSE)', between='Family', ss_type=3))
print('\n')
print('Running ANOVAs to check for omnibus group mean differences in the DVs for genus')
print(pg.anova(data=simple_zero_shot_labse, dv='Average F1 (LaBSE)', between='Genus', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Average G-H dist. (LaBSE)', between='Genus', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Average SVG (LaBSE)', between='Genus', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Average ECOND-HM (LaBSE)', between='Genus', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Grand mean margin score (LaBSE)', between='Genus', ss_type=3))
print('\n')
print('Running ANOVAs to check for omnibus group mean differences in the DVs for script')
print(pg.anova(data=simple_zero_shot_labse, dv='Average F1 (LaBSE)', between='Script', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Average G-H dist. (LaBSE)', between='Script', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Average SVG (LaBSE)', between='Script', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Average ECOND-HM (LaBSE)', between='Script', ss_type=3))
print(pg.anova(data=simple_zero_shot_labse, dv='Grand mean margin score (LaBSE)', between='Script', ss_type=3))
sns.barplot(simple_zero_shot_labse['Basic Word Order'], simple_zero_shot_labse['Average F1 (LaBSE)'])
plt.ylabel('Meta-average F1 (LaBSE), zero-shot only', fontsize=12)
plt.xlabel('Basic word order', fontsize=14)
sns.barplot(simple_zero_shot_laser['Basic Word Order'], simple_zero_shot_laser['Average F1 (LASER)'])
plt.ylabel('Meta-average F1 (LASER), zero-shot only', fontsize=12)
plt.xlabel('Basic word order', fontsize=14)
sns.barplot(simple_zero_shot_labse['Basic Word Order'], simple_zero_shot_labse['Average ECOND-HM (LaBSE)'])
plt.ylabel('Meta-average ECOND-HM (LaBSE), zero-shot only', fontsize=11)
plt.xlabel('Basic word order', fontsize=14)
pg.pairwise_tukey(data=simple_zero_shot_labse, dv='Average F1 (LaBSE)', between='Basic Word Order')
pg.pairwise_tukey(data=simple_zero_shot_laser, dv='Average F1 (LASER)', between='Basic Word Order') | _____no_output_____ | MIT | src/Analysis/bible_bitexts_analysis.ipynb | AlexJonesNLP/crosslingual-analysis-101 |
Experimenting with sklearn models for feature selection | from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
from itertools import chain, combinations # Used for exhaustive feature search
# The model we'll use to choose the best features for predicting F1-score for LaBSE
labse_f1_model = LinearRegression()
# All the possible pair-centric LaBSE IVs
labse_pair_iv = ['Combined sentences (LaBSE)', 'Combined in-family sentences (LaBSE)',
'Combined in-genus sentences (LaBSE)', 'Same Family?', 'Same Genus?',
'Character-level Overlap (multiset Jaccard coefficient, Book of Matthew)',
'Token-level Overlap (multiset Jaccard coefficient, Book of John)',
'Same Word Order?', 'Same Polysynthesis Status?',
'Geographic Distance (lang2vec)', 'Syntactic Distance (lang2vec)',
'Phonological Distance (lang2vec)', 'Inventory Distance (lang2vec)']
X_pair_labse = master_pair[labse_pair_iv]
# The first DV we'll look at
y_pair_f1_labse = master_pair['F1-score (LaBSE, average)']
# Exhaustive feature search on language pair features
def getBestFeatures(model, X, y, score_method):
FOLDS = 10
n_features = X.shape[1]
all_subsets = chain.from_iterable(combinations(range(n_features), k) for k in range(n_features+1))
best_score = -np.inf
best_features = None
for subset in all_subsets:
if len(subset)!=0: # Search over all non-empty subsets of features
score_by_fold = sklearn.model_selection.cross_validate(model,
X.iloc[:, np.array(subset)],
y,
cv=FOLDS,
scoring=score_method)['test_score']
#scoring='neg_mean_squared_error')
# Convert R2 to adjusted R2 to take into account the number of predictors
def adjustedR2(r2, n, p):
num = (1-r2)*(n-1)
denom = n-p-1
adj_r2 = 1 - (num/denom)
return adj_r2
if score_method=='r2':
# Compute the adjusted R2 instead
n_subset_features = len(subset)
# Fraction of data used for training during CV
train_frac = (FOLDS-1) / FOLDS # e.g. with 10 folds, we use 9/10 of the data for training
sample_size = round(train_frac*X.shape[0])
score_by_fold = list(map(lambda r2: adjustedR2(r2, sample_size, n_subset_features), score_by_fold))
score = np.average(score_by_fold)
# If score is current optimum . . .
if score > best_score:
best_score, best_features = score, subset # . . . flag it as such
print('Score: {} Features: {}'.format(best_score, [X.columns[i] for i in best_features]))
best_features = [X.columns[i] for i in best_features] # Return just the best features
return best_features
labse_pair_f1_best_features = getBestFeatures(model=labse_f1_model,
X=X_pair_labse,
y=y_pair_f1_labse,
score_method='r2') # really adjusted R2
# Repeating the same process for LASER
# All the possible pair-centric LASER IVs
laser_pair_iv = ['Combined sentences (LASER)', 'Combined in-family sentences (LASER)',
'Combined in-genus sentences (LASER)', 'Same Family?', 'Same Genus?',
'Character-level Overlap (multiset Jaccard coefficient, Book of Matthew)',
'Token-level Overlap (multiset Jaccard coefficient, Book of John)',
'Same Word Order?', 'Same Polysynthesis Status?',
'Geographic Distance (lang2vec)', 'Syntactic Distance (lang2vec)',
'Phonological Distance (lang2vec)', 'Inventory Distance (lang2vec)']
X_pair_laser = master_pair[laser_pair_iv]
# The first DV we'll look at (for LASER)
y_pair_f1_laser = master_pair['F1-score (LASER, average)']
laser_f1_model = LinearRegression()
laser_pair_f1_best_features = getBestFeatures(model=laser_f1_model,
X=X_pair_laser,
y=y_pair_f1_laser,
score_method='r2')
# Overlapping best predictors
set(laser_pair_f1_best_features)&set(labse_pair_f1_best_features)
# Checking out the best predictors for the other DVs
# LaBSE
y_pair_gh_labse = master_pair['Gromov-Hausdorff dist. (LaBSE, average)']
y_pair_svg_labse = master_pair['Singular value gap (LaBSE, average)']
y_pair_econdhm_labse = master_pair['ECOND-HM (LaBSE, average)']
y_pair_avgmarg_labse = master_pair['Average margin score (LaBSE, average)']
labse_gh_model, labse_svg_model, labse_econdhm_model, labse_avgmarg_model = LinearRegression(), LinearRegression(), LinearRegression(), LinearRegression()
# LASER
y_pair_gh_laser = master_pair['Gromov-Hausdorff dist. (LASER, average)']
y_pair_svg_laser = master_pair['Singular value gap (LASER, average)']
y_pair_econdhm_laser = master_pair['ECOND-HM (LASER, average)']
y_pair_avgmarg_laser = master_pair['Average margin score (LASER, average)']
laser_gh_model, laser_svg_model, laser_econdhm_model, laser_avgmarg_model = LinearRegression(), LinearRegression(), LinearRegression(), LinearRegression()
# LaBSE best feature selection
print('Getting best features for LaBSE, GH')
labse_pair_gh_best_features = getBestFeatures(labse_gh_model, X_pair_labse, y_pair_gh_labse, 'r2')
print('Getting best features for LaBSE, SVG')
labse_pair_svg_best_features = getBestFeatures(labse_svg_model, X_pair_labse, y_pair_svg_labse, 'r2')
print('Getting best features for LaBSE, ECOND-HM')
labse_pair_econdhm_best_features = getBestFeatures(labse_econdhm_model, X_pair_labse, y_pair_econdhm_labse, 'r2')
print('Getting best features for LaBSE, avg. margin score')
labse_pair_avgmarg_best_features = getBestFeatures(labse_avgmarg_model, X_pair_labse, y_pair_avgmarg_labse, 'r2')
# LASER best feature selection
print('Getting best features for LASER, GH')
laser_pair_gh_best_features = getBestFeatures(laser_gh_model, X_pair_laser, y_pair_gh_laser, 'r2')
print('Getting best features for LASER, SVG')
laser_pair_svg_best_features = getBestFeatures(laser_svg_model, X_pair_laser, y_pair_svg_laser, 'r2')
print('Getting best features for LASER, ECOND-HM')
laser_pair_econdhm_best_features = getBestFeatures(laser_econdhm_model, X_pair_laser, y_pair_econdhm_laser, 'r2')
print('Getting best features for LASER, avg. margin score')
laser_pair_avgmarg_best_features = getBestFeatures(laser_avgmarg_model, X_pair_laser, y_pair_avgmarg_laser, 'r2') | Getting best features for LaBSE, GH
Score: -0.0413396886380951 Features: ['Combined sentences (LaBSE)']
Score: -0.021350223866934324 Features: ['Combined in-family sentences (LaBSE)']
Score: -0.01679224278785668 Features: ['Combined sentences (LaBSE)', 'Combined in-family sentences (LaBSE)']
Score: -0.002414935334796575 Features: ['Combined in-family sentences (LaBSE)', 'Same Word Order?']
Score: 0.0003457227233038096 Features: ['Combined sentences (LaBSE)', 'Combined in-family sentences (LaBSE)', 'Same Word Order?']
Score: 0.0042619988612207175 Features: ['Combined in-family sentences (LaBSE)', 'Same Word Order?', 'Same Polysynthesis Status?']
Score: 0.006178065339854944 Features: ['Combined sentences (LaBSE)', 'Combined in-family sentences (LaBSE)', 'Same Word Order?', 'Same Polysynthesis Status?']
Score: 0.007388992714442766 Features: ['Combined sentences (LaBSE)', 'Combined in-family sentences (LaBSE)', 'Same Word Order?', 'Same Polysynthesis Status?', 'Geographic Distance (lang2vec)']
Getting best features for LaBSE, SVG
Score: -13.442845600864931 Features: ['Combined sentences (LaBSE)']
Score: -12.91560970563965 Features: ['Same Family?']
Score: -12.70894889458604 Features: ['Same Genus?']
Getting best features for LaBSE, ECOND-HM
Score: -0.07663792223141372 Features: ['Combined sentences (LaBSE)']
Score: 0.17450005561272933 Features: ['Combined in-family sentences (LaBSE)']
Score: 0.18470724634286412 Features: ['Combined sentences (LaBSE)', 'Combined in-family sentences (LaBSE)']
Score: 0.18710971735580179 Features: ['Combined sentences (LaBSE)', 'Combined in-family sentences (LaBSE)', 'Combined in-genus sentences (LaBSE)']
Score: 0.19515508854609553 Features: ['Combined sentences (LaBSE)', 'Combined in-family sentences (LaBSE)', 'Same Polysynthesis Status?']
Score: 0.1972223430662685 Features: ['Combined sentences (LaBSE)', 'Combined in-family sentences (LaBSE)', 'Combined in-genus sentences (LaBSE)', 'Same Polysynthesis Status?']
Score: 0.19857398579111657 Features: ['Combined sentences (LaBSE)', 'Combined in-family sentences (LaBSE)', 'Combined in-genus sentences (LaBSE)', 'Same Family?', 'Same Polysynthesis Status?']
Getting best features for LaBSE, avg. margin score
Score: 0.003132645432362735 Features: ['Combined sentences (LaBSE)']
Score: 0.08877712075136177 Features: ['Combined in-family sentences (LaBSE)']
Score: 0.11039046088757831 Features: ['Token-level Overlap (multiset Jaccard coefficient, Book of John)']
Score: 0.13403592045753307 Features: ['Combined sentences (LaBSE)', 'Combined in-family sentences (LaBSE)']
Score: 0.17985617121967118 Features: ['Combined sentences (LaBSE)', 'Token-level Overlap (multiset Jaccard coefficient, Book of John)']
Score: 0.21704669662166767 Features: ['Combined in-family sentences (LaBSE)', 'Same Polysynthesis Status?']
Score: 0.22581187960281346 Features: ['Token-level Overlap (multiset Jaccard coefficient, Book of John)', 'Same Polysynthesis Status?']
Score: 0.24842980585512003 Features: ['Combined sentences (LaBSE)', 'Combined in-family sentences (LaBSE)', 'Same Polysynthesis Status?']
Score: 0.27335157866345083 Features: ['Combined sentences (LaBSE)', 'Token-level Overlap (multiset Jaccard coefficient, Book of John)', 'Same Polysynthesis Status?']
Score: 0.297625168503587 Features: ['Combined sentences (LaBSE)', 'Combined in-family sentences (LaBSE)', 'Token-level Overlap (multiset Jaccard coefficient, Book of John)', 'Same Polysynthesis Status?']
Getting best features for LASER, GH
Score: -0.04232319615283292 Features: ['Combined sentences (LASER)']
Score: -0.011643389665897275 Features: ['Combined in-family sentences (LASER)']
Score: -0.011590823223872415 Features: ['Combined sentences (LASER)', 'Combined in-family sentences (LASER)']
Score: -0.011067803063791825 Features: ['Combined in-family sentences (LASER)', 'Same Family?']
Score: 0.009656949432331452 Features: ['Combined in-family sentences (LASER)', 'Same Word Order?']
Score: 0.01582487308917795 Features: ['Combined in-family sentences (LASER)', 'Same Word Order?', 'Same Polysynthesis Status?']
Score: 0.01741500169028971 Features: ['Combined in-family sentences (LASER)', 'Token-level Overlap (multiset Jaccard coefficient, Book of John)', 'Same Word Order?', 'Same Polysynthesis Status?']
Score: 0.01914508611124365 Features: ['Combined in-family sentences (LASER)', 'Same Word Order?', 'Same Polysynthesis Status?', 'Geographic Distance (lang2vec)']
Score: 0.02014953773989433 Features: ['Combined in-family sentences (LASER)', 'Token-level Overlap (multiset Jaccard coefficient, Book of John)', 'Same Word Order?', 'Same Polysynthesis Status?', 'Geographic Distance (lang2vec)']
Score: 0.020236921735400204 Features: ['Combined in-family sentences (LASER)', 'Same Genus?', 'Token-level Overlap (multiset Jaccard coefficient, Book of John)', 'Same Word Order?', 'Same Polysynthesis Status?', 'Geographic Distance (lang2vec)']
Getting best features for LASER, SVG
Score: -2.1239321109011433 Features: ['Combined sentences (LASER)']
Score: -2.087700196995328 Features: ['Combined in-family sentences (LASER)']
Score: -2.03830090855165 Features: ['Combined in-genus sentences (LASER)']
Score: -2.037664077793555 Features: ['Same Family?']
Score: -2.0058829314780606 Features: ['Same Word Order?']
Score: -1.9890764864614519 Features: ['Combined in-genus sentences (LASER)', 'Same Word Order?']
Getting best features for LASER, ECOND-HM
Score: -0.08398775179803149 Features: ['Combined sentences (LASER)']
Score: 0.033989541419546 Features: ['Combined in-family sentences (LASER)']
Score: 0.05607357917655924 Features: ['Combined sentences (LASER)', 'Combined in-family sentences (LASER)']
Score: 0.07199849287006928 Features: ['Combined in-family sentences (LASER)', 'Same Polysynthesis Status?']
Score: 0.09467553484494366 Features: ['Combined sentences (LASER)', 'Combined in-family sentences (LASER)', 'Same Polysynthesis Status?']
Score: 0.10073009738796554 Features: ['Combined sentences (LASER)', 'Combined in-family sentences (LASER)', 'Same Family?', 'Same Polysynthesis Status?']
Score: 0.10155085237237542 Features: ['Combined sentences (LASER)', 'Combined in-family sentences (LASER)', 'Same Polysynthesis Status?', 'Syntactic Distance (lang2vec)']
Score: 0.11599358877094387 Features: ['Combined sentences (LASER)', 'Combined in-family sentences (LASER)', 'Same Family?', 'Same Polysynthesis Status?', 'Syntactic Distance (lang2vec)']
Score: 0.1165912748426974 Features: ['Combined sentences (LASER)', 'Combined in-family sentences (LASER)', 'Same Family?', 'Same Polysynthesis Status?', 'Syntactic Distance (lang2vec)', 'Phonological Distance (lang2vec)']
Getting best features for LASER, avg. margin score
Score: -0.08558564615220951 Features: ['Combined sentences (LASER)']
Score: 0.028976489862599188 Features: ['Combined in-family sentences (LASER)']
Score: 0.0623230762576779 Features: ['Combined sentences (LASER)', 'Combined in-family sentences (LASER)']
Score: 0.06384934429762971 Features: ['Combined sentences (LASER)', 'Combined in-family sentences (LASER)', 'Same Family?']
Score: 0.07024430593135979 Features: ['Combined sentences (LASER)', 'Combined in-family sentences (LASER)', 'Same Word Order?']
Score: 0.07052342218821048 Features: ['Combined sentences (LASER)', 'Combined in-family sentences (LASER)', 'Same Polysynthesis Status?']
Score: 0.08030722587494216 Features: ['Combined sentences (LASER)', 'Combined in-family sentences (LASER)', 'Syntactic Distance (lang2vec)']
Score: 0.08734223706788617 Features: ['Combined sentences (LASER)', 'Combined in-family sentences (LASER)', 'Same Polysynthesis Status?', 'Syntactic Distance (lang2vec)']
Score: 0.08791708013904954 Features: ['Combined sentences (LASER)', 'Combined in-family sentences (LASER)', 'Same Polysynthesis Status?', 'Syntactic Distance (lang2vec)', 'Phonological Distance (lang2vec)']
| MIT | src/Analysis/bible_bitexts_analysis.ipynb | AlexJonesNLP/crosslingual-analysis-101 |
Applying PCA as an additional feature selection tool | pca = sklearn.decomposition.PCA(n_components=5)
labse_pair_pca = pca.fit_transform(X_pair_labse)
labse_pair_pca.shape | _____no_output_____ | MIT | src/Analysis/bible_bitexts_analysis.ipynb | AlexJonesNLP/crosslingual-analysis-101 |
PCR | # Implement principal component regression (PCR)
def PCR(model, X, y, n_components, score_method):
FOLDS = 10
pca = sklearn.decomposition.PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
score_by_fold = sklearn.model_selection.cross_validate(model,
X_pca,
y,
cv=FOLDS,
scoring=score_method)['test_score']
# Convert R2 to adjusted R2 to take into account the number of predictors
def adjustedR2(r2, n, p):
num = (1-r2)*(n-1)
denom = n-p-1
adj_r2 = 1 - (num/denom)
return adj_r2
if score_method=='r2':
# Compute the adjusted R2 instead
n_subset_features = X.shape[1]
# Fraction of data used for training during CV
train_frac = (FOLDS-1) / FOLDS # e.g. with 10 folds, we use 9/10 of the data for training
sample_size = round(train_frac*X.shape[0])
score_by_fold = list(map(lambda r2: adjustedR2(r2,sample_size,n_subset_features), score_by_fold)) #[adjustedR2(r2, n_subset_features, sample_size) for r2 in score_by_fold]
score = np.average(score_by_fold)
return score
def optimizeComponentsPCR(X, y, score_method):
score_list = []
for n in range(1, X.shape[1]+1):
lr_model = LinearRegression()
score_n = PCR(lr_model, X, y, n, score_method)
score_list.append(score_n)
print('Number of components: {} | Score: {}'.format(n, score_n))
return max(enumerate(score_list), key=lambda x: x[1])[0]+1
# Computing the optimal number of components for predicting each of our DVs (LaBSE)
labse_best_components = []
print('Getting best number of components for predicting F1-score (LaBSE)')
res1 = optimizeComponentsPCR(X_pair_labse, y_pair_f1_labse, 'r2')
print('Optimal components: {}'.format(res1))
labse_best_components.append(res1)
print('Getting best number of components for predicting G-H dist. (LaBSE)')
res2 = optimizeComponentsPCR(X_pair_labse, y_pair_gh_labse, 'r2')
print('Optimal components: {}'.format(res2))
labse_best_components.append(res2)
print('Getting best number of components for predicting SVG (LaBSE)')
res3 = optimizeComponentsPCR(X_pair_labse, y_pair_svg_labse, 'r2')
print('Optimal components: {}'.format(res3))
labse_best_components.append(res3)
print('Getting best number of components for predicting ECOND-HM (LaBSE)')
res4 = optimizeComponentsPCR(X_pair_labse, y_pair_econdhm_labse, 'r2')
print('Optimal components: {}'.format(res4))
labse_best_components.append(res4)
print('Getting best number of components for predicting avg. margin score (LaBSE)')
res5 = optimizeComponentsPCR(X_pair_labse, y_pair_avgmarg_labse, 'r2')
print('Optimal components: {}'.format(res5))
labse_best_components.append(res5)
print('\nAverage best number of components (LaBSE): {}'.format(np.average(labse_best_components)))
# Computing the optimal number of components for predicting each of our DVs (LASER)
laser_best_components = []
print('Getting best number of components for predicting F1-score (LASER)')
res1 = optimizeComponentsPCR(X_pair_laser, y_pair_f1_laser, 'r2')
print('Optimal components: {}'.format(res1))
laser_best_components.append(res1)
print('Getting best number of components for predicting G-H dist. (LASER)')
res2 = optimizeComponentsPCR(X_pair_laser, y_pair_gh_laser, 'r2')
print('Optimal components: {}'.format(res2))
laser_best_components.append(res2)
print('Getting best number of components for predicting SVG (LASER)')
res3 = optimizeComponentsPCR(X_pair_laser, y_pair_svg_laser, 'r2')
print('Optimal components: {}'.format(res3))
laser_best_components.append(res3)
print('Getting best number of components for predicting ECOND-HM (LASER)')
res4 = optimizeComponentsPCR(X_pair_laser, y_pair_econdhm_laser, 'r2')
print('Optimal components: {}'.format(res4))
laser_best_components.append(res4)
print('Getting best number of components for predicting avg. margin score (LASER)')
res5 = optimizeComponentsPCR(X_pair_laser, y_pair_avgmarg_laser, 'r2')
print('Optimal components: {}'.format(res5))
laser_best_components.append(res5)
print('\nAverage best number of components (LASER): {}'.format(np.average(laser_best_components)))
# Perform ablation analysis to see how removing each predictor individually affects the regression fit
def ablateLinReg(X, y, score_method):
FOLDS = 10
n_features = X.shape[1]
ablation_feature_diffs = {}
model = LinearRegression()
# Convert R2 to adjusted R2 to take into account the number of predictors
def adjustedR2(r2, n, p):
num = (1-r2)*(n-1)
denom = n-p-1
adj_r2 = 1 - (num/denom)
return adj_r2
# Getting baseline score using all the features
score_by_fold = sklearn.model_selection.cross_validate(model,
X,
y,
cv=FOLDS,
scoring=score_method)['test_score']
if score_method=='r2':
# Compute the adjusted R2 instead
N = n_features-1
# Fraction of data used for training during CV
train_frac = (FOLDS-1) / FOLDS # e.g. with 10 folds, we use 9/10 of the data for training
sample_size = round(train_frac*X.shape[0])
score_by_fold = list(map(lambda r2: adjustedR2(r2, sample_size, N), score_by_fold))
baseline_score = np.average(score_by_fold)
# We'll drop each of the features one-by-one and see how the fit (adjusted R2) of the model changes
for i in range(n_features):
dropped_feature = X.columns[i]
X_ablated = X.drop(columns=dropped_feature) # Ablated feature space
score_by_fold = sklearn.model_selection.cross_validate(model,
X_ablated,
y,
cv=FOLDS,
scoring=score_method)['test_score']
if score_method=='r2':
# Compute the adjusted R2 instead
N = n_features-1
# Fraction of data used for training during CV
train_frac = (FOLDS-1) / FOLDS # e.g. with 10 folds, we use 9/10 of the data for training
sample_size = round(train_frac*X.shape[0])
score_by_fold = list(map(lambda r2: adjustedR2(r2, sample_size, N), score_by_fold))
score_diff = baseline_score - np.average(score_by_fold)
# The higher the score_diff, the more important that feature is
ablation_feature_diffs[dropped_feature] = score_diff
# Return dictionary sorted in descending order
ablation_feature_diffs = {k: v for k, v in sorted(ablation_feature_diffs.items(), key=lambda item: item[1], reverse=True)}
for k,v in zip(ablation_feature_diffs.keys(), ablation_feature_diffs.values()):
print('Dropped feature: {} | Score difference: {}'.format(k, v))
print('\n')
return ablation_feature_diffs
print('LaBSE F1-score ablation experiment')
labse_f1_ablation = ablateLinReg(X_pair_labse, y_pair_f1_labse, 'r2')
print('LaBSE GH dist. ablation experiment')
labse_gh_ablation = ablateLinReg(X_pair_labse, y_pair_gh_labse, 'r2')
print('LaBSE SVG ablation experiment')
labse_svg_ablation = ablateLinReg(X_pair_labse, y_pair_svg_labse, 'r2')
print('LaBSE ECOND-HM ablation experiment')
labse_econdhm_ablation = ablateLinReg(X_pair_labse, y_pair_econdhm_labse, 'r2')
print('LaBSE avg. margin score ablation experiment')
labse_avgmarg_ablation = ablateLinReg(X_pair_labse, y_pair_avgmarg_labse, 'r2')
print('LASER F1-score ablation experiment')
laser_f1_ablation = ablateLinReg(X_pair_laser, y_pair_f1_laser, 'r2')
print('LASER GH dist. ablation experiment')
laser_gh_ablation = ablateLinReg(X_pair_laser, y_pair_gh_laser, 'r2')
print('LASER SVG ablation experiment')
laser_svg_ablation = ablateLinReg(X_pair_laser, y_pair_svg_laser, 'r2')
print('LASER ECOND-HM ablation experiment')
laser_econdhm_ablation = ablateLinReg(X_pair_laser, y_pair_econdhm_laser, 'r2')
print('LASER avg. margin score ablation experiment')
laser_avgmarg_ablation = ablateLinReg(X_pair_laser, y_pair_avgmarg_laser, 'r2')
# Let's see how important each feature is, on average, according to the ablation experiments
# LaBSE
feature_orders_in_ablation_labse = {}
for idx, item in enumerate(labse_f1_ablation.keys()):
feature_orders_in_ablation_labse[item] = [idx]
for idx, item in enumerate(labse_gh_ablation.keys()):
feature_orders_in_ablation_labse[item].append(idx)
for idx, item in enumerate(labse_svg_ablation.keys()):
feature_orders_in_ablation_labse[item].append(idx)
for idx, item in enumerate(labse_econdhm_ablation.keys()):
feature_orders_in_ablation_labse[item].append(idx)
for idx, item in enumerate(labse_avgmarg_ablation.keys()):
feature_orders_in_ablation_labse[item].append(idx)
for k in feature_orders_in_ablation_labse:
feature_orders_in_ablation_labse[k] = np.average(feature_orders_in_ablation_labse[k])
# LASER
feature_orders_in_ablation_laser = {}
for idx, item in enumerate(laser_f1_ablation.keys()):
feature_orders_in_ablation_laser[item] = [idx]
for idx, item in enumerate(laser_gh_ablation.keys()):
feature_orders_in_ablation_laser[item].append(idx)
for idx, item in enumerate(laser_svg_ablation.keys()):
feature_orders_in_ablation_laser[item].append(idx)
for idx, item in enumerate(laser_econdhm_ablation.keys()):
feature_orders_in_ablation_laser[item].append(idx)
for idx, item in enumerate(laser_avgmarg_ablation.keys()):
feature_orders_in_ablation_laser[item].append(idx)
for k in feature_orders_in_ablation_laser:
feature_orders_in_ablation_laser[k] = np.average(feature_orders_in_ablation_laser[k])
# Sort the average feature order lists
feature_orders_in_ablation_labse = sorted(feature_orders_in_ablation_labse.items(), key=lambda item: item[1])
feature_orders_in_ablation_laser = sorted(feature_orders_in_ablation_laser.items(), key=lambda item: item[1])
feature_orders_in_ablation_labse
feature_orders_in_ablation_laser | _____no_output_____ | MIT | src/Analysis/bible_bitexts_analysis.ipynb | AlexJonesNLP/crosslingual-analysis-101 |
Taking a look at the loadings of the first principal components | pca = sklearn.decomposition.PCA(n_components=7)
X_pair_labse_pca = pca.fit_transform(X_pair_labse)
pca_labse_loadings = pd.DataFrame(pca.components_.T, columns=['PC1', 'PC2', 'PC3', 'PC4', 'PC5', 'PC6', 'PC7'], index=X_pair_labse.columns)
pca_labse_loadings
pca = sklearn.decomposition.PCA(n_components=6)
X_pair_laser_pca = pca.fit_transform(X_pair_laser)
pca_laser_loadings = pd.DataFrame(pca.components_.T, columns=['PC1', 'PC2', 'PC3', 'PC4', 'PC5', 'PC6'], index=X_pair_laser.columns)
pca_laser_loadings
| _____no_output_____ | MIT | src/Analysis/bible_bitexts_analysis.ipynb | AlexJonesNLP/crosslingual-analysis-101 |
Generalized ufuncs We've just seen how to make our own ufuncs using `vectorize`, but what if we need something that can operate on an input array in any way that is not element-wise? Enter `guvectorize`. There are several important differences between `vectorize` and `guvectorize` that bear close examination. Let's take a look at a few simple examples. | import numpy
from numba import guvectorize
@guvectorize('int64[:], int64, int64[:]', '(n),()->(n)')
def g(x, y, result):
for i in range(x.shape[0]):
result[i] = x[i] + y | _____no_output_____ | CC-BY-4.0 | notebooks/08.Make.generalized.ufuncs.ipynb | IsabelAverill/Scipy-2017---Numba |
* Declaration of input/output layouts
* No return statements | x = numpy.arange(10) | _____no_output_____ | CC-BY-4.0 | notebooks/08.Make.generalized.ufuncs.ipynb | IsabelAverill/Scipy-2017---Numba |
In the cell below we call the function `g` with a preallocated array for the result. | result = numpy.zeros_like(x)
result = g(x, 5, result)
print(result) | _____no_output_____ | CC-BY-4.0 | notebooks/08.Make.generalized.ufuncs.ipynb | IsabelAverill/Scipy-2017---Numba |
But wait! We can still call `g` as if it were defined as `def g(x, y)`:
```python
res = g(x, 5)
print(res)
```
We don't recommend this, as it can have unintended consequences if some of the elements of the `result` array are not operated on by the function `g`. (The advantage is that you can preserve existing interfaces to previously written functions.) | @guvectorize('float64[:,:], float64[:,:], float64[:,:]',
'(m,n),(n,p)->(m,p)')
def matmul(A, B, C):
m, n = A.shape
n, p = B.shape
for i in range(m):
for j in range(p):
C[i, j] = 0
for k in range(n):
C[i, j] += A[i, k] * B[k, j]
a = numpy.random.random((500, 500))
out = matmul(a, a, numpy.zeros_like(a))
%timeit matmul(a, a, numpy.zeros_like(a))
%timeit a @ a | _____no_output_____ | CC-BY-4.0 | notebooks/08.Make.generalized.ufuncs.ipynb | IsabelAverill/Scipy-2017---Numba |
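To see why relying on the auto-allocated output is risky, here is a minimal sketch (the gufunc name `half_copy` is hypothetical, not part of the original tutorial) that deliberately writes only half of its output:

```python
@guvectorize('int64[:], int64[:]', '(n)->(n)')
def half_copy(x, out):
    # Deliberately write only the first half of the output row
    for i in range(x.shape[0] // 2):
        out[i] = x[i]

res = half_copy(numpy.arange(6))
print(res)  # indices 3..5 contain whatever numpy.empty_like left behind
```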
And it also supports the `target` keyword argument | def g(x, y, res):
for i in range(x.shape[0]):
res[i] = x[i] + numpy.exp(y)
g_serial = guvectorize('float64[:], float64, float64[:]',
'(n),()->(n)')(g)
g_par = guvectorize('float64[:], float64, float64[:]',
'(n),()->(n)', target='parallel')(g)
%timeit res = g_serial(numpy.arange(1000000).reshape(1000, 1000), 3)
%timeit res = g_par(numpy.arange(1000000).reshape(1000, 1000), 3) | _____no_output_____ | CC-BY-4.0 | notebooks/08.Make.generalized.ufuncs.ipynb | IsabelAverill/Scipy-2017---Numba |
[Exercise: Writing signatures](./exercises/08.GUVectorize.Exercises.ipynb#Exercise:-2D-Heat-Transfer-signature) What's up with these boundary conditions?
```python
for i in range(I):
    Tn[i, 0] = T[i, 0]
    Tn[i, J - 1] = Tn[i, J - 2]
for j in range(J):
    Tn[0, j] = T[0, j]
    Tn[I - 1, j] = Tn[I - 2, j]
```
We don't pass in `Tn` explicitly, which means Numba allocates it for us (thanks!), but it's allocated using `numpy.empty_like`, so if we don't touch every value in `Tn` in the function, those empty values will stick around and cause trouble. Solutions? The one above, or pass it in explicitly after doing something like `Tn = Ti.copy()`.

[Exercise: Remove the vanilla loops](./exercises/08.GUVectorize.Exercises.ipynb#Exercise:-2D-Heat-Transfer-Time-loop) The example above loops in time outside of the `vectorize`d function. That means it's looping in vanilla Python, which is not the fastest thing in the world. Move the time loop inside the function.

Demo: Why not `jit` the `run_ftcs` function? Because, at the moment, it won't work (bummer). | @guvectorize('float64[:,:], float64[:,:]', '(n,n)->(n,n)')
def gucopy(a, b):
I, J = a.shape
for i in range(I):
for j in range(J):
b[i, j] = a[i, j]
from numba import jit
@jit
def make_a_copy():
a = numpy.random.random((25,25))
b = gucopy(a)
return a, b
a, b = make_a_copy()
assert numpy.allclose(a, b)
make_a_copy.inspect_types() | _____no_output_____ | CC-BY-4.0 | notebooks/08.Make.generalized.ufuncs.ipynb | IsabelAverill/Scipy-2017---Numba |
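Tying back to the boundary-condition note above, a minimal sketch of the safer calling pattern (the names `ftcs` and `T` are placeholders from the exercise notebook, not defined here):

```python
Tn = T.copy()     # seed the output so untouched cells hold valid data
Tn = ftcs(T, Tn)  # hypothetical gufunc; pass the preallocated copy explicitly
```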
Git_Utils Instructions: To clone a repository, first copy the URL from GitHub or GitLab and paste it into the `REPO_HTTPS_URL` field. Depending on the host, the format should be:
```
https://github.com/<user>/<repository>.git
```
or
```
https://gitlab.com/<group>/<subgroup>/<project_name>.git
```
Next, check that the `GIT_CONFIG_PATH` and `PROJECTS_PATH` fields match the paths on your Drive to the git configuration file and to the projects folder. Finally, run the cell.

**Note: the git configuration file must contain at least three lines, in the following order:**
```
email
user
access_token
```
**For instructions on how to obtain personal access tokens on GitHub and GitLab, see the official guides:**
+ [GitHub](https://help.github.com/pt/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line#creating-a-token);
+ [GitLab](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html). | REPO_HTTPS_URL = 'https://gitlab.com/liaa-3r/sinapses/ia-dispositivos-legais.git'
GIT_CONFIG_PATH = 'C:\\Users\\cmlima\\Desenvolvimento\\LIAA-3R\\config'
PROJECTS_PATH = 'C:\\Users\\cmlima\\Desenvolvimento\\LIAA-3R\\projetos'
ACTION = "pull"
BRANCH = 'master'
COMMIT_MESSAGE = ""
import os, re
import ipywidgets as widgets
from ipywidgets import Layout
from IPython.display import display, clear_output
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
w_repo_https_url = widgets.Text(value=REPO_HTTPS_URL, description='REPO', disabled=False, layout=Layout(width='90%'))
w_git_config_path = widgets.Text(value=GIT_CONFIG_PATH, description='CONFIG', disabled=False, layout=Layout(width='90%'))
w_projects_path = widgets.Text(value=PROJECTS_PATH, description='PROJECT', disabled=False, layout=Layout(width='90%'))
w_action = widgets.Dropdown(
options=['commit-pull-push', 'commit', 'pull', 'push', 'clone'],
value='pull',
description='ACTION',
disabled=False,
layout=Layout(width='50%')
)
w_branch = widgets.Text(value=BRANCH, description='BRANCH', disabled=False)
w_commit_message = widgets.Textarea(
value='',
placeholder='seja breve e objetivo(a)...',
description='COMMIT',
disabled=False,
layout=Layout(width='90%')
)
w_execute_button = widgets.Button(
description='executar',
disabled=False,
button_style='success',
icon='play-circle'
)
w_exit_button = widgets.Button(
description='sair',
disabled=False,
button_style='',
icon='',
layout=Layout(align_self='flex-end', margin="0 5px 0 0")
)
form = widgets.VBox([
w_repo_https_url,
w_git_config_path,
w_projects_path,
widgets.HBox([w_action, w_branch]),
w_commit_message,
widgets.HBox([w_exit_button, w_execute_button], layout=Layout(align_self='flex-end', margin="20px 10% 0 0"))
], layout=Layout(width='90%', display='flex', align_items='flex-start', justify_content='flex-start'))
def print_error(message):
print()
print(bcolors.FAIL + 'O script não pôde ser concluído.')
print(bcolors.FAIL + bcolors.BOLD + 'Erro: ' + message)
def is_valid_url(url):
return re.match("^https:\/\/(.+\/){1,}(.+)\.git$", url)
def repo_exists(path):
if os.path.isdir(path):
%cd {path}
        output = !git rev-parse --is-inside-work-tree 2>/dev/null || echo 0
        return output[0] != '0'  # output is a list of lines; compare its first element
return False
def git_is_set():
token = !git config user.password
return len(token) > 0
def is_github(url):
return 'https://github.com' in url
def get_credentials(path, url):
file_path = os.path.join(path, 'github_config.txt' if is_github(url) else 'gitlab_config.txt')
if not os.path.isfile(file_path):
raise Exception('Arquivo de configuração não localizado.')
with open(file_path, 'r') as file:
        email = file.readline().strip()  # strip trailing newlines so they
        user = file.readline().strip()   # don't corrupt the remote URLs
        token = file.readline().strip()
    return (email, user, token)
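# NOTE: execute() further below calls git_config(), which is never defined in
# this notebook; the helper here is an assumed reconstruction that sets the
# git identity (and the user.password entry checked by git_is_set) from the
# credentials file.
def git_config(path, url):
    email, user, token = get_credentials(path, url)
    !git config --global user.email {email}
    !git config --global user.name {user}
    !git config --global user.password {token}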
def clone(url, root_path, token):
%cd {root_path}
if not is_github(url):
url = 'https://oauth2:' + token + '@gitlab.com' + url.replace('https://gitlab.com', '')
!git clone {url}
path = os.path.join(root_path, re.search("([^\/]*)\.git$", url).group(1))
%cd {path}
%ls
print('remote:')
!git remote -v
def pull(branch, url, token):
    # Build an authenticated remote for both hosts (the original only handled
    # GitLab here, leaving `remote` undefined for GitHub pulls)
    if is_github(url):
        remote = 'https://' + token + '@github.com' + url.replace('https://github.com', '')
    else:
        remote = 'https://oauth2:' + token + '@gitlab.com' + url.replace('https://gitlab.com', '')
    !git pull {remote} {branch}
def push(branch, url, token):
if is_github(url):
remote = 'https://' + token + '@github.com' + url.replace('https://github.com', '')
else:
remote = 'https://oauth2:' + token + '@gitlab.com' + url.replace('https://gitlab.com', '')
!git push {remote} {branch}
def commit(message):
if len(message) == 0:
message = 'Atualizado via git_utils'
!git add .
!git commit -m '{message}'
def clear_all(b):
form.close()
clear_output()
def wait():
w_wait_button = widgets.Button(
description='Clique para concluir o script, limpando o output',
disabled=False,
layout=Layout(align_self='center', margin="0 5px 0 0")
)
w_wait_button.on_click(clear_all)
display(w_wait_button)
def exit(b):
form.close()
clear_output()
print(bcolors.OKBLUE + bcolors.BOLD + 'Script encerrado pelo usuário...')
def execute(b):
print(bcolors.OKBLUE + bcolors.BOLD + 'iniciando...\n')
print(bcolors.ENDC + 'reunindo parâmetros...')
try:
if not is_valid_url(w_repo_https_url.value):
raise Exception('Remoto inválido.')
repo_url = w_repo_https_url.value
project_name = re.search("([^\/]*)\.git$", repo_url).group(1)
projects_path = w_projects_path.value
config_path = w_git_config_path.value
repo_path = os.path.join(projects_path, project_name)
branch = w_branch.value
action = w_action.value
commit_message = w_commit_message.value
        email, user, token = get_credentials(config_path, repo_url)  # returned in (email, user, token) order
if not repo_exists(repo_path) and action != 'clone':
raise Exception('O repositório local não foi localizado. Você deve primeiro cloná-lo.')
print()
if not git_is_set():
print('configurando o git...')
git_config(config_path, repo_url)
print()
if action == 'clone':
print('clonando repositório...')
clone(repo_url, projects_path, token)
elif action == 'pull':
print('atualizando repositório local (pull)...')
pull(branch, repo_url, token)
elif action == 'push':
print('atualizando repositório remoto (push)...')
push(branch, repo_url, token)
elif action == 'commit':
print('iniciando commit...')
commit(commit_message)
elif action == 'commit-pull-push':
print('iniciando sequência...')
commit(commit_message)
pull(branch, repo_url, token)
push(branch, repo_url, token)
else:
raise Exception('A ação selecionada não está implementada.')
except Exception as error:
print_error(str(error))
else:
print()
print(bcolors.OKGREEN + bcolors.BOLD + 'Script concluído.')
finally:
print()
wait()
display(form)
w_execute_button.on_click(execute)
w_exit_button.on_click(exit)
| _____no_output_____ | MIT | git_utils.ipynb | liaa-3r/utilidades-colab |
1. Zipping Lists | import string
first_example_list = [c for c in string.ascii_lowercase]
second_example_list = [i for i in range(len(string.ascii_lowercase))]
def zip_lists(first_list, second_list):
new_list = []
for i in range(min(len(first_list), len(second_list))):
new_list.append(first_list[i])
new_list.append(second_list[i])
return new_list
print(zip_lists(first_example_list, second_example_list)) | ['a', 0, 'b', 1, 'c', 2, 'd', 3, 'e', 4, 'f', 5, 'g', 6, 'h', 7, 'i', 8, 'j', 9, 'k', 10, 'l', 11, 'm', 12, 'n', 13, 'o', 14, 'p', 15, 'q', 16, 'r', 17, 's', 18, 't', 19, 'u', 20, 'v', 21, 'w', 22, 'x', 23, 'y', 24, 'z', 25]
| Apache-2.0 | Assignments/answers/Lab_3-answers.ipynb | unmeshvrije/python-for-beginners |
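An aside (not part of the original answer): the built-in `zip`, which also stops at the shorter input, can produce the same interleaving:

```python
interleaved = [item for pair in zip(first_example_list, second_example_list) for item in pair]
print(interleaved)
```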
2. Age Differences | example_people = [(16, "Brian"), (12, "Lucy"), (18, "Harold")]
def age_differences(people):
for i in range(len(people) - 1):
first_name = people[i][1]
first_age = people[i][0]
second_name = people[i + 1][1]
second_age = people[i + 1][0]
if first_age > second_age:
difference = first_age - second_age
print("{} is {} years older than {}.".format(first_name, difference, second_name))
age_differences(example_people) | Brian is 4 years older than Lucy.
| Apache-2.0 | Assignments/answers/Lab_3-answers.ipynb | unmeshvrije/python-for-beginners |
3. Remove the Duplicates | example_doubled_list = [1, 1, 2, 3, 3, 4, 3]
def remove_doubles(doubled_list):
no_doubles = []
for number in doubled_list:
if number not in no_doubles:
no_doubles.append(number)
return no_doubles
print(remove_doubles(example_doubled_list)) | [1, 2, 3, 4]
| Apache-2.0 | Assignments/answers/Lab_3-answers.ipynb | unmeshvrije/python-for-beginners |
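A common idiomatic alternative (not in the original answer): `dict.fromkeys` removes duplicates while preserving first-seen order:

```python
print(list(dict.fromkeys(example_doubled_list)))  # [1, 2, 3, 4]
```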
4. Only the Duplicates | first_example_list = [1, 2, 3, 4]
second_example_list = [1, 4, 5, 6]
def get_duplicates(first_list, second_list):
duplicates = []
for number in first_list:
if number in second_list:
duplicates.append(number)
return duplicates
print(get_duplicates(first_example_list, second_example_list)) | [1, 4]
| Apache-2.0 | Assignments/answers/Lab_3-answers.ipynb | unmeshvrije/python-for-beginners |
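When order doesn't matter, a set intersection does the same job (unlike the loop above, it won't preserve the order of the first list):

```python
print(sorted(set(first_example_list) & set(second_example_list)))  # [1, 4]
```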
SAAVEDRA QUESTION 1 | import numpy as np
C = np.eye(4)
print(C) | [[1. 0. 0. 0.]
[0. 1. 0. 0.]
[0. 0. 1. 0.]
[0. 0. 0. 1.]]
| Apache-2.0 | PRELIM_EXAM.ipynb | Singko25/Linear-Algebra-58020 |
QUESTION 2 | import numpy as np
C = np.eye(4)
print('C = ')
print(C)
array = np.multiply(2,C)
print('Doubled = ')
print(array) | C =
[[1. 0. 0. 0.]
[0. 1. 0. 0.]
[0. 0. 1. 0.]
[0. 0. 0. 1.]]
Doubled =
[[2. 0. 0. 0.]
[0. 2. 0. 0.]
[0. 0. 2. 0.]
[0. 0. 0. 2.]]
| Apache-2.0 | PRELIM_EXAM.ipynb | Singko25/Linear-Algebra-58020 |
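Since NumPy broadcasts scalars, `np.multiply(2, C)` is equivalent to the plain operator form:

```python
array = 2 * C  # elementwise doubling, same result as np.multiply(2, C)
```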