Degree distribution
from collections import defaultdict
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np

def plotDegreeDistribution(G):
    # Count how many nodes have each degree value
    degs = defaultdict(int)
    for i in G.degree().values():
        degs[i] += 1
    items = sorted(degs.items())
    x, y = np.array(items).T
    # Normalize the counts to get the degree probability P_K
    y_sum = np.sum(y)
    y = [float(i)/y_sum for i in y]
    plt.plot(x, y, 'b-o')
    plt.xscale('log')
    plt.yscale('log')
    plt.legend(['Degree'])
    plt.xlabel('$K$', fontsize = 20)
    plt.ylabel('$P_K$', fontsize = 20)
    plt.title('$Degree\,Distribution$', fontsize = 20)
    plt.show()

G = nx.karate_club_graph()
plotDegreeDistribution(G)
_____no_output_____
MIT
code/17.networkx.ipynb
nju-teaching/computational-communication
Introduction to Network Science Theory. Network Science: Analyzing Network Structure. Wang Chengjun (王成军), [email protected], Computational Communication (计算传播网): http://computational-communication.com. Regular networks
import networkx as nx
import matplotlib.pyplot as plt

# Generate a regular graph RG with 200 nodes, each node having 3 neighbors
RG = nx.random_graphs.random_regular_graph(3, 200)
# Define a layout; here the spectral layout is used (other layouts are introduced later)
pos = nx.spectral_layout(RG)
# Draw the regular graph; with_labels controls whether node labels are shown, node_size is the node diameter
nx.draw(RG, pos, with_labels=False, node_size=30)
plt.show()
plotDegreeDistribution(RG)
_____no_output_____
MIT
code/17.networkx.ipynb
nju-teaching/computational-communication
ER random network
import networkx as nx
import matplotlib.pyplot as plt

# Generate an Erdos-Renyi random graph with 200 nodes and connection probability 0.05
ER = nx.random_graphs.erdos_renyi_graph(200, 0.05)
# Define a layout; here the shell layout is used
pos = nx.shell_layout(ER)
nx.draw(ER, pos, with_labels=False, node_size=30)
plt.show()
plotDegreeDistribution(ER)
_____no_output_____
MIT
code/17.networkx.ipynb
nju-teaching/computational-communication
Small-world network
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np

# Generate a Watts-Strogatz small-world network with 200 nodes,
# 4 nearest neighbors per node, and rewiring probability 0.3
WS = nx.random_graphs.watts_strogatz_graph(200, 4, 0.3)
# Define a layout; here the circular layout is used
pos = nx.circular_layout(WS)
nx.draw(WS, pos, with_labels=False, node_size=30)
plt.show()
plotDegreeDistribution(WS)

nx.diameter(WS)

# Clustering coefficient of every node
cc = nx.clustering(WS)
plt.hist(cc.values(), bins=10)
plt.xlabel('$Clustering \, Coefficient, \, C$', fontsize = 20)
plt.ylabel('$Frequency, \, F$', fontsize = 20)
plt.show()

np.mean(cc.values())
_____no_output_____
MIT
code/17.networkx.ipynb
nju-teaching/computational-communication
BA scale-free network
import networkx as nx
import matplotlib.pyplot as plt
from collections import defaultdict
import numpy as np

# Generate a BA scale-free network with n = 200 nodes and m = 2 edges per new node
BA = nx.random_graphs.barabasi_albert_graph(200, 2)
# Define a layout; here the spring layout is used
pos = nx.spring_layout(BA)
nx.draw(BA, pos, with_labels=False, node_size=30)
plt.show()
plotDegreeDistribution(BA)

# A larger BA network: n = 20000, m = 2
BA = nx.random_graphs.barabasi_albert_graph(20000, 2)
plotDegreeDistribution(BA)

# A BA network with n = 500, m = 1
BA = nx.random_graphs.barabasi_albert_graph(500, 1)
pos = nx.spring_layout(BA)
nx.draw(BA, pos, with_labels=False, node_size=30)
plt.show()

nx.degree_histogram(BA)[:3]
BA.degree().items()[:3]
plt.hist(BA.degree().values())
plt.show()

def plotDegreeDistributionLongTail(G):
    # Same as plotDegreeDistribution, but on linear axes to show the long tail
    degs = defaultdict(int)
    for i in G.degree().values():
        degs[i] += 1
    items = sorted(degs.items())
    x, y = np.array(items).T
    y_sum = np.sum(y)
    y = [float(i)/y_sum for i in y]
    plt.plot(x, y, 'b-o')
    plt.legend(['Degree'])
    plt.xlabel('$K$', fontsize = 20)
    plt.ylabel('$P_K$', fontsize = 20)
    plt.title('$Degree\,Distribution$', fontsize = 20)
    plt.show()

# A BA network with n = 5000, m = 2
BA = nx.random_graphs.barabasi_albert_graph(5000, 2)
plotDegreeDistributionLongTail(BA)

def plotDegreeDistribution(G):
    # Log-log degree distribution, normalized to probabilities
    degs = defaultdict(int)
    for i in G.degree().values():
        degs[i] += 1
    items = sorted(degs.items())
    x, y = np.array(items).T
    y_sum = np.sum(y)
    y = [float(i)/y_sum for i in y]
    plt.plot(x, y, 'b-o')
    plt.xscale('log')
    plt.yscale('log')
    plt.legend(['Degree'])
    plt.xlabel('$K$', fontsize = 20)
    plt.ylabel('$P_K$', fontsize = 20)
    plt.title('$Degree\,Distribution$', fontsize = 20)
    plt.show()

# A BA network with n = 50000, m = 2
BA = nx.random_graphs.barabasi_albert_graph(50000, 2)
plotDegreeDistribution(BA)
_____no_output_____
MIT
code/17.networkx.ipynb
nju-teaching/computational-communication
Homework:
- Read Barabási et al. (1999), Internet: Diameter of the World-Wide Web. Nature, 401.
- Plot the out-degree and in-degree distributions of the WWW network.
- Use the BA model to generate a network with N nodes and power-law exponent $\gamma$ (see the sketch below).
- Compute how the average path length d depends on the number of nodes.
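The BA model itself produces an exponent of roughly $\gamma \approx 3$, so the third item needs a different generator. A minimal sketch, assuming the NetworkX 1.x API used throughout this notebook, draws a power-law degree sequence and feeds it to the configuration model; the realized exponent is only approximately $\gamma$, and the helper name is ours:

```python
import networkx as nx

def powerlaw_graph(N, gamma):
    # Draw a power-law degree sequence with exponent gamma and round to integers
    seq = [int(round(d)) for d in nx.utils.powerlaw_sequence(N, gamma)]
    # The configuration model requires an even degree sum
    if sum(seq) % 2 == 1:
        seq[0] += 1
    G = nx.configuration_model(seq)
    # Collapse parallel edges and drop self-loops to obtain a simple graph
    G = nx.Graph(G)
    G.remove_edges_from(G.selfloop_edges())
    return G

G = powerlaw_graph(1000, 2.5)
plotDegreeDistribution(G)  # reuse the helper defined at the top of this notebook
```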
Ns = [i*10 for i in [1, 10, 100, 1000]]
ds = []
for N in Ns:
    print(N)
    BA = nx.random_graphs.barabasi_albert_graph(N, 2)
    d = nx.average_shortest_path_length(BA)
    ds.append(d)

plt.plot(Ns, ds, 'r-o')
plt.xlabel('$N$', fontsize = 20)
plt.ylabel('$<d>$', fontsize = 20)
plt.xscale('log')
plt.show()
_____no_output_____
MIT
code/17.networkx.ipynb
nju-teaching/computational-communication
Now we have to put together the different datasets.
import pandas as pd files_df = pd.read_pickle("data/clean/files_df.pkl")
_____no_output_____
MIT
S02 - Data Wrangling/HCKT02 - Data Wrangling/instructor_solution/5.putting_all_together.ipynb
jtiagosg/batch3-students
We remove from the dataframe those rows whose origin is the website (these rows have `WEBSITE` in all the values) and those that come from the API (these rows have `API` in all the values).
files_df.shape website_ids = files_df[files_df.tierafterorder.isin(['WEBSITE'])].index api_ids = files_df[files_df.tierafterorder.isin(['API'])].index files_df = files_df[-files_df.tierafterorder.isin(['WEBSITE', 'API'])] files_df.shape files_df.tierafterorder.value_counts() scraped_df = pd.read_pickle('data/clean/scraped.pkl') scraped_df.shape scraped_df = scraped_df.loc[website_ids] scraped_df.shape scraped_df.head() targets_df = pd.read_pickle('data/clean/targets.pkl') storeid_df = pd.read_pickle('data/clean/storeids.pkl') api_df = pd.read_pickle('data/clean/api_df.pickle')
_____no_output_____
MIT
S02 - Data Wrangling/HCKT02 - Data Wrangling/instructor_solution/5.putting_all_together.ipynb
jtiagosg/batch3-students
We concatenate the dataframes that have different ids.
train_df = pd.concat( [ pd.concat([files_df, api_df], sort=True), scraped_df ], sort=True ) train_df.shape
_____no_output_____
MIT
S02 - Data Wrangling/HCKT02 - Data Wrangling/instructor_solution/5.putting_all_together.ipynb
jtiagosg/batch3-students
Now we join the files that share an index.
train_df = train_df.drop(columns=['returned', 'storeid']).join(targets_df).join(storeid_df) train_df.shape train_df.to_pickle('data/clean/train_df_merged.pkl')
_____no_output_____
MIT
S02 - Data Wrangling/HCKT02 - Data Wrangling/instructor_solution/5.putting_all_together.ipynb
jtiagosg/batch3-students
Recommendations with IBM

In this notebook, you will be putting your recommendation skills to use on real data from the IBM Watson Studio platform. You may either submit your notebook through the workspace here, or you may work from your local machine and submit through the next page. Either way, ensure that your code passes the project [RUBRIC](https://review.udacity.com/!/rubrics/2322/view). **Please save regularly.**

By following the table of contents, you will build out a number of different methods for making recommendations that can be used for different situations.

Table of Contents

I. [Exploratory Data Analysis](Exploratory-Data-Analysis)
II. [Rank Based Recommendations](Rank)
III. [User-User Based Collaborative Filtering](User-User)
IV. [Content Based Recommendations (EXTRA - NOT REQUIRED)](Content-Recs)
V. [Matrix Factorization](Matrix-Fact)
VI. [Extras & Concluding](conclusions)

At the end of the notebook, you will find directions for how to submit your work. Let's get started by importing the necessary libraries and reading in the data.
%autosave 180 import pandas as pd import numpy as np import matplotlib.pyplot as plt import project_tests as t import pickle import seaborn as sns import re import nltk from nltk.corpus import stopwords from nltk.stem.wordnet import WordNetLemmatizer from nltk.tokenize import word_tokenize from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import cosine_similarity from sklearn.metrics.pairwise import linear_kernel nltk.download('punkt') nltk.download('stopwords') nltk.download('wordnet') %matplotlib inline df = pd.read_csv('data/user-item-interactions.csv') df_content = pd.read_csv('data/articles_community.csv') del df['Unnamed: 0'] del df_content['Unnamed: 0'] # Show df to get an idea of the data df.head() # Show df_content to get an idea of the data df_content.head()
_____no_output_____
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
Part I : Exploratory Data AnalysisUse the dictionary and cells below to provide some insight into the descriptive statistics of the data.`1.` What is the distribution of how many articles a user interacts with in the dataset? Provide a visual and descriptive statistics to assist with giving a look at the number of times each user interacts with an article.
# make a groupby instance and count how many articles were read by each user
email_grouped_df = df.groupby('email')
num_article_email = email_grouped_df['article_id'].count()
print("Mean # article :", num_article_email.mean())
print("Quantile 0.25 , 0.5, 0.75: ", num_article_email.quantile(0.25), num_article_email.quantile(0.5), num_article_email.quantile(0.75))

# Draw histogram and boxplot using seaborn
f, axes = plt.subplots(1, 2, figsize=(16,5))
f.tight_layout()
sns.set(style="white", palette="muted", color_codes=True)
sns.set_context("poster")
sns.set_style("darkgrid")
sns.distplot(num_article_email, rug=False, kde=False, norm_hist=False, color='g', ax=axes[0])
sns.boxplot(num_article_email, ax=axes[1])

# Fill in the median and maximum number of user-article interactions below
median_val = num_article_email.median()
max_views_by_user = num_article_email.max()
print("50% of individuals interact with {} number of articles or fewer.".format(median_val))
print("The maximum number of user-article interactions by any 1 user is {}.".format(max_views_by_user))
# 50% of individuals interact with ____ number of articles or fewer.
# The maximum number of user-article interactions by any 1 user is ______.
50% of individuals interact with 3.0 number of articles or fewer. The maximum number of user-article interactions by any 1 user is 364.
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
`2.` Explore and remove duplicate articles from the **df_content** dataframe.
# Find and explore duplicate articles df_content.head() check_dupl_df_1 = df_content[df_content.duplicated(['article_id'])] check_dupl_df_1 # Remove any rows that have the same article_id - only keep the first df_content.drop_duplicates(subset='article_id', keep='first', inplace=True) check_dupl_df_1 = df_content[df_content.duplicated(['article_id'])] check_dupl_df_1
_____no_output_____
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
`3.` Use the cells below to find:**a.** The number of unique articles that have an interaction with a user. **b.** The number of unique articles in the dataset (whether they have any interactions or not).**c.** The number of unique users in the dataset. (excluding null values) **d.** The number of user-article interactions in the dataset.
unique_articles = len(df.article_id.unique()) # The number of unique articles that have at least one interaction total_articles = len(df_content.article_id.unique()) # The number of unique articles on the IBM platform df_email_na_dropped = df.dropna(subset=['email']) unique_users = len(df_email_na_dropped.email.unique()) # The number of unique users user_article_interactions = df.shape[0] # The number of user-article interactions
_____no_output_____
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
`4.` Use the cells below to find the most viewed **article_id**, as well as how often it was viewed. After talking to the company leaders, the `email_mapper` function was deemed a reasonable way to map users to ids. There were a small number of null values, and it was found that all of these null values likely belonged to a single user (which is how they are stored using the function below).
# df_content.head(3) article_id_grouped_df = df.groupby('article_id') print(article_id_grouped_df['email'].count().sort_values(ascending=False).index[0]) print(article_id_grouped_df['email'].count().sort_values(ascending=False).values[0]) most_viewed_article_id = '1429.0'# The most viewed article in the dataset as a string with one value following the decimal max_views = 937 # The most viewed article in the dataset was viewed how many times? ## No need to change the code here - this will be helpful for later parts of the notebook # Run this cell to map the user email to a user_id column and remove the email column def email_mapper(): coded_dict = dict() cter = 1 email_encoded = [] for val in df['email']: if val not in coded_dict: coded_dict[val] = cter cter+=1 email_encoded.append(coded_dict[val]) return email_encoded email_encoded = email_mapper() del df['email'] df['user_id'] = email_encoded # show header df.head() ## If you stored all your results in the variable names above, ## you shouldn't need to change anything in this cell sol_1_dict = { '`50% of individuals have _____ or fewer interactions.`': median_val, '`The total number of user-article interactions in the dataset is ______.`': user_article_interactions, '`The maximum number of user-article interactions by any 1 user is ______.`': max_views_by_user, '`The most viewed article in the dataset was viewed _____ times.`': max_views, '`The article_id of the most viewed article is ______.`': most_viewed_article_id, '`The number of unique articles that have at least 1 rating ______.`': unique_articles, '`The number of unique users in the dataset is ______`': unique_users, '`The number of unique articles on the IBM platform`': total_articles } # Test your dictionary against the solution t.sol_1_test(sol_1_dict)
It looks like you have everything right here! Nice job!
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
Part II: Rank-Based RecommendationsUnlike in the earlier lessons, we don't actually have ratings for whether a user liked an article or not. We only know that a user has interacted with an article. In these cases, the popularity of an article can really only be based on how often an article was interacted with.`1.` Fill in the function below to return the **n** top articles ordered with most interactions as the top. Test your function using the tests below.
def get_top_articles(n, df=df): ''' INPUT: n - (int) the number of top articles to return df - (pandas dataframe) df as defined at the top of the notebook OUTPUT: top_articles - (list) A list of the top 'n' article titles ''' article_id_grouped_df = df.groupby(['title']) top_articles = article_id_grouped_df['user_id'].count().sort_values(ascending=False).iloc[:n].index.tolist() return top_articles # Return the top article titles from df (not df_content) def get_top_article_ids(n, df=df): ''' INPUT: n - (int) the number of top articles to return df - (pandas dataframe) df as defined at the top of the notebook OUTPUT: top_articles - (list) A list of the top 'n' article titles ''' article_id_grouped_df = df.groupby(['article_id']) top_articles_ids = article_id_grouped_df['user_id'].count().sort_values(ascending=False).iloc[:n].index.tolist() return top_articles_ids # Return the top article ids print(get_top_articles(10)) print(get_top_article_ids(10)) # Test your function by returning the top 5, 10, and 20 articles top_5 = get_top_articles(5) top_10 = get_top_articles(10) top_20 = get_top_articles(20) # Test each of your three lists from above t.sol_2_test(get_top_articles)
Your top_5 looks like the solution list! Nice job. Your top_10 looks like the solution list! Nice job. Your top_20 looks like the solution list! Nice job.
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
Part III: User-User Based Collaborative Filtering`1.` Use the function below to reformat the **df** dataframe to be shaped with users as the rows and articles as the columns. * Each **user** should only appear in each **row** once.* Each **article** should only show up in one **column**. * **If a user has interacted with an article, then place a 1 where the user-row meets for that article-column**. It does not matter how many times a user has interacted with the article, all entries where a user has interacted with an article should be a 1. * **If a user has not interacted with an item, then place a zero where the user-row meets for that article-column**. Use the tests to make sure the basic structure of your matrix matches what is expected by the solution.
# create the user-article matrix with 1's and 0's def create_user_item_matrix(df): ''' INPUT: df - pandas dataframe with article_id, title, user_id columns OUTPUT: user_item - user item matrix Description: Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with an article and a 0 otherwise ''' # Fill in the function here user_item = df.groupby('user_id')['article_id'].value_counts().unstack() user_item[user_item.isna() == False] = 1 return user_item # return the user_item matrix user_item = create_user_item_matrix(df) ## Tests: You should just need to run this cell. Don't change the code. assert user_item.shape[0] == 5149, "Oops! The number of users in the user-article matrix doesn't look right." assert user_item.shape[1] == 714, "Oops! The number of articles in the user-article matrix doesn't look right." assert user_item.sum(axis=1)[1] == 36, "Oops! The number of articles seen by user 1 doesn't look right." print("You have passed our quick tests! Please proceed!")
You have passed our quick tests! Please proceed!
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
`2.` Complete the function below which should take a user_id and provide an ordered list of the most similar users to that user (from most similar to least similar). The returned result should not contain the provided user_id, as we know that each user is similar to him/herself. Because the results for each user here are binary, it (perhaps) makes sense to compute similarity as the dot product of two users. Use the tests to test your function.
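Because the matrix entries are just 1/NaN, every pairwise similarity can also be computed in one shot as a matrix product, which is handy for sanity-checking the per-user function completed below. A minimal sketch, assuming the `user_item` matrix built above (NaN where there is no interaction):

```python
# Replace NaN with 0 so the dot product counts shared articles
ui = user_item.fillna(0)

# similarity.loc[i, j] = number of articles users i and j have both interacted with
similarity = ui.dot(ui.T)

# The ten most similar users to user 1, excluding user 1 itself
print(similarity.loc[1].drop(1).sort_values(ascending=False).head(10).index.tolist())
```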
def find_similar_users(user_id, user_item=user_item): ''' INPUT: user_id - (int) a user_id user_item - (pandas dataframe) matrix of users by articles: 1's when a user has interacted with an article, 0 otherwise OUTPUT: similar_users - (list) an ordered list where the closest users (largest dot product users) are listed first Description: Computes the similarity of every pair of users based on the dot product Returns an ordered ''' # compute similarity of each user to the provided user user_item_tmp = user_item.copy() user_item_tmp[user_item_tmp.isna() == True] = 0 # 1. Make Nan to 0 row = user_item_tmp.loc[user_id] # 2. Select a row result_dot = row@user_item_tmp.T # 3. Dot product of each of row of the matrix result_dot.drop(labels = [user_id], inplace=True) # remove the own user's id most_similar_users = result_dot.sort_values(ascending=False).index.tolist() # sort by similarity # create list of just the ids return most_similar_users # return a list of the users in order from most to least similar # Do a spot check of your function print("The 10 most similar users to user 1 are: {}".format(find_similar_users(1)[:10])) print("The 5 most similar users to user 3933 are: {}".format(find_similar_users(3933)[:5])) print("The 3 most similar users to user 46 are: {}".format(find_similar_users(46)[:3]))
The 10 most similar users to user 1 are: [3933, 23, 3782, 203, 4459, 3870, 131, 4201, 46, 3697] The 5 most similar users to user 3933 are: [1, 3782, 23, 203, 4459] The 3 most similar users to user 46 are: [4201, 3782, 23]
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
`3.` Now that you have a function that provides the most similar users to each user, you will want to use these users to find articles you can recommend. Complete the functions below to return the articles you would recommend to each user.
def get_article_names(article_ids, df=df): ''' INPUT: article_ids - (list) a list of article ids df - (pandas dataframe) df as defined at the top of the notebook OUTPUT: article_names - (list) a list of article names associated with the list of article ids (this is identified by the title column) ''' # Your code here article_names = [] article_ids = list(map(float, article_ids)) for i in article_ids: try: title = df[df['article_id'] == i]['title'].unique()[0] except IndexError: title ="None" article_names.append(title) return article_names # Return the article names associated with list of article ids # try: # myVar # except IndexError: # myVar = "None" def get_user_articles(user_id, user_item=user_item): ''' INPUT: user_id - (int) a user id user_item - (pandas dataframe) matrix of users by articles: 1's when a user has interacted with an article, 0 otherwise OUTPUT: article_ids - (list) a list of the article ids seen by the user article_names - (list) a list of article names associated with the list of article ids (this is identified by the doc_full_name column in df_content) Description: Provides a list of the article_ids and article titles that have been seen by a user ''' # Your code here article_ids = user_item.loc[user_id][user_item.loc[user_id] ==1].index.tolist() article_ids = list(map(str, article_ids)) article_names = get_article_names(article_ids, df=df) return article_ids, article_names # return the ids and names def user_user_recs(user_id, m=10): ''' INPUT: user_id - (int) a user id m - (int) the number of recommendations you want for the user OUTPUT: recs - (list) a list of recommendations for the user Description: Loops through the users based on closeness to the input user_id For each user - finds articles the user hasn't seen before and provides them as recs Does this until m recommendations are found Notes: Users who are the same closeness are chosen arbitrarily as the 'next' user For the user where the number of recommended articles starts below m and ends exceeding m, the last items are chosen arbitrarily ''' recs = [] counter = 0 # Get seen article ids and names from selected user id article_ids, article_names = get_user_articles(user_id) # Make set to find unseen articles seen_ids_set = set(article_ids) # Find five similar users of the selected user most_similar_users = find_similar_users(user_id)[0:5] # Make recommendation list for sim_user in most_similar_users: if counter < m: # Get seen article ids and names from similar users sim_article_ids, sim_article_names = get_user_articles(sim_user) # Make dict (key: article_ids, value:article_names) sim_user_dict = dict(zip(sim_article_ids, sim_article_names)) # Make set to find unseen articles sim_seen_ids_set = set(sim_article_ids) # Create set of unseen articles_ids unseen_ids_set = sim_seen_ids_set.difference(seen_ids_set) for i in unseen_ids_set: if counter < m: recs.append(i) counter += 1 return recs # return your recommendations for this user_id get_article_names(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0']) # Check Results get_article_names(user_user_recs(1, 10)) # Return 10 recommendations for user 1 # Test your functions here - No need to change this code - just run this cell assert set(get_article_names(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r 
dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis']), "Oops! Your the get_article_names function doesn't work quite how we expect." assert set(get_article_names(['1320.0', '232.0', '844.0'])) == set(['housing (2015): united states demographic measures','self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook']), "Oops! Your the get_article_names function doesn't work quite how we expect." assert set(get_user_articles(20)[0]) == set(['1320.0', '232.0', '844.0']) assert set(get_user_articles(20)[1]) == set(['housing (2015): united states demographic measures', 'self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook']) assert set(get_user_articles(2)[0]) == set(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0']) assert set(get_user_articles(2)[1]) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis']) print("If this is all you see, you passed all of our tests! Nice job!")
If this is all you see, you passed all of our tests! Nice job!
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
`4.` Now we are going to improve the consistency of the **user_user_recs** function from above.

* Instead of arbitrarily choosing when we obtain users who are all the same closeness to a given user - choose the users that have the most total article interactions before choosing those with fewer article interactions.
* Instead of arbitrarily choosing articles from the user where the number of recommended articles starts below m and ends exceeding m, choose the articles with the most total interactions before choosing those with fewer total interactions. This ranking should be what would be obtained from the **top_articles** function you wrote earlier.
def get_top_sorted_users(user_id, df=df, user_item=user_item): ''' INPUT: user_id - (int) df - (pandas dataframe) df as defined at the top of the notebook user_item - (pandas dataframe) matrix of users by articles: 1's when a user has interacted with an article, 0 otherwise OUTPUT: neighbors_df - (pandas dataframe) a dataframe with: neighbor_id - is a neighbor user_id similarity - measure of the similarity of each user to the provided user_id num_interactions - the number of articles viewed by the user - if a u Other Details - sort the neighbors_df by the similarity and then by number of interactions where highest of each is higher in the dataframe ''' # Make neighbor_id column df_user_id_grouped =df.groupby("user_id") df_user_id_grouped['article_id'].count().sort_values(ascending=False) most_similar_users = find_similar_users(user_id)[0:10] neighbors_df = pd.DataFrame() neighbors_df['neighbor_id'] = most_similar_users # make similarity column user_item_tmp = user_item.copy() row = user_item_tmp.loc[user_id] # Select a row result_dot = row@user_item_tmp.T # Dot product of each of row of the matrix result_dot.drop(labels = [user_id], inplace=True) # remove the own user's id similarity = result_dot.sort_values(ascending=False).values.tolist()[0:10] neighbors_df['similarity'] = similarity # Make num_interactions column num_interactions = [] for i in neighbors_df['neighbor_id']: counted_interaction = df_user_id_grouped['article_id'].count().loc[i] num_interactions.append(counted_interaction) neighbors_df['num_interactions'] = num_interactions neighbors_df = neighbors_df.sort_values(by=['similarity', 'num_interactions'], ascending=False) return neighbors_df # Return the dataframe specified in the doc_string def user_user_recs_part2(user_id, m=10): ''' INPUT: user_id - (int) a user id m - (int) the number of recommendations you want for the user OUTPUT: recs - (list) a list of recommendations for the user by article id rec_names - (list) a list of recommendations for the user by article title Description: Loops through the users based on closeness to the input user_id For each user - finds articles the user hasn't seen before and provides them as recs Does this until m recommendations are found Notes: * Choose the users that have the most total article interactions before choosing those with fewer article interactions. * Choose articles with the articles with the most total interactions before choosing those with fewer total interactions. 
''' recs = [] rec_names =[] counter = 0 # Get seen article ids and names from selected user id article_ids, article_names = get_user_articles(user_id) # Make set to find unseen articles seen_ids_set = set(article_ids) # Find five similar users of the selected user neighbors_df = get_top_sorted_users(user_id, df=df, user_item=user_item) similar_users_list = neighbors_df['neighbor_id'] # Get neighbor_df # Make recommendation list for sim_user in similar_users_list: if counter < m: # Get seen article ids and names from similar users sim_article_ids, sim_article_names = get_user_articles(sim_user) # Make dict (key: article_ids, value:article_names) sim_user_dict = dict(zip(sim_article_ids, sim_article_names)) # Make set to find unseen articles sim_seen_ids_set = set(sim_article_ids) # Create set of unseen articles_ids unseen_ids_set = sim_seen_ids_set.difference(seen_ids_set) # unseen_article_names_set = sim_seen_article_names_set.difference(seen_article_names_set) for i in unseen_ids_set: if counter < m: recs.append(i) rec_names.append(sim_user_dict[i]) counter += 1 return recs, rec_names # Quick spot check - don't change this code - just use it to test your functions rec_ids, rec_names = user_user_recs_part2(20, 10) print("The top 10 recommendations for user 20 are the following article ids:") print(rec_ids) print() print("The top 10 recommendations for user 20 are the following article names:") print(rec_names)
The top 10 recommendations for user 20 are the following article ids: ['1162.0', '1351.0', '1164.0', '491.0', '1186.0', '14.0', '1429.0', '162.0', '939.0', '813.0'] The top 10 recommendations for user 20 are the following article names: ['analyze energy consumption in buildings', 'model bike sharing data with spss', 'analyze open data sets with pandas dataframes', 'this week in data science (may 23, 2017)', 'connect to db2 warehouse on cloud and db2 using scala', 'got zip code data? prep it for analytics. – ibm watson data lab – medium', 'use deep learning for image classification', 'an introduction to stock market data analysis with r (part 1)', 'deep learning from scratch i: computational graphs', 'generative adversarial networks (gans)']
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
`5.` Use your functions from above to correctly fill in the solutions to the dictionary below. Then test your dictionary against the solution. Provide the code you need to answer each following the comments below.
### Tests with a dictionary of results neighbor_df_1 = get_top_sorted_users(1, df=df, user_item=user_item) neighbor_df_131 = get_top_sorted_users(131, df=df, user_item=user_item) user1_most_sim = neighbor_df_1.neighbor_id[0].item()# Find the user that is most similar to user 1 user131_10th_sim = neighbor_df_131.neighbor_id[9].item() # Find the 10th most similar user to user 131 ## Dictionary Test Here sol_5_dict = { 'The user that is most similar to user 1.': user1_most_sim, 'The user that is the 10th most similar to user 131': user131_10th_sim, } t.sol_5_test(sol_5_dict)
This all looks good! Nice job!
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
`6.` If we were given a new user, which of the above functions would you be able to use to make recommendations? Explain. Can you think of a better way we might make recommendations? Use the cell below to explain a better method for new users.

Rank-based recommendation is suitable for a new user because it only depends on the popularity of an article. Since a new user has made no interactions with articles, user-user based collaborative filtering is not possible. If we know the preferences of a new user, for example if the new user provides a few keywords, we could use that information to make a recommendation (a small sketch of this keyword idea follows below). In sum, content-based and rank-based recommendations are better methods for new users.

`7.` Using your existing functions, provide the top 10 recommended articles you would provide for a new user below. You can test your function against our thoughts to make sure we are all on the same page with how we might make a recommendation.
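A minimal sketch of the keyword idea above, before the rank-based answer to question `7.` below. It assumes a hypothetical list of keywords supplied by the new user and the `df` dataframe loaded earlier, and simply ranks the articles whose titles mention any keyword by their total number of interactions; the helper name is ours:

```python
def keyword_recs(keywords, df=df, m=10):
    '''Rank articles whose titles mention any of the keywords by total interactions.'''
    pattern = '|'.join(keywords)
    matched = df[df['title'].str.contains(pattern, case=False, na=False)]
    top = matched.groupby('article_id')['user_id'].count().sort_values(ascending=False)
    return top.index[:m].tolist()

# Hypothetical example: a new user interested in deep learning and Spark
print(keyword_recs(['deep learning', 'spark']))
```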
new_user = '0.0' # top_10 = get_top_articles(10) top_10 = get_top_article_ids(10, df=df) # What would your recommendations be for this new user '0.0'? As a new user, they have no observed articles. # Provide a list of the top 10 article ids you would give to top_10 = list(map(str, top_10)) new_user_recs = top_10# Your recommendations here assert set(new_user_recs) == set(['1314.0','1429.0','1293.0','1427.0','1162.0','1364.0','1304.0','1170.0','1431.0','1330.0']), "Oops! It makes sense that in this case we would want to recommend the most popular articles, because we don't know anything about these users." print("That's right! Nice job!")
That's right! Nice job!
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
Part IV: Content Based Recommendations (EXTRA - NOT REQUIRED)Another method we might use to make recommendations is to perform a ranking of the highest ranked articles associated with some term. You might consider content to be the **doc_body**, **doc_description**, or **doc_full_name**. There isn't one way to create a content based recommendation, especially considering that each of these columns hold content related information. `1.` Use the function body below to create a content based recommender. Since there isn't one right answer for this recommendation tactic, no test functions are provided. Feel free to change the function inputs if you decide you want to try a method that requires more input values. The input values are currently set with one idea in mind that you may use to make content based recommendations. One additional idea is that you might want to choose the most popular recommendations that meet your 'content criteria', but again, there is a lot of flexibility in how you might make these recommendations. This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills.
def make_content_recs(article_id, df_content, df, m=10): ''' INPUT: article_id = (int) a article id in df_content m - (int) the number of recommendations you want for the user df_content - (pandas dataframe) df_content as defined at the top of the notebook df - (pandas dataframe) df as defined at the top of the notebook OUTPUT: recs - (list) a list of recommendations for the user by article id rec_names - (list) a list of recommendations for the user by article title ''' corpus = df_content['doc_description'] df_content['doc_description'].fillna(df_content['doc_full_name'], inplace=True) stop_words = stopwords.words("english") lemmatizer = WordNetLemmatizer() # Text Processing, Feature Extraction def tokenize(text): ''' Function splits text into separate words and gets a word lowercased and removes whitespaces at the ends of a word. The funtions also cleans irrelevant stopwords. Input: 1. text: text message Output: 1. Clean_tokens : list of tokenized clean words ''' # Get rid of other sepcial characters text = re.sub(r"[^a-zA-Z0-9]", " ", text) # Tokenize tokens = word_tokenize(text) # Lemmatize lemmatizer = WordNetLemmatizer() clean_tokens = [] for tok in tokens: clean_tok = lemmatizer.lemmatize(tok, pos='v').lower().strip() clean_tokens.append(clean_tok) # Remove stop words stopwords = nltk.corpus.stopwords.words('english') clean_tokens = [token for token in clean_tokens if token not in stopwords] return clean_tokens vect = TfidfVectorizer(tokenizer=tokenize) # get counts of each token (word) in text data X = vect.fit_transform(corpus) X = X.toarray() cosine_similarity = linear_kernel(X, X) df_similarity = pd.DataFrame(cosine_similarity[article_id], columns=['similarity']) df_similarity_modified = df_similarity.drop(article_id) recs = df_similarity_modified.similarity.sort_values(ascending=False).index[0:10].tolist() rec_names = [] for i in recs: name = df_content[df_content['article_id'] == i]['doc_full_name'].values[0] rec_names.append(name) return recs, rec_names recs, rec_names = make_content_recs(0, df_content, df, m=10) print(recs) print("**"*30) print(rec_names)
[730, 194, 53, 470, 1005, 980, 423, 266, 681, 670] ************************************************************ ['Developing for the IBM Streaming Analytics service', 'Data science for real-time streaming analytics', 'Introducing Streams Designer', 'What’s new in the Streaming Analytics service on Bluemix', 'Real-time Sentiment Analysis of Twitter Hashtags with Spark', 'logshare', 'Web application state, à la Dogfight (1983) – IBM Watson Data Lab', 'Developing IBM Streams applications with the Python API (Version 1.6)', 'Real-Time Sentiment Analysis of Twitter Hashtags with Spark (+ PixieDust)', 'Calculate moving averages on real time data with Streams Designer']
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
`2.` Now that you have put together your content-based recommendation system, use the cell below to write a summary explaining how your content based recommender works. Do you see any possible improvements that could be made to your function? Is there anything novel about your content based recommender? This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills.

Before building my content-based recommendation system, I read an article [here](https://towardsdatascience.com/introduction-to-two-approaches-of-content-based-recommendation-system-fc797460c18c) to get an idea of how to build one. After reading it, I chose TF-IDF (Term Frequency - Inverse Document Frequency) to turn the article text into TF-IDF vectors, and then used cosine similarity to find articles similar to a given article. This system can work well for a user who has only one interaction with an article. In my opinion, the system needs to handle three cases. The first case is a brand-new user with no interactions; here the system can fall back on the `rank based recommendation system` using `get_top_articles(n, df=df)` and `get_top_article_ids(n, df=df)`. The second case is a user who has interacted with an article that has information in df_content. For this, the `doc_description` column can be used to find similar articles: the text in that column was cleaned and vectorized by TfidfVectorizer, producing a matrix whose rows hold the vectorized articles and whose columns are tokenized words like 'data', 'science', and 'IBM'. This matrix was used to get the top 10 most similar articles, which are then returned in the order given by the `rank based recommendation system`. This second case is handled by `make_content_recs(article_id, df_content, df, m=10)`. The last case is the opposite of the previous one: a user who has interacted with an article that has no information in df_content. To handle this, the `title` column in df (rather than `doc_description`) was used for the content-based recommendation. The title of the given article was tokenized, the tokens were used to slice the TF-IDF matrix built in case two, the sum of each row was calculated, and the top 10 articles were returned in descending order of that score. This last case is handled by `make_content_recs_2(article_id, df_content, df, m=10)`.

`3.` Use your content-recommendation system to make recommendations for the below scenarios based on the comments. Again no tests are provided here, because there isn't one right answer that could be used to find these content based recommendations. This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills.
def make_content_recs_2(article_id, df_content, df, m=10): ''' INPUT: article_id = (int) a article id in df_content m - (int) the number of recommendations you want for the user df_content - (pandas dataframe) df_content as defined at the top of the notebook df - (pandas dataframe) df as defined at the top of the notebook OUTPUT: recs - (list) a list of recommendations for the user by article id rec_names - (list) a list of recommendations for the user by article title ''' if article_id in df_content.article_id: recs, rec_names =make_content_recs(article_id, df_content, df, m=10) else : corpus = df_content['doc_description'] df_content['doc_description'].fillna(df_content['doc_full_name'], inplace=True) stop_words = stopwords.words("english") lemmatizer = WordNetLemmatizer() # Text Processing, Feature Extraction def tokenize(text): ''' Function splits text into separate words and gets a word lowercased and removes whitespaces at the ends of a word. The funtions also cleans irrelevant stopwords. Input: 1. text: text message Output: 1. Clean_tokens : list of tokenized clean words ''' # Get rid of other sepcial characters text = re.sub(r"[^a-zA-Z0-9]", " ", text) # Tokenize tokens = word_tokenize(text) # Lemmatize lemmatizer = WordNetLemmatizer() clean_tokens = [] for tok in tokens: clean_tok = lemmatizer.lemmatize(tok, pos='v').lower().strip() clean_tokens.append(clean_tok) # Remove stop words stopwords = nltk.corpus.stopwords.words('english') clean_tokens = [token for token in clean_tokens if token not in stopwords] return clean_tokens vect = TfidfVectorizer(tokenizer=tokenize) # get counts of each token (word) in text data X = vect.fit_transform(corpus) X = X.toarray() tfidf_feature_name = vect.get_feature_names() # Get title of the document of interest booktitle = df[df['article_id'] == article_id]['title'].values[0] # Tokenize the title booktitle_tokenized = tokenize(booktitle) X_slice_list = [] for i in booktitle_tokenized: if i in tfidf_feature_name: X_slice_list.append(tfidf_feature_name.index(i)) X_slice_list.sort() X_sliced = X[:,X_slice_list] check_df = pd.DataFrame(X_sliced, columns=X_slice_list) check_df['sum'] = check_df.sum(axis=1) recs = check_df.sort_values("sum", ascending=False)[0:10].index.tolist() rec_names = [] for i in recs: name = df_content[df_content['article_id'] == i]['doc_full_name'].values[0] rec_names.append(name) return recs, rec_names recs, rec_names = make_content_recs_2(1427, df_content, df, m=10) print(recs, rec_names ) df.groupby('user_id')['title'].count() def final_recs(user_id=None, article_id=None, df_content=df_content, df=df, m=10): # No arguments of user_id and article_id / New user case if (user_id==None and article_id==None) or (user_id not in df['user_id'].unique() and article_id==None) : recs = get_top_article_ids(m, df=df) rec_names = get_top_articles(m, df=df) # Existed user elif user_id in df['user_id'].unique() and article_id==None: recs ,rec_names = user_user_recs_part2(user_id, m=10) # One article given elif article_id != None and user_id == None: recs, rec_names = make_content_recs_2(article_id, df_content, df, m=10) elif user_id != None and article_id !=None: print("input only user_id or article_id") recs = [] rec_names =[] return recs, rec_names # make recommendations for a brand new user recs, rec_names = final_recs(user_id=10, article_id=10, df_content=df_content, df=df, m=10) print(recs, rec_names) # make a recommendations for a user who only has interacted with article id '1427.0' recs, rec_names = final_recs(article_id=1427, 
df_content=df_content, df=df, m=10) print(recs, rec_names) recs, rec_names = final_recs(article_id=1314.0, df_content=df_content, df=df, m=10) print(recs, rec_names) # normal cases recs, rec_names = final_recs(user_id=23, df_content=df_content, df=df, m=10) print(recs, rec_names) recs, rec_names = final_recs(user_id=21, df_content=df_content, df=df, m=10) print(recs, rec_names) recs, rec_names = final_recs(df_content=df_content, df=df, m=10) print(recs, rec_names)
input only user_id or article_id [] [] [384, 805, 48, 662, 809, 161, 893, 686, 723, 655] ['Continuous Learning on Watson', 'Machine Learning for everyone', 'Data Science Experience Documentation', 'Build Deep Learning Architectures With Neural Network Modeler', 'Use the Machine Learning Library', 'Use the Machine Learning Library in Spark', 'Use the Machine Learning Library in IBM Analytics for Apache Spark', 'Score a Predictive Model Built with IBM SPSS Modeler, WML & DSX', '10 Essential Algorithms For Machine Learning Engineers', 'Create a project for Watson Machine Learning in DSX'] [730, 470, 266, 0, 670, 931, 774, 194, 53, 651] ['Developing for the IBM Streaming Analytics service', 'What’s new in the Streaming Analytics service on Bluemix', 'Developing IBM Streams applications with the Python API (Version 1.6)', 'Detect Malfunctioning IoT Sensors with Streaming Analytics', 'Calculate moving averages on real time data with Streams Designer', 'Short-Notice Serverless Conference', 'Authenticating Node-RED using JSONWebToken, part 2', 'Data science for real-time streaming analytics', 'Introducing Streams Designer', 'Analyzing streaming Data from Kafka Topics'] ['225.0', '205.0', '173.0', '522.0', '766.0', '684.0', '491.0', '1186.0', '1116.0', '57.0'] ['a visual explanation of the back propagation algorithm for neural networks', "a beginner's guide to variational methods", '10 must attend data science, ml and ai conferences in 2018', 'share the (pixiedust) magic – ibm watson data lab – medium', 'making data science a team sport', 'flexdashboard: interactive dashboards for r', 'this week in data science (may 23, 2017)', 'connect to db2 warehouse on cloud and db2 using scala', 'airbnb data for analytics: new york city reviews', 'transfer learning for flight delay prediction via variational autoencoders'] ['973.0', '252.0', '821.0', '1291.0', '348.0', '729.0', '939.0', '1159.0', '316.0', '1304.0'] ['recent trends in recommender systems', 'web picks (week of 4 september 2017)', 'using rstudio in ibm data science experience', 'fertility rate by country in total births per woman', 'this week in data science (april 25, 2017)', 'pixiedust 1.0 is here! – ibm watson data lab', 'deep learning from scratch i: computational graphs', 'analyze facebook data using ibm watson and watson studio', 'leverage python, scikit, and text classification for behavioral profiling', 'gosales transactions for logistic regression model'] [1429.0, 1330.0, 1431.0, 1427.0, 1364.0, 1314.0, 1293.0, 1170.0, 1162.0, 1304.0] ['use deep learning for image classification', 'insights from new york car accident reports', 'visualize car data with brunel', 'use xgboost, scikit-learn & ibm watson machine learning apis', 'predicting churn with the spss random tree algorithm', 'healthcare python streaming application demo', 'finding optimal locations of new store using decision optimization', 'apache spark lab, part 1: basic concepts', 'analyze energy consumption in buildings', 'gosales transactions for logistic regression model']
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
Part V: Matrix Factorization

In this part of the notebook, you will use matrix factorization to make article recommendations to the users on the IBM Watson Studio platform.

`1.` You should have already created a **user_item** matrix above in **question 1** of **Part III**. This first question here will just require that you run the cells to get things set up for the rest of **Part V** of the notebook.
# Load the matrix here user_item_matrix = pd.read_pickle('user_item_matrix.p') # quick look at the matrix user_item_matrix.head() user_item_matrix.shape user_item_matrix.to_numpy()
_____no_output_____
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
`2.` In this situation, you can use Singular Value Decomposition from [numpy](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.svd.html) on the user-item matrix. Use the cell to perform SVD, and explain why this is different than in the lesson.
# Perform SVD on the User-Item Matrix Here u, s, vt = np.linalg.svd(user_item_matrix)# use the built in to get the three matrices s.shape, u.shape, vt.shape
_____no_output_____
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
Here, the user-item matrix passed to linalg.svd has no missing values: every element is 0 or 1. In the previous lesson, the matrix contained many null cells, so it could not be passed to numpy's SVD directly.

`3.` Now for the tricky part, how do we choose the number of latent features to use? Running the below cell, you can see that as the number of latent features increases, we obtain a lower error rate on making predictions for the 1 and 0 values in the user-item matrix. Run the cell below to get an idea of how the accuracy improves as we increase the number of latent features.
num_latent_feats = np.arange(10,700+10,20) sum_errs = [] for k in num_latent_feats: # restructure with k latent features s_new, u_new, vt_new = np.diag(s[:k]), u[:, :k], vt[:k, :] # take dot product user_item_est = np.around(np.dot(np.dot(u_new, s_new), vt_new)) # compute error for each prediction to actual value diffs = np.subtract(user_item_matrix, user_item_est) # total errors and keep track of them err = np.sum(np.sum(np.abs(diffs))) sum_errs.append(err) plt.figure(figsize=(16,5)) plt.plot(num_latent_feats, 1 - np.array(sum_errs)/df.shape[0]); plt.xlabel('Number of Latent Features'); plt.ylabel('Accuracy'); plt.title('Accuracy vs. Number of Latent Features');
_____no_output_____
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
`4.` From the above, we can't really be sure how many features to use, because simply having a better way to predict the 1's and 0's of the matrix doesn't exactly give us an indication of if we are able to make good recommendations. Instead, we might split our dataset into a training and test set of data, as shown in the cell below. Use the code from question 3 to understand the impact on accuracy of the training and test sets of data with different numbers of latent features. Using the split below: * How many users can we make predictions for in the test set? * How many users are we not able to make predictions for because of the cold start problem?* How many articles can we make predictions for in the test set? * How many articles are we not able to make predictions for because of the cold start problem?
df_train = df.head(40000) df_test = df.tail(5993) def create_test_and_train_user_item(df_train, df_test): ''' INPUT: df_train - training dataframe df_test - test dataframe OUTPUT: user_item_train - a user-item matrix of the training dataframe (unique users for each row and unique articles for each column) user_item_test - a user-item matrix of the testing dataframe (unique users for each row and unique articles for each column) test_idx - all of the test user ids test_arts - all of the test article ids ''' user_item_train = create_user_item_matrix(df_train) user_item_test = create_user_item_matrix(df_test) # nan to zero user_item_train[np.isnan(user_item_train)] = 0 user_item_test[np.isnan(user_item_test)] = 0 test_idx = user_item_test.index.values test_arts = user_item_test.columns.values return user_item_train, user_item_test, test_idx, test_arts user_item_train, user_item_test, test_idx, test_arts = create_test_and_train_user_item(df_train, df_test) df_train = df.head(40000) df_test = df.tail(5993) user_item_train = create_user_item_matrix(df_train) user_item_test = create_user_item_matrix(df_test) user_item_train[np.isnan(user_item_train)] = 0 user_item_test[np.isnan(user_item_test)] = 0 user_item_test.index.values user_item_test.columns.values user_item_train.shape # 'How many users can we make predictions for in the test set?': train_idx = user_item_train.index.values test_idx = user_item_test.index.values answer1 = len(np.intersect1d(test_idx,train_idx)) # 'How many users in the test set are we not able to make predictions for because of the cold start problem?': answer2 = len(test_idx) - answer1 # 'How many articles can we make predictions for in the test set?' train_arts = user_item_train.columns.values test_arts = user_item_test.columns.values answer3 = len(np.intersect1d(test_arts,train_arts)) # 'How many articles in the test set are we not able to make predictions for because of the cold start problem? answer4 = len(test_arts) - answer3 print(answer1, answer2, answer3, answer4) # Replace the values in the dictionary below a = 662 b = 574 c = 20 d = 0 sol_4_dict = { 'How many users can we make predictions for in the test set?': c, 'How many users in the test set are we not able to make predictions for because of the cold start problem?': a, 'How many movies can we make predictions for in the test set?': b, 'How many movies in the test set are we not able to make predictions for because of the cold start problem?': d } t.sol_4_test(sol_4_dict)
Awesome job! That's right! All of the test movies are in the training data, but there are only 20 test users that were also in the training set. All of the other users that are in the test set we have no data on. Therefore, we cannot make predictions for these users using SVD.
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
`5.` Now use the **user_item_train** dataset from above to find U, S, and V transpose using SVD. Then find the subset of rows in the **user_item_test** dataset that you can predict using this matrix decomposition with different numbers of latent features to see how many features makes sense to keep based on the accuracy on the test data. This will require combining what was done in questions `2` - `4`.Use the cells below to explore how well SVD works towards making predictions for recommendations on the test data.
# fit SVD on the user_item_train matrix u_train, s_train, vt_train = np.linalg.svd(user_item_train) # fit svd similar to above then use the cells below # Use these cells to see how well you can use the training # decomposition to predict on test data both_rows = user_item_train.index.isin(test_idx) rows_mask = np.intersect1d(user_item_train.index,test_idx) both_cols = user_item_train.columns.isin(test_arts) cols_mask = np.intersect1d(user_item_train.columns,test_arts) u_test = u_train[both_rows,:] vt_test = vt_train[:, both_cols] user_item_test = user_item_test.loc[rows_mask, cols_mask] user_item_test.shape user_item_train.shape num_latent_feats = np.arange(10,700+10,20) sum_errs_train = [] sum_errs_test = [] for k in num_latent_feats: # restructure with k latent features s_train_new, u_train_new, vt_train_new = np.diag(s_train[:k]), u_train[:, :k], vt_train[:k, :] s_test_new, u_test_new, vt_test_new = np.diag(s_train[:k]), u_test[:, :k], vt_test[:k, :] # take dot product user_item_est_train = np.around(np.dot(np.dot(u_train_new, s_train_new), vt_train_new)) user_item_est_test = np.around(np.dot(np.dot(u_test_new, s_test_new), vt_test_new)) # compute error for each prediction to actual value diffs_train = np.subtract(user_item_train, user_item_est_train) diffs_test = np.subtract(user_item_test, user_item_est_test) # total errors and keep track of them err_train = np.sum(np.sum(np.abs(diffs_train))) sum_errs_train.append(err_train) err_test = np.sum(np.sum(np.abs(diffs_test))) sum_errs_test.append(err_test) plt.figure(figsize=(16,5)) plt.plot(num_latent_feats, 1 - np.array(sum_errs_train)/(user_item_train.shape[0]*user_item_train.shape[1]), label="Train"); plt.plot(num_latent_feats, 1 - np.array(sum_errs_test)/(user_item_test.shape[0]*user_item_test.shape[1]), label="Test"); plt.xlabel('Number of Latent Features'); plt.ylabel('Accuracy'); plt.title('Accuracy vs. Number of Latent Features'); plt.legend();
_____no_output_____
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
`6.` Use the cell below to comment on the results you found in the previous question. Given the circumstances of your results, discuss what you might do to determine if the recommendations you make with any of the above recommendation systems are an improvement to how users currently find articles?

1. Brief summary of the plot. As the number of latent features increases, the accuracy on the training set increases but the accuracy on the test set decreases. I think this comes from a characteristic of the matrix used in the SVD: it has many 0's in every row. Predicting the 0s is therefore easy, but predicting the 1s (meaning that the user read the article) was not successful. This can be checked in the cell below, which shows the sum of absolute values of the elements in each row of the diffs dataframe, obtained by subtracting the predicted user_item_test matrix from the real user_item_test. If the SVD had predicted correctly, these sums of differences would have been small.

2. Suggestions for improving the recommendation systems. To validate recommendation systems, there are two approaches suggested in the Matrix Factorization for Recommendations lesson. One is to check validation metrics such as sales, engagement, or click rates after deploying the new recommendation system. This is called online testing, and it can be performed with A/B testing: the users of the IBM Watson Studio platform would have to be split into two groups in a statistically sound way, with group A exposed to the old recommendation system and group B using the new one. Since only logged-in users can access articles on IBM Cloud, click rates or like rates on the recommended articles would be useful metrics for this experiment. The other approach is offline testing, like I did with SVD above. As shown, the model did not predict well because of the imbalanced data, with many 0s and few 1s. However, as shown in previous lessons, applying FunkSVD could be another way to validate the recommendation system (a minimal sketch follows after the check below); to do this, a rating system would have to exist on the IBM Watson Studio platform.
print("# of row / sum of readings / sum of difference") for i in range(20): print(i, " ",user_item_test.iloc[i].abs().sum(), " ",diffs_test.iloc[i].abs().sum())
# of row / sum of readings / sum of difference 0 2.0 12.0 1 7.0 31.0 2 5.0 16.0 3 5.0 6.0 4 1.0 5.0 5 32.0 48.0 6 3.0 36.0 7 55.0 65.0 8 1.0 2.0 9 26.0 26.0 10 8.0 24.0 11 1.0 2.0 12 1.0 2.0 13 8.0 10.0 14 10.0 9.0 15 2.0 22.0 16 16.0 20.0 17 5.0 19.0 18 26.0 37.0 19 4.0 16.0
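To make the class-imbalance point above concrete, the following is a minimal sketch (not in the original notebook) that scores the reconstruction only where it matters, i.e. on the 1's. It assumes `user_item_test` and `user_item_est_test` are still in memory from the cells above (the latter holding the estimates for the last value of `k` in the loop); precision and recall on the read articles are far more informative here than overall accuracy.

# Hedged sketch: evaluate only the "read" cells (1's) instead of overall accuracy.
# Assumes user_item_test (DataFrame) and user_item_est_test (array) from the cells above.
import numpy as np

actual = user_item_test.values
predicted = np.clip(user_item_est_test, 0, 1)  # rounded estimates, clipped to {0, 1}

true_positives = np.sum((actual == 1) & (predicted == 1))
false_positives = np.sum((actual == 0) & (predicted == 1))
false_negatives = np.sum((actual == 1) & (predicted == 0))

precision = true_positives / (true_positives + false_positives + 1e-9)
recall = true_positives / (true_positives + false_negatives + 1e-9)

print("Precision on read articles: {:.3f}".format(precision))
print("Recall on read articles: {:.3f}".format(recall))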
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
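The discussion above points to FunkSVD as an alternative offline approach once real ratings exist. Purely as an illustration (this is not part of the original submission; the function name, toy matrix and hyperparameters are my own), here is a rough numpy sketch of the SGD-based factorization described in the lessons:

import numpy as np

def funk_svd(ratings, latent_features=12, learning_rate=0.005, iters=50):
    """Rough FunkSVD sketch: factorize a ratings matrix where np.nan marks missing entries."""
    n_users, n_items = ratings.shape
    user_mat = np.random.rand(n_users, latent_features)
    item_mat = np.random.rand(latent_features, n_items)

    for _ in range(iters):
        for i in range(n_users):
            for j in range(n_items):
                if not np.isnan(ratings[i, j]):
                    # error between the observed rating and the current prediction
                    err = ratings[i, j] - np.dot(user_mat[i, :], item_mat[:, j])
                    # gradient descent step on both latent factor matrices
                    user_mat[i, :] += learning_rate * 2 * err * item_mat[:, j]
                    item_mat[:, j] += learning_rate * 2 * err * user_mat[i, :]
    return user_mat, item_mat

# Tiny toy example (hypothetical data, np.nan = unobserved rating)
toy = np.array([[5.0, np.nan, 3.0], [4.0, 2.0, np.nan]])
u_mat, i_mat = funk_svd(toy, latent_features=2, iters=100)
print(np.dot(u_mat, i_mat))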
Extras

Using your workbook, you could now save your recommendations for each user, develop a class to make new predictions and update your results, and make a Flask app to deploy your results. These tasks are beyond what is required for this project. However, from what you learned in the lessons, you are certainly capable of taking these tasks on to improve upon your work here!

Conclusion

> Congratulations! You have reached the end of the Recommendations with IBM project!

> **Tip**: Once you are satisfied with your work here, check over your report to make sure that it satisfies all the areas of the [rubric](https://review.udacity.com/!/rubrics/2322/view). You should also probably remove all of the "Tips" like this one so that the presentation is as polished as possible.

Directions to Submit

> Before you submit your project, you need to create a .html or .pdf version of this notebook in the workspace here. To do that, run the code cell below. If it worked correctly, you should get a return code of 0, and you should see the generated .html file in the workspace directory (click on the orange Jupyter icon in the upper left).

> Alternatively, you can download this report as .html via the **File** > **Download as** submenu, and then manually upload it into the workspace directory by clicking on the orange Jupyter icon in the upper left, then using the Upload button.

> Once you've done this, you can submit your project by clicking on the "Submit Project" button in the lower right here. This will create and submit a zip file with this .ipynb doc and the .html or .pdf version you created. Congratulations!
from subprocess import call call(['python', '-m', 'nbconvert', 'Recommendations_with_IBM.ipynb'])
_____no_output_____
MIT
notebook/Recommendations_with_IBM.ipynb
dalpengholic/Udacity_Recommendations_with_IBM
Generalised Regression

In this notebook, we will build a generalised regression model on the **electricity consumption** dataset. The dataset contains two variables - year and electricity consumption.
#importing libraries
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn import metrics

#fetching data
elec_cons = pd.read_csv("total-electricity-consumption-us.csv", sep = ',', header= 0 )
elec_cons.head()

# number of observations: 51
elec_cons.shape

# checking NA
# there are no missing values in the dataset
elec_cons.isnull().values.any()

size = len(elec_cons.index)
index = range(0, size, 5)

train = elec_cons[~elec_cons.index.isin(index)]
test = elec_cons[elec_cons.index.isin(index)]

print(len(train))
print(len(test))

# converting X to a two dimensional array, as required by the learning algorithm
X_train = train.Year.values.reshape(-1,1) # making X two dimensional (use .values so it also works with recent pandas)
y_train = train.Consumption

X_test = test.Year.values.reshape(-1,1) # making X two dimensional
y_test = test.Consumption

# Doing a polynomial regression: comparing linear, quadratic and cubic fits
# Pipeline helps you chain two objects to be built sequentially,
# in this case PolynomialFeatures() and LinearRegression()
r2_train = []
r2_test = []
degrees = [1, 2, 3]

for degree in degrees:
    pipeline = Pipeline([('poly_features', PolynomialFeatures(degree=degree)),
                         ('model', LinearRegression())])
    pipeline.fit(X_train, y_train)
    y_pred = pipeline.predict(X_test)
    r2_test.append(metrics.r2_score(y_test, y_pred))

    # training performance
    y_pred_train = pipeline.predict(X_train)
    r2_train.append(metrics.r2_score(y_train, y_pred_train))

    # plot predictions and actual values against year
    fig, ax = plt.subplots()
    ax.set_xlabel("Year")
    ax.set_ylabel("Power consumption")
    ax.set_title("Degree= " + str(degree))
    # train data
    ax.scatter(X_train, y_train)
    ax.plot(X_train, y_pred_train)
    # test data (fixed: previously the training data were scattered again here)
    ax.scatter(X_test, y_test)
    ax.plot(X_test, y_pred)
    plt.show()

# respective train and test r-squared scores of the predictions
print(degrees)
print(r2_train)
print(r2_test)
[1, 2, 3] [0.84237474021761372, 0.99088967445535958, 0.9979789881969624] [0.81651704638268097, 0.98760805026754717, 0.99848974839924587]
MIT
Section 16/AdvanceReg/Teclov_generalised_regression.ipynb
ashokjohn/ML_RealWorld
Import library
# !pip install --upgrade tables # !pip install eli5 # !pip install xgboost # !pip install hyperopt import pandas as pd import numpy as np import xgboost as xgb from sklearn.metrics import mean_absolute_error as mae from sklearn.model_selection import cross_val_score from hyperopt import hp, fmin, tpe, STATUS_OK import eli5 from eli5.sklearn import PermutationImportance cd "drive/My Drive/Colab Notebooks/dw_matrix_car" df = pd.read_hdf('data/car.h5') df.shape
_____no_output_____
MIT
matrix_two/day5.ipynb
jedrzejd/dw_matrix_car
Feature Engineering
SUFFIX_CAT = '__cat' for feat in df.columns: if isinstance(df[feat][0], list): continue factorized_values = df[feat].factorize()[0] if SUFFIX_CAT in feat: continue df[feat + SUFFIX_CAT] = factorized_values df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(lambda x: -1 if str(x) == 'None' else int(str(x).split('cm')[0].replace(' ', '')) ) df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(x.split(' ')[0]) ) df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) == 'None' else int(x)) def run_model(model, feats): X = df[feats].values y = df['price_value'].values scores = cross_val_score(model, X, y, cv = 3, scoring = 'neg_mean_absolute_error') return np.mean(scores), np.std(scores) feats = ['param_napęd__cat', 'param_rok-produkcji', 'param_stan__cat', 'param_skrzynia-biegów__cat', 'param_faktura-vat__cat', 'param_moc', 'param_marka-pojazdu__cat', 'feature_kamera-cofania__cat', 'param_typ__cat', 'param_pojemność-skokowa', 'seller_name__cat', 'feature_wspomaganie-kierownicy__cat', 'param_model-pojazdu__cat', 'param_wersja__cat', 'param_kod-silnika__cat', 'feature_system-start-stop__cat', 'feature_asystent-pasa-ruchu__cat', 'feature_czujniki-parkowania-przednie__cat', 'feature_łopatki-zmiany-biegów__cat', 'feature_regulowane-zawieszenie__cat'] xgb_parms = { 'max_depth': 5, 'n_estimators': 50, 'learning_rate': 0.1, 'seed': 0 } run_model(xgb.XGBRegressor( **xgb_parms), feats) def obj_func(params): mean_mae, score_std = run_model(xgb.XGBRegressor(**params), feats) print("Training with params:") print(params, np.abs(mean_mae)) return {'loss': np.abs(mean_mae), 'status': STATUS_OK} # space xgb_reg_params = { 'learning_rate': hp.choice('learning_rate', np.arange(0.05, 0.31, 0.05,dtype=float)), 'max_depth': hp.choice('max_depth', np.arange(5, 16, 1, dtype = int)), 'subsample': hp.quniform('subsample', 0.5, 1, 0.05), 'colsample_bytree': hp.quniform('colsample_bytree', 0.5, 1, 0.05), 'objective': 'reg:squarederror', 'n_estimators': 100, 'seed': 0 } # run best = fmin(obj_func, xgb_reg_params, algo = tpe.suggest, max_evals = 5)
Training with params: {'colsample_bytree': 0.6000000000000001, 'learning_rate': 0.15000000000000002, 'max_depth': 13, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.7000000000000001} 8021.26782298684 Training with params: {'colsample_bytree': 0.8500000000000001, 'learning_rate': 0.2, 'max_depth': 12, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.7000000000000001} 7670.749769854843 Training with params: {'colsample_bytree': 0.7000000000000001, 'learning_rate': 0.1, 'max_depth': 9, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.55} 8072.788374825047 Training with params: {'colsample_bytree': 0.9500000000000001, 'learning_rate': 0.2, 'max_depth': 14, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.9} 7661.6167853284 Training with params: {'colsample_bytree': 0.8, 'learning_rate': 0.1, 'max_depth': 13, 'n_estimators': 100, 'objective': 'reg:squarederror', 'seed': 0, 'subsample': 0.7000000000000001} 7484.451006042277 100%|██████████| 5/5 [06:06<00:00, 73.38s/it, best loss: 7484.451006042277]
MIT
matrix_two/day5.ipynb
jedrzejd/dw_matrix_car
Best Config XGBoost
feats = ['param_napęd__cat', 'param_rok-produkcji', 'param_stan__cat', 'param_skrzynia-biegów__cat', 'param_faktura-vat__cat', 'param_moc', 'param_marka-pojazdu__cat', 'feature_kamera-cofania__cat', 'param_typ__cat', 'param_pojemność-skokowa', 'seller_name__cat', 'feature_wspomaganie-kierownicy__cat', 'param_model-pojazdu__cat', 'param_wersja__cat', 'param_kod-silnika__cat', 'feature_system-start-stop__cat', 'feature_asystent-pasa-ruchu__cat', 'feature_czujniki-parkowania-przednie__cat', 'feature_łopatki-zmiany-biegów__cat', 'feature_regulowane-zawieszenie__cat'] xgb_best_params = { 'learning_rate': 0.1, 'max_depth': 13, 'subsample': 0.7, 'colsample_bytree': 0.8, 'objective': 'reg:squarederror', 'n_estimators': 100, 'seed': 0 } run_model(xgb.XGBRegressor( **xgb_best_params), feats)
_____no_output_____
MIT
matrix_two/day5.ipynb
jedrzejd/dw_matrix_car
Obtaining Statistics of the RoomNav dataset

1. Average geodesic distances
2. Histogram of distances vs. episodes
3. Average of top-down maps
4. Length of oracle
import habitat import numpy as np import random %matplotlib inline import matplotlib.pyplot as plt splits = ['train'] data_path = '../data/datasets/roomnav/mp3d/v1/{split}/{split}.json.gz' for split in splits: avg_gd = 0 avg_ed = 0 min_gd = 10000000000 max_gd = 0 min_ed = 10000000000 max_ed = 0 gd_dists = [] ed_dists = [] gd2ed = [] config = habitat.get_config(config_paths='../configs/tasks/pointnav_roomnav_mp3d.yaml') config.defrost() config.DATASET.DATA_PATH = data_path.format(split=split) config.DATASET.SCENES_DIR = '../data/scene_datasets/' config.freeze() env = habitat.Env(config=config) print('EPISODE COUNT:', len(env.episodes)) for i in range(len(env.episodes)): observations = env.reset() sp = env.current_episode.start_position tp = env.current_episode.goals[0].position gd = env.sim.geodesic_distance(sp, tp) ed = np.power(np.power(np.array(sp) - np.array(tp), 2).sum(0), 0.5) gd2ed.append(gd/ed) gd_dists.append(gd) ed_dists.append(ed) env.close() ed_dists = np.asarray(ed_dists) gd_dists = np.asarray(gd_dists) gd2ed = np.asarray(gd2ed) print('SPLIT: ', split) print('Average Euclidean Distance: ', np.mean(ed_dists)) print('Max Euclidean Distance: ', np.max(ed_dists)) print('Min Euclidean Distance: ', np.min(ed_dists)) print('Average Geodesic Distance: ', np.mean(gd_dists)) print('Max Geodesic Distance: ', np.max(gd_dists)) print('Min Geodesic Distance: ', np.min(gd_dists)) plt.hist(gd_dists.astype(int), bins=int(np.max(gd_dists))) plt.title("Geodesic Distance") plt.ylabel('Episodes') plt.show() plt.hist(ed_dists.astype(int), bins=int(np.max(ed_dists))) plt.title("Euclidean Distance") plt.ylabel('Episodes') plt.show() plt.hist(np.around(gd2ed, decimals=4), bins=100) plt.title("Geodesic to Euclidean Ratio") plt.ylabel('Episodes') plt.show() import habitat import numpy as np import random %matplotlib inline import matplotlib.pyplot as plt splits = ['test'] data_path = '../data/datasets/roomnav/mp3d/v1/{split}/{split}.json.gz' for split in splits: avg_gd = 0 avg_ed = 0 min_gd = 10000000000 max_gd = 0 min_ed = 10000000000 max_ed = 0 gd_dists = [] ed_dists = [] gd2ed = [] config = habitat.get_config(config_paths='../configs/tasks/pointnav_roomnav_mp3d.yaml') config.defrost() config.DATASET.DATA_PATH = data_path.format(split=split) config.DATASET.SCENES_DIR = '../data/scene_datasets/' config.freeze() env = habitat.Env(config=config) print(len(env.episodes)) for i in range(len(env.episodes)): observations = env.reset() sp = env.current_episode.start_position tp = env.current_episode.goals[0].position gd = env.sim.geodesic_distance(sp, tp) ed = np.power(np.power(np.array(sp) - np.array(tp), 2).sum(0), 0.5) gd2ed.append(gd/ed) gd_dists.append(gd) ed_dists.append(ed) env.close() ed_dists = np.asarray(ed_dists) gd_dists = np.asarray(gd_dists) gd2ed = np.asarray(gd2ed) print('SPLIT: ', split) print('Average Euclidean Distance: ', np.mean(ed_dists)) print('Max Euclidean Distance: ', np.max(ed_dists)) print('Min Euclidean Distance: ', np.min(ed_dists)) print('Average Geodesic Distance: ', np.mean(gd_dists)) print('Max Geodesic Distance: ', np.max(gd_dists)) print('Min Geodesic Distance: ', np.min(gd_dists)) plt.hist(gd_dists.astype(int), bins=int(np.max(gd_dists))) plt.title("Geodesic Distance") plt.ylabel('Episodes') plt.show() plt.hist(ed_dists.astype(int), bins=int(np.max(ed_dists))) plt.title("Euclidean Distance") plt.ylabel('Episodes') plt.show() plt.hist(np.around(gd2ed, decimals=4), bins=100) plt.title("Geodesic to Euclidean Ratio") plt.ylabel('Episodes') plt.show() '''Oracle Path Lengths''' 
import habitat_sim import json import gzip from pydash import py_ import numpy as np import tqdm import glob splits = ['train', 'test', 'val'] data_path = '../data/datasets/roomnav/mp3d/v1/{split}/{split}_all.json.gz' for split in splits: with gzip.open(data_path.format(split=split), "rt") as f: episodes = json.load(f)["episodes"] act_path_lens = [] for scene_id, eps in tqdm.tqdm(py_.group_by(episodes, "scene_id").items()): agent_cfg = habitat_sim.AgentConfiguration() sim_cfg = habitat_sim.SimulatorConfiguration() sim_cfg.scene.id = scene_id sim = habitat_sim.Simulator( habitat_sim.Configuration(sim_cfg, [agent_cfg]) ) for ep in tqdm.tqdm(eps, leave=False): state = sim.get_agent(0).state state.position = ep["start_position"] state.rotation = ep["start_rotation"] state.sensor_states = dict() sim.get_agent(0).state = state act_path_lens.append( len( sim.make_greedy_follower().find_path( ep["goals"][0]["position"] ) ) ) act_path_lens = np.array(act_path_lens) print('SPLIT: ', split) print("Min=", np.min(act_path_lens)) print("Mean=", np.mean(act_path_lens)) print("Median=", np.median(act_path_lens)) print("Max=", np.max(act_path_lens)) import os import shutil import cv2 import numpy as np import habitat from habitat.tasks.nav.shortest_path_follower import ShortestPathFollower from habitat.utils.visualizations import maps class SimpleRLEnv(habitat.RLEnv): def get_reward_range(self): return [-1, 1] def get_reward(self, observations): return 0 def get_done(self, observations): return self.habitat_env.episode_over def get_info(self, observations): return self.habitat_env.get_metrics() def get_original_map(): top_down_map = maps.get_topdown_map( self._sim, self._map_resolution, self._num_samples, self._config.DRAW_BORDER, ) range_x = np.where(np.any(top_down_map, axis=1))[0] range_y = np.where(np.any(top_down_map, axis=0))[0] self._ind_x_min = range_x[0] self._ind_x_max = range_x[-1] self._ind_y_min = range_y[0] self._ind_y_max = range_y[-1] return top_down_map def draw_source_and_target(top_down_map, episode): # mark source point s_x, s_y = maps.to_grid( episode.start_position[0], episode.start_position[2], self._coordinate_min, self._coordinate_max, self._map_resolution, ) point_padding = 2 * int( np.ceil(self._map_resolution[0] / MAP_THICKNESS_SCALAR) ) top_down_map[ s_x - point_padding : s_x + point_padding + 1, s_y - point_padding : s_y + point_padding + 1, ] = maps.MAP_SOURCE_POINT_INDICATOR # mark target point t_x, t_y = maps.to_grid( episode.goals[0].position[0], episode.goals[0].position[2], self._coordinate_min, self._coordinate_max, self._map_resolution, ) top_down_map[ t_x - point_padding : t_x + point_padding + 1, t_y - point_padding : t_y + point_padding + 1, ] = maps.MAP_TARGET_POINT_INDICATOR return top_down_map def draw_top_down_map(info, heading, output_size): top_down_map = maps.colorize_topdown_map(info["top_down_map"]["map"]) original_map_size = top_down_map.shape[:2] map_scale = np.array( (1, original_map_size[1] * 1.0 / original_map_size[0]) ) new_map_size = np.round(output_size * map_scale).astype(np.int32) # OpenCV expects w, h but map size is in h, w top_down_map = cv2.resize(top_down_map, (new_map_size[1], new_map_size[0])) map_agent_pos = info["top_down_map"]["agent_map_coord"] map_agent_pos = np.round( map_agent_pos * new_map_size / original_map_size ).astype(np.int32) top_down_map = maps.draw_agent( top_down_map, map_agent_pos, heading - np.pi / 2, agent_radius_px=top_down_map.shape[0] / 40, ) return top_down_map def shortest_path_example(): splits = ['train', 'test', 
'val'] data_path = '../data/datasets/roomnav/mp3d/v1/{split}/{split}.json.gz' for split in splits: config = habitat.get_config(config_paths="configs/tasks/roomnav_mp3d.yaml") config.defrost() config.DATASET.DATA_PATH = data_path.format(split=split) config.DATASET.SCENES_DIR = '../data/scene_datasets/' config.TASK.MEASUREMENTS.append("TOP_DOWN_MAP") config.TASK.SENSORS.append("HEADING_SENSOR") config.freeze() outfile = 'AverageTopDown-{split}'.format(split=split) env = SimpleRLEnv(config=config) goal_radius = env.episodes[0].goals[0].radius if goal_radius is None: goal_radius = config.SIMULATOR.FORWARD_STEP_SIZE # follower = ShortestPathFollower(env.habitat_env.sim, goal_radius, False) # follower.mode = mode print("Environment creation successful") for episode in range(len(env.episodes)): observations = env.reset() # dirname = os.path.join( # IMAGE_DIR, "shortest_path_example", mode, "%02d" % episode # ) # if os.path.exists(dirname): # shutil.rmtree(dirname) # os.makedirs(dirname) top_down_map = env.get_info(observations) print(top_down_map) # top_down_map = draw_source_and_target(draw_source_and_target, env.episode) plt.hist(gd2ed, bins=600) plt.title("Geodesic to Euclidean Ratio") plt.ylabel('Episodes') plt.show()
_____no_output_____
MIT
notebooks/dataset_statistics.ipynb
medhini/habitat-api
Validation set
embs_model = learn.model.eval() embs_model.outputEmbs = True valid_embs, _ = embs_from_model(embs_model, dls.valid) dists, inds = get_nearest(valid_embs, do_chunk(valid_embs)) valid_df=train_df[train_df.is_valid==True].copy().reset_index() valid_df = add_target_groups(valid_df) pairs = sorted_pairs(dists, inds)[:len(valid_df)*10] _=build_from_pairs(pairs, valid_df.target.to_list()) torch.save(learn.model.bert_model.state_dict(), 'models/bert_large_state.pth')
_____no_output_____
MIT
SBert.ipynb
slawekslex/shopee
Create a regulus file from a csv

`knn` is the size of the neighborhood. The default is 100, which is usually sufficient.
import regulus gauss4 = regulus.from_csv('gauss4', knn=8) regulus.save(gauss4, filename='gauss4')
_____no_output_____
BSD-3-Clause
examples/0-gauss_to_regulus.ipynb
yarden-livnat/ipyregulus
TSG081 - Get namespaces (Kubernetes)
====================================

Description
-----------

Get the kubernetes namespaces

Steps
-----

Common functions

Define helper functions used in this notebook.
# Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows import sys import os import re import json import platform import shlex import shutil import datetime from subprocess import Popen, PIPE from IPython.display import Markdown retry_hints = {} # Output in stderr known to be transient, therefore automatically retry error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help install_hint = {} # The SOP to help install the executable if it cannot be found first_run = True rules = None debug_logging = False def run(cmd, return_output=False, no_output=False, retry_count=0): """Run shell command, stream stdout, print stderr and optionally return output NOTES: 1. Commands that need this kind of ' quoting on Windows e.g.: kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name} Need to actually pass in as '"': kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name} The ' quote approach, although correct when pasting into Windows cmd, will hang at the line: `iter(p.stdout.readline, b'')` The shlex.split call does the right thing for each platform, just use the '"' pattern for a ' """ MAX_RETRIES = 5 output = "" retry = False global first_run global rules if first_run: first_run = False rules = load_rules() # When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see: # # ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)') # if platform.system() == "Windows" and cmd.startswith("azdata sql query"): cmd = cmd.replace("\n", " ") # shlex.split is required on bash and for Windows paths with spaces # cmd_actual = shlex.split(cmd) # Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries # user_provided_exe_name = cmd_actual[0].lower() # When running python, use the python in the ADS sandbox ({sys.executable}) # if cmd.startswith("python "): cmd_actual[0] = cmd_actual[0].replace("python", sys.executable) # On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail # with: # # UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128) # # Setting it to a default value of "en_US.UTF-8" enables pip install to complete # if platform.system() == "Darwin" and "LC_ALL" not in os.environ: os.environ["LC_ALL"] = "en_US.UTF-8" # When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc` # if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ: cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc") # To aid supportabilty, determine which binary file will actually be executed on the machine # which_binary = None # Special case for CURL on Windows. The version of CURL in Windows System32 does not work to # get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance # of CURL exists on the machine use that one. 
(Unfortunately the curl.exe in System32 is almost # always the first curl.exe in the path, and it can't be uninstalled from System32, so here we # look for the 2nd installation of CURL in the path) if platform.system() == "Windows" and cmd.startswith("curl "): path = os.getenv('PATH') for p in path.split(os.path.pathsep): p = os.path.join(p, "curl.exe") if os.path.exists(p) and os.access(p, os.X_OK): if p.lower().find("system32") == -1: cmd_actual[0] = p which_binary = p break # Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this # seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound) # # NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split. # if which_binary == None: which_binary = shutil.which(cmd_actual[0]) if which_binary == None: if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None: display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") else: cmd_actual[0] = which_binary start_time = datetime.datetime.now().replace(microsecond=0) print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)") print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})") print(f" cwd: {os.getcwd()}") # Command-line tools such as CURL and AZDATA HDFS commands output # scrolling progress bars, which causes Jupyter to hang forever, to # workaround this, use no_output=True # # Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait # wait = True try: if no_output: p = Popen(cmd_actual) else: p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1) with p.stdout: for line in iter(p.stdout.readline, b''): line = line.decode() if return_output: output = output + line else: if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file regex = re.compile(' "(.*)"\: "(.*)"') match = regex.match(line) if match: if match.group(1).find("HTML") != -1: display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"')) else: display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"')) wait = False break # otherwise infinite hang, have not worked out why yet. else: print(line, end='') if rules is not None: apply_expert_rules(line) if wait: p.wait() except FileNotFoundError as e: if install_hint is not None: display(Markdown(f'HINT: Use {install_hint} to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait() if not no_output: for line in iter(p.stderr.readline, b''): try: line_decoded = line.decode() except UnicodeDecodeError: # NOTE: Sometimes we get characters back that cannot be decoded(), e.g. 
# # \xa0 # # For example see this in the response from `az group create`: # # ERROR: Get Token request returned http error: 400 and server # response: {"error":"invalid_grant",# "error_description":"AADSTS700082: # The refresh token has expired due to inactivity.\xa0The token was # issued on 2018-10-25T23:35:11.9832872Z # # which generates the exception: # # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte # print("WARNING: Unable to decode stderr line, printing raw bytes:") print(line) line_decoded = "" pass else: # azdata emits a single empty line to stderr when doing an hdfs cp, don't # print this empty "ERR:" as it confuses. # if line_decoded == "": continue print(f"STDERR: {line_decoded}", end='') if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"): exit_code_workaround = 1 # inject HINTs to next TSG/SOP based on output in stderr # if user_provided_exe_name in error_hints: for error_hint in error_hints[user_provided_exe_name]: if line_decoded.find(error_hint[0]) != -1: display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.')) # apply expert rules (to run follow-on notebooks), based on output # if rules is not None: apply_expert_rules(line_decoded) # Verify if a transient error, if so automatically retry (recursive) # if user_provided_exe_name in retry_hints: for retry_hint in retry_hints[user_provided_exe_name]: if line_decoded.find(retry_hint) != -1: if retry_count < MAX_RETRIES: print(f"RETRY: {retry_count} (due to: {retry_hint})") retry_count = retry_count + 1 output = run(cmd, return_output=return_output, retry_count=retry_count) if return_output: return output else: return elapsed = datetime.datetime.now().replace(microsecond=0) - start_time # WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so # don't wait here, if success known above # if wait: if p.returncode != 0: raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n') else: if exit_code_workaround !=0 : raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n') print(f'\nSUCCESS: {elapsed}s elapsed.\n') if return_output: return output def load_json(filename): """Load a json file from disk and return the contents""" with open(filename, encoding="utf8") as json_file: return json.load(json_file) def load_rules(): """Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable""" try: # Load this notebook as json to get access to the expert rules in the notebook metadata. # j = load_json("tsg081-get-kubernetes-namespaces.ipynb") except: pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename? else: if "metadata" in j and \ "azdata" in j["metadata"] and \ "expert" in j["metadata"]["azdata"] and \ "rules" in j["metadata"]["azdata"]["expert"]: rules = j["metadata"]["azdata"]["expert"]["rules"] rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first. 
# print (f"EXPERT: There are {len(rules)} rules to evaluate.") return rules def apply_expert_rules(line): """Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so inject a 'HINT' to the follow-on SOP/TSG to run""" global rules for rule in rules: # rules that have 9 elements are the injected (output) rules (the ones we want). Rules # with only 8 elements are the source (input) rules, which are not expanded (i.e. TSG029, # not ../repair/tsg029-nb-name.ipynb) if len(rule) == 9: notebook = rule[1] cell_type = rule[2] output_type = rule[3] # i.e. stream or error output_type_name = rule[4] # i.e. ename or name output_type_value = rule[5] # i.e. SystemExit or stdout details_name = rule[6] # i.e. evalue or text expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it! if debug_logging: print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.") if re.match(expression, line, re.DOTALL): if debug_logging: print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook)) match_found = True display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.')) print('Common functions defined successfully.') # Hints for binary (transient fault) retry, (known) error and install guide # retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond']} error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']]} install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb']}
_____no_output_____
MIT
Big-Data-Clusters/CU3/Public/content/monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb
gantz-at-incomm/tigertoolbox
Show the Kubernetes namespaces
run('kubectl get namespace')
_____no_output_____
MIT
Big-Data-Clusters/CU3/Public/content/monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb
gantz-at-incomm/tigertoolbox
Show the Kubernetes namespaces with labels

Kubernetes namespaces containing a SQL Server Big Data Cluster have the label 'MSSQL\_CLUSTER'.
run('kubectl get namespaces -o custom-columns=NAME:.metadata.name,STATUS:.status.phase,LABELS:.metadata.labels') print('Notebook execution complete.')
_____no_output_____
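As a small, optional extension to the step above (not part of the original TSG), the namespaces that actually carry the MSSQL\_CLUSTER label can be listed directly with a Kubernetes label selector; this sketch assumes the `run` helper defined earlier in this notebook is available:

# Hedged sketch: list only the namespaces that have the MSSQL_CLUSTER label key set,
# using a Kubernetes "exists" label selector, and reuse the run() helper defined above.
run('kubectl get namespaces -l MSSQL_CLUSTER -o custom-columns=NAME:.metadata.name,STATUS:.status.phase,LABELS:.metadata.labels')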
MIT
Big-Data-Clusters/CU3/Public/content/monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb
gantz-at-incomm/tigertoolbox
Introduction to R

This introduction to the R language aims at understanding how to represent and manipulate data objects as commonly found in *data science*, how to provide basic summary statistics and how to build relevant graphical representations of the data. **Important notice:** Only base commands are discussed here, not the [tidyverse](https://www.tidyverse.org). A separate cheatsheet is available for the `ggplot2` package (TODO).

Installing R and RStudio

The R statistical package can be installed from [CRAN](https://cran.r-project.org). Be sure to also download [RStudio](https://www.rstudio.com), as it provides a full-featured user interface to interact with R. To use Jupyter notebooks, you will also need the [IR kernel](https://irkernel.github.io).

Useful additional packages

This tutorial mainly relies on core facilities that come with the so-called R [base packages](https://stackoverflow.com/a/9705725). However, it is possible to install additional packages and, in particular, the [ggplot2](https://ggplot2.tidyverse.org) package, as shown below:

    install.packages("ggplot2")

Setup

The following setup will be used for graphical displays:
library(ggplot2) theme_set(theme_minimal())
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
Note that you need to load the `ggplot2` package only once, at the start of your R session.

Getting started

Variables

There are fundamentally two kinds of data structures in statistics-oriented programming languages: numbers and strings. Numbers can be integers or real numbers, and they are used to represent values observed for a continuous or discrete statistical variable, while strings are everything else that cannot be represented as numbers or lists of numbers, e.g. the address of a building, the answer to an open-ended question in a survey, etc.

Here is how we can create a simple variable, say `x`, to store a list of 5 numerical values:
x <- c(1, 3, 2, 5, 4)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
Note that the symbol `<-` is the recommended assignment operator, yet it is possible to use `=` to assign some quantity to a given variable, which appears on the left-hand side of the above expression. Also, the series of values is given between round brackets, and each value is separated by a comma. From now on, we will talk interchangeably of values or of observations, as if we were talking of a measure collected on a statistical unit.

Some properties of this newly created variable can be queried interactively, e.g. how many elements `x` has or how those elements are represented in R:
length(x) typeof(x)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
It should be noted that `x` contains values stored as real numbers (`double`), even though they could just as well be stored as integers. It is, however, possible to ask R to use true integer values:
x <- c(1L, 3L, 2L, 5L, 4L) typeof(x)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
The distinction between 32-bit integers and reals will not be that important in common analysis tasks, but it is worth keeping in mind that it is sometimes useful to check whether data are represented as expected, especially in the case of categorical variables, also called 'factors' in R parlance (more on this later). The list of numbers we stored in `x` is called a *vector*, and it is one of the building blocks of common R data structures. Oftentimes, we will need richer data structures, especially two-dimensional objects, like *matrix* or *data frame*, or higher-dimensional objects such as *array* or *list*.

![](assets/lang-r-base-001.png)

Vectors

The command `c` ('concatenate') we used to create our list of integers will be very useful when it comes to passing multiple options to a command. It can be nested into another call to `c`, as in the following example:
x <- c(c(1, 2, 3), c(4, 5, 6), 7, 8)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
In passing, note that since we reuse the same name for our newly created variable, `x`, the old content referenced by `x` (1, 3, 2, 5, 4) is lost for good. Once you have a vector of values, you can access each item by providing the (one-based) index of the item(s), e.g.:
x[1] x[3] x[c(1,3)] x[1:3]
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
A convenient shorthand notation for a regular sequence of integers is `start:end`, where `start` is the starting value and `end` is the last value (both included). Hence, `c(1,2,3,4)` is the same as `1:4`. This is useful when one wants to preview the first 3 or 5 values of a vector, for example. A more general function to work with regular sequences of numbers is `seq`. Here is an example of use:
seq(1, 10) seq(1, 10, by = 2) seq(0, 10, length = 5)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
Updating the content of a vector can be done directly by assigning a new value to one of its items:
x[3] <- NA
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
In the above statement, the third item has been assigned a missing value, which is coded as `NA` ('not available') in R. Again, there is no way to go back to the previous state of the variable, so be careful when updating its content.

Checking for missing data is important before engaging in any serious statistical analysis. The `is.na` function can be used to check for the presence of any missing values in a variable, while `which` will return the indices that match a `TRUE` result, if any:
is.na(x) which(is.na(x))
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
Notice that many functions like `is.na` or `which` act in a vectorized way, meaning that you don't have to iterate manually over each item in the vector. Moreover, function calls can be nested one into the other: in the latter R expression, `which` is actually processing the values returned by the call to `is.na`.

Vectors and random sampling

The `sample` function allows you to randomly shuffle an existing vector or to generate a sequence of random numbers. Whenever we rely on the random number generator (RNG), it is recommended to set its seed (e.g. with `set.seed`) so that the pseudo-random sequences can be reproduced later. Here is an illustration:
s <- c(1, 4, 2, 3, 8) sample(s) sample(1:10, size = 5) sample(0:1, size = 10, replace = TRUE)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
In summary, `sample(1:n, size = n)` returns a permutation of the `n` elements, while `sample(1:n, size = n, replace = TRUE)` provides a bootstrap sample of the original data.

Sorting

Sorting a list of values or finding the index of any value in a vector are common tasks in statistical programming. This is different from computing the ranks of the observed values, which is handled by the `rank` function. The two main instructions to sort a list of values and to get the indices of the sorted items are `sort` and `order`, respectively:
z <- c(1, 6, 7, 2, 8, 3, 9, 4, 5) sort(z) order(z)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
Data frames

Data frames are one of the core data structures to store and represent statistical data. Many routine functions used to load data stored in flat files or databases, or to preprocess data stored in memory, rely on data frames. Likewise, graphical commands such as those found in the `ggplot2` package generally assume a data frame as input. The same applies to functions used in statistical modeling (`lm`, `glm`, etc.).

In a data frame, observations are arranged in rows and variables in columns. Each variable can be viewed as a single vector, but those variables are all recorded in a common data structure, each with a unique name. Moreover, each column, or variable, can be of a different type--numeric, factor, character or boolean--which makes data frames slightly different from 'matrix' objects, where only values of the same type can be stored.

Here is an example of a built-in data frame, readily available by using the command `data`:
data(ToothGrowth) head(ToothGrowth) str(ToothGrowth)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
While `head` lets you preview the first 6 lines of a data frame, `str` provides a concise overview of what is available in the data frame, namely: the name of each variable (column), its mode of representation, and the first 10 observations (values).

The dimensions (number of rows and columns) of a data frame can be checked using `dim` (a shortcut for the combination of `nrow` and `ncol`):
dim(ToothGrowth)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
To access any given cell in this data frame, we will use the indexing trick we used in the case of vectors, but this time we have to indicate the row number as well as the column number, or name. Hence, `ToothGrowth[i,j]` means the value located at row `i` and column `j`, while `ToothGrowth[c(a,b),j]` would mean the values at rows `a` and `b` for the same column `j`.

![](assets/lang-r-base-002.png)

Here is how we can retrieve the second observation in the first column:
ToothGrowth[2,1]
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
Since the columns of a data frame have names, it is equivalent to use `ToothGrowth[2,1]` and `ToothGrowth[2,"len"]`. In the latter case, variable names must be quoted. Column names can be displayed using `colnames` or `names` (in the special case of data frames), while row names are available *via* `rownames`. Row names can be used as unique identifiers for statistical units, but best practice is usually to store unique IDs as characters or factor levels in a dedicated column of the data frame.

Since we know that we can use `c` to create a list of numbers, we can use `c` to create a list of row numbers to look for. Imagine you want to access the content of a given column (`len`, which is the first column, numbered 1) for rows 2 and 4 (`c(2, 4)`):

![](assets/lang-r-base-003.png)

Here is how we would do it in R:
ToothGrowth[c(2,4),1]
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
This amounts to 'indexed selection', meaning that we need to provide the row (or column) numbers, while most of the time we are interested in criterion-based indexing, that is: "which observations fulfill a given criterion?" We generally call this a 'filter'. Since most R operations are vectorized, this happens to be really easy. For instance, to display the observations on `supp` that satisfy the condition `len > 6`, we would use:
head(ToothGrowth$supp[ToothGrowth$len > 6])
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
![](assets/lang-r-base-004.png)

Likewise, it is possible to combine different filters using logical operators: `&` stands for 'and' (logical conjunction) and `|` stands for 'or' (logical disjunction); logical equality is denoted `==`, while its negation reads `!=`. Here is an example where we want to select the observations that satisfy a given condition on both the `dose` (dose = 0.5) and `len` (len > 10) variables:

![](assets/lang-r-base-005.png)

In R, we would write:
ToothGrowth[ToothGrowth$len > 10 & ToothGrowth$dose < 1, ]
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
You will soon realize that for complex queries this notation becomes quite cumbersome: every variable must be prefixed by the name of the data frame, which can result in very long statements. While this is recommended practice for programming or when developing a dedicated package, it is easier to rely on `subset` in an interactive session. The `subset` command takes three arguments, namely the name of the data frame we are working on, the rows we want to select (or filter), and the columns we want to return. The result of a call to `subset` is always a data frame.

![](assets/lang-r-base-006.png)

Here is an example of use:
subset(ToothGrowth, len > 10 & dose < 1)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
![](assets/lang-r-base-007.png) It is also possible to use the technique discussed in the case of vectors to sort a data frame in ascending or descending order according to one or more variables. Here is an example using the `len` variable:
head(ToothGrowth) head(ToothGrowth[order(ToothGrowth$len),])
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
The `which` function can also be used to retrieve the positions of specific observations in a data frame, as in the following instruction:
which(ToothGrowth$len < 8)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
Statistical summaries

As explained above, the `str` function is useful to check a given data structure, and individual properties of a data frame can be queried using dedicated functions, e.g. `nrow` or `ncol`. Now, to compute statistical quantities on a variable, we can use dedicated functions like `mean` (arithmetical mean), `sd` (standard deviation; see also `var`), `IQR` (interquartile range), `range` (range of values; see also `min` and `max`), or the `summary` function, which computes the five-number summary (plus the mean) in the case of a numerical variable, or a table of counts for categorical outcomes.

Univariate case

Here are some applications in the case where we are interested in summarizing one variable at a time:
mean(ToothGrowth$len) range(ToothGrowth$len) c(min(ToothGrowth$len), max(ToothGrowth$len)) table(ToothGrowth$dose) summary(ToothGrowth)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
Of course, the above functions can be applied to a subset of the original data set:
mean(ToothGrowth$len[ToothGrowth$dose == 1]) table(ToothGrowth$dose[ToothGrowth$len < 20])
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
Bivariate case

If we want to summarize a numerical variable according to the values that a factor variable takes, we can use `tapply` or `aggregate`. The latter expects a 'formula' describing the relation between the variables we are interested in: the response variable, or outcome, appears on the left-hand side (LHS), while the factors, or descriptors, are listed on the right-hand side (RHS). The last argument to `aggregate` is the function we want to apply to each chunk of observations defined by the RHS. Here is an example of use:
aggregate(len ~ dose, data = ToothGrowth, mean) aggregate(len ~ supp + dose, data = ToothGrowth, mean)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
Note that only one function can be applied to the 'formula'. Even if it is possible to write a custom function that computes both the mean and the standard deviation of a variable, both results will be returned as a single column in the data frame returned by `aggregate`. There exist other ways to perform such computations, though (see, e.g., the `plyr`, `dplyr` or `Hmisc` packages, to name a few), if the results are to be kept in separate variables for later use. This, however, does not preclude using `aggregate` to print multivariate results in the console:
aggregate(len ~ dose, data = ToothGrowth, summary) f <- function(x) c(mean = mean(x), sd = sd(x)) aggregate(len ~ dose, data = ToothGrowth, f)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
The `table` function also works with two (or even three) variables:
table(ToothGrowth$dose, ToothGrowth$supp)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
If formulas are to be preferred, the `xtabs` function provides a convenient replacement for `table`:
xtabs(~ dose + supp, data = ToothGrowth)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
In either case, frequencies can be computed from the table of counts using `prop.table`, with the desired margin (row = 1, column = 2) in the bivariate case:
prop.table(table(ToothGrowth$dose)) prop.table(table(ToothGrowth$dose, ToothGrowth$supp), margin = 1)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
Practical use case: The ESS survey

The `data` directory includes three [RDS](https://www.rdocumentation.org/packages/base/versions/3.5.3/topics/readRDS) files related to the [European Social Survey](https://www.europeansocialsurvey.org) (ESS). This survey first ran in 2002 (round 1) and is repeated every two years. The codebook can be downloaded, along with [other data sheets](http://www.europeansocialsurvey.org/data/download.html), from the main website.

There are two files related to data collected in France (round 1 or rounds 1-5, `ess-*-fr.rds`) and one file for all participating countries (`ess-one-round.rds`).

French data

Assuming the `data` directory is available in the current working directory, here is how we can load the French data for round 1:
d <- readRDS("data/ess-one-round-fr.rds") head(d[1:10]) table(d$yrbrn) summary(d$agea)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
Let us focus on the following list of variables, readily available in the file `ess-one-round-29vars-fr.rds`:

- `tvtot`: TV watching, total time on average weekday
- `rdtot`: Radio listening, total time on average weekday
- `nwsptot`: Newspaper reading, total time on average weekday
- `polintr`: How interested in politics
- `trstlgl`: Trust in the legal system
- `trstplc`: Trust in the police
- `trstplt`: Trust in politicians
- `vote`: Voted last national election
- `happy`: How happy are you
- `sclmeet`: How often socially meet with friends, relatives or colleagues
- `inmdisc`: Anyone to discuss intimate and personal matters with
- `sclact`: Take part in social activities compared to others of same age
- `health`: Subjective general health
- `ctzcntr`: Citizen of country
- `brncntr`: Born in country
- `facntr`: Father born in country
- `mocntr`: Mother born in country
- `hhmmb`: Number of people living regularly as member of household
- `gndr`: Gender
- `yrbrn`: Year of birth
- `agea`: Age of respondent, calculated
- `edulvla`: Highest level of education
- `eduyrs`: Years of full-time education completed
- `pdjobyr`: Year last in paid job
- `wrkctr`: Employment contract unlimited or limited duration
- `wkhct`: Total contracted hours per week in main job overtime excluded
- `marital`: Legal marital status
- `martlfr`: Legal marital status, France
- `lvghw`: Currently living with husband/wife

Recoded French data

Note that variables in the file `ess-one-round-29vars-fr.rds` have been recoded and categorical variables now have proper labels. See the script file `scripts/ess-one-round-29vars-fr.r` to see what has been done to the base file.
d <- readRDS("data/ess-one-round-29vars-fr.rds")
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
First, let us look at the distribution of the `gndr` variable, using a simple bar diagram:
summary(d$gndr) p <- ggplot(data = d, aes(x = gndr)) + geom_bar() + labs(x = "Sex of respondant", y = "Counts") p
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
Now, let's look at the distribution of age. The `ggplot2` package offers a `geom_density` function, but it is also possible to draw a line using the precomputed empirical density function, or to let `ggplot2` compute the density itself using the `stat=` option. Here is how it looks:
summary(d$agea) p <- ggplot(data = d, aes(x = agea)) + geom_line(stat = "density", bw = 2) + labs(x = "Age of respondant") p
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
The distribution of age can also be represented as a histogram, and `ggplot2` makes it quite easy to split the display depending on the sex of the respondents, which is called a 'facet' in `ggplot2` parlance:
p <- ggplot(data = d, aes(x = agea)) + geom_histogram(binwidth = 5) + facet_grid(~ gndr) + labs(x = "Age of respondant") p
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
Finally, a boxplot might also be an option, especially when we want to compare the distribution of a numerical variable across the levels of a categorical variable. The `coord_flip` instruction is used to swap the X and Y axes, but keep in mind that the `x=` and `y=` labels still refer to the `x=` and `y=` variables defined in `aes()`:
p <- ggplot(data = d, aes(x = gndr, y = agea)) + geom_boxplot() + coord_flip() + labs(x = NULL, y = "Age of respondants") p
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
**Sidenote:** In the above instructions, we used the following convention to build a `ggplot2` object:

- we assign to a variable, say `p`, the call to `ggplot` plus any further instructions ('geom', 'scale', 'coord_', etc.) combined with the `+` operator;
- we use only one `aes()` structure, when calling `ggplot`, so that it is clear what the data are and which variables are used;
- we display the graph at the end, by simply calling our variable `p`.

Yet, it is possible to proceed in many different ways, depending on your taste and needs. The following instructions are all valid expressions and will yield the same result:

    ggplot(data = d, aes(x = gndr, y = agea, color = vote)) + geom_boxplot()

    p <- ggplot(data = d, aes(x = gndr, y = agea, color = vote))
    p <- p + geom_boxplot() + labs(x = "Gender")
    p

    p <- ggplot(data = d, aes(x = gndr, y = agea))
    p + geom_boxplot(aes(color = vote)) + labs(x = "Gender")

Moreover, it is also possible to use the quick one-liner version of `ggplot`, namely `qplot`:

    qplot(x = gndr, y = agea, data = d, color = vote, geom = "boxplot") + labs(x = "Gender")

or even:

    qplot(x = gndr, y = agea, data = d, color = vote, geom = "boxplot", xlab = "Gender")

Further details are available in the handout "lang-r-ggplot".

Data from other countries

Data from all other participating countries can be loaded in the same manner:
db <- readRDS("data/ess-one-round.rds") cat("No. observations =", nrow(db)) table(db$cntry)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
Since French data are (deliberately) missing from this dataset, we can append them to the above data frame as follows:
db <- rbind.data.frame(db, d) cat("No. observations =", nrow(db)) db$cntry <- factor(db$cntry) table(db$cntry)
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
Remember that it is also possible to use `summary()` with a factor variable to display a table of counts. In this particular case, we are just appending a data frame to another data frame already loaded in memory. This assumes that both share the same columns, of course. Sometimes another common operation is needed: an 'inner join' between two data tables. For example, imagine that part of the information is spread out in one data frame, while the rest of the data sits in another data frame. If the two data frames have a common unique ID, it is then easy to merge them using the `merge` command. Here is a simplified example using the above data, which we will split in two beforehand:
db$id <- 1:nrow(db) db1 <- db[,c(1:10,ncol(db))] db2 <- db[,c(11:(ncol(db)-1),ncol(db))] all <- merge(db1, db2, by = "id")
_____no_output_____
BSD-3-Clause
lang-r-base.ipynb
duchesnay/dspyr
IMPORT
import os import sys import logging import subprocess import numpy as np from shutil import copy sys.path.insert(0, '/home/yongliang/third_party/merlin/src') from io_funcs.binary_io import BinaryIOCollection %load_ext autoreload %autoreload 2
The autoreload extension is already loaded. To reload it, use: %reload_ext autoreload
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
UTILITY
def get_file_list_of_dir(dir_path): res = [os.path.join(dir_path, f) for f in os.listdir(dir_path)] res.sort() return res def gen_file_list(dir_path, file_id_list, ext): return [os.path.join(dir_path, f + '.' + ext) for f in file_id_list] def get_file_id_list(file_list): return [os.path.splitext(os.path.basename(f))[0] for f in file_list] io_funcs = BinaryIOCollection()
_____no_output_____
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
CONFIGURATION
merlin_dir = '/home/yongliang/third_party/merlin' silence_pattern = ['*-pau+*', '*-sil+*'] curr_dir = os.getcwd() # hardcoded nit_dir = os.path.join(curr_dir, 'nit') wav_dir = os.path.join(nit_dir, 'wav2') exp_dir = os.path.join(curr_dir, 'exp') if not os.path.exists(exp_dir): os.makedirs(exp_dir) lab_dir = os.path.join(exp_dir, 'lab') orig_lab_file_list = get_file_list_of_dir(lab_dir) file_id_list = get_file_id_list(orig_lab_file_list) SPTK = {'VOPR': '/home/yongliang/third_party/merlin/tools/bin/SPTK-3.9/vopr', 'MGC2SP': '/home/yongliang/third_party/merlin/tools/bin/SPTK-3.9/mgc2sp', 'C2ACR': '/home/yongliang/third_party/merlin/tools/bin/SPTK-3.9/c2acr', 'FREQT': '/home/yongliang/third_party/merlin/tools/bin/SPTK-3.9/freqt', 'MC2B': '/home/yongliang/third_party/merlin/tools/bin/SPTK-3.9/mc2b', 'MLPG': '/home/yongliang/third_party/merlin/tools/bin/SPTK-3.9/mlpg', 'B2MC': '/home/yongliang/third_party/merlin/tools/bin/SPTK-3.9/b2mc', 'VSUM': '/home/yongliang/third_party/merlin/tools/bin/SPTK-3.9/vsum', 'MERGE': '/home/yongliang/third_party/merlin/tools/bin/SPTK-3.9/merge', 'SOPR': '/home/yongliang/third_party/merlin/tools/bin/SPTK-3.9/sopr', 'BCP': '/home/yongliang/third_party/merlin/tools/bin/SPTK-3.9/bcp', 'VSTAT': '/home/yongliang/third_party/merlin/tools/bin/SPTK-3.9/vstat', 'X2X': '/home/yongliang/third_party/merlin/tools/bin/SPTK-3.9/x2x'} WORLD = {'SYNTHESIS': '/home/yongliang/third_party/merlin/tools/bin/WORLD/synth', 'ANALYSIS': '/home/yongliang/third_party/merlin/tools/bin/WORLD/analysis'}
_____no_output_____
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
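The SPTK and WORLD dictionaries above only store paths to the command-line binaries. A minimal sketch of how they might be driven from Python with subprocess follows; the `+fa` option (print a float binary file as ASCII) is standard SPTK usage, but the exact command line is an illustrative assumption rather than something this notebook runs.

# Hedged sketch: build a shell command from the SPTK dict and run it.
def run_tool(cmd_string):
    # cmd_string is a complete shell command line, e.g. assembled from SPTK[...]
    subprocess.call(cmd_string, shell=True)

# e.g. inspect a (hypothetical) binary float file as text:
# run_tool(SPTK['X2X'] + ' +fa some_file.lf0 | head')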
Prepare label files
hmm_dir = os.path.join(exp_dir, 'hmm')
full_dir = os.path.join(nit_dir, 'full')
mono_dir = os.path.join(nit_dir, 'mono')
phones = os.path.join(nit_dir, 'monophone')

from src.forced_alignment import ForcedAlignment
aligner = ForcedAlignment(hmm_dir, wav_dir, full_dir, mono_dir, phones, lab_dir)
aligner.prepare_training()
aligner.train_hmm(7, 32)
aligner.align()
---make file_id_list.scp: /home/yongliang/third_party/merlin/egs/singing_synthesis/s3/exp/hmm/file_id_list.scp ---make copy.scp: /home/yongliang/third_party/merlin/egs/singing_synthesis/s3/exp/hmm/config/copy.scp ---mfcc extraction at: /home/yongliang/third_party/merlin/egs/singing_synthesis/s3/exp/hmm/mfc ------make copy.cfg: /home/yongliang/third_party/merlin/egs/singing_synthesis/s3/exp/hmm/config/cfg ------extracting mfcc features...
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
Feature extraction
feat_dir = os.path.join(exp_dir, 'feat')
lf0_dir = os.path.join(feat_dir, 'lf0')
bap_dir = os.path.join(feat_dir, 'bap')
mgc_dir = os.path.join(feat_dir, 'mgc')
sample_rate = 16000

from src.feature_extraction import FeatureExtractor
feature_extractor = FeatureExtractor(wav_dir, sample_rate, feat_dir)
feature_extractor.extract_feat()
nitech_jp_song070_f001_003 Running REAPER f0 extraction...
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
Duration model: Model configuration
# duration model related
# hardcoded
dur_lab_dim = 368
dur_cmp_dim = 5
dur_train_file_number = 27
dur_valid_file_number = 1
dur_test_file_number = 1

dur_mdl_dir = os.path.join(exp_dir, 'duration_model')
if not os.path.exists(dur_mdl_dir):
    os.makedirs(dur_mdl_dir)

dur_tmp_dir = os.path.join(dur_mdl_dir, 'tmp')
if not os.path.exists(dur_tmp_dir):
    os.makedirs(dur_tmp_dir)

dur_inter_dir = os.path.join(dur_mdl_dir, 'inter')
if not os.path.exists(dur_inter_dir):
    os.makedirs(dur_inter_dir)

dur_lab_dir = os.path.join(dur_inter_dir, 'lab_' + str(dur_lab_dim))
if not os.path.exists(dur_lab_dir):
    os.makedirs(dur_lab_dir)

dur_lab_no_silence_dir = os.path.join(dur_inter_dir, 'lab_no_silence_' + str(dur_lab_dim))
if not os.path.exists(dur_lab_no_silence_dir):
    os.makedirs(dur_lab_no_silence_dir)

dur_lab_no_silence_norm_dir = os.path.join(dur_inter_dir, 'lab_no_silence_norm_' + str(dur_lab_dim))
if not os.path.exists(dur_lab_no_silence_norm_dir):
    os.makedirs(dur_lab_no_silence_norm_dir)

dur_dur_dir = os.path.join(dur_inter_dir, 'dur')
if not os.path.exists(dur_dur_dir):
    os.makedirs(dur_dur_dir)

dur_cmp_dir = os.path.join(dur_inter_dir, 'cmp_' + str(dur_cmp_dim))
if not os.path.exists(dur_cmp_dir):
    os.makedirs(dur_cmp_dir)

dur_cmp_no_silence_dir = os.path.join(dur_inter_dir, 'cmp_no_silence_' + str(dur_cmp_dim))
if not os.path.exists(dur_cmp_no_silence_dir):
    os.makedirs(dur_cmp_no_silence_dir)

dur_cmp_no_silence_norm_dir = os.path.join(dur_inter_dir, 'cmp_no_silence_norm_' + str(dur_cmp_dim))
if not os.path.exists(dur_cmp_no_silence_norm_dir):
    os.makedirs(dur_cmp_no_silence_norm_dir)

dur_variance_dir = os.path.join(dur_inter_dir, 'variance')
if not os.path.exists(dur_variance_dir):
    os.makedirs(dur_variance_dir)

dur_nn_mdl_dir = os.path.join(dur_mdl_dir, 'mdl')
if not os.path.exists(dur_nn_mdl_dir):
    os.makedirs(dur_nn_mdl_dir)

dur_lab_norm_file = os.path.join(dur_inter_dir, 'lab_norm_' + str(dur_lab_dim) + '.dat')
dur_cmp_norm_file = os.path.join(dur_inter_dir, 'cmp_norm_' + str(dur_cmp_dim) + '.dat')
dur_dur_var_file = os.path.join(dur_variance_dir, 'dur')

dur_lab_file_list = gen_file_list(dur_lab_dir, file_id_list, 'labbin')
dur_lab_no_silence_file_list = gen_file_list(dur_lab_no_silence_dir, file_id_list, 'labbin')
dur_lab_no_silence_norm_file_list = gen_file_list(dur_lab_no_silence_norm_dir, file_id_list, 'labbin')
dur_dur_file_list = gen_file_list(dur_dur_dir, file_id_list, 'dur')
dur_cmp_file_list = gen_file_list(dur_cmp_dir, file_id_list, 'cmp')
dur_cmp_no_silence_file_list = gen_file_list(dur_cmp_no_silence_dir, file_id_list, 'cmp')
dur_cmp_no_silence_norm_file_list = gen_file_list(dur_cmp_no_silence_norm_dir, file_id_list, 'cmp')
_____no_output_____
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
Feature extraction from label files
ques_dir = os.path.join(curr_dir, 'ques')
question = os.path.join(ques_dir, 'general')

from frontend.label_normalisation import HTSLabelNormalisation
dur_lab_normaliser = HTSLabelNormalisation(question, add_frame_features=False, subphone_feats='none')
dur_lab_normaliser.perform_normalisation(orig_lab_file_list, dur_lab_file_list)
_____no_output_____
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
Remove silence phones
from frontend.silence_remover import SilenceRemover
dur_silence_remover = SilenceRemover(n_cmp=dur_lab_dim, silence_pattern=silence_pattern,
                                     remove_frame_features=False, subphone_feats='none')
dur_silence_remover.remove_silence(dur_lab_file_list, orig_lab_file_list, dur_lab_no_silence_file_list)

_, num_frame = io_funcs.load_binary_file_frame(dur_lab_file_list[2], 368)
_, num_frame_nn = io_funcs.load_binary_file_frame(dur_lab_no_silence_file_list[2], 368)
print(num_frame)
print(num_frame_nn)

tmp, _ = io_funcs.load_binary_file_frame(dur_lab_file_list[2], 368)
print(tmp)
[[ 0. 0. 0. ... -1. -1. -1.] [ 0. 0. 0. ... -1. -1. -1.] [ 0. 0. 0. ... 192. 0. 100.] ... [ 0. 0. 0. ... 72. 57. 43.] [ 0. 0. 0. ... -1. -1. -1.] [ 0. 0. 0. ... -1. -1. -1.]]
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
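For reference, the silence patterns defined in the configuration are glob-style patterns over HTS full-context labels, where the current phone sits between '-' and '+'. A small illustration (not part of the pipeline; the label strings are made up) of what they match:

# Illustrative only: check made-up quinphone labels against silence_pattern.
import fnmatch

example_labels = ['x^x-sil+a=i...', 'sil^a-a+i=u...', 'a^i-pau+u=e...']
for lab in example_labels:
    is_silence = any(fnmatch.fnmatch(lab, p) for p in silence_pattern)
    print(lab, '->', 'silence' if is_silence else 'speech')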
Input feature normalization
from frontend.min_max_norm import MinMaxNormalisation
dur_min_max_normaliser = MinMaxNormalisation(feature_dimension=dur_lab_dim, min_value=0.01, max_value=0.99)
dur_min_max_normaliser.find_min_max_values(dur_lab_no_silence_file_list[0: dur_train_file_number])
dur_min_max_normaliser.normalise_data(dur_lab_no_silence_file_list, dur_lab_no_silence_norm_file_list)

dur_label_min_vector = dur_min_max_normaliser.min_vector
dur_label_max_vector = dur_min_max_normaliser.max_vector
dur_label_norm_info = np.concatenate((dur_label_min_vector, dur_label_max_vector), axis=0)
dur_label_norm_info = np.array(dur_label_norm_info, 'float32')

fid = open(dur_lab_norm_file, 'wb')
dur_label_norm_info.tofile(fid)
fid.close()

dur_label_norm_info.shape
_____no_output_____
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
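The min/max vectors saved above are enough to reproduce the scaling at synthesis time. A minimal numpy sketch of the transform, assuming the usual linear mapping of each dimension from [min, max] to [0.01, 0.99]:

# Hedged sketch of the min-max scaling; 'demo' is toy data, not pipeline data.
def min_max_scale(x, x_min, x_max, lo=0.01, hi=0.99):
    span = np.where((x_max - x_min) == 0, 1.0, x_max - x_min)  # guard constant dims
    return (x - x_min) / span * (hi - lo) + lo

demo = np.array([[0.0, 5.0], [10.0, 5.0], [20.0, 5.0]])
print(min_max_scale(demo, demo.min(axis=0), demo.max(axis=0)))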
Compute duration from label files
dur_lab_normaliser.prepare_dur_data(orig_lab_file_list, dur_dur_file_list, feature_type='numerical')

feat, num_frame = io_funcs.load_binary_file_frame(dur_dur_file_list[0], 5)
print(feat.shape)
print(num_frame)
print(feat[0:10, :])

dur_dur_file_list[2]
_____no_output_____
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
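The 5-dimensional duration vectors correspond to per-state durations of the aligned labels. A rough sketch of the conversion, assuming state-aligned HTS labels with times in 100 ns units and a 5 ms frame shift (both common defaults, stated here as assumptions):

# Hedged sketch: convert a label time span to a duration in frames.
frame_shift_100ns = 50000  # 5 ms in HTS 100 ns units (assumption)

def state_duration_in_frames(start_time, end_time):
    return int((end_time - start_time) / frame_shift_100ns)

# e.g. a state spanning 0 .. 650000 (100 ns units) lasts 13 frames:
print(state_duration_in_frames(0, 650000))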
Make output features for duration model
delta_win = [-0.5, 0.0, 0.5]
acc_win = [1.0, -2.0, 1.0]

"""
"in" & "out" here refer to before & after feature composition:
e.g. when dynamic features are added, the "out" dimension is three times
the "in" dimension. They do not refer to the network's input & output.
"""
dur_in_dimension_dict = {'dur': 5}
dur_out_dimension_dict = {'dur': 5}
dur_in_file_list_dict = {'dur': dur_dur_file_list}

from frontend.acoustic_composition import AcousticComposition
dur_acoustic_worker = AcousticComposition(delta_win = delta_win, acc_win = acc_win)
dur_acoustic_worker.prepare_nn_data(dur_in_file_list_dict, dur_cmp_file_list,
                                    dur_in_dimension_dict, dur_out_dimension_dict)

feat, num_frame = io_funcs.load_binary_file_frame(dur_cmp_file_list[2], 5)
print(feat.shape)
print(num_frame)
print(feat[0:10, :])
(131, 5) 131 [[ 13. 72. 166. 2. 54.] [ 9. 47. 1. 54. 32.] [ 1. 18. 4. 4. 4.] [ 3. 100. 1. 2. 6.] [ 4. 5. 3. 3. 3.] [ 1. 32. 1. 1. 3.] [ 5. 3. 2. 2. 3.] [ 2. 26. 1. 5. 8.] [ 2. 3. 3. 7. 6.] [ 6. 77. 17. 5. 5.]]
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
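The delta and acceleration windows declared above are only used for streams that get dynamic features (the duration stream does not, since in and out dimensions are both 5). To make the convention concrete, here is a self-contained numpy sketch of how [static, delta, delta-delta] frames are typically built from those windows; the edge padding is an illustrative choice, not necessarily what AcousticComposition does internally:

# Hedged sketch of dynamic-feature composition with delta_win / acc_win.
def add_dynamic_features(static):
    # static: (num_frames, dim); repeat edge frames as simple padding
    padded = np.vstack([static[:1], static, static[-1:]])
    delta = -0.5 * padded[:-2] + 0.5 * padded[2:]
    acc = padded[:-2] - 2.0 * padded[1:-1] + padded[2:]
    return np.hstack([static, delta, acc])

demo = np.arange(10, dtype=float).reshape(-1, 1)   # toy 1-D trajectory
print(add_dynamic_features(demo)[:4])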
Remove silence phones
dur_silence_remover = SilenceRemover(n_cmp = dur_cmp_dim, silence_pattern = silence_pattern,
                                     remove_frame_features = False, subphone_feats = 'none')
dur_silence_remover.remove_silence(dur_cmp_file_list, orig_lab_file_list, dur_cmp_no_silence_file_list)

_, num_frame = io_funcs.load_binary_file_frame(dur_cmp_file_list[2], 5)
_, num_frame_nn = io_funcs.load_binary_file_frame(dur_cmp_no_silence_file_list[2], 5)
print(num_frame)
print(num_frame_nn)
131 124
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
Output feature (duration) normalization
from frontend.mean_variance_norm import MeanVarianceNorm
dur_mvn_normaliser = MeanVarianceNorm(feature_dimension=dur_cmp_dim)
dur_global_mean_vector = dur_mvn_normaliser.compute_mean(dur_cmp_no_silence_file_list[0: dur_train_file_number], 0, dur_cmp_dim)
dur_global_std_vector = dur_mvn_normaliser.compute_std(dur_cmp_no_silence_file_list[0: dur_train_file_number], dur_global_mean_vector, 0, dur_cmp_dim)
dur_mvn_normaliser.feature_normalisation(dur_cmp_no_silence_file_list, dur_cmp_no_silence_norm_file_list)

dur_cmp_norm_info = np.concatenate((dur_global_mean_vector, dur_global_std_vector), axis=0)
dur_cmp_norm_info = np.array(dur_cmp_norm_info, 'float32')
fid = open(dur_cmp_norm_file, 'wb')
dur_cmp_norm_info.tofile(fid)
fid.close()

tmp1, num1 = io_funcs.load_binary_file_frame(dur_cmp_no_silence_file_list[0], 5)
tmp2, num2 = io_funcs.load_binary_file_frame(dur_cmp_no_silence_norm_file_list[0], 5)
print(num1 == num2)
print(tmp2)

dur_cmp_norm_info.shape

dur_variance_file_dict = {'dur': dur_dur_var_file}

feat_ind = 0
for feat in list(dur_out_dimension_dict.keys()):
    feat_std_vector = np.array(dur_global_std_vector[:, feat_ind: feat_ind + dur_out_dimension_dict[feat]], 'float32')
    fid = open(dur_variance_file_dict[feat], 'w')
    feat_var_vector = feat_std_vector**2
    feat_var_vector.tofile(fid)
    fid.close()
    feat_ind += dur_out_dimension_dict[feat]

print(dur_global_std_vector)
print(feat_var_vector)
[[ 4.77816306 32.451236 30.33884862 25.81974051 6.89332861]] [[ 22.830841 1053.0828 920.4457 666.659 47.51798 ]]
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
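The normalisation applied to the targets here is the standard z-score transform, and the variance file simply stores std squared per stream. A self-contained numpy sketch of the forward and inverse transforms (toy data, illustrative only):

# Hedged sketch of mean/variance normalisation and its inverse.
def mvn_normalise(features, mean_vec, std_vec):
    return (features - mean_vec) / std_vec

def mvn_denormalise(features, mean_vec, std_vec):
    return features * std_vec + mean_vec

demo = np.array([[10.0, 2.0], [20.0, 4.0], [30.0, 6.0]])
mu, sd = demo.mean(axis=0), demo.std(axis=0)
print(mvn_denormalise(mvn_normalise(demo, mu, sd), mu, sd))   # recovers demo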
Model training
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from torch.autograd import Variable
import math
import matplotlib.pyplot as plt

class DurationDataset(data.Dataset):
    def __init__(self, lab_file_list, cmp_file_list, lab_dim=368, cmp_dim=5):
        assert(len(lab_file_list) == len(cmp_file_list))
        for i in range(len(lab_file_list)):
            lab_basename = os.path.splitext(os.path.basename(lab_file_list[i]))[0]
            cmp_basename = os.path.splitext(os.path.basename(cmp_file_list[i]))[0]
            # print(lab_basename)
            # print(cmp_basename)
            # print('*' * 20)
            assert lab_basename == cmp_basename
        self.lab_file_list = lab_file_list
        self.cmp_file_list = cmp_file_list
        self.lab_dim = lab_dim
        self.cmp_dim = cmp_dim
        self.io_funcs = BinaryIOCollection()

    def __len__(self):
        return len(self.lab_file_list)

    def __getitem__(self, ind):
        X = torch.from_numpy(self.io_funcs.load_binary_file_frame(self.lab_file_list[ind], self.lab_dim)[0])
        Y = torch.from_numpy(self.io_funcs.load_binary_file_frame(self.cmp_file_list[ind], self.cmp_dim)[0])
        return X, Y

def collate_fn(batch):
    def func(p):
        return p[0].size(0)

    batch_size = len(batch)
    max_seq_len = max(batch, key=func)[0].size(0)
    min_seq_len = min(batch, key=func)[0].size(0)
    lab_dim = batch[0][0].size(1)
    cmp_dim = batch[0][1].size(1)

    if max_seq_len <= 2 * min_seq_len:
        sample_len = int(min_seq_len/2)
    else:
        sample_len = min_seq_len

    total_sample_num = 0
    for i in range(batch_size):
        lab = batch[i][0]
        cmp = batch[i][1]
        num_seq = math.ceil(lab.size(0)/sample_len)
        total_sample_num += num_seq

    labs = torch.zeros(total_sample_num, sample_len, lab_dim)
    cmps = torch.zeros(total_sample_num, sample_len, cmp_dim)

    curr_sample_ind = 0
    for i in range(batch_size):
        ind_in_file = 0
        for j in range(math.floor(batch[i][0].size(0)/sample_len)):
            labs[curr_sample_ind].copy_(batch[i][0][ind_in_file * sample_len: (ind_in_file+1) * sample_len][:])
            cmps[curr_sample_ind].copy_(batch[i][1][ind_in_file * sample_len: (ind_in_file+1) * sample_len][:])
            ind_in_file += 1
            curr_sample_ind += 1
        if batch[i][0].size(0) % sample_len != 0:
            labs[curr_sample_ind].copy_(batch[i][0][-sample_len:][:])
            cmps[curr_sample_ind].copy_(batch[i][1][-sample_len:][:])
            curr_sample_ind += 1
    assert(curr_sample_ind == total_sample_num)

    # print("lab dimension: " + str(lab_dim))
    # print("cmp dimension: " + str(cmp_dim))
    # seq_len_list = [i[0].size(0) for i in batch]
    # print("sequence length for each file: " + str(seq_len_list))
    # print('max sequence length of original file: ', str(max_seq_len))
    # print('min sequence length of original file: ', str(min_seq_len))
    # print("sample length: ", str(sample_len))
    # print('total_sample_num: ' + str(total_sample_num))
    # torch.set_printoptions(profile="full")
    # print(batch[0][1])
    # print(cmps)
    # torch.set_printoptions(profile="default")
    return labs, cmps, sample_len

class DurationModel(nn.Module):
    def __init__(self, dur_lab_dim, dur_cmp_dim):
        super(DurationModel, self).__init__()
        self.fc1 = nn.Linear(dur_lab_dim, 512)
        self.fc2 = nn.Linear(512, 512)
        self.fc3 = nn.Linear(512, 512)
        self.fc4 = nn.Linear(512, 512)
        # self.fc4 = nn.Linear(512, dur_cmp_dim)
        self.fc5 = nn.Linear(512, dur_cmp_dim)

    def forward(self, dur_lab):
        res = F.relu(self.fc1(dur_lab))
        res = F.relu(self.fc2(res))
        res = F.relu(self.fc3(res))
        # res = self.fc4(res)
        res = F.relu(self.fc4(res))
        res = self.fc5(res)
        return res

def train(model, train_loader, valid_loader, epoch):
    model.train()
    for batch_ind, batch in enumerate(train_loader):
        lab, cmp = Variable(batch[0]), Variable(batch[1])
        loss = criterion(model(lab), cmp)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if batch_ind % log_interval == 0:
            val_loss = evaluate(model, valid_loader)
            train_loss = loss.item()
            epoch_progress = 100. * batch_ind / len(train_loader)
            print('Train Epoch: {}({:.0f}%)\tTrain Loss: {:.6f}\tVal Loss: {:.6f}'.format(
                epoch, epoch_progress, train_loss, val_loss))

def evaluate(model, valid_loader):
    model.eval()
    total_loss = 0
    n_examples = 0
    for batch_ind, batch in enumerate(valid_loader):
        lab, cmp, sample_len = Variable(batch[0], volatile=True), Variable(batch[1]), batch[2]
        output = model(lab)
        loss = criterion(output, cmp, size_average=False).item()
        # print('loss without average: ' + str(loss))
        total_loss += loss
        n = len(lab)
        # print('num samples this batch: ' + str(n))
        # print(cmp.size())
        n_examples += n
    total_loss /= (n_examples * sample_len * cmp.size()[-1])
    return total_loss

# TODO to implement cross-validation, print something here to see whether could do normalisation in Pytorch
batch_size = int(dur_train_file_number)
# batch_size = 1
# dur_train_set = DurationDataset([dur_lab_no_silence_norm_file_list[0]] * dur_train_file_number,
#                                 [dur_cmp_no_silence_norm_file_list[0]] * dur_train_file_number)
# dur_valid_set = DurationDataset(dur_lab_no_silence_norm_file_list[dur_train_file_number: dur_train_file_number + dur_valid_file_number],
#                                 dur_cmp_no_silence_norm_file_list[dur_train_file_number: dur_train_file_number + dur_valid_file_number])
dur_train_set = DurationDataset(dur_lab_no_silence_norm_file_list[:dur_train_file_number],
                                dur_cmp_no_silence_norm_file_list[:dur_train_file_number])
dur_valid_set = DurationDataset(dur_lab_no_silence_norm_file_list[:dur_valid_file_number],
                                dur_cmp_no_silence_norm_file_list[:dur_valid_file_number])
dur_train_loader = data.DataLoader(dur_train_set, shuffle=False, batch_size=batch_size, collate_fn=collate_fn)
dur_valid_loader = data.DataLoader(dur_valid_set, shuffle=False, batch_size=dur_valid_file_number, collate_fn=collate_fn)

tmp = next(iter(dur_train_loader))
lab_, cmp_, _ = tmp
print(lab_.size())
print(cmp_.size())
print(len(lab_))
print(len(Variable(lab_)))
print(len(dur_valid_set))

lr = 0.001
log_interval = 1
epochs = 100
duration_model = nn.Sequential(
    nn.Linear(dur_lab_dim, 512),
    nn.Dropout(0.5),
    nn.ReLU(),
    nn.Linear(512, 512),
    nn.Dropout(0.5),
    nn.ReLU(),
    nn.Linear(512, 512),
    nn.Dropout(0.5),
    nn.ReLU(),
    nn.Linear(512, 512),
    nn.Dropout(0.5),
    nn.ReLU(),
    nn.Linear(512, dur_cmp_dim)
)
print(duration_model)
optimizer = torch.optim.Adam(duration_model.parameters(), lr=lr)
criterion = F.mse_loss
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[90], gamma=0.01)
for epoch in range(1, epochs+1):
    scheduler.step()
    train(duration_model, dur_train_loader, dur_valid_loader, epoch)

lr = 0.25
log_interval = 1
epochs = 200
duration_model = DurationModel(dur_lab_dim, dur_cmp_dim)
print(duration_model)
optimizer = torch.optim.SGD(duration_model.parameters(), lr=lr)
criterion = F.mse_loss
for epoch in range(1, epochs+1):
    train(duration_model, dur_train_loader, dur_valid_loader, epoch)

dur_nn_mdl_file = os.path.join(dur_nn_mdl_dir, 'dur_nn_mdl.pt')
torch.save(duration_model.state_dict(), dur_nn_mdl_file)
_____no_output_____
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
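Since utterances have different lengths, collate_fn chops every sequence in a batch into fixed-length chunks (sample_len) so they stack into a single tensor; the final partial chunk is filled from the last sample_len frames of the file, so it overlaps the previous chunk. A toy check of that behaviour, not part of training:

# Two toy sequences of length 7 and 10: max_len (10) <= 2*min_len (14), so
# sample_len = min_len // 2 = 3, giving ceil(7/3) + ceil(10/3) = 3 + 4 = 7 chunks.
toy_batch = [(torch.arange(7 * 2, dtype=torch.float32).view(7, 2), torch.zeros(7, 1)),
             (torch.arange(10 * 2, dtype=torch.float32).view(10, 2), torch.zeros(10, 1))]
labs, cmps, sample_len = collate_fn(toy_batch)
print(labs.size(), cmps.size(), sample_len)   # (7, 3, 2) (7, 3, 1) 3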
Test
print('files for test: ')
print(dur_lab_no_silence_norm_file_list[-1])
print(dur_cmp_no_silence_norm_file_list[-1])
print(dur_cmp_no_silence_file_list[-1])
print('*' * 20)

input_lab, num_input_frame = io_funcs.load_binary_file_frame(dur_lab_no_silence_norm_file_list[-1], 368)
target_cmp, num_target_frame = io_funcs.load_binary_file_frame(dur_cmp_no_silence_norm_file_list[-1], 5)
assert(num_input_frame == num_target_frame)
print('target output cmp: ')
print(target_cmp[:10, :])

input_lab = torch.from_numpy(input_lab)
input_lab = input_lab[None, :, :]
output_cmp = duration_model(input_lab)
assert(output_cmp.size()[1] == num_target_frame)
print('output cmp: ')
print(output_cmp[0].detach().numpy()[:10, :])

target_dur, num_dur_frame = io_funcs.load_binary_file_frame(dur_cmp_no_silence_file_list[-1], 5)
assert(num_dur_frame == num_target_frame)
print('target dur: ')
print(target_dur[:10, :])

fid = open(dur_cmp_norm_file, 'rb')
dur_cmp_norm_info = np.fromfile(fid, dtype=np.float32)
fid.close()
dur_cmp_norm_info = dur_cmp_norm_info.reshape(2, -1)
dur_cmp_mean = dur_cmp_norm_info[0, ]
dur_cmp_std = dur_cmp_norm_info[1, ]

test_tmp_file = os.path.join(dur_tmp_dir, 'dur_output.cmp')
output_cmp = output_cmp.detach().numpy()[0]
io_funcs.array_to_binary_file(output_cmp, test_tmp_file)
print('mean: ', dur_cmp_mean)
print('std: ', dur_cmp_std)

test_dur_denormaliser = MeanVarianceNorm(feature_dimension=dur_cmp_dim)
test_dur_denormaliser.feature_denormalisation([test_tmp_file], [test_tmp_file], dur_cmp_mean, dur_cmp_std)

pred_dur = io_funcs.load_binary_file_frame(test_tmp_file, 5)
print('predicted dur: ')
print(type(pred_dur))
print(pred_dur[0][:10, :])
files for test:
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
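The denormalisation step above is just the inverse z-score transform applied with the saved mean/std. As a hedged follow-up, turning the predictions into usable integer state durations could look like this; rounding with a floor of one frame is a simple choice for illustration, not necessarily what a full synthesis recipe would do:

# Hedged sketch: denormalise in memory and round to whole frames.
pred_cmp = output_cmp * dur_cmp_std + dur_cmp_mean          # output_cmp is still normalised
pred_frames = np.maximum(np.round(pred_cmp), 1).astype(int)  # at least 1 frame per state
print(pred_frames[:10, :])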
Acoustic model: Model configuration
# acoustic model related
# hardcoded
acou_lab_dim = 377
acou_cmp_dim = 187
acou_train_file_number = 27
acou_valid_file_number = 1
acou_test_file_number = 1

acou_mdl_dir = os.path.join(exp_dir, 'acoustic_model')
if not os.path.exists(acou_mdl_dir):
    os.makedirs(acou_mdl_dir)

acou_inter_dir = os.path.join(acou_mdl_dir, 'inter')
if not os.path.exists(acou_inter_dir):
    os.makedirs(acou_inter_dir)

acou_lab_dir = os.path.join(acou_inter_dir, 'lab_' + str(acou_lab_dim))
if not os.path.exists(acou_lab_dir):
    os.makedirs(acou_lab_dir)

acou_lab_no_silence_dir = os.path.join(acou_inter_dir, 'lab_no_silence_' + str(acou_lab_dim))
if not os.path.exists(acou_lab_no_silence_dir):
    os.makedirs(acou_lab_no_silence_dir)

acou_lab_no_silence_norm_dir = os.path.join(acou_inter_dir, 'lab_no_silence_norm_' + str(acou_lab_dim))
if not os.path.exists(acou_lab_no_silence_norm_dir):
    os.makedirs(acou_lab_no_silence_norm_dir)

'''
acou_dur_dir = os.path.join(acou_inter_dir, 'dur')
if not os.path.exists(acou_dur_dir):
    os.makedirs(acou_dur_dir)
'''

acou_cmp_dir = os.path.join(acou_inter_dir, 'cmp_' + str(acou_cmp_dim))
if not os.path.exists(acou_cmp_dir):
    os.makedirs(acou_cmp_dir)

acou_cmp_no_silence_dir = os.path.join(acou_inter_dir, 'cmp_no_silence_' + str(acou_cmp_dim))
if not os.path.exists(acou_cmp_no_silence_dir):
    os.makedirs(acou_cmp_no_silence_dir)

acou_cmp_no_silence_norm_dir = os.path.join(acou_inter_dir, 'cmp_no_silence_norm_' + str(acou_cmp_dim))
if not os.path.exists(acou_cmp_no_silence_norm_dir):
    os.makedirs(acou_cmp_no_silence_norm_dir)

acou_variance_dir = os.path.join(acou_inter_dir, 'variance')
if not os.path.exists(acou_variance_dir):
    os.makedirs(acou_variance_dir)

acou_nn_mdl_dir = os.path.join(acou_mdl_dir, 'mdl')
if not os.path.exists(acou_nn_mdl_dir):
    os.makedirs(acou_nn_mdl_dir)

acou_lab_norm_file = os.path.join(acou_inter_dir, 'lab_norm_' + str(acou_lab_dim) + '.dat')
acou_cmp_norm_file = os.path.join(acou_inter_dir, 'cmp_norm_' + str(acou_cmp_dim) + '.dat')
acou_vuv_var_file = os.path.join(acou_variance_dir, 'vuv')
acou_mgc_var_file = os.path.join(acou_variance_dir, 'mgc')
acou_lf0_var_file = os.path.join(acou_variance_dir, 'lf0')
acou_bap_var_file = os.path.join(acou_variance_dir, 'bap')

acou_lab_file_list = gen_file_list(acou_lab_dir, file_id_list, 'labbin')
acou_lab_no_silence_file_list = gen_file_list(acou_lab_no_silence_dir, file_id_list, 'labbin')
acou_lab_no_silence_norm_file_list = gen_file_list(acou_lab_no_silence_norm_dir, file_id_list, 'labbin')
# dur_dur_file_list = gen_file_list(dur_dur_dir, file_id_list, 'dur')
acou_cmp_file_list = gen_file_list(acou_cmp_dir, file_id_list, 'cmp')
acou_cmp_no_silence_file_list = gen_file_list(acou_cmp_no_silence_dir, file_id_list, 'cmp')
acou_cmp_no_silence_norm_file_list = gen_file_list(acou_cmp_no_silence_norm_dir, file_id_list, 'cmp')

acou_lf0_file_list = gen_file_list(lf0_dir, file_id_list, 'lf0')
acou_mgc_file_list = gen_file_list(mgc_dir, file_id_list, 'mgc')
acou_bap_file_list = gen_file_list(bap_dir, file_id_list, 'bap')
_____no_output_____
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
Feature extraction from label files
acou_lab_normaliser = HTSLabelNormalisation(question, add_frame_features=True, subphone_feats='full')
acou_lab_normaliser.perform_normalisation(orig_lab_file_list, acou_lab_file_list)
_____no_output_____
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
Remove silence phones
acou_silence_remover = SilenceRemover(n_cmp=acou_lab_dim, silence_pattern=silence_pattern,
                                      remove_frame_features=True, subphone_feats='full')
acou_silence_remover.remove_silence(acou_lab_file_list, orig_lab_file_list, acou_lab_no_silence_file_list)

_, num_frame = io_funcs.load_binary_file_frame(acou_lab_file_list[2], 377)
_, num_frame_nn = io_funcs.load_binary_file_frame(acou_lab_no_silence_file_list[2], 377)
print(num_frame)
print(num_frame_nn)

tmp, _ = io_funcs.load_binary_file_frame(acou_lab_file_list[2], 377)
print(tmp)
[[0. 0. 0. ... 0.04234528 1. 0.00325733] [0. 0. 0. ... 0.04234528 0.99674267 0.00651466] [0. 0. 0. ... 0.04234528 0.99348533 0.00977199] ... [0. 0. 0. ... 0.2925373 0.00895522 0.9940299 ] [0. 0. 0. ... 0.2925373 0.00597015 0.99701494] [0. 0. 0. ... 0.2925373 0.00298507 1. ]]
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
Input feature normalization
acou_min_max_normaliser = MinMaxNormalisation(feature_dimension=acou_lab_dim, min_value=0.01, max_value=0.99)
acou_min_max_normaliser.find_min_max_values(acou_lab_no_silence_file_list[0: acou_train_file_number])
acou_min_max_normaliser.normalise_data(acou_lab_no_silence_file_list, acou_lab_no_silence_norm_file_list)

acou_label_min_vector = acou_min_max_normaliser.min_vector
acou_label_max_vector = acou_min_max_normaliser.max_vector
acou_label_norm_info = np.concatenate((acou_label_min_vector, acou_label_max_vector), axis=0)
acou_label_norm_info = np.array(acou_label_norm_info, 'float32')

fid = open(acou_lab_norm_file, 'wb')
acou_label_norm_info.tofile(fid)
fid.close()

acou_label_norm_info.shape
_____no_output_____
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
Make output features for acoustic model
""" "in" & "out" just mean before & after feature composition like if we compute dynamic features, dimensions of out will be 3 times of in not really mean in & out of the network """ acou_in_dimension_dict = {'bap': 1, 'mgc': 60, 'lf0': 1} acou_out_dimension_dict = {'bap': 3, 'vuv': 1, 'mgc': 180, 'lf0': 3} # acou_in_dir_dict = {'bap': bap_dir, 'mgc': mgc_dir, 'lf0': lf0_dir} acou_in_file_list_dict = {'bap': acou_bap_file_list, 'mgc': acou_mgc_file_list, 'lf0': acou_lf0_file_list} acou_acoustic_worker = AcousticComposition(delta_win = delta_win, acc_win = acc_win) acou_acoustic_worker.prepare_nn_data(acou_in_file_list_dict, acou_cmp_file_list, acou_in_dimension_dict, acou_out_dimension_dict) feat, num_frame = io_funcs.load_binary_file_frame(dur_cmp_file_list[2], 5) print(feat.shape) print(feat[0:10, :]) feat, num_frame = io_funcs.load_binary_file_frame(acou_cmp_file_list[2], 187) print(feat.shape) print(feat[0:10, :])
(131, 5) [[ 13. 72. 166. 2. 54.] [ 9. 47. 1. 54. 32.] [ 1. 18. 4. 4. 4.] [ 3. 100. 1. 2. 6.] [ 4. 5. 3. 3. 3.] [ 1. 32. 1. 1. 3.] [ 5. 3. 2. 2. 3.] [ 2. 26. 1. 5. 8.] [ 2. 3. 3. 7. 6.] [ 6. 77. 17. 5. 5.]] (8640, 187) [[ 0. 0. 0. ... 0.0016107 0.01590419 0. ] [ 0. 0. 0. ... -0.03265311 0.00975457 0. ] [ 0. 0. 0. ... 0.00190853 0.00436544 0. ] ... [ 0. 0. 0. ... -0.00899457 -0.00381122 0. ] [ 0. 0. 0. ... 0.02834933 -0.01433814 0. ] [ 0. 0. 0. ... 0.02132037 -0.0286779 0. ]]
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
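The 187-dimensional target is the concatenation of the composed streams (180 mgc + 3 lf0 + 1 vuv + 3 bap). The exact ordering inside the vector is decided by AcousticComposition; the ordering in the sketch below is an assumption used only to show how one would slice the vector once the order is known.

# Sanity check of the dimensions, plus an illustrative (assumed) slicing.
assumed_streams = [('mgc', 180), ('lf0', 3), ('vuv', 1), ('bap', 3)]
print(sum(d for _, d in assumed_streams) == acou_cmp_dim)   # True: 187

def split_streams(cmp_frame, streams):
    out, start = {}, 0
    for name, dim in streams:
        out[name] = cmp_frame[start: start + dim]
        start += dim
    return out

print(split_streams(feat[0], assumed_streams)['vuv'])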
Remove silence phones
acou_silence_remover = SilenceRemover(n_cmp = acou_cmp_dim, silence_pattern = silence_pattern,
                                      remove_frame_features = True, subphone_feats = 'full')
acou_silence_remover.remove_silence(acou_cmp_file_list, orig_lab_file_list, acou_cmp_no_silence_file_list)

_, num_frame = io_funcs.load_binary_file_frame(acou_cmp_file_list[2], 187)
_, num_frame_nn = io_funcs.load_binary_file_frame(acou_cmp_no_silence_file_list[2], 187)
print(num_frame)
print(num_frame_nn)
8640 7201
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
Output feature (dim 187) normalization
acou_mvn_normaliser = MeanVarianceNorm(feature_dimension=acou_cmp_dim)
acou_global_mean_vector = acou_mvn_normaliser.compute_mean(acou_cmp_no_silence_file_list[0: acou_train_file_number], 0, acou_cmp_dim)
acou_global_std_vector = acou_mvn_normaliser.compute_std(acou_cmp_no_silence_file_list[0: acou_train_file_number], acou_global_mean_vector, 0, acou_cmp_dim)
acou_mvn_normaliser.feature_normalisation(acou_cmp_no_silence_file_list, acou_cmp_no_silence_norm_file_list)

acou_cmp_norm_info = np.concatenate((acou_global_mean_vector, acou_global_std_vector), axis=0)
acou_cmp_norm_info = np.array(acou_cmp_norm_info, 'float32')
fid = open(acou_cmp_norm_file, 'wb')
acou_cmp_norm_info.tofile(fid)
fid.close()

tmp1, num1 = io_funcs.load_binary_file_frame(acou_cmp_no_silence_file_list[0], 187)
tmp2, num2 = io_funcs.load_binary_file_frame(acou_cmp_no_silence_norm_file_list[0], 187)
print(num1 == num2)
print(tmp2[:, :4])
print(acou_cmp_norm_info.shape)

acou_global_std_vector

acou_variance_file_dict = {'vuv': acou_vuv_var_file,
                           'mgc': acou_mgc_var_file,
                           'lf0': acou_lf0_var_file,
                           'bap': acou_bap_var_file}

feat_ind = 0
for feat in list(acou_out_dimension_dict.keys()):
    feat_std_vector = np.array(acou_global_std_vector[:, feat_ind: feat_ind + acou_out_dimension_dict[feat]], 'float32')
    fid = open(acou_variance_file_dict[feat], 'w')
    feat_var_vector = feat_std_vector**2
    feat_var_vector.tofile(fid)
    fid.close()
    feat_ind += acou_out_dimension_dict[feat]
_____no_output_____
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
Model training
# TODO to implement cross-validation, print something here to see whether could do normalisation in Pytorch
batch_size = int(acou_train_file_number)
# batch_size = 1
print('batch_size: ' + str(batch_size))

acou_train_set = DurationDataset(acou_lab_no_silence_norm_file_list[:10],
                                 acou_cmp_no_silence_norm_file_list[:10],
                                 lab_dim=377, cmp_dim=187)
acou_valid_set = DurationDataset(acou_lab_no_silence_norm_file_list[acou_train_file_number: acou_train_file_number + acou_valid_file_number],
                                 acou_cmp_no_silence_norm_file_list[acou_train_file_number: acou_train_file_number + acou_valid_file_number],
                                 lab_dim=377, cmp_dim=187)
acou_train_loader = data.DataLoader(acou_train_set, shuffle=True, batch_size=batch_size, collate_fn=collate_fn)
acou_valid_loader = data.DataLoader(acou_valid_set, shuffle=True, batch_size=acou_valid_file_number, collate_fn=collate_fn)

tmp = next(iter(dur_train_loader))
lab_, cmp_, _ = tmp
print(lab_.size())
print(cmp_.size())
print(len(Variable(lab_)))
print(len(dur_valid_set))

tmp = next(iter(acou_train_loader))
lab_, cmp_, sp_len = tmp
print(lab_.size())
print(cmp_.size())
print(len(Variable(lab_)))
print(len(acou_valid_set))
print(sp_len)

lr = 0.015
log_interval = 1
epochs = 50
acoustic_model = nn.Sequential(
    nn.Linear(acou_lab_dim, 300),
    nn.ReLU(),
    nn.Linear(300, acou_cmp_dim)
)
# acoustic_model = nn.Sequential(
#     nn.Linear(acou_lab_dim, 512),
#     nn.Dropout(0.5),
#     nn.ReLU(),
#     nn.Linear(512, 512),
#     nn.Dropout(0.5),
#     nn.ReLU(),
#     nn.Linear(512, 512),
#     nn.Dropout(0.5),
#     nn.ReLU(),
#     nn.Linear(512, acou_cmp_dim)
# )
optimizer = torch.optim.Adam(acoustic_model.parameters(), lr=lr)
criterion = F.mse_loss
for epoch in range(1, epochs+1):
    train(acoustic_model, acou_train_loader, acou_valid_loader, epoch)

lr = 0.1
log_interval = 1
epochs = 30
acoustic_model = DurationModel(acou_lab_dim, acou_cmp_dim)
print(acoustic_model)
optimizer = torch.optim.SGD(acoustic_model.parameters(), lr=lr)
criterion = F.mse_loss
for epoch in range(1, epochs+1):
    train(acoustic_model, acou_train_loader, acou_valid_loader, epoch)

acou_nn_mdl_file = os.path.join(acou_nn_mdl_dir, 'acou_nn_mdl.pt')
torch.save(acoustic_model.state_dict(), acou_nn_mdl_file)
_____no_output_____
Apache-2.0
egs/singing_synthesis/s3/run.ipynb
YongliangHe/SingingVoiceSynthesis
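The generation side that would follow this notebook is not shown here; as a hedged outline, it would run the trained acoustic model on normalised label features, undo the mean/variance normalisation with the saved statistics, and split the result into streams before parameter generation and WORLD synthesis. The sketch below covers only the prediction and denormalisation steps, under the stream-order assumption noted earlier; it is an illustration, not the notebook's actual synthesis script.

# Hedged sketch: predict denormalised 187-dim acoustic trajectories for one file.
def generate_acoustic_features(lab_file):
    lab, _ = io_funcs.load_binary_file_frame(lab_file, acou_lab_dim)
    acoustic_model.eval()
    with torch.no_grad():
        pred = acoustic_model(torch.from_numpy(lab)[None, :, :])[0].numpy()
    stats = np.fromfile(acou_cmp_norm_file, dtype=np.float32).reshape(2, -1)
    mean_vec, std_vec = stats[0], stats[1]
    return pred * std_vec + mean_vec

# pred_cmp = generate_acoustic_features(acou_lab_no_silence_norm_file_list[-1])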