Define the parameter for the photometer reading
Y_e = ufloat(2673.3,1.) Y_e
_____no_output_____
CC0-1.0
empir19nrm02/Jupyter/IBudgetMETAS.ipynb
AndersThorseth/empir19nrm02
Define the parameter for the distance measurement
d=ufloat(25.0000, 0.0025) d
_____no_output_____
CC0-1.0
empir19nrm02/Jupyter/IBudgetMETAS.ipynb
AndersThorseth/empir19nrm02
The Model
I=k_e*Y_e*d**2 I [h, result_vector] = uncLib_PlotHist(I, xLabel='Luminous intensity / cd') print('Mean: {}, I0: {}, I1: {}'.format(result_vector[0], result_vector[1], result_vector[2])) h=uncLib_PlotHist(k_e, xLabel='calibration factor / lx/LSB')
_____no_output_____
CC0-1.0
empir19nrm02/Jupyter/IBudgetMETAS.ipynb
AndersThorseth/empir19nrm02
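For readers without the METAS UncLib environment used above, the same propagation can be sketched with the `uncertainties` package; the calibration factor `k_e` below is a made-up placeholder, since its definition is not shown in this excerpt.
from uncertainties import ufloat
k_e = ufloat(1.0e-3, 1.0e-5)  # hypothetical calibration factor / lx/LSB (illustrative value only)
Y_e = ufloat(2673.3, 1.)      # photometer reading, as above
d = ufloat(25.0000, 0.0025)   # distance, as above
I = k_e * Y_e * d**2          # same model as above, with propagated uncertainty
print(I)                      # nominal value +/- standard uncertainty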
Prediction of the 2017 World Series winner given the 1905 dataset.
import pandas as pd tot1905 = pd.read_csv("../clean_data/1905ML.csv") tot1905 = tot1905.drop({"Unnamed: 0", "H", "HR", "BB", "SB", "HA", "HRA", "BBA", "SOA", "E"}, axis=1) tot1905 tot2017 = pd.read_csv("../clean_data/2017ML.csv") tot2017 = tot2017.drop({"Unnamed: 0", "WSWIN"}, axis=1) tot2017 # Create a function to convert the bats and throws colums to numeric def bats_throws(col): if col == "Y": return 1 else: return 0 # Use the `apply()` method to create numeric columns from the bats and throws columns tot1905['WSWin'] = tot1905['WSWin'].apply(bats_throws) # Print out the first rows of `master_df` tot1905 features = tot1905.drop({"franchID", "WSWin", "yearID"}, axis=1) features features2017 = tot2017.drop({"franchID"}, axis=1) features2017 # Create `target` Series target = tot1905['WSWin'] target # Import cross_val_predict, KFold and LogisticRegression from 'sklearn' from sklearn.cross_validation import cross_val_predict, KFold from sklearn.linear_model import LogisticRegression # Create Logistic Regression model lr = LogisticRegression(class_weight='balanced') # Create an instance of the KFold class kf = KFold(features.shape[0], random_state=1) # Create predictions using cross validation predictions_lr = cross_val_predict(lr, features, target, cv=kf) import numpy as np # Convert predictions and target to NumPy arrays np_predictions_lr = np.asarray(predictions_lr) np_target = target.as_matrix() # Determine True Positive count tp_filter_lr = (np_predictions_lr == 1) & (np_target == 1) tp_lr = len(np_predictions_lr[tp_filter_lr]) # Determine False Negative count fn_filter_lr = (np_predictions_lr == 0) & (np_target == 1) fn_lr = len(np_predictions_lr[fn_filter_lr]) # Determine False Positive count fp_filter_lr = (np_predictions_lr == 1) & (np_target == 0) fp_lr = len(np_predictions_lr[fp_filter_lr]) # Determine True Negative count tn_filter_lr = (np_predictions_lr == 0) & (np_target == 0) tn_lr = len(np_predictions_lr[tn_filter_lr]) # Determine True Positive rate tpr_lr = tp_lr / (tp_lr + fn_lr) # Determine False Negative rate fnr_lr = fn_lr / (fn_lr + tp_lr) # Determine False Positive rate fpr_lr = fp_lr / (fp_lr + tn_lr) # Print each count print(tp_lr) print(fn_lr) print(fp_lr) # Print each rate print(tpr_lr) print(fnr_lr) print(fpr_lr) new_data = tot2017 new_features = features2017 # Fit the Random Forest model lr.fit(features, target) # Estimate probabilities of Hall of Fame induction probabilities = lr.predict_proba(new_features) # Convert predictions to a DataFrame WS_predictions = pd.DataFrame(probabilities[:,1]) # Sort the DataFrame (descending) WS_predictions = WS_predictions.sort_values(0, ascending=False) WS_predictions['Probability'] = WS_predictions[0] # Print 50 highest probability HoF inductees from still eligible players for i, row in WS_predictions.head(50).iterrows(): prob = ' '.join(('WS Probability =', str(row['Probability']))) print('') print(prob) print(new_data.iloc[i,1:27])
WS Probability = 0.000567750444291 R 818 ERA 3.3 WP 0.63 Name: 7, dtype: object WS Probability = 0.000386572498575 R 770 ERA 3.38 WP 0.642 Name: 13, dtype: object WS Probability = 0.000337225795312 R 858 ERA 3.72 WP 0.562 Name: 18, dtype: object WS Probability = 0.000294728209055 R 812 ERA 3.66 WP 0.574 Name: 0, dtype: object WS Probability = 0.000239189716566 R 785 ERA 3.7 WP 0.574 Name: 3, dtype: object WS Probability = 0.000210695494912 R 819 ERA 3.88 WP 0.599 Name: 29, dtype: object WS Probability = 0.000209445957019 R 896 ERA 4.12 WP 0.623 Name: 10, dtype: object WS Probability = 0.000188917067286 R 822 ERA 3.95 WP 0.568 Name: 4, dtype: object WS Probability = 0.000123021808249 R 761 ERA 4.01 WP 0.512 Name: 25, dtype: object WS Probability = 0.000107954043486 R 732 ERA 4 WP 0.531 Name: 15, dtype: object WS Probability = 9.26971520401e-05 R 694 ERA 3.97 WP 0.494 Name: 26, dtype: object WS Probability = 7.2908164575e-05 R 824 ERA 4.51 WP 0.537 Name: 8, dtype: object WS Probability = 6.79856381284e-05 R 710 ERA 4.2 WP 0.494 Name: 12, dtype: object WS Probability = 6.05607902049e-05 R 815 ERA 4.59 WP 0.525 Name: 16, dtype: object WS Probability = 5.35534869266e-05 R 750 ERA 4.46 WP 0.481 Name: 23, dtype: object WS Probability = 5.2541114932e-05 R 668 ERA 4.22 WP 0.463 Name: 21, dtype: object WS Probability = 4.90864652238e-05 R 799 ERA 4.66 WP 0.481 Name: 27, dtype: object WS Probability = 4.25479454223e-05 R 693 ERA 4.42 WP 0.469 Name: 28, dtype: object WS Probability = 3.52144760617e-05 R 739 ERA 4.67 WP 0.463 Name: 19, dtype: object WS Probability = 3.34410987455e-05 R 778 ERA 4.82 WP 0.475 Name: 14, dtype: object WS Probability = 3.32039235179e-05 R 690 ERA 4.55 WP 0.407 Name: 20, dtype: object WS Probability = 3.23502443078e-05 R 702 ERA 4.61 WP 0.494 Name: 11, dtype: object WS Probability = 3.10768390315e-05 R 732 ERA 4.72 WP 0.444 Name: 1, dtype: object WS Probability = 2.76859423358e-05 R 639 ERA 4.5 WP 0.395 Name: 24, dtype: object WS Probability = 2.43796231571e-05 R 706 ERA 4.78 WP 0.414 Name: 5, dtype: object WS Probability = 2.15264503579e-05 R 743 ERA 4.97 WP 0.463 Name: 2, dtype: object WS Probability = 1.91870459094e-05 R 735 ERA 5.01 WP 0.432 Name: 17, dtype: object WS Probability = 1.73770791978e-05 R 604 ERA 4.67 WP 0.438 Name: 22, dtype: object WS Probability = 1.59958103859e-05 R 753 ERA 5.17 WP 0.42 Name: 6, dtype: object WS Probability = 1.04828404913e-05 R 735 ERA 5.36 WP 0.395 Name: 9, dtype: object
MIT
world_series_prediction-master/ML/Logistic--2017.ipynb
kchhajed1/baseball_predictions
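The cell above uses `sklearn.cross_validation` and `DataFrame.as_matrix()`, both of which have been removed from recent scikit-learn and pandas releases. A minimal sketch of the equivalent cross-validated predictions with the current API, assuming the same `features` and `target` objects built above:
from sklearn.model_selection import cross_val_predict, KFold
from sklearn.linear_model import LogisticRegression
import numpy as np
lr = LogisticRegression(class_weight='balanced')
kf = KFold(n_splits=5, shuffle=True, random_state=1)  # folds are now set with n_splits
predictions_lr = np.asarray(cross_val_predict(lr, features, target, cv=kf))
np_target = target.to_numpy()  # replaces the removed as_matrix()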
Calculating the distance between the Customer's city and the Seller's city
from pyspark.sql import SparkSession, functions as F import math spark = SparkSession.builder.getOrCreate() orders_items_df = spark.read \ .option('escape', '\"') \ .option('quote', '\"') \ .csv('./dataset/olist_order_items_dataset.csv', header=True, multiLine=True, inferSchema=True) orders_df = spark.read \ .option('escape', '\"') \ .option('quote', '\"') \ .csv('./dataset/olist_orders_dataset.csv', header=True, multiLine=True, inferSchema=True) customers_df = spark.read \ .option('escape', '\"') \ .option('quote', '\"') \ .csv('./dataset/olist_customers_dataset.csv', header=True, multiLine=True, inferSchema=True) sellers_df = spark.read \ .option('escape', '\"') \ .option('quote', '\"') \ .csv('./dataset/olist_sellers_dataset.csv', header=True, multiLine=True, inferSchema=True) geo_df = spark.read \ .option('escape', '\"') \ .option('quote', '\"') \ .csv('./dataset/olist_geolocation_dataset.csv', header=True, multiLine=True, inferSchema=True)
_____no_output_____
MIT
hypothesis_25.ipynb
IuriSly/DnA-POC-olist
Grouping data
data_df = orders_df.filter(F.col('order_status') == 'delivered').join(customers_df, 'customer_id') data_df = orders_items_df.join(data_df, 'order_id') \ .join(sellers_df, 'seller_id') \ .select('customer_state', 'customer_city', 'customer_zip_code_prefix', 'seller_zip_code_prefix', 'freight_value') geo_df = geo_df.groupBy('geolocation_zip_code_prefix').agg(F.min('geolocation_lat').alias('geolocation_lat'), F.min('geolocation_lng').alias('geolocation_lng')) data_df = data_df.join(geo_df, data_df.customer_zip_code_prefix == geo_df.geolocation_zip_code_prefix) \ .select(F.col('geolocation_lat').alias('customer_lat'), F.col('geolocation_lng').alias('customer_lng'), 'seller_zip_code_prefix', 'freight_value') \ .join(geo_df, data_df.seller_zip_code_prefix == geo_df.geolocation_zip_code_prefix) \ .select('customer_lat', 'customer_lng', F.col('geolocation_lat').alias('seller_lat'), F.col('geolocation_lng').alias('seller_lng'),'freight_value') data_df.count() data_df.show()
+-------------------+-------------------+-------------------+-------------------+-------------+ | customer_lat| customer_lng| seller_lat| seller_lng|freight_value| +-------------------+-------------------+-------------------+-------------------+-------------+ | -23.50648246805157|-47.422068081741564|-23.545262137111173| -46.66134804356862| 14.43| | -23.82558722913311| -46.56982049999999| -23.51441473688614| -46.59097058895492| 9.34| |-21.213665497085813| -47.81670447259758| -23.51441473688614| -46.59097058895492| 11.74| |-21.445954952757404| -50.12641249999996| -23.51441473688614| -46.59097058895492| 3.07| |-21.445954952757404| -50.12641249999996| -23.51441473688614| -46.59097058895492| 3.06| |-23.635655999999997| -46.751535578894| -23.51441473688614| -46.59097058895492| 9.34| | -23.49878075214959|-46.632511331380975| -23.51441473688614| -46.59097058895492| 9.34| |-22.970853233039268|-43.671131559512865|-23.593123748530044| -46.64060056549716| 15.72| | -3.814121000836711| -38.59399257936673|-23.593123748530044| -46.64060056549716| 17.63| | -3.814121000836711| -38.59399257936673|-23.593123748530044| -46.64060056549716| 17.63| | -3.814121000836711| -38.59399257936673|-23.593123748530044| -46.64060056549716| 17.63| | -3.814121000836711| -38.59399257936673|-23.593123748530044| -46.64060056549716| 17.63| | -20.26779007607431| -56.7781600607055|-23.593123748530044| -46.64060056549716| 18.77| |-12.991703180012463| -38.45058108919243|-23.593123748530044| -46.64060056549716| 17.53| | -23.51441473688614| -46.59097058895492|-23.204420999999996|-46.590299999999985| 7.46| | -23.63496067770149| -46.75505959566482|-23.204420999999996|-46.590299999999985| 7.45| | -23.74259343710035| -46.60832671234592|-23.204420999999996|-46.590299999999985| 7.87| |-23.214238574995893| -49.40174227177037|-23.204420999999996|-46.590299999999985| 13.08| |-25.546951194558265| -49.29139473619366|-23.204420999999996|-46.590299999999985| 18.3| |-26.883628877564163| -49.08164479732078|-23.204420999999996|-46.590299999999985| 18.23| +-------------------+-------------------+-------------------+-------------------+-------------+ only showing top 20 rows
MIT
hypothesis_25.ipynb
IuriSly/DnA-POC-olist
Calculating distance
def d(c_lat, c_lng, s_lat, s_lng): radius = 6371 # km dlat = math.radians(s_lat-c_lat) dlon = math.radians(s_lng-c_lng) a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(c_lat)) \ * math.cos(math.radians(s_lat)) * math.sin(dlon/2) * math.sin(dlon/2) c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a)) d = radius * c; return d distance = F.udf(d) data_df = data_df.withColumn('distance', distance('customer_lat', 'customer_lng', 'seller_lat', 'seller_lng')) data_df.show() data_df = data_df.withColumn('distance', F.col('distance').cast('double')) data_df.printSchema() data_df.stat.corr('distance','freight_value')
_____no_output_____
MIT
hypothesis_25.ipynb
IuriSly/DnA-POC-olist
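One small refinement, shown only as a sketch reusing `d`, `F`, and `data_df` from the cells above: registering the UDF with an explicit return type avoids the later cast of the `distance` column to double.
from pyspark.sql.types import DoubleType
distance = F.udf(d, DoubleType())  # declare the return type up front
data_df = data_df.withColumn('distance', distance('customer_lat', 'customer_lng', 'seller_lat', 'seller_lng'))
data_df.printSchema()  # 'distance' is already a double here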
SCOPUS journal data analysis of Indian research About: The main aim of this data analysis is to identify the ongoing research in Indian universities and Indian industry. It gives a basic answer about research sources and trends with top authors and publications, and it also shows the participation of industry and universities in research. Created by: **Neel Shah:** [Website](https://neelshah18.github.io/) | [Linkedin](https://www.linkedin.com/in/neel-shah-7b5495104/) | [GitHub](https://github.com/NeelShah18) | Email: **[email protected]** **Open to hire** Edited by: 1) Malaikannan Sankarasubbu - know more about him: [Linkedin](https://www.linkedin.com/in/malaikannan/) | [GitHub](https://github.com/malaikannan) Special thanks to: 1) Dr. Jacob Minz - know more about him: [Linkedin](https://www.linkedin.com/in/jacob-minz-16762a3/) | [GitHub](https://github.com/jrminz) 2) Anirban Santara - know more about him: [Linkedin](https://www.linkedin.com/in/anirbansantara/) | [GitHub](https://github.com/Santara) Technical implementation - open source license: It is implemented in a Jupyter notebook backed by Anaconda and Python 3.6+. The dataset and Jupyter notebook are available under the MIT open source license. If you want to use this code or data, feel free to do so, but please cite me. * Link to the repository: [click here!](https://github.com/NeelShah18/scopus-analysis-for-indian-researcher) * Link to code and dataset (stored in SQLite and CSV format): [click here!](https://github.com/NeelShah18/scopus-analysis-for-indian-researcher) Top 20 research papers from 2001 to 2016 - Indian researchers It is surprising that of the top 20, almost 70% of the papers are related to AI, ML, CV and DL. But
import sqlite3 import matplotlib.pyplot as plt import operator sqlite_database = '/home/neel/scopus_data/scopus_data.sqlite' conn = sqlite3.connect(sqlite_database) c = conn.cursor() c.execute("SELECT `Title`,`cited_rank` FROM `AI_scopus` ORDER BY `cited_rank` DESC LIMIT 0, 20;") data = c.fetchall() conn.close() top_paper = {} #print(data) for x in data: text = (((str(x).replace("'","")).replace("(","")).replace(")","")) lis = text.split(",") #print(lis[0]) #print(lis[1].strip()) top_paper[str(lis[0])]= int(lis[1]) #print(top_paper) plt.barh(range(len(top_paper)),top_paper.values(),align='center') plt.yticks(range(len(top_paper)),list(top_paper.keys())) plt.xlabel('\n Paper cited ') plt.title("Top 20 Indian researcher's paper in SCOPUS journal \nfrom 2000 to 2016\n") plt.ylabel('---- Paper ---- \n') fig_size = plt.rcParams["figure.figsize"] fig_size[0] = 15 fig_size[1] = 15 plt.rcParams["figure.figsize"] = fig_size plt.show() import sqlite3 import matplotlib.pyplot as plt import operator data = [] sqlite_database = '/home/neel/scopus_data/scopus_data.sqlite' conn = sqlite3.connect(sqlite_database) c = conn.cursor() c.execute("SELECT `Authors`,`cited_rank` FROM `AI_scopus` ORDER BY `cited_rank` DESC LIMIT 0, 20;") data = c.fetchall() top_author = {} text = str(data[0]).replace("'","") for x in data: cite = (str(x)[-4:-1]).strip() authors = (str(x)[2:len(x)-7]).replace("'","") top_author[str(authors)] = int(cite) #print(top_author) conn.close() plt.barh(range(len(top_author)),top_author.values(),align='center') plt.yticks(range(len(top_author)),list(top_author.keys())) plt.xlabel('\n Author cited ') plt.title('Top 20 Indian researcher in SCOPUS journal\n from 2000 to 2016\n') plt.ylabel('---- Authors ---- \n') fig_size = plt.rcParams["figure.figsize"] fig_size[0] = 15 fig_size[1] = 15 plt.rcParams["figure.figsize"] = fig_size plt.show() import sqlite3 import matplotlib.pyplot as plt import operator data = [] sqlite_database = '/home/neel/scopus_data/scopus_data.sqlite' conn = sqlite3.connect(sqlite_database) c = conn.cursor() c.execute("SELECT `Year`,`IndexKeywords` FROM `AI_scopus` DESC LIMIT 0, 5000;") data = c.fetchall() #text = str(data[0]) #print(text[2:6]) #print(text[10:len(text)-2]) #tr = [] #tr = (text[10:len(text)-2]).split(";") #print(tr) conn.close() data_dic = {} z = 0 word_lis = [] while z < len(data): text = str(data[z]) year = str(text[2:6]) #print(year) lis_word = (text[10:len(text)-2].replace(" ","")).split(";") #print(lis_word) if year == '2016': for word in lis_word: try: data_dic[str(word)] = int(data_dic[str(word)]) + 1 except: data_dic[str(word)] = 1 z += 1 #print(data_dic) lis_f = sorted(data_dic, key=data_dic.get, reverse=True) count = 0 draw_word_dic = {} #print(lis_f) while count < 10: draw_word_dic[str(lis_f[count])] = data_dic[str(lis_f[count])] count += 1 plt.barh(range(len(draw_word_dic)),draw_word_dic.values(),align='center') plt.yticks(range(len(draw_word_dic)),list(draw_word_dic.keys())) plt.xlabel('\nNumber of Papers') plt.title('Trend of research in 2016 "SCOPUS" journal') plt.ylabel('---- Areas ---- \n') plt.show() import sqlite3 import matplotlib.pyplot as plt import operator data = [] sqlite_database = '/home/neel/scopus_data/scopus_data.sqlite' conn = sqlite3.connect(sqlite_database) c = conn.cursor() c.execute("SELECT `Year`,`IndexKeywords` FROM `AI_scopus` DESC LIMIT 0, 5000;") data = c.fetchall() #text = str(data[0]) #print(text[2:6]) #print(text[10:len(text)-2]) #tr = [] #tr = (text[10:len(text)-2]).split(";") #print(tr) conn.close() data_dic = {} 
z = 0 word_lis = [] while z < len(data): text = str(data[z]) year = str(text[2:6]) #print(year) lis_word = (text[10:len(text)-2].replace(" ","")).split(";") #print(lis_word) if year == '2015': for word in lis_word: try: data_dic[str(word)] = int(data_dic[str(word)]) + 1 except: data_dic[str(word)] = 1 z += 1 #print(data_dic) lis_f = sorted(data_dic, key=data_dic.get, reverse=True) count = 0 draw_word_dic = {} #print(lis_f) while count < 10: draw_word_dic[str(lis_f[count])] = data_dic[str(lis_f[count])] count += 1 plt.barh(range(len(draw_word_dic)),draw_word_dic.values(),align='center') plt.yticks(range(len(draw_word_dic)),list(draw_word_dic.keys())) plt.xlabel('\nNumber of Papers') plt.title('Trend of research in 2015 "SCOPUS" journal') plt.ylabel('---- Areas ---- \n') plt.show() import sqlite3 import matplotlib.pyplot as plt import operator data = [] sqlite_database = '/home/neel/scopus_data/scopus_data.sqlite' conn = sqlite3.connect(sqlite_database) c = conn.cursor() c.execute("SELECT `Year`,`IndexKeywords` FROM `AI_scopus` DESC LIMIT 0, 5000;") data = c.fetchall() #text = str(data[0]) #print(text[2:6]) #print(text[10:len(text)-2]) #tr = [] #tr = (text[10:len(text)-2]).split(";") #print(tr) conn.close() data_dic = {} z = 0 word_lis = [] while z < len(data): text = str(data[z]) year = str(text[2:6]) #print(year) lis_word = (text[10:len(text)-2].replace(" ","")).split(";") #print(lis_word) if year == '2014': for word in lis_word: try: data_dic[str(word)] = int(data_dic[str(word)]) + 1 except: data_dic[str(word)] = 1 z += 1 #print(data_dic) lis_f = sorted(data_dic, key=data_dic.get, reverse=True) count = 0 draw_word_dic = {} #print(lis_f) while count < 10: draw_word_dic[str(lis_f[count])] = data_dic[str(lis_f[count])] count += 1 plt.barh(range(len(draw_word_dic)),draw_word_dic.values(),align='center') plt.yticks(range(len(draw_word_dic)),list(draw_word_dic.keys())) plt.xlabel('\nNumber of Papers') plt.title('Trend of research in 2014 "SCOPUS" journal') plt.ylabel('---- Areas ---- \n') plt.show() import sqlite3 import matplotlib.pyplot as plt import operator data = [] sqlite_database = '/home/neel/scopus_data/scopus_data.sqlite' conn = sqlite3.connect(sqlite_database) c = conn.cursor() c.execute("SELECT `Year`,`IndexKeywords` FROM `AI_scopus` DESC LIMIT 0, 5000;") data = c.fetchall() #text = str(data[0]) #print(text[2:6]) #print(text[10:len(text)-2]) #tr = [] #tr = (text[10:len(text)-2]).split(";") #print(tr) conn.close() data_dic = {} z = 0 word_lis = [] while z < len(data): text = str(data[z]) year = str(text[2:6]) #print(year) lis_word = (text[10:len(text)-2].replace(" ","")).split(";") #print(lis_word) if year == '2013': for word in lis_word: try: data_dic[str(word)] = int(data_dic[str(word)]) + 1 except: data_dic[str(word)] = 1 z += 1 #print(data_dic) lis_f = sorted(data_dic, key=data_dic.get, reverse=True) count = 0 draw_word_dic = {} #print(lis_f) while count < 10: draw_word_dic[str(lis_f[count])] = data_dic[str(lis_f[count])] count += 1 plt.barh(range(len(draw_word_dic)),draw_word_dic.values(),align='center') plt.yticks(range(len(draw_word_dic)),list(draw_word_dic.keys())) plt.xlabel('\nNumber of Papers') plt.title('Trend of research in 2013 "SCOPUS" journal') plt.ylabel('---- Areas ---- \n') plt.show() import sqlite3 import matplotlib.pyplot as plt import operator data = [] sqlite_database = '/home/neel/scopus_data/scopus_data.sqlite' conn = sqlite3.connect(sqlite_database) c = conn.cursor() c.execute("SELECT `Year`,`IndexKeywords` FROM `AI_scopus` DESC LIMIT 0, 5000;") data = 
c.fetchall() #text = str(data[0]) #print(text[2:6]) #print(text[10:len(text)-2]) #tr = [] #tr = (text[10:len(text)-2]).split(";") #print(tr) conn.close() data_dic = {} z = 0 word_lis = [] while z < len(data): text = str(data[z]) year = str(text[2:6]) #print(year) lis_word = (text[10:len(text)-2].replace(" ","")).split(";") #print(lis_word) if year == '2012': for word in lis_word: try: data_dic[str(word)] = int(data_dic[str(word)]) + 1 except: data_dic[str(word)] = 1 z += 1 #print(data_dic) lis_f = sorted(data_dic, key=data_dic.get, reverse=True) count = 0 draw_word_dic = {} #print(lis_f) while count < 10: draw_word_dic[str(lis_f[count])] = data_dic[str(lis_f[count])] count += 1 plt.barh(range(len(draw_word_dic)),draw_word_dic.values(),align='center') plt.yticks(range(len(draw_word_dic)),list(draw_word_dic.keys())) plt.xlabel('\nNumber of Papers') plt.title('Trend of research in 2012 "SCOPUS" journal') plt.ylabel('---- Areas ---- \n') plt.show() import sqlite3 import matplotlib.pyplot as plt import operator data = [] sqlite_database = '/home/neel/scopus_data/scopus_data.sqlite' conn = sqlite3.connect(sqlite_database) c = conn.cursor() c.execute("SELECT `Year`,`IndexKeywords` FROM `AI_scopus` DESC LIMIT 0, 5000;") data = c.fetchall() #text = str(data[0]) #print(text[2:6]) #print(text[10:len(text)-2]) #tr = [] #tr = (text[10:len(text)-2]).split(";") #print(tr) conn.close() data_dic = {} z = 0 word_lis = [] while z < len(data): text = str(data[z]) year = str(text[2:6]) #print(year) lis_word = (text[10:len(text)-2].replace(" ","")).split(";") #print(lis_word) if year == '2011': for word in lis_word: try: data_dic[str(word)] = int(data_dic[str(word)]) + 1 except: data_dic[str(word)] = 1 z += 1 #print(data_dic) lis_f = sorted(data_dic, key=data_dic.get, reverse=True) count = 0 draw_word_dic = {} #print(lis_f) while count < 10: draw_word_dic[str(lis_f[count])] = data_dic[str(lis_f[count])] count += 1 plt.barh(range(len(draw_word_dic)),draw_word_dic.values(),align='center') plt.yticks(range(len(draw_word_dic)),list(draw_word_dic.keys())) plt.xlabel('\nNumber of Papers') plt.title('Trend of research in 2011 "SCOPUS" journal') plt.ylabel('---- Areas ---- \n') plt.show() import sqlite3 import matplotlib.pyplot as plt import operator data = [] sqlite_database = '/home/neel/scopus_data/scopus_data.sqlite' conn = sqlite3.connect(sqlite_database) c = conn.cursor() c.execute("SELECT `Year`,`IndexKeywords` FROM `AI_scopus` DESC LIMIT 0, 5000;") data = c.fetchall() #text = str(data[0]) #print(text[2:6]) #print(text[10:len(text)-2]) #tr = [] #tr = (text[10:len(text)-2]).split(";") #print(tr) conn.close() data_dic = {} z = 0 word_lis = [] while z < len(data): text = str(data[z]) year = str(text[2:6]) #print(year) lis_word = (text[10:len(text)-2].replace(" ","")).split(";") #print(lis_word) if year == '2010': for word in lis_word: try: data_dic[str(word)] = int(data_dic[str(word)]) + 1 except: data_dic[str(word)] = 1 z += 1 #print(data_dic) lis_f = sorted(data_dic, key=data_dic.get, reverse=True) count = 0 draw_word_dic = {} #print(lis_f) while count < 10: draw_word_dic[str(lis_f[count])] = data_dic[str(lis_f[count])] count += 1 plt.barh(range(len(draw_word_dic)),draw_word_dic.values(),align='center') plt.yticks(range(len(draw_word_dic)),list(draw_word_dic.keys())) plt.xlabel('\nNumber of Papers') plt.title('Trend of research in 2010 "SCOPUS" journal') plt.ylabel('---- Areas ---- \n') plt.show() import sqlite3 import matplotlib.pyplot as plt #fetching the name of different fields name = [] #create the 
connection with database sqlite_database = '/home/neel/scopus_data/scopus_data.sqlite' conn = sqlite3.connect(sqlite_database) c = conn.cursor() c.execute("SELECT `name` FROM `university_data` ORDER BY `publish_paper` DESC LIMIT 0, 500;") #store all name in as list init_name = c.fetchall() for each in init_name: text = (str(each)[2:len(each)-4]).replace("\\n","") name.append(text) #close the connection with database conn.close() #fetching the number of publication field wise sep = [] #connection create with database sqlite_database = '/home/neel/scopus_data/scopus_data.sqlite' conn = sqlite3.connect(sqlite_database) c = conn.cursor() c.execute("SELECT `publish_paper` FROM `university_data` ORDER BY `publish_paper` DESC LIMIT 0, 500;") #store the data in sep as list sep = c.fetchall() #connection close with databae conn.close() #create a list of realtive percentage for publish paper field wise per = [] for n in sep: text = str(n)[1:len(n)-3] n_to_per = int(text) val = (n_to_per*100)/1187 val_2 = "%.2f"%val per.append(val_2) #---------------------------Graph code------------------------------ label = [] x = 0 while x < len(per): label.append(str(name[x].upper())+" : "+str(per[x])+"%") x += 1 labels = label sizes = per patches, texts = plt.pie(sizes, startangle=90) plt.legend(patches, labels,bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) # Set aspect ratio to be equal so that pie is drawn as a circle. plt.axis('equal') plt.title('Research done Top 15 Universitites and other Universities\n from 2001 to 2016\n Source: SCOPUS journal ') plt.tight_layout() plt.show() import sqlite3 from matplotlib import pyplot as plt from matplotlib import style import matplotlib.pyplot as plt; plt.rcdefaults() data = [] sqlite_database = '/home/neel/scopus_data/scopus_data.sqlite' conn = sqlite3.connect(sqlite_database) c = conn.cursor() c.execute("SELECT `Year`,`IndexKeywords` FROM `AI_scopus` DESC LIMIT 0, 5000;") data = c.fetchall() text = str(data[0]) #print(text[2:6]) #print(text[10:len(text)-2]) #tr = [] tr = ((text[10:len(text)-2]).replace(" ","")).split(";") #print(tr) conn.close() tred_word_dic = {} data_ai = {} data_nm = {} data_ls = {} data_algo = {} data_cv = {} field_lis = [] for line in data: text = str(line) year = text[2:6] field_lis = ((text[10:len(text)-2]).replace(" ","")).split(";") for field in field_lis: if field == 'Artificialintelligence': try: data_ai[year] = int(data_ai[year]) + 1 except: data_ai[year] = 1 if field == 'Neuralnetworks': try: data_nm[year] = int(data_nm[year]) + 1 except: data_nm[year] = 1 if field == 'Learningsystems': try: data_ls[year] = int(data_ls[year]) + 1 except: data_ls[year] = 1 if field == 'Algorithms': try: data_algo[year] = int(data_algo[year]) + 1 except: data_algo[year] = 1 if field == 'Computervision': try: data_cv[year] = int(data_cv[year]) + 1 except: data_cv[year] = 1 x_xix = [] y_ai = [] y_nm = [] y_ls = [] y_algo = [] y_cv = [] x = 2001 zero = 0 while x < 2017: try: #print(x) y_ai.append(data_ai[str(x)]) #print(data_CV[x]) except: y_ai.append(int(zero)) pass try: #print(x) y_nm.append(data_nm[str(x)]) #print(data_CV[x]) except: y_nm.append(int(zero)) pass try: #print(x) y_ls.append(data_ls[str(x)]) #print(data_CV[x]) except: y_ls.append(int(zero)) pass try: #print(x) y_algo.append(data_algo[str(x)]) #print(data_CV[x]) except: y_algo.append(int(zero)) pass try: #print(x) y_cv.append(data_cv[str(x)]) #print(data_CV[x]) except: y_cv.append(int(zero)) pass x_xix.append(x) x += 1 style.use('ggplot') plt.plot(x_xix,y_cv,label="Computer Vision") 
plt.plot(x_xix,y_ai,label="Artificial Intelligence") plt.plot(x_xix,y_algo,label="Algorithms") plt.plot(x_xix,y_ls,label="Learning Systems") plt.plot(x_xix,y_nm,label="Neural Networks") plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) plt.title('Trend of research in different realm of CS\n from 2001 to 2016') plt.ylabel('Number of publish paper') plt.xlabel('\nYears: 2001 - 2016') plt.show() import sqlite3 import matplotlib.pyplot as plt #fetching the name of different fields name = [] #create the connection with database sqlite_database = '/home/neel/scopus_data/scopus_data.sqlite' conn = sqlite3.connect(sqlite_database) c = conn.cursor() c.execute("SELECT `name` FROM `industry_data` ORDER BY `publish_paper` DESC LIMIT 0, 5000;") #store all name in as list init_name = c.fetchall() for each in init_name: text = (str(each)[2:len(each)-4]).replace("\\n","") name.append(text) #close the connection with database conn.close() #fetching the number of publication field wise sep = [] #connection create with database sqlite_database = '/home/neel/scopus_data/scopus_data.sqlite' conn = sqlite3.connect(sqlite_database) c = conn.cursor() c.execute("SELECT `publish_paper` FROM `industry_data` ORDER BY `publish_paper` DESC LIMIT 0, 5000;") #store the data in sep as list sep = c.fetchall() #connection close with databae conn.close() #create a list of realtive percentage for publish paper field wise per = [] for n in sep: text = str(n)[1:len(n)-3] n_to_per = int(text) val = (n_to_per*100)/200 val_2 = "%.2f"%val per.append(val_2) #---------------------------Graph code------------------------------ label = [] x = 0 while x < len(per): label.append(str(name[x].upper())+" : "+str(per[x])+"%") x += 1 labels = label sizes = per patches, texts = plt.pie(sizes, startangle=90) plt.legend(patches, labels,bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) # Set aspect ratio to be equal so that pie is drawn as a circle. plt.axis('equal') plt.title('Research percentage of different Industries\n from 2001 to 2016\n Source: SCOPUS journal ') plt.tight_layout() plt.show() import sqlite3 import matplotlib.pyplot as plt #fetching the name of different fields name = [] #create the connection with database sqlite_database = '/home/neel/scopus_data/scopus_data.sqlite' conn = sqlite3.connect(sqlite_database) c = conn.cursor() c.execute("SELECT `name` FROM `seprate` DESC LIMIT 0, 5000;") #store all name in as list init_name = c.fetchall() for each in init_name: text = (str(each)[2:len(each)-4]).replace("\\n","") name.append(text) #close the connection with database conn.close() #fetching the number of publication field wise sep = [] #connection create with database sqlite_database = '/home/neel/scopus_data/scopus_data.sqlite' conn = sqlite3.connect(sqlite_database) c = conn.cursor() c.execute("SELECT `number` FROM `seprate` DESC LIMIT 0, 5000;") #store the data in sep as list sep = c.fetchall() #connection close with databae conn.close() #create a list of realtive percentage for publish paper field wise per = [] for n in sep: text = str(n)[1:len(n)-3] n_to_per = int(text) val = (n_to_per*100)/1387 val_2 = "%.2f"%val per.append(val_2) #---------------------------Graph code------------------------------ label = [] x = 0 while x < len(per): label.append(str(name[x].upper())+" : "+str(per[x])+"%") x += 1 labels = label sizes = per patches, texts = plt.pie(sizes, startangle=90) plt.legend(patches, labels,bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) # Set aspect ratio to be equal so that pie is drawn as a circle. 
plt.axis('equal') plt.title('Research done by Universities and Industries\n from 2001 to 2016\n Source: SCOPUS journal ') plt.tight_layout() plt.show()
_____no_output_____
MIT
SCOPUS_data_analysis_Python3x_v1.ipynb
NeelShah18/scopus-analysis-for-indian-researcher
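The per-year cells above repeat the same query-and-plot logic with only the year changed; a hedged sketch of a helper that parameterizes the year (assuming the same database path and the `Year`/`IndexKeywords` columns used above) could replace that repetition:
import sqlite3
import matplotlib.pyplot as plt

def plot_trend_for_year(year, db_path='/home/neel/scopus_data/scopus_data.sqlite', top_n=10):
    # Count index keywords for the requested publication year
    conn = sqlite3.connect(db_path)
    c = conn.cursor()
    c.execute("SELECT `Year`, `IndexKeywords` FROM `AI_scopus` LIMIT 5000;")
    rows = c.fetchall()
    conn.close()
    counts = {}
    for row_year, keywords in rows:
        if str(row_year) != str(year) or not keywords:
            continue
        for word in str(keywords).replace(" ", "").split(";"):
            counts[word] = counts.get(word, 0) + 1
    top = sorted(counts, key=counts.get, reverse=True)[:top_n]
    plt.barh(range(len(top)), [counts[w] for w in top], align='center')
    plt.yticks(range(len(top)), top)
    plt.xlabel('\nNumber of Papers')
    plt.title('Trend of research in {} "SCOPUS" journal'.format(year))
    plt.ylabel('---- Areas ---- \n')
    plt.show()

for y in range(2010, 2017):
    plot_trend_for_year(y)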
[![Github](https://img.shields.io/github/stars/lab-ml/python_autocomplete?style=social)](https://github.com/lab-ml/python_autocomplete)[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lab-ml/python_autocomplete/blob/master/notebooks/evaluate.ipynb) Evaluate a model trained on predicting Python codeThis notebook evaluates a model trained on Python code.Here's a link to [training notebook](https://github.com/lab-ml/python_autocomplete/blob/master/notebooks/train.ipynb)[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lab-ml/python_autocomplete/blob/master/notebooks/train.ipynb) Install dependencies
%%capture !pip install labml labml_python_autocomplete
_____no_output_____
MIT
notebooks/evaluate.ipynb
kalufinnle/python_autocomplete
Imports
import string import torch from torch import nn from labml import experiment, logger, lab from labml_helpers.module import Module from labml.logger import Text, Style from labml.utils.pytorch import get_modules from labml.utils.cache import cache from labml_helpers.datasets.text import TextDataset from python_autocomplete.train import Configs from python_autocomplete.evaluate import evaluate, anomalies, complete, Predictor
_____no_output_____
MIT
notebooks/evaluate.ipynb
kalufinnle/python_autocomplete
We load the model from a training run. For this demo I'm loading from a run I trained at home.[![View Run](https://img.shields.io/badge/labml-experiment-brightgreen)](https://web.lab-ml.com/run?uuid=39b03a1e454011ebbaff2b26e3148b3d)*If you want to try this on Colab you need to run this on the same space where you run the training, because models are saved locally.*
TRAINING_RUN_UUID = '39b03a1e454011ebbaff2b26e3148b3d'
_____no_output_____
MIT
notebooks/evaluate.ipynb
kalufinnle/python_autocomplete
We initialize `Configs` object defined in [`train.py`](https://github.com/lab-ml/python_autocomplete/blob/master/python_autocomplete/train.py).
conf = Configs()
_____no_output_____
MIT
notebooks/evaluate.ipynb
kalufinnle/python_autocomplete
Create a new experiment in evaluation mode. In evaluation mode a new training run is not created.
experiment.evaluate()
_____no_output_____
MIT
notebooks/evaluate.ipynb
kalufinnle/python_autocomplete
Load custom configurations/hyper-parameters used in the training run.
custom_conf = experiment.load_configs(TRAINING_RUN_UUID) custom_conf
_____no_output_____
MIT
notebooks/evaluate.ipynb
kalufinnle/python_autocomplete
Set the custom configurations
experiment.configs(conf, custom_conf)
_____no_output_____
MIT
notebooks/evaluate.ipynb
kalufinnle/python_autocomplete
Set models for saving and loading. This will load `conf.model` from the specified run.
experiment.add_pytorch_models({'model': conf.model})
_____no_output_____
MIT
notebooks/evaluate.ipynb
kalufinnle/python_autocomplete
Specify which run to load from
experiment.load(TRAINING_RUN_UUID)
_____no_output_____
MIT
notebooks/evaluate.ipynb
kalufinnle/python_autocomplete
Start the experiment
experiment.start()
_____no_output_____
MIT
notebooks/evaluate.ipynb
kalufinnle/python_autocomplete
Initialize the `Predictor` defined in [`evaluate.py`](https://github.com/lab-ml/python_autocomplete/blob/master/python_autocomplete/evaluate.py).We load `stoi` and `itos` from cache, so that we don't have to read the dataset to generate them. `stoi` is the map for character to an integer index and `itos` is the map of integer to character map. These indexes are used in the model embeddings for each character.
p = Predictor(conf.model, cache('stoi', lambda: conf.text.stoi), cache('itos', lambda: conf.text.itos))
_____no_output_____
MIT
notebooks/evaluate.ipynb
kalufinnle/python_autocomplete
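As a purely illustrative example (the real maps are built from the training data and cached above), `stoi` and `itos` are just a pair of inverse lookup tables:
itos = ['a', 'b', 'c', '_']                  # toy character vocabulary, for illustration only
stoi = {ch: i for i, ch in enumerate(itos)}
print(stoi['b'])  # 1   -> index fed to the embedding layer
print(itos[1])    # 'b' -> character recovered from a predicted index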
Set model to evaluation mode
_ = conf.model.eval()
_____no_output_____
MIT
notebooks/evaluate.ipynb
kalufinnle/python_autocomplete
A python prompt to test completion.
PROMPT = """from torch import nn from labml_helpers.module import Module from labml_nn.lstm import LSTM class LSTM(Module): def __init__(self, *, n_tokens: int, embedding_size: int, hidden_size int, n_layers int):"""
_____no_output_____
MIT
notebooks/evaluate.ipynb
kalufinnle/python_autocomplete
Get a token. `get_token` predicts character by character greedily (no beam search) until it finds an end-of-token character (a non-alphanumeric character).
%%time res = p.get_token(PROMPT) print('"' + res + '"')
" super" CPU times: user 950 ms, sys: 34.7 ms, total: 984 ms Wall time: 254 ms
MIT
notebooks/evaluate.ipynb
kalufinnle/python_autocomplete
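A minimal sketch of what this greedy, character-by-character completion does, assuming a hypothetical `next_char(prefix)` function that returns the single most likely next character (not the actual `Predictor` API):
def greedy_token(prompt, next_char, max_len=32):
    # Append the most likely character until a non-alphanumeric character ends the token
    out = ''
    for _ in range(max_len):
        ch = next_char(prompt + out)
        if not (ch.isalnum() or ch == '_'):
            break
        out += ch
    return out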
Try another token
res = p.get_token(PROMPT + res) print('"' + res + '"')
"(LSTM"
MIT
notebooks/evaluate.ipynb
kalufinnle/python_autocomplete
Load a sample python file to test our model
with open(str(lab.get_data_path() / 'sample.py'), 'r') as f: sample = f.read() print(sample[-50:])
ckpoint() if __name__ == '__main__': main()
MIT
notebooks/evaluate.ipynb
kalufinnle/python_autocomplete
Test the model on a sample Python file The `evaluate` function defined in [`evaluate.py`](https://github.com/lab-ml/python_autocomplete/blob/master/python_autocomplete/evaluate.py) will predict token by token using the `Predictor`, and simulates an editor autocompletion. Colors: * yellow: the token predicted is wrong and the user needs to type that character. * blue: the token predicted is correct and the user selects it with a special key press, such as TAB or ENTER. * green: autocompleted characters based on the prediction
%%time evaluate(p, sample)
_____no_output_____
MIT
notebooks/evaluate.ipynb
kalufinnle/python_autocomplete
`accuracy` is the fraction of characters predicted correctly. `key_strokes` is the number of key strokes required to write the code with the help of the model, and `length` is the number of characters in the code, that is, the number of key strokes required without the model. *Note that this sample is a classic MNIST example, and the model must have overfitted to similar code (except for its use of [LabML](https://github.com/lab-ml/labml) 😛).* Test anomalies in code We run the model through the same sample code and visualize the probability of predicting each character. Green means the probability of that character is high and red means the probability is low.
anomalies(p, sample)
_____no_output_____
MIT
notebooks/evaluate.ipynb
kalufinnle/python_autocomplete
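A small worked example of how these numbers relate, using made-up values rather than the output of the run above:
length = 1000        # key strokes needed without the model
key_strokes = 320    # key strokes needed with the model's help (assumed)
correct = 750        # characters predicted correctly (assumed)
accuracy = correct / length          # 0.75
savings = 1 - key_strokes / length   # 0.68, fraction of typing saved
print(accuracy, savings)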
Here we try to autocomplete 100 characters
sample = """import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.utils.data from torchvision import datasets, transforms from labml import lab class Model(nn.Module): """ complete(p, sample, 100)
_____no_output_____
MIT
notebooks/evaluate.ipynb
kalufinnle/python_autocomplete
MLP 208 * Operate on 16000 GenCode 34 seqs. * 5-way cross validation. Save best model per CV. * Report mean accuracy from final re-validation with best 5. * Use Adam with a learning rate decay schedule.
NC_FILENAME='ncRNA.gc34.processed.fasta' PC_FILENAME='pcRNA.gc34.processed.fasta' DATAPATH="" try: from google.colab import drive IN_COLAB = True PATH='/content/drive/' drive.mount(PATH) DATAPATH=PATH+'My Drive/data/' # must end in "/" NC_FILENAME = DATAPATH+NC_FILENAME PC_FILENAME = DATAPATH+PC_FILENAME except: IN_COLAB = False DATAPATH="" EPOCHS=200 SPLITS=5 K=1 VOCABULARY_SIZE=4**K+1 # e.g. K=3 => 64 DNA K-mers + 'NNN' EMBED_DIMEN=16 FILENAME='MLP208' NEURONS=32 import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import ShuffleSplit from sklearn.model_selection import cross_val_score from sklearn.model_selection import RepeatedKFold from sklearn.model_selection import StratifiedKFold import tensorflow as tf from tensorflow import keras from keras.wrappers.scikit_learn import KerasRegressor from keras.models import Sequential from keras.layers import Bidirectional from keras.layers import GRU from keras.layers import Dense from keras.layers import LayerNormalization import time dt='float32' tf.keras.backend.set_floatx(dt)
_____no_output_____
MIT
Workshop/MLP_208.ipynb
ShepherdCode/ShepherdML
Build model
def compile_model(model): adam_default_learn_rate = 0.001 schedule = tf.keras.optimizers.schedules.ExponentialDecay( initial_learning_rate = adam_default_learn_rate*10, #decay_steps=100000, decay_rate=0.96, staircase=True) decay_steps=10000, decay_rate=0.99, staircase=True) # learn rate = initial_learning_rate * decay_rate ^ (step / decay_steps) opt = tf.keras.optimizers.Adam(learning_rate=schedule) bc=tf.keras.losses.BinaryCrossentropy(from_logits=False) print("COMPILE...") model.compile(loss=bc, optimizer=opt, metrics=["accuracy"]) print("...COMPILED") return model def build_model(maxlen): act="elu" #embed_layer = keras.layers.Embedding( # VOCABULARY_SIZE,EMBED_DIMEN,input_length=maxlen); dense1_layer = keras.layers.Dense(NEURONS, activation=act,dtype=dt, input_dim=VOCABULARY_SIZE) dense2_layer = keras.layers.Dense(NEURONS, activation=act,dtype=dt) #dense3_layer = keras.layers.Dense(NEURONS, activation=act,dtype=dt) output_layer = keras.layers.Dense(1, activation="sigmoid",dtype=dt) mlp = keras.models.Sequential() #mlp.add(embed_layer) mlp.add(dense1_layer) mlp.add(dense2_layer) #mlp.add(dense3_layer) mlp.add(output_layer) mlpc = compile_model(mlp) return mlpc
_____no_output_____
MIT
Workshop/MLP_208.ipynb
ShepherdCode/ShepherdML
Load and partition sequences
# Assume file was preprocessed to contain one line per seq. # Prefer Pandas dataframe but df does not support append. # For conversion to tensor, must avoid python lists. def load_fasta(filename,label): DEFLINE='>' labels=[] seqs=[] lens=[] nums=[] num=0 with open (filename,'r') as infile: for line in infile: if line[0]!=DEFLINE: seq=line.rstrip() num += 1 # first seqnum is 1 seqlen=len(seq) nums.append(num) labels.append(label) seqs.append(seq) lens.append(seqlen) df1=pd.DataFrame(nums,columns=['seqnum']) df2=pd.DataFrame(labels,columns=['class']) df3=pd.DataFrame(seqs,columns=['sequence']) df4=pd.DataFrame(lens,columns=['seqlen']) df=pd.concat((df1,df2,df3,df4),axis=1) return df def separate_X_and_y(data): y= data[['class']].copy() X= data.drop(columns=['class','seqnum','seqlen']) return (X,y)
_____no_output_____
MIT
Workshop/MLP_208.ipynb
ShepherdCode/ShepherdML
Make K-mers
def make_kmer_table(K): npad='N'*K shorter_kmers=[''] for i in range(K): longer_kmers=[] for mer in shorter_kmers: longer_kmers.append(mer+'A') longer_kmers.append(mer+'C') longer_kmers.append(mer+'G') longer_kmers.append(mer+'T') shorter_kmers = longer_kmers all_kmers = shorter_kmers kmer_dict = {} kmer_dict[npad]=0 value=1 for mer in all_kmers: kmer_dict[mer]=value value += 1 return kmer_dict KMER_TABLE=make_kmer_table(K) def strings_to_vectors(data,uniform_len): all_seqs=[] for seq in data['sequence']: i=0 seqlen=len(seq) kmers=[] while i < seqlen-K+1 -1: # stop at minus one for spaced seed #kmer=seq[i:i+2]+seq[i+3:i+5] # SPACED SEED 2/1/2 for K=4 kmer=seq[i:i+K] i += 1 value=KMER_TABLE[kmer] kmers.append(value) pad_val=0 while i < uniform_len: kmers.append(pad_val) i += 1 all_seqs.append(kmers) pd2d=pd.DataFrame(all_seqs) return pd2d # return 2D dataframe, uniform dimensions def make_kmers(MAXLEN,train_set): (X_train_all,y_train_all)=separate_X_and_y(train_set) X_train_kmers=strings_to_vectors(X_train_all,MAXLEN) # From pandas dataframe to numpy to list to numpy num_seqs=len(X_train_kmers) tmp_seqs=[] for i in range(num_seqs): kmer_sequence=X_train_kmers.iloc[i] tmp_seqs.append(kmer_sequence) X_train_kmers=np.array(tmp_seqs) tmp_seqs=None labels=y_train_all.to_numpy() return (X_train_kmers,labels) def make_frequencies(Xin): Xout=[] VOCABULARY_SIZE= 4**K + 1 # plus one for 'NNN' for seq in Xin: freqs =[0] * VOCABULARY_SIZE total = 0 for kmerval in seq: freqs[kmerval] += 1 total += 1 for c in range(VOCABULARY_SIZE): freqs[c] = freqs[c]/total Xout.append(freqs) Xnum = np.asarray(Xout) return (Xnum) def make_slice(data_set,min_len,max_len): slice = data_set.query('seqlen <= '+str(max_len)+' & seqlen>= '+str(min_len)) return slice
_____no_output_____
MIT
Workshop/MLP_208.ipynb
ShepherdCode/ShepherdML
Cross validation
def do_cross_validation(X,y,given_model): cv_scores = [] fold=0 splitter = ShuffleSplit(n_splits=SPLITS, test_size=0.1, random_state=37863) for train_index,valid_index in splitter.split(X): fold += 1 X_train=X[train_index] # use iloc[] for dataframe y_train=y[train_index] X_valid=X[valid_index] y_valid=y[valid_index] # Avoid continually improving the same model. model = compile_model(keras.models.clone_model(given_model)) bestname=DATAPATH+FILENAME+".cv."+str(fold)+".best" mycallbacks = [keras.callbacks.ModelCheckpoint( filepath=bestname, save_best_only=True, monitor='val_accuracy', mode='max')] print("FIT") start_time=time.time() history=model.fit(X_train, y_train, # batch_size=10, default=32 works nicely epochs=EPOCHS, verbose=1, # verbose=1 for ascii art, verbose=0 for none callbacks=mycallbacks, validation_data=(X_valid,y_valid) ) end_time=time.time() elapsed_time=(end_time-start_time) print("Fold %d, %d epochs, %d sec"%(fold,EPOCHS,elapsed_time)) pd.DataFrame(history.history).plot(figsize=(8,5)) plt.grid(True) plt.gca().set_ylim(0,1) plt.show() best_model=keras.models.load_model(bestname) scores = best_model.evaluate(X_valid, y_valid, verbose=0) print("%s: %.2f%%" % (best_model.metrics_names[1], scores[1]*100)) cv_scores.append(scores[1] * 100) print() print("%d-way Cross Validation mean %.2f%% (+/- %.2f%%)" % (fold, np.mean(cv_scores), np.std(cv_scores)))
_____no_output_____
MIT
Workshop/MLP_208.ipynb
ShepherdCode/ShepherdML
Train on RNA lengths 200-1Kb
MINLEN=200 MAXLEN=1000 print("Load data from files.") nc_seq=load_fasta(NC_FILENAME,0) pc_seq=load_fasta(PC_FILENAME,1) train_set=pd.concat((nc_seq,pc_seq),axis=0) nc_seq=None pc_seq=None print("Ready: train_set") #train_set print ("Compile the model") model=build_model(MAXLEN) print ("Summarize the model") print(model.summary()) # Print this only once model.save(DATAPATH+FILENAME+'.model') print ("Data prep") subset=make_slice(train_set,MINLEN,MAXLEN)# One array to two: X and y print ("Data reshape") (X_train,y_train)=make_kmers(MAXLEN,subset) X_train=make_frequencies(X_train) print ("Cross valiation") do_cross_validation(X_train,y_train,model) print ("Done")
_____no_output_____
MIT
Workshop/MLP_208.ipynb
ShepherdCode/ShepherdML
Looking into random number generation in JAX, and still finding it confusing > I looked into random number generation in JAX, but it is still not entirely clear to me - toc: true - badges: true - comments: true - categories: [Python, JAX, DeepLearning] - image: images/jax-samune.png JAX is popular these days. For a detailed introduction to JAX, see the [many articles about it](https://www.google.com/search?q=jax%E3%81%A8%E3%81%AF) or [https://github.com/google/jax](https://github.com/google/jax); here I want to study how JAX generates random numbers. Ensuring reproducibility of random numbers in NumPy JAX is a library designed very much with NumPy in mind, but it differs substantially when it comes to random numbers. First, let's look at a NumPy example.
import numpy as np # numpy x = np.random.rand() print('x:', x) for i in range(10): x = np.random.rand() print('x:', x)
x: 0.7742336894342167 x: 0.45615033221654855 x: 0.5684339488686485 x: 0.018789800436355142 x: 0.6176354970758771 x: 0.6120957227224214 x: 0.6169339968747569 x: 0.9437480785146242 x: 0.6818202991034834 x: 0.359507900573786
Apache-2.0
_notebooks/2021-11-14-jax-random.ipynb
abap34/my-website
We get a different result every time. To pin the results down, we write code like this.
for i in range(10): np.random.seed(0) x = np.random.rand() print('x:', x)
x: 0.5488135039273248 x: 0.5488135039273248 x: 0.5488135039273248 x: 0.5488135039273248 x: 0.5488135039273248 x: 0.5488135039273248 x: 0.5488135039273248 x: 0.5488135039273248 x: 0.5488135039273248 x: 0.5488135039273248
Apache-2.0
_notebooks/2021-11-14-jax-random.ipynb
abap34/my-website
Incidentally, in NumPy you can inspect the state of the random number generator with `np.random.get_state()`.
np.random.seed(0) state = np.random.get_state() print(state[0]) print('[', *state[1][:10], '...') print(*state[1][-10:], ']') np.random.seed(20040304) state = np.random.get_state() print(state[0]) print('[', *state[1][:10], '...') print(*state[1][-10:], ']')
MT19937 [ 20040304 3876245041 2868517820 934780921 2883411521 496831348 4198668490 1502140500 1427494545 3747657433 ... 744972032 1872723303 3654422950 1926579586 2599193113 3757568530 3621035041 2338180567 2885432439 2647019928 ]
Apache-2.0
_notebooks/2021-11-14-jax-random.ipynb
abap34/my-website
Put differently, NumPy's random number generation depends on a single global state. This leads to the problems described below. Parallel execution, execution order, and reproducibility Let's build a simple game: the functions `a` and `b` each generate a random number, and whichever returns the larger number wins.
a = lambda : np.random.rand() b = lambda : np.random.rand() def battle(): if a() > b(): return 'A' else: return 'B' for i in range(10): print('winner is', battle(), '!')
winner is B ! winner is A ! winner is B ! winner is A ! winner is A ! winner is A ! winner is B ! winner is B ! winner is B ! winner is A !
Apache-2.0
_notebooks/2021-11-14-jax-random.ipynb
abap34/my-website
If we run it again, the results change.
for i in range(10): print('winner is', battle(), '!')
winner is B ! winner is A ! winner is A ! winner is B ! winner is B ! winner is B ! winner is A ! winner is A ! winner is A ! winner is B !
Apache-2.0
_notebooks/2021-11-14-jax-random.ipynb
abap34/my-website
So how can we make these results reproducible? A simple approach looks like this.
res1 = [] np.random.seed(0) for i in range(10): res1.append(battle()) # もう一回 res2 = [] np.random.seed(0) for i in range(10): res2.append(battle()) print('1 | 2') print('=====') for i in range(10): print(res1[i], '|', res2[i])
1 | 2 ===== B | B A | A B | B B | B A | A A | A B | B B | B B | B B | B
Apache-2.0
_notebooks/2021-11-14-jax-random.ipynb
abap34/my-website
And indeed we get the same results. However, there is a pitfall here. Let's look at how the `battle` function behaves in a bit more detail and check when `a` and `b` are called.
def a(): print('a is called!') return np.random.rand() def b(): print('b is called!') return np.random.rand() for i in range(5): battle() print('======')
a is called! b is called! ====== a is called! b is called! ====== a is called! b is called! ====== a is called! b is called! ====== a is called! b is called! ======
Apache-2.0
_notebooks/2021-11-14-jax-random.ipynb
abap34/my-website
As you can see, `a` is always called before `b`. Up to this point nothing seems wrong, but that is not actually the case. What happens when we want this code to run fast, that is, when we parallelize it? Since the functions `a` and `b` do not depend on each other, it seems safe to run them in parallel. In reality, however, the values returned by `a` and `b` depend on the order in which they are called! So, as it stands, calling `np.random.seed` would lose its meaning. Random number generation in JAX Now let's look at how JAX generates random numbers. As described above, we want a random number generator that satisfies the following conditions: - it is reproducible - it can be parallelized To achieve this, JAX uses the concept of a key.
key = jax.random.PRNGKey(0) key
_____no_output_____
Apache-2.0
_notebooks/2021-11-14-jax-random.ipynb
abap34/my-website
A key is simply an object made up of two unsigned integers, and JAX generates random numbers from it.
jax.random.normal(key)
_____no_output_____
Apache-2.0
_notebooks/2021-11-14-jax-random.ipynb
abap34/my-website
And as long as the key is the same, the same value is generated.
print(key, jax.random.normal(key)) print(key, jax.random.normal(key)) print(key, jax.random.normal(key)) print(key, jax.random.normal(key)) print(key, jax.random.normal(key)) print(key, jax.random.normal(key)) print(key, jax.random.normal(key)) print(key, jax.random.normal(key)) print(key, jax.random.normal(key))
[0 0] -0.20584235 [0 0] -0.20584235 [0 0] -0.20584235 [0 0] -0.20584235 [0 0] -0.20584235 [0 0] -0.20584235 [0 0] -0.20584235 [0 0] -0.20584235 [0 0] -0.20584235
Apache-2.0
_notebooks/2021-11-14-jax-random.ipynb
abap34/my-website
That said, this only gives us a single number. When we want more random numbers, we use `jax.random.split`.
key1, key2 = jax.random.split(key) print(key, '->', key1, key2)
[0 0] -> [4146024105 967050713] [2718843009 1272950319]
Apache-2.0
_notebooks/2021-11-14-jax-random.ipynb
abap34/my-website
`jax.random.split` produces two keys from one key, and these keys are then used to generate new random numbers. Incidentally, the two keys are equivalent, but by convention the second one is used for generating new random numbers, while the first is kept for producing further keys (see the code below).
# By convention, the second key is used as sub_key for generating new random numbers, and the first is used to create further keys (writing it this way also discards the old key that was split; be careful, because a leftover key can accidentally produce the same random numbers again). key, sub_key = jax.random.split(key) key, subsub_key = jax.random.split(key)
_____no_output_____
Apache-2.0
_notebooks/2021-11-14-jax-random.ipynb
abap34/my-website
Also, keys split from the same key are always identical.
def check_split(seed): key = jax.random.PRNGKey(seed) key, sub_key = jax.random.split(key) print(key, '->', key, sub_key) check_split(0) check_split(0) check_split(0) print('=============================================================================') check_split(2004) check_split(2004) check_split(2004)
[4146024105 967050713] -> [4146024105 967050713] [2718843009 1272950319] [4146024105 967050713] -> [4146024105 967050713] [2718843009 1272950319] [4146024105 967050713] -> [4146024105 967050713] [2718843009 1272950319] ============================================================================= [2965909967 2346697052] -> [2965909967 2346697052] [2813626588 818499380] [2965909967 2346697052] -> [2965909967 2346697052] [2813626588 818499380] [2965909967 2346697052] -> [2965909967 2346697052] [2813626588 818499380]
Apache-2.0
_notebooks/2021-11-14-jax-random.ipynb
abap34/my-website
You can also split into any number of keys at once. For example, you can obtain 10 keys from a single key as follows.
# You can split into as many keys as you like. key = jax.random.PRNGKey(0) key, *sub_keys = jax.random.split(key, num=10) key sub_keys
_____no_output_____
Apache-2.0
_notebooks/2021-11-14-jax-random.ipynb
abap34/my-website
Sequential equivalence NumPy guarantees what is called sequential equivalence. Roughly speaking, it guarantees that drawing N random numbers all at once is equivalent to drawing them one at a time and concatenating the results (the code below makes this easy to see).
# one at a time np.random.seed(0) print(np.array([np.random.rand() for i in range(10)])) print('================================================') # all at once np.random.seed(0) print(np.random.rand(10))
[0.5488135 0.71518937 0.60276338 0.54488318 0.4236548 0.64589411 0.43758721 0.891773 0.96366276 0.38344152] ================================================ [0.5488135 0.71518937 0.60276338 0.54488318 0.4236548 0.64589411 0.43758721 0.891773 0.96366276 0.38344152]
Apache-2.0
_notebooks/2021-11-14-jax-random.ipynb
abap34/my-website
In JAX, however, this does not hold. To get 10 random numbers in JAX you could either: - prepare 10 keys - generate 10 numbers from a single key
# Approach 1: prepare 10 keys key = jax.random.PRNGKey(0) key, *sub_keys = jax.random.split(key, 11) print(np.array([jax.random.normal(sub_key) for sub_key in sub_keys])) # Approach 2: generate 10 numbers from a single key key = jax.random.PRNGKey(0) print(np.array(jax.random.normal(key, shape=(10,))))
[-0.372111 0.2642311 -0.18252774 -0.7368198 -0.44030386 -0.15214427 -0.6713536 -0.59086424 0.73168874 0.56730247]
Apache-2.0
_notebooks/2021-11-14-jax-random.ipynb
abap34/my-website
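As a small follow-up sketch (not part of the original post): because each draw gets its own key, the per-key draws can be computed in one vectorized call, which is what makes the scheme both reproducible and parallelizable.
import jax
key = jax.random.PRNGKey(0)
keys = jax.random.split(key, 10)             # array of 10 keys, shape (10, 2)
samples = jax.vmap(jax.random.normal)(keys)  # one draw per key, in a single vectorized call
print(samples.shape)                         # (10,)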
Lumped Elements Circuits In this notebook, we construct various networks from basic lumped elements (resistor, capacitor, inductor), with the 'classic' and the `Circuit` approach. Generally the `Circuit` approach is more verbose than the 'classic' way of building a circuit. However, as the circuit complexity increases, in particular when components are connected in parallel, the `Circuit` approach becomes interesting as it increases the readability of the code. Moreover, a `Circuit` object can be plotted using its `plot_graph()` method, which is useful to quickly check that the circuit is built as expected.
import numpy as np # for np.allclose() to check that S-params are similar import skrf as rf rf.stylely()
_____no_output_____
BSD-3-Clause
doc/source/examples/circuit/Lumped Element Circuits.ipynb
nmaterise/scikit-rf
LC Series Circuit In this section we reproduce a simple equivalent model of a capacitor $C$, as illustrated by the figure below:
# reference LC circuit made in Designer LC_designer = rf.Network('designer_capacitor_30_80MHz_simple.s2p') # scikit-rf: manually connecting networks line = rf.media.DefinedGammaZ0(frequency=LC_designer.frequency, z0=50) LC_manual = line.inductor(24e-9) ** line.capacitor(70e-12) # scikit-rf: using Circuit builder port1 = rf.Circuit.Port(frequency=LC_designer.frequency, name='port1', z0=50) port2 = rf.Circuit.Port(frequency=LC_designer.frequency, name='port2', z0=50) cap = rf.Circuit.SeriesImpedance(frequency=LC_designer.frequency, name='cap', z0=50, Z=1/(1j*LC_designer.frequency.w*70e-12)) ind = rf.Circuit.SeriesImpedance(frequency=LC_designer.frequency, name='ind', z0=50, Z=1j*LC_designer.frequency.w*24e-9) # NB: it is also possible to create 2-port lumped elements like: # line = rf.media.DefinedGammaZ0(frequency=LC_designer.frequency, z0=50) # cap = line.capacitor(70e-12, name='cap') # ind = line.inductor(24e-9, name='ind') connections = [ [(port1, 0), (cap, 0)], [(cap, 1), (ind, 0)], [(ind, 1), (port2, 0)] ] circuit = rf.Circuit(connections) LC_from_circuit = circuit.network # testing the equivalence of the results print(np.allclose(LC_designer.s, LC_manual.s)) print(np.allclose(LC_designer.s, LC_from_circuit.s)) circuit.plot_graph(network_labels=True, edge_labels=True, port_labels=True)
_____no_output_____
BSD-3-Clause
doc/source/examples/circuit/Lumped Element Circuits.ipynb
nmaterise/scikit-rf
A More Advanced Equivalent Model In this section we reproduce an equivalent model of a capacitor $C$, as illustrated by the figure below:
# Reference results from ANSYS Designer LCC_designer = rf.Network('designer_capacitor_30_80MHz_adv.s2p') # scikit-rf: usual way, but this time this is more tedious to deal with connection and port number freq = LCC_designer.frequency line = rf.media.DefinedGammaZ0(frequency=freq, z0=50) elements1 = line.resistor(1e-2) ** line.inductor(24e-9) ** line.capacitor(70e-12) elements2 = line.resistor(20e6) T_in = line.tee() T_out = line.tee() ntw = rf.connect(T_in, 1, elements1, 0) ntw = rf.connect(ntw, 2, elements2, 0) ntw = rf.connect(ntw, 1, T_out, 1) ntw = rf.innerconnect(ntw, 1, 2) LCC_manual = ntw ** line.shunt_capacitor(50e-12) # scikit-rf: using Circuit builder freq = LCC_designer.frequency port1 = rf.Circuit.Port(frequency=freq, name='port1', z0=50) port2 = rf.Circuit.Port(frequency=freq, name='port2', z0=50) line = rf.media.DefinedGammaZ0(frequency=freq, z0=50) cap = line.capacitor(70e-12, name='cap') ind = line.inductor(24e-9, name='ind') res_series = line.resistor(1e-2, name='res_series') res_parallel = line.resistor(20e6, name='res_parallel') cap_shunt = line.capacitor(50e-12, name='cap_shunt') ground = rf.Circuit.Ground(frequency=freq, name='ground', z0=50) connections = [ [(port1, 0), (res_series, 0), (res_parallel, 0)], [(res_series, 1), (cap, 0)], [(cap, 1), (ind, 0)], [(ind, 1), (cap_shunt, 0), (res_parallel, 1), (port2, 0)], [(cap_shunt, 1), (ground, 0)], ] circuit = rf.Circuit(connections) LCC_from_circuit = circuit.network # testing the equivalence of the results print(np.allclose(LCC_designer.s, LCC_manual.s)) print(np.allclose(LCC_designer.s, LCC_from_circuit.s)) circuit.plot_graph(network_labels=True, edge_labels=True, port_labels=True)
_____no_output_____
BSD-3-Clause
doc/source/examples/circuit/Lumped Element Circuits.ipynb
nmaterise/scikit-rf
Pass band filter Below we construct a pass-band filter, from an example given in [Microwaves101](https://www.microwaves101.com/encyclopedias/lumped-element-filter-calculator):
# Reference result calculated from Designer
passband_designer = rf.Network('designer_bandpass_filter_450_550MHz.s2p')

# scikit-rf: the filter by cascading all lumped-elements
freq = passband_designer.frequency
passband_manual = line.shunt_capacitor(25.406e-12) ** line.shunt_inductor(4.154e-9) ** \
                  line.capacitor(2.419e-12) ** line.inductor(43.636e-9) ** \
                  line.shunt_capacitor(25.406e-12) ** line.shunt_inductor(4.154e-9)

# scikit-rf: the filter with the Circuit builder
freq = passband_designer.frequency
line = rf.media.DefinedGammaZ0(frequency=freq)
C1 = line.capacitor(25.406e-12, name='C1')
C2 = line.capacitor(2.419e-12, name='C2')
C3 = line.capacitor(25.406e-12, name='C3')
L1 = line.inductor(4.154e-9, name='L1')
L2 = line.inductor(43.636e-9, name='L2')
L3 = line.inductor(4.154e-9, name='L3')
port1 = rf.Circuit.Port(frequency=freq, name='port1', z0=50)
port2 = rf.Circuit.Port(frequency=freq, name='port2', z0=50)
ground = rf.Circuit.Ground(frequency=freq, name='ground', z0=50)

connections = [
    [(port1, 0), (C1, 0), (L1, 0), (C2, 0)],
    [(C2, 1), (L2, 0)],
    [(L2, 1), (C3, 0), (L3, 0), (port2, 0)],
    [(C1, 1), (C3, 1), (L1, 1), (L3, 1), (ground, 0)],
]

circuit = rf.Circuit(connections)
passband_circuit = circuit.network
passband_circuit.name = 'Pass-band circuit'

passband_circuit.plot_s_db(m=0, n=0, lw=2)
passband_circuit.plot_s_db(m=1, n=0, lw=2)
passband_designer.plot_s_db(m=0, n=0, lw=2, ls='-.')
passband_designer.plot_s_db(m=1, n=0, lw=2, ls='-.')

circuit.plot_graph(network_labels=True, port_labels=True, edge_labels=True)
_____no_output_____
BSD-3-Clause
doc/source/examples/circuit/Lumped Element Circuits.ipynb
nmaterise/scikit-rf
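A natural follow-up is to extract the pass-band edges from the synthesized response. The sketch below is a minimal post-processing example, assuming `passband_circuit` from the previous cell is in scope and that the band is a single contiguous region; it uses the standard `skrf.Network` attributes `.f` (frequency in Hz) and `.s_db` (S-parameters in dB).

```python
import numpy as np

f = passband_circuit.f                   # frequency points in Hz
s21_db = passband_circuit.s_db[:, 1, 0]  # |S21| in dB

peak = s21_db.max()
in_band = s21_db >= peak - 3.0           # points within 3 dB of the peak (assumes one contiguous band)

f_lo, f_hi = f[in_band][0], f[in_band][-1]
print(f'-3 dB band: {f_lo/1e6:.1f} MHz to {f_hi/1e6:.1f} MHz '
      f'(bandwidth ~ {(f_hi - f_lo)/1e6:.1f} MHz)')
```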
Boosting: HyperparametersImport [`GradientBoostingClassifier`](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html) and [`GradientBoostingRegressor`](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html) from `sklearn` and explore the hyperparameters. Import Boosting Algorithm for Classification & Regression
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor

print(GradientBoostingClassifier())
print(GradientBoostingRegressor())
GradientBoostingClassifier(criterion='friedman_mse', init=None, learning_rate=0.1, loss='deviance', max_depth=3, max_features=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=100, n_iter_no_change=None, presort='auto', random_state=None, subsample=1.0, tol=0.0001, validation_fraction=0.1, verbose=0, warm_start=False) GradientBoostingRegressor(alpha=0.9, criterion='friedman_mse', init=None, learning_rate=0.1, loss='ls', max_depth=3, max_features=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=100, n_iter_no_change=None, presort='auto', random_state=None, subsample=1.0, tol=0.0001, validation_fraction=0.1, verbose=0, warm_start=False)
MIT
ml_algorithms/06_Boosting/06_03/End/06_03.ipynb
joejoeyjoseph/playground
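The printouts above only show the default values. In practice the hyperparameters that usually matter most for gradient boosting are `n_estimators`, `learning_rate` and `max_depth`, and a common way to explore them is a small grid search. The sketch below is a generic example only; `X_train` and `y_train` are placeholders for your own data, not arrays defined in this notebook.

```python
from sklearn.model_selection import GridSearchCV

param_grid = {
    'n_estimators': [50, 100, 250],
    'learning_rate': [0.01, 0.1, 1.0],
    'max_depth': [1, 3, 5],
}

gb = GradientBoostingClassifier()
cv = GridSearchCV(gb, param_grid, cv=5, scoring='accuracy')
# cv.fit(X_train, y_train)            # X_train / y_train are placeholders for your own data
# print(cv.best_params_, cv.best_score_)
```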
Simulations: Dynamic learning with two learners, one oracle, and *heuristic evidence-weighting function*

This notebook provides code to simulate 1D boundary learning in two agents learning from each other and from an "oracle" that always tells the truth. Each agent receives labels from the other agent (based on her current belief) and from the oracle (based on the ground truth). Neither agent knows which source is telling the truth.

Each agent begins with an initial belief about the location of the category boundary, so that the two agent beliefs together can be viewed as a point in a 2D plane. Across one "batch" of learning, it is assumed that each agent moves her boundary toward a weighted average of the difference between her current boundary and the boundaries of the other two sources (i.e., the other learner and the oracle). So:

$$\Delta_b = w_c * d_c + (1 - w_c) * d_f$$

...where $\Delta_b$ is the change in the learner's boundary, $w_c$ is the (proportional) weight given to the closer source, $1-w_c$ is the (proportional) weight given to the more distal source, and $d_c$ and $d_f$ are the distances from the closer/more distal source to the learner's current boundary.

The amount moved on each batch is determined by a fixed rate parameter r:

$$b_{t+1} = b_t + r * \Delta_b$$

As shown in the empirical studies, the proportional weight given to each source is determined by a function that decays nonlinearly with the distance between the source's boundary and the learner's current boundary:

$$w_c = 1 - (d_c - o)/(d_c - o + s)$$

...where $d_c$ is the distance from the learner's boundary to the closer source, $o$ is an offset term (distances less than $o$ are treated as 0), and $s$ controls the slope of the decay. The defaults for $o$ and $s$ are the best-fitting parameters determined in Experiment 2.

The functions and code are as follows:

get.pwt: given a distance between learner's boundary and source, and parameters for the trust decay, return the weight given to the source.

update.bound: given the learner's boundary, a current boundary for each source, a rate constant, and parameters for the decay curve, return the learner's new boundary.

dynamic.sim: given starting boundaries for each learner, the true boundary, a learning rate constant, number of batches, and parameters for the trust decay curves, return a matrix indicating each learner's boundary at each epoch of learning.

Heuristic evidence-weighting function

This function computes the weight given to a source as a function of its distance from the learner's current boundary, according to the heuristic evidence-weighting function determined in Experiment 2. This is a proportional weight $w_c$ relative to some fixed second source, which in turn receives a weight of $1 - w_c$. The empirical studies showed that $w_c \approx 1$ when the source boundary is within $o$ of the learner's boundary--therefore in simulations, we compute $w_c$ for the closest source boundary, then give the remaining $1-w_c$ weight to the second source.
get.pwt <- function(d, s=24, o=5, p=1){
  # Computes the proportion of weight given to a source
  # based on the distance between learner and source boundary
  #
  # d = vector of distances between learner and source boundary for n sources
  # s = slope of HEW curve
  # o = offset of HEW curve---distances less than this value will return an output value of 1.0
  # p = power to which resulting proportion is raised. Experimental, use default of 1.0
  # out = returned vector of proportional weights for far source
  ###################
  d <- d - o            # Shift distances by offset so d = o becomes d = 0
  tmp <- 1 - (d/(d+s))  # Proportional weight given to distal source
  tmp <- c(tmp)^p       # Raise to power p, not currently used
  tmp[tmp > 1] <- 1.0   # If weight is larger than 1 replace with 1
  tmp
}
_____no_output_____
MIT
Models/AgentBasedModels.ipynb
ttrogers/frigo-chen-rogers
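As a quick worked example of this curve with the Experiment 2 defaults ($s = 24$, $o = 5$), a source sitting 29 units from the learner's boundary receives exactly half of the weight:

$$w_c = 1 - \frac{29 - 5}{(29 - 5) + 24} = 1 - \frac{24}{48} = 0.5$$

and any source within 5 units of the boundary receives the full weight of 1.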
Here is what the weighting function looks like with default parameters from experiment 2, where s = 24 and o = 5:
plot(0:150, get.pwt(0:150, s=24, o=5), type = "l", lwd = 3, pch = 16, ylab = "Source weight", xlab = "Source distance", ylim = c(0,1))
_____no_output_____
MIT
Models/AgentBasedModels.ipynb
ttrogers/frigo-chen-rogers
Other possible weighting functions Here we define some other weighting functions to investigate group learning dynamics under different hypotheses about weighting. Equal weight to both sources This function can be used in place of get.pwt to simulate learning where both sources always get equal weight.
get.samewt <- function(d, s=NA, o=NA, p=NA){
  # Returns a vector of 0.5 for each element of d
  # essentially always giving the same .5 weight to each source
  # All parameters ignored except d, only included to work with other code
  #############
  out <- rep(.5, times=length(d))
  out
}
_____no_output_____
MIT
Models/AgentBasedModels.ipynb
ttrogers/frigo-chen-rogers
Rectified linear weighting This function returns a source weight that declines linearly with the source's distance from the learner's current boundary, rectified at 0 and 1.
get.rlwt <- function(d, s=0.01, o=4.5, p=0){
  # Rectified linear weighting function
  # d = vector of distances for sources to be weighted
  # o = offset; distances less than this get weight 1
  # s = slope, rate at which weight diminishes with distance
  # p = proportion shrinkage from 1 and 0.
  # Returns vector of weights, one for each element in d
  #############
  out <- 1 - ((d - o) * s)
  out[d <= o] <- 1.0
  out[out < 0] <- 0.0
  out <- (out * (1-p)) + p/2
  out
}
_____no_output_____
MIT
Models/AgentBasedModels.ipynb
ttrogers/frigo-chen-rogers
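Written out as a formula, the weight computed by `get.rlwt` above is a clipped linear ramp in the distance $d$, shrunk toward 0.5 by the proportion $p$:

$$w(d) = (1 - p)\,\operatorname{clip}\!\big(1 - s\,(d - o),\; 0,\; 1\big) + \tfrac{p}{2}$$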
Here is what the rectified weighting function looks like. $o$ shifts it left/right, $s$ changes the slope.
plot(0:300, get.rlwt(0:300, s=.005, o=4.5, p=.0), type="l", ylim = c(0,1), ylab="Source weight", xlab="Source distance")
_____no_output_____
MIT
Models/AgentBasedModels.ipynb
ttrogers/frigo-chen-rogers
Sigmoid This returns a source weight as a sigmoid of its distance from the learner's boundary. Like HEW and rectified linear, the function is bounded at $[0,1]$.
get.sigwt <- function(d, s=1, o=4.5, p=NA){
  # Sigmoidal weighting function
  # d = vector of distances for sources to be weighted
  # o = offset, shifts sigmoid left/right
  # s = slope of sigmoid
  # p = for compatibility, not used
  # Returns vector of weights, one for each element in d
  #############
  d <- c(max(d) - d) - max(d)/2
  out <- 1 / (1 + exp(-1 * (o + s*d)))
  out
}
_____no_output_____
MIT
Models/AgentBasedModels.ipynb
ttrogers/frigo-chen-rogers
Here is the plot:
plot(0:300, get.sigwt(0:300, s=.05, o=-3), type="l", ylim = c(0,1), ylab="Source weight", xlab="Source distance")
_____no_output_____
MIT
Models/AgentBasedModels.ipynb
ttrogers/frigo-chen-rogers
Update the learner's current boundary according to the evidence-weighting function. Note that different results are obtained depending on whether you use the curve to compute the close-source weight first or the far-source weight first. Both lead to stable states where learners disagree, but Experiment 2 shows that a source with a very close boundary receives all the learner's weight--so we typically compute the closest source weight first and attribute the remaining weight to the second source.
update.bound <- function(i, s1, s2, r = 1, weightfirst ="c", closebig=T, f = get.pwt, fpars=c(24, 5, 1)){
  ##############
  # Updates learner's current boundary according to nonlinear weighting function
  #
  # i = learner's initial boundary
  # s1, s2 = source 1 and 2 boundaries
  # r = rate of boundary change
  # f = function to use for weighting, get.pwt by default
  # fpars = parameters for the weighting function: slope, offset, power
  # closebig: is the closer boundary toward the larger end of the stimulus range?
  # weightfirst = which source to weight first---default (c) is closest, use anything else for distal
  # returns the new boundary
  ######
  # Determine which is closer and which is distal source
  if(abs(i-s1) < abs(i-s2)){
    cs <- s1
    ds <- s2
  } else{
    cs <- s2
    ds <- s1
  }
  # Use function f to compute weights for two sources
  if(weightfirst=="c"){ # If close source is computed first
    cwt <- f(abs(cs-i), s=fpars[1], o=fpars[2], p=fpars[3]) # get weight for close source first
    if((cwt < 0.5) & closebig) cwt <- 1 - cwt # closer source gets larger weight if closebig is true
    dwt <- 1 - cwt # Weight for more distal source
  } else{ # If distal source is computed first
    dwt <- f(abs(ds-i), s=fpars[1], o=fpars[2], p=fpars[3]) # get weight for distal source first
    cwt <- 1 - dwt # Weight for closer source
  }
  delta_i <- (ds-i) * dwt + (cs-i) * cwt # Change in boundary
  i <- i + delta_i * r # update boundary
  i
}
_____no_output_____
MIT
Models/AgentBasedModels.ipynb
ttrogers/frigo-chen-rogers
Simulate two learners and one static source This simulation involves two learners and one oracle, as reported in the main paper. The following function generates the sequence of belief-states occupied by each learner over the course of learning, given their starting states, the ground truth, one of the evidence-weighting functions defined above and its parameters, and a constant indicating how quickly beliefs are updated on each round.
dynamic.sim <- function(l1, l2, static, nsteps=100, r=1, f = get.pwt, fpars=c(25,5,1)){
  # Simulates two learners, learning from each other and from one static source
  # l1, l2, static = initial boundaries for learners 1 and 2 and static source
  # nsteps = number of learning steps to simulate
  # r = updating rate for each learner's boundary
  # fpars = parameters for nonlinear weighting function

  out <- matrix(0, nsteps, 2)  # matrix to contain each learner's boundary at each step
  out <- rbind(c(l1,l2), out)  # add initial boundaries to top of matrix

  # Loop to update each learner's bound based on other learner and static bound at previous step
  for(i1 in c(1:nsteps)){
    out[i1+1,1] <- update.bound(out[i1,1], out[i1,2], static, r=r, f=f, fpars=fpars)
    out[i1+1,2] <- update.bound(out[i1,2], out[i1,1], static, r=r, f=f, fpars=fpars)
  }
  out
}
_____no_output_____
MIT
Models/AgentBasedModels.ipynb
ttrogers/frigo-chen-rogers
Plot simulations for a grid of possible initial learning boundaries in the pair

The code below runs the 2-learner simulation several times, with the two learners each beginning with a belief about the category boundary lying somewhere between 0 and 300. For each pair of initial beliefs, it computes how the beliefs change over time and where they stop after 100 "epochs" of learning. Each such trajectory is plotted as a gray line, with the starting beliefs shown as a green point and the ending beliefs shown as a red point. Parameters controlling the simulation are set at the top of the block as follows:

**gridpts** sets the number of starting beliefs simulated for both learners; these will be evenly-spaced in $[0, 300]$.

**gtruth** specifies the location of the ground truth provided by the oracle.

**niter** indicates the number of learning iterations to run.

**uprate** is a constant specifying the rate at which beliefs are updated.

**upfunc** indicates which of the above-defined functions should be used to weight the sources.

**upfunc.pars** is a 3-element vector indicating the slope, offset, and power parameters for the weighting function, in that order.

To reproduce main paper Figure 4A use upfunc <- get.samewt; for panel B use upfunc <- get.pwt.
gridpts <- 20            # Number of grid points in each dimension
gtruth <- 150            # Location of ground-truth boundary
niter <- 200             # Number of learning iterations
uprate <- 0.1            # Proportional rate at which beliefs are updated on each iteration
upfunc <- get.pwt        # Function for computing source weights
upfunc.pars <- c(24,5,1) # Parameters for weighting function

# Create empty plot frame:
plot(150, 150, type="n", xlim = c(0,300), ylim = c(0,300), xlab = "L1 belief", ylab="L2 belief")
abline(0,1, lwd=2, col = "blue")   # line where learners have same boundary
abline(h=150, lwd=2, col = "blue") # ground truth at 150
abline(v=150, lwd=2, col = "blue") # ground truth at 150

grspace <- floor(300/gridpts) # Compute grid spacing

for(i1 in c(0:gridpts)) for(i2 in c(0:gridpts)){ # loop over grid points for l1 and l2
  # for each iteration compute learners boundary trajectories
  tmp <- dynamic.sim(i1*grspace, i2*grspace, gtruth, nsteps = niter, r=uprate, f=upfunc, fpars = upfunc.pars)
  lines(tmp, col=gray(.5)) # add lines showing trajectory
  points(tmp[1,1], tmp[1,2], pch = 16, col = "green", cex=.5)        # start point
  points(tmp[niter+1,1], tmp[niter+1,2], pch = 16, col = "red")      # end point
}
_____no_output_____
MIT
Models/AgentBasedModels.ipynb
ttrogers/frigo-chen-rogers
As you can see, when learner beliefs begin in the upper left or lower right quadrants, they converge on the truth. These are cases where each learner begins on a different side of the ground truth--so the oracle and the other source are always pulling in the same direction. The two learners do not perfectly agree until they meet at the ground truth. Something different happens in the upper right and lower left quadrants, where the two learners are on the same side of the ground truth. In these cases the two learners often end up agreeing with each other, and hence giving each other all their weight, before they meet at the ground truth. This then produces different groups of beliefs, depending on where the learners began. The same behavior is observed for all nonlinear weighting functions. Explaining the experimental data Does this weighting curve allow us to explain the pattern of results in the 4 experiments? Experiments 1 and 2 are fairly straightforward: the parameters for the weighting function were derived from Experiment 2, where one source always appeared 15 units from the midline, and the other was sampled with uniform probability from $b_v \in [0,150]$ units from the midline. Since experiment 1 also used a source 15 units from the midline, the same parameters for the weighting function should apply. Simply plugging in the experiment design parameters to the update.bound function gives us the predicted new boundary after learning, and hence the expected amount of shift, and the amount of weight given to the far source:
firstbound <- 150 # Expected initial boundary
newbound <- update.bound(150, 165, 50, fpars = c(25,4.5,1), weightfirst = "d") # new boundary after updating weight
bshift <- newbound - firstbound # expected amount of shift
dwt <- get.pwt(100) # expected weight given to far source
print(round(c(firstbound, newbound, bshift, dwt),2))
[1] 150.00 141.14 -8.86 0.20
MIT
Models/AgentBasedModels.ipynb
ttrogers/frigo-chen-rogers
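To see where these printed numbers come from, here is the far-source-first arithmetic with the parameters used above ($s = 25$, $o = 4.5$, learner at 150, sources at 165 and 50, so the far source is $d_f = 100$ away):

$$w_f = 1 - \frac{100 - 4.5}{(100 - 4.5) + 25} = 1 - \frac{95.5}{120.5} \approx 0.21,\qquad w_c = 1 - w_f \approx 0.79$$

$$\Delta_b = w_f\,(50 - 150) + w_c\,(165 - 150) \approx -20.7 + 11.9 \approx -8.9$$

so the new boundary is about $150 - 8.9 \approx 141.1$, matching the printout. The reported far-source weight of 0.20 comes from `get.pwt(100)` with the defaults $s = 24$, $o = 5$: $1 - 95/(95+24) \approx 0.20$.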
So given an initial boundary at 150 and sources at 165 and 50, the weighting curve from Experiment 2 predicts a final boundary around 141, with a total shift of about 9 units toward the distal source. The observed shift was 14 +/- 7.5 units toward the far source, a confidence interval that includes this prediction. The curve also predicts a weight of about 0.2 given to the far source, quite close to and within the confidence limits of the empirical estimate of 0.23 +/- 0.05.

What about experiment 3? This was motivated by the observation that, according to the curve in Experiment 2, a learner should give all weight to a source that is very close to her initial belief. So that is a fairly trivial prediction---total weight given to each group's close source should be 1; that source is very near the learner's boundary; the boundary does not change, which is what we observed.

Experiment 4 is the most challenging to think about. We want to know what the model predicts about boundary change when the learner has a boundary about 50 units from the midpoint and the two sources are symmetric around the midpoint but vary in their distance. Clearly when the two sources are both very close to the midpoint, the learner should shift her boundary toward the midpoint. And we know that, when one source grows near to the learner's boundary, it should get all the weight and the learner's boundary should not shift very much. But what happens as the sources grow more discrepant, with one pulling the learner toward one extreme and the other pulling the learner toward the other extreme?

Predictions from the weighting model are complicated by the fact that we only estimated parameters for the case in which one source is about 15 units from the learner's initial boundary. We don't know how/whether the slope of the weighting function will change when both sources differ in their distance from the initial boundary. So, we don't know exactly how the boundary is expected to change. Here we consider whether the model makes predictions that apply across a wide range of parameterizations.

The code below considers models in which the slope parameter varies from 16 (quite steep) to 100 (fairly shallow). For each parameter, we consider what the model predicts about how the boundary should change for sources situated near the midpoint or increasingly distal to it. For each model parameter, we plot a line that indicates the expected movement of the boundary, with positive numbers indicating a shift toward the midpoint and the X-axis indicating source distance from the midpoint. The colors indicate the steepness of the weighting function, with the red end of the spectrum showing what happens when this slope is very steep and the violet end showing predictions for much shallower slopes.
tmp <- rep(0, times = 100) # vector of zeros to contain predictions for one model
# for(i1 in c(1:100)) tmp[i1] <- update.bound(100, 150-i1, 150+i1, fpars = c(24,5,1))

# Create empty plot
plot(0,0, type = "n", xaxt = "n", xlab = "Source location", pch = 16,
     ylab = "Shift toward midpoint", ylim = c(-10,100), xlim = c(1,100))
mtext(side = 1, line = .5, at = c(100,50,1), adj = c(1,.5, 0), text = c("Midpoint", "Initial", "Pole"))

# Consider slope values ranging from 100 to 16
for(i2 in c(100:16)){
  # For each, compute prediction about how boundary changes as sources move
  # from midline to poles, and plot as a line
  for(i1 in c(1:100)) tmp[i1] <- update.bound(100, 150-i1, 150+i1, closebig=F, fpars = c(i2,5,1))
  lines(100:1, tmp-100, col = rainbow(100)[i2-15], lwd=10)
}

for(i1 in c(1:100)) tmp[i1] <- update.bound(100, 150-i1, 150+i1, closebig=F, fpars = c(24,5,1))
lines(100:1, tmp-100, col = 1, lwd=5)

abline(h=50, lty=2) # Expected shift if they go to midpoint
abline(h=0)         # Points above this line indicate shift toward the midpoint
abline(v = c(75, 50, 25,0), lty = 3)
_____no_output_____
MIT
Models/AgentBasedModels.ipynb
ttrogers/frigo-chen-rogers
The result shows that quantitative predictions about the amount of shift vary quite a bit with the parameters of the trust weighting function, especially as the sources grow further toward the poles. But all parameterizations yield the same U-shape: when the sources are closer to the midpoint than is the learner's initial boundary, the boundary is expected to shift toward the midpoint. For a "bubble" around the learner's initial boundary, little or no shift is expected, but as the sources grow even closer to the poles, all parameterizations predict that the boundary should again shift toward the midpoint. So the weighting model makes a qualitative prediction robust under several parameterizations: learner boundaries should shift toward the midpoint when the two sources are both near the midpoint, and *also* when they are far from the midpoint. In between there should be a bubble where learners shift their boundary less or not at all. This is the prediction tested in Experiment 4, where we observed a boundary-shift toward the midpoint of about 20 units when the two sources were near the midpoint (strong agree condition) *and* when they were both far from the midpoint (strong disagree condition), with no shift observed when sources were at intermediate distances from the midpoint (moderate-agree and moderate-disagree conditions). In the main paper we note this pattern is qualitatively similar to the U-shaped curve shown by the HEW model under different parameterizations as shown above, but it is also quantitatively similar to predictions of a heuristic evidence-weighting function with a somewhat shallower slope than that estimated in Experiment 2. In case it is useful, here is some code plotting the shape of the weighting curves corresponding to the parameters that produce the above figure.
plot(0,0,type = "n", xlim = c(0,150), ylim = c(0,1), xlab = "Distance of source", ylab = "Source weight")
for(i1 in c(100:5)) lines(0:150, get.pwt(0:150, i1, 5, 1), col = rainbow(100)[i1-15], lwd = 5)
lines(0:150, get.pwt(0:150, 24,5,1), col=1, lwd = 5)
_____no_output_____
MIT
Models/AgentBasedModels.ipynb
ttrogers/frigo-chen-rogers
Effects of social connections amongst learning pairs

In the above simulation of dynamic learners we considered pairs of learners with many different initial beliefs, each learning from the other and from a static oracle. What happens in a group of learners, where the two sources for any single learner are selected according to some policy on each epoch? For instance, suppose you run a social media platform and you want to decide which opinions to "share" with each learner. On each epoch, for each learner, you must choose two opinions to share. Here are some of the ways you might pick:

(1) two sources selected at random

(2) the two closest sources

(3) the closest and the farthest source

(4) the two farthest sources

(5) the two closest sources *outside the bubble*

The following code considers these possibilities. The function get.srcs takes a set of source boundaries and a learner's current boundary, and returns two source boundaries according to one of the policies noted above. The subsequent code shows what happens to 10 learners when that policy is applied over successive learning batches.

Function for selecting two sources for a learner under different policies

Given a learner's boundary and a set of source boundaries, return two sources for the learner according to some policy.
get.srcs <- function(l, s, p = "r", r=NA){
  # Function to get two source distances for a learner based on a policy
  # l = learner's current boundary
  # s = current boundary for all sources
  # p = policy for choosing 2 sources:
  #     r = random
  #     s = two most similar
  #     m = mixed ie most similar and most distal
  #     f = two farthest sources
  #     n = not too similar: choose closest outside of bubble
  # r = radius of similarity to avoid for policy n
  # o = outputs, returns distances for the two selected sources
  ###################
  d <- s - l            # vector of distances from learner's boundary
  s <- s[order(abs(d))] # Sort sources by magnitude of distance
  d <- d[order(abs(d))] # Sort distances by magnitude of distance
  s <- s[2:length(s)]   # remove first source, which is the learner herself
  d <- d[2:length(d)]   # remove first distance, which is the learner herself

  if(p=="r"){           # random policy
    s <- s[order(runif(length(s)))] # scramble order randomly
    o <- s[1:2]         # take first two elements
  }
  else if(p=="s"){      # two most similar policy
    o <- s[1:2]
  }
  else if(p=="m"){      # closest and farthest
    o <- s[c(1, length(s))]
  }
  else if(p=="f"){      # farthest 2
    o <- s[c(length(s)-1, length(s))]
  }
  else if(p=="n"){
    if(is.na(r)) stop("Radius for policy n not specified")
    if(sum(abs(d) > r) > 1){ # If there is at least 1 source outside radius
      s <- s[abs(d) > r]     # Remove sources within exclusion radius
      o <- s[1:2]            # Select closest two of those remaining
    }
    else o <- s[c(length(s)-1, length(s))] # otherwise take two farthest
  }
  else stop("Didn't recognize specified policy")
  o
}
_____no_output_____
MIT
Models/AgentBasedModels.ipynb
ttrogers/frigo-chen-rogers
Check to make sure the code works
get.srcs(5, 1:10, p="n", r=2)
_____no_output_____
MIT
Models/AgentBasedModels.ipynb
ttrogers/frigo-chen-rogers
Functions to simulate a population

The following code populates a matrix (out) in which columns indicate learners/sources and rows indicate learning epochs. For each epoch and learner, the code selects two sources according to some policy, as determined by the get.srcs function. The learner then updates her boundary according to the specified weighting function. Each learner updates her boundary once per epoch. This procedure iterates for 100 batches.

To see the results of different policies for selecting sources for a learner, use the following values for policy as an argument in the following function:

r = random

s = two most similar

f = two farthest (most dissimilar)

m = mixed (closest and farthest)

n = not-too-similar: closest sources outside some exclusion radius

**note**: for policy n, you need to specify the exclusion radius by also setting a value for exrad
sim.pop <- function(l=c(1:10)*14, o=150, nsteps=300, rate=.1, policy="r", exrad=5){
  init <- c(l, o)                # Initial boundaries for learners and oracles
  nl <- length(l)                # number of learners
  no <- length(o)                # number of oracles
  ns <- length(init)             # total number of sources
  out <- matrix(0, nsteps+1, ns) # Initialize output matrix
  out[1,] <- init                # Seed first row with starting boundaries
  for(i1 in c(1:nsteps)) {       # loop over batches
    for(i2 in c(1:nl)) {         # loop over learners
      sdists <- get.srcs(out[i1,i2], out[i1,], p=policy, r=exrad)               # get sources for learner i2
      out[i1+1,i2] <- update.bound(out[i1,i2], sdists[1], sdists[2], r=rate)    # update learner's boundary
    }
    out[i1+1,(nl+1):(nl+no)] <- out[i1,(nl+1):(nl+no)] # oracle boundaries are always the same
  }
  out
}

out <- sim.pop(policy="r")
_____no_output_____
MIT
Models/AgentBasedModels.ipynb
ttrogers/frigo-chen-rogers
The following code plots the change in learner boundaries over time generated by the preceding code.
plot.popsim <- function(d, nl=10){
  no <- dim(d)[2] - nl    # Number of oracles
  nsteps <- dim(d)[1] - 1
  # Plot initial boundaries and frame:
  plot(rep(0, times = nl), d[1,1:nl], pch=16, col = 3, xlim = c(0,nsteps), ylim = c(0,300),
       ylab = "Boundary", xlab = "Time")
  # Add lines showing how each learner's boundary changes over time:
  for(i1 in c(1:nl)) lines(c(0:nsteps), d[,i1])
  # Show final boundary as red dot:
  points(rep(nsteps, times = nl), d[nsteps+1,1:nl], pch = 16, col = 2, cex = 2)
  abline(h=d[1,c((nl+1):(nl+no))], lty = 2) # Dotted lines showing oracle boundaries
}
_____no_output_____
MIT
Models/AgentBasedModels.ipynb
ttrogers/frigo-chen-rogers
The code below runs a simulation with two oracles at 150 and random initial learner beliefs sampled uniformly from the range set by ibshift and ibspan. Change the policy parameter to one of the above to see the result of different source-selection policies.
n <- 10        # Number of simulated agents
no <- 2        # Number of oracles
gt <- 150      # ground truth provided by oracles
ibspan <- 100  # Maximum span of initial learner belief distribution
ibshift <- 140 # Shift from 0 of initial learner belief distribution
p <- "m"       # Policy for selecting sources, one of:
               # r = random, s = two most similar, f = two farthest (most dissimilar),
               # m = mixed (closest and farthest), n = not-too-similar

# Initial beliefs will be sampled uniformly from ibshift to (ibshift + ibspan)
ibounds <- runif(n)*ibspan + ibshift # Sample initial beliefs for n learners

# Run simulation
out <- sim.pop(l=ibounds , o=rep(gt, times = no), policy=p, rate = .1)

# Plot results
plot.popsim(out, nl=n)

# Compute mean absolute change by end of learning period
chng <- out[2:301,] - out[1:300,]
mean(abs(chng[300,1:10]))
sqrt(var(out[301,1:10]))
_____no_output_____
MIT
Models/AgentBasedModels.ipynb
ttrogers/frigo-chen-rogers
Copyright 2019 The TensorFlow Hub Authors. Licensed under the Apache License, Version 2.0 (the "License");
# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
_____no_output_____
Apache-2.0
site/ja/hub/tutorials/tf2_arbitrary_image_stylization.ipynb
phoenix-fork-tensorflow/docs-l10n
Fast style transfer for arbitrary styles

Based on the model code in [magenta](https://github.com/tensorflow/magenta/tree/master/magenta/models/arbitrary_image_stylization) and the publication: [Exploring the structure of a real-time, arbitrary neural artistic stylization network](https://arxiv.org/abs/1705.06830). *Golnaz Ghiasi, Honglak Lee, Manjunath Kudlur, Vincent Dumoulin, Jonathon Shlens*, Proceedings of the British Machine Vision Conference (BMVC), 2017.

Setup

Let's start by importing TF-2 and all relevant dependencies.
import functools
import os

from matplotlib import gridspec
import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub

print("TF Version: ", tf.__version__)
print("TF-Hub version: ", hub.__version__)
print("Eager mode enabled: ", tf.executing_eagerly())
print("GPU available: ", tf.test.is_gpu_available())

# @title Define image loading and visualization functions  { display-mode: "form" }

def crop_center(image):
  """Returns a cropped square image."""
  shape = image.shape
  new_shape = min(shape[1], shape[2])
  offset_y = max(shape[1] - shape[2], 0) // 2
  offset_x = max(shape[2] - shape[1], 0) // 2
  image = tf.image.crop_to_bounding_box(
      image, offset_y, offset_x, new_shape, new_shape)
  return image

@functools.lru_cache(maxsize=None)
def load_image(image_url, image_size=(256, 256), preserve_aspect_ratio=True):
  """Loads and preprocesses images."""
  # Cache image file locally.
  image_path = tf.keras.utils.get_file(os.path.basename(image_url)[-128:], image_url)
  # Load and convert to float32 numpy array, add batch dimension, and normalize to range [0, 1].
  img = tf.io.decode_image(
      tf.io.read_file(image_path),
      channels=3, dtype=tf.float32)[tf.newaxis, ...]
  img = crop_center(img)
  img = tf.image.resize(img, image_size, preserve_aspect_ratio=True)
  return img

def show_n(images, titles=('',)):
  n = len(images)
  image_sizes = [image.shape[1] for image in images]
  w = (image_sizes[0] * 6) // 320
  plt.figure(figsize=(w * n, w))
  gs = gridspec.GridSpec(1, n, width_ratios=image_sizes)
  for i in range(n):
    plt.subplot(gs[i])
    plt.imshow(images[i][0], aspect='equal')
    plt.axis('off')
    plt.title(titles[i] if len(titles) > i else '')
  plt.show()
_____no_output_____
Apache-2.0
site/ja/hub/tutorials/tf2_arbitrary_image_stylization.ipynb
phoenix-fork-tensorflow/docs-l10n
Let's get some images to work with.
# @title Load example images  { display-mode: "form" }

content_image_url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/f/fd/Golden_Gate_Bridge_from_Battery_Spencer.jpg/640px-Golden_Gate_Bridge_from_Battery_Spencer.jpg'  # @param {type:"string"}
style_image_url = 'https://upload.wikimedia.org/wikipedia/commons/0/0a/The_Great_Wave_off_Kanagawa.jpg'  # @param {type:"string"}
output_image_size = 384  # @param {type:"integer"}

# The content image size can be arbitrary.
content_img_size = (output_image_size, output_image_size)
# The style prediction model was trained with image size 256 and it's the
# recommended image size for the style image (though, other sizes work as
# well but will lead to different results).
style_img_size = (256, 256)  # Recommended to keep it at 256.

content_image = load_image(content_image_url, content_img_size)
style_image = load_image(style_image_url, style_img_size)
style_image = tf.nn.avg_pool(style_image, ksize=[3,3], strides=[1,1], padding='SAME')
show_n([content_image, style_image], ['Content image', 'Style image'])
_____no_output_____
Apache-2.0
site/ja/hub/tutorials/tf2_arbitrary_image_stylization.ipynb
phoenix-fork-tensorflow/docs-l10n
Import the TF-Hub module
# Load TF-Hub module.

hub_handle = 'https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2'
hub_module = hub.load(hub_handle)
_____no_output_____
Apache-2.0
site/ja/hub/tutorials/tf2_arbitrary_image_stylization.ipynb
phoenix-fork-tensorflow/docs-l10n
The signature of the Hub module used for image stylization is:

```
outputs = hub_module(content_image, style_image)
stylized_image = outputs[0]
```

Here, `content_image`, `style_image`, and `stylized_image` are expected to be 4-D tensors with shape `[batch_size, image_height, image_width, 3]`. In the current example we provide only a single image, so the batch dimension is 1, but the same module can be used to process several images at the same time. The input and output values of the images should be in the range [0, 1]. The shapes of the content and style image do not have to match, and the output image has the same shape as the content image.

Demonstrate image stylization
# Stylize content image with given style image.
# This is pretty fast within a few milliseconds on a GPU.

outputs = hub_module(tf.constant(content_image), tf.constant(style_image))
stylized_image = outputs[0]

# Visualize input images and the generated stylized image.

show_n([content_image, style_image, stylized_image],
       titles=['Original content image', 'Style image', 'Stylized image'])
_____no_output_____
Apache-2.0
site/ja/hub/tutorials/tf2_arbitrary_image_stylization.ipynb
phoenix-fork-tensorflow/docs-l10n
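Since the inputs are batched 4-D tensors, the same module call can stylize several images in one pass. The sketch below is only a minimal illustration of that point: it reuses `hub_module`, `content_image` and `style_image` from the cells above and simply repeats them along the batch dimension (with matching content and style batch sizes assumed).

```python
# Stack the same content/style pair twice along the batch dimension (batch_size = 2).
content_batch = tf.concat([content_image, content_image], axis=0)
style_batch = tf.concat([style_image, style_image], axis=0)

outputs = hub_module(tf.constant(content_batch), tf.constant(style_batch))
stylized_batch = outputs[0]
print(stylized_batch.shape)  # (2, height, width, 3): one stylized image per batch entry
```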
Let's try it on more images
# @title To Run: Load more images { display-mode: "form" }

content_urls = dict(
    sea_turtle='https://upload.wikimedia.org/wikipedia/commons/d/d7/Green_Sea_Turtle_grazing_seagrass.jpg',
    tuebingen='https://upload.wikimedia.org/wikipedia/commons/0/00/Tuebingen_Neckarfront.jpg',
    grace_hopper='https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg',
)
style_urls = dict(
    kanagawa_great_wave='https://upload.wikimedia.org/wikipedia/commons/0/0a/The_Great_Wave_off_Kanagawa.jpg',
    kandinsky_composition_7='https://upload.wikimedia.org/wikipedia/commons/b/b4/Vassily_Kandinsky%2C_1913_-_Composition_7.jpg',
    hubble_pillars_of_creation='https://upload.wikimedia.org/wikipedia/commons/6/68/Pillars_of_creation_2014_HST_WFC3-UVIS_full-res_denoised.jpg',
    van_gogh_starry_night='https://upload.wikimedia.org/wikipedia/commons/thumb/e/ea/Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg/1024px-Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg',
    turner_nantes='https://upload.wikimedia.org/wikipedia/commons/b/b7/JMW_Turner_-_Nantes_from_the_Ile_Feydeau.jpg',
    munch_scream='https://upload.wikimedia.org/wikipedia/commons/c/c5/Edvard_Munch%2C_1893%2C_The_Scream%2C_oil%2C_tempera_and_pastel_on_cardboard%2C_91_x_73_cm%2C_National_Gallery_of_Norway.jpg',
    picasso_demoiselles_avignon='https://upload.wikimedia.org/wikipedia/en/4/4c/Les_Demoiselles_d%27Avignon.jpg',
    picasso_violin='https://upload.wikimedia.org/wikipedia/en/3/3c/Pablo_Picasso%2C_1911-12%2C_Violon_%28Violin%29%2C_oil_on_canvas%2C_Kr%C3%B6ller-M%C3%BCller_Museum%2C_Otterlo%2C_Netherlands.jpg',
    picasso_bottle_of_rum='https://upload.wikimedia.org/wikipedia/en/7/7f/Pablo_Picasso%2C_1911%2C_Still_Life_with_a_Bottle_of_Rum%2C_oil_on_canvas%2C_61.3_x_50.5_cm%2C_Metropolitan_Museum_of_Art%2C_New_York.jpg',
    fire='https://upload.wikimedia.org/wikipedia/commons/3/36/Large_bonfire.jpg',
    derkovits_woman_head='https://upload.wikimedia.org/wikipedia/commons/0/0d/Derkovits_Gyula_Woman_head_1922.jpg',
    amadeo_style_life='https://upload.wikimedia.org/wikipedia/commons/8/8e/Untitled_%28Still_life%29_%281913%29_-_Amadeo_Souza-Cardoso_%281887-1918%29_%2817385824283%29.jpg',
    derkovtis_talig='https://upload.wikimedia.org/wikipedia/commons/3/37/Derkovits_Gyula_Talig%C3%A1s_1920.jpg',
    amadeo_cardoso='https://upload.wikimedia.org/wikipedia/commons/7/7d/Amadeo_de_Souza-Cardoso%2C_1915_-_Landscape_with_black_figure.jpg'
)

content_image_size = 384
style_image_size = 256
content_images = {k: load_image(v, (content_image_size, content_image_size)) for k, v in content_urls.items()}
style_images = {k: load_image(v, (style_image_size, style_image_size)) for k, v in style_urls.items()}
style_images = {k: tf.nn.avg_pool(style_image, ksize=[3,3], strides=[1,1], padding='SAME') for k, style_image in style_images.items()}

#@title Specify the main content image and the style you want to use.  { display-mode: "form" }

content_name = 'sea_turtle'  # @param ['sea_turtle', 'tuebingen', 'grace_hopper']
style_name = 'munch_scream'  # @param ['kanagawa_great_wave', 'kandinsky_composition_7', 'hubble_pillars_of_creation', 'van_gogh_starry_night', 'turner_nantes', 'munch_scream', 'picasso_demoiselles_avignon', 'picasso_violin', 'picasso_bottle_of_rum', 'fire', 'derkovits_woman_head', 'amadeo_style_life', 'derkovtis_talig', 'amadeo_cardoso']

stylized_image = hub_module(tf.constant(content_images[content_name]), tf.constant(style_images[style_name]))[0]
show_n([content_images[content_name], style_images[style_name], stylized_image],
       titles=['Original content image', 'Style image', 'Stylized image'])
_____no_output_____
Apache-2.0
site/ja/hub/tutorials/tf2_arbitrary_image_stylization.ipynb
phoenix-fork-tensorflow/docs-l10n
The data seems imbalanced and hence needs to be balanced using resampling techniques.
def date_q(date):
    """ Convert Date to Quarter when separated with / """
    qdate = date.strip().split('/')[1:]
    qdate1 = qdate[0]
    if qdate1 in ['01','02','03']:
        return (str('Q1' + '-' + qdate[1]))
    if qdate1 in ['04','05','06']:
        return (str('Q2' + '-' + qdate[1]))
    if qdate1 in ['07','08','09']:
        return (str('Q3' + '-' + qdate[1]))
    if qdate1 in ['10','11','12']:
        return (str('Q4' + '-' + qdate[1]))

def date_q1(date):
    """ Convert Date to Quarter when separated with - """
    qdate = date.strip().split('-')[0:2]
    qdate1 = qdate[1]
    qdate2 = str(qdate[0])
    if qdate1 in ['01','02','03']:
        return (str('Q1' + '-' + qdate2[2:]))
    if qdate1 in ['04','05','06']:
        return (str('Q2' + '-' + qdate2[2:]))
    if qdate1 in ['07','08','09']:
        return (str('Q3' + '-' + qdate2[2:]))
    if qdate1 in ['10','11','12']:
        return (str('Q4' + '-' + qdate2[2:]))
_____no_output_____
MIT
Hackathon_ML Sample Problems/Amex Dataset/ML on Amex Dataset_EDA.ipynb
girishvankudre/hackathon_ml_sample
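As a quick check of the two helpers above, here is how they behave on a couple of made-up date strings (the assumed formats are `dd/mm/yy` for `date_q` and `yyyy-mm-dd` for `date_q1`, matching how each function splits its input; these example dates are hypothetical, not taken from the dataset).

```python
# Hypothetical examples only -- the real date formats come from the campaign
# and transaction files loaded elsewhere in this notebook.
print(date_q('21/05/13'))     # 'Q2-13'  (the month field drives the quarter)
print(date_q('01/11/12'))     # 'Q4-12'
print(date_q1('2013-07-04'))  # 'Q3-13'  (year shortened to its last two digits)
```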
EDA for merged file
campaign_data_DATE = campaign_data.copy()
campaign_data_DATE.head()

campaign_data_DATE['start_date_q'] = campaign_data_DATE['start_date'].map(lambda x: date_q(x))
campaign_data_DATE['end_date_q'] = campaign_data_DATE['end_date'].map(lambda x: date_q(x))
campaign_data_DATE.head()

campaign_data_DATE.drop(['start_date','end_date'],axis=1,inplace=True)

cust_tran_data_4 = cust_tran_data.copy()
cust_tran_data_4 = pd.merge(cust_tran_data_4,coupon_data,how='inner',on='item_id')
cust_tran_data_4['tran_date_q'] = cust_tran_data_4['date'].map(lambda x: date_q1(x))
cust_tran_data_4.drop('date',axis=1,inplace=True)

cust_tran_data_4['tot_quantity'] = pd.DataFrame(cust_tran_data_4.groupby(['customer_id','item_id','coupon_id','tran_date_q'])['quantity'].transform('sum'))
cust_tran_data_4['tot_coupon_disc'] = pd.DataFrame(cust_tran_data_4.groupby(['customer_id','item_id','coupon_id','tran_date_q'])['coupon_discount'].transform('sum'))
cust_tran_data_4['tot_other_disc'] = pd.DataFrame(cust_tran_data_4.groupby(['customer_id','item_id','coupon_id','tran_date_q'])['other_discount'].transform('sum'))
cust_tran_data_4['tot_sell_price'] = pd.DataFrame(cust_tran_data_4.groupby(['customer_id','item_id','coupon_id','tran_date_q'])['selling_price'].transform('sum'))
cust_tran_data_4.drop(['quantity','coupon_discount','other_discount','selling_price'],axis=1,inplace=True)
cust_tran_data_4.drop_duplicates(subset=['customer_id','item_id','coupon_id','tran_date_q'], keep='first', inplace=True)

train_data_merge_DATE = pd.merge(train_data,cust_tran_data_4,how='inner',on=['customer_id','coupon_id'])
train_data_merge_DATE = pd.merge(train_data_merge_DATE,cust_demo_data,how='left',on='customer_id')
train_data_merge_DATE = pd.merge(train_data_merge_DATE,item_data,how='left',on='item_id')
train_data_merge_DATE = pd.merge(train_data_merge_DATE,campaign_data_DATE,how='left',on='campaign_id')

train_data_merge_EDA = train_data_merge_DATE.copy()
train_data_merge_EDA['no_of_children'].fillna('Unspecified',inplace=True)
train_data_merge_EDA['marital_status'].fillna('Unspecified',inplace=True)
train_data_merge_EDA['rented'].fillna('Unspecified',inplace=True)
train_data_merge_EDA['family_size'].fillna('Unspecified',inplace=True)
train_data_merge_EDA['age_range'].fillna('Unspecified',inplace=True)
train_data_merge_EDA['income_bracket'].fillna('Unspecified',inplace=True)

train_data_merge_EDA.info()
train_data_merge_EDA.describe()

col_uniq = pd.DataFrame(train_data_merge_EDA.nunique()).reset_index()
col_uniq.columns = ['DataColumns','UniqCount']
col_uniq_cnt = pd.DataFrame(train_data_merge_EDA.count(axis=0)).reset_index()
col_uniq_cnt.columns = ['DataColumns','UniqCount']
col_uniq['UniqCount_Pct'] = round((col_uniq['UniqCount']/col_uniq_cnt['UniqCount'])*100,2)
display (col_uniq)
_____no_output_____
MIT
Hackathon_ML Sample Problems/Amex Dataset/ML on Amex Dataset_EDA.ipynb
girishvankudre/hackathon_ml_sample
Let's look at which customer IDs use coupons the most and the least.
''' Customer ids using coupons at least once with their demographic details '''
a = pd.DataFrame(train_data_merge_EDA[(train_data_merge_EDA['redemption_status']==1)])
b = pd.DataFrame(a.groupby('customer_id')['redemption_status'].sum()).reset_index()
b.columns = ['customer_id','redeem_count']
b.sort_values(by='redeem_count',ascending=False,inplace=True)
print ('Top 5 Customers redeeming coupons')
display (b.head())

c = pd.DataFrame(train_data_merge_EDA[(train_data_merge_EDA['customer_id']==626)|(train_data_merge_EDA['customer_id']==1574)|
                                      (train_data_merge_EDA['customer_id']==1210)|(train_data_merge_EDA['customer_id']==235)|
                                      (train_data_merge_EDA['customer_id']==1534)][['customer_id','age_range','marital_status',
                                      'rented','family_size','no_of_children','income_bracket']])
c.drop_duplicates(subset=['customer_id'], keep='first', inplace=True)
display (c)
_____no_output_____
MIT
Hackathon_ML Sample Problems/Amex Dataset/ML on Amex Dataset_EDA.ipynb
girishvankudre/hackathon_ml_sample
Hypotheses to test:

1. Is the age_range of 36 to 55 mostly using coupons?

2. Are customers with marital status Married more likely to redeem coupons?

3. Are couples (family size of 2) using coupons the most?

4. Are people not on rent mostly using coupons?

5. Is no_of_children irrelevant to coupon redemption?

6. Is income bracket 5 using coupons the most?
''' 1. Is age_range of 36 to 55 is mostly using coupons??? '''
d = pd.DataFrame(train_data_merge_EDA.groupby(['age_range'])['redemption_status'].sum()).reset_index()
d.columns = ['age_range','tot_redeem']
d['percent'] = round(d['tot_redeem']/(d['tot_redeem'].sum())*100,2)
display (d)

%matplotlib notebook
train_data_merge_EDA.groupby(['age_range','redemption_status']).size().unstack().plot(kind='barh',stacked=True, width=0.7)
plt.show()
_____no_output_____
MIT
Hackathon_ML Sample Problems/Amex Dataset/ML on Amex Dataset_EDA.ipynb
girishvankudre/hackathon_ml_sample
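The same groupby → percentage → stacked-bar pattern is repeated for each demographic field in the hypotheses below, so a small helper could avoid copy-pasting it. This is just a refactoring sketch around the pandas/matplotlib calls already used above; it assumes `train_data_merge_EDA`, `display` and `plt` are available as in this notebook.

```python
def redeem_summary(df, col):
    """Return redemption counts/percentages for one column and draw the stacked bar chart."""
    summ = df.groupby(col)['redemption_status'].sum().reset_index()
    summ.columns = [col, 'tot_redeem']
    summ['percent'] = round(summ['tot_redeem'] / summ['tot_redeem'].sum() * 100, 2)
    df.groupby([col, 'redemption_status']).size().unstack().plot(kind='barh', stacked=True, width=0.7)
    plt.show()
    return summ

# e.g. display(redeem_summary(train_data_merge_EDA, 'marital_status'))
```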
Inference: Based on the available age demographic data, the 36 to 55 age range redeems coupons the most.
''' 2. Is Marital Status as Married are to redeem coupon??? '''
e = pd.DataFrame(train_data_merge_EDA.groupby(['marital_status'])['redemption_status'].sum()).reset_index()
e.columns = ['marital_status','tot_redeem']
e['percent'] = round(e['tot_redeem']/(e['tot_redeem'].sum())*100,2)
display (e)

%matplotlib notebook
train_data_merge_EDA.groupby(['marital_status','redemption_status']).size().unstack().plot(kind='barh',stacked=True, width=0.7)
plt.show()
_____no_output_____
MIT
Hackathon_ML Sample Problems/Amex Dataset/ML on Amex Dataset_EDA.ipynb
girishvankudre/hackathon_ml_sample
Inference: Since most customers have not disclosed their marital status, it is hard to confirm that married customers redeem more. Based on the data that is available, however, married customers redeem coupons the most, so at this stage we could consider giving more weight to a customer who discloses a marital status of Married.
''' 3. Is couple (family size of 2) are using coupons mostly??? '''
f = pd.DataFrame(train_data_merge_EDA.groupby(['family_size'])['redemption_status'].sum()).reset_index()
f.columns = ['family_size','tot_redeem']
f['percent'] = round(f['tot_redeem']/(f['tot_redeem'].sum())*100,2)
display (f)

%matplotlib notebook
train_data_merge_EDA.groupby(['family_size','redemption_status']).size().unstack().plot(kind='barh',stacked=True, width=0.7)
plt.show()
_____no_output_____
MIT
Hackathon_ML Sample Problems/Amex Dataset/ML on Amex Dataset_EDA.ipynb
girishvankudre/hackathon_ml_sample
Inference: If we treat a family size of 2 or more as a married couple (and project that ratio onto the Unspecified marital-status records), we can assume that married couples are the ones mostly redeeming coupons.
''' 4. Is people not on rent are mostly using coupons??? '''
g = pd.DataFrame(train_data_merge_EDA.groupby(['rented'])['redemption_status'].sum()).reset_index()
g.columns = ['rented','tot_redeem']
g['percent'] = round(g['tot_redeem']/(g['tot_redeem'].sum())*100,2)
display (g)

%matplotlib notebook
train_data_merge_EDA.groupby(['rented','redemption_status']).size().unstack().plot(kind='barh',stacked=True, width=0.7)
plt.show()
_____no_output_____
MIT
Hackathon_ML Sample Problems/Amex Dataset/ML on Amex Dataset_EDA.ipynb
girishvankudre/hackathon_ml_sample
Inference: Most customers have reported that they are not renting, and this group shows a greater tendency to redeem coupons, so we could give such customers more weight.
''' 5. Is no_of_children irrelevant to redeem coupon??? '''
h = pd.DataFrame(train_data_merge_EDA.groupby(['no_of_children'])['redemption_status'].sum()).reset_index()
h.columns = ['no_of_children','tot_redeem']
h['percent'] = round(h['tot_redeem']/(h['tot_redeem'].sum())*100,2)
display (h)

%matplotlib notebook
train_data_merge_EDA.groupby(['no_of_children','redemption_status']).size().unstack().plot(kind='barh',stacked=True, width=0.7)
plt.show()
_____no_output_____
MIT
Hackathon_ML Sample Problems/Amex Dataset/ML on Amex Dataset_EDA.ipynb
girishvankudre/hackathon_ml_sample
Inference: Since most customers preferred not to disclose their number of children, at this point we can assume that this field has no significant relationship with coupon redemption.
''' 6. Is income bracket of 5 are using coupons mostly??? '''
j = pd.DataFrame(train_data_merge_EDA.groupby(['income_bracket'])['redemption_status'].sum()).reset_index()
j.columns = ['income_bracket','tot_redeem']
j['percent'] = round(j['tot_redeem']/(j['tot_redeem'].sum())*100,2)
display (j)

%matplotlib notebook
train_data_merge_EDA.groupby(['income_bracket','redemption_status']).size().unstack().plot(kind='barh',stacked=True, width=0.7)
plt.show()
_____no_output_____
MIT
Hackathon_ML Sample Problems/Amex Dataset/ML on Amex Dataset_EDA.ipynb
girishvankudre/hackathon_ml_sample
Inference: Assuming bracket 5 represents the mid income group, customers in this group clearly show a tendency toward coupon redemption. Let's now explore the data from the coupon's perspective, in terms of the most redeemed coupons and the attributes associated with them.
''' Coupon ids getting redeemed very often and attributes associated with it '''
k = pd.DataFrame(a.groupby('coupon_id')['redemption_status'].sum()).reset_index()
k.columns = ['coupon_id','redeem_count']
k.sort_values(by='redeem_count',ascending=False,inplace=True)
print ('Top 5 Coupon ids redeemed')
display (k.head())

l1 = pd.DataFrame(train_data_merge_EDA[(train_data_merge_EDA['coupon_id']==21)|(train_data_merge_EDA['coupon_id']==6)|
                                       (train_data_merge_EDA['coupon_id']==22)|(train_data_merge_EDA['coupon_id']==9)|
                                       (train_data_merge_EDA['coupon_id']==8)][['coupon_id','item_id','brand','brand_type','category']])
l1.drop_duplicates(subset=['coupon_id','item_id'], keep='first', inplace=True)

l2 = pd.DataFrame(l1.groupby(['brand'])['brand'].count())
l2.columns = ['tot_brand_cnt']
l2['percent'] = round(l2['tot_brand_cnt']/(l2['tot_brand_cnt'].sum())*100,2)
l2.sort_values(by='tot_brand_cnt',ascending=False,inplace=True)
display (l2.head(10))

l3 = pd.DataFrame(l1.groupby(['brand_type'])['brand_type'].count())
l3.columns = ['tot_brand_type_cnt']
l3['percent'] = round(l3['tot_brand_type_cnt']/(l3['tot_brand_type_cnt'].sum())*100,2)
l3.sort_values(by='tot_brand_type_cnt',ascending=False,inplace=True)
display (l3)

l4 = pd.DataFrame(l1.groupby(['category'])['category'].count())
l4.columns = ['tot_category_cnt']
l4['percent'] = round(l4['tot_category_cnt']/(l4['tot_category_cnt'].sum())*100,2)
l4.sort_values(by='tot_category_cnt',ascending=False,inplace=True)
display (l4)
_____no_output_____
MIT
Hackathon_ML Sample Problems/Amex Dataset/ML on Amex Dataset_EDA.ipynb
girishvankudre/hackathon_ml_sample
Based on the visualization for the top 5 redeemed coupons, we can look for trends in the following:

1. Brand 56 is the top seller; verify whether a specific brand shows a tendency towards coupon redemption.

2. Verify whether brand type shows a tendency towards coupon redemption.

3. Verify whether category shows a tendency towards coupon redemption.

4. Verify whether campaign type shows a tendency towards coupon redemption.
''' 1. verify if specific brand shows tendency towards coupon redemption??? '''
m = pd.DataFrame(l1[(l1['brand']==56)|(l1['brand']==133)|(l1['brand']==1337)|(l1['brand']==544)|(l1['brand']==681)][['brand','brand_type','category']])
m.drop_duplicates(subset=['brand','brand_type','category'], keep='first', inplace=True)
display(m)
_____no_output_____
MIT
Hackathon_ML Sample Problems/Amex Dataset/ML on Amex Dataset_EDA.ipynb
girishvankudre/hackathon_ml_sample
Inference: Brand 56 offers a wide range of categories under the umbrella of food products and also shows a greater tendency towards coupon redemption. The other top 4 brands likewise belong to general food product categories under Grocery.
''' 2. Verify brand type shows tendency towards coupon redemption??? '''
m1 = pd.DataFrame(train_data_merge_EDA.groupby(['brand_type'])['redemption_status'].sum()).reset_index()
m1.columns = ['brand_type','tot_redeem']
m1['percent'] = round(m1['tot_redeem']/(m1['tot_redeem'].sum())*100,2)
display (m1)

%matplotlib notebook
train_data_merge_EDA.groupby(['brand_type','redemption_status']).size().unstack().plot(kind='barh',stacked=True, width=0.7)
plt.show()
_____no_output_____
MIT
Hackathon_ML Sample Problems/Amex Dataset/ML on Amex Dataset_EDA.ipynb
girishvankudre/hackathon_ml_sample
Inference: Coupon redemption percentage seems high when associated with an Established brand type.
''' 3. Verify category shows tendency towards coupon redemption??? '''
m2 = pd.DataFrame(train_data_merge_EDA.groupby(['category'])['redemption_status'].sum()).reset_index()
m2.columns = ['category','tot_redeem']
m2['percent'] = round(m2['tot_redeem']/(m2['tot_redeem'].sum())*100,2)
m2.sort_values(by='tot_redeem',ascending=False,inplace=True)
display (m2)

%matplotlib notebook
train_data_merge_EDA.groupby(['category','redemption_status']).size().unstack().plot(kind='barh',stacked=True, width=0.7)
plt.show()
_____no_output_____
MIT
Hackathon_ML Sample Problems/Amex Dataset/ML on Amex Dataset_EDA.ipynb
girishvankudre/hackathon_ml_sample
Inference: Grocery, Packaged Meat, Pharmaceutical, Natural Product, Dairy & Juice and Meat are the categories most associated with coupon redemption.
''' 4. Verify campaign type shows tendency towards coupon redemption??? '''
m3 = pd.DataFrame(train_data_merge_EDA.groupby(['campaign_type'])['redemption_status'].sum()).reset_index()
m3.columns = ['campaign_type','tot_redeem']
m3['percent'] = round(m3['tot_redeem']/(m3['tot_redeem'].sum())*100,2)
display (m3)

%matplotlib notebook
train_data_merge_EDA.groupby(['campaign_type','redemption_status']).size().unstack().plot(kind='barh',stacked=True, width=0.7)
plt.show()
_____no_output_____
MIT
Hackathon_ML Sample Problems/Amex Dataset/ML on Amex Dataset_EDA.ipynb
girishvankudre/hackathon_ml_sample
Inference: Campaign type X seems more associated with coupon redemption. Let's explore the coupon redemption trend based on campaign start date and transaction date.
n = pd.DataFrame(train_data_merge_EDA.groupby(['start_date_q','end_date_q'])['redemption_status'].sum()).reset_index()
n.columns = ['start_date_q','end_date_q','tot_redeem']
n['percent'] = round(n['tot_redeem']/(n['tot_redeem'].sum())*100,2)
n.sort_values(by='tot_redeem',ascending=False,inplace=True)
display (n)

%matplotlib notebook
train_data_merge_EDA.groupby(['start_date_q','end_date_q','redemption_status']).size().unstack().plot(kind='barh',stacked=True, width=0.7)
plt.show()
_____no_output_____
MIT
Hackathon_ML Sample Problems/Amex Dataset/ML on Amex Dataset_EDA.ipynb
girishvankudre/hackathon_ml_sample
Inference: Campaigns running between Q2-13 and Q3-13, followed by those between Q1-13 and Q2-13, show the strongest association with coupon redemption. So we could assume that campaigns spanning the early quarters of the year are more associated with coupon redemption.
n1 = pd.DataFrame(train_data_merge_EDA.groupby(['tran_date_q'])['redemption_status'].sum()).reset_index()
n1.columns = ['tran_date_q','tot_redeem']
n1['percent'] = round(n1['tot_redeem']/(n1['tot_redeem'].sum())*100,2)
n1.sort_values(by='tot_redeem',ascending=False,inplace=True)
display (n1)
_____no_output_____
MIT
Hackathon_ML Sample Problems/Amex Dataset/ML on Amex Dataset_EDA.ipynb
girishvankudre/hackathon_ml_sample
Copyright 2019 The TensorFlow Authors.
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from tensorflow.keras.preprocessing.text import Tokenizer

sentences = [
    'i love my dog',
    'I, love my cat',
    'You love my dog!'
]

tokenizer = Tokenizer(num_words = 100)
tokenizer.fit_on_texts(sentences)
word_index = tokenizer.word_index
print(word_index)
_____no_output_____
MIT
Informatics/Deep Learning/TensorFlow - deeplearning.ai/3. NLP/Course_3_Week_1_Lesson_1.ipynb
MarcosSalib/Cocktail_MOOC
Mask R-CNN - Train on Custom Dataset

This notebook shows how to train Mask R-CNN on your own dataset. You'd still need a GPU, though, because the network backbone is a Resnet101, which would be too slow to train on a CPU. On a GPU, you can start to get okay-ish results in a few minutes, and good results in less than an hour.
import os
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt

from config import Config
import utils
import model as modellib
import visualize
from model import log

%matplotlib inline

# Root directory of the project
ROOT_DIR = os.getcwd()

# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")

# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
    utils.download_trained_weights(COCO_MODEL_PATH)
c:\users\yaroslav_strontsitsk\appdata\local\continuum\anaconda3\envs\maskrcnn\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters
Using TensorFlow backend.
MIT
train_t_shirts.ipynb
lumstery/maskrcnn
Configurations
class ShapesConfig(Config):
    """Configuration for training on the toy shapes dataset.
    Derives from the base Config class and overrides values specific
    to the toy shapes dataset.
    """
    # Give the configuration a recognizable name
    NAME = "clothes"

    # Train on 1 GPU and 8 images per GPU. We can put multiple images on each
    # GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
    GPU_COUNT = 1
    IMAGES_PER_GPU = 8

    # Number of classes (including background)
    NUM_CLASSES = 1 + 1  # background + 1 shapes

    # Use small images for faster training. Set the limits of the small side
    # the large side, and that determines the image shape.
    IMAGE_MIN_DIM = 128
    IMAGE_MAX_DIM = 128

    # Use smaller anchors because our image and objects are small
    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)  # anchor side in pixels

    # Reduce training ROIs per image because the images are small and have
    # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
    TRAIN_ROIS_PER_IMAGE = 64

    # Use a small epoch since the data is simple
    STEPS_PER_EPOCH = 100

    # use small validation steps since the epoch is small
    VALIDATION_STEPS = 6

config = ShapesConfig()
config.display()
Configurations:
BACKBONE_SHAPES                [[32 32] [16 16] [ 8 8] [ 4 4] [ 2 2]]
BACKBONE_STRIDES               [4, 8, 16, 32, 64]
BATCH_SIZE                     8
BBOX_STD_DEV                   [0.1 0.1 0.2 0.2]
DETECTION_MAX_INSTANCES        100
DETECTION_MIN_CONFIDENCE       0.7
DETECTION_NMS_THRESHOLD        0.3
GPU_COUNT                      1
IMAGES_PER_GPU                 8
IMAGE_MAX_DIM                  128
IMAGE_MIN_DIM                  128
IMAGE_PADDING                  True
IMAGE_SHAPE                    [128 128 3]
LEARNING_MOMENTUM              0.9
LEARNING_RATE                  0.001
MASK_POOL_SIZE                 14
MASK_SHAPE                     [28, 28]
MAX_GT_INSTANCES               100
MEAN_PIXEL                     [123.7 116.8 103.9]
MINI_MASK_SHAPE                (56, 56)
NAME                           clothes
NUM_CLASSES                    2
POOL_SIZE                      7
POST_NMS_ROIS_INFERENCE        1000
POST_NMS_ROIS_TRAINING         2000
ROI_POSITIVE_RATIO             0.33
RPN_ANCHOR_RATIOS              [0.5, 1, 2]
RPN_ANCHOR_SCALES              (8, 16, 32, 64, 128)
RPN_ANCHOR_STRIDE              1
RPN_BBOX_STD_DEV               [0.1 0.1 0.2 0.2]
RPN_NMS_THRESHOLD              0.7
RPN_TRAIN_ANCHORS_PER_IMAGE    256
STEPS_PER_EPOCH                100
TRAIN_ROIS_PER_IMAGE           64
USE_MINI_MASK                  True
USE_RPN_ROIS                   True
VALIDATION_STEPS               6
WEIGHT_DECAY                   0.0001
MIT
train_t_shirts.ipynb
lumstery/maskrcnn
Notebook Preferences
def get_ax(rows=1, cols=1, size=8):
    """Return a Matplotlib Axes array to be used in
    all visualizations in the notebook. Provide a
    central point to control graph sizes.

    Change the default size attribute to control the size
    of rendered images
    """
    _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
    return ax
_____no_output_____
MIT
train_t_shirts.ipynb
lumstery/maskrcnn
Dataset

Load a dataset

Extend the Dataset class and add a method to load the shapes dataset, `load_images()`, and override the following methods:

* load_image()
* load_mask()
* image_reference()
class ShapesDataset(utils.Dataset):

    def load_images(self, count, prefix):
        """Load the requested number of images.
        count: number of images to load.
        """
        # Add classes
        self.add_class("clothes", 1, "t-shirt")

        # Add images
        for i in range(count):
            self.add_image("clothes", image_id=i, path="./t-shirt/"+prefix+"/"+str(i+1)+".jpg",)

    def image_reference(self, image_id):
        info = self.image_info[image_id]
        if info["source"] == "clothes":
            return info["clothes"]
        else:
            super(self.__class__).image_reference(self, image_id)

    def load_mask(self, image_id):
        """Load instance mask for shape of the given image ID."""
        info = self.image_info[image_id]
        img = cv2.imread(info['path'], cv2.IMREAD_UNCHANGED)
        #print("img.shape="+str(image.shape))
        #resized_image = cv2.resize(img, (128, 128))
        #print("resized_image.shape="+str(resized_image.shape))
        #img[np.where((image!=[255,255,255]).all(axis=2))] = [0,0,0]
        lower_black = np.array([235,235,235], dtype = "uint8")
        upper_black = np.array([255,255,255], dtype = "uint8")
        mask = cv2.inRange(img, lower_black, upper_black)
        mask = mask[..., np.newaxis]
        mask = mask.astype(dtype=bool)
        mask = np.logical_not(mask)
        #cv2.imshow('mask', mask)
        #cv2.waitKey()
        class_ids = np.array([1])
        return mask, class_ids.astype(np.int8)

# Training dataset
dataset_train = ShapesDataset()
dataset_train.load_images(20,"train")
dataset_train.prepare()

# Validation dataset
dataset_val = ShapesDataset()
dataset_val.load_images(5,"validation")
dataset_val.prepare()

# Load and display random samples
image_ids = np.random.choice(dataset_train.image_ids, 3)
for image_id in image_ids:
    image = dataset_train.load_image(image_id)
    mask, class_ids = dataset_train.load_mask(image_id)
    visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names,1)
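Because `load_mask()` is hand-written, a quick consistency check against `load_image()` can catch shape mismatches before training. This is a minimal sketch using only the dataset methods defined above; it is not part of the original notebook.

# Sanity-check one sample: image and mask must share spatial size,
# with one boolean mask channel per instance and one class id per channel.
image_id = random.choice(dataset_train.image_ids)
image = dataset_train.load_image(image_id)
mask, class_ids = dataset_train.load_mask(image_id)
print("image:", image.shape, "mask:", mask.shape, "class_ids:", class_ids)
assert image.shape[:2] == mask.shape[:2]
assert mask.shape[-1] == len(class_ids)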
_____no_output_____
MIT
train_t_shirts.ipynb
lumstery/maskrcnn
Create Model
# Create model in training mode
model = modellib.MaskRCNN(mode="training", config=config,
                          model_dir=MODEL_DIR)

# Which weights to start with?
init_with = "coco"  # imagenet, coco, or last

if init_with == "imagenet":
    model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
    # Load weights trained on MS COCO, but skip layers that
    # are different due to the different number of classes
    # See README for instructions to download the COCO weights
    model.load_weights(COCO_MODEL_PATH, by_name=True,
                       exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
                                "mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
    # Load the last model you trained and continue training
    model.load_weights(model.find_last()[1], by_name=True)
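The cell above only builds the model and loads weights; the usual next step in this Matterport-style workflow is to train the newly initialised head layers first. This is a minimal sketch using the repo's standard `model.train()` call; the epoch counts are illustrative choices, not values from this notebook.

# Train only the head layers so the COCO-pretrained backbone is left untouched
# while the new class-specific layers converge.
model.train(dataset_train, dataset_val,
            learning_rate=config.LEARNING_RATE,
            epochs=1,
            layers='heads')

# Optionally fine-tune all layers afterwards at a lower learning rate.
# model.train(dataset_train, dataset_val,
#             learning_rate=config.LEARNING_RATE / 10,
#             epochs=2,
#             layers="all")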
_____no_output_____
MIT
train_t_shirts.ipynb
lumstery/maskrcnn